diff --git a/go.mod b/go.mod index 97b3af1a7..cb5f2dc8c 100644 --- a/go.mod +++ b/go.mod @@ -27,10 +27,10 @@ require ( k8s.io/kubectl v0.32.3 kmodules.xyz/cert-manager-util v0.29.0 kmodules.xyz/client-go v0.32.7 - kmodules.xyz/custom-resources v0.32.0 + kmodules.xyz/custom-resources v0.32.2 kmodules.xyz/monitoring-agent-api v0.32.1 - kubedb.dev/apimachinery v0.58.0 - kubedb.dev/db-client-go v0.13.0 + kubedb.dev/apimachinery v0.59.0 + kubedb.dev/db-client-go v0.14.0 kubeops.dev/petset v0.0.12 sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 @@ -50,11 +50,12 @@ require ( github.com/cyphar/filepath-securejoin v0.3.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/elastic/elastic-transport-go/v8 v8.1.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.7.0 // indirect github.com/elastic/go-elasticsearch/v5 v5.6.1 // indirect github.com/elastic/go-elasticsearch/v6 v6.8.10 // indirect github.com/elastic/go-elasticsearch/v7 v7.15.1 // indirect github.com/elastic/go-elasticsearch/v8 v8.4.0 // indirect + github.com/elastic/go-elasticsearch/v9 v9.1.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect @@ -64,6 +65,7 @@ require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect @@ -116,6 +118,10 @@ require ( github.com/yudai/gojsondiff v1.0.0 // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect go.virtual-secrets.dev/apimachinery v0.0.1 // indirect golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect @@ -148,7 +154,7 @@ require ( kmodules.xyz/resource-metadata v0.32.1 // indirect kubeops.dev/csi-driver-cacerts v0.1.0 // indirect kubeops.dev/sidekick v0.0.11 // indirect - kubestash.dev/apimachinery v0.20.0 // indirect + kubestash.dev/apimachinery v0.21.0 // indirect open-cluster-management.io/api v1.0.0 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/go.sum b/go.sum index 2055e8ad4..202a2a2cf 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,9 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/elastic/elastic-transport-go/v8 v8.1.0 h1:NeqEz1ty4RQz+TVbUrpSU7pZ48XkzGWQj02k5koahIE= github.com/elastic/elastic-transport-go/v8 v8.1.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= +github.com/elastic/elastic-transport-go/v8 v8.7.0 
h1:OgTneVuXP2uip4BA658Xi6Hfw+PeIOod2rY3GVMGoVE= +github.com/elastic/elastic-transport-go/v8 v8.7.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-elasticsearch/v5 v5.6.1 h1:RnL2wcXepOT5SdoKMMO1j1OBX0vxHYbBtkQNL2E3xs4= github.com/elastic/go-elasticsearch/v5 v5.6.1/go.mod h1:r7uV7HidpfkYh7D8SB4lkS13TNlNy3oa5GNmTZvuVqY= github.com/elastic/go-elasticsearch/v6 v6.8.10 h1:2lN0gJ93gMBXvkhwih5xquldszpm8FlUwqG5sPzr6a8= @@ -96,6 +97,8 @@ github.com/elastic/go-elasticsearch/v7 v7.15.1 h1:Wd8RLHb5D8xPBU8vGlnLXyflkso9G+ github.com/elastic/go-elasticsearch/v7 v7.15.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.4.0 h1:Rn1mcqaIMcNT43hnx2H62cIFZ+B6mjWtzj85BDKrvCE= github.com/elastic/go-elasticsearch/v8 v8.4.0/go.mod h1:yY52i2Vj0unLz+N3Nwx1gM5LXwoj3h2dgptNGBYkMLA= +github.com/elastic/go-elasticsearch/v9 v9.1.0 h1:+qmeMi+Zuyc/BzTWxHUouGJX5aF567IA2De7OoDgagE= +github.com/elastic/go-elasticsearch/v9 v9.1.0/go.mod h1:2PB5YQPpY5tWbF65MRqzEXA31PZOdXCkloQSOZtU14I= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= @@ -122,9 +125,12 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -418,6 +424,16 @@ github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaD go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.35.0 
h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -692,8 +708,8 @@ kmodules.xyz/cert-manager-util v0.29.0 h1:HjEaEsv66xeMKiI8LwfTRls0z9V9L0qEvPBdOc kmodules.xyz/cert-manager-util v0.29.0/go.mod h1:0YuaPwp+P3Pw7Spxf6eDhOCMDDNXfNV2xIuxIexT1QU= kmodules.xyz/client-go v0.32.7 h1:vBAbp8vs4coYRhY4wqm1Hw/eBEDiVU238AyMLSoRJ1c= kmodules.xyz/client-go v0.32.7/go.mod h1:ZwLnc7UqEXUNSe43n/SnER6+7YAQCu38L2te6YefoHU= -kmodules.xyz/custom-resources v0.32.0 h1:wzmJTtswO+OmvtqYc70pVoopZyt5UJHKTom4Jh6bfhM= -kmodules.xyz/custom-resources v0.32.0/go.mod h1:aLFrfbUKS+AkKtxFYWpPGzuLNarRtGlkHwC07APHba8= +kmodules.xyz/custom-resources v0.32.2 h1:NkRqL/4AWHiXdT5WKFcJlBcvRuoNdeYIrBGvQIRJRn4= +kmodules.xyz/custom-resources v0.32.2/go.mod h1:YKFNcsFQU7Z3AcPvYVCdFtgAdWiG1Wd1HQMOxCrAoWc= kmodules.xyz/monitoring-agent-api v0.32.1 h1:F0cm5NJWfgiANw3eiKkXXSXoClMBpAolMXE/N7Xts74= kmodules.xyz/monitoring-agent-api v0.32.1/go.mod h1:zgRKiJcuK7FOHy0Y1TsONRbJfgnPCs8t4Zh/6Afr+yU= kmodules.xyz/objectstore-api v0.32.0 h1:A45lWKNb+02fJV1Mo4IDIpC1hWvLh/wuHKErovxKmQw= @@ -704,18 +720,18 @@ kmodules.xyz/prober v0.32.0 h1:8Z6pFRAu8kP0wwX2BooPCRy2SE6ZkUMHQmZDH5VUEGY= kmodules.xyz/prober v0.32.0/go.mod h1:h0fH4m9DaIwuNZq85zOlWUvBycyy4LvCPMUUhpS3iSE= kmodules.xyz/resource-metadata v0.32.1 h1:hWQbL0Xb+GaF7qn+rY0CNh7FUfKZw29VBUKTxjHFGYI= kmodules.xyz/resource-metadata v0.32.1/go.mod h1:wHC24BVzKb1gzkDCSI5l9CXK4AKD5gMamxEqVys50lI= -kubedb.dev/apimachinery v0.58.0 h1:bsDqWcYsfjbZ6Ca4PyXbKr7jj3dhzzlPBS5NfQ9CD+I= -kubedb.dev/apimachinery v0.58.0/go.mod h1:t6BwVURkvyLKpx7teRZ20hBkjAgF8JB1CCLSjBbbPqo= -kubedb.dev/db-client-go v0.13.0 h1:qxyqhCxHj48zzX/Gc1RzfpabzOCUb2VM2Sq7c8+MSYY= -kubedb.dev/db-client-go v0.13.0/go.mod h1:agK2zOLzI19YR9f0P2gt9FoU2KOgitCpI93VheE93Bo= +kubedb.dev/apimachinery v0.59.0 h1:6daQ4dS6xayoyaZ67N5NXxOD1wH4H7v5JKPSwjPDbAk= +kubedb.dev/apimachinery v0.59.0/go.mod h1:cdAy0z4ED/iunIQprmaB4yCSxgBkFaT5fcOT/ogxl0Q= +kubedb.dev/db-client-go v0.14.0 h1:fezFxcp9OJV4MNuqw70Kvj4f38xvbu/2mFxNyoFxRyc= +kubedb.dev/db-client-go v0.14.0/go.mod h1:plxwwWv8t3C56JZCFTNbAzg41Dnxktx/IC37ot3ch3A= kubeops.dev/csi-driver-cacerts v0.1.0 h1:WDgKNo5QAiMoVy4c/4ARWeCXJbqdcXdcn8VLImV4VZU= kubeops.dev/csi-driver-cacerts v0.1.0/go.mod h1:5a/ZOn5LFw26PPBpTKvsivBjcvVArOrJX24C+k+przk= kubeops.dev/petset v0.0.12 h1:NSFEeuckBVm44f3cAL4HhcQWvnfOE4qgbfug7+FEyaY= kubeops.dev/petset v0.0.12/go.mod h1:akG9QH1JaOZQcuQKEKWvkVWI8P3im/5O554aTRvB6Y0= kubeops.dev/sidekick v0.0.11 h1:OydXdIH6cYSiWxKIWvrywk95WhhHSERkc7RNPOmTekc= kubeops.dev/sidekick v0.0.11/go.mod h1:90KMNmJOPoMKHbrdC1cpEsMx+1KjTea/lHDAbGRDzHc= -kubestash.dev/apimachinery v0.20.0 h1:X4v7u/4N+RT3bP17VlSVXwvCZ69JLZtBUNiyhyV1bfo= -kubestash.dev/apimachinery v0.20.0/go.mod h1:YH3Jf9daruSJPd83EPdWTtRLQHuevEFDstpkPG3kIGE= +kubestash.dev/apimachinery v0.21.0 h1:2qHROfY6RdxNjoEPm2yzQOuaqKlIeEMEn7bP+a/xezQ= +kubestash.dev/apimachinery v0.21.0/go.mod h1:1NMimrVgNaqO5mEtrCVa48XUCA2+lrjtLMq9jZsNeBc= open-cluster-management.io/api v1.0.0 h1:54QllH9DTudCk6VrGt0q8CDsE3MghqJeTaTN4RHZpE0= open-cluster-management.io/api v1.0.0/go.mod h1:/OeqXycNBZQoe3WG6ghuWsMgsKGuMZrK8ZpsU6gWL0Y= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git 
a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/connection.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/connection.go index d8ca8bfc6..9050c2d20 100644 --- a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/connection.go +++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/connection.go @@ -33,13 +33,11 @@ var ( ) // Selector defines the interface for selecting connections from the pool. -// type Selector interface { Select([]*Connection) (*Connection, error) } // ConnectionPool defines the interface for the connection pool. -// type ConnectionPool interface { Next() (*Connection, error) // Next returns the next available connection. OnSuccess(*Connection) error // OnSuccess reports that the connection was successful. @@ -47,8 +45,11 @@ type ConnectionPool interface { URLs() []*url.URL // URLs returns the list of URLs of available connections. } +type UpdatableConnectionPool interface { + Update([]*Connection) error // Update injects newly found nodes in the cluster. +} + // Connection represents a connection to a node. -// type Connection struct { sync.Mutex @@ -63,6 +64,15 @@ type Connection struct { Attributes map[string]interface{} } +func (c *Connection) Cmp(connection *Connection) bool { + if c.URL.Hostname() == connection.URL.Hostname() { + if c.URL.Port() == connection.URL.Port() { + return c.URL.Path == connection.URL.Path + } + } + return false +} + type singleConnectionPool struct { connection *Connection @@ -86,7 +96,6 @@ type roundRobinSelector struct { } // NewConnectionPool creates and returns a default connection pool. -// func NewConnectionPool(conns []*Connection, selector Selector) (ConnectionPool, error) { if len(conns) == 1 { return &singleConnectionPool{connection: conns[0]}, nil @@ -98,7 +107,6 @@ func NewConnectionPool(conns []*Connection, selector Selector) (ConnectionPool, } // Next returns the connection from pool. -// func (cp *singleConnectionPool) Next() (*Connection, error) { return cp.connection, nil } @@ -115,7 +123,6 @@ func (cp *singleConnectionPool) URLs() []*url.URL { return []*url.URL{cp.connect func (cp *singleConnectionPool) connections() []*Connection { return []*Connection{cp.connection} } // Next returns a connection from pool, or an error. -// func (cp *statusConnectionPool) Next() (*Connection, error) { cp.Lock() defer cp.Unlock() @@ -136,25 +143,30 @@ func (cp *statusConnectionPool) Next() (*Connection, error) { } // OnSuccess marks the connection as successful. -// func (cp *statusConnectionPool) OnSuccess(c *Connection) error { + // Short-circuit for live connection + c.Lock() + if !c.IsDead { + c.Unlock() + return nil + } + c.Unlock() + + cp.Lock() + defer cp.Unlock() + c.Lock() defer c.Unlock() - // Short-circuit for live connection if !c.IsDead { return nil } c.markAsHealthy() - - cp.Lock() - defer cp.Unlock() return cp.resurrect(c, true) } // OnFailure marks the connection as failed. -// func (cp *statusConnectionPool) OnFailure(c *Connection) error { cp.Lock() defer cp.Unlock() @@ -176,20 +188,6 @@ func (cp *statusConnectionPool) OnFailure(c *Connection) error { cp.scheduleResurrect(c) c.Unlock() - // Push item to dead list and sort slice by number of failures - cp.dead = append(cp.dead, c) - sort.Slice(cp.dead, func(i, j int) bool { - c1 := cp.dead[i] - c2 := cp.dead[j] - c1.Lock() - c2.Lock() - defer c1.Unlock() - defer c2.Unlock() - - res := c1.Failures > c2.Failures - return res - }) - // Check if connection exists in the list, return error if not. 
index := -1 for i, conn := range cp.live { @@ -201,6 +199,16 @@ func (cp *statusConnectionPool) OnFailure(c *Connection) error { return errors.New("connection not in live list") } + // Push item to dead list and sort slice by number of failures + cp.dead = append(cp.dead, c) + sort.Slice(cp.dead, func(i, j int) bool { + c1 := cp.dead[i] + c2 := cp.dead[j] + + res := c1.Failures > c2.Failures + return res + }) + // Remove item; https://github.com/golang/go/wiki/SliceTricks copy(cp.live[index:], cp.live[index+1:]) cp.live = cp.live[:len(cp.live)-1] @@ -208,8 +216,72 @@ func (cp *statusConnectionPool) OnFailure(c *Connection) error { return nil } +// Update merges the existing live and dead connections with the latest nodes discovered from the cluster. +// ConnectionPool must be locked before calling. +func (cp *statusConnectionPool) Update(connections []*Connection) error { + if len(connections) == 0 { + return errors.New("no connections provided, connection pool left untouched") + } + + // Remove hosts that are no longer in the new list of connections + for i := 0; i < len(cp.live); i++ { + found := false + for _, c := range connections { + if cp.live[i].Cmp(c) { + found = true + break + } + } + + if !found { + // Remove item; https://github.com/golang/go/wiki/SliceTricks + copy(cp.live[i:], cp.live[i+1:]) + cp.live = cp.live[:len(cp.live)-1] + i-- + } + } + + // Remove hosts that are no longer in the dead list of connections + for i := 0; i < len(cp.dead); i++ { + found := false + for _, c := range connections { + if cp.dead[i].Cmp(c) { + found = true + break + } + } + + if !found { + copy(cp.dead[i:], cp.dead[i+1:]) + cp.dead = cp.dead[:len(cp.dead)-1] + i-- + } + } + + // Add new connections that are not already in the live or dead list + for _, c := range connections { + found := false + for _, conn := range cp.live { + if conn.Cmp(c) { + found = true + break + } + } + for _, conn := range cp.dead { + if conn.Cmp(c) { + found = true + break + } + } + if !found { + cp.live = append(cp.live, c) + } + } + + return nil +} + // URLs returns the list of URLs of available connections. -// func (cp *statusConnectionPool) URLs() []*url.URL { var urls []*url.URL @@ -233,7 +305,6 @@ func (cp *statusConnectionPool) connections() []*Connection { // resurrect adds the connection to the list of available connections. // When removeDead is true, it also removes it from the dead list. // The calling code is responsible for locking. -// func (cp *statusConnectionPool) resurrect(c *Connection, removeDead bool) error { if debugLogger != nil { debugLogger.Logf("Resurrecting %s\n", c.URL) @@ -260,7 +331,6 @@ func (cp *statusConnectionPool) resurrect(c *Connection, removeDead bool) error } // scheduleResurrect schedules the connection to be resurrected. -// func (cp *statusConnectionPool) scheduleResurrect(c *Connection) { factor := math.Min(float64(c.Failures-1), float64(defaultResurrectTimeoutFactorCutoff)) timeout := time.Duration(defaultResurrectTimeoutInitial.Seconds() * math.Exp2(factor) * float64(time.Second)) @@ -287,7 +357,6 @@ func (cp *statusConnectionPool) scheduleResurrect(c *Connection) { } // Select returns the connection in a round-robin fashion. -// func (s *roundRobinSelector) Select(conns []*Connection) (*Connection, error) { s.Lock() defer s.Unlock() @@ -297,7 +366,6 @@ func (s *roundRobinSelector) Select(conns []*Connection) (*Connection, error) { } // markAsDead marks the connection as dead. 
-// func (c *Connection) markAsDead() { c.IsDead = true if c.DeadSince.IsZero() { @@ -307,13 +375,11 @@ func (c *Connection) markAsDead() { } // markAsLive marks the connection as alive. -// func (c *Connection) markAsLive() { c.IsDead = false } // markAsHealthy marks the connection as healthy. -// func (c *Connection) markAsHealthy() { c.IsDead = false c.DeadSince = time.Time{} @@ -321,7 +387,6 @@ func (c *Connection) markAsHealthy() { } // String returns a readable connection representation. -// func (c *Connection) String() string { c.Lock() defer c.Unlock() diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/discovery.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/discovery.go index c4f857a37..4c2ea21e8 100644 --- a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/discovery.go +++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/discovery.go @@ -18,6 +18,7 @@ package elastictransport import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -30,7 +31,6 @@ import ( ) // Discoverable defines the interface for transports supporting node discovery. -// type Discoverable interface { DiscoverNodes() error } @@ -38,7 +38,6 @@ type Discoverable interface { // nodeInfo represents the information about node in a cluster. // // See: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html -// type nodeInfo struct { ID string Name string @@ -51,7 +50,6 @@ type nodeInfo struct { } // DiscoverNodes reloads the client connections by fetching information from the cluster. -// func (c *Client) DiscoverNodes() error { var conns []*Connection @@ -109,10 +107,18 @@ func (c *Client) DiscoverNodes() error { if c.poolFunc != nil { c.pool = c.poolFunc(conns, c.selector) } else { - // TODO(karmi): Replace only live connections, leave dead scheduled for resurrect? - c.pool, err = NewConnectionPool(conns, c.selector) - if err != nil { - return err + if p, ok := c.pool.(UpdatableConnectionPool); ok { + err = p.Update(conns) + if err != nil { + if debugLogger != nil { + debugLogger.Logf("Error updating pool: %s\n", err) + } + } + } else { + c.pool, err = NewConnectionPool(conns, c.selector) + if err != nil { + return err + } } } @@ -125,7 +131,17 @@ func (c *Client) getNodesInfo() ([]nodeInfo, error) { scheme = c.urls[0].Scheme ) - req, err := http.NewRequest("GET", "/_nodes/http", nil) + var ctx context.Context + var cancel context.CancelFunc + + if c.discoverNodeTimeout != nil { + ctx, cancel = context.WithTimeout(context.Background(), *c.discoverNodeTimeout) + defer cancel() + } else { + ctx = context.Background() // Use default context if no timeout is set + } + + req, err := http.NewRequestWithContext(ctx, "GET", "/_nodes/http", nil) if err != nil { return out, err } @@ -141,6 +157,7 @@ func (c *Client) getNodesInfo() ([]nodeInfo, error) { c.setReqURL(conn.URL, req) c.setReqAuth(conn.URL, req) c.setReqUserAgent(req) + c.setReqGlobalHeader(req) res, err := c.transport.RoundTrip(req) if err != nil { diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/elastictransport.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/elastictransport.go index 5f209211d..1595cd509 100644 --- a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/elastictransport.go +++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/elastictransport.go @@ -47,13 +47,16 @@ var ( ) // Interface defines the interface for HTTP client. 
-// type Interface interface { Perform(*http.Request) (*http.Response, error) } +// Instrumented allows to retrieve the current transport Instrumentation +type Instrumented interface { + InstrumentationEnabled() Instrumentation +} + // Config represents the configuration of HTTP client. -// type Config struct { UserAgent string @@ -83,12 +86,18 @@ type Config struct { MaxRetries int RetryBackoff func(attempt int) time.Duration - CompressRequestBody bool + CompressRequestBody bool + CompressRequestBodyLevel int + // If PoolCompressor is true, a sync.Pool based gzip writer is used. Should be enabled with CompressRequestBody. + PoolCompressor bool EnableMetrics bool EnableDebugLogger bool + Instrumentation Instrumentation + DiscoverNodesInterval time.Duration + DiscoverNodeTimeout *time.Duration Transport http.RoundTripper Logger Logger @@ -100,7 +109,6 @@ type Config struct { } // Client represents the HTTP client. -// type Client struct { sync.Mutex @@ -123,8 +131,13 @@ type Client struct { retryBackoff func(attempt int) time.Duration discoverNodesInterval time.Duration discoverNodesTimer *time.Timer + discoverNodeTimeout *time.Duration + + compressRequestBody bool + compressRequestBodyLevel int + gzipCompressor gzipCompressor - compressRequestBody bool + instrumentation Instrumentation metrics *metrics @@ -138,10 +151,13 @@ type Client struct { // New creates new transport client. // // http.DefaultTransport will be used if no transport is passed in the configuration. -// func New(cfg Config) (*Client, error) { if cfg.Transport == nil { - cfg.Transport = http.DefaultTransport + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + return nil, errors.New("cannot clone http.DefaultTransport") + } + cfg.Transport = defaultTransport.Clone() } if transport, ok := cfg.Transport.(*http.Transport); ok { @@ -216,12 +232,19 @@ func New(cfg Config) (*Client, error) { retryBackoff: cfg.RetryBackoff, discoverNodesInterval: cfg.DiscoverNodesInterval, - compressRequestBody: cfg.CompressRequestBody, + compressRequestBody: cfg.CompressRequestBody, + compressRequestBodyLevel: cfg.CompressRequestBodyLevel, transport: cfg.Transport, logger: cfg.Logger, selector: cfg.Selector, poolFunc: cfg.ConnectionPoolFunc, + + instrumentation: cfg.Instrumentation, + } + + if cfg.DiscoverNodeTimeout != nil { + client.discoverNodeTimeout = cfg.DiscoverNodeTimeout } if client.poolFunc != nil { @@ -251,11 +274,20 @@ func New(cfg Config) (*Client, error) { }) } + if client.compressRequestBodyLevel == 0 { + client.compressRequestBodyLevel = gzip.DefaultCompression + } + + if cfg.PoolCompressor { + client.gzipCompressor = newPooledGzipCompressor(client.compressRequestBodyLevel) + } else { + client.gzipCompressor = newSimpleGzipCompressor(client.compressRequestBodyLevel) + } + return &client, nil } // Perform executes the request and returns a response or error. 
-// func (c *Client) Perform(req *http.Request) (*http.Response, error) { var ( res *http.Response @@ -275,17 +307,15 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { if req.Body != nil && req.Body != http.NoBody { if c.compressRequestBody { - var buf bytes.Buffer - zw := gzip.NewWriter(&buf) - if _, err := io.Copy(zw, req.Body); err != nil { - return nil, fmt.Errorf("failed to compress request body: %s", err) - } - if err := zw.Close(); err != nil { - return nil, fmt.Errorf("failed to compress request body (during close): %s", err) + buf, err := c.gzipCompressor.compress(req.Body) + if err != nil { + return nil, err } + defer c.gzipCompressor.collectBuffer(buf) req.GetBody = func() (io.ReadCloser, error) { - r := buf + // Copy value of buf so it's not destroyed on first read + r := *buf return ioutil.NopCloser(&r), nil } req.Body, _ = req.GetBody() @@ -299,6 +329,7 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { buf.ReadFrom(req.Body) req.GetBody = func() (io.ReadCloser, error) { + // Copy value of buf so it's not destroyed on first read r := buf return ioutil.NopCloser(&r), nil } @@ -307,6 +338,7 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { } } + originalPath := req.URL.Path for i := 0; i <= c.maxRetries; i++ { var ( conn *Connection @@ -380,6 +412,10 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { c.metrics.Unlock() } + if res != nil && c.instrumentation != nil { + c.instrumentation.AfterResponse(req.Context(), res) + } + // Retry on configured response statuses if res != nil && !c.disableRetry { for _, code := range c.retryOnStatus { @@ -419,6 +455,10 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { break } } + + // Re-init the path of the request to its original state + // This will be re-enriched by the connection upon retry + req.URL.Path = originalPath } // TODO(karmi): Wrap error @@ -426,12 +466,14 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) { } // URLs returns a list of transport URLs. -// -// func (c *Client) URLs() []*url.URL { return c.pool.URLs() } +func (c *Client) InstrumentationEnabled() Instrumentation { + return c.instrumentation +} + func (c *Client) setReqURL(u *url.URL, req *http.Request) *http.Request { req.URL.Scheme = u.Scheme req.URL.Host = u.Host diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/gzip.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/gzip.go new file mode 100644 index 000000000..843367865 --- /dev/null +++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/gzip.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package elastictransport
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"sync"
+)
+
+type gzipCompressor interface {
+	// compress compresses the given io.ReadCloser and returns the gzip compressed data as a bytes.Buffer.
+	compress(io.ReadCloser) (*bytes.Buffer, error)
+	// collectBuffer collects the given bytes.Buffer for reuse.
+	collectBuffer(*bytes.Buffer)
+}
+
+// simpleGzipCompressor is a simple implementation of gzipCompressor that creates a new gzip.Writer for each call.
+type simpleGzipCompressor struct {
+	compressionLevel int
+}
+
+func newSimpleGzipCompressor(compressionLevel int) gzipCompressor {
+	return &simpleGzipCompressor{
+		compressionLevel: compressionLevel,
+	}
+}
+
+func (sg *simpleGzipCompressor) compress(rc io.ReadCloser) (*bytes.Buffer, error) {
+	var buf bytes.Buffer
+	zw, err := gzip.NewWriterLevel(&buf, sg.compressionLevel)
+	if err != nil {
+		return nil, fmt.Errorf("failed setting up compress request body (level %d): %s",
+			sg.compressionLevel, err)
+	}
+
+	if _, err = io.Copy(zw, rc); err != nil {
+		return nil, fmt.Errorf("failed to compress request body: %s", err)
+	}
+	if err := zw.Close(); err != nil {
+		return nil, fmt.Errorf("failed to compress request body (during close): %s", err)
+	}
+	return &buf, nil
+}
+
+func (sg *simpleGzipCompressor) collectBuffer(buf *bytes.Buffer) {
+	// no-op
+}
+
+type pooledGzipCompressor struct {
+	gzipWriterPool   *sync.Pool
+	bufferPool       *sync.Pool
+	compressionLevel int
+}
+
+type gzipWriter struct {
+	writer *gzip.Writer
+	err    error
+}
+
+// newPooledGzipCompressor returns a new pooledGzipCompressor that uses a sync.Pool to reuse gzip.Writers.
+func newPooledGzipCompressor(compressionLevel int) gzipCompressor {
+	gzipWriterPool := sync.Pool{
+		New: func() any {
+			writer, err := gzip.NewWriterLevel(io.Discard, compressionLevel)
+			return &gzipWriter{
+				writer: writer,
+				err:    err,
+			}
+		},
+	}
+
+	bufferPool := sync.Pool{
+		New: func() any {
+			return new(bytes.Buffer)
+		},
+	}
+
+	return &pooledGzipCompressor{
+		gzipWriterPool:   &gzipWriterPool,
+		bufferPool:       &bufferPool,
+		compressionLevel: compressionLevel,
+	}
+}
+
+func (pg *pooledGzipCompressor) compress(rc io.ReadCloser) (*bytes.Buffer, error) {
+	writer := pg.gzipWriterPool.Get().(*gzipWriter)
+	defer pg.gzipWriterPool.Put(writer)
+
+	if writer.err != nil {
+		return nil, fmt.Errorf("failed setting up compress request body (level %d): %s",
+			pg.compressionLevel, writer.err)
+	}
+
+	buf := pg.bufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	writer.writer.Reset(buf)
+
+	if _, err := io.Copy(writer.writer, rc); err != nil {
+		return nil, fmt.Errorf("failed to compress request body: %s", err)
+	}
+	if err := writer.writer.Close(); err != nil {
+		return nil, fmt.Errorf("failed to compress request body (during close): %s", err)
+	}
+	return buf, nil
+}
+
+func (pg *pooledGzipCompressor) collectBuffer(buf *bytes.Buffer) {
+	pg.bufferPool.Put(buf)
+}
diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/instrumentation.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/instrumentation.go
new file mode 100644
index 000000000..55924df7b
--- /dev/null
+++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/instrumentation.go
@@ -0,0 +1,217 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elastictransport + +import ( + "bytes" + "context" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "io" + "net/http" + "strconv" +) + +const schemaUrl = "https://opentelemetry.io/schemas/1.21.0" +const tracerName = "elasticsearch-api" + +// Constants for Semantic Convention +// see https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/ for details. +const attrDbSystem = "db.system" +const attrDbStatement = "db.statement" +const attrDbOperation = "db.operation" +const attrDbElasticsearchClusterName = "db.elasticsearch.cluster.name" +const attrDbElasticsearchNodeName = "db.elasticsearch.node.name" +const attrHttpRequestMethod = "http.request.method" +const attrUrlFull = "url.full" +const attrServerAddress = "server.address" +const attrServerPort = "server.port" +const attrPathParts = "db.elasticsearch.path_parts." + +// Instrumentation defines the interface the client uses to propagate information about the requests. +// Each method is called with the current context or request for propagation. +type Instrumentation interface { + // Start creates the span before building the request, returned context will be propagated to the request by the client. + Start(ctx context.Context, name string) context.Context + + // Close will be called once the client has returned. + Close(ctx context.Context) + + // RecordError propagates an error. + RecordError(ctx context.Context, err error) + + // RecordPathPart provides the path variables, called once per variable in the url. + RecordPathPart(ctx context.Context, pathPart, value string) + + // RecordRequestBody provides the endpoint name as well as the current request payload. + RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser + + // BeforeRequest provides the request and endpoint name, called before sending to the server. + BeforeRequest(req *http.Request, endpoint string) + + // AfterRequest provides the request, system used (e.g. elasticsearch) and endpoint name. + // Called after the request has been enhanced with the information from the transport and sent to the server. + AfterRequest(req *http.Request, system, endpoint string) + + // AfterResponse provides the response. + AfterResponse(ctx context.Context, res *http.Response) +} + +type ElasticsearchOpenTelemetry struct { + tracer trace.Tracer + recordBody bool +} + +// NewOtelInstrumentation returns a new instrument for Open Telemetry traces +// If no provider is passed, the instrumentation will fall back to the global otel provider. +// captureSearchBody sets the query capture behavior for search endpoints. +// version should be set to the version provided by the caller. 
+func NewOtelInstrumentation(provider trace.TracerProvider, captureSearchBody bool, version string, options ...trace.TracerOption) *ElasticsearchOpenTelemetry {
+	if provider == nil {
+		provider = otel.GetTracerProvider()
+	}
+
+	options = append(options, trace.WithInstrumentationVersion(version), trace.WithSchemaURL(schemaUrl))
+
+	return &ElasticsearchOpenTelemetry{
+		tracer: provider.Tracer(
+			tracerName,
+			options...,
+		),
+		recordBody: captureSearchBody,
+	}
+}
+
+// Start begins a new span in the given context with the provided name.
+// Span will always have a kind set to trace.SpanKindClient.
+// The span-aware context is returned for use within the client.
+func (i ElasticsearchOpenTelemetry) Start(ctx context.Context, name string) context.Context {
+	newCtx, _ := i.tracer.Start(ctx, name, trace.WithSpanKind(trace.SpanKindClient))
+	return newCtx
+}
+
+// Close ends the span, preferably deferred by the client once started.
+func (i ElasticsearchOpenTelemetry) Close(ctx context.Context) {
+	span := trace.SpanFromContext(ctx)
+	if span.IsRecording() {
+		span.End()
+	}
+}
+
+// shouldRecordRequestBody filters for search endpoints.
+func (i ElasticsearchOpenTelemetry) shouldRecordRequestBody(endpoint string) bool {
+	// allow list of endpoints that will propagate query to OpenTelemetry.
+	// see https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/#call-level-attributes
+	var searchEndpoints = map[string]struct{}{
+		"search":                 {},
+		"async_search.submit":    {},
+		"msearch":                {},
+		"eql.search":             {},
+		"terms_enum":             {},
+		"search_template":        {},
+		"msearch_template":       {},
+		"render_search_template": {},
+	}
+
+	if i.recordBody {
+		if _, ok := searchEndpoints[endpoint]; ok {
+			return true
+		}
+	}
+	return false
+}
+
+// RecordRequestBody adds the db.statement attribute only for search endpoints.
+// Returns a new reader if the query has been recorded, nil otherwise.
+func (i ElasticsearchOpenTelemetry) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
+	if !i.shouldRecordRequestBody(endpoint) {
+		return nil
+	}
+
+	span := trace.SpanFromContext(ctx)
+	if span.IsRecording() {
+		buf := bytes.Buffer{}
+		buf.ReadFrom(query)
+		span.SetAttributes(attribute.String(attrDbStatement, buf.String()))
+		getBody := func() (io.ReadCloser, error) {
+			reader := buf
+			return io.NopCloser(&reader), nil
+		}
+		reader, _ := getBody()
+		return reader
+	}
+
+	return nil
+}
+
+// RecordError sets any provided error as an OTel error in the active span.
+func (i ElasticsearchOpenTelemetry) RecordError(ctx context.Context, err error) {
+	span := trace.SpanFromContext(ctx)
+	if span.IsRecording() {
+		span.SetStatus(codes.Error, "an error happened while executing a request")
+		span.RecordError(err)
+	}
+}
+
+// RecordPathPart sets the key/value pair for a specific path part.
+// An index placed in the path would translate to `db.elasticsearch.path_parts.index`.
+func (i ElasticsearchOpenTelemetry) RecordPathPart(ctx context.Context, pathPart, value string) {
+	span := trace.SpanFromContext(ctx)
+	if span.IsRecording() {
+		span.SetAttributes(attribute.String(attrPathParts+pathPart, value))
+	}
+}
+
+// BeforeRequest is a no-op to satisfy the interface.
+func (i ElasticsearchOpenTelemetry) BeforeRequest(req *http.Request, endpoint string) {}
+
+// AfterRequest enriches the span with the available data from the request.
+func (i ElasticsearchOpenTelemetry) AfterRequest(req *http.Request, system, endpoint string) {
+	span := trace.SpanFromContext(req.Context())
+	if span.IsRecording() {
+		span.SetAttributes(
+			attribute.String(attrDbSystem, system),
+			attribute.String(attrDbOperation, endpoint),
+			attribute.String(attrHttpRequestMethod, req.Method),
+			attribute.String(attrUrlFull, req.URL.String()),
+			attribute.String(attrServerAddress, req.URL.Hostname()),
+		)
+		if value, err := strconv.ParseInt(req.URL.Port(), 10, 32); err == nil {
+			span.SetAttributes(attribute.Int64(attrServerPort, value))
+		}
+	}
+}
+
+// AfterResponse enriches the span with the cluster id and node name if the query was executed on Elastic Cloud.
+func (i ElasticsearchOpenTelemetry) AfterResponse(ctx context.Context, res *http.Response) {
+	span := trace.SpanFromContext(ctx)
+	if span.IsRecording() {
+		if id := res.Header.Get("X-Found-Handling-Cluster"); id != "" {
+			span.SetAttributes(
+				attribute.String(attrDbElasticsearchClusterName, id),
+			)
+		}
+		if name := res.Header.Get("X-Found-Handling-Instance"); name != "" {
+			span.SetAttributes(
+				attribute.String(attrDbElasticsearchNodeName, name),
+			)
+		}
+	}
+}
diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/logger.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/logger.go
index 45df50cb8..f4986b6e2 100644
--- a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/logger.go
+++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/logger.go
@@ -239,7 +239,10 @@ func (l *CurlLogger) LogRoundTrip(req *http.Request, res *http.Response, err err
 		}
 	}
 
-	b.WriteString(" 'http://localhost:9200")
+	b.WriteString(" '")
+	b.WriteString(req.URL.Scheme)
+	b.WriteString("://")
+	b.WriteString(req.URL.Host)
 	b.WriteString(req.URL.Path)
 	b.WriteString("?pretty")
 	if query != "" {
diff --git a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/version/version.go b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/version/version.go
index 9800dee3b..074cd6a3a 100644
--- a/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/version/version.go
+++ b/vendor/github.com/elastic/elastic-transport-go/v8/elastictransport/version/version.go
@@ -17,4 +17,4 @@
 
 package version
 
-const Transport = "8.1.0"
+const Transport = "8.7.0"
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/.codecov.yml b/vendor/github.com/elastic/go-elasticsearch/v9/.codecov.yml
new file mode 100644
index 000000000..e6210f886
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/.codecov.yml
@@ -0,0 +1,8 @@
+comment: off
+
+coverage:
+  status:
+    patch: off
+
+ignore:
+  - "esapi/api.*.go"
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/.dockerignore b/vendor/github.com/elastic/go-elasticsearch/v9/.dockerignore
new file mode 100644
index 000000000..4c8512b08
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/.dockerignore
@@ -0,0 +1,2 @@
+.git/
+tmp/
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/.gitignore b/vendor/github.com/elastic/go-elasticsearch/v9/.gitignore
new file mode 100644
index 000000000..24cd90eb5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/.gitignore
@@ -0,0 +1,5 @@
+tmp/
+*.test
+
+#jetBrains editors
+.idea
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/CHANGELOG.md b/vendor/github.com/elastic/go-elasticsearch/v9/CHANGELOG.md
new file mode 100644
index 000000000..6b448d407
--- /dev/null
+++
b/vendor/github.com/elastic/go-elasticsearch/v9/CHANGELOG.md
@@ -0,0 +1,732 @@
+# 9.1.0
+
+# API
+
+* Updated APIs to 9.1.0
+
+# Typed API
+
+* Update TypedAPI to latest [elasticsearch-specification 9.1](https://github.com/elastic/elasticsearch-specification/commit/907d11a)
+* This release introduces a new `MethodAPI` used by the `TypedClient`, which makes the client friendlier for dead code elimination,
+  reducing the size of the client when only a subset of the APIs is used. The old `API` structure remains available for backward compatibility, but it is now deprecated.
+
+# 8.19.0
+
+# API
+
+* Updated APIs to 8.19.0
+
+# Typed API
+
+* Update TypedAPI to latest [elasticsearch-specification 8.19](https://github.com/elastic/elasticsearch-specification/commit/470b4b9)
+
+# 9.0.1
+
+# API
+
+* Updated APIs to 9.0.4
+
+# Typed API
+
+* Update TypedAPI to latest [elasticsearch-specification 9.0](https://github.com/elastic/elasticsearch-specification/commit/e585438)
+
+# 9.0.0
+
+* The client now requires **Go 1.23** or later.
+
+## New
+
+* This release introduces an optional package for the `TypedAPI` named `esdsl`.
+  It provides a domain-specific language (DSL) for building Elasticsearch queries in Go.
+  The DSL is designed to simplify query construction, making it easier to build complex queries without writing raw JSON.
+
+```go
+// create index
+{
+	// delete index if exists
+	if existsRes, err := es.Indices.Exists("test").IsSuccess(context.Background()); err != nil {
+		log.Println(err)
+		return
+	} else if existsRes {
+		if ok, err := es.Indices.Delete("test").IsSuccess(context.Background()); !ok {
+			log.Fatalf("Error deleting index: %v\n", err)
+		}
+		log.Println("Index deleted:", "test")
+	} else {
+		log.Println("Index does not exist:", "test")
+	}
+
+	mappings := esdsl.NewTypeMapping().
+		AddProperty("name", esdsl.NewTextProperty()).
+		AddProperty("age", esdsl.NewIntegerNumberProperty())
+
+	createRes, err := es.Indices.Create("test").Mappings(mappings).Do(context.Background())
+	if err != nil {
+		log.Println(err)
+		return
+	}
+
+	log.Printf("Index created: %#v\n", createRes)
+}
+
+// index document
+{
+	documents := []Document{
+		{"Alice", 30},
+		{"Bob", 25},
+		{"Charlie", 35},
+	}
+
+	bulk := es.Bulk().Index("test")
+	for _, document := range documents {
+		err := bulk.IndexOp(types.IndexOperation{}, document)
+		if err != nil {
+			log.Println("Error indexing document:", err)
+		}
+	}
+	bulkRes, err := bulk.Refresh(refresh.Waitfor).Do(context.Background())
+	if err != nil {
+		log.Println(err)
+		return
+	}
+	if bulkRes.Errors {
+		log.Println("Some documents failed to index")
+		for _, item := range bulkRes.Items {
+			for operationType, responseItem := range item {
+				if responseItem.Error != nil {
+					log.Println("Operation:", operationType)
+					log.Println("Response:", responseItem)
+				}
+			}
+		}
+	}
+	indexedDocs := 0
+	for _, item := range bulkRes.Items {
+		for _, responseItem := range item {
+			if responseItem.Error == nil {
+				indexedDocs++
+			}
+		}
+	}
+
+	log.Println("Documents indexed:", indexedDocs)
+}
+
+// calculate median age
+{
+	searchRes, err := es.Search().
+		Index("test").
+		Size(0).
+		AddAggregation("median_age", esdsl.NewPercentilesAggregation().Field("age").Percents(50)).
+ Do(context.Background()) + if err != nil { + log.Println(err) + return + } + + if agg, ok := searchRes.Aggregations["median_age"].(*types.TDigestPercentilesAggregate); ok { + if val, ok := agg.Values.(map[string]interface{})["50.0"]; ok { + log.Println("Median age:", val) + } + } +} + +// search documents +{ + matchRes, err := es.Search(). + Index("test"). + Query(esdsl.NewBoolQuery(). + Must(esdsl.NewMatchQuery("name", "Alice")). + Filter(esdsl.NewNumberRangeQuery("age").Gte(20).Lte(40))). + Sort(esdsl.NewSortOptions().AddSortOption("age", esdsl.NewFieldSort(sortorder.Asc))). + Size(10). + Do(context.Background()) + if err != nil { + log.Println(err) + return + } + if matchRes.Hits.Total.Value > 0 { + for _, hit := range matchRes.Hits.Hits { + doc := Document{} + err := json.Unmarshal(hit.Source_, &doc) + if err != nil { + log.Println("Error unmarshalling document:", err) + continue + } + log.Printf("Document ID: %s, Name: %s, Age: %d\n", *hit.Id_, doc.Name, doc.Age) + } + } else { + log.Println("No documents found") + } +} +``` + +# API + +* Updated APIs to 9.0.0 + +# Typed API + +* Update APIs to 9.0.0 ([52c473e](https://github.com/elastic/elasticsearch-specification/tree/52c473efb1fb5320a5bac12572d0b285882862fb)) + + +# 8.18.0 + +* Update `elastictransport` to `8.7.0`. +* Thanks to @zaneli, the `TypedClient` can now be used in the `BulkIndexer`. + +# New + +* This release adds a `BaseClient` constructor with no attached APIs, allowing it to be used purely as a transport layer instead of a full-featured API client. + +```go +baseClient, err := elasticsearch.NewBaseClient(elasticsearch.Config{ + Addresses: []string{ + "http://localhost:9200", + }, +}) + +if err != nil { + log.Println(err) + return +} + +res, err := esapi.InfoRequest{ + Pretty: false, + Human: false, + ErrorTrace: false, + FilterPath: nil, + Header: nil, + Instrument: baseClient.InstrumentationEnabled(), +}.Do(context.Background(), baseClient) + +if err != nil { + log.Println(err) + return +} +defer res.Body.Close() +if res.IsError() { + log.Println("Error response:", res) + return +} +var infoMap map[string]interface{} +if err := json.NewDecoder(res.Body).Decode(&infoMap); err != nil { + log.Println("Error parsing response:", err) + return +} +log.Printf("Elasticsearch version esapi: %s\n", infoMap["version"].(map[string]interface{})["number"]) + +typedRes, err := info.New(baseClient).Do(context.Background()) +if err != nil { + log.Println(err) + return +} +log.Printf("Elasticsearch version typedapi: %s\n", typedRes.Version.Int) +``` + +# API + +* Updated APIs to 8.18.0 + +# Typed API + +* Update APIs to 8.18.0 ([f6a370d](https://github.com/elastic/elasticsearch-specification/tree/f6a370d0fba975752c644fc730f7c45610e28f36)) + +# 8.17.1 + +* Update elastictransport to 8.6.1 + +Thanks to @AkisAya and @jmfrees for their contributions! 
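+
+A minimal sketch of the 8.18.0 note above about using the `TypedClient` with the `BulkIndexer`. This is illustrative only: the index name and document are placeholders, and it assumes the `esutil.BulkIndexerConfig.Client` field accepts the typed client as of that release.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"strings"
+
+	elasticsearch "github.com/elastic/go-elasticsearch/v8"
+	"github.com/elastic/go-elasticsearch/v8/esutil"
+)
+
+func main() {
+	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
+		Addresses: []string{"http://localhost:9200"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	bi, err := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
+		Client: es, // the typed client, usable here since 8.18.0
+		Index:  "my-index",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if err := bi.Add(context.Background(), esutil.BulkIndexerItem{
+		Action: "index",
+		Body:   strings.NewReader(`{"title":"hello"}`),
+	}); err != nil {
+		log.Fatal(err)
+	}
+	if err := bi.Close(context.Background()); err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("documents added: %d", bi.Stats().NumAdded)
+}
+```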
+
+# 8.17.0
+
+* Expose BulkIndexer total flushed bytes metric [#914](https://github.com/elastic/go-elasticsearch/pull/914) thanks to @aureleoules
+
+# API
+
+Updated APIs to 8.17.0
+
+# Typed API
+
+Update APIs to latest [elasticsearch-specification 8.17](https://github.com/elastic/elasticsearch-specification/tree/2f823ff6fcaa7f3f0f9b990dc90512d8901e5d64)
+
+# 8.16.0
+
+# API
+
+* `InferenceStreamInference` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/stream-inference-api.html)
+* `QueryRulesTest` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/test-query-ruleset.html)
+* `Ingest.DeleteIPLocationDatabase` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-ip-location-database-api.html)
+* `Ingest.GetIPLocationDatabase` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ip-location-database-api.html)
+* `Ingest.PutIPLocationDatabase` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/put-ip-location-database-api.html)
+
+# Typed API
+
+Update APIs to latest [elasticsearch-specification 8.16](https://github.com/elastic/elasticsearch-specification/tree/4fcf747dfafc951e1dcf3077327e3dcee9107db3)
+
+# 8.15.0
+
+# API
+
+* API is generated from the Elasticsearch 8.15.0 specification.
+
+# Typed API
+
+Update APIs to latest [elasticsearch-specification 8.15](https://github.com/elastic/elasticsearch-specification/tree/19027dbdd366978ccae41842a040a636730e7c10)
+
+# 8.14.0
+
+# API
+
+New APIs:
+
+ * ConnectorUpdateActiveFiltering [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html)
+ * ConnectorUpdateFilteringValidation [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html)
+ * TextStructureFindFieldStructure [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html)
+ * TextStructureFindMessageStructure [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html)
+
+# Typed API
+
+New APIs:
+
+ * UpdateTrainedModelDeployment [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html)
+
+# Transport
+ * Fixed a deadlock in the connection pool https://github.com/elastic/elastic-transport-go/issues/20
+
+# 8.13.1
+
+# Typed API
+
+Update APIs to latest [elasticsearch-specification 8.13](https://github.com/elastic/elasticsearch-specification/tree/5fb8f1ce9c4605abcaa44aa0f17dbfc60497a757)
+
+## Fixes
+
+This patch release brings a fix to the initialisation of the `Request` in endpoints, which prevented the field shortcuts from being used.
+The canonical `.Request()` method was unaffected.
+* `Autoscaling.PutAutoscalingPolicy`
+* `Indices.Downsample`
+* `Indices.PutSettings`
+* `Indices.SimulateTemplate`
+* `Inference.PutModel`
+* `Logstash.PutPipeline`
+* `Ml.ValidateDetector`
+* `SearchApplication.Put`
+
+# 8.13.0
+
+# API
+
+New APIs:
+
+* `ConnectorSecretGet`
+* `ConnectorSecretPost`
+* `ConnectorSecretPut`
+* `ConnectorSecretDelete`
+* `ConnectorUpdateIndexName`
+* `ConnectorUpdateNative`
+* `ConnectorUpdateStatus`
+* `ConnectorUpdateAPIKeyDocumentID`
+* `ConnectorUpdateServiceDocumentType`
+
+* `EsqlAsyncQuery` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html)
+* `EsqlAsyncQueryGet` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html)
+* `ProfilingFlamegraph` [Documentation](https://www.elastic.co/guide/en/observability/current/universal-profiling.html)
+* `ProfilingStacktraces` [Documentation](https://www.elastic.co/guide/en/observability/current/universal-profiling.html)
+* `TextStructureTestGrokPattern` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html)
+* `Indices.ResolveCluster` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html)
+* `Security.QueryUser` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html)
+
+# Typed API
+
+* `indices.ResolveCluster` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html)
+* `textstructure.TestGrokPattern` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html)
+
+Thanks to @pakio, transport now has an optional pool-based compression option. [link](https://github.com/elastic/elastic-transport-go/pull/19)
+
+And to @tblyler for fixing a very subtle memory leak in the `BulkIndexer`. #797
+
+# 8.12.1
+
+* Fix: ticker memory leak in bulk indexer due to internal flush call resetting the ticker. #797
+* Fix: Scroll now uses the body to pass the scroll_id. #785
+* Add: generated UnmarshalJSON for Requests to allow injecting payloads using aliases.
+* Fix: `put_synonym_rule` was not working due to a type issue in the [Elasticsearch API Specification](https://github.com/elastic/elasticsearch-specification/pull/2407).
+
+# 8.12.0
+
+## Client
+
+### Golang version
+
+The client now requires Golang version 1.20.
+
+### OpenTelemetry
+
+The client now provides OpenTelemetry integration. This integration can be enabled in the config using `elasticsearch.NewOpenTelemetryInstrumentation`.
+Once set up, the provided `context` will be used to record spans with useful information about the request being made to the server.
+
+More about what you can expect in the [Semantic Conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/).
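+
+As a hedged sketch of wiring this up, the instrumentation can also be constructed directly with the transport-level `elastictransport.NewOtelInstrumentation` constructor added in the vendored `elastictransport` shown earlier in this diff. The address and version string below are placeholders, and it assumes the client `Config` forwards its `Instrumentation` field to the transport:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	elasticsearch "github.com/elastic/go-elasticsearch/v8"
+)
+
+func main() {
+	// A nil provider falls back to the global OTel tracer provider;
+	// false disables capturing search bodies into db.statement.
+	instr := elastictransport.NewOtelInstrumentation(nil, false, "8.12.0")
+
+	es, err := elasticsearch.NewClient(elasticsearch.Config{
+		Addresses:       []string{"http://localhost:9200"},
+		Instrumentation: instr,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Requests made with a span-carrying context are recorded as client spans.
+	if _, err := es.Info(es.Info.WithContext(context.Background())); err != nil {
+		log.Fatal(err)
+	}
+}
+```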
+
+### BulkIndexer
+
+`if_seq_no` & `if_primary_term` are now supported thanks to @benjyiw [#783](https://github.com/elastic/go-elasticsearch/pull/783)
+
+## API
+
+* `SimulateIngest`
+* `ConnectorCheckIn`
+* `ConnectorDelete`
+* `ConnectorGet`
+* `ConnectorLastSync`
+* `ConnectorList`
+* `ConnectorPost`
+* `ConnectorPut`
+* `ConnectorSyncJobCancel`
+* `ConnectorSyncJobCheckIn`
+* `ConnectorSyncJobDelete`
+* `ConnectorSyncJobError`
+* `ConnectorSyncJobGet`
+* `ConnectorSyncJobList`
+* `ConnectorSyncJobPost`
+* `ConnectorSyncJobUpdateStats`
+* `ConnectorUpdateConfiguration`
+* `ConnectorUpdateError`
+* `ConnectorUpdateFiltering`
+* `ConnectorUpdateName`
+* `ConnectorUpdatePipeline`
+* `ConnectorUpdateScheduling`
+
+## Typed API
+
+* `Esql.Query` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-rest.html)
+* `Fleet.PostSecret`
+* `Inference` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html)
+  * `DeleteModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-inference-api.html)
+  * `GetModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference-api.html)
+  * `Inference` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/post-inference-api.html)
+  * `PutModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference-api.html)
+* `SearchApplication`
+  * `GetSettings` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html)
+  * `UpdateSettings` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html)
+
+# 8.11.1
+
+## Typed API
+
+* Fix https://github.com/elastic/go-elasticsearch/issues/756, which prevented settings from being applied in `indices.PutSettings`
+
+# 8.11.0
+
+## API
+
+**Experimental APIs**
+
+* `EsqlQuery` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-query-api.html)
+* `InferenceDeleteModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html)
+* `InferenceGetModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html)
+* `InferenceInference` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html)
+* `InferencePutModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html)
+
+## Typed API
+
+* Mandatory URL parameters are not exposed as functions anymore, as they already exist in the constructor (see the sketch after the compatibility note below).
+
+# New Compatibility Policy
+
+Starting from version `8.12.0`, this library follows the Go language [policy](https://go.dev/doc/devel/release#policy). Each major Go release is supported until there are two newer major releases. For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+If you have any questions or concerns, please do not hesitate to reach out to us.
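+
+To illustrate the 8.11.0 Typed API note above, here is a minimal, hedged sketch (the index name and address are placeholders): the mandatory index name is supplied to the endpoint constructor itself rather than through a separate setter.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	elasticsearch "github.com/elastic/go-elasticsearch/v8"
+)
+
+func main() {
+	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
+		Addresses: []string{"http://localhost:9200"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// The mandatory URL parameter goes straight into the constructor;
+	// there is no separate setter function for it anymore.
+	res, err := es.Indices.Create("my-index").Do(context.Background())
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("acknowledged:", res.Acknowledged)
+}
+```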
+
+# 8.10.1
+
+## Typed API
+
+Update APIs to latest [elasticsearch-specification 8.10](https://github.com/elastic/elasticsearch-specification/commit/3b09f9d8e90178243f8a340a7bc324aab152c602)
+
+# 8.10.0
+
+## API
+**Experimental APIs for internal use**
+* `FleetDeleteSecret`
+* `FleetGetSecret`
+* `FleetPostSecret`
+
+**Experimental APIs**
+
+`QueryRulesetList`
+
+**Stable APIs**
+
+`Security.GetSettings`
+`Security.UpdateSettings`
+
+## Typed API
+**Experimental APIs**
+
+`QueryRuleset.List`
+
+**Technical Preview**
+* [QueryRuleSet](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-rules-apis.html)
+
+**Beta**
+* [Synonyms](https://www.elastic.co/guide/en/elasticsearch/reference/current/synonyms-apis.html)
+
+# 8.9.0
+
+## API
+**New API**
+
+* `Cluster.Info` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html)
+
+**Experimental APIs**
+
+* `QueryRulesetGet` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html)
+* `QueryRulesetDelete` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html)
+* `QueryRulesetPut` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html)
+* `SearchApplicationRenderQuery` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html)
+* `Security.CreateCrossClusterAPIKey` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html)
+* `Security.UpdateCrossClusterAPIKey` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html)
+
+## Typed API
+
+* Propagated request fields towards the endpoint for ease of access, taking priority over same-name query string fields.
+* Added a stub for Do methods on endpoints that only support a boolean response, such as `core.exists`.
+* Support for NDJSON endpoints with custom serialization, such as `core.bulk`.
+* Links to endpoint documentation in the API index for better display and ease of use.
+
+**fixes**
+
+* Fixed a deserialization issue for `Property` & `Analyzer` #696
+
+# 8.8.2
+
+## Typed API
+
+* Fixed deserialization for `Suggest` in search responses.
+* Fixed double-quoted strings in deserialization for unions normalized as string. #684
+* Fixed handling of `core.Get` response when the index did not exist. #678
+
+# 8.7.0
+
+## API
+
+* `ML.DeleteJob`: Added `WithDeleteUserAnnotations`. Should annotations added by the user be deleted.
+* `ML.ResetJob`: Added `WithDeleteUserAnnotations`. Should annotations added by the user be deleted.
+* `ML.StartTrainedModelDeployment`: Added `WithPriority`. The deployment priority.
+* `TransformGetTransformStats`: Added `WithTimeout`. Controls the time to wait for the stats.
+* `TransformStartTransform`: Added `WithFrom`. Restricts the set of transformed entities to those changed after this time.
+
+**New APIs**
+
+`TransformScheduleNowTransform` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.7/schedule-now-transform.html).
+`HealthReport` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.7/health-api.html).
+
+## Typed API
+
+* Inclusion of response structures.
+
+**Changes**
+
+* `Do` method on endpoints now returns a typed response, one per endpoint.
+* `Perform` method added on endpoints, returns `http.Response` as did `Do`.
+* Elasticsearch exceptions are now handled as `types.ElasticsearchError` with `.As` and `.Is` methods.
+* `.Raw` now takes a reader as input.
+* User-defined values such as `_source` in `Hits` are now `json.RawMessage` to highlight that they are meant to be deserialized later.
+
+# 8.6.0
+
+## API
+
+* `ML.StartTrainedModelDeployment`: Added `WithPriority`
+
+**New APIs**
+* `ML.UpdateTrainedModelDeployment`: Updates certain properties of a trained model deployment.
+
+
+## Client
+**BulkIndexer**
+
+Improvements were made to the BulkIndexer memory usage to allow better handling under burst use cases. Thanks to @christos68k and @rockdaboot!
+
+# 8.5.0
+
+## API
+
+* `ML.StartTrainedModelDeployment`: The description of `NumberOfAllocations` has been changed to "The total number of allocations this model is assigned across machine learning nodes".
+* `Security.GetAPIKey`: Added `WithLimitedBy` boolean parameter, a flag to show the limited-by role descriptors of API keys.
+* `Security.GetUser`: Added `WithProfileUID` boolean parameter, a flag to retrieve the profile uid (if it exists) associated with the user.
+* `Security.GetUserProfile`: Changed the description of the `uid` parameter: a comma-separated list of unique identifiers for user profiles.
+* `Security.QueryAPIKeys`: Added `WithLimitedBy` boolean parameter, a flag to show the limited-by role descriptors of API keys.
+* `TextStructureFindStructure`: Added `EcsCompatibility` string parameter, an optional parameter to specify the compatibility mode with ECS Grok patterns; may be either 'v1' or 'disabled'.
+
+**Promoted to stable**
+
+* `ML.InferTrainedModel`
+* `ML.PutTrainedModelDefinitionPart`
+* `ML.PutTrainedModelVocabulary`
+* `ML.StartTrainedModelDeployment`
+* `ML.StopTrainedModelDeployment`
+* `Security.activateUserProfile`
+* `Security.DisableUserProfile`
+* `Security.EnableUserProfile`
+* `Security.GetUserProfile`
+* `Security.HasPrivilegesUserProfile`
+* `Security.SuggestUserProfiles`
+* `Security.UpdateUserProfileData`
+
+**New APIs**
+
+* `ML.ClearTrainedModelDeploymentCache` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html).
+* `Security.BulkUpdateAPIKeys` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html).
+* `Indices.Downsample` (Experimental API) [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html)
+
+## Typed API
+
+Following extensive feedback, we decided to remove the builder API for the type tree.
+
+In its place, work has started to further simplify the type tree by removing redundant type aliases. The API also now comes with a helper package named `some` that allows creating inline pointers to primitive types.
+
+In addition, a bug was fixed that prevented the use of wildcards in index names, and enums are now extensible by default.
+
+The Typed API remains in `alpha` stage while its development continues.
+
+# 8.4.0
+
+## API
+
+* `get`, `mget` and `search` added `force_synthetic_source`: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst-case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+* `ML.StartTrainedModelDeployment` added `cache_size`: A byte-size value for configuring the inference cache size. For example, 20mb.
+* `Snapshot.Get` added `sort`, `size`, `order`, `from_sort_value`, `after`, `offset` and `slm_policy_filter`. More on these in the [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.4/get-snapshot-api.html).
+
+**New API**
+
+* `Security.UpdateAPIKey` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.4/security-api-update-api-key.html).
+
+## Typed API
+
+As highlighted in the release notes for 8.4.0-alpha.1, this release marks the beginning of the newly arrived `TypedClient`.
+
+This new API is still in `alpha` stage and will be released alongside the existing `esapi`.
+
+A few examples of standard use-cases can be found in the [TypedAPI section of the documentation](https://www.elastic.co/guide/en/elasticsearch/client/go-api/master/typedapi.html).
+
+# 8.4.0-alpha.2
+
+This second prerelease of 8.4.0 updates the API for the client and fixes the serialization for types using [additional properties](https://github.com/elastic/elasticsearch-specification/blob/main/docs/behaviors.md#additionalproperties--additionalproperty).
+
+# 8.4.0-alpha.1
+
+This prerelease introduces a new typed API generated from the [elasticsearch-specification](https://github.com/elastic/elasticsearch-specification). This generation from the common specification allows us to provide a complete API which uses an exhaustive hierarchy of types reflecting the possibilities given by Elasticsearch.
+
+This new API is the next iteration of the Go client for Elasticsearch. It lives alongside the existing API, is in `alpha` state, and will gain features over time and across releases.
+
+## What's new
+
+The `TypedClient` is built around a fluent builder for easier request creation and a collection of structures and helpers that mimics as closely as possible the Elasticsearch JSON API.
+
+As a first example, here is a search request:
+```go
+cfg := elasticsearch.Config{
+	// Define your configuration
+}
+es, _ := elasticsearch.NewTypedClient(cfg)
+res, err := es.Search().
+	Index("index_name").
+	Request(&search.Request{
+		Query: &types.QueryContainer{
+			Match: map[types.Field]types.MatchQuery{
+				"name": {Query: "Foo"},
+			},
+		},
+	}).Do(context.Background())
+```
+
+The `Request` uses the structures found in the `typedapi/types` package, which will guide you through the available options. A builder for each structure, allowing easier access and declaration, is also provided.
+
+More on the specifics and a few examples of standard use-cases can be found in the [TypedAPI section of the documentation](https://www.elastic.co/guide/en/elasticsearch/client/go-api/master/typedapi.html).
+
+## Limitations
+
+While most of the endpoints are covered, a few points are still being worked on and will be part of future releases:
+
+* NDJSON endpoints: `bulk`, `msearch`, `msearch_template`, `ML.post_data`, `find_structure`, to name a few.
+* Response and error structures with deserialization.
+
+## Transport & config
+
+Although it is different, the new API uses all the existing layers built so far: `elastic-transport-go` remains the preferred transport, and all your configuration and credentials apply as before.
+
+## Feedback
+
+Feedback is very welcome: play with it, use it, and let us know what you think!
+
+# 8.3.0
+
+## API
+
+* `ML.InferTrainedModelDeployment` renamed to `InferTrainedModel`
+* `ML.PreviewDatafeed` has two new parameters, `start` and `end`. 
[Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.3/ml-preview-datafeed.html)
+* `ML.StartTrainedModelDeployment` has three new parameters, `number_of_allocations`, `threads_per_allocation` and `queue_capacity`. [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html)
+* `Cluster.DeleteVotingConfigExclusions` has a new `master_timeout` parameter.
+* `Cluster.PostVotingConfigExclusions` has a new `master_timeout` parameter.
+* `Snapshot.Get` has a new `index_names` parameter (boolean): whether to include the name of each index in the snapshot. Defaults to true.
+
+**New APIs**
+
+* `Security.HasPrivilegesUserProfile` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html)
+
+# 8.2.0
+## Client
+
+* Fixed a serialization error for `retry_on_conflict` in the BulkIndexer. Thanks to @lpflpf for the help!
+* Fixed a concurrent map error in the BulkIndexer when custom headers are applied. Thanks to @chzhuo for the contribution!
+
+## API
+
+**New APIs**
+
+* `Cat.ComponentTemplates`
+* `ML.GetMemoryStats` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html)
+
+* `Security.activateUserProfile` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html)
+* `Security.disableUserProfile` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html)
+* `Security.enableUserProfile` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html)
+* `Security.getUserProfile` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html)
+* `Security.suggestUserProfiles` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html)
+* `Security.updateUserProfileData` (Experimental API) [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html)
+
+# 8.1.0
+## API
+
+* API is generated from the Elasticsearch 8.1.0 specification.
+
+**New parameters**
+
+* `WithWaitForCompletion` for `Indices.Forcemerge`
+* `WithFeatures` for `Indices.Get`
+* `WithForce` for `ML.DeleteTrainedModel`
+
+**New APIs**
+
+* `OidcAuthenticate`, `OidcLogout` and `OidcPrepareAuthentication` [see documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api.html#security-openid-apis)
+* `TransformResetTransform`
+
+# 8.0.0
+## Client
+
+* The client now uses the `elastic-transport-go` dependency, which lives in its [own repository](https://github.com/elastic/elastic-transport-go/).
+* With the newly extracted transport, `retryOnTimeout` has been replaced with a `retryOnError` callback. This allows finer-grained selection of which errors the client should retry; a sketch follows at the end of these notes.
+* The `BulkIndexerItem` `Body` field is now an `io.ReadSeeker`, allowing re-reads without increasing memory consumption.
+* `BulkIndexerItem` now correctly uses the `routing` property instead of the deprecated `_routing`.
+
+## API
+
+* API is generated from the Elasticsearch 8.0.0 specification.
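+
+As an illustration of the `retryOnError` callback mentioned above, here is a minimal sketch; the timeout-only retry policy is an example assumption, not a recommendation:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+	"os"
+
+	elasticsearch "github.com/elastic/go-elasticsearch/v9"
+)
+
+func main() {
+	es, err := elasticsearch.NewClient(elasticsearch.Config{
+		// Retry a request only when the error looks like a timeout;
+		// any other transport error fails fast.
+		RetryOnError: func(req *http.Request, err error) bool {
+			return os.IsTimeout(err)
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = es // use the client as usual
+}
+```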
+
+
+
+
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/CODE_OF_CONDUCT.md b/vendor/github.com/elastic/go-elasticsearch/v9/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..d6bad2212
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+303 See Other
+
+Location: https://www.elastic.co/community/codeofconduct
\ No newline at end of file
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/CONTRIBUTING.md b/vendor/github.com/elastic/go-elasticsearch/v9/CONTRIBUTING.md
new file mode 100644
index 000000000..8254a5442
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/CONTRIBUTING.md
@@ -0,0 +1,34 @@
+# Contributing
+
+The Elasticsearch Go client is open source, and we love to receive contributions from our community.
+
+There are many ways to contribute, from writing tutorials or blog posts and improving the documentation to submitting bug reports, feature requests, or code.
+
+## Sign the Contributor License Agreement
+
+We do ask that you sign the [Contributor License Agreement](https://www.elastic.co/contributor-agreement)
+before we can accept pull requests from you.
+
+## Development
+
+### Prerequisites
+
+You need at least Go 1.21 to build the project.
+
+Docker is used for some tests to spawn an Elasticsearch server.
+
+### Project structure
+
+The code for each API lives in individual files in the `esapi` subfolder; this code is entirely generated by the subproject located in the `internal/build` folder.
+
+
+
+### Before you open a PR
+
+Please run the tests locally with `make test` and make sure everything is in order.
+
+If your changes address the generator, you can run the integration tests with the following command:
+
+`TEST_SUITE=free STACK_VERSION=8.0.0-SNAPSHOT WORKSPACE=/tmp/workspace/$STACK_VERSION .ci/run-tests`
+
+`TEST_SUITE` can be either `free` or `platinum`; `STACK_VERSION` reflects the targeted Elasticsearch version of the client.
\ No newline at end of file
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/LICENSE b/vendor/github.com/elastic/go-elasticsearch/v9/LICENSE
new file mode 100644
index 000000000..b1a731fb5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/Makefile b/vendor/github.com/elastic/go-elasticsearch/v9/Makefile new file mode 100644 index 000000000..2b2b924f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/Makefile @@ -0,0 +1,487 @@ +SHELL := /bin/bash + +ELASTICSEARCH_DEFAULT_BUILD_VERSION = "9.1.0-SNAPSHOT" + +##@ Test +test-unit: ## Run unit tests + @printf "\033[2m→ Running unit tests...\033[0m\n" +ifdef race + $(eval testunitargs += "-race") +endif +ifeq ($(OS),Windows_NT) + $(eval testunitargs += "./...") +else + $(eval testunitargs += "-cover" "-coverprofile=tmp/unit.cov" "./...") +endif + @mkdir -p tmp + @if which gotestsum > /dev/null 2>&1 ; then \ + echo "gotestsum --format=short-verbose --junitfile=tmp/unit-report.xml --" $(testunitargs); \ + gotestsum --format=short-verbose --junitfile=tmp/unit-report.xml -- $(testunitargs); \ + else \ + echo "go test -v" $(testunitargs); \ + go test -v $(testunitargs); \ + fi; +test: test-unit + +test-integ: ## Run integration tests + @mkdir -p tmp + @printf "\033[2m→ Running integration tests...\033[0m\n" + $(eval testintegtags += "integration") +ifdef multinode + $(eval testintegtags += "multinode") +endif +ifdef race + $(eval testintegargs += "-race") +endif + $(eval testintegargs += "-cover" "-coverpkg=github.com/elastic/go-elasticsearch/v9,github.com/elastic/go-elasticsearch/v9/esutil,github.com/elastic/go-elasticsearch/v9/typedapi" "-coverprofile=$(PWD)/tmp/integration-client.cov" "-tags='$(testintegtags)'" "-timeout=1h") + @if which gotestsum > /dev/null 2>&1 ; then \ + cd internal/testing/e2e; \ + echo "gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-report.xml --" $(testintegargs); \ + gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-report.xml -- $(testintegargs); \ + else \ + cd internal/testing/e2e; \ + echo "go test -v -count=1" $(testintegargs); \ + go test -v -count=1 $(testintegargs); \ + fi; + +test-api: ## Run generated API integration tests + @mkdir -p tmp +ifdef race + $(eval testapiargs += "-race") +endif + $(eval testapiargs += "-cover" "-coverpkg=github.com/elastic/go-elasticsearch/v9/esapi" "-coverprofile=$(PWD)/tmp/integration-api.cov" "-tags='integration'" "-timeout=1h") +ifdef flavor +else + $(eval flavor='free') +endif + @printf "\033[2m→ Running API integration tests for [$(flavor)]...\033[0m\n" +ifeq ($(flavor), platinum) + @{ \ + set -e ; \ + trap "test -d .git && git checkout --quiet $(PWD)/esapi/test/go.mod" INT TERM EXIT; \ + export ELASTICSEARCH_URL='https://elastic:elastic@localhost:9200' && \ + if which gotestsum > /dev/null 2>&1 ; then \ + cd esapi/test && \ + go mod download && \ + gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-api-report.xml -- $(testapiargs) $(PWD)/esapi/test/xpack/*_test.go && \ + gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-api-report.xml -- $(testapiargs) $(PWD)/esapi/test/xpack/ml/*_test.go && \ + gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-api-report.xml -- $(testapiargs) $(PWD)/esapi/test/xpack/ml-crud/*_test.go; \ + else \ + echo "go test -v" $(testapiargs); \ + cd esapi/test && \ + go mod download && \ + go test -v $(testapiargs) $(PWD)/esapi/test/xpack/*_test.go && \ + go test -v $(testapiargs) $(PWD)/esapi/test/xpack/ml/*_test.go && \ + go test -v $(testapiargs) $(PWD)/esapi/test/xpack/ml-crud/*_test.go; \ + fi; \ + } +else + $(eval testapiargs += $(PWD)/esapi/test/*_test.go) + { \ + set -e ; \ + trap "test -d .git && git checkout --quiet 
$(PWD)/esapi/test/go.mod" INT TERM EXIT; \
+	if which gotestsum > /dev/null 2>&1 ; then \
+		cd esapi/test && \
+		go mod download && \
+		gotestsum --format=short-verbose --junitfile=$(PWD)/tmp/integration-api-report.xml -- $(testapiargs); \
+	else \
+		echo "go test -v" $(testapiargs); \
+		cd esapi/test && \
+		go mod download && \
+		go test -v $(testapiargs); \
+	fi; \
+	}
+endif
+
+test-bench: ## Run benchmarks
+	@printf "\033[2m→ Running benchmarks...\033[0m\n"
+	go test -run=none -bench=. -benchmem ./...
+
+test-examples: ## Execute the _examples
+	@printf "\033[2m→ Testing the examples...\033[0m\n"
+	@{ \
+		set -e ; \
+		for d in _examples/*/; do \
+			printf "\033[2m────────────────────────────────────────────────────────────────────────────────\n"; \
+			printf "\033[1mUpdating dependencies for $$d\033[0m\n"; \
+			printf "\033[2m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+			(cd $$d && go mod download all && make setup test) || \
+			( \
+				printf "\033[31m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+				printf "\033[31;1m⨯ ERROR\033[0m\n"; \
+				false; \
+			); \
+		done; \
+		\
+		( \
+			cd _examples; \
+			for f in *.go; do \
+				printf "\033[2m────────────────────────────────────────────────────────────────────────────────\n"; \
+				printf "\033[1m$$f\033[0m\n"; \
+				printf "\033[2m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+				(export ELASTICSEARCH_URL=http://elastic:elastic@localhost:9200 && go run $$f && true) || \
+				( \
+					printf "\033[31m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+					printf "\033[31;1m⨯ ERROR\033[0m\n"; \
+					false; \
+				); \
+			done; \
+		);\
+# \
+# for f in _examples/*/; do \
+# 	printf "\033[2m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+# 	printf "\033[1m$$f\033[0m\n"; \
+# 	printf "\033[2m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+# 	(cd $$f && make test && true) || \
+# 	( \
+# 		printf "\033[31m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+# 		printf "\033[31;1m⨯ ERROR\033[0m\n"; \
+# 		false; \
+# 	); \
+# done; \
+		printf "\033[32m────────────────────────────────────────────────────────────────────────────────\033[0m\n"; \
+		\
+		printf "\033[32;1mSUCCESS\033[0m\n"; \
+	}
+
+test-coverage: ## Generate test coverage report
+	@printf "\033[2m→ Generating test coverage report...\033[0m\n"
+	@go tool cover -html=tmp/unit.cov -o tmp/coverage.html
+	@go tool cover -func=tmp/unit.cov | 'grep' -v 'esapi/api\.' | sed 's/github.com\/elastic\/go-elasticsearch\///g'
+	@printf "\033[0m--------------------------------------------------------------------------------\nopen tmp/coverage.html\n\n\033[0m"
+
+##@ Development
+lint: ## Run lint on the package
+	@printf "\033[2m→ Running lint...\033[0m\n"
+	go vet github.com/elastic/go-elasticsearch/...
+	@{ \
+		set -e ; \
+		trap "test -d ../../../.git && git checkout --quiet go.mod" INT TERM EXIT; \
+		echo "cd internal/build/ && go vet ./..."; \
+		cd "internal/build/" && go mod tidy && go mod download && go vet ./...; \
+	}
+
+
+apidiff: ## Display API incompatibilities
+	@if ! command -v apidiff > /dev/null; then \
+		printf "\033[31;1mERROR: apidiff not installed\033[0m\n"; \
+		printf "go get -u github.com/go-modules-by-example/apidiff\n"; \
+		printf "\033[2m→ https://github.com/go-modules-by-example/index/blob/master/019_apidiff/README.md\033[0m\n\n"; \
+		false; \
+	fi;
+	@rm -rf tmp/apidiff-OLD tmp/apidiff-NEW
+	@git clone --quiet --local .git/ tmp/apidiff-OLD
+	@mkdir -p tmp/apidiff-NEW
+	@tar -c --exclude .git --exclude tmp --exclude cmd . | tar -x -C tmp/apidiff-NEW
+	@printf "\033[2m→ Running apidiff...\033[0m\n"
+	@printf "tmp/apidiff-OLD/esapi tmp/apidiff-NEW/esapi\n"
+	@{ \
+		set -e ; \
+		output=$$(apidiff tmp/apidiff-OLD/esapi tmp/apidiff-NEW/esapi); \
+		printf "\n$$output\n\n"; \
+		if echo $$output | grep -i -e 'incompatible' - > /dev/null 2>&1; then \
+			printf "\n\033[31;1mFAILURE\033[0m\n\n"; \
+			false; \
+		else \
+			printf "\033[32;1mSUCCESS\033[0m\n"; \
+		fi; \
+	}
+
+backport: ## Backport one or more commits from main into version branches
+ifeq ($(origin commits), undefined)
+	@echo "Missing commit(s), exiting..."
+	@exit 2
+endif
+ifndef branches
+	$(eval branches_list = '7.16' '6.x' '5.x')
+else
+	$(eval branches_list = $(shell echo $(branches) | tr ',' ' ') )
+endif
+	$(eval commits_list = $(shell echo $(commits) | tr ',' ' '))
+	@printf "\033[2m→ Backporting commits [$(commits)]\033[0m\n"
+	@{ \
+		set -e -o pipefail; \
+		for commit in $(commits_list); do \
+			git show --pretty='%h | %s' --no-patch $$commit; \
+		done; \
+		echo ""; \
+		for branch in $(branches_list); do \
+			printf "\033[2m→ $$branch\033[0m\n"; \
+			git checkout $$branch; \
+			for commit in $(commits_list); do \
+				git cherry-pick -x $$commit; \
+			done; \
+			git status --short --branch; \
+			echo ""; \
+		done; \
+		printf "\033[2m→ Push updates to Github:\033[0m\n"; \
+		for branch in $(branches_list); do \
+			echo "git push --verbose origin $$branch"; \
+		done; \
+	}
+
+release: ## Release a new version to Github
+	$(eval branch = $(shell git rev-parse --abbrev-ref HEAD))
+	$(eval current_version = $(shell cat internal/version/version.go | sed -Ee 's/const Client = "(.*)"/\1/' | tail -1))
+	@printf "\033[2m→ [$(branch)] Current version: $(current_version)...\033[0m\n"
+ifndef version
+	@printf "\033[31m[!] Missing version argument, exiting...\033[0m\n"
+	@exit 2
+endif
+ifeq ($(version), "")
+	@printf "\033[31m[!] Empty version argument, exiting...\033[0m\n"
+	@exit 2
+endif
+	@printf "\033[2m→ [$(branch)] Creating version $(version)...\033[0m\n"
+	@{ \
+		set -e -o pipefail; \
+		cp internal/version/version.go internal/version/version.go.OLD && \
+		cat internal/version/version.go.OLD | sed -e 's/Client = ".*"/Client = "$(version)"/' > internal/version/version.go && \
+		go vet internal/version/version.go && \
+		go fmt internal/version/version.go && \
+		git diff --color-words internal/version/version.go | tail -n 1; \
+	}
+	@{ \
+		set -e -o pipefail; \
+		printf "\033[2m→ Commit and create Git tag? 
(y/n): \033[0m\c"; \ + read continue; \ + if [[ $$continue == "y" ]]; then \ + git add internal/version/version.go && \ + git commit --no-status --quiet --message "Release $(version)" && \ + git tag --annotate v$(version) --message 'Release $(version)'; \ + printf "\033[2m→ Push `git show --pretty='%h (%s)' --no-patch HEAD` to Github:\033[0m\n\n"; \ + printf "\033[1m git push origin HEAD && git push origin v$(version)\033[0m\n\n"; \ + mv internal/version/version.go.OLD internal/version/version.go && \ + git add internal/version/version.go && \ + original_version=`cat internal/version/version.go | sed -ne 's;^const Client = "\(.*\)"$$;\1;p'` && \ + git commit --no-status --quiet --message "Update version to $$original_version"; \ + printf "\033[2m→ Version updated to [$$original_version].\033[0m\n\n"; \ + else \ + echo "Aborting..."; \ + rm internal/version/version.go.OLD; \ + exit 1; \ + fi; \ + } + +godoc: ## Display documentation for the package + @printf "\033[2m→ Generating documentation...\033[0m\n" + @echo "* http://localhost:6060/pkg/github.com/elastic/go-elasticsearch/v9" + @echo "* http://localhost:6060/pkg/github.com/elastic/go-elasticsearch/v9/esapi" + @echo "* http://localhost:6060/pkg/github.com/elastic/go-elasticsearch/v9/esutil" + @printf "\n" + godoc --http=localhost:6060 --play + +cluster: ## Launch an Elasticsearch cluster with Docker + $(eval flavor ?= "core") + $(eval elasticsearch_url = "http://es1:9200") + +ifdef ELASTICSEARCH_BUILD_VERSION + $(eval version ?= "elasticsearch:"${ELASTICSEARCH_BUILD_VERSION}) +else + $(eval version ?= "elasticsearch:"${ELASTICSEARCH_DEFAULT_BUILD_VERSION}) +endif + +ifeq ($(origin nodes), undefined) + $(eval nodes = 1) +endif + @printf "\033[2m→ Launching %d node(s) of %s...\033[0m\n" $(nodes) $(version) +ifeq ($(shell test $(nodes) && test $(nodes) -gt 1; echo $$?),0) + $(eval detach ?= "true") +else + $(eval detach ?= "false") +endif + + $(eval elasticsearch_url = "http://elastic:elastic@es1:9200") +ifeq ($(flavor), platinum) + $(eval elasticsearch_url = "https://elastic:elastic@es1:9200") + $(eval xpack_env += --env "ELASTIC_PASSWORD=elastic") + $(eval xpack_env += --env "xpack.license.self_generated.type=trial") + $(eval xpack_env += --env "xpack.security.enabled=true") + $(eval xpack_env += --env "xpack.security.http.ssl.enabled=true") + $(eval xpack_env += --env "xpack.security.http.ssl.key=certs/testnode.key") + $(eval xpack_env += --env "xpack.security.http.ssl.certificate=certs/testnode.crt") + $(eval xpack_env += --env "xpack.security.http.ssl.certificate_authorities=certs/ca.crt") + $(eval xpack_env += --env "xpack.security.transport.ssl.enabled=true") + $(eval xpack_env += --env "xpack.security.transport.ssl.verification_mode=certificate") + $(eval xpack_env += --env "xpack.security.transport.ssl.key=certs/testnode.key") + $(eval xpack_env += --env "xpack.security.transport.ssl.certificate=certs/testnode.crt") + $(eval xpack_env += --env "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt") + $(eval xpack_volumes += --volume "$(PWD)/.ci/certs/testnode.crt:/usr/share/elasticsearch/config/certs/testnode.crt") + $(eval xpack_volumes += --volume "$(PWD)/.ci/certs/testnode.key:/usr/share/elasticsearch/config/certs/testnode.key") + $(eval xpack_volumes += --volume "$(PWD)/.ci/certs/ca.crt:/usr/share/elasticsearch/config/certs/ca.crt") +endif + @docker network inspect elasticsearch > /dev/null 2>&1 || docker network create elasticsearch; + @{ \ + for n in `seq 1 $(nodes)`; do \ + if [[ -z "$$port" ]]; then \ + 
hostport=$$((9199+$$n)); \ + else \ + hostport=$$port; \ + fi; \ + docker run \ + --name "es$$n" \ + --network elasticsearch \ + --env "ELASTIC_PASSWORD=elastic" \ + --env "node.name=es$$n" \ + --env "cluster.name=go-elasticsearch" \ + --env "cluster.initial_master_nodes=es1" \ + --env "discovery.seed_hosts=es1" \ + --env "cluster.routing.allocation.disk.threshold_enabled=false" \ + --env "bootstrap.memory_lock=true" \ + --env "node.attr.testattr=test" \ + --env "path.repo=/tmp" \ + --env "repositories.url.allowed_urls=http://snapshot.test*" \ + --env "xpack.security.enabled=false" \ + --env ES_JAVA_OPTS="-Xms1g -Xmx1g" \ + $(xpack_env) \ + --volume `echo $(version) | tr -C "[:alnum:]" '-'`-node-$$n-data:/usr/share/elasticsearch/data \ + $(xpack_volumes) \ + --publish $$hostport:9200 \ + --ulimit nofile=65536:65536 \ + --ulimit memlock=-1:-1 \ + --detach=$(detach) \ + --rm \ + docker.elastic.co/elasticsearch/$(version); \ + done \ + } +ifdef detach + @{ \ + printf "\033[2m→ Waiting for the cluster on $(elasticsearch_url)...\033[0m\n"; \ + docker run --network elasticsearch --rm appropriate/curl --max-time 120 --retry 120 --retry-delay 1 --retry-connrefused --show-error --silent --insecure $(elasticsearch_url); \ + output="\033[2m→ Cluster ready; to remove containers:"; \ + output="$$output docker rm -f"; \ + for n in `seq 1 $(nodes)`; do \ + output="$$output es$$n"; \ + done; \ + printf "$$output\033[0m\n"; \ + } +endif + +cluster-update: ## Update the Docker image +ifdef ELASTICSEARCH_BUILD_VERSION + $(eval version ?= "elasticsearch:"${ELASTICSEARCH_BUILD_VERSION}) +else + $(eval version ?= "elasticsearch:"${ELASTICSEARCH_DEFAULT_BUILD_VERSION}) +endif + @printf "\033[2m→ Updating the Docker image...\033[0m\n" + @docker pull docker.elastic.co/elasticsearch/$(version); + +cluster-clean: ## Remove unused Docker volumes and networks + @printf "\033[2m→ Cleaning up Docker assets...\033[0m\n" + docker volume prune --force + docker network prune --force + +docker: ## Build the Docker image and run it + docker build --file .ci/Dockerfile --tag elastic/go-elasticsearch . 
+ docker run -it --network elasticsearch --volume $(PWD)/tmp:/tmp:rw,delegated --rm elastic/go-elasticsearch + +##@ Generator +gen-api: ## Generate the API package from the JSON specification + $(eval input ?= tmp/rest-api-spec) + $(eval output ?= esapi) +ifdef debug + $(eval args += --debug) +endif +ifdef ELASTICSEARCH_BUILD_VERSION + $(eval version = $(ELASTICSEARCH_BUILD_VERSION)) +else + $(eval version = $(ELASTICSEARCH_DEFAULT_BUILD_VERSION)) +endif +ifdef ELASTICSEARCH_BUILD_HASH + $(eval build_hash = $(ELASTICSEARCH_BUILD_HASH)) +else + $(eval build_hash = $(shell cat tmp/elasticsearch.json | jq ".projects.elasticsearch.commit_hash")) +endif + @printf "\033[2m→ Generating API package from specification ($(version):$(build_hash))...\033[0m\n" + @{ \ + set -e; \ + trap "test -d .git && git checkout --quiet $(PWD)/internal/build/go.mod" INT TERM EXIT; \ + export ELASTICSEARCH_BUILD_VERSION=$(version) && \ + export ELASTICSEARCH_BUILD_HASH=$(build_hash) && \ + cd internal/build && \ + go run main.go apisource --input '$(PWD)/$(input)/api/*.json' --output '$(PWD)/$(output)' $(args) && \ + go run main.go apistruct --output '$(PWD)/$(output)'; \ + } + +gen-tests: ## Generate the API tests from the YAML specification + $(eval input ?= tmp/rest-api-spec) + $(eval output ?= esapi/test) +ifdef debug + $(eval args += --debug) +endif +ifdef ELASTICSEARCH_BUILD_VERSION + $(eval version = $(ELASTICSEARCH_BUILD_VERSION)) +else + $(eval version = $(ELASTICSEARCH_DEFAULT_BUILD_VERSION)) +endif +ifdef ELASTICSEARCH_BUILD_HASH + $(eval build_hash = $(ELASTICSEARCH_BUILD_HASH)) +else + $(eval build_hash = $(shell cat tmp/elasticsearch.json | jq ".projects.elasticsearch.commit_hash")) +endif + @printf "\033[2m→ Generating API tests from specification ($(version):$(build_hash))...\033[0m\n" + @{ \ + set -e; \ + trap "test -d .git && git checkout --quiet $(PWD)/internal/cmd/generate/go.mod" INT TERM EXIT; \ + export ELASTICSEARCH_BUILD_VERSION=$(version) && \ + export ELASTICSEARCH_BUILD_HASH=$(build_hash) && \ + rm -rf $(output)/*_test.go && \ + rm -rf $(output)/xpack && \ + cd internal/build && \ + go get golang.org/x/tools/cmd/goimports && \ + go generate ./... 
&& \ + go run main.go apitests --input '$(PWD)/$(input)/test/free/**/*.y*ml' --output '$(PWD)/$(output)' $(args) && \ + go run main.go apitests --input '$(PWD)/$(input)/test/platinum/**/*.yml' --output '$(PWD)/$(output)/xpack' $(args) && \ + mkdir -p '$(PWD)/esapi/test/xpack/ml' && \ + mkdir -p '$(PWD)/esapi/test/xpack/ml-crud' && \ + mv $(PWD)/esapi/test/xpack/xpack_ml* $(PWD)/esapi/test/xpack/ml/ && \ + mv $(PWD)/esapi/test/xpack/ml/xpack_ml__jobs_crud_test.go $(PWD)/esapi/test/xpack/ml-crud/; \ + } + +gen-docs: ## Generate the skeleton of documentation examples + $(eval input ?= tmp/alternatives_report.json) + $(eval update ?= no) + @{ \ + set -e; \ + trap "test -d .git && git checkout --quiet $(PWD)/internal/cmd/generate/go.mod" INT TERM EXIT; \ + if [[ $(update) == 'yes' ]]; then \ + printf "\033[2m→ Updating the alternatives_report.json file\033[0m\n" && \ + curl -s https://raw.githubusercontent.com/elastic/built-docs/master/raw/en/elasticsearch/reference/master/alternatives_report.json > tmp/alternatives_report.json; \ + fi; \ + printf "\033[2m→ Generating Go source files from Console input in [$(input)]\033[0m\n" && \ + ( cd '$(PWD)/internal/cmd/generate' && \ + go run main.go examples src --debug --input='$(PWD)/$(input)' --output='$(PWD)/.doc/examples/' \ + ) && \ + ( cd '$(PWD)/.doc/examples/src' && \ + if which gotestsum > /dev/null 2>&1 ; then \ + gotestsum --format=short-verbose; \ + else \ + go test -v $(testunitargs); \ + fi; \ + ) && \ + printf "\n\033[2m→ Generating ASCIIDoc files from Go source\033[0m\n" && \ + ( cd '$(PWD)/internal/build' && \ + go run main.go examples doc --debug --input='$(PWD)/.doc/examples/src/' --output='$(PWD)/.doc/examples/' \ + ) \ + } + +download-specs: ## Download the latest specs for the specified Elasticsearch version + $(eval output ?= tmp) + @mkdir -p tmp + @{ \ + set -e; \ + printf "\n\033[2m→ Downloading latest Elasticsearch specs for version [$(ELASTICSEARCH_DEFAULT_BUILD_VERSION)]\033[0m\n" && \ + rm -rf $(output)/rest-api-spec && \ + rm -rf $(output)/elasticsearch.json && \ + cd internal/build && \ + go run main.go download-spec --output '$(PWD)/$(output)'; \ + } + +##@ Other +#------------------------------------------------------------------------------ +help: ## Display help + @awk 'BEGIN {FS = ":.*##"; printf "Usage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) +#------------- -------------- + +.DEFAULT_GOAL := help +.PHONY: help apidiff backport cluster cluster-clean cluster-update coverage docker examples gen-api gen-tests godoc lint release test test-api test-bench test-integ test-unit diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/NOTICE b/vendor/github.com/elastic/go-elasticsearch/v9/NOTICE new file mode 100644 index 000000000..e7890c291 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/NOTICE @@ -0,0 +1,2 @@ +Elasticsearch Go Client +Copyright 2021 Elasticsearch B.V. \ No newline at end of file diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/README.md b/vendor/github.com/elastic/go-elasticsearch/v9/README.md new file mode 100644 index 000000000..4dabc257a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/README.md @@ -0,0 +1,90 @@ +# go-elasticsearch + +The official Go client for [Elasticsearch](https://www.elastic.co/products/elasticsearch). 
+
+**[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)**
+or
+**[sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page)**
+**for a free trial of Elastic Cloud**.
+
+[![Go Reference](https://pkg.go.dev/github.com/elastic/go-elasticsearch?status.svg)](https://pkg.go.dev/github.com/elastic/go-elasticsearch/v9)
+[![Go Report Card](https://goreportcard.com/badge/github.com/elastic/go-elasticsearch)](https://goreportcard.com/report/github.com/elastic/go-elasticsearch)
+[![codecov.io](https://codecov.io/github/elastic/go-elasticsearch/coverage.svg?branch=main)](https://codecov.io/gh/elastic/go-elasticsearch?branch=main)
+[![Build](https://github.com/elastic/go-elasticsearch/workflows/Build/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain)
+[![Unit](https://github.com/elastic/go-elasticsearch/workflows/Unit/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain)
+[![Integration](https://github.com/elastic/go-elasticsearch/workflows/Integration/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain)
+[![API](https://github.com/elastic/go-elasticsearch/workflows/API/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain)
+
+## Compatibility
+
+### Go
+
+Starting from version `8.12.0`, this library follows the Go language [policy](https://go.dev/doc/devel/release#policy). Each major Go release is supported until there are two newer major releases. For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+### Elasticsearch
+
+Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of Elasticsearch.
+Elasticsearch language clients are only backwards compatible with default distributions, and no guarantees are made.
+
+When using Go modules, include the version in the import path, and specify either an explicit version or a branch:
+
+    require github.com/elastic/go-elasticsearch/v9 v9.x.x
+    require github.com/elastic/go-elasticsearch/v8 v8.x.x
+
+It's possible to use multiple versions of the client in a single project:
+
+    // go.mod
+    github.com/elastic/go-elasticsearch/v8 v8.18.0
+    github.com/elastic/go-elasticsearch/v9 v9.0.0
+
+    // main.go
+    import (
+      elasticsearch8 "github.com/elastic/go-elasticsearch/v8"
+      elasticsearch9 "github.com/elastic/go-elasticsearch/v9"
+    )
+    // ...
+    es8, _ := elasticsearch8.NewDefaultClient()
+    es9, _ := elasticsearch9.NewDefaultClient()
+
+The `main` branch of the client is compatible with the current `master` branch of Elasticsearch.
+
+
+
+## Installation
+
+Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_installation)
+of the getting started documentation.
+
+
+
+## Connecting
+
+Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_connecting)
+of the getting started documentation.
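+
+As a quick orientation before the linked docs, a minimal sketch of connecting and checking cluster info might look as follows; the address and credentials are placeholders, not defaults you should keep:
+
+    cfg := elasticsearch.Config{
+        Addresses: []string{"http://localhost:9200"}, // placeholder address
+        Username:  "foo",                             // placeholder credentials
+        Password:  "bar",
+    }
+    es, err := elasticsearch.NewClient(cfg)
+    if err != nil {
+        log.Fatalf("Error creating the client: %s", err)
+    }
+    res, err := es.Info()
+    if err != nil {
+        log.Fatalf("Error getting response: %s", err)
+    }
+    defer res.Body.Close()
+    log.Println(res)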
+ +## Operations + +* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_creating_an_index) +* [Indexing documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_indexing_documents) +* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_getting_documents) +* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_searching_documents) +* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_updating_documents) +* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_deleting_documents) +* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_deleting_an_index) + + +## Helpers + +The `esutil` package provides convenience helpers for working with the client. At the moment, it provides the `esutil.JSONReader()` and the `esutil.BulkIndexer` helpers. + + + +## Examples + +The **[`_examples`](./_examples)** folder contains a number of recipes and comprehensive examples to get you started with the client, including configuration and customization of the client, using a custom certificate authority (CA) for security (TLS), mocking the transport for unit tests, embedding the client in a custom type, building queries, performing requests individually and in bulk, and parsing the responses. + + + +## License + +This software is licensed under the [Apache 2 license](./LICENSE). See [NOTICE](./NOTICE). diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/catalog-info.yaml b/vendor/github.com/elastic/go-elasticsearch/v9/catalog-info.yaml new file mode 100644 index 000000000..4635d33be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/catalog-info.yaml @@ -0,0 +1,19 @@ +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-go-elasticsearch +spec: + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + description: Elasticsearch Go Client + name: go-elasticsearch + spec: + repository: elastic/go-elasticsearch + teams: + devtools-team: {} + everyone: + access_level: READ_ONLY + owner: group:devtools-team + type: buildkite-pipeline diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/doc.go b/vendor/github.com/elastic/go-elasticsearch/v9/doc.go new file mode 100644 index 000000000..59a77ba7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/doc.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +/* +Package elasticsearch provides a Go client for Elasticsearch. + +Create the client with the NewDefaultClient function: + + elasticsearch.NewDefaultClient() + +The ELASTICSEARCH_URL environment variable is used instead of the default URL, when set. +Use a comma to separate multiple URLs. + +To configure the client, pass a Config object to the NewClient function: + + cfg := elasticsearch.Config{ + Addresses: []string{ + "http://localhost:9200", + "http://localhost:9201", + }, + Username: "foo", + Password: "bar", + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + ResponseHeaderTimeout: time.Second, + DialContext: (&net.Dialer{Timeout: time.Second}).DialContext, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + }, + } + + elasticsearch.NewClient(cfg) + +When using the Elastic Service (https://elastic.co/cloud), you can use CloudID instead of Addresses. +When either Addresses or CloudID is set, the ELASTICSEARCH_URL environment variable is ignored. + +See the elasticsearch_integration_test.go file and the _examples folder for more information. + +Call the Elasticsearch APIs by invoking the corresponding methods on the client: + + res, err := es.Info() + if err != nil { + log.Fatalf("Error getting response: %s", err) + } + + log.Println(res) + +See the github.com/elastic/go-elasticsearch/esapi package for more information about using the API. + +See the github.com/elastic/elastic-transport-go package for more information about configuring the transport. +*/ +package elasticsearch diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/elasticsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/elasticsearch.go new file mode 100644 index 000000000..6eec2de9c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/elasticsearch.go @@ -0,0 +1,573 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearch + +import ( + "encoding/base64" + "errors" + "fmt" + "go.opentelemetry.io/otel/trace" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/elastic/go-elasticsearch/v9/typedapi" + + "github.com/elastic/go-elasticsearch/v9/esapi" + "github.com/elastic/go-elasticsearch/v9/internal/version" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + tpversion "github.com/elastic/elastic-transport-go/v8/elastictransport/version" +) + +const ( + defaultURL = "http://localhost:9200" + + // Version returns the package version as a string. + Version = version.Client + unknownProduct = "the client noticed that the server is not Elasticsearch and we do not support this unknown product" + + // HeaderClientMeta Key for the HTTP Header related to telemetry data sent with + // each request to Elasticsearch. 
+	HeaderClientMeta = "x-elastic-client-meta"
+
+	compatibilityHeader = "application/vnd.elasticsearch+json;compatible-with=9"
+)
+
+var (
+	esCompatHeader = "ELASTIC_CLIENT_APIVERSIONING"
+	userAgent      string
+	reGoVersion    = regexp.MustCompile(`go(\d+\.\d+\..+)`)
+	reMetaVersion  = regexp.MustCompile("([0-9.]+)(.*)")
+)
+
+func init() {
+	userAgent = initUserAgent()
+}
+
+// Config represents the client configuration.
+type Config struct {
+	Addresses []string // A list of Elasticsearch nodes to use.
+	Username  string   // Username for HTTP Basic Authentication.
+	Password  string   // Password for HTTP Basic Authentication.
+
+	CloudID                string // Endpoint for the Elastic Service (https://elastic.co/cloud).
+	APIKey                 string // Base64-encoded token for authorization; if set, overrides username/password and service token.
+	ServiceToken           string // Service token for authorization; if set, overrides username/password.
+	CertificateFingerprint string // SHA256 hex fingerprint given by Elasticsearch on first launch.
+
+	Header http.Header // Global HTTP request header.
+
+	// PEM-encoded certificate authorities.
+	// When set, an empty certificate pool will be created, and the certificates will be appended to it.
+	// The option is only valid when the transport is not specified, or when it's http.Transport.
+	CACert []byte
+
+	RetryOnStatus []int                           // List of status codes for retry. Default: 502, 503, 504.
+	DisableRetry  bool                            // Default: false.
+	MaxRetries    int                             // Default: 3.
+	RetryOnError  func(*http.Request, error) bool // Optional function to indicate which errors should be retried. Default: nil.
+
+	CompressRequestBody      bool // Default: false.
+	CompressRequestBodyLevel int  // Default: gzip.DefaultCompression.
+	PoolCompressor           bool // If true, a sync.Pool based gzip writer is used. Default: false.
+
+	DiscoverNodesOnStart  bool          // Discover nodes when initializing the client. Default: false.
+	DiscoverNodesInterval time.Duration // Discover nodes periodically. Default: disabled.
+
+	EnableMetrics           bool // Enable the metrics collection.
+	EnableDebugLogger       bool // Enable the debug logging.
+	EnableCompatibilityMode bool // Enable sending the compatibility header.
+
+	DisableMetaHeader bool // Disable the additional "X-Elastic-Client-Meta" HTTP header.
+
+	RetryBackoff func(attempt int) time.Duration // Optional backoff duration. Default: nil.
+
+	Transport http.RoundTripper         // The HTTP transport object.
+	Logger    elastictransport.Logger   // The logger object.
+	Selector  elastictransport.Selector // The selector object.
+
+	// Optional constructor function for a custom ConnectionPool. Default: nil.
+	ConnectionPoolFunc func([]*elastictransport.Connection, elastictransport.Selector) elastictransport.ConnectionPool
+
+	Instrumentation elastictransport.Instrumentation // Enable instrumentation throughout the client.
+}
+
+// NewOpenTelemetryInstrumentation provides the OpenTelemetry integration for both the low-level and Typed APIs.
+// provider is optional; if nil is passed, the integration will retrieve the provider set globally by otel.
+// captureSearchBody defines whether the search query body should be included in the span.
+// Search endpoints are:
+//
+//	search
+//	async_search.submit
+//	msearch
+//	eql.search
+//	terms_enum
+//	search_template
+//	msearch_template
+//	render_search_template
+func NewOpenTelemetryInstrumentation(provider trace.TracerProvider, captureSearchBody bool) elastictransport.Instrumentation {
+	return elastictransport.NewOtelInstrumentation(provider, captureSearchBody, Version)
+}
+
+// BaseClient represents the Elasticsearch client.
+type BaseClient struct {
+	Transport           elastictransport.Interface
+	metaHeader          string
+	compatibilityHeader bool
+
+	disableMetaHeader   bool
+	productCheckMu      sync.RWMutex
+	productCheckSuccess bool
+}
+
+// Client represents the Functional Options API.
+type Client struct {
+	BaseClient
+	*esapi.API
+}
+
+// TypedClient represents the Typed API.
+type TypedClient struct {
+	BaseClient
+	*typedapi.MethodAPI
+}
+
+// NewBaseClient creates a new client free of any API.
+func NewBaseClient(cfg Config) (*BaseClient, error) {
+	tp, err := newTransport(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	compatHeaderEnv := os.Getenv(esCompatHeader)
+	compatibilityHeader, _ := strconv.ParseBool(compatHeaderEnv)
+
+	client := &BaseClient{
+		Transport:           tp,
+		disableMetaHeader:   cfg.DisableMetaHeader,
+		metaHeader:          initMetaHeader(tp),
+		compatibilityHeader: cfg.EnableCompatibilityMode || compatibilityHeader,
+	}
+
+	if cfg.DiscoverNodesOnStart {
+		go client.DiscoverNodes()
+	}
+
+	return client, nil
+}
+
+// NewDefaultClient creates a new client with default options.
+//
+// It will use http://localhost:9200 as the default address.
+//
+// It will use the ELASTICSEARCH_URL environment variable, if set,
+// to configure the addresses; use a comma to separate multiple URLs.
+func NewDefaultClient() (*Client, error) {
+	return NewClient(Config{})
+}
+
+// NewClient creates a new client with configuration from cfg.
+//
+// It will use http://localhost:9200 as the default address.
+//
+// It will use the ELASTICSEARCH_URL environment variable, if set,
+// to configure the addresses; use a comma to separate multiple URLs.
+//
+// If either cfg.Addresses or cfg.CloudID is set, the ELASTICSEARCH_URL
+// environment variable is ignored.
+//
+// It's an error to set both cfg.Addresses and cfg.CloudID.
+func NewClient(cfg Config) (*Client, error) {
+	tp, err := newTransport(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	compatHeaderEnv := os.Getenv(esCompatHeader)
+	compatibilityHeader, _ := strconv.ParseBool(compatHeaderEnv)
+
+	client := &Client{
+		BaseClient: BaseClient{
+			Transport:           tp,
+			disableMetaHeader:   cfg.DisableMetaHeader,
+			metaHeader:          initMetaHeader(tp),
+			compatibilityHeader: cfg.EnableCompatibilityMode || compatibilityHeader,
+		},
+	}
+	client.API = esapi.New(client)
+
+	if cfg.DiscoverNodesOnStart {
+		go client.DiscoverNodes()
+	}
+
+	return client, nil
+}
+
+// NewTypedClient creates a new client with the configuration from cfg.
+//
+// This version uses the same configuration as NewClient.
+//
+// It will return the client with the TypedAPI.
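+//
+// A minimal usage sketch (assumes a reachable cluster at the default address):
+//
+//	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	res, err := es.Info().Do(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Println(res)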
+func NewTypedClient(cfg Config) (*TypedClient, error) {
+	tp, err := newTransport(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	compatHeaderEnv := os.Getenv(esCompatHeader)
+	compatibilityHeader, _ := strconv.ParseBool(compatHeaderEnv)
+
+	metaHeader := strings.Join([]string{initMetaHeader(tp), "hl=1"}, ",")
+
+	client := &TypedClient{
+		BaseClient: BaseClient{
+			Transport:           tp,
+			disableMetaHeader:   cfg.DisableMetaHeader,
+			metaHeader:          metaHeader,
+			compatibilityHeader: cfg.EnableCompatibilityMode || compatibilityHeader,
+		},
+	}
+	client.MethodAPI = typedapi.NewMethodAPI(client)
+
+	if cfg.DiscoverNodesOnStart {
+		go client.DiscoverNodes()
+	}
+
+	return client, nil
+}
+
+func newTransport(cfg Config) (*elastictransport.Client, error) {
+	var addrs []string
+
+	if len(cfg.Addresses) == 0 && cfg.CloudID == "" {
+		addrs = addrsFromEnvironment()
+	} else {
+		if len(cfg.Addresses) > 0 && cfg.CloudID != "" {
+			return nil, errors.New("cannot create client: both Addresses and CloudID are set")
+		}
+
+		if cfg.CloudID != "" {
+			cloudAddr, err := addrFromCloudID(cfg.CloudID)
+			if err != nil {
+				return nil, fmt.Errorf("cannot create client: cannot parse CloudID: %s", err)
+			}
+			addrs = append(addrs, cloudAddr)
+		}
+
+		if len(cfg.Addresses) > 0 {
+			addrs = append(addrs, cfg.Addresses...)
+		}
+	}
+
+	urls, err := addrsToURLs(addrs)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create client: %s", err)
+	}
+
+	if len(urls) == 0 {
+		u, _ := url.Parse(defaultURL) // errcheck exclude
+		urls = append(urls, u)
+	}
+
+	// TODO(karmi): Refactor
+	if urls[0].User != nil {
+		cfg.Username = urls[0].User.Username()
+		pw, _ := urls[0].User.Password()
+		cfg.Password = pw
+	}
+
+	tpConfig := elastictransport.Config{
+		UserAgent: userAgent,
+
+		URLs:                   urls,
+		Username:               cfg.Username,
+		Password:               cfg.Password,
+		APIKey:                 cfg.APIKey,
+		ServiceToken:           cfg.ServiceToken,
+		CertificateFingerprint: cfg.CertificateFingerprint,
+
+		Header: cfg.Header,
+		CACert: cfg.CACert,
+
+		RetryOnStatus: cfg.RetryOnStatus,
+		DisableRetry:  cfg.DisableRetry,
+		RetryOnError:  cfg.RetryOnError,
+		MaxRetries:    cfg.MaxRetries,
+		RetryBackoff:  cfg.RetryBackoff,
+
+		CompressRequestBody:      cfg.CompressRequestBody,
+		CompressRequestBodyLevel: cfg.CompressRequestBodyLevel,
+		PoolCompressor:           cfg.PoolCompressor,
+
+		EnableMetrics:     cfg.EnableMetrics,
+		EnableDebugLogger: cfg.EnableDebugLogger,
+
+		DiscoverNodesInterval: cfg.DiscoverNodesInterval,
+
+		Transport:          cfg.Transport,
+		Logger:             cfg.Logger,
+		Selector:           cfg.Selector,
+		ConnectionPoolFunc: cfg.ConnectionPoolFunc,
+
+		Instrumentation: cfg.Instrumentation,
+	}
+
+	tp, err := elastictransport.New(tpConfig)
+	if err != nil {
+		return nil, fmt.Errorf("error creating transport: %s", err)
+	}
+
+	return tp, nil
+}
+
+// Perform delegates to Transport to execute a request and return a response.
+func (c *BaseClient) Perform(req *http.Request) (*http.Response, error) {
+	// Compatibility Header
+	if c.compatibilityHeader {
+		if req.Body != nil {
+			req.Header.Set("Content-Type", compatibilityHeader)
+		}
+		req.Header.Set("Accept", compatibilityHeader)
+	}
+
+	if !c.disableMetaHeader {
+		existingMetaHeader := req.Header.Get(HeaderClientMeta)
+		if existingMetaHeader != "" {
+			req.Header.Set(HeaderClientMeta, strings.Join([]string{c.metaHeader, existingMetaHeader}, ","))
+		} else {
+			req.Header.Add(HeaderClientMeta, c.metaHeader)
+		}
+	} else {
+		req.Header.Del(HeaderClientMeta)
+	}
+
+	// Execute the request via the transport.
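+	// With compatibility mode on, the outgoing request now carries headers
+	// such as (illustrative values):
+	//
+	//	Accept: application/vnd.elasticsearch+json;compatible-with=9
+	//	x-elastic-client-meta: es=9.1.0,go=1.24.0,t=8.7.0,hc=1.24.0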
+	res, err := c.Transport.Perform(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Product check: run the header check on the first successful answer from ES.
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		checkHeader := func() error { return genuineCheckHeader(res.Header) }
+		if err := c.doProductCheck(checkHeader); err != nil {
+			res.Body.Close()
+			return nil, err
+		}
+	}
+
+	return res, nil
+}
+
+// InstrumentationEnabled propagates back to the client the Instrumentation provided by the transport.
+func (c *BaseClient) InstrumentationEnabled() elastictransport.Instrumentation {
+	if tp, ok := c.Transport.(elastictransport.Instrumented); ok {
+		return tp.InstrumentationEnabled()
+	}
+	return nil
+}
+
+// doProductCheck calls f if there has not been a prior successful call to doProductCheck,
+// returning nil otherwise.
+func (c *BaseClient) doProductCheck(f func() error) error {
+	c.productCheckMu.RLock()
+	productCheckSuccess := c.productCheckSuccess
+	c.productCheckMu.RUnlock()
+
+	if productCheckSuccess {
+		return nil
+	}
+
+	c.productCheckMu.Lock()
+	defer c.productCheckMu.Unlock()
+
+	if c.productCheckSuccess {
+		return nil
+	}
+
+	if err := f(); err != nil {
+		return err
+	}
+
+	c.productCheckSuccess = true
+
+	return nil
+}
+
+// genuineCheckHeader validates the presence of the X-Elastic-Product header.
+func genuineCheckHeader(header http.Header) error {
+	if header.Get("X-Elastic-Product") != "Elasticsearch" {
+		return errors.New(unknownProduct)
+	}
+	return nil
+}
+
+// Metrics returns the client metrics.
+func (c *BaseClient) Metrics() (elastictransport.Metrics, error) {
+	if mt, ok := c.Transport.(elastictransport.Measurable); ok {
+		return mt.Metrics()
+	}
+	return elastictransport.Metrics{}, errors.New("transport is missing method Metrics()")
+}
+
+// DiscoverNodes reloads the client connections by fetching information from the cluster.
+func (c *BaseClient) DiscoverNodes() error {
+	if dt, ok := c.Transport.(elastictransport.Discoverable); ok {
+		return dt.DiscoverNodes()
+	}
+	return errors.New("transport is missing method DiscoverNodes()")
+}
+
+// addrsFromEnvironment returns a list of addresses by splitting
+// the ELASTICSEARCH_URL environment variable on commas, or an empty list.
+func addrsFromEnvironment() []string {
+	var addrs []string
+
+	if envURLs, ok := os.LookupEnv("ELASTICSEARCH_URL"); ok && envURLs != "" {
+		list := strings.Split(envURLs, ",")
+		for _, u := range list {
+			addrs = append(addrs, strings.TrimSpace(u))
+		}
+	}
+
+	return addrs
+}
+
+// addrsToURLs creates a list of url.URL structures from the given address list.
+func addrsToURLs(addrs []string) ([]*url.URL, error) {
+	var urls []*url.URL
+	for _, addr := range addrs {
+		u, err := url.Parse(strings.TrimRight(addr, "/"))
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse url: %v", err)
+		}
+
+		urls = append(urls, u)
+	}
+	return urls, nil
+}
+
+// addrFromCloudID extracts the Elasticsearch URL from CloudID.
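+// For example (hypothetical values), "my-deployment:aG9zdC5leGFtcGxlLmNvbSRhYmMxMjM="
+// decodes to "host.example.com$abc123" and yields "https://abc123.host.example.com".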
+// See: https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html +func addrFromCloudID(input string) (string, error) { + var scheme = "https://" + + values := strings.Split(input, ":") + if len(values) != 2 { + return "", fmt.Errorf("unexpected format: %q", input) + } + data, err := base64.StdEncoding.DecodeString(values[1]) + if err != nil { + return "", err + } + parts := strings.Split(string(data), "$") + + if len(parts) < 2 { + return "", fmt.Errorf("invalid encoded value: %s", parts) + } + + return fmt.Sprintf("%s%s.%s", scheme, parts[1], parts[0]), nil +} + +func initUserAgent() string { + var b strings.Builder + + b.WriteString("go-elasticsearch") + b.WriteRune('/') + b.WriteString(Version) + b.WriteRune(' ') + b.WriteRune('(') + b.WriteString(runtime.GOOS) + b.WriteRune(' ') + b.WriteString(runtime.GOARCH) + b.WriteString("; ") + b.WriteString("Go ") + if v := reGoVersion.ReplaceAllString(runtime.Version(), "$1"); v != "" { + b.WriteString(v) + } else { + b.WriteString(runtime.Version()) + } + b.WriteRune(')') + + return b.String() +} + +func initMetaHeader(transport interface{}) string { + var b strings.Builder + var strippedGoVersion string + var strippedEsVersion string + var strippedTransportVersion string + + strippedEsVersion = buildStrippedVersion(Version) + strippedGoVersion = buildStrippedVersion(runtime.Version()) + + if _, ok := transport.(*elastictransport.Client); ok { + strippedTransportVersion = buildStrippedVersion(tpversion.Transport) + } else { + strippedTransportVersion = strippedEsVersion + } + + var duos = [][]string{ + { + "es", + strippedEsVersion, + }, + { + "go", + strippedGoVersion, + }, + { + "t", + strippedTransportVersion, + }, + { + "hc", + strippedGoVersion, + }, + } + + var arr []string + for _, duo := range duos { + arr = append(arr, strings.Join(duo, "=")) + } + b.WriteString(strings.Join(arr, ",")) + + return b.String() +} + +func buildStrippedVersion(version string) string { + v := reMetaVersion.FindStringSubmatch(version) + + if len(v) == 3 && !strings.Contains(version, "devel") { + switch { + case v[2] != "": + return v[1] + "p" + default: + return v[1] + } + } + + return "0.0p" +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api._.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api._.go new file mode 100644 index 000000000..e500c4a4b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api._.go @@ -0,0 +1,1297 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0 (9732617): DO NOT EDIT + +package esapi + +// API contains the Elasticsearch APIs +type API struct { + Cat *Cat + Cluster *Cluster + Indices *Indices + Ingest *Ingest + Nodes *Nodes + Remote *Remote + Snapshot *Snapshot + Tasks *Tasks + AsyncSearch *AsyncSearch + CCR *CCR + ILM *ILM + License *License + Migration *Migration + ML *ML + Monitoring *Monitoring + Rollup *Rollup + Security *Security + SQL *SQL + SSL *SSL + Watcher *Watcher + XPack *XPack + + AutoscalingDeleteAutoscalingPolicy AutoscalingDeleteAutoscalingPolicy + AutoscalingGetAutoscalingCapacity AutoscalingGetAutoscalingCapacity + AutoscalingGetAutoscalingPolicy AutoscalingGetAutoscalingPolicy + AutoscalingPutAutoscalingPolicy AutoscalingPutAutoscalingPolicy + Bulk Bulk + Capabilities Capabilities + ClearScroll ClearScroll + ClosePointInTime ClosePointInTime + ConnectorCheckIn ConnectorCheckIn + ConnectorDelete ConnectorDelete + ConnectorGet ConnectorGet + ConnectorLastSync ConnectorLastSync + ConnectorList ConnectorList + ConnectorPost ConnectorPost + ConnectorPut ConnectorPut + ConnectorSecretDelete ConnectorSecretDelete + ConnectorSecretGet ConnectorSecretGet + ConnectorSecretPost ConnectorSecretPost + ConnectorSecretPut ConnectorSecretPut + ConnectorSyncJobCancel ConnectorSyncJobCancel + ConnectorSyncJobCheckIn ConnectorSyncJobCheckIn + ConnectorSyncJobClaim ConnectorSyncJobClaim + ConnectorSyncJobDelete ConnectorSyncJobDelete + ConnectorSyncJobError ConnectorSyncJobError + ConnectorSyncJobGet ConnectorSyncJobGet + ConnectorSyncJobList ConnectorSyncJobList + ConnectorSyncJobPost ConnectorSyncJobPost + ConnectorSyncJobUpdateStats ConnectorSyncJobUpdateStats + ConnectorUpdateAPIKeyDocumentID ConnectorUpdateAPIKeyDocumentID + ConnectorUpdateActiveFiltering ConnectorUpdateActiveFiltering + ConnectorUpdateConfiguration ConnectorUpdateConfiguration + ConnectorUpdateError ConnectorUpdateError + ConnectorUpdateFeatures ConnectorUpdateFeatures + ConnectorUpdateFiltering ConnectorUpdateFiltering + ConnectorUpdateFilteringValidation ConnectorUpdateFilteringValidation + ConnectorUpdateIndexName ConnectorUpdateIndexName + ConnectorUpdateName ConnectorUpdateName + ConnectorUpdateNative ConnectorUpdateNative + ConnectorUpdatePipeline ConnectorUpdatePipeline + ConnectorUpdateScheduling ConnectorUpdateScheduling + ConnectorUpdateServiceDocumentType ConnectorUpdateServiceDocumentType + ConnectorUpdateStatus ConnectorUpdateStatus + Count Count + Create Create + DanglingIndicesDeleteDanglingIndex DanglingIndicesDeleteDanglingIndex + DanglingIndicesImportDanglingIndex DanglingIndicesImportDanglingIndex + DanglingIndicesListDanglingIndices DanglingIndicesListDanglingIndices + DeleteByQuery DeleteByQuery + DeleteByQueryRethrottle DeleteByQueryRethrottle + Delete Delete + DeleteScript DeleteScript + EnrichDeletePolicy EnrichDeletePolicy + EnrichExecutePolicy EnrichExecutePolicy + EnrichGetPolicy EnrichGetPolicy + EnrichPutPolicy EnrichPutPolicy + EnrichStats EnrichStats + EqlDelete EqlDelete + EqlGet EqlGet + EqlGetStatus EqlGetStatus + EqlSearch EqlSearch + EsqlAsyncQueryDelete EsqlAsyncQueryDelete + EsqlAsyncQueryGet EsqlAsyncQueryGet + EsqlAsyncQuery EsqlAsyncQuery + EsqlAsyncQueryStop EsqlAsyncQueryStop + EsqlGetQuery EsqlGetQuery + EsqlListQueries EsqlListQueries + EsqlQuery EsqlQuery + Exists Exists + ExistsSource ExistsSource + Explain Explain + FeaturesGetFeatures FeaturesGetFeatures + FeaturesResetFeatures FeaturesResetFeatures + FieldCaps FieldCaps + FleetDeleteSecret 
FleetDeleteSecret + FleetGetSecret FleetGetSecret + FleetGlobalCheckpoints FleetGlobalCheckpoints + FleetMsearch FleetMsearch + FleetPostSecret FleetPostSecret + FleetSearch FleetSearch + Get Get + GetScriptContext GetScriptContext + GetScriptLanguages GetScriptLanguages + GetScript GetScript + GetSource GetSource + GraphExplore GraphExplore + HealthReport HealthReport + Index Index + InferenceChatCompletionUnified InferenceChatCompletionUnified + InferenceCompletion InferenceCompletion + InferenceDelete InferenceDelete + InferenceGet InferenceGet + InferenceInference InferenceInference + InferencePutAlibabacloud InferencePutAlibabacloud + InferencePutAmazonbedrock InferencePutAmazonbedrock + InferencePutAmazonsagemaker InferencePutAmazonsagemaker + InferencePutAnthropic InferencePutAnthropic + InferencePutAzureaistudio InferencePutAzureaistudio + InferencePutAzureopenai InferencePutAzureopenai + InferencePutCohere InferencePutCohere + InferencePutCustom InferencePutCustom + InferencePutDeepseek InferencePutDeepseek + InferencePutElasticsearch InferencePutElasticsearch + InferencePutElser InferencePutElser + InferencePutGoogleaistudio InferencePutGoogleaistudio + InferencePutGooglevertexai InferencePutGooglevertexai + InferencePutHuggingFace InferencePutHuggingFace + InferencePutJinaai InferencePutJinaai + InferencePutMistral InferencePutMistral + InferencePutOpenai InferencePutOpenai + InferencePut InferencePut + InferencePutVoyageai InferencePutVoyageai + InferencePutWatsonx InferencePutWatsonx + InferenceRerank InferenceRerank + InferenceSparseEmbedding InferenceSparseEmbedding + InferenceStreamCompletion InferenceStreamCompletion + InferenceTextEmbedding InferenceTextEmbedding + InferenceUpdate InferenceUpdate + Info Info + LogstashDeletePipeline LogstashDeletePipeline + LogstashGetPipeline LogstashGetPipeline + LogstashPutPipeline LogstashPutPipeline + Mget Mget + Msearch Msearch + MsearchTemplate MsearchTemplate + Mtermvectors Mtermvectors + OpenPointInTime OpenPointInTime + Ping Ping + ProfilingFlamegraph ProfilingFlamegraph + ProfilingStacktraces ProfilingStacktraces + ProfilingStatus ProfilingStatus + ProfilingTopnFunctions ProfilingTopnFunctions + PutScript PutScript + QueryRulesDeleteRule QueryRulesDeleteRule + QueryRulesDeleteRuleset QueryRulesDeleteRuleset + QueryRulesGetRule QueryRulesGetRule + QueryRulesGetRuleset QueryRulesGetRuleset + QueryRulesListRulesets QueryRulesListRulesets + QueryRulesPutRule QueryRulesPutRule + QueryRulesPutRuleset QueryRulesPutRuleset + QueryRulesTest QueryRulesTest + RankEval RankEval + Reindex Reindex + ReindexRethrottle ReindexRethrottle + RenderSearchTemplate RenderSearchTemplate + ScriptsPainlessExecute ScriptsPainlessExecute + Scroll Scroll + SearchApplicationDeleteBehavioralAnalytics SearchApplicationDeleteBehavioralAnalytics + SearchApplicationDelete SearchApplicationDelete + SearchApplicationGetBehavioralAnalytics SearchApplicationGetBehavioralAnalytics + SearchApplicationGet SearchApplicationGet + SearchApplicationList SearchApplicationList + SearchApplicationPostBehavioralAnalyticsEvent SearchApplicationPostBehavioralAnalyticsEvent + SearchApplicationPutBehavioralAnalytics SearchApplicationPutBehavioralAnalytics + SearchApplicationPut SearchApplicationPut + SearchApplicationRenderQuery SearchApplicationRenderQuery + SearchApplicationSearch SearchApplicationSearch + SearchMvt SearchMvt + Search Search + SearchShards SearchShards + SearchTemplate SearchTemplate + SearchableSnapshotsCacheStats SearchableSnapshotsCacheStats + 
SearchableSnapshotsClearCache SearchableSnapshotsClearCache + SearchableSnapshotsMount SearchableSnapshotsMount + SearchableSnapshotsStats SearchableSnapshotsStats + ShutdownDeleteNode ShutdownDeleteNode + ShutdownGetNode ShutdownGetNode + ShutdownPutNode ShutdownPutNode + SimulateIngest SimulateIngest + SlmDeleteLifecycle SlmDeleteLifecycle + SlmExecuteLifecycle SlmExecuteLifecycle + SlmExecuteRetention SlmExecuteRetention + SlmGetLifecycle SlmGetLifecycle + SlmGetStats SlmGetStats + SlmGetStatus SlmGetStatus + SlmPutLifecycle SlmPutLifecycle + SlmStart SlmStart + SlmStop SlmStop + StreamsLogsDisable StreamsLogsDisable + StreamsLogsEnable StreamsLogsEnable + StreamsStatus StreamsStatus + SynonymsDeleteSynonym SynonymsDeleteSynonym + SynonymsDeleteSynonymRule SynonymsDeleteSynonymRule + SynonymsGetSynonym SynonymsGetSynonym + SynonymsGetSynonymRule SynonymsGetSynonymRule + SynonymsGetSynonymsSets SynonymsGetSynonymsSets + SynonymsPutSynonym SynonymsPutSynonym + SynonymsPutSynonymRule SynonymsPutSynonymRule + TermsEnum TermsEnum + Termvectors Termvectors + TextStructureFindFieldStructure TextStructureFindFieldStructure + TextStructureFindMessageStructure TextStructureFindMessageStructure + TextStructureFindStructure TextStructureFindStructure + TextStructureTestGrokPattern TextStructureTestGrokPattern + TransformDeleteTransform TransformDeleteTransform + TransformGetNodeStats TransformGetNodeStats + TransformGetTransform TransformGetTransform + TransformGetTransformStats TransformGetTransformStats + TransformPreviewTransform TransformPreviewTransform + TransformPutTransform TransformPutTransform + TransformResetTransform TransformResetTransform + TransformScheduleNowTransform TransformScheduleNowTransform + TransformStartTransform TransformStartTransform + TransformStopTransform TransformStopTransform + TransformUpdateTransform TransformUpdateTransform + TransformUpgradeTransforms TransformUpgradeTransforms + UpdateByQuery UpdateByQuery + UpdateByQueryRethrottle UpdateByQueryRethrottle + Update Update +} + +// Cat contains the Cat APIs +type Cat struct { + Aliases CatAliases + Allocation CatAllocation + ComponentTemplates CatComponentTemplates + Count CatCount + Fielddata CatFielddata + Health CatHealth + Help CatHelp + Indices CatIndices + MLDataFrameAnalytics CatMLDataFrameAnalytics + MLDatafeeds CatMLDatafeeds + MLJobs CatMLJobs + MLTrainedModels CatMLTrainedModels + Master CatMaster + Nodeattrs CatNodeattrs + Nodes CatNodes + PendingTasks CatPendingTasks + Plugins CatPlugins + Recovery CatRecovery + Repositories CatRepositories + Segments CatSegments + Shards CatShards + Snapshots CatSnapshots + Tasks CatTasks + Templates CatTemplates + ThreadPool CatThreadPool + Transforms CatTransforms +} + +// Cluster contains the Cluster APIs +type Cluster struct { + AllocationExplain ClusterAllocationExplain + DeleteComponentTemplate ClusterDeleteComponentTemplate + DeleteVotingConfigExclusions ClusterDeleteVotingConfigExclusions + ExistsComponentTemplate ClusterExistsComponentTemplate + GetComponentTemplate ClusterGetComponentTemplate + GetSettings ClusterGetSettings + Health ClusterHealth + Info ClusterInfo + PendingTasks ClusterPendingTasks + PostVotingConfigExclusions ClusterPostVotingConfigExclusions + PutComponentTemplate ClusterPutComponentTemplate + PutSettings ClusterPutSettings + RemoteInfo ClusterRemoteInfo + Reroute ClusterReroute + State ClusterState + Stats ClusterStats +} + +// Indices contains the Indices APIs +type Indices struct { + AddBlock IndicesAddBlock + Analyze 
IndicesAnalyze + CancelMigrateReindex IndicesCancelMigrateReindex + ClearCache IndicesClearCache + Clone IndicesClone + Close IndicesClose + CreateDataStream IndicesCreateDataStream + CreateFrom IndicesCreateFrom + Create IndicesCreate + DataStreamsStats IndicesDataStreamsStats + DeleteAlias IndicesDeleteAlias + DeleteDataLifecycle IndicesDeleteDataLifecycle + DeleteDataStreamOptions IndicesDeleteDataStreamOptions + DeleteDataStream IndicesDeleteDataStream + DeleteIndexTemplate IndicesDeleteIndexTemplate + Delete IndicesDelete + DeleteTemplate IndicesDeleteTemplate + DiskUsage IndicesDiskUsage + Downsample IndicesDownsample + ExistsAlias IndicesExistsAlias + ExistsIndexTemplate IndicesExistsIndexTemplate + Exists IndicesExists + ExistsTemplate IndicesExistsTemplate + ExplainDataLifecycle IndicesExplainDataLifecycle + FieldUsageStats IndicesFieldUsageStats + Flush IndicesFlush + Forcemerge IndicesForcemerge + GetAlias IndicesGetAlias + GetDataLifecycle IndicesGetDataLifecycle + GetDataLifecycleStats IndicesGetDataLifecycleStats + GetDataStreamOptions IndicesGetDataStreamOptions + GetDataStream IndicesGetDataStream + GetDataStreamSettings IndicesGetDataStreamSettings + GetFieldMapping IndicesGetFieldMapping + GetIndexTemplate IndicesGetIndexTemplate + GetMapping IndicesGetMapping + GetMigrateReindexStatus IndicesGetMigrateReindexStatus + Get IndicesGet + GetSettings IndicesGetSettings + GetTemplate IndicesGetTemplate + MigrateReindex IndicesMigrateReindex + MigrateToDataStream IndicesMigrateToDataStream + ModifyDataStream IndicesModifyDataStream + Open IndicesOpen + PromoteDataStream IndicesPromoteDataStream + PutAlias IndicesPutAlias + PutDataLifecycle IndicesPutDataLifecycle + PutDataStreamOptions IndicesPutDataStreamOptions + PutDataStreamSettings IndicesPutDataStreamSettings + PutIndexTemplate IndicesPutIndexTemplate + PutMapping IndicesPutMapping + PutSettings IndicesPutSettings + PutTemplate IndicesPutTemplate + Recovery IndicesRecovery + Refresh IndicesRefresh + ReloadSearchAnalyzers IndicesReloadSearchAnalyzers + RemoveBlock IndicesRemoveBlock + ResolveCluster IndicesResolveCluster + ResolveIndex IndicesResolveIndex + Rollover IndicesRollover + Segments IndicesSegments + ShardStores IndicesShardStores + Shrink IndicesShrink + SimulateIndexTemplate IndicesSimulateIndexTemplate + SimulateTemplate IndicesSimulateTemplate + Split IndicesSplit + Stats IndicesStats + UpdateAliases IndicesUpdateAliases + ValidateQuery IndicesValidateQuery +} + +// Ingest contains the Ingest APIs +type Ingest struct { + DeleteGeoipDatabase IngestDeleteGeoipDatabase + DeleteIPLocationDatabase IngestDeleteIPLocationDatabase + DeletePipeline IngestDeletePipeline + GeoIPStats IngestGeoIPStats + GetGeoipDatabase IngestGetGeoipDatabase + GetIPLocationDatabase IngestGetIPLocationDatabase + GetPipeline IngestGetPipeline + ProcessorGrok IngestProcessorGrok + PutGeoipDatabase IngestPutGeoipDatabase + PutIPLocationDatabase IngestPutIPLocationDatabase + PutPipeline IngestPutPipeline + Simulate IngestSimulate +} + +// Nodes contains the Nodes APIs +type Nodes struct { + ClearRepositoriesMeteringArchive NodesClearRepositoriesMeteringArchive + GetRepositoriesMeteringInfo NodesGetRepositoriesMeteringInfo + HotThreads NodesHotThreads + Info NodesInfo + ReloadSecureSettings NodesReloadSecureSettings + Stats NodesStats + Usage NodesUsage +} + +// Remote contains the Remote APIs +type Remote struct { +} + +// Snapshot contains the Snapshot APIs +type Snapshot struct { + CleanupRepository SnapshotCleanupRepository + Clone 
SnapshotClone + CreateRepository SnapshotCreateRepository + Create SnapshotCreate + DeleteRepository SnapshotDeleteRepository + Delete SnapshotDelete + GetRepository SnapshotGetRepository + Get SnapshotGet + RepositoryAnalyze SnapshotRepositoryAnalyze + RepositoryVerifyIntegrity SnapshotRepositoryVerifyIntegrity + Restore SnapshotRestore + Status SnapshotStatus + VerifyRepository SnapshotVerifyRepository +} + +// Tasks contains the Tasks APIs +type Tasks struct { + Cancel TasksCancel + Get TasksGet + List TasksList +} + +// AsyncSearch contains the AsyncSearch APIs +type AsyncSearch struct { + Delete AsyncSearchDelete + Get AsyncSearchGet + Status AsyncSearchStatus + Submit AsyncSearchSubmit +} + +// CCR contains the CCR APIs +type CCR struct { + DeleteAutoFollowPattern CCRDeleteAutoFollowPattern + FollowInfo CCRFollowInfo + Follow CCRFollow + FollowStats CCRFollowStats + ForgetFollower CCRForgetFollower + GetAutoFollowPattern CCRGetAutoFollowPattern + PauseAutoFollowPattern CCRPauseAutoFollowPattern + PauseFollow CCRPauseFollow + PutAutoFollowPattern CCRPutAutoFollowPattern + ResumeAutoFollowPattern CCRResumeAutoFollowPattern + ResumeFollow CCRResumeFollow + Stats CCRStats + Unfollow CCRUnfollow +} + +// ILM contains the ILM APIs +type ILM struct { + DeleteLifecycle ILMDeleteLifecycle + ExplainLifecycle ILMExplainLifecycle + GetLifecycle ILMGetLifecycle + GetStatus ILMGetStatus + MigrateToDataTiers ILMMigrateToDataTiers + MoveToStep ILMMoveToStep + PutLifecycle ILMPutLifecycle + RemovePolicy ILMRemovePolicy + Retry ILMRetry + Start ILMStart + Stop ILMStop +} + +// License contains the License APIs +type License struct { + Delete LicenseDelete + GetBasicStatus LicenseGetBasicStatus + Get LicenseGet + GetTrialStatus LicenseGetTrialStatus + Post LicensePost + PostStartBasic LicensePostStartBasic + PostStartTrial LicensePostStartTrial +} + +// Migration contains the Migration APIs +type Migration struct { + Deprecations MigrationDeprecations + GetFeatureUpgradeStatus MigrationGetFeatureUpgradeStatus + PostFeatureUpgrade MigrationPostFeatureUpgrade +} + +// ML contains the ML APIs +type ML struct { + ClearTrainedModelDeploymentCache MLClearTrainedModelDeploymentCache + CloseJob MLCloseJob + DeleteCalendarEvent MLDeleteCalendarEvent + DeleteCalendarJob MLDeleteCalendarJob + DeleteCalendar MLDeleteCalendar + DeleteDataFrameAnalytics MLDeleteDataFrameAnalytics + DeleteDatafeed MLDeleteDatafeed + DeleteExpiredData MLDeleteExpiredData + DeleteFilter MLDeleteFilter + DeleteForecast MLDeleteForecast + DeleteJob MLDeleteJob + DeleteModelSnapshot MLDeleteModelSnapshot + DeleteTrainedModelAlias MLDeleteTrainedModelAlias + DeleteTrainedModel MLDeleteTrainedModel + EstimateModelMemory MLEstimateModelMemory + EvaluateDataFrame MLEvaluateDataFrame + ExplainDataFrameAnalytics MLExplainDataFrameAnalytics + FlushJob MLFlushJob + Forecast MLForecast + GetBuckets MLGetBuckets + GetCalendarEvents MLGetCalendarEvents + GetCalendars MLGetCalendars + GetCategories MLGetCategories + GetDataFrameAnalytics MLGetDataFrameAnalytics + GetDataFrameAnalyticsStats MLGetDataFrameAnalyticsStats + GetDatafeedStats MLGetDatafeedStats + GetDatafeeds MLGetDatafeeds + GetFilters MLGetFilters + GetInfluencers MLGetInfluencers + GetJobStats MLGetJobStats + GetJobs MLGetJobs + GetMemoryStats MLGetMemoryStats + GetModelSnapshotUpgradeStats MLGetModelSnapshotUpgradeStats + GetModelSnapshots MLGetModelSnapshots + GetOverallBuckets MLGetOverallBuckets + GetRecords MLGetRecords + GetTrainedModels MLGetTrainedModels + GetTrainedModelsStats 
MLGetTrainedModelsStats + InferTrainedModel MLInferTrainedModel + Info MLInfo + OpenJob MLOpenJob + PostCalendarEvents MLPostCalendarEvents + PostData MLPostData + PreviewDataFrameAnalytics MLPreviewDataFrameAnalytics + PreviewDatafeed MLPreviewDatafeed + PutCalendarJob MLPutCalendarJob + PutCalendar MLPutCalendar + PutDataFrameAnalytics MLPutDataFrameAnalytics + PutDatafeed MLPutDatafeed + PutFilter MLPutFilter + PutJob MLPutJob + PutTrainedModelAlias MLPutTrainedModelAlias + PutTrainedModelDefinitionPart MLPutTrainedModelDefinitionPart + PutTrainedModel MLPutTrainedModel + PutTrainedModelVocabulary MLPutTrainedModelVocabulary + ResetJob MLResetJob + RevertModelSnapshot MLRevertModelSnapshot + SetUpgradeMode MLSetUpgradeMode + StartDataFrameAnalytics MLStartDataFrameAnalytics + StartDatafeed MLStartDatafeed + StartTrainedModelDeployment MLStartTrainedModelDeployment + StopDataFrameAnalytics MLStopDataFrameAnalytics + StopDatafeed MLStopDatafeed + StopTrainedModelDeployment MLStopTrainedModelDeployment + UpdateDataFrameAnalytics MLUpdateDataFrameAnalytics + UpdateDatafeed MLUpdateDatafeed + UpdateFilter MLUpdateFilter + UpdateJob MLUpdateJob + UpdateModelSnapshot MLUpdateModelSnapshot + UpdateTrainedModelDeployment MLUpdateTrainedModelDeployment + UpgradeJobSnapshot MLUpgradeJobSnapshot + ValidateDetector MLValidateDetector + Validate MLValidate +} + +// Monitoring contains the Monitoring APIs +type Monitoring struct { + Bulk MonitoringBulk +} + +// Rollup contains the Rollup APIs +type Rollup struct { + DeleteJob RollupDeleteJob + GetJobs RollupGetJobs + GetCaps RollupGetRollupCaps + GetIndexCaps RollupGetRollupIndexCaps + PutJob RollupPutJob + Search RollupRollupSearch + StartJob RollupStartJob + StopJob RollupStopJob +} + +// Security contains the Security APIs +type Security struct { + ActivateUserProfile SecurityActivateUserProfile + Authenticate SecurityAuthenticate + BulkDeleteRole SecurityBulkDeleteRole + BulkPutRole SecurityBulkPutRole + BulkUpdateAPIKeys SecurityBulkUpdateAPIKeys + ChangePassword SecurityChangePassword + ClearAPIKeyCache SecurityClearAPIKeyCache + ClearCachedPrivileges SecurityClearCachedPrivileges + ClearCachedRealms SecurityClearCachedRealms + ClearCachedRoles SecurityClearCachedRoles + ClearCachedServiceTokens SecurityClearCachedServiceTokens + CreateAPIKey SecurityCreateAPIKey + CreateCrossClusterAPIKey SecurityCreateCrossClusterAPIKey + CreateServiceToken SecurityCreateServiceToken + DelegatePki SecurityDelegatePki + DeletePrivileges SecurityDeletePrivileges + DeleteRoleMapping SecurityDeleteRoleMapping + DeleteRole SecurityDeleteRole + DeleteServiceToken SecurityDeleteServiceToken + DeleteUser SecurityDeleteUser + DisableUserProfile SecurityDisableUserProfile + DisableUser SecurityDisableUser + EnableUserProfile SecurityEnableUserProfile + EnableUser SecurityEnableUser + EnrollKibana SecurityEnrollKibana + EnrollNode SecurityEnrollNode + GetAPIKey SecurityGetAPIKey + GetBuiltinPrivileges SecurityGetBuiltinPrivileges + GetPrivileges SecurityGetPrivileges + GetRoleMapping SecurityGetRoleMapping + GetRole SecurityGetRole + GetServiceAccounts SecurityGetServiceAccounts + GetServiceCredentials SecurityGetServiceCredentials + GetSettings SecurityGetSettings + GetToken SecurityGetToken + GetUserPrivileges SecurityGetUserPrivileges + GetUserProfile SecurityGetUserProfile + GetUser SecurityGetUser + GrantAPIKey SecurityGrantAPIKey + HasPrivileges SecurityHasPrivileges + HasPrivilegesUserProfile SecurityHasPrivilegesUserProfile + InvalidateAPIKey 
SecurityInvalidateAPIKey + InvalidateToken SecurityInvalidateToken + OidcAuthenticate SecurityOidcAuthenticate + OidcLogout SecurityOidcLogout + OidcPrepareAuthentication SecurityOidcPrepareAuthentication + PutPrivileges SecurityPutPrivileges + PutRoleMapping SecurityPutRoleMapping + PutRole SecurityPutRole + PutUser SecurityPutUser + QueryAPIKeys SecurityQueryAPIKeys + QueryRole SecurityQueryRole + QueryUser SecurityQueryUser + SamlAuthenticate SecuritySamlAuthenticate + SamlCompleteLogout SecuritySamlCompleteLogout + SamlInvalidate SecuritySamlInvalidate + SamlLogout SecuritySamlLogout + SamlPrepareAuthentication SecuritySamlPrepareAuthentication + SamlServiceProviderMetadata SecuritySamlServiceProviderMetadata + SuggestUserProfiles SecuritySuggestUserProfiles + UpdateAPIKey SecurityUpdateAPIKey + UpdateCrossClusterAPIKey SecurityUpdateCrossClusterAPIKey + UpdateSettings SecurityUpdateSettings + UpdateUserProfileData SecurityUpdateUserProfileData +} + +// SQL contains the SQL APIs +type SQL struct { + ClearCursor SQLClearCursor + DeleteAsync SQLDeleteAsync + GetAsync SQLGetAsync + GetAsyncStatus SQLGetAsyncStatus + Query SQLQuery + Translate SQLTranslate +} + +// SSL contains the SSL APIs +type SSL struct { + Certificates SSLCertificates +} + +// Watcher contains the Watcher APIs +type Watcher struct { + AckWatch WatcherAckWatch + ActivateWatch WatcherActivateWatch + DeactivateWatch WatcherDeactivateWatch + DeleteWatch WatcherDeleteWatch + ExecuteWatch WatcherExecuteWatch + GetSettings WatcherGetSettings + GetWatch WatcherGetWatch + PutWatch WatcherPutWatch + QueryWatches WatcherQueryWatches + Start WatcherStart + Stats WatcherStats + Stop WatcherStop + UpdateSettings WatcherUpdateSettings +} + +// XPack contains the XPack APIs +type XPack struct { + Info XPackInfo + Usage XPackUsage +} + +// New creates new API +func New(t Transport) *API { + return &API{ + AutoscalingDeleteAutoscalingPolicy: newAutoscalingDeleteAutoscalingPolicyFunc(t), + AutoscalingGetAutoscalingCapacity: newAutoscalingGetAutoscalingCapacityFunc(t), + AutoscalingGetAutoscalingPolicy: newAutoscalingGetAutoscalingPolicyFunc(t), + AutoscalingPutAutoscalingPolicy: newAutoscalingPutAutoscalingPolicyFunc(t), + Bulk: newBulkFunc(t), + Capabilities: newCapabilitiesFunc(t), + ClearScroll: newClearScrollFunc(t), + ClosePointInTime: newClosePointInTimeFunc(t), + ConnectorCheckIn: newConnectorCheckInFunc(t), + ConnectorDelete: newConnectorDeleteFunc(t), + ConnectorGet: newConnectorGetFunc(t), + ConnectorLastSync: newConnectorLastSyncFunc(t), + ConnectorList: newConnectorListFunc(t), + ConnectorPost: newConnectorPostFunc(t), + ConnectorPut: newConnectorPutFunc(t), + ConnectorSecretDelete: newConnectorSecretDeleteFunc(t), + ConnectorSecretGet: newConnectorSecretGetFunc(t), + ConnectorSecretPost: newConnectorSecretPostFunc(t), + ConnectorSecretPut: newConnectorSecretPutFunc(t), + ConnectorSyncJobCancel: newConnectorSyncJobCancelFunc(t), + ConnectorSyncJobCheckIn: newConnectorSyncJobCheckInFunc(t), + ConnectorSyncJobClaim: newConnectorSyncJobClaimFunc(t), + ConnectorSyncJobDelete: newConnectorSyncJobDeleteFunc(t), + ConnectorSyncJobError: newConnectorSyncJobErrorFunc(t), + ConnectorSyncJobGet: newConnectorSyncJobGetFunc(t), + ConnectorSyncJobList: newConnectorSyncJobListFunc(t), + ConnectorSyncJobPost: newConnectorSyncJobPostFunc(t), + ConnectorSyncJobUpdateStats: newConnectorSyncJobUpdateStatsFunc(t), + ConnectorUpdateAPIKeyDocumentID: newConnectorUpdateAPIKeyDocumentIDFunc(t), + ConnectorUpdateActiveFiltering: 
newConnectorUpdateActiveFilteringFunc(t), + ConnectorUpdateConfiguration: newConnectorUpdateConfigurationFunc(t), + ConnectorUpdateError: newConnectorUpdateErrorFunc(t), + ConnectorUpdateFeatures: newConnectorUpdateFeaturesFunc(t), + ConnectorUpdateFiltering: newConnectorUpdateFilteringFunc(t), + ConnectorUpdateFilteringValidation: newConnectorUpdateFilteringValidationFunc(t), + ConnectorUpdateIndexName: newConnectorUpdateIndexNameFunc(t), + ConnectorUpdateName: newConnectorUpdateNameFunc(t), + ConnectorUpdateNative: newConnectorUpdateNativeFunc(t), + ConnectorUpdatePipeline: newConnectorUpdatePipelineFunc(t), + ConnectorUpdateScheduling: newConnectorUpdateSchedulingFunc(t), + ConnectorUpdateServiceDocumentType: newConnectorUpdateServiceDocumentTypeFunc(t), + ConnectorUpdateStatus: newConnectorUpdateStatusFunc(t), + Count: newCountFunc(t), + Create: newCreateFunc(t), + DanglingIndicesDeleteDanglingIndex: newDanglingIndicesDeleteDanglingIndexFunc(t), + DanglingIndicesImportDanglingIndex: newDanglingIndicesImportDanglingIndexFunc(t), + DanglingIndicesListDanglingIndices: newDanglingIndicesListDanglingIndicesFunc(t), + DeleteByQuery: newDeleteByQueryFunc(t), + DeleteByQueryRethrottle: newDeleteByQueryRethrottleFunc(t), + Delete: newDeleteFunc(t), + DeleteScript: newDeleteScriptFunc(t), + EnrichDeletePolicy: newEnrichDeletePolicyFunc(t), + EnrichExecutePolicy: newEnrichExecutePolicyFunc(t), + EnrichGetPolicy: newEnrichGetPolicyFunc(t), + EnrichPutPolicy: newEnrichPutPolicyFunc(t), + EnrichStats: newEnrichStatsFunc(t), + EqlDelete: newEqlDeleteFunc(t), + EqlGet: newEqlGetFunc(t), + EqlGetStatus: newEqlGetStatusFunc(t), + EqlSearch: newEqlSearchFunc(t), + EsqlAsyncQueryDelete: newEsqlAsyncQueryDeleteFunc(t), + EsqlAsyncQueryGet: newEsqlAsyncQueryGetFunc(t), + EsqlAsyncQuery: newEsqlAsyncQueryFunc(t), + EsqlAsyncQueryStop: newEsqlAsyncQueryStopFunc(t), + EsqlGetQuery: newEsqlGetQueryFunc(t), + EsqlListQueries: newEsqlListQueriesFunc(t), + EsqlQuery: newEsqlQueryFunc(t), + Exists: newExistsFunc(t), + ExistsSource: newExistsSourceFunc(t), + Explain: newExplainFunc(t), + FeaturesGetFeatures: newFeaturesGetFeaturesFunc(t), + FeaturesResetFeatures: newFeaturesResetFeaturesFunc(t), + FieldCaps: newFieldCapsFunc(t), + FleetDeleteSecret: newFleetDeleteSecretFunc(t), + FleetGetSecret: newFleetGetSecretFunc(t), + FleetGlobalCheckpoints: newFleetGlobalCheckpointsFunc(t), + FleetMsearch: newFleetMsearchFunc(t), + FleetPostSecret: newFleetPostSecretFunc(t), + FleetSearch: newFleetSearchFunc(t), + Get: newGetFunc(t), + GetScriptContext: newGetScriptContextFunc(t), + GetScriptLanguages: newGetScriptLanguagesFunc(t), + GetScript: newGetScriptFunc(t), + GetSource: newGetSourceFunc(t), + GraphExplore: newGraphExploreFunc(t), + HealthReport: newHealthReportFunc(t), + Index: newIndexFunc(t), + InferenceChatCompletionUnified: newInferenceChatCompletionUnifiedFunc(t), + InferenceCompletion: newInferenceCompletionFunc(t), + InferenceDelete: newInferenceDeleteFunc(t), + InferenceGet: newInferenceGetFunc(t), + InferenceInference: newInferenceInferenceFunc(t), + InferencePutAlibabacloud: newInferencePutAlibabacloudFunc(t), + InferencePutAmazonbedrock: newInferencePutAmazonbedrockFunc(t), + InferencePutAmazonsagemaker: newInferencePutAmazonsagemakerFunc(t), + InferencePutAnthropic: newInferencePutAnthropicFunc(t), + InferencePutAzureaistudio: newInferencePutAzureaistudioFunc(t), + InferencePutAzureopenai: newInferencePutAzureopenaiFunc(t), + InferencePutCohere: newInferencePutCohereFunc(t), + InferencePutCustom: 
newInferencePutCustomFunc(t), + InferencePutDeepseek: newInferencePutDeepseekFunc(t), + InferencePutElasticsearch: newInferencePutElasticsearchFunc(t), + InferencePutElser: newInferencePutElserFunc(t), + InferencePutGoogleaistudio: newInferencePutGoogleaistudioFunc(t), + InferencePutGooglevertexai: newInferencePutGooglevertexaiFunc(t), + InferencePutHuggingFace: newInferencePutHuggingFaceFunc(t), + InferencePutJinaai: newInferencePutJinaaiFunc(t), + InferencePutMistral: newInferencePutMistralFunc(t), + InferencePutOpenai: newInferencePutOpenaiFunc(t), + InferencePut: newInferencePutFunc(t), + InferencePutVoyageai: newInferencePutVoyageaiFunc(t), + InferencePutWatsonx: newInferencePutWatsonxFunc(t), + InferenceRerank: newInferenceRerankFunc(t), + InferenceSparseEmbedding: newInferenceSparseEmbeddingFunc(t), + InferenceStreamCompletion: newInferenceStreamCompletionFunc(t), + InferenceTextEmbedding: newInferenceTextEmbeddingFunc(t), + InferenceUpdate: newInferenceUpdateFunc(t), + Info: newInfoFunc(t), + LogstashDeletePipeline: newLogstashDeletePipelineFunc(t), + LogstashGetPipeline: newLogstashGetPipelineFunc(t), + LogstashPutPipeline: newLogstashPutPipelineFunc(t), + Mget: newMgetFunc(t), + Msearch: newMsearchFunc(t), + MsearchTemplate: newMsearchTemplateFunc(t), + Mtermvectors: newMtermvectorsFunc(t), + OpenPointInTime: newOpenPointInTimeFunc(t), + Ping: newPingFunc(t), + ProfilingFlamegraph: newProfilingFlamegraphFunc(t), + ProfilingStacktraces: newProfilingStacktracesFunc(t), + ProfilingStatus: newProfilingStatusFunc(t), + ProfilingTopnFunctions: newProfilingTopnFunctionsFunc(t), + PutScript: newPutScriptFunc(t), + QueryRulesDeleteRule: newQueryRulesDeleteRuleFunc(t), + QueryRulesDeleteRuleset: newQueryRulesDeleteRulesetFunc(t), + QueryRulesGetRule: newQueryRulesGetRuleFunc(t), + QueryRulesGetRuleset: newQueryRulesGetRulesetFunc(t), + QueryRulesListRulesets: newQueryRulesListRulesetsFunc(t), + QueryRulesPutRule: newQueryRulesPutRuleFunc(t), + QueryRulesPutRuleset: newQueryRulesPutRulesetFunc(t), + QueryRulesTest: newQueryRulesTestFunc(t), + RankEval: newRankEvalFunc(t), + Reindex: newReindexFunc(t), + ReindexRethrottle: newReindexRethrottleFunc(t), + RenderSearchTemplate: newRenderSearchTemplateFunc(t), + ScriptsPainlessExecute: newScriptsPainlessExecuteFunc(t), + Scroll: newScrollFunc(t), + SearchApplicationDeleteBehavioralAnalytics: newSearchApplicationDeleteBehavioralAnalyticsFunc(t), + SearchApplicationDelete: newSearchApplicationDeleteFunc(t), + SearchApplicationGetBehavioralAnalytics: newSearchApplicationGetBehavioralAnalyticsFunc(t), + SearchApplicationGet: newSearchApplicationGetFunc(t), + SearchApplicationList: newSearchApplicationListFunc(t), + SearchApplicationPostBehavioralAnalyticsEvent: newSearchApplicationPostBehavioralAnalyticsEventFunc(t), + SearchApplicationPutBehavioralAnalytics: newSearchApplicationPutBehavioralAnalyticsFunc(t), + SearchApplicationPut: newSearchApplicationPutFunc(t), + SearchApplicationRenderQuery: newSearchApplicationRenderQueryFunc(t), + SearchApplicationSearch: newSearchApplicationSearchFunc(t), + SearchMvt: newSearchMvtFunc(t), + Search: newSearchFunc(t), + SearchShards: newSearchShardsFunc(t), + SearchTemplate: newSearchTemplateFunc(t), + SearchableSnapshotsCacheStats: newSearchableSnapshotsCacheStatsFunc(t), + SearchableSnapshotsClearCache: newSearchableSnapshotsClearCacheFunc(t), + SearchableSnapshotsMount: newSearchableSnapshotsMountFunc(t), + SearchableSnapshotsStats: newSearchableSnapshotsStatsFunc(t), + ShutdownDeleteNode: 
newShutdownDeleteNodeFunc(t), + ShutdownGetNode: newShutdownGetNodeFunc(t), + ShutdownPutNode: newShutdownPutNodeFunc(t), + SimulateIngest: newSimulateIngestFunc(t), + SlmDeleteLifecycle: newSlmDeleteLifecycleFunc(t), + SlmExecuteLifecycle: newSlmExecuteLifecycleFunc(t), + SlmExecuteRetention: newSlmExecuteRetentionFunc(t), + SlmGetLifecycle: newSlmGetLifecycleFunc(t), + SlmGetStats: newSlmGetStatsFunc(t), + SlmGetStatus: newSlmGetStatusFunc(t), + SlmPutLifecycle: newSlmPutLifecycleFunc(t), + SlmStart: newSlmStartFunc(t), + SlmStop: newSlmStopFunc(t), + StreamsLogsDisable: newStreamsLogsDisableFunc(t), + StreamsLogsEnable: newStreamsLogsEnableFunc(t), + StreamsStatus: newStreamsStatusFunc(t), + SynonymsDeleteSynonym: newSynonymsDeleteSynonymFunc(t), + SynonymsDeleteSynonymRule: newSynonymsDeleteSynonymRuleFunc(t), + SynonymsGetSynonym: newSynonymsGetSynonymFunc(t), + SynonymsGetSynonymRule: newSynonymsGetSynonymRuleFunc(t), + SynonymsGetSynonymsSets: newSynonymsGetSynonymsSetsFunc(t), + SynonymsPutSynonym: newSynonymsPutSynonymFunc(t), + SynonymsPutSynonymRule: newSynonymsPutSynonymRuleFunc(t), + TermsEnum: newTermsEnumFunc(t), + Termvectors: newTermvectorsFunc(t), + TextStructureFindFieldStructure: newTextStructureFindFieldStructureFunc(t), + TextStructureFindMessageStructure: newTextStructureFindMessageStructureFunc(t), + TextStructureFindStructure: newTextStructureFindStructureFunc(t), + TextStructureTestGrokPattern: newTextStructureTestGrokPatternFunc(t), + TransformDeleteTransform: newTransformDeleteTransformFunc(t), + TransformGetNodeStats: newTransformGetNodeStatsFunc(t), + TransformGetTransform: newTransformGetTransformFunc(t), + TransformGetTransformStats: newTransformGetTransformStatsFunc(t), + TransformPreviewTransform: newTransformPreviewTransformFunc(t), + TransformPutTransform: newTransformPutTransformFunc(t), + TransformResetTransform: newTransformResetTransformFunc(t), + TransformScheduleNowTransform: newTransformScheduleNowTransformFunc(t), + TransformStartTransform: newTransformStartTransformFunc(t), + TransformStopTransform: newTransformStopTransformFunc(t), + TransformUpdateTransform: newTransformUpdateTransformFunc(t), + TransformUpgradeTransforms: newTransformUpgradeTransformsFunc(t), + UpdateByQuery: newUpdateByQueryFunc(t), + UpdateByQueryRethrottle: newUpdateByQueryRethrottleFunc(t), + Update: newUpdateFunc(t), + Cat: &Cat{ + Aliases: newCatAliasesFunc(t), + Allocation: newCatAllocationFunc(t), + ComponentTemplates: newCatComponentTemplatesFunc(t), + Count: newCatCountFunc(t), + Fielddata: newCatFielddataFunc(t), + Health: newCatHealthFunc(t), + Help: newCatHelpFunc(t), + Indices: newCatIndicesFunc(t), + MLDataFrameAnalytics: newCatMLDataFrameAnalyticsFunc(t), + MLDatafeeds: newCatMLDatafeedsFunc(t), + MLJobs: newCatMLJobsFunc(t), + MLTrainedModels: newCatMLTrainedModelsFunc(t), + Master: newCatMasterFunc(t), + Nodeattrs: newCatNodeattrsFunc(t), + Nodes: newCatNodesFunc(t), + PendingTasks: newCatPendingTasksFunc(t), + Plugins: newCatPluginsFunc(t), + Recovery: newCatRecoveryFunc(t), + Repositories: newCatRepositoriesFunc(t), + Segments: newCatSegmentsFunc(t), + Shards: newCatShardsFunc(t), + Snapshots: newCatSnapshotsFunc(t), + Tasks: newCatTasksFunc(t), + Templates: newCatTemplatesFunc(t), + ThreadPool: newCatThreadPoolFunc(t), + Transforms: newCatTransformsFunc(t), + }, + Cluster: &Cluster{ + AllocationExplain: newClusterAllocationExplainFunc(t), + DeleteComponentTemplate: newClusterDeleteComponentTemplateFunc(t), + DeleteVotingConfigExclusions: 
newClusterDeleteVotingConfigExclusionsFunc(t), + ExistsComponentTemplate: newClusterExistsComponentTemplateFunc(t), + GetComponentTemplate: newClusterGetComponentTemplateFunc(t), + GetSettings: newClusterGetSettingsFunc(t), + Health: newClusterHealthFunc(t), + Info: newClusterInfoFunc(t), + PendingTasks: newClusterPendingTasksFunc(t), + PostVotingConfigExclusions: newClusterPostVotingConfigExclusionsFunc(t), + PutComponentTemplate: newClusterPutComponentTemplateFunc(t), + PutSettings: newClusterPutSettingsFunc(t), + RemoteInfo: newClusterRemoteInfoFunc(t), + Reroute: newClusterRerouteFunc(t), + State: newClusterStateFunc(t), + Stats: newClusterStatsFunc(t), + }, + Indices: &Indices{ + AddBlock: newIndicesAddBlockFunc(t), + Analyze: newIndicesAnalyzeFunc(t), + CancelMigrateReindex: newIndicesCancelMigrateReindexFunc(t), + ClearCache: newIndicesClearCacheFunc(t), + Clone: newIndicesCloneFunc(t), + Close: newIndicesCloseFunc(t), + CreateDataStream: newIndicesCreateDataStreamFunc(t), + CreateFrom: newIndicesCreateFromFunc(t), + Create: newIndicesCreateFunc(t), + DataStreamsStats: newIndicesDataStreamsStatsFunc(t), + DeleteAlias: newIndicesDeleteAliasFunc(t), + DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), + DeleteDataStreamOptions: newIndicesDeleteDataStreamOptionsFunc(t), + DeleteDataStream: newIndicesDeleteDataStreamFunc(t), + DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), + Delete: newIndicesDeleteFunc(t), + DeleteTemplate: newIndicesDeleteTemplateFunc(t), + DiskUsage: newIndicesDiskUsageFunc(t), + Downsample: newIndicesDownsampleFunc(t), + ExistsAlias: newIndicesExistsAliasFunc(t), + ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), + Exists: newIndicesExistsFunc(t), + ExistsTemplate: newIndicesExistsTemplateFunc(t), + ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), + FieldUsageStats: newIndicesFieldUsageStatsFunc(t), + Flush: newIndicesFlushFunc(t), + Forcemerge: newIndicesForcemergeFunc(t), + GetAlias: newIndicesGetAliasFunc(t), + GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), + GetDataLifecycleStats: newIndicesGetDataLifecycleStatsFunc(t), + GetDataStreamOptions: newIndicesGetDataStreamOptionsFunc(t), + GetDataStream: newIndicesGetDataStreamFunc(t), + GetDataStreamSettings: newIndicesGetDataStreamSettingsFunc(t), + GetFieldMapping: newIndicesGetFieldMappingFunc(t), + GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), + GetMapping: newIndicesGetMappingFunc(t), + GetMigrateReindexStatus: newIndicesGetMigrateReindexStatusFunc(t), + Get: newIndicesGetFunc(t), + GetSettings: newIndicesGetSettingsFunc(t), + GetTemplate: newIndicesGetTemplateFunc(t), + MigrateReindex: newIndicesMigrateReindexFunc(t), + MigrateToDataStream: newIndicesMigrateToDataStreamFunc(t), + ModifyDataStream: newIndicesModifyDataStreamFunc(t), + Open: newIndicesOpenFunc(t), + PromoteDataStream: newIndicesPromoteDataStreamFunc(t), + PutAlias: newIndicesPutAliasFunc(t), + PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), + PutDataStreamOptions: newIndicesPutDataStreamOptionsFunc(t), + PutDataStreamSettings: newIndicesPutDataStreamSettingsFunc(t), + PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), + PutMapping: newIndicesPutMappingFunc(t), + PutSettings: newIndicesPutSettingsFunc(t), + PutTemplate: newIndicesPutTemplateFunc(t), + Recovery: newIndicesRecoveryFunc(t), + Refresh: newIndicesRefreshFunc(t), + ReloadSearchAnalyzers: newIndicesReloadSearchAnalyzersFunc(t), + RemoveBlock: newIndicesRemoveBlockFunc(t), + ResolveCluster: newIndicesResolveClusterFunc(t), + 
ResolveIndex: newIndicesResolveIndexFunc(t), + Rollover: newIndicesRolloverFunc(t), + Segments: newIndicesSegmentsFunc(t), + ShardStores: newIndicesShardStoresFunc(t), + Shrink: newIndicesShrinkFunc(t), + SimulateIndexTemplate: newIndicesSimulateIndexTemplateFunc(t), + SimulateTemplate: newIndicesSimulateTemplateFunc(t), + Split: newIndicesSplitFunc(t), + Stats: newIndicesStatsFunc(t), + UpdateAliases: newIndicesUpdateAliasesFunc(t), + ValidateQuery: newIndicesValidateQueryFunc(t), + }, + Ingest: &Ingest{ + DeleteGeoipDatabase: newIngestDeleteGeoipDatabaseFunc(t), + DeleteIPLocationDatabase: newIngestDeleteIPLocationDatabaseFunc(t), + DeletePipeline: newIngestDeletePipelineFunc(t), + GeoIPStats: newIngestGeoIPStatsFunc(t), + GetGeoipDatabase: newIngestGetGeoipDatabaseFunc(t), + GetIPLocationDatabase: newIngestGetIPLocationDatabaseFunc(t), + GetPipeline: newIngestGetPipelineFunc(t), + ProcessorGrok: newIngestProcessorGrokFunc(t), + PutGeoipDatabase: newIngestPutGeoipDatabaseFunc(t), + PutIPLocationDatabase: newIngestPutIPLocationDatabaseFunc(t), + PutPipeline: newIngestPutPipelineFunc(t), + Simulate: newIngestSimulateFunc(t), + }, + Nodes: &Nodes{ + ClearRepositoriesMeteringArchive: newNodesClearRepositoriesMeteringArchiveFunc(t), + GetRepositoriesMeteringInfo: newNodesGetRepositoriesMeteringInfoFunc(t), + HotThreads: newNodesHotThreadsFunc(t), + Info: newNodesInfoFunc(t), + ReloadSecureSettings: newNodesReloadSecureSettingsFunc(t), + Stats: newNodesStatsFunc(t), + Usage: newNodesUsageFunc(t), + }, + Remote: &Remote{}, + Snapshot: &Snapshot{ + CleanupRepository: newSnapshotCleanupRepositoryFunc(t), + Clone: newSnapshotCloneFunc(t), + CreateRepository: newSnapshotCreateRepositoryFunc(t), + Create: newSnapshotCreateFunc(t), + DeleteRepository: newSnapshotDeleteRepositoryFunc(t), + Delete: newSnapshotDeleteFunc(t), + GetRepository: newSnapshotGetRepositoryFunc(t), + Get: newSnapshotGetFunc(t), + RepositoryAnalyze: newSnapshotRepositoryAnalyzeFunc(t), + RepositoryVerifyIntegrity: newSnapshotRepositoryVerifyIntegrityFunc(t), + Restore: newSnapshotRestoreFunc(t), + Status: newSnapshotStatusFunc(t), + VerifyRepository: newSnapshotVerifyRepositoryFunc(t), + }, + Tasks: &Tasks{ + Cancel: newTasksCancelFunc(t), + Get: newTasksGetFunc(t), + List: newTasksListFunc(t), + }, + AsyncSearch: &AsyncSearch{ + Delete: newAsyncSearchDeleteFunc(t), + Get: newAsyncSearchGetFunc(t), + Status: newAsyncSearchStatusFunc(t), + Submit: newAsyncSearchSubmitFunc(t), + }, + CCR: &CCR{ + DeleteAutoFollowPattern: newCCRDeleteAutoFollowPatternFunc(t), + FollowInfo: newCCRFollowInfoFunc(t), + Follow: newCCRFollowFunc(t), + FollowStats: newCCRFollowStatsFunc(t), + ForgetFollower: newCCRForgetFollowerFunc(t), + GetAutoFollowPattern: newCCRGetAutoFollowPatternFunc(t), + PauseAutoFollowPattern: newCCRPauseAutoFollowPatternFunc(t), + PauseFollow: newCCRPauseFollowFunc(t), + PutAutoFollowPattern: newCCRPutAutoFollowPatternFunc(t), + ResumeAutoFollowPattern: newCCRResumeAutoFollowPatternFunc(t), + ResumeFollow: newCCRResumeFollowFunc(t), + Stats: newCCRStatsFunc(t), + Unfollow: newCCRUnfollowFunc(t), + }, + ILM: &ILM{ + DeleteLifecycle: newILMDeleteLifecycleFunc(t), + ExplainLifecycle: newILMExplainLifecycleFunc(t), + GetLifecycle: newILMGetLifecycleFunc(t), + GetStatus: newILMGetStatusFunc(t), + MigrateToDataTiers: newILMMigrateToDataTiersFunc(t), + MoveToStep: newILMMoveToStepFunc(t), + PutLifecycle: newILMPutLifecycleFunc(t), + RemovePolicy: newILMRemovePolicyFunc(t), + Retry: newILMRetryFunc(t), + Start: newILMStartFunc(t), + 
Stop: newILMStopFunc(t), + }, + License: &License{ + Delete: newLicenseDeleteFunc(t), + GetBasicStatus: newLicenseGetBasicStatusFunc(t), + Get: newLicenseGetFunc(t), + GetTrialStatus: newLicenseGetTrialStatusFunc(t), + Post: newLicensePostFunc(t), + PostStartBasic: newLicensePostStartBasicFunc(t), + PostStartTrial: newLicensePostStartTrialFunc(t), + }, + Migration: &Migration{ + Deprecations: newMigrationDeprecationsFunc(t), + GetFeatureUpgradeStatus: newMigrationGetFeatureUpgradeStatusFunc(t), + PostFeatureUpgrade: newMigrationPostFeatureUpgradeFunc(t), + }, + ML: &ML{ + ClearTrainedModelDeploymentCache: newMLClearTrainedModelDeploymentCacheFunc(t), + CloseJob: newMLCloseJobFunc(t), + DeleteCalendarEvent: newMLDeleteCalendarEventFunc(t), + DeleteCalendarJob: newMLDeleteCalendarJobFunc(t), + DeleteCalendar: newMLDeleteCalendarFunc(t), + DeleteDataFrameAnalytics: newMLDeleteDataFrameAnalyticsFunc(t), + DeleteDatafeed: newMLDeleteDatafeedFunc(t), + DeleteExpiredData: newMLDeleteExpiredDataFunc(t), + DeleteFilter: newMLDeleteFilterFunc(t), + DeleteForecast: newMLDeleteForecastFunc(t), + DeleteJob: newMLDeleteJobFunc(t), + DeleteModelSnapshot: newMLDeleteModelSnapshotFunc(t), + DeleteTrainedModelAlias: newMLDeleteTrainedModelAliasFunc(t), + DeleteTrainedModel: newMLDeleteTrainedModelFunc(t), + EstimateModelMemory: newMLEstimateModelMemoryFunc(t), + EvaluateDataFrame: newMLEvaluateDataFrameFunc(t), + ExplainDataFrameAnalytics: newMLExplainDataFrameAnalyticsFunc(t), + FlushJob: newMLFlushJobFunc(t), + Forecast: newMLForecastFunc(t), + GetBuckets: newMLGetBucketsFunc(t), + GetCalendarEvents: newMLGetCalendarEventsFunc(t), + GetCalendars: newMLGetCalendarsFunc(t), + GetCategories: newMLGetCategoriesFunc(t), + GetDataFrameAnalytics: newMLGetDataFrameAnalyticsFunc(t), + GetDataFrameAnalyticsStats: newMLGetDataFrameAnalyticsStatsFunc(t), + GetDatafeedStats: newMLGetDatafeedStatsFunc(t), + GetDatafeeds: newMLGetDatafeedsFunc(t), + GetFilters: newMLGetFiltersFunc(t), + GetInfluencers: newMLGetInfluencersFunc(t), + GetJobStats: newMLGetJobStatsFunc(t), + GetJobs: newMLGetJobsFunc(t), + GetMemoryStats: newMLGetMemoryStatsFunc(t), + GetModelSnapshotUpgradeStats: newMLGetModelSnapshotUpgradeStatsFunc(t), + GetModelSnapshots: newMLGetModelSnapshotsFunc(t), + GetOverallBuckets: newMLGetOverallBucketsFunc(t), + GetRecords: newMLGetRecordsFunc(t), + GetTrainedModels: newMLGetTrainedModelsFunc(t), + GetTrainedModelsStats: newMLGetTrainedModelsStatsFunc(t), + InferTrainedModel: newMLInferTrainedModelFunc(t), + Info: newMLInfoFunc(t), + OpenJob: newMLOpenJobFunc(t), + PostCalendarEvents: newMLPostCalendarEventsFunc(t), + PostData: newMLPostDataFunc(t), + PreviewDataFrameAnalytics: newMLPreviewDataFrameAnalyticsFunc(t), + PreviewDatafeed: newMLPreviewDatafeedFunc(t), + PutCalendarJob: newMLPutCalendarJobFunc(t), + PutCalendar: newMLPutCalendarFunc(t), + PutDataFrameAnalytics: newMLPutDataFrameAnalyticsFunc(t), + PutDatafeed: newMLPutDatafeedFunc(t), + PutFilter: newMLPutFilterFunc(t), + PutJob: newMLPutJobFunc(t), + PutTrainedModelAlias: newMLPutTrainedModelAliasFunc(t), + PutTrainedModelDefinitionPart: newMLPutTrainedModelDefinitionPartFunc(t), + PutTrainedModel: newMLPutTrainedModelFunc(t), + PutTrainedModelVocabulary: newMLPutTrainedModelVocabularyFunc(t), + ResetJob: newMLResetJobFunc(t), + RevertModelSnapshot: newMLRevertModelSnapshotFunc(t), + SetUpgradeMode: newMLSetUpgradeModeFunc(t), + StartDataFrameAnalytics: newMLStartDataFrameAnalyticsFunc(t), + StartDatafeed: newMLStartDatafeedFunc(t), + 
StartTrainedModelDeployment: newMLStartTrainedModelDeploymentFunc(t), + StopDataFrameAnalytics: newMLStopDataFrameAnalyticsFunc(t), + StopDatafeed: newMLStopDatafeedFunc(t), + StopTrainedModelDeployment: newMLStopTrainedModelDeploymentFunc(t), + UpdateDataFrameAnalytics: newMLUpdateDataFrameAnalyticsFunc(t), + UpdateDatafeed: newMLUpdateDatafeedFunc(t), + UpdateFilter: newMLUpdateFilterFunc(t), + UpdateJob: newMLUpdateJobFunc(t), + UpdateModelSnapshot: newMLUpdateModelSnapshotFunc(t), + UpdateTrainedModelDeployment: newMLUpdateTrainedModelDeploymentFunc(t), + UpgradeJobSnapshot: newMLUpgradeJobSnapshotFunc(t), + ValidateDetector: newMLValidateDetectorFunc(t), + Validate: newMLValidateFunc(t), + }, + Monitoring: &Monitoring{ + Bulk: newMonitoringBulkFunc(t), + }, + Rollup: &Rollup{ + DeleteJob: newRollupDeleteJobFunc(t), + GetJobs: newRollupGetJobsFunc(t), + GetCaps: newRollupGetRollupCapsFunc(t), + GetIndexCaps: newRollupGetRollupIndexCapsFunc(t), + PutJob: newRollupPutJobFunc(t), + Search: newRollupRollupSearchFunc(t), + StartJob: newRollupStartJobFunc(t), + StopJob: newRollupStopJobFunc(t), + }, + Security: &Security{ + ActivateUserProfile: newSecurityActivateUserProfileFunc(t), + Authenticate: newSecurityAuthenticateFunc(t), + BulkDeleteRole: newSecurityBulkDeleteRoleFunc(t), + BulkPutRole: newSecurityBulkPutRoleFunc(t), + BulkUpdateAPIKeys: newSecurityBulkUpdateAPIKeysFunc(t), + ChangePassword: newSecurityChangePasswordFunc(t), + ClearAPIKeyCache: newSecurityClearAPIKeyCacheFunc(t), + ClearCachedPrivileges: newSecurityClearCachedPrivilegesFunc(t), + ClearCachedRealms: newSecurityClearCachedRealmsFunc(t), + ClearCachedRoles: newSecurityClearCachedRolesFunc(t), + ClearCachedServiceTokens: newSecurityClearCachedServiceTokensFunc(t), + CreateAPIKey: newSecurityCreateAPIKeyFunc(t), + CreateCrossClusterAPIKey: newSecurityCreateCrossClusterAPIKeyFunc(t), + CreateServiceToken: newSecurityCreateServiceTokenFunc(t), + DelegatePki: newSecurityDelegatePkiFunc(t), + DeletePrivileges: newSecurityDeletePrivilegesFunc(t), + DeleteRoleMapping: newSecurityDeleteRoleMappingFunc(t), + DeleteRole: newSecurityDeleteRoleFunc(t), + DeleteServiceToken: newSecurityDeleteServiceTokenFunc(t), + DeleteUser: newSecurityDeleteUserFunc(t), + DisableUserProfile: newSecurityDisableUserProfileFunc(t), + DisableUser: newSecurityDisableUserFunc(t), + EnableUserProfile: newSecurityEnableUserProfileFunc(t), + EnableUser: newSecurityEnableUserFunc(t), + EnrollKibana: newSecurityEnrollKibanaFunc(t), + EnrollNode: newSecurityEnrollNodeFunc(t), + GetAPIKey: newSecurityGetAPIKeyFunc(t), + GetBuiltinPrivileges: newSecurityGetBuiltinPrivilegesFunc(t), + GetPrivileges: newSecurityGetPrivilegesFunc(t), + GetRoleMapping: newSecurityGetRoleMappingFunc(t), + GetRole: newSecurityGetRoleFunc(t), + GetServiceAccounts: newSecurityGetServiceAccountsFunc(t), + GetServiceCredentials: newSecurityGetServiceCredentialsFunc(t), + GetSettings: newSecurityGetSettingsFunc(t), + GetToken: newSecurityGetTokenFunc(t), + GetUserPrivileges: newSecurityGetUserPrivilegesFunc(t), + GetUserProfile: newSecurityGetUserProfileFunc(t), + GetUser: newSecurityGetUserFunc(t), + GrantAPIKey: newSecurityGrantAPIKeyFunc(t), + HasPrivileges: newSecurityHasPrivilegesFunc(t), + HasPrivilegesUserProfile: newSecurityHasPrivilegesUserProfileFunc(t), + InvalidateAPIKey: newSecurityInvalidateAPIKeyFunc(t), + InvalidateToken: newSecurityInvalidateTokenFunc(t), + OidcAuthenticate: newSecurityOidcAuthenticateFunc(t), + OidcLogout: newSecurityOidcLogoutFunc(t), + 
OidcPrepareAuthentication: newSecurityOidcPrepareAuthenticationFunc(t), + PutPrivileges: newSecurityPutPrivilegesFunc(t), + PutRoleMapping: newSecurityPutRoleMappingFunc(t), + PutRole: newSecurityPutRoleFunc(t), + PutUser: newSecurityPutUserFunc(t), + QueryAPIKeys: newSecurityQueryAPIKeysFunc(t), + QueryRole: newSecurityQueryRoleFunc(t), + QueryUser: newSecurityQueryUserFunc(t), + SamlAuthenticate: newSecuritySamlAuthenticateFunc(t), + SamlCompleteLogout: newSecuritySamlCompleteLogoutFunc(t), + SamlInvalidate: newSecuritySamlInvalidateFunc(t), + SamlLogout: newSecuritySamlLogoutFunc(t), + SamlPrepareAuthentication: newSecuritySamlPrepareAuthenticationFunc(t), + SamlServiceProviderMetadata: newSecuritySamlServiceProviderMetadataFunc(t), + SuggestUserProfiles: newSecuritySuggestUserProfilesFunc(t), + UpdateAPIKey: newSecurityUpdateAPIKeyFunc(t), + UpdateCrossClusterAPIKey: newSecurityUpdateCrossClusterAPIKeyFunc(t), + UpdateSettings: newSecurityUpdateSettingsFunc(t), + UpdateUserProfileData: newSecurityUpdateUserProfileDataFunc(t), + }, + SQL: &SQL{ + ClearCursor: newSQLClearCursorFunc(t), + DeleteAsync: newSQLDeleteAsyncFunc(t), + GetAsync: newSQLGetAsyncFunc(t), + GetAsyncStatus: newSQLGetAsyncStatusFunc(t), + Query: newSQLQueryFunc(t), + Translate: newSQLTranslateFunc(t), + }, + SSL: &SSL{ + Certificates: newSSLCertificatesFunc(t), + }, + Watcher: &Watcher{ + AckWatch: newWatcherAckWatchFunc(t), + ActivateWatch: newWatcherActivateWatchFunc(t), + DeactivateWatch: newWatcherDeactivateWatchFunc(t), + DeleteWatch: newWatcherDeleteWatchFunc(t), + ExecuteWatch: newWatcherExecuteWatchFunc(t), + GetSettings: newWatcherGetSettingsFunc(t), + GetWatch: newWatcherGetWatchFunc(t), + PutWatch: newWatcherPutWatchFunc(t), + QueryWatches: newWatcherQueryWatchesFunc(t), + Start: newWatcherStartFunc(t), + Stats: newWatcherStatsFunc(t), + Stop: newWatcherStopFunc(t), + UpdateSettings: newWatcherUpdateSettingsFunc(t), + }, + XPack: &XPack{ + Info: newXPackInfoFunc(t), + Usage: newXPackUsageFunc(t), + }, + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.bulk.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.bulk.go new file mode 100644 index 000000000..12246ca0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.bulk.go @@ -0,0 +1,389 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newBulkFunc(t Transport) Bulk { + return func(body io.Reader, o ...func(*BulkRequest)) (*Response, error) { + var r = BulkRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Bulk allows to perform multiple index/update/delete operations in a single request. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html. +type Bulk func(body io.Reader, o ...func(*BulkRequest)) (*Response, error) + +// BulkRequest configures the Bulk API request. +type BulkRequest struct { + Index string + + Body io.Reader + + IncludeSourceOnError *bool + ListExecutedPipelines *bool + Pipeline string + Refresh string + RequireAlias *bool + RequireDataStream *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r BulkRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "bulk") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_bulk")) + path.WriteString("http://") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_bulk") + + params = make(map[string]string) + + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + + if r.ListExecutedPipelines != nil { + params["list_executed_pipelines"] = strconv.FormatBool(*r.ListExecutedPipelines) + } + + if r.Pipeline != "" { + params["pipeline"] = r.Pipeline + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.RequireAlias != nil { + params["require_alias"] = strconv.FormatBool(*r.RequireAlias) + } + + if r.RequireDataStream != nil { + params["require_data_stream"] = strconv.FormatBool(*r.RequireDataStream) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] 
= strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "bulk") + if reader := instrument.RecordRequestBody(ctx, "bulk", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "bulk") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Bulk) WithContext(v context.Context) func(*BulkRequest) { + return func(r *BulkRequest) { + r.ctx = v + } +} + +// WithIndex - default index for items which don't provide one. +func (f Bulk) WithIndex(v string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Index = v + } +} + +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Bulk) WithIncludeSourceOnError(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.IncludeSourceOnError = &v + } +} + +// WithListExecutedPipelines - sets list_executed_pipelines for all incoming documents. defaults to unset (false). +func (f Bulk) WithListExecutedPipelines(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.ListExecutedPipelines = &v + } +} + +// WithPipeline - the pipeline ID to preprocess incoming documents with. +func (f Bulk) WithPipeline(v string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Pipeline = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.. +func (f Bulk) WithRefresh(v string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Refresh = v + } +} + +// WithRequireAlias - if true, the request’s actions must target an index alias. defaults to false.. +func (f Bulk) WithRequireAlias(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.RequireAlias = &v + } +} + +// WithRequireDataStream - if true, the request's actions must target a data stream (existing or to-be-created). default to false. +func (f Bulk) WithRequireDataStream(v bool) func(*BulkRequest) { + return func(r *BulkRequest) { + r.RequireDataStream = &v + } +} + +// WithRouting - specific routing value. 
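+//
+// For context, a minimal Bulk call built from the options in this file (a
+// sketch, not part of the generated API; it assumes a client `es` created
+// elsewhere, e.g. via the package's NewDefaultClient constructor, and the
+// index name is illustrative). Each NDJSON line in the body must end with a
+// newline:
+//
+//	var buf bytes.Buffer
+//	buf.WriteString(`{ "index" : { "_id" : "1" } }` + "\n")
+//	buf.WriteString(`{ "title" : "Hello" }` + "\n")
+//	res, err := es.Bulk(&buf,
+//		es.Bulk.WithIndex("articles"),
+//		es.Bulk.WithRefresh("true"),
+//	)
+//	if err != nil {
+//		log.Fatalf("bulk failed: %s", err)
+//	}
+//	defer res.Body.Close()
+//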
+func (f Bulk) WithRouting(v string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request. +func (f Bulk) WithSource(v ...string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Source = v + } +} + +// WithSourceExcludes - default list of fields to exclude from the returned _source field, can be overridden on each sub-request. +func (f Bulk) WithSourceExcludes(v ...string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - default list of fields to extract and return from the _source field, can be overridden on each sub-request. +func (f Bulk) WithSourceIncludes(v ...string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.SourceIncludes = v + } +} + +// WithTimeout - explicit operation timeout. +func (f Bulk) WithTimeout(v time.Duration) func(*BulkRequest) { + return func(r *BulkRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the bulk operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f Bulk) WithWaitForActiveShards(v string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Bulk) WithPretty() func(*BulkRequest) { + return func(r *BulkRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Bulk) WithHuman() func(*BulkRequest) { + return func(r *BulkRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Bulk) WithErrorTrace() func(*BulkRequest) { + return func(r *BulkRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Bulk) WithFilterPath(v ...string) func(*BulkRequest) { + return func(r *BulkRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Bulk) WithHeader(h map[string]string) func(*BulkRequest) { + return func(r *BulkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Bulk) WithOpaqueID(s string) func(*BulkRequest) { + return func(r *BulkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.capabilities.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.capabilities.go new file mode 100644 index 000000000..5a6a5cd82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.capabilities.go @@ -0,0 +1,279 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCapabilitiesFunc(t Transport) Capabilities { + return func(o ...func(*CapabilitiesRequest)) (*Response, error) { + var r = CapabilitiesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Capabilities checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported +// +// This API is experimental. +// +// See full documentation at https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities. +type Capabilities func(o ...func(*CapabilitiesRequest)) (*Response, error) + +// CapabilitiesRequest configures the Capabilities API request. +type CapabilitiesRequest struct { + Capabilities string + LocalOnly *bool + Method string + Parameters string + Path string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
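+//
+// Every call in this package returns the same esapi Response wrapper, so the
+// handling pattern is uniform (a sketch, assuming `res` came from any call in
+// this package): close the body, and use IsError to catch replies with status
+// codes above 299, since Do only returns a Go error for transport-level
+// failures.
+//
+//	defer res.Body.Close()
+//	if res.IsError() {
+//		log.Printf("elasticsearch error: %s", res.String())
+//	}
+//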
+func (r CapabilitiesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "capabilities") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_capabilities")) + path.WriteString("http://") + path.WriteString("/_capabilities") + + params = make(map[string]string) + + if r.Capabilities != "" { + params["capabilities"] = r.Capabilities + } + + if r.LocalOnly != nil { + params["local_only"] = strconv.FormatBool(*r.LocalOnly) + } + + if r.Method != "" { + params["method"] = r.Method + } + + if r.Parameters != "" { + params["parameters"] = r.Parameters + } + + if r.Path != "" { + params["path"] = r.Path + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "capabilities") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "capabilities") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Capabilities) WithContext(v context.Context) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.ctx = v + } +} + +// WithCapabilities - comma-separated list of arbitrary api capabilities to check. +func (f Capabilities) WithCapabilities(v string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Capabilities = v + } +} + +// WithLocalOnly - true if only the node being called should be considered. +func (f Capabilities) WithLocalOnly(v bool) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.LocalOnly = &v + } +} + +// WithMethod - rest method to check. +func (f Capabilities) WithMethod(v string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Method = v + } +} + +// WithParameters - comma-separated list of api parameters to check. +func (f Capabilities) WithParameters(v string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Parameters = v + } +} + +// WithPath - api path to check. +func (f Capabilities) WithPath(v string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Path = v + } +} + +// WithPretty makes the response body pretty-printed. 
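+//
+// A usage sketch for this experimental API (assuming an initialized client
+// `es`; the method, path, and capability names are illustrative):
+//
+//	res, err := es.Capabilities(
+//		es.Capabilities.WithMethod("GET"),
+//		es.Capabilities.WithPath("/_search"),
+//		es.Capabilities.WithCapabilities("typed_keys"),
+//		es.Capabilities.WithPretty(),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//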
+func (f Capabilities) WithPretty() func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Capabilities) WithHuman() func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Capabilities) WithErrorTrace() func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Capabilities) WithFilterPath(v ...string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Capabilities) WithHeader(h map[string]string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Capabilities) WithOpaqueID(s string) func(*CapabilitiesRequest) { + return func(r *CapabilitiesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.aliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.aliases.go new file mode 100644 index 000000000..0f4531d37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.aliases.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatAliasesFunc(t Transport) CatAliases { + return func(o ...func(*CatAliasesRequest)) (*Response, error) { + var r = CatAliasesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatAliases shows information about currently configured aliases to indices including filter and routing infos. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html. +type CatAliases func(o ...func(*CatAliasesRequest)) (*Response, error) + +// CatAliasesRequest configures the Cat Aliases API request. 
+type CatAliasesRequest struct { + Name []string + + ExpandWildcards string + Format string + H []string + Help *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatAliasesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.aliases") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("aliases") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("aliases") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.aliases") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.aliases") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatAliases) WithContext(v context.Context) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.ctx = v + } +} + +// WithName - a list of alias names to return. 
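+//
+// For example (a sketch, assuming an initialized client `es`; the alias
+// pattern is illustrative), combining WithName with a JSON response format:
+//
+//	res, err := es.Cat.Aliases(
+//		es.Cat.Aliases.WithName("logs-*"),
+//		es.Cat.Aliases.WithFormat("json"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//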
+func (f CatAliases) WithName(v ...string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.Name = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f CatAliases) WithExpandWildcards(v string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.ExpandWildcards = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatAliases) WithFormat(v string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatAliases) WithH(v ...string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatAliases) WithHelp(v bool) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.Help = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f CatAliases) WithMasterTimeout(v time.Duration) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatAliases) WithS(v ...string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatAliases) WithV(v bool) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatAliases) WithPretty() func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatAliases) WithHuman() func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatAliases) WithErrorTrace() func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatAliases) WithFilterPath(v ...string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatAliases) WithHeader(h map[string]string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatAliases) WithOpaqueID(s string) func(*CatAliasesRequest) { + return func(r *CatAliasesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.allocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.allocation.go new file mode 100644 index 000000000..3c2b5c1e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.allocation.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatAllocationFunc(t Transport) CatAllocation { + return func(o ...func(*CatAllocationRequest)) (*Response, error) { + var r = CatAllocationRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatAllocation provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html. +type CatAllocation func(o ...func(*CatAllocationRequest)) (*Response, error) + +// CatAllocationRequest configures the Cat Allocation API request. +type CatAllocationRequest struct { + NodeID []string + + Bytes string + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r CatAllocationRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.allocation") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("allocation") + 1 + len(strings.Join(r.NodeID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("allocation") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.allocation") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.allocation") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatAllocation) WithContext(v context.Context) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.ctx = v + } +} + +// WithNodeID - a list of node ids or names to limit the returned information. +func (f CatAllocation) WithNodeID(v ...string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.NodeID = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatAllocation) WithBytes(v string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. 
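+//
+// For example (a sketch, assuming an initialized client `es`): requesting the
+// allocation table as JSON with byte values reported in megabytes:
+//
+//	res, err := es.Cat.Allocation(
+//		es.Cat.Allocation.WithFormat("json"),
+//		es.Cat.Allocation.WithBytes("mb"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//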
+func (f CatAllocation) WithFormat(v string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatAllocation) WithH(v ...string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatAllocation) WithHelp(v bool) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatAllocation) WithLocal(v bool) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatAllocation) WithMasterTimeout(v time.Duration) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatAllocation) WithS(v ...string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatAllocation) WithV(v bool) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatAllocation) WithPretty() func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatAllocation) WithHuman() func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatAllocation) WithErrorTrace() func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatAllocation) WithFilterPath(v ...string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatAllocation) WithHeader(h map[string]string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatAllocation) WithOpaqueID(s string) func(*CatAllocationRequest) { + return func(r *CatAllocationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.component_templates.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.component_templates.go new file mode 100644 index 000000000..1ff1108a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.component_templates.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatComponentTemplatesFunc(t Transport) CatComponentTemplates { + return func(o ...func(*CatComponentTemplatesRequest)) (*Response, error) { + var r = CatComponentTemplatesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatComponentTemplates returns information about existing component_templates templates. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html. +type CatComponentTemplates func(o ...func(*CatComponentTemplatesRequest)) (*Response, error) + +// CatComponentTemplatesRequest configures the Cat Component Templates API request. +type CatComponentTemplatesRequest struct { + Name string + + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r CatComponentTemplatesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.component_templates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("component_templates") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("component_templates") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.component_templates") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.component_templates") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatComponentTemplates) WithContext(v context.Context) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.ctx = v + } +} + +// WithName - a pattern that returned component template names must match. +func (f CatComponentTemplates) WithName(v string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Name = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatComponentTemplates) WithFormat(v string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. 
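+//
+// For example (a sketch, assuming an initialized client `es`; the name
+// pattern and column names are illustrative):
+//
+//	res, err := es.Cat.ComponentTemplates(
+//		es.Cat.ComponentTemplates.WithName("logs-*"),
+//		es.Cat.ComponentTemplates.WithH("name", "version"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//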
+func (f CatComponentTemplates) WithH(v ...string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatComponentTemplates) WithHelp(v bool) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatComponentTemplates) WithLocal(v bool) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatComponentTemplates) WithMasterTimeout(v time.Duration) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatComponentTemplates) WithS(v ...string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatComponentTemplates) WithV(v bool) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatComponentTemplates) WithPretty() func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatComponentTemplates) WithHuman() func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatComponentTemplates) WithErrorTrace() func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatComponentTemplates) WithFilterPath(v ...string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatComponentTemplates) WithHeader(h map[string]string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatComponentTemplates) WithOpaqueID(s string) func(*CatComponentTemplatesRequest) { + return func(r *CatComponentTemplatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.count.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.count.go new file mode 100644 index 000000000..1fe9cde57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.count.go @@ -0,0 +1,296 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatCountFunc(t Transport) CatCount { + return func(o ...func(*CatCountRequest)) (*Response, error) { + var r = CatCountRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatCount provides quick access to the document count of the entire cluster, or individual indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html. +type CatCount func(o ...func(*CatCountRequest)) (*Response, error) + +// CatCountRequest configures the Cat Count API request. +type CatCountRequest struct { + Index []string + + Format string + H []string + Help *bool + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatCountRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.count") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("count") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("count") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv 
:= range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.count") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.count") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatCount) WithContext(v context.Context) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to limit the returned information. +func (f CatCount) WithIndex(v ...string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.Index = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatCount) WithFormat(v string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatCount) WithH(v ...string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatCount) WithHelp(v bool) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatCount) WithS(v ...string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatCount) WithV(v bool) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatCount) WithPretty() func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatCount) WithHuman() func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatCount) WithErrorTrace() func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatCount) WithFilterPath(v ...string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatCount) WithHeader(h map[string]string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
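+//
+// For example (a sketch, assuming an initialized client `es`; the index name
+// and opaque ID are illustrative), tagging the request for traceability:
+//
+//	res, err := es.Cat.Count(
+//		es.Cat.Count.WithIndex("articles"),
+//		es.Cat.Count.WithOpaqueID("req-1234"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//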
+func (f CatCount) WithOpaqueID(s string) func(*CatCountRequest) { + return func(r *CatCountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.fielddata.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.fielddata.go new file mode 100644 index 000000000..6189777d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.fielddata.go @@ -0,0 +1,312 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatFielddataFunc(t Transport) CatFielddata { + return func(o ...func(*CatFielddataRequest)) (*Response, error) { + var r = CatFielddataRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatFielddata shows how much heap memory is currently being used by fielddata on every data node in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html. +type CatFielddata func(o ...func(*CatFielddataRequest)) (*Response, error) + +// CatFielddataRequest configures the Cat Fielddata API request. +type CatFielddataRequest struct { + Fields []string + + Bytes string + Format string + H []string + Help *bool + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
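+//
+// Direct-invocation sketch (hypothetical; t is any value satisfying the
+// package's Transport interface, e.g. the client's transport):
+//
+//	req := esapi.CatFielddataRequest{Fields: []string{"body"}, Format: "json"}
+//	res, err := req.Do(context.Background(), t)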
+func (r CatFielddataRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.fielddata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("fielddata") + 1 + len(strings.Join(r.Fields, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("fielddata") + if len(r.Fields) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Fields, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "fields", strings.Join(r.Fields, ",")) + } + } + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.fielddata") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.fielddata") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatFielddata) WithContext(v context.Context) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.ctx = v + } +} + +// WithFields - a list of fields to return the fielddata size. +func (f CatFielddata) WithFields(v ...string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Fields = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatFielddata) WithBytes(v string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. 
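+//
+// Functional-options sketch (hypothetical client value es):
+//
+//	res, err := es.Cat.Fielddata(
+//		es.Cat.Fielddata.WithFields("title", "body"),
+//		es.Cat.Fielddata.WithFormat("json"),
+//	)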
+func (f CatFielddata) WithFormat(v string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatFielddata) WithH(v ...string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatFielddata) WithHelp(v bool) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatFielddata) WithS(v ...string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatFielddata) WithV(v bool) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatFielddata) WithPretty() func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatFielddata) WithHuman() func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatFielddata) WithErrorTrace() func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatFielddata) WithFilterPath(v ...string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatFielddata) WithHeader(h map[string]string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatFielddata) WithOpaqueID(s string) func(*CatFielddataRequest) { + return func(r *CatFielddataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.health.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.health.go new file mode 100644 index 000000000..9b8404b01 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.health.go @@ -0,0 +1,301 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatHealthFunc(t Transport) CatHealth { + return func(o ...func(*CatHealthRequest)) (*Response, error) { + var r = CatHealthRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatHealth returns a concise representation of the cluster health. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html. +type CatHealth func(o ...func(*CatHealthRequest)) (*Response, error) + +// CatHealthRequest configures the Cat Health API request. +type CatHealthRequest struct { + Format string + H []string + Help *bool + S []string + Time string + Ts *bool + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatHealthRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/health")) + path.WriteString("http://") + path.WriteString("/_cat/health") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.Ts != nil { + params["ts"] = strconv.FormatBool(*r.Ts) + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.health") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.health") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} 
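+
+// Usage sketch for CatHealth (hypothetical; assumes an initialized
+// *elasticsearch.Client named es):
+//
+//	res, err := es.Cat.Health(
+//		es.Cat.Health.WithV(true),
+//		es.Cat.Health.WithTs(false),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}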
+ +// WithContext sets the request context. +func (f CatHealth) WithContext(v context.Context) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatHealth) WithFormat(v string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatHealth) WithH(v ...string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatHealth) WithHelp(v bool) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatHealth) WithS(v ...string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatHealth) WithTime(v string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Time = v + } +} + +// WithTs - set to false to disable timestamping. +func (f CatHealth) WithTs(v bool) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Ts = &v + } +} + +// WithV - verbose mode. display column headers. +func (f CatHealth) WithV(v bool) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatHealth) WithPretty() func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatHealth) WithHuman() func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatHealth) WithErrorTrace() func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatHealth) WithFilterPath(v ...string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatHealth) WithHeader(h map[string]string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatHealth) WithOpaqueID(s string) func(*CatHealthRequest) { + return func(r *CatHealthRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.help.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.help.go new file mode 100644 index 000000000..29e795c5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.help.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newCatHelpFunc(t Transport) CatHelp { + return func(o ...func(*CatHelpRequest)) (*Response, error) { + var r = CatHelpRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatHelp returns help for the Cat APIs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html. +type CatHelp func(o ...func(*CatHelpRequest)) (*Response, error) + +// CatHelpRequest configures the Cat Help API request. +type CatHelpRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatHelpRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.help") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat")) + path.WriteString("http://") + path.WriteString("/_cat") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.help") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.help") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
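+//
+// Sketch (hypothetical; 5*time.Second assumes a "time" import at the call site):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	res, err := es.Cat.Help(es.Cat.Help.WithContext(ctx))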
+func (f CatHelp) WithContext(v context.Context) func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatHelp) WithPretty() func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatHelp) WithHuman() func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatHelp) WithErrorTrace() func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatHelp) WithFilterPath(v ...string) func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatHelp) WithHeader(h map[string]string) func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatHelp) WithOpaqueID(s string) func(*CatHelpRequest) { + return func(r *CatHelpRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.indices.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.indices.go new file mode 100644 index 000000000..a4743da5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.indices.go @@ -0,0 +1,381 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatIndicesFunc(t Transport) CatIndices { + return func(o ...func(*CatIndicesRequest)) (*Response, error) { + var r = CatIndicesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatIndices returns information about indices: number of primaries and replicas, document counts, disk size, ... +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html. +type CatIndices func(o ...func(*CatIndicesRequest)) (*Response, error) + +// CatIndicesRequest configures the Cat Indices API request. 
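+//
+// Literal-construction sketch (hypothetical field values):
+//
+//	r := esapi.CatIndicesRequest{
+//		Index:  []string{"logs-*"},
+//		Health: "yellow",
+//		Format: "json",
+//	}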
+type CatIndicesRequest struct { + Index []string + + Bytes string + ExpandWildcards string + Format string + H []string + Health string + Help *bool + IncludeUnloadedSegments *bool + MasterTimeout time.Duration + Pri *bool + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatIndicesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("indices") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("indices") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Health != "" { + params["health"] = r.Health + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.IncludeUnloadedSegments != nil { + params["include_unloaded_segments"] = strconv.FormatBool(*r.IncludeUnloadedSegments) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pri != nil { + params["pri"] = strconv.FormatBool(*r.Pri) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.indices") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.indices") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: 
res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatIndices) WithContext(v context.Context) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to limit the returned information. +func (f CatIndices) WithIndex(v ...string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Index = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatIndices) WithBytes(v string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Bytes = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both. +func (f CatIndices) WithExpandWildcards(v string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.ExpandWildcards = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatIndices) WithFormat(v string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatIndices) WithH(v ...string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.H = v + } +} + +// WithHealth - a health status ("green", "yellow", or "red") to filter only indices matching the specified health status. +func (f CatIndices) WithHealth(v string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Health = v + } +} + +// WithHelp - return help information. +func (f CatIndices) WithHelp(v bool) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Help = &v + } +} + +// WithIncludeUnloadedSegments - if set to true segment stats will include stats for segments that are not currently loaded into memory. +func (f CatIndices) WithIncludeUnloadedSegments(v bool) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.IncludeUnloadedSegments = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatIndices) WithMasterTimeout(v time.Duration) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.MasterTimeout = v + } +} + +// WithPri - set to true to return stats only for primary shards. +func (f CatIndices) WithPri(v bool) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Pri = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatIndices) WithS(v ...string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatIndices) WithTime(v string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatIndices) WithV(v bool) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatIndices) WithPretty() func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatIndices) WithHuman() func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
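+//
+// Combined-options sketch (hypothetical client value es):
+//
+//	res, err := es.Cat.Indices(
+//		es.Cat.Indices.WithIndex("logs-*"),
+//		es.Cat.Indices.WithHealth("yellow"),
+//		es.Cat.Indices.WithErrorTrace(),
+//	)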
+func (f CatIndices) WithErrorTrace() func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatIndices) WithFilterPath(v ...string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatIndices) WithHeader(h map[string]string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatIndices) WithOpaqueID(s string) func(*CatIndicesRequest) { + return func(r *CatIndicesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.master.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.master.go new file mode 100644 index 000000000..689f3af12 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.master.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatMasterFunc(t Transport) CatMaster { + return func(o ...func(*CatMasterRequest)) (*Response, error) { + var r = CatMasterRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatMaster returns information about the master node. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html. +type CatMaster func(o ...func(*CatMasterRequest)) (*Response, error) + +// CatMasterRequest configures the Cat Master API request. +type CatMasterRequest struct { + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r CatMasterRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.master") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/master")) + path.WriteString("http://") + path.WriteString("/_cat/master") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.master") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.master") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatMaster) WithContext(v context.Context) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatMaster) WithFormat(v string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatMaster) WithH(v ...string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatMaster) WithHelp(v bool) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatMaster) WithLocal(v bool) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
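+//
+// Sketch (hypothetical; the duration value is illustrative):
+//
+//	res, err := es.Cat.Master(es.Cat.Master.WithMasterTimeout(10 * time.Second))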
+func (f CatMaster) WithMasterTimeout(v time.Duration) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatMaster) WithS(v ...string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatMaster) WithV(v bool) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatMaster) WithPretty() func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatMaster) WithHuman() func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatMaster) WithErrorTrace() func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatMaster) WithFilterPath(v ...string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatMaster) WithHeader(h map[string]string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatMaster) WithOpaqueID(s string) func(*CatMasterRequest) { + return func(r *CatMasterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodeattrs.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodeattrs.go new file mode 100644 index 000000000..4df967aea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodeattrs.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatNodeattrsFunc(t Transport) CatNodeattrs { + return func(o ...func(*CatNodeattrsRequest)) (*Response, error) { + var r = CatNodeattrsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatNodeattrs returns information about custom node attributes. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html. +type CatNodeattrs func(o ...func(*CatNodeattrsRequest)) (*Response, error) + +// CatNodeattrsRequest configures the Cat Nodeattrs API request. +type CatNodeattrsRequest struct { + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatNodeattrsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.nodeattrs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/nodeattrs")) + path.WriteString("http://") + path.WriteString("/_cat/nodeattrs") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.nodeattrs") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.nodeattrs") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + 
response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatNodeattrs) WithContext(v context.Context) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatNodeattrs) WithFormat(v string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatNodeattrs) WithH(v ...string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatNodeattrs) WithHelp(v bool) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatNodeattrs) WithLocal(v bool) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatNodeattrs) WithMasterTimeout(v time.Duration) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatNodeattrs) WithS(v ...string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatNodeattrs) WithV(v bool) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatNodeattrs) WithPretty() func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatNodeattrs) WithHuman() func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatNodeattrs) WithErrorTrace() func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatNodeattrs) WithFilterPath(v ...string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatNodeattrs) WithHeader(h map[string]string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatNodeattrs) WithOpaqueID(s string) func(*CatNodeattrsRequest) { + return func(r *CatNodeattrsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodes.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodes.go new file mode 100644 index 000000000..54aedcefa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.nodes.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatNodesFunc(t Transport) CatNodes { + return func(o ...func(*CatNodesRequest)) (*Response, error) { + var r = CatNodesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatNodes returns basic statistics about performance of cluster nodes. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html. +type CatNodes func(o ...func(*CatNodesRequest)) (*Response, error) + +// CatNodesRequest configures the Cat Nodes API request. +type CatNodesRequest struct { + Bytes string + Format string + FullID *bool + H []string + Help *bool + IncludeUnloadedSegments *bool + MasterTimeout time.Duration + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
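+//
+// Direct-invocation sketch (hypothetical; t satisfies Transport; note the
+// pointer-to-bool pattern used by optional boolean fields):
+//
+//	fullID := true
+//	req := esapi.CatNodesRequest{FullID: &fullID, Format: "json"}
+//	res, err := req.Do(context.Background(), t)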
+func (r CatNodesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.nodes") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/nodes")) + path.WriteString("http://") + path.WriteString("/_cat/nodes") + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.FullID != nil { + params["full_id"] = strconv.FormatBool(*r.FullID) + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.IncludeUnloadedSegments != nil { + params["include_unloaded_segments"] = strconv.FormatBool(*r.IncludeUnloadedSegments) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.nodes") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.nodes") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatNodes) WithContext(v context.Context) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.ctx = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatNodes) WithBytes(v string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatNodes) WithFormat(v string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Format = v + } +} + +// WithFullID - return the full node ID instead of the shortened version (default: false). +func (f CatNodes) WithFullID(v bool) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.FullID = &v + } +} + +// WithH - comma-separated list of column names to display. 
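+//
+// Column-selection sketch (hypothetical; column names follow the cat nodes documentation):
+//
+//	res, err := es.Cat.Nodes(es.Cat.Nodes.WithH("name", "heap.percent", "cpu"))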
+func (f CatNodes) WithH(v ...string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatNodes) WithHelp(v bool) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Help = &v + } +} + +// WithIncludeUnloadedSegments - if set to true segment stats will include stats for segments that are not currently loaded into memory. +func (f CatNodes) WithIncludeUnloadedSegments(v bool) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.IncludeUnloadedSegments = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatNodes) WithMasterTimeout(v time.Duration) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatNodes) WithS(v ...string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatNodes) WithTime(v string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatNodes) WithV(v bool) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatNodes) WithPretty() func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatNodes) WithHuman() func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatNodes) WithErrorTrace() func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatNodes) WithFilterPath(v ...string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatNodes) WithHeader(h map[string]string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatNodes) WithOpaqueID(s string) func(*CatNodesRequest) { + return func(r *CatNodesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.pending_tasks.go new file mode 100644 index 000000000..be63a93be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.pending_tasks.go @@ -0,0 +1,314 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatPendingTasksFunc(t Transport) CatPendingTasks { + return func(o ...func(*CatPendingTasksRequest)) (*Response, error) { + var r = CatPendingTasksRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatPendingTasks returns a concise representation of the cluster pending tasks. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html. +type CatPendingTasks func(o ...func(*CatPendingTasksRequest)) (*Response, error) + +// CatPendingTasksRequest configures the Cat Pending Tasks API request. +type CatPendingTasksRequest struct { + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatPendingTasksRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/pending_tasks")) + path.WriteString("http://") + path.WriteString("/_cat/pending_tasks") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := 
range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.pending_tasks") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.pending_tasks") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatPendingTasks) WithContext(v context.Context) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatPendingTasks) WithFormat(v string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatPendingTasks) WithH(v ...string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatPendingTasks) WithHelp(v bool) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatPendingTasks) WithLocal(v bool) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatPendingTasks) WithMasterTimeout(v time.Duration) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatPendingTasks) WithS(v ...string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatPendingTasks) WithTime(v string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatPendingTasks) WithV(v bool) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatPendingTasks) WithPretty() func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatPendingTasks) WithHuman() func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatPendingTasks) WithErrorTrace() func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatPendingTasks) WithFilterPath(v ...string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f CatPendingTasks) WithHeader(h map[string]string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatPendingTasks) WithOpaqueID(s string) func(*CatPendingTasksRequest) { + return func(r *CatPendingTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.plugins.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.plugins.go new file mode 100644 index 000000000..19d3a84bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.plugins.go @@ -0,0 +1,314 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatPluginsFunc(t Transport) CatPlugins { + return func(o ...func(*CatPluginsRequest)) (*Response, error) { + var r = CatPluginsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatPlugins returns information about installed plugins across nodes node. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html. +type CatPlugins func(o ...func(*CatPluginsRequest)) (*Response, error) + +// CatPluginsRequest configures the Cat Plugins API request. +type CatPluginsRequest struct { + Format string + H []string + Help *bool + IncludeBootstrap *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
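For orientation, every generated cat API in this vendor drop follows the same functional-options pattern shown above. A minimal usage sketch for the pending-tasks endpoint, illustrative only and not part of the vendored files; it assumes a cluster reachable at http://localhost:9200 and uses the v9 client constructor and the `es.Cat.PendingTasks` accessor that this package exposes:

package main

import (
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Client construction; the address is an assumption for this sketch.
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// Each With* option mutates the CatPendingTasksRequest before Do runs.
	res, err := es.Cat.PendingTasks(
		es.Cat.PendingTasks.WithFormat("json"),
		es.Cat.PendingTasks.WithV(true),
	)
	if err != nil {
		log.Fatalf("cat pending_tasks: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}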
+func (r CatPluginsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.plugins") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/plugins")) + path.WriteString("http://") + path.WriteString("/_cat/plugins") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.IncludeBootstrap != nil { + params["include_bootstrap"] = strconv.FormatBool(*r.IncludeBootstrap) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.plugins") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.plugins") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatPlugins) WithContext(v context.Context) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatPlugins) WithFormat(v string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatPlugins) WithH(v ...string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatPlugins) WithHelp(v bool) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.Help = &v + } +} + +// WithIncludeBootstrap - include bootstrap plugins in the response. 
+func (f CatPlugins) WithIncludeBootstrap(v bool) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.IncludeBootstrap = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatPlugins) WithLocal(v bool) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatPlugins) WithMasterTimeout(v time.Duration) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatPlugins) WithS(v ...string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatPlugins) WithV(v bool) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatPlugins) WithPretty() func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatPlugins) WithHuman() func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatPlugins) WithErrorTrace() func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatPlugins) WithFilterPath(v ...string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatPlugins) WithHeader(h map[string]string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatPlugins) WithOpaqueID(s string) func(*CatPluginsRequest) { + return func(r *CatPluginsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.recovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.recovery.go new file mode 100644 index 000000000..405edb525 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.recovery.go @@ -0,0 +1,348 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
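The plugins endpoint is commonly consumed as JSON rather than as a text table. A sketch under the same assumptions as the previous example (the `es` client built there; additional imports of `encoding/json` and `fmt`; the `IsError` and `Status` helpers on `esapi.Response`):

func listPlugins(es *elasticsearch.Client) ([]map[string]string, error) {
	res, err := es.Cat.Plugins(
		es.Cat.Plugins.WithFormat("json"),
		es.Cat.Plugins.WithH("name", "component", "version"),
	)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.IsError() {
		return nil, fmt.Errorf("cat plugins: %s", res.Status())
	}
	// With format=json the _cat output is an array of objects,
	// one per node/plugin pair, keyed by the requested columns.
	var rows []map[string]string
	if err := json.NewDecoder(res.Body).Decode(&rows); err != nil {
		return nil, err
	}
	return rows, nil
}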
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatRecoveryFunc(t Transport) CatRecovery { + return func(o ...func(*CatRecoveryRequest)) (*Response, error) { + var r = CatRecoveryRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatRecovery returns information about index shard recoveries, both on-going completed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html. +type CatRecovery func(o ...func(*CatRecoveryRequest)) (*Response, error) + +// CatRecoveryRequest configures the Cat Recovery API request. +type CatRecoveryRequest struct { + Index []string + + ActiveOnly *bool + Bytes string + Detailed *bool + Format string + H []string + Help *bool + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatRecoveryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.recovery") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("recovery") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("recovery") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.ActiveOnly != nil { + params["active_only"] = strconv.FormatBool(*r.ActiveOnly) + } + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Detailed != nil { + params["detailed"] = strconv.FormatBool(*r.Detailed) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.Index) > 0 { + params["index"] = strings.Join(r.Index, ",") + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = 
r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.recovery") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.recovery") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatRecovery) WithContext(v context.Context) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.ctx = v + } +} + +// WithIndex - comma-separated list or wildcard expression of index names to limit the returned information. +func (f CatRecovery) WithIndex(v ...string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Index = v + } +} + +// WithActiveOnly - if `true`, the response only includes ongoing shard recoveries. +func (f CatRecovery) WithActiveOnly(v bool) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.ActiveOnly = &v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatRecovery) WithBytes(v string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Bytes = v + } +} + +// WithDetailed - if `true`, the response includes detailed information about shard recoveries. +func (f CatRecovery) WithDetailed(v bool) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Detailed = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatRecovery) WithFormat(v string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatRecovery) WithH(v ...string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatRecovery) WithHelp(v bool) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatRecovery) WithS(v ...string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatRecovery) WithTime(v string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatRecovery) WithV(v bool) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatRecovery) WithPretty() func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatRecovery) WithHuman() func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f CatRecovery) WithErrorTrace() func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatRecovery) WithFilterPath(v ...string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatRecovery) WithHeader(h map[string]string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatRecovery) WithOpaqueID(s string) func(*CatRecoveryRequest) { + return func(r *CatRecoveryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.repositories.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.repositories.go new file mode 100644 index 000000000..4377f097c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.repositories.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatRepositoriesFunc(t Transport) CatRepositories { + return func(o ...func(*CatRepositoriesRequest)) (*Response, error) { + var r = CatRepositoriesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatRepositories returns information about snapshot repositories registered in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html. +type CatRepositories func(o ...func(*CatRepositoriesRequest)) (*Response, error) + +// CatRepositoriesRequest configures the Cat Repositories API request. +type CatRepositoriesRequest struct { + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
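Because the recovery endpoint accepts an index path part plus several filters, a combined sketch may help; it reuses the hypothetical `es` client from the first example, and the index name is an assumption:

func activeRecoveries(ctx context.Context, es *elasticsearch.Client) (string, error) {
	res, err := es.Cat.Recovery(
		es.Cat.Recovery.WithContext(ctx),
		es.Cat.Recovery.WithIndex("my-index"), // hypothetical index name
		es.Cat.Recovery.WithActiveOnly(true),  // ongoing shard recoveries only
		es.Cat.Recovery.WithBytes("mb"),
		es.Cat.Recovery.WithV(true),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}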
+func (r CatRepositoriesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.repositories") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/repositories")) + path.WriteString("http://") + path.WriteString("/_cat/repositories") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.repositories") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.repositories") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatRepositories) WithContext(v context.Context) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatRepositories) WithFormat(v string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatRepositories) WithH(v ...string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatRepositories) WithHelp(v bool) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node. +func (f CatRepositories) WithLocal(v bool) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f CatRepositories) WithMasterTimeout(v time.Duration) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatRepositories) WithS(v ...string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatRepositories) WithV(v bool) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatRepositories) WithPretty() func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatRepositories) WithHuman() func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatRepositories) WithErrorTrace() func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatRepositories) WithFilterPath(v ...string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatRepositories) WithHeader(h map[string]string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatRepositories) WithOpaqueID(s string) func(*CatRepositoriesRequest) { + return func(r *CatRepositoriesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.segments.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.segments.go new file mode 100644 index 000000000..bf7c801c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.segments.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
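The repositories endpoint talks to the elected master, so bounding the call with the generated master-timeout option is the usual pattern. A sketch assuming the same `es` client and an import of `time`:

func listRepositories(es *elasticsearch.Client) (string, error) {
	res, err := es.Cat.Repositories(
		es.Cat.Repositories.WithMasterTimeout(5*time.Second), // cap the master round-trip
		es.Cat.Repositories.WithS("id"),                      // sort rows by repository id
		es.Cat.Repositories.WithV(true),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}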
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatSegmentsFunc(t Transport) CatSegments { + return func(o ...func(*CatSegmentsRequest)) (*Response, error) { + var r = CatSegmentsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatSegments provides low-level information about the segments in the shards of an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html. +type CatSegments func(o ...func(*CatSegmentsRequest)) (*Response, error) + +// CatSegmentsRequest configures the Cat Segments API request. +type CatSegmentsRequest struct { + Index []string + + Bytes string + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatSegmentsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.segments") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("segments") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("segments") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { 
+ req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.segments") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.segments") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatSegments) WithContext(v context.Context) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to limit the returned information. +func (f CatSegments) WithIndex(v ...string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Index = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatSegments) WithBytes(v string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatSegments) WithFormat(v string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatSegments) WithH(v ...string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatSegments) WithHelp(v bool) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatSegments) WithLocal(v bool) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatSegments) WithMasterTimeout(v time.Duration) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatSegments) WithS(v ...string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatSegments) WithV(v bool) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatSegments) WithPretty() func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatSegments) WithHuman() func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatSegments) WithErrorTrace() func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatSegments) WithFilterPath(v ...string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f CatSegments) WithHeader(h map[string]string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatSegments) WithOpaqueID(s string) func(*CatSegmentsRequest) { + return func(r *CatSegmentsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.shards.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.shards.go new file mode 100644 index 000000000..c993738a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.shards.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatShardsFunc(t Transport) CatShards { + return func(o ...func(*CatShardsRequest)) (*Response, error) { + var r = CatShardsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatShards provides a detailed view of shard allocation on nodes. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html. +type CatShards func(o ...func(*CatShardsRequest)) (*Response, error) + +// CatShardsRequest configures the Cat Shards API request. +type CatShardsRequest struct { + Index []string + + Bytes string + Format string + H []string + Help *bool + MasterTimeout time.Duration + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
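A sketch of the segments endpoint with a column selection, again assuming the `es` client from the first example; the index name is hypothetical:

func segmentSizes(es *elasticsearch.Client) (string, error) {
	res, err := es.Cat.Segments(
		es.Cat.Segments.WithIndex("my-index"), // hypothetical index name
		es.Cat.Segments.WithBytes("kb"),
		es.Cat.Segments.WithH("index", "shard", "segment", "size"),
		es.Cat.Segments.WithV(true),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}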
+func (r CatShardsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.shards") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("shards") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("shards") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.shards") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.shards") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatShards) WithContext(v context.Context) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to limit the returned information. +func (f CatShards) WithIndex(v ...string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Index = v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatShards) WithBytes(v string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. 
+func (f CatShards) WithFormat(v string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatShards) WithH(v ...string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatShards) WithHelp(v bool) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Help = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatShards) WithMasterTimeout(v time.Duration) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatShards) WithS(v ...string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatShards) WithTime(v string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatShards) WithV(v bool) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatShards) WithPretty() func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatShards) WithHuman() func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatShards) WithErrorTrace() func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatShards) WithFilterPath(v ...string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatShards) WithHeader(h map[string]string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatShards) WithOpaqueID(s string) func(*CatShardsRequest) { + return func(r *CatShardsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.snapshots.go new file mode 100644 index 000000000..33784c547 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.snapshots.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
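The shards endpoint pairs naturally with the generated opaque-ID helper for tracing a specific call through cluster logs. A sketch under the same client assumption; the index pattern and the opaque-ID value are illustrative:

func shardAllocation(es *elasticsearch.Client) (string, error) {
	res, err := es.Cat.Shards(
		es.Cat.Shards.WithIndex("logs-*"),          // hypothetical wildcard pattern
		es.Cat.Shards.WithTime("ms"),
		es.Cat.Shards.WithOpaqueID("shards-audit"), // sets the X-Opaque-Id header
		es.Cat.Shards.WithV(true),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}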
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatSnapshotsFunc(t Transport) CatSnapshots { + return func(o ...func(*CatSnapshotsRequest)) (*Response, error) { + var r = CatSnapshotsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatSnapshots returns all snapshots in a specific repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html. +type CatSnapshots func(o ...func(*CatSnapshotsRequest)) (*Response, error) + +// CatSnapshotsRequest configures the Cat Snapshots API request. +type CatSnapshotsRequest struct { + Repository []string + + Format string + H []string + Help *bool + IgnoreUnavailable *bool + MasterTimeout time.Duration + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatSnapshotsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.snapshots") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("snapshots") + 1 + len(strings.Join(r.Repository, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("snapshots") + if len(r.Repository) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Repository, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", strings.Join(r.Repository, ",")) + } + } + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, 
path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.snapshots") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.snapshots") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatSnapshots) WithContext(v context.Context) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.ctx = v + } +} + +// WithRepository - name of repository from which to fetch the snapshot information. +func (f CatSnapshots) WithRepository(v ...string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Repository = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatSnapshots) WithFormat(v string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatSnapshots) WithH(v ...string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatSnapshots) WithHelp(v bool) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Help = &v + } +} + +// WithIgnoreUnavailable - set to true to ignore unavailable snapshots. +func (f CatSnapshots) WithIgnoreUnavailable(v bool) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatSnapshots) WithMasterTimeout(v time.Duration) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatSnapshots) WithS(v ...string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatSnapshots) WithTime(v string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatSnapshots) WithV(v bool) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatSnapshots) WithPretty() func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CatSnapshots) WithHuman() func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatSnapshots) WithErrorTrace() func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatSnapshots) WithFilterPath(v ...string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatSnapshots) WithHeader(h map[string]string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatSnapshots) WithOpaqueID(s string) func(*CatSnapshotsRequest) { + return func(r *CatSnapshotsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.tasks.go new file mode 100644 index 000000000..58fb197a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.tasks.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatTasksFunc(t Transport) CatTasks { + return func(o ...func(*CatTasksRequest)) (*Response, error) { + var r = CatTasksRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatTasks returns information about the tasks currently executing on one or more nodes in the cluster. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html. +type CatTasks func(o ...func(*CatTasksRequest)) (*Response, error) + +// CatTasksRequest configures the Cat Tasks API request. 
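Since the snapshots endpoint takes a repository path part, a sketch showing it together with the ignore-unavailable flag; the repository name is an assumption and `es` is the client from the first example:

func listSnapshots(es *elasticsearch.Client) (string, error) {
	res, err := es.Cat.Snapshots(
		es.Cat.Snapshots.WithRepository("my-backups"), // hypothetical repository name
		es.Cat.Snapshots.WithIgnoreUnavailable(true),  // skip snapshots that cannot be read
		es.Cat.Snapshots.WithS("end_epoch"),           // oldest first, newest last
		es.Cat.Snapshots.WithV(true),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}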
+type CatTasksRequest struct { + Actions []string + Detailed *bool + Format string + H []string + Help *bool + Nodes []string + ParentTaskID string + S []string + Time string + Timeout time.Duration + V *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatTasksRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cat/tasks")) + path.WriteString("http://") + path.WriteString("/_cat/tasks") + + params = make(map[string]string) + + if len(r.Actions) > 0 { + params["actions"] = strings.Join(r.Actions, ",") + } + + if r.Detailed != nil { + params["detailed"] = strconv.FormatBool(*r.Detailed) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.Nodes) > 0 { + params["nodes"] = strings.Join(r.Nodes, ",") + } + + if r.ParentTaskID != "" { + params["parent_task_id"] = r.ParentTaskID + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.tasks") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.tasks") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatTasks) WithContext(v context.Context) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.ctx = v + } +} + +// WithActions - a list of actions that should be returned. leave empty to return all.. 
+func (f CatTasks) WithActions(v ...string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Actions = v + } +} + +// WithDetailed - return detailed task information (default: false). +func (f CatTasks) WithDetailed(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Detailed = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatTasks) WithFormat(v string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatTasks) WithH(v ...string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatTasks) WithHelp(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Help = &v + } +} + +// WithNodes - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (f CatTasks) WithNodes(v ...string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Nodes = v + } +} + +// WithParentTaskID - return tasks with specified parent task ID (node_id:task_number). set to -1 to return all.. +func (f CatTasks) WithParentTaskID(v string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.ParentTaskID = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatTasks) WithS(v ...string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatTasks) WithTime(v string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Time = v + } +} + +// WithTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f CatTasks) WithTimeout(v time.Duration) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Timeout = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatTasks) WithV(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.V = &v + } +} + +// WithWaitForCompletion - if `true`, the request blocks until the task has completed.. +func (f CatTasks) WithWaitForCompletion(v bool) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatTasks) WithPretty() func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatTasks) WithHuman() func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatTasks) WithErrorTrace() func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatTasks) WithFilterPath(v ...string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
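// For example (a sketch; `es` stands for an assumed client wired to this
// package), custom headers can be attached per request alongside any
// other option:
//
//	res, err := es.Cat.Tasks(
//		es.Cat.Tasks.WithFormat("json"),
//		es.Cat.Tasks.WithHeader(map[string]string{"X-Request-Source": "audit-job"}),
//	)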
+func (f CatTasks) WithHeader(h map[string]string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatTasks) WithOpaqueID(s string) func(*CatTasksRequest) { + return func(r *CatTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.templates.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.templates.go new file mode 100644 index 000000000..04c03c64c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.templates.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatTemplatesFunc(t Transport) CatTemplates { + return func(o ...func(*CatTemplatesRequest)) (*Response, error) { + var r = CatTemplatesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatTemplates returns information about existing templates. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html. +type CatTemplates func(o ...func(*CatTemplatesRequest)) (*Response, error) + +// CatTemplatesRequest configures the Cat Templates API request. +type CatTemplatesRequest struct { + Name string + + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
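// A direct-invocation sketch (hedged: `es` is an assumed value
// implementing esapi.Transport); the functional options below are
// conveniences over exactly this struct-plus-Do form:
//
//	req := esapi.CatTemplatesRequest{Name: "logs-*", Format: "json"}
//	res, err := req.Do(context.Background(), es)
//	if err == nil {
//		defer res.Body.Close()
//	}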
+func (r CatTemplatesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.templates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("templates") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("templates") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.templates") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.templates") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatTemplates) WithContext(v context.Context) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.ctx = v + } +} + +// WithName - a pattern that returned template names must match. +func (f CatTemplates) WithName(v string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Name = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatTemplates) WithFormat(v string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatTemplates) WithH(v ...string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.H = v + } +} + +// WithHelp - return help information. 
+func (f CatTemplates) WithHelp(v bool) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatTemplates) WithLocal(v bool) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatTemplates) WithMasterTimeout(v time.Duration) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatTemplates) WithS(v ...string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.S = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatTemplates) WithV(v bool) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatTemplates) WithPretty() func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatTemplates) WithHuman() func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatTemplates) WithErrorTrace() func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatTemplates) WithFilterPath(v ...string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatTemplates) WithHeader(h map[string]string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatTemplates) WithOpaqueID(s string) func(*CatTemplatesRequest) { + return func(r *CatTemplatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.thread_pool.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.thread_pool.go new file mode 100644 index 000000000..70d18e59d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cat.thread_pool.go @@ -0,0 +1,334 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newCatThreadPoolFunc(t Transport) CatThreadPool { + return func(o ...func(*CatThreadPoolRequest)) (*Response, error) { + var r = CatThreadPoolRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatThreadPool returns cluster-wide thread pool statistics per node. +// By default the active, queue and rejected statistics are returned for all thread pools. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html. +type CatThreadPool func(o ...func(*CatThreadPoolRequest)) (*Response, error) + +// CatThreadPoolRequest configures the Cat Thread Pool API request. +type CatThreadPoolRequest struct { + ThreadPoolPatterns []string + + Format string + H []string + Help *bool + Local *bool + MasterTimeout time.Duration + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatThreadPoolRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.thread_pool") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("thread_pool") + 1 + len(strings.Join(r.ThreadPoolPatterns, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("thread_pool") + if len(r.ThreadPoolPatterns) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.ThreadPoolPatterns, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "thread_pool_patterns", strings.Join(r.ThreadPoolPatterns, ",")) + } + } + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 
{ + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.thread_pool") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.thread_pool") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatThreadPool) WithContext(v context.Context) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.ctx = v + } +} + +// WithThreadPoolPatterns - a list of regular-expressions to filter the thread pools in the output. +func (f CatThreadPool) WithThreadPoolPatterns(v ...string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.ThreadPoolPatterns = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatThreadPool) WithFormat(v string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatThreadPool) WithH(v ...string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatThreadPool) WithHelp(v bool) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Help = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f CatThreadPool) WithLocal(v bool) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CatThreadPool) WithMasterTimeout(v time.Duration) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.MasterTimeout = v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatThreadPool) WithS(v ...string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatThreadPool) WithTime(v string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatThreadPool) WithV(v bool) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatThreadPool) WithPretty() func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatThreadPool) WithHuman() func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatThreadPool) WithErrorTrace() func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
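// For example (a sketch with an assumed `es` client), the output can be
// limited to a few columns and a machine-readable format before any
// response-body filtering is applied:
//
//	res, err := es.Cat.ThreadPool(
//		es.Cat.ThreadPool.WithThreadPoolPatterns("search", "write"),
//		es.Cat.ThreadPool.WithH("node_name", "name", "active", "queue", "rejected"),
//		es.Cat.ThreadPool.WithFormat("json"),
//	)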
+func (f CatThreadPool) WithFilterPath(v ...string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatThreadPool) WithHeader(h map[string]string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatThreadPool) WithOpaqueID(s string) func(*CatThreadPoolRequest) { + return func(r *CatThreadPoolRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.clear_scroll.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.clear_scroll.go new file mode 100644 index 000000000..b3a86f7d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.clear_scroll.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newClearScrollFunc(t Transport) ClearScroll { + return func(o ...func(*ClearScrollRequest)) (*Response, error) { + var r = ClearScrollRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClearScroll explicitly clears the search context for a scroll. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html. +type ClearScroll func(o ...func(*ClearScrollRequest)) (*Response, error) + +// ClearScrollRequest configures the Clear Scroll API request. +type ClearScrollRequest struct { + Body io.Reader + + ScrollID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
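// A cleanup sketch (hedged: `es` is an assumed esapi.Transport
// implementation and scrollID a value returned by a previous scrolling
// search):
//
//	req := esapi.ClearScrollRequest{ScrollID: []string{scrollID}}
//	res, err := req.Do(context.Background(), es)
//	if err == nil {
//		res.Body.Close()
//	}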
+func (r ClearScrollRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "clear_scroll") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_search") + 1 + len("scroll") + 1 + len(strings.Join(r.ScrollID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("scroll") + if len(r.ScrollID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.ScrollID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "scroll_id", strings.Join(r.ScrollID, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "clear_scroll") + if reader := instrument.RecordRequestBody(ctx, "clear_scroll", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "clear_scroll") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClearScroll) WithContext(v context.Context) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.ctx = v + } +} + +// WithBody - A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter. +func (f ClearScroll) WithBody(v io.Reader) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.Body = v + } +} + +// WithScrollID - a list of scroll ids to clear. +func (f ClearScroll) WithScrollID(v ...string) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.ScrollID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClearScroll) WithPretty() func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ClearScroll) WithHuman() func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClearScroll) WithErrorTrace() func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClearScroll) WithFilterPath(v ...string) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClearScroll) WithHeader(h map[string]string) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClearScroll) WithOpaqueID(s string) func(*ClearScrollRequest) { + return func(r *ClearScrollRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.allocation_explain.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.allocation_explain.go new file mode 100644 index 000000000..8d2f68193 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.allocation_explain.go @@ -0,0 +1,271 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterAllocationExplainFunc(t Transport) ClusterAllocationExplain { + return func(o ...func(*ClusterAllocationExplainRequest)) (*Response, error) { + var r = ClusterAllocationExplainRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterAllocationExplain provides explanations for shard allocations in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html. +type ClusterAllocationExplain func(o ...func(*ClusterAllocationExplainRequest)) (*Response, error) + +// ClusterAllocationExplainRequest configures the Cluster Allocation Explain API request. 
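// Note that the body is optional: called with no body at all, the API
// explains a randomly chosen unassigned shard, so the zero-value request
// is already useful (sketch, assuming an `es` client):
//
//	res, err := es.Cluster.AllocationExplain()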
+type ClusterAllocationExplainRequest struct { + Body io.Reader + + IncludeDiskInfo *bool + IncludeYesDecisions *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterAllocationExplainRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.allocation_explain") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_cluster/allocation/explain")) + path.WriteString("http://") + path.WriteString("/_cluster/allocation/explain") + + params = make(map[string]string) + + if r.IncludeDiskInfo != nil { + params["include_disk_info"] = strconv.FormatBool(*r.IncludeDiskInfo) + } + + if r.IncludeYesDecisions != nil { + params["include_yes_decisions"] = strconv.FormatBool(*r.IncludeYesDecisions) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.allocation_explain") + if reader := instrument.RecordRequestBody(ctx, "cluster.allocation_explain", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.allocation_explain") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterAllocationExplain) WithContext(v context.Context) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.ctx = v + } +} + +// WithBody - The index, shard, and primary flag to explain. Empty means 'explain a randomly-chosen unassigned shard'. 
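// For example (a sketch; `es` is an assumed client), a specific shard
// can be targeted with a small JSON body:
//
//	body := strings.NewReader(`{"index":"my-index","shard":0,"primary":true}`)
//	res, err := es.Cluster.AllocationExplain(
//		es.Cluster.AllocationExplain.WithBody(body),
//		es.Cluster.AllocationExplain.WithIncludeDiskInfo(true),
//	)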
+func (f ClusterAllocationExplain) WithBody(v io.Reader) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.Body = v + } +} + +// WithIncludeDiskInfo - return information about disk usage and shard sizes (default: false). +func (f ClusterAllocationExplain) WithIncludeDiskInfo(v bool) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.IncludeDiskInfo = &v + } +} + +// WithIncludeYesDecisions - return 'yes' decisions in explanation (default: false). +func (f ClusterAllocationExplain) WithIncludeYesDecisions(v bool) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.IncludeYesDecisions = &v + } +} + +// WithMasterTimeout - timeout for connection to master node. +func (f ClusterAllocationExplain) WithMasterTimeout(v time.Duration) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterAllocationExplain) WithPretty() func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterAllocationExplain) WithHuman() func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterAllocationExplain) WithErrorTrace() func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterAllocationExplain) WithFilterPath(v ...string) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterAllocationExplain) WithHeader(h map[string]string) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterAllocationExplain) WithOpaqueID(s string) func(*ClusterAllocationExplainRequest) { + return func(r *ClusterAllocationExplainRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_component_template.go new file mode 100644 index 000000000..3c2baed58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_component_template.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newClusterDeleteComponentTemplateFunc(t Transport) ClusterDeleteComponentTemplate { + return func(name string, o ...func(*ClusterDeleteComponentTemplateRequest)) (*Response, error) { + var r = ClusterDeleteComponentTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterDeleteComponentTemplate deletes a component template +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html. +type ClusterDeleteComponentTemplate func(name string, o ...func(*ClusterDeleteComponentTemplateRequest)) (*Response, error) + +// ClusterDeleteComponentTemplateRequest configures the Cluster Delete Component Template API request. +type ClusterDeleteComponentTemplateRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
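// Unlike the option-only APIs above, the template name is a required
// positional argument in the functional form (sketch, assuming an `es`
// client):
//
//	res, err := es.Cluster.DeleteComponentTemplate(
//		"my-component-template",
//		es.Cluster.DeleteComponentTemplate.WithTimeout(30*time.Second),
//	)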
+func (r ClusterDeleteComponentTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.delete_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_component_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.delete_component_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.delete_component_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterDeleteComponentTemplate) WithContext(v context.Context) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f ClusterDeleteComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ClusterDeleteComponentTemplate) WithTimeout(v time.Duration) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterDeleteComponentTemplate) WithPretty() func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ClusterDeleteComponentTemplate) WithHuman() func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterDeleteComponentTemplate) WithErrorTrace() func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterDeleteComponentTemplate) WithFilterPath(v ...string) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterDeleteComponentTemplate) WithHeader(h map[string]string) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterDeleteComponentTemplate) WithOpaqueID(s string) func(*ClusterDeleteComponentTemplateRequest) { + return func(r *ClusterDeleteComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_voting_config_exclusions.go new file mode 100644 index 000000000..43f7c6103 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.delete_voting_config_exclusions.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterDeleteVotingConfigExclusionsFunc(t Transport) ClusterDeleteVotingConfigExclusions { + return func(o ...func(*ClusterDeleteVotingConfigExclusionsRequest)) (*Response, error) { + var r = ClusterDeleteVotingConfigExclusionsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterDeleteVotingConfigExclusions clears cluster voting config exclusions. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html. 
+type ClusterDeleteVotingConfigExclusions func(o ...func(*ClusterDeleteVotingConfigExclusionsRequest)) (*Response, error) + +// ClusterDeleteVotingConfigExclusionsRequest configures the Cluster Delete Voting Config Exclusions API request. +type ClusterDeleteVotingConfigExclusionsRequest struct { + MasterTimeout time.Duration + WaitForRemoval *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterDeleteVotingConfigExclusionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.delete_voting_config_exclusions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_cluster/voting_config_exclusions")) + path.WriteString("http://") + path.WriteString("/_cluster/voting_config_exclusions") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForRemoval != nil { + params["wait_for_removal"] = strconv.FormatBool(*r.WaitForRemoval) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.delete_voting_config_exclusions") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.delete_voting_config_exclusions") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterDeleteVotingConfigExclusions) WithContext(v context.Context) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for submitting request to master. +func (f ClusterDeleteVotingConfigExclusions) WithMasterTimeout(v time.Duration) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForRemoval - specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list.. 
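// For example (a sketch; `es` is an assumed client), the exclusions list
// can be cleared immediately, without waiting for the excluded nodes to
// actually leave the cluster:
//
//	res, err := es.Cluster.DeleteVotingConfigExclusions(
//		es.Cluster.DeleteVotingConfigExclusions.WithWaitForRemoval(false),
//	)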
+func (f ClusterDeleteVotingConfigExclusions) WithWaitForRemoval(v bool) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.WaitForRemoval = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterDeleteVotingConfigExclusions) WithPretty() func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterDeleteVotingConfigExclusions) WithHuman() func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterDeleteVotingConfigExclusions) WithErrorTrace() func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterDeleteVotingConfigExclusions) WithFilterPath(v ...string) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterDeleteVotingConfigExclusions) WithHeader(h map[string]string) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterDeleteVotingConfigExclusions) WithOpaqueID(s string) func(*ClusterDeleteVotingConfigExclusionsRequest) { + return func(r *ClusterDeleteVotingConfigExclusionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.exists_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.exists_component_template.go new file mode 100644 index 000000000..50facc059 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.exists_component_template.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterExistsComponentTemplateFunc(t Transport) ClusterExistsComponentTemplate { + return func(name string, o ...func(*ClusterExistsComponentTemplateRequest)) (*Response, error) { + var r = ClusterExistsComponentTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterExistsComponentTemplate returns information about whether a particular component template exist +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html. +type ClusterExistsComponentTemplate func(name string, o ...func(*ClusterExistsComponentTemplateRequest)) (*Response, error) + +// ClusterExistsComponentTemplateRequest configures the Cluster Exists Component Template API request. +type ClusterExistsComponentTemplateRequest struct { + Name string + + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterExistsComponentTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.exists_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + path.Grow(7 + 1 + len("_component_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.exists_component_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", 
"cluster.exists_component_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterExistsComponentTemplate) WithContext(v context.Context) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.ctx = v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f ClusterExistsComponentTemplate) WithLocal(v bool) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f ClusterExistsComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterExistsComponentTemplate) WithPretty() func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterExistsComponentTemplate) WithHuman() func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterExistsComponentTemplate) WithErrorTrace() func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterExistsComponentTemplate) WithFilterPath(v ...string) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterExistsComponentTemplate) WithHeader(h map[string]string) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterExistsComponentTemplate) WithOpaqueID(s string) func(*ClusterExistsComponentTemplateRequest) { + return func(r *ClusterExistsComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_component_template.go new file mode 100644 index 000000000..72e7ea486 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_component_template.go @@ -0,0 +1,295 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterGetComponentTemplateFunc(t Transport) ClusterGetComponentTemplate { + return func(o ...func(*ClusterGetComponentTemplateRequest)) (*Response, error) { + var r = ClusterGetComponentTemplateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterGetComponentTemplate returns one or more component templates +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html. +type ClusterGetComponentTemplate func(o ...func(*ClusterGetComponentTemplateRequest)) (*Response, error) + +// ClusterGetComponentTemplateRequest configures the Cluster Get Component Template API request. +type ClusterGetComponentTemplateRequest struct { + Name []string + + FlatSettings *bool + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration + SettingsFilter string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
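+// The Response wraps the transport's raw response body; closing it after
+// reading is the caller's responsibility.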
+func (r ClusterGetComponentTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_component_template") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_component_template") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.SettingsFilter != "" { + params["settings_filter"] = r.SettingsFilter + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.get_component_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.get_component_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterGetComponentTemplate) WithContext(v context.Context) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.ctx = v + } +} + +// WithName - the comma separated names of the component templates. +func (f ClusterGetComponentTemplate) WithName(v ...string) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.Name = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). 
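+// In flat format, setting names are returned as dotted paths (for example
+// "index.number_of_replicas") rather than nested JSON objects.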
+func (f ClusterGetComponentTemplate) WithFlatSettings(v bool) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.FlatSettings = &v + } +} + +// WithIncludeDefaults - return all default configurations for the component template (default: false). +func (f ClusterGetComponentTemplate) WithIncludeDefaults(v bool) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.IncludeDefaults = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f ClusterGetComponentTemplate) WithLocal(v bool) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f ClusterGetComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithSettingsFilter - filter out results, for example to filter out sensitive information. supports wildcards or full settings keys. +func (f ClusterGetComponentTemplate) WithSettingsFilter(v string) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.SettingsFilter = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterGetComponentTemplate) WithPretty() func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterGetComponentTemplate) WithHuman() func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterGetComponentTemplate) WithErrorTrace() func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterGetComponentTemplate) WithFilterPath(v ...string) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterGetComponentTemplate) WithHeader(h map[string]string) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterGetComponentTemplate) WithOpaqueID(s string) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_settings.go new file mode 100644 index 000000000..f50d3ab7d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.get_settings.go @@ -0,0 +1,266 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterGetSettingsFunc(t Transport) ClusterGetSettings { + return func(o ...func(*ClusterGetSettingsRequest)) (*Response, error) { + var r = ClusterGetSettingsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterGetSettings returns cluster settings. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-get-settings.html. +type ClusterGetSettings func(o ...func(*ClusterGetSettingsRequest)) (*Response, error) + +// ClusterGetSettingsRequest configures the Cluster Get Settings API request. +type ClusterGetSettingsRequest struct { + FlatSettings *bool + IncludeDefaults *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
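+// A non-nil error indicates a transport-level failure; HTTP error statuses
+// are reported through the returned Response's StatusCode field instead.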
+func (r ClusterGetSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cluster/settings")) + path.WriteString("http://") + path.WriteString("/_cluster/settings") + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.get_settings") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.get_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterGetSettings) WithContext(v context.Context) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.ctx = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f ClusterGetSettings) WithFlatSettings(v bool) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.FlatSettings = &v + } +} + +// WithIncludeDefaults - whether to return all default clusters setting.. +func (f ClusterGetSettings) WithIncludeDefaults(v bool) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f ClusterGetSettings) WithMasterTimeout(v time.Duration) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f ClusterGetSettings) WithTimeout(v time.Duration) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterGetSettings) WithPretty() func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterGetSettings) WithHuman() func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterGetSettings) WithErrorTrace() func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterGetSettings) WithFilterPath(v ...string) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterGetSettings) WithHeader(h map[string]string) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterGetSettings) WithOpaqueID(s string) func(*ClusterGetSettingsRequest) { + return func(r *ClusterGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.health.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.health.go new file mode 100644 index 000000000..c69ee4746 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.health.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterHealthFunc(t Transport) ClusterHealth { + return func(o ...func(*ClusterHealthRequest)) (*Response, error) { + var r = ClusterHealthRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterHealth returns basic information about the health of the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html. 
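+//
+// A typical call through the client, as a sketch assuming an initialized
+// client "es" and a context "ctx":
+//
+//	res, err := es.Cluster.Health(
+//		es.Cluster.Health.WithContext(ctx),
+//		es.Cluster.Health.WithWaitForStatus("yellow"),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}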
+type ClusterHealth func(o ...func(*ClusterHealthRequest)) (*Response, error) + +// ClusterHealthRequest configures the Cluster Health API request. +type ClusterHealthRequest struct { + Index []string + + ExpandWildcards string + Level string + Local *bool + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + WaitForEvents string + WaitForNoInitializingShards *bool + WaitForNoRelocatingShards *bool + WaitForNodes string + WaitForStatus string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterHealthRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cluster") + 1 + len("health") + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("health") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Level != "" { + params["level"] = r.Level + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.WaitForEvents != "" { + params["wait_for_events"] = r.WaitForEvents + } + + if r.WaitForNoInitializingShards != nil { + params["wait_for_no_initializing_shards"] = strconv.FormatBool(*r.WaitForNoInitializingShards) + } + + if r.WaitForNoRelocatingShards != nil { + params["wait_for_no_relocating_shards"] = strconv.FormatBool(*r.WaitForNoRelocatingShards) + } + + if r.WaitForNodes != "" { + params["wait_for_nodes"] = r.WaitForNodes + } + + if r.WaitForStatus != "" { + params["wait_for_status"] = r.WaitForStatus + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.health") 
+ } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.health") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterHealth) WithContext(v context.Context) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.ctx = v + } +} + +// WithIndex - limit the information returned to a specific index. +func (f ClusterHealth) WithIndex(v ...string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Index = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f ClusterHealth) WithExpandWildcards(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.ExpandWildcards = v + } +} + +// WithLevel - specify the level of detail for returned information. +func (f ClusterHealth) WithLevel(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Level = v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f ClusterHealth) WithLocal(v bool) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ClusterHealth) WithMasterTimeout(v time.Duration) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ClusterHealth) WithTimeout(v time.Duration) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - wait until the specified number of shards is active. +func (f ClusterHealth) WithWaitForActiveShards(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForActiveShards = v + } +} + +// WithWaitForEvents - wait until all currently queued events with the given priority are processed. +func (f ClusterHealth) WithWaitForEvents(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForEvents = v + } +} + +// WithWaitForNoInitializingShards - whether to wait until there are no initializing shards in the cluster. +func (f ClusterHealth) WithWaitForNoInitializingShards(v bool) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForNoInitializingShards = &v + } +} + +// WithWaitForNoRelocatingShards - whether to wait until there are no relocating shards in the cluster. +func (f ClusterHealth) WithWaitForNoRelocatingShards(v bool) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForNoRelocatingShards = &v + } +} + +// WithWaitForNodes - wait until the specified number of nodes is available. +func (f ClusterHealth) WithWaitForNodes(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForNodes = v + } +} + +// WithWaitForStatus - wait until cluster is in a specific state. 
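+// Accepted values are "green", "yellow" and "red".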
+func (f ClusterHealth) WithWaitForStatus(v string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.WaitForStatus = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterHealth) WithPretty() func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterHealth) WithHuman() func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterHealth) WithErrorTrace() func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterHealth) WithFilterPath(v ...string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterHealth) WithHeader(h map[string]string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterHealth) WithOpaqueID(s string) func(*ClusterHealthRequest) { + return func(r *ClusterHealthRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.info.go new file mode 100644 index 000000000..a0821da15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.info.go @@ -0,0 +1,228 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newClusterInfoFunc(t Transport) ClusterInfo { + return func(target []string, o ...func(*ClusterInfoRequest)) (*Response, error) { + var r = ClusterInfoRequest{Target: target} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterInfo returns different information about the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html. 
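+//
+// At least one target must be given. A minimal sketch, assuming an
+// initialized client "es":
+//
+//	res, err := es.Cluster.Info([]string{"_all"})
+//	if err == nil {
+//		defer res.Body.Close()
+//	}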
+type ClusterInfo func(target []string, o ...func(*ClusterInfoRequest)) (*Response, error) + +// ClusterInfoRequest configures the Cluster Info API request. +type ClusterInfoRequest struct { + Target []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Target) == 0 { + return nil, errors.New("target is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_info") + 1 + len(strings.Join(r.Target, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_info") + path.WriteString("/") + path.WriteString(strings.Join(r.Target, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", strings.Join(r.Target, ",")) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterInfo) WithContext(v context.Context) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterInfo) WithPretty() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterInfo) WithHuman() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterInfo) WithErrorTrace() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
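+// Paths are joined with commas and may contain wildcards, for example
+// "cluster_name" or "nodes.*".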
+func (f ClusterInfo) WithFilterPath(v ...string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterInfo) WithHeader(h map[string]string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterInfo) WithOpaqueID(s string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.pending_tasks.go new file mode 100644 index 000000000..5d62db8d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.pending_tasks.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterPendingTasksFunc(t Transport) ClusterPendingTasks { + return func(o ...func(*ClusterPendingTasksRequest)) (*Response, error) { + var r = ClusterPendingTasksRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterPendingTasks returns a list of any cluster-level changes (e.g. create index, update mapping, +// allocate or fail shard) which have not yet been executed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html. +type ClusterPendingTasks func(o ...func(*ClusterPendingTasksRequest)) (*Response, error) + +// ClusterPendingTasksRequest configures the Cluster Pending Tasks API request. +type ClusterPendingTasksRequest struct { + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
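+// The request carries no body; every option is encoded as a query parameter.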
+func (r ClusterPendingTasksRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_cluster/pending_tasks")) + path.WriteString("http://") + path.WriteString("/_cluster/pending_tasks") + + params = make(map[string]string) + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.pending_tasks") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.pending_tasks") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterPendingTasks) WithContext(v context.Context) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.ctx = v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f ClusterPendingTasks) WithLocal(v bool) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f ClusterPendingTasks) WithMasterTimeout(v time.Duration) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterPendingTasks) WithPretty() func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterPendingTasks) WithHuman() func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ClusterPendingTasks) WithErrorTrace() func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterPendingTasks) WithFilterPath(v ...string) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterPendingTasks) WithHeader(h map[string]string) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterPendingTasks) WithOpaqueID(s string) func(*ClusterPendingTasksRequest) { + return func(r *ClusterPendingTasksRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.post_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.post_voting_config_exclusions.go new file mode 100644 index 000000000..91b7ec7f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.post_voting_config_exclusions.go @@ -0,0 +1,265 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newClusterPostVotingConfigExclusionsFunc(t Transport) ClusterPostVotingConfigExclusions { + return func(o ...func(*ClusterPostVotingConfigExclusionsRequest)) (*Response, error) { + var r = ClusterPostVotingConfigExclusionsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterPostVotingConfigExclusions updates the cluster voting config exclusions by node ids or node names. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html. +type ClusterPostVotingConfigExclusions func(o ...func(*ClusterPostVotingConfigExclusionsRequest)) (*Response, error) + +// ClusterPostVotingConfigExclusionsRequest configures the Cluster Post Voting Config Exclusions API request. 
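+// NodeIds and NodeNames are mutually exclusive; set at most one of them.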
+type ClusterPostVotingConfigExclusionsRequest struct { + MasterTimeout time.Duration + NodeIds string + NodeNames string + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterPostVotingConfigExclusionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.post_voting_config_exclusions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_cluster/voting_config_exclusions")) + path.WriteString("http://") + path.WriteString("/_cluster/voting_config_exclusions") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.NodeIds != "" { + params["node_ids"] = r.NodeIds + } + + if r.NodeNames != "" { + params["node_names"] = r.NodeNames + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.post_voting_config_exclusions") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.post_voting_config_exclusions") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterPostVotingConfigExclusions) WithContext(v context.Context) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for submitting request to master. +func (f ClusterPostVotingConfigExclusions) WithMasterTimeout(v time.Duration) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.MasterTimeout = v + } +} + +// WithNodeIds - a list of the persistent ids of the nodes to exclude from the voting configuration. if specified, you may not also specify ?node_names.. 
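+// The value is a single comma-separated string, not a slice.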
+func (f ClusterPostVotingConfigExclusions) WithNodeIds(v string) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.NodeIds = v + } +} + +// WithNodeNames - a list of the names of the nodes to exclude from the voting configuration. if specified, you may not also specify ?node_ids.. +func (f ClusterPostVotingConfigExclusions) WithNodeNames(v string) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.NodeNames = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ClusterPostVotingConfigExclusions) WithTimeout(v time.Duration) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterPostVotingConfigExclusions) WithPretty() func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterPostVotingConfigExclusions) WithHuman() func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterPostVotingConfigExclusions) WithErrorTrace() func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterPostVotingConfigExclusions) WithFilterPath(v ...string) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterPostVotingConfigExclusions) WithHeader(h map[string]string) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterPostVotingConfigExclusions) WithOpaqueID(s string) func(*ClusterPostVotingConfigExclusionsRequest) { + return func(r *ClusterPostVotingConfigExclusionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_component_template.go new file mode 100644 index 000000000..44885a3e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_component_template.go @@ -0,0 +1,272 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterPutComponentTemplateFunc(t Transport) ClusterPutComponentTemplate { + return func(name string, body io.Reader, o ...func(*ClusterPutComponentTemplateRequest)) (*Response, error) { + var r = ClusterPutComponentTemplateRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterPutComponentTemplate creates or updates a component template +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html. +type ClusterPutComponentTemplate func(name string, body io.Reader, o ...func(*ClusterPutComponentTemplateRequest)) (*Response, error) + +// ClusterPutComponentTemplateRequest configures the Cluster Put Component Template API request. +type ClusterPutComponentTemplateRequest struct { + Body io.Reader + + Name string + + Cause string + Create *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
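+// When Body is set and no Content-Type header is present, the request
+// defaults to the transport's JSON content type.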
+func (r ClusterPutComponentTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.put_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_component_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Cause != "" { + params["cause"] = r.Cause + } + + if r.Create != nil { + params["create"] = strconv.FormatBool(*r.Create) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.put_component_template") + if reader := instrument.RecordRequestBody(ctx, "cluster.put_component_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.put_component_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterPutComponentTemplate) WithContext(v context.Context) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.ctx = v + } +} + +// WithCause - user defined reason for create the component template. +func (f ClusterPutComponentTemplate) WithCause(v string) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.Cause = v + } +} + +// WithCreate - whether the index template should only be added if new or can also replace an existing one. +func (f ClusterPutComponentTemplate) WithCreate(v bool) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.Create = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. 
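+// The duration is serialized with formatDuration and sent as the
+// "master_timeout" query parameter.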
+func (f ClusterPutComponentTemplate) WithMasterTimeout(v time.Duration) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterPutComponentTemplate) WithPretty() func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterPutComponentTemplate) WithHuman() func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterPutComponentTemplate) WithErrorTrace() func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterPutComponentTemplate) WithFilterPath(v ...string) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterPutComponentTemplate) WithHeader(h map[string]string) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterPutComponentTemplate) WithOpaqueID(s string) func(*ClusterPutComponentTemplateRequest) { + return func(r *ClusterPutComponentTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_settings.go new file mode 100644 index 000000000..f1ba21271 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.put_settings.go @@ -0,0 +1,264 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterPutSettingsFunc(t Transport) ClusterPutSettings { + return func(body io.Reader, o ...func(*ClusterPutSettingsRequest)) (*Response, error) { + var r = ClusterPutSettingsRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterPutSettings updates the cluster settings. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html. +type ClusterPutSettings func(body io.Reader, o ...func(*ClusterPutSettingsRequest)) (*Response, error) + +// ClusterPutSettingsRequest configures the Cluster Put Settings API request. +type ClusterPutSettingsRequest struct { + Body io.Reader + + FlatSettings *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterPutSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.put_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_cluster/settings")) + path.WriteString("http://") + path.WriteString("/_cluster/settings") + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.put_settings") + if reader := instrument.RecordRequestBody(ctx, "cluster.put_settings", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.put_settings") + } + 
if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterPutSettings) WithContext(v context.Context) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.ctx = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f ClusterPutSettings) WithFlatSettings(v bool) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.FlatSettings = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ClusterPutSettings) WithMasterTimeout(v time.Duration) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ClusterPutSettings) WithTimeout(v time.Duration) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterPutSettings) WithPretty() func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterPutSettings) WithHuman() func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterPutSettings) WithErrorTrace() func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterPutSettings) WithFilterPath(v ...string) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterPutSettings) WithHeader(h map[string]string) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterPutSettings) WithOpaqueID(s string) func(*ClusterPutSettingsRequest) { + return func(r *ClusterPutSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.remote_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.remote_info.go new file mode 100644 index 000000000..00184c9fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.remote_info.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newClusterRemoteInfoFunc(t Transport) ClusterRemoteInfo { + return func(o ...func(*ClusterRemoteInfoRequest)) (*Response, error) { + var r = ClusterRemoteInfoRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterRemoteInfo returns the information about configured remote clusters. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html. +type ClusterRemoteInfo func(o ...func(*ClusterRemoteInfoRequest)) (*Response, error) + +// ClusterRemoteInfoRequest configures the Cluster Remote Info API request. +type ClusterRemoteInfoRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterRemoteInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.remote_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_remote/info")) + path.WriteString("http://") + path.WriteString("/_remote/info") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.remote_info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.remote_info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, 
nil +} + +// WithContext sets the request context. +func (f ClusterRemoteInfo) WithContext(v context.Context) func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterRemoteInfo) WithPretty() func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterRemoteInfo) WithHuman() func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterRemoteInfo) WithErrorTrace() func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterRemoteInfo) WithFilterPath(v ...string) func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterRemoteInfo) WithHeader(h map[string]string) func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterRemoteInfo) WithOpaqueID(s string) func(*ClusterRemoteInfoRequest) { + return func(r *ClusterRemoteInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.reroute.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.reroute.go new file mode 100644 index 000000000..b0e862432 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.reroute.go @@ -0,0 +1,307 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterRerouteFunc(t Transport) ClusterReroute { + return func(o ...func(*ClusterRerouteRequest)) (*Response, error) { + var r = ClusterRerouteRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterReroute allows to manually change the allocation of individual shards in the cluster. 
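+//
+// A non-normative usage sketch (the client value "es" and its wiring are
+// assumed, not part of this diff): issue a dry-run reroute that retries
+// failed shard allocations,
+//
+//	res, err := es.Cluster.Reroute(
+//		es.Cluster.Reroute.WithContext(context.Background()),
+//		es.Cluster.Reroute.WithDryRun(true),
+//		es.Cluster.Reroute.WithRetryFailed(true),
+//	)
+//	if err != nil {
+//		log.Fatalf("reroute failed: %s", err)
+//	}
+//	defer res.Body.Close()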
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html. +type ClusterReroute func(o ...func(*ClusterRerouteRequest)) (*Response, error) + +// ClusterRerouteRequest configures the Cluster Reroute API request. +type ClusterRerouteRequest struct { + Body io.Reader + + DryRun *bool + Explain *bool + MasterTimeout time.Duration + Metric []string + RetryFailed *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ClusterRerouteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.reroute") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_cluster/reroute")) + path.WriteString("http://") + path.WriteString("/_cluster/reroute") + + params = make(map[string]string) + + if r.DryRun != nil { + params["dry_run"] = strconv.FormatBool(*r.DryRun) + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if len(r.Metric) > 0 { + params["metric"] = strings.Join(r.Metric, ",") + } + + if r.RetryFailed != nil { + params["retry_failed"] = strconv.FormatBool(*r.RetryFailed) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.reroute") + if reader := instrument.RecordRequestBody(ctx, "cluster.reroute", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.reroute") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f ClusterReroute) WithContext(v context.Context) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.ctx = v + } +} + +// WithBody - The definition of `commands` to perform (`move`, `cancel`, `allocate`). +func (f ClusterReroute) WithBody(v io.Reader) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Body = v + } +} + +// WithDryRun - simulate the operation only and return the resulting state. +func (f ClusterReroute) WithDryRun(v bool) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.DryRun = &v + } +} + +// WithExplain - return an explanation of why the commands can or cannot be executed. +func (f ClusterReroute) WithExplain(v bool) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Explain = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ClusterReroute) WithMasterTimeout(v time.Duration) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.MasterTimeout = v + } +} + +// WithMetric - limit the information returned to the specified metrics. defaults to all but metadata. +func (f ClusterReroute) WithMetric(v ...string) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Metric = v + } +} + +// WithRetryFailed - retries allocation of shards that are blocked due to too many subsequent allocation failures. +func (f ClusterReroute) WithRetryFailed(v bool) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.RetryFailed = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f ClusterReroute) WithTimeout(v time.Duration) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterReroute) WithPretty() func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterReroute) WithHuman() func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterReroute) WithErrorTrace() func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterReroute) WithFilterPath(v ...string) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterReroute) WithHeader(h map[string]string) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterReroute) WithOpaqueID(s string) func(*ClusterRerouteRequest) { + return func(r *ClusterRerouteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.state.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.state.go new file mode 100644 index 000000000..bbea2c788 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.state.go @@ -0,0 +1,349 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterStateFunc(t Transport) ClusterState { + return func(o ...func(*ClusterStateRequest)) (*Response, error) { + var r = ClusterStateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterState returns a comprehensive information about the state of the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html. +type ClusterState func(o ...func(*ClusterStateRequest)) (*Response, error) + +// ClusterStateRequest configures the Cluster State API request. +type ClusterStateRequest struct { + Index []string + + Metric []string + + AllowNoIndices *bool + ExpandWildcards string + FlatSettings *bool + IgnoreUnavailable *bool + Local *bool + MasterTimeout time.Duration + WaitForMetadataVersion *int + WaitForTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
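+//
+// Illustrative call path (hypothetical client "es", wiring assumed): the
+// variadic options below populate Metric and Index, which Do joins into
+// the /_cluster/state/{metric}/{index} URL,
+//
+//	res, err := es.Cluster.State(
+//		es.Cluster.State.WithMetric("metadata", "routing_table"),
+//		es.Cluster.State.WithIndex("my-index"),
+//	)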
+func (r ClusterStateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.state") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cluster") + 1 + len("state") + 1 + len(strings.Join(r.Metric, ",")) + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("state") + if len(r.Metric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Metric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ",")) + } + } + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForMetadataVersion != nil { + params["wait_for_metadata_version"] = strconv.FormatInt(int64(*r.WaitForMetadataVersion), 10) + } + + if r.WaitForTimeout != 0 { + params["wait_for_timeout"] = formatDuration(r.WaitForTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.state") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.state") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f ClusterState) WithContext(v context.Context) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. +func (f ClusterState) WithIndex(v ...string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.Index = v + } +} + +// WithMetric - limit the information returned to the specified metrics. +func (f ClusterState) WithMetric(v ...string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.Metric = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f ClusterState) WithAllowNoIndices(v bool) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f ClusterState) WithExpandWildcards(v string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.ExpandWildcards = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f ClusterState) WithFlatSettings(v bool) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.FlatSettings = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f ClusterState) WithIgnoreUnavailable(v bool) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f ClusterState) WithLocal(v bool) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f ClusterState) WithMasterTimeout(v time.Duration) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForMetadataVersion - wait for the metadata version to be equal or greater than the specified metadata version. +func (f ClusterState) WithWaitForMetadataVersion(v int) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.WaitForMetadataVersion = &v + } +} + +// WithWaitForTimeout - the maximum time to wait for wait_for_metadata_version before timing out. +func (f ClusterState) WithWaitForTimeout(v time.Duration) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.WaitForTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterState) WithPretty() func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClusterState) WithHuman() func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterState) WithErrorTrace() func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterState) WithFilterPath(v ...string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f ClusterState) WithHeader(h map[string]string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterState) WithOpaqueID(s string) func(*ClusterStateRequest) { + return func(r *ClusterStateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.stats.go new file mode 100644 index 000000000..c4628f883 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.cluster.stats.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newClusterStatsFunc(t Transport) ClusterStats { + return func(o ...func(*ClusterStatsRequest)) (*Response, error) { + var r = ClusterStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterStats returns high-level overview of cluster statistics. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html. +type ClusterStats func(o ...func(*ClusterStatsRequest)) (*Response, error) + +// ClusterStatsRequest configures the Cluster Stats API request. +type ClusterStatsRequest struct { + NodeID []string + + IncludeRemotes *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
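+//
+// Sketch (client "es" assumed): limit stats to the node handling the
+// request by passing the `_local` node filter,
+//
+//	res, err := es.Cluster.Stats(es.Cluster.Stats.WithNodeID("_local"))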
+func (r ClusterStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cluster.stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + len("/_cluster/stats/nodes/") + len(strings.Join(r.NodeID, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_cluster")
+	path.WriteString("/")
+	path.WriteString("stats")
+	if len(r.NodeID) > 0 {
+		path.WriteString("/")
+		path.WriteString("nodes")
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.NodeID, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.IncludeRemotes != nil {
+		params["include_remotes"] = strconv.FormatBool(*r.IncludeRemotes)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "cluster.stats")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "cluster.stats")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ClusterStats) WithContext(v context.Context) func(*ClusterStatsRequest) {
+	return func(r *ClusterStatsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (f ClusterStats) WithNodeID(v ...string) func(*ClusterStatsRequest) {
+	return func(r *ClusterStatsRequest) {
+		r.NodeID = v
+	}
+}
+
+// WithIncludeRemotes - include remote cluster data into the response (default: false).
+func (f ClusterStats) WithIncludeRemotes(v bool) func(*ClusterStatsRequest) {
+	return func(r *ClusterStatsRequest) {
+		r.IncludeRemotes = &v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f ClusterStats) WithTimeout(v time.Duration) func(*ClusterStatsRequest) {
+	return func(r *ClusterStatsRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ClusterStats) WithPretty() func(*ClusterStatsRequest) {
+	return func(r *ClusterStatsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ClusterStats) WithHuman() func(*ClusterStatsRequest) { + return func(r *ClusterStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterStats) WithErrorTrace() func(*ClusterStatsRequest) { + return func(r *ClusterStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterStats) WithFilterPath(v ...string) func(*ClusterStatsRequest) { + return func(r *ClusterStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterStats) WithHeader(h map[string]string) func(*ClusterStatsRequest) { + return func(r *ClusterStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterStats) WithOpaqueID(s string) func(*ClusterStatsRequest) { + return func(r *ClusterStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.check_in.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.check_in.go new file mode 100644 index 000000000..75ab402de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.check_in.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorCheckInFunc(t Transport) ConnectorCheckIn { + return func(connector_id string, o ...func(*ConnectorCheckInRequest)) (*Response, error) { + var r = ConnectorCheckInRequest{ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorCheckIn updates the last_seen timestamp in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html. +type ConnectorCheckIn func(connector_id string, o ...func(*ConnectorCheckInRequest)) (*Response, error) + +// ConnectorCheckInRequest configures the Connector Check In API request. 
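+//
+// ConnectorID is bound from the required positional argument of the
+// ConnectorCheckIn function; the remaining fields become query string
+// parameters. Hypothetical call (client name assumed):
+//
+//	res, err := es.Connector.CheckIn("my-connector")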
+type ConnectorCheckInRequest struct { + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorCheckInRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_check_in")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_check_in") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.check_in") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.check_in") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorCheckIn) WithContext(v context.Context) func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorCheckIn) WithPretty() func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorCheckIn) WithHuman() func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorCheckIn) WithErrorTrace() func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorCheckIn) WithFilterPath(v ...string) func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorCheckIn) WithHeader(h map[string]string) func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorCheckIn) WithOpaqueID(s string) func(*ConnectorCheckInRequest) { + return func(r *ConnectorCheckInRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.delete.go new file mode 100644 index 000000000..b2110c371 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.delete.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newConnectorDeleteFunc(t Transport) ConnectorDelete { + return func(connector_id string, o ...func(*ConnectorDeleteRequest)) (*Response, error) { + var r = ConnectorDeleteRequest{ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorDelete deletes a connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html. +type ConnectorDelete func(connector_id string, o ...func(*ConnectorDeleteRequest)) (*Response, error) + +// ConnectorDeleteRequest configures the Connector Delete API request. +type ConnectorDeleteRequest struct { + ConnectorID string + + DeleteSyncJobs *bool + Hard *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
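+//
+// For illustration only (client wiring assumed): soft-delete a connector
+// together with its sync jobs,
+//
+//	res, err := es.Connector.Delete(
+//		"my-connector",
+//		es.Connector.Delete.WithDeleteSyncJobs(true),
+//	)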
+func (r ConnectorDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + + params = make(map[string]string) + + if r.DeleteSyncJobs != nil { + params["delete_sync_jobs"] = strconv.FormatBool(*r.DeleteSyncJobs) + } + + if r.Hard != nil { + params["hard"] = strconv.FormatBool(*r.Hard) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorDelete) WithContext(v context.Context) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.ctx = v + } +} + +// WithDeleteSyncJobs - determines whether associated sync jobs are also deleted.. +func (f ConnectorDelete) WithDeleteSyncJobs(v bool) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.DeleteSyncJobs = &v + } +} + +// WithHard - if true, the connector doc is deleted. if false, connector doc is marked as deleted (soft-deleted).. +func (f ConnectorDelete) WithHard(v bool) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.Hard = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorDelete) WithPretty() func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorDelete) WithHuman() func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ConnectorDelete) WithErrorTrace() func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorDelete) WithFilterPath(v ...string) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorDelete) WithHeader(h map[string]string) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorDelete) WithOpaqueID(s string) func(*ConnectorDeleteRequest) { + return func(r *ConnectorDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.get.go new file mode 100644 index 000000000..7046f1d71 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.get.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newConnectorGetFunc(t Transport) ConnectorGet { + return func(connector_id string, o ...func(*ConnectorGetRequest)) (*Response, error) { + var r = ConnectorGetRequest{ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorGet returns the details about a connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html. +type ConnectorGet func(connector_id string, o ...func(*ConnectorGetRequest)) (*Response, error) + +// ConnectorGetRequest configures the Connector Get API request. +type ConnectorGetRequest struct { + ConnectorID string + + IncludeDeleted *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
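+//
+// Illustrative only (client "es" assumed); include_deleted is the lone
+// API-specific query parameter,
+//
+//	res, err := es.Connector.Get("my-connector",
+//		es.Connector.Get.WithIncludeDeleted(true))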
+func (r ConnectorGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + + params = make(map[string]string) + + if r.IncludeDeleted != nil { + params["include_deleted"] = strconv.FormatBool(*r.IncludeDeleted) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorGet) WithContext(v context.Context) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.ctx = v + } +} + +// WithIncludeDeleted - a flag indicating whether to return connectors that have been soft-deleted.. +func (f ConnectorGet) WithIncludeDeleted(v bool) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.IncludeDeleted = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorGet) WithPretty() func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorGet) WithHuman() func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorGet) WithErrorTrace() func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorGet) WithFilterPath(v ...string) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorGet) WithHeader(h map[string]string) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorGet) WithOpaqueID(s string) func(*ConnectorGetRequest) { + return func(r *ConnectorGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.last_sync.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.last_sync.go new file mode 100644 index 000000000..7e41ed37a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.last_sync.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorLastSyncFunc(t Transport) ConnectorLastSync { + return func(body io.Reader, connector_id string, o ...func(*ConnectorLastSyncRequest)) (*Response, error) { + var r = ConnectorLastSyncRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorLastSync updates the stats of last sync in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html. +type ConnectorLastSync func(body io.Reader, connector_id string, o ...func(*ConnectorLastSyncRequest)) (*Response, error) + +// ConnectorLastSyncRequest configures the Connector Last Sync API request. +type ConnectorLastSyncRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
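+//
+// The sync-stats payload is the required body argument, followed by the
+// connector ID; a sketch with an illustrative payload (field names and
+// client wiring assumed):
+//
+//	res, err := es.Connector.LastSync(
+//		strings.NewReader(`{"last_sync_status":"completed"}`),
+//		"my-connector",
+//	)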
+func (r ConnectorLastSyncRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.last_sync") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_last_sync")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_last_sync") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.last_sync") + if reader := instrument.RecordRequestBody(ctx, "connector.last_sync", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.last_sync") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorLastSync) WithContext(v context.Context) func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorLastSync) WithPretty() func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorLastSync) WithHuman() func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorLastSync) WithErrorTrace() func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorLastSync) WithFilterPath(v ...string) func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorLastSync) WithHeader(h map[string]string) func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorLastSync) WithOpaqueID(s string) func(*ConnectorLastSyncRequest) { + return func(r *ConnectorLastSyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.list.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.list.go new file mode 100644 index 000000000..d065c08b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.list.go @@ -0,0 +1,303 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newConnectorListFunc(t Transport) ConnectorList { + return func(o ...func(*ConnectorListRequest)) (*Response, error) { + var r = ConnectorListRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorList lists all connectors. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html. +type ConnectorList func(o ...func(*ConnectorListRequest)) (*Response, error) + +// ConnectorListRequest configures the Connector List API request. +type ConnectorListRequest struct { + ConnectorName []string + From *int + IncludeDeleted *bool + IndexName []string + Query string + ServiceType []string + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
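+//
+// Usage sketch (editor's illustration, not generated code): paging through
+// connectors with the functional options defined below, assuming the usual
+// esapi namespace wiring (es.Connector.List):
+//
+//	res, err := es.Connector.List(
+//		es.Connector.List.WithContext(context.Background()),
+//		es.Connector.List.WithFrom(0),
+//		es.Connector.List.WithSize(20),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()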
+func (r ConnectorListRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "connector.list")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + len("/_connector"))
+	path.WriteString("http://")
+	path.WriteString("/_connector")
+
+	params = make(map[string]string)
+
+	if len(r.ConnectorName) > 0 {
+		params["connector_name"] = strings.Join(r.ConnectorName, ",")
+	}
+
+	if r.From != nil {
+		params["from"] = strconv.FormatInt(int64(*r.From), 10)
+	}
+
+	if r.IncludeDeleted != nil {
+		params["include_deleted"] = strconv.FormatBool(*r.IncludeDeleted)
+	}
+
+	if len(r.IndexName) > 0 {
+		params["index_name"] = strings.Join(r.IndexName, ",")
+	}
+
+	if r.Query != "" {
+		params["query"] = r.Query
+	}
+
+	if len(r.ServiceType) > 0 {
+		params["service_type"] = strings.Join(r.ServiceType, ",")
+	}
+
+	if r.Size != nil {
+		params["size"] = strconv.FormatInt(int64(*r.Size), 10)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "connector.list")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "connector.list")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ConnectorList) WithContext(v context.Context) func(*ConnectorListRequest) {
+	return func(r *ConnectorListRequest) {
+		r.ctx = v
+	}
+}
+
+// WithConnectorName - a list of connector names to fetch connector documents for.
+func (f ConnectorList) WithConnectorName(v ...string) func(*ConnectorListRequest) {
+	return func(r *ConnectorListRequest) {
+		r.ConnectorName = v
+	}
+}
+
+// WithFrom - starting offset (default: 0).
+func (f ConnectorList) WithFrom(v int) func(*ConnectorListRequest) {
+	return func(r *ConnectorListRequest) {
+		r.From = &v
+	}
+}
+
+// WithIncludeDeleted - a flag indicating whether to return connectors that have been soft-deleted.
+func (f ConnectorList) WithIncludeDeleted(v bool) func(*ConnectorListRequest) {
+	return func(r *ConnectorListRequest) {
+		r.IncludeDeleted = &v
+	}
+}
+
+// WithIndexName - a list of connector index names to fetch connector documents for.
+func (f ConnectorList) WithIndexName(v ...string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.IndexName = v + } +} + +// WithQuery - a search string for querying connectors, filtering results by matching against connector names, descriptions, and index names. +func (f ConnectorList) WithQuery(v string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.Query = v + } +} + +// WithServiceType - a list of connector service types to fetch connector documents for. +func (f ConnectorList) WithServiceType(v ...string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.ServiceType = v + } +} + +// WithSize - specifies a max number of results to get (default: 100). +func (f ConnectorList) WithSize(v int) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorList) WithPretty() func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorList) WithHuman() func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorList) WithErrorTrace() func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorList) WithFilterPath(v ...string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorList) WithHeader(h map[string]string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorList) WithOpaqueID(s string) func(*ConnectorListRequest) { + return func(r *ConnectorListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.post.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.post.go new file mode 100644 index 000000000..e1c401b2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.post.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorPostFunc(t Transport) ConnectorPost { + return func(o ...func(*ConnectorPostRequest)) (*Response, error) { + var r = ConnectorPostRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorPost creates a connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html. +type ConnectorPost func(o ...func(*ConnectorPostRequest)) (*Response, error) + +// ConnectorPostRequest configures the Connector Post API request. +type ConnectorPostRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorPostRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_connector")) + path.WriteString("http://") + path.WriteString("/_connector") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.post") + if reader := instrument.RecordRequestBody(ctx, "connector.post", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.post") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
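+//
+// Usage sketch (editor's illustration, not generated code): creating a
+// connector with a server-generated ID, assuming es.Connector.Post under
+// the usual esapi namespace wiring; the body fields are illustrative only:
+//
+//	res, err := es.Connector.Post(
+//		es.Connector.Post.WithBody(strings.NewReader(
+//			`{"index_name": "search-data", "service_type": "google_drive"}`,
+//		)),
+//		es.Connector.Post.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()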
+func (f ConnectorPost) WithContext(v context.Context) func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - The connector configuration.
+func (f ConnectorPost) WithBody(v io.Reader) func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.Body = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ConnectorPost) WithPretty() func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ConnectorPost) WithHuman() func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ConnectorPost) WithErrorTrace() func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ConnectorPost) WithFilterPath(v ...string) func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ConnectorPost) WithHeader(h map[string]string) func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ConnectorPost) WithOpaqueID(s string) func(*ConnectorPostRequest) {
+	return func(r *ConnectorPostRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.put.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.put.go
new file mode 100644
index 000000000..a8c97406b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.put.go
@@ -0,0 +1,251 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT

+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func newConnectorPutFunc(t Transport) ConnectorPut {
+	return func(o ...func(*ConnectorPutRequest)) (*Response, error) {
+		var r = ConnectorPutRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ConnectorPut creates or updates a connector.
+//
+// This API is experimental.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html.
+type ConnectorPut func(o ...func(*ConnectorPutRequest)) (*Response, error)
+
+// ConnectorPutRequest configures the Connector Put API request.
+type ConnectorPutRequest struct {
+	Body io.Reader
+
+	ConnectorID string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r ConnectorPutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "connector.put")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "PUT"
+
+	path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_connector")
+	if r.ConnectorID != "" {
+		path.WriteString("/")
+		path.WriteString(r.ConnectorID)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID)
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "connector.put")
+		if reader := instrument.RecordRequestBody(ctx, "connector.put", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "connector.put")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ConnectorPut) WithContext(v context.Context) func(*ConnectorPutRequest) {
+	return func(r *ConnectorPutRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - The connector configuration.
+func (f ConnectorPut) WithBody(v io.Reader) func(*ConnectorPutRequest) {
+	return func(r *ConnectorPutRequest) {
+		r.Body = v
+	}
+}
+
+// WithConnectorID - the unique identifier of the connector to be created or updated.
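+//
+// Usage sketch (editor's illustration, not generated code): upserting a
+// connector by ID via the options below, assuming es.Connector.Put in the
+// usual esapi namespace layout; the ID and body values are hypothetical:
+//
+//	res, err := es.Connector.Put(
+//		es.Connector.Put.WithConnectorID("my-connector"),
+//		es.Connector.Put.WithBody(strings.NewReader(
+//			`{"index_name": "search-data", "name": "My connector"}`,
+//		)),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()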
+func (f ConnectorPut) WithConnectorID(v string) func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + r.ConnectorID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorPut) WithPretty() func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorPut) WithHuman() func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorPut) WithErrorTrace() func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorPut) WithFilterPath(v ...string) func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorPut) WithHeader(h map[string]string) func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorPut) WithOpaqueID(s string) func(*ConnectorPutRequest) { + return func(r *ConnectorPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_delete.go new file mode 100644 index 000000000..d35dcc958 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_delete.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSecretDeleteFunc(t Transport) ConnectorSecretDelete { + return func(id string, o ...func(*ConnectorSecretDeleteRequest)) (*Response, error) { + var r = ConnectorSecretDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSecretDelete deletes a connector secret. +// +// This API is experimental. 
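+//
+// Usage sketch (editor's illustration, not generated code): deleting a
+// stored connector secret by its document ID, assuming the usual esapi
+// namespace wiring (es.Connector.SecretDelete) and a hypothetical ID:
+//
+//	res, err := es.Connector.SecretDelete(
+//		"my-secret-id",
+//		es.Connector.SecretDelete.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()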
+type ConnectorSecretDelete func(id string, o ...func(*ConnectorSecretDeleteRequest)) (*Response, error) + +// ConnectorSecretDeleteRequest configures the Connector Secret Delete API request. +type ConnectorSecretDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSecretDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.secret_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.secret_delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.secret_delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSecretDelete) WithContext(v context.Context) func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSecretDelete) WithPretty() func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSecretDelete) WithHuman() func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ConnectorSecretDelete) WithErrorTrace() func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSecretDelete) WithFilterPath(v ...string) func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSecretDelete) WithHeader(h map[string]string) func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSecretDelete) WithOpaqueID(s string) func(*ConnectorSecretDeleteRequest) { + return func(r *ConnectorSecretDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_get.go new file mode 100644 index 000000000..c8564455c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_get.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSecretGetFunc(t Transport) ConnectorSecretGet { + return func(id string, o ...func(*ConnectorSecretGetRequest)) (*Response, error) { + var r = ConnectorSecretGetRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSecretGet retrieves a secret stored by Connectors. +// +// This API is experimental. +type ConnectorSecretGet func(id string, o ...func(*ConnectorSecretGetRequest)) (*Response, error) + +// ConnectorSecretGetRequest configures the Connector Secret Get API request. +type ConnectorSecretGetRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
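+//
+// Usage sketch (editor's illustration, not generated code): fetching a
+// secret by its document ID, assuming es.Connector.SecretGet under the
+// usual esapi namespace wiring and a hypothetical ID:
+//
+//	res, err := es.Connector.SecretGet(
+//		"my-secret-id",
+//		es.Connector.SecretGet.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()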
+func (r ConnectorSecretGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.secret_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.secret_get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.secret_get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSecretGet) WithContext(v context.Context) func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSecretGet) WithPretty() func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSecretGet) WithHuman() func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSecretGet) WithErrorTrace() func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSecretGet) WithFilterPath(v ...string) func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f ConnectorSecretGet) WithHeader(h map[string]string) func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSecretGet) WithOpaqueID(s string) func(*ConnectorSecretGetRequest) { + return func(r *ConnectorSecretGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_post.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_post.go new file mode 100644 index 000000000..13ff872de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_post.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSecretPostFunc(t Transport) ConnectorSecretPost { + return func(body io.Reader, o ...func(*ConnectorSecretPostRequest)) (*Response, error) { + var r = ConnectorSecretPostRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSecretPost creates a secret for a Connector. +// +// This API is experimental. +type ConnectorSecretPost func(body io.Reader, o ...func(*ConnectorSecretPostRequest)) (*Response, error) + +// ConnectorSecretPostRequest configures the Connector Secret Post API request. +type ConnectorSecretPostRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
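+//
+// Usage sketch (editor's illustration, not generated code): storing a new
+// secret, assuming es.Connector.SecretPost under the usual esapi namespace
+// wiring; the "value" body field is illustrative only:
+//
+//	res, err := es.Connector.SecretPost(
+//		strings.NewReader(`{"value": "my-api-key"}`),
+//		es.Connector.SecretPost.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()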
+func (r ConnectorSecretPostRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.secret_post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_connector/_secret")) + path.WriteString("http://") + path.WriteString("/_connector/_secret") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.secret_post") + if reader := instrument.RecordRequestBody(ctx, "connector.secret_post", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.secret_post") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSecretPost) WithContext(v context.Context) func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSecretPost) WithPretty() func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSecretPost) WithHuman() func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSecretPost) WithErrorTrace() func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSecretPost) WithFilterPath(v ...string) func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f ConnectorSecretPost) WithHeader(h map[string]string) func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSecretPost) WithOpaqueID(s string) func(*ConnectorSecretPostRequest) { + return func(r *ConnectorSecretPostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_put.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_put.go new file mode 100644 index 000000000..d3dde8726 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.secret_put.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSecretPutFunc(t Transport) ConnectorSecretPut { + return func(id string, body io.Reader, o ...func(*ConnectorSecretPutRequest)) (*Response, error) { + var r = ConnectorSecretPutRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSecretPut creates or updates a secret for a Connector. +// +// This API is experimental. +type ConnectorSecretPut func(id string, body io.Reader, o ...func(*ConnectorSecretPutRequest)) (*Response, error) + +// ConnectorSecretPutRequest configures the Connector Secret Put API request. +type ConnectorSecretPutRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
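+//
+// Usage sketch (editor's illustration, not generated code): writing a secret
+// under a caller-chosen document ID (note the ID-then-body argument order),
+// assuming es.Connector.SecretPut under the usual esapi namespace wiring;
+// the ID and body are hypothetical:
+//
+//	res, err := es.Connector.SecretPut(
+//		"my-secret-id",
+//		strings.NewReader(`{"value": "my-api-key"}`),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()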
+func (r ConnectorSecretPutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.secret_put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.secret_put") + if reader := instrument.RecordRequestBody(ctx, "connector.secret_put", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.secret_put") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSecretPut) WithContext(v context.Context) func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSecretPut) WithPretty() func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSecretPut) WithHuman() func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSecretPut) WithErrorTrace() func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorSecretPut) WithFilterPath(v ...string) func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSecretPut) WithHeader(h map[string]string) func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSecretPut) WithOpaqueID(s string) func(*ConnectorSecretPutRequest) { + return func(r *ConnectorSecretPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_cancel.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_cancel.go new file mode 100644 index 000000000..64f5dc6db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_cancel.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSyncJobCancelFunc(t Transport) ConnectorSyncJobCancel { + return func(connector_sync_job_id string, o ...func(*ConnectorSyncJobCancelRequest)) (*Response, error) { + var r = ConnectorSyncJobCancelRequest{ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobCancel cancels a connector sync job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html. +type ConnectorSyncJobCancel func(connector_sync_job_id string, o ...func(*ConnectorSyncJobCancelRequest)) (*Response, error) + +// ConnectorSyncJobCancelRequest configures the Connector Sync Job Cancel API request. +type ConnectorSyncJobCancelRequest struct { + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
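+//
+// Usage sketch (editor's illustration, not generated code): cancelling a
+// sync job by its ID, assuming es.Connector.SyncJobCancel under the usual
+// esapi namespace wiring and a hypothetical job ID:
+//
+//	res, err := es.Connector.SyncJobCancel(
+//		"my-sync-job-id",
+//		es.Connector.SyncJobCancel.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()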
+func (r ConnectorSyncJobCancelRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_cancel") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID) + 1 + len("_cancel")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + path.WriteString("/") + path.WriteString("_cancel") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_cancel") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_cancel") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobCancel) WithContext(v context.Context) func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobCancel) WithPretty() func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobCancel) WithHuman() func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobCancel) WithErrorTrace() func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorSyncJobCancel) WithFilterPath(v ...string) func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobCancel) WithHeader(h map[string]string) func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobCancel) WithOpaqueID(s string) func(*ConnectorSyncJobCancelRequest) { + return func(r *ConnectorSyncJobCancelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_check_in.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_check_in.go new file mode 100644 index 000000000..2626deb29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_check_in.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSyncJobCheckInFunc(t Transport) ConnectorSyncJobCheckIn { + return func(connector_sync_job_id string, o ...func(*ConnectorSyncJobCheckInRequest)) (*Response, error) { + var r = ConnectorSyncJobCheckInRequest{ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobCheckIn checks in a connector sync job (refreshes 'last_seen'). +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html. +type ConnectorSyncJobCheckIn func(connector_sync_job_id string, o ...func(*ConnectorSyncJobCheckInRequest)) (*Response, error) + +// ConnectorSyncJobCheckInRequest configures the Connector Sync Job Check In API request. +type ConnectorSyncJobCheckInRequest struct { + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
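+//
+// Usage sketch (editor's illustration, not generated code): refreshing a
+// sync job's last_seen timestamp, assuming es.Connector.SyncJobCheckIn
+// under the usual esapi namespace wiring and a hypothetical job ID:
+//
+//	res, err := es.Connector.SyncJobCheckIn(
+//		"my-sync-job-id",
+//		es.Connector.SyncJobCheckIn.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()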
+func (r ConnectorSyncJobCheckInRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID) + 1 + len("_check_in")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + path.WriteString("/") + path.WriteString("_check_in") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_check_in") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_check_in") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobCheckIn) WithContext(v context.Context) func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobCheckIn) WithPretty() func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobCheckIn) WithHuman() func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobCheckIn) WithErrorTrace() func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorSyncJobCheckIn) WithFilterPath(v ...string) func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobCheckIn) WithHeader(h map[string]string) func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobCheckIn) WithOpaqueID(s string) func(*ConnectorSyncJobCheckInRequest) { + return func(r *ConnectorSyncJobCheckInRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_claim.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_claim.go new file mode 100644 index 000000000..da610e03c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_claim.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSyncJobClaimFunc(t Transport) ConnectorSyncJobClaim { + return func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobClaimRequest)) (*Response, error) { + var r = ConnectorSyncJobClaimRequest{Body: body, ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobClaim claims a connector sync job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/claim-connector-sync-job-api.html. +type ConnectorSyncJobClaim func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobClaimRequest)) (*Response, error) + +// ConnectorSyncJobClaimRequest configures the Connector Sync Job Claim API request. +type ConnectorSyncJobClaimRequest struct { + Body io.Reader + + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
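+//
+// A hypothetical sketch of claiming a job for a worker. The body schema,
+// {"worker_hostname": ...}, is an assumption drawn from the claim API
+// documentation rather than from this file; "es" is an assumed client:
+//
+//	body := strings.NewReader(`{"worker_hostname": "worker-1"}`)
+//	res, err := es.Connector.SyncJobClaim(body, "my-sync-job-id")
+//	if err != nil {
+//		log.Fatalf("claim failed: %s", err)
+//	}
+//	defer res.Body.Close()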
+func (r ConnectorSyncJobClaimRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID) + 1 + len("_claim")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + path.WriteString("/") + path.WriteString("_claim") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_claim") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_claim", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_claim") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobClaim) WithContext(v context.Context) func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobClaim) WithPretty() func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobClaim) WithHuman() func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ConnectorSyncJobClaim) WithErrorTrace() func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobClaim) WithFilterPath(v ...string) func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobClaim) WithHeader(h map[string]string) func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobClaim) WithOpaqueID(s string) func(*ConnectorSyncJobClaimRequest) { + return func(r *ConnectorSyncJobClaimRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_delete.go new file mode 100644 index 000000000..3172b229d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_delete.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSyncJobDeleteFunc(t Transport) ConnectorSyncJobDelete { + return func(connector_sync_job_id string, o ...func(*ConnectorSyncJobDeleteRequest)) (*Response, error) { + var r = ConnectorSyncJobDeleteRequest{ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobDelete deletes a connector sync job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html. +type ConnectorSyncJobDelete func(connector_sync_job_id string, o ...func(*ConnectorSyncJobDeleteRequest)) (*Response, error) + +// ConnectorSyncJobDeleteRequest configures the Connector Sync Job Delete API request. 
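+//
+// A sketch of the corresponding call (assuming an esapi client "es" built
+// elsewhere; only the sync job ID is required):
+//
+//	res, err := es.Connector.SyncJobDelete("my-sync-job-id")
+//	if err != nil {
+//		log.Fatalf("delete failed: %s", err)
+//	}
+//	defer res.Body.Close()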
+type ConnectorSyncJobDeleteRequest struct { + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSyncJobDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobDelete) WithContext(v context.Context) func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobDelete) WithPretty() func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobDelete) WithHuman() func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ConnectorSyncJobDelete) WithErrorTrace() func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobDelete) WithFilterPath(v ...string) func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobDelete) WithHeader(h map[string]string) func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobDelete) WithOpaqueID(s string) func(*ConnectorSyncJobDeleteRequest) { + return func(r *ConnectorSyncJobDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_error.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_error.go new file mode 100644 index 000000000..ecc7224c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_error.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSyncJobErrorFunc(t Transport) ConnectorSyncJobError { + return func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobErrorRequest)) (*Response, error) { + var r = ConnectorSyncJobErrorRequest{Body: body, ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobError sets an error for a connector sync job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html. +type ConnectorSyncJobError func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobErrorRequest)) (*Response, error) + +// ConnectorSyncJobErrorRequest configures the Connector Sync Job Error API request. 
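+//
+// A sketch of marking a job as failed. The {"error": ...} body shape is an
+// assumption from the set connector sync job error API, not from this file:
+//
+//	body := strings.NewReader(`{"error": "source unreachable"}`)
+//	res, err := es.Connector.SyncJobError(body, "my-sync-job-id")
+//	if err != nil {
+//		log.Fatalf("setting the error failed: %s", err)
+//	}
+//	defer res.Body.Close()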
+type ConnectorSyncJobErrorRequest struct { + Body io.Reader + + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSyncJobErrorRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID) + 1 + len("_error")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + path.WriteString("/") + path.WriteString("_error") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_error") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_error", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_error") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobError) WithContext(v context.Context) func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobError) WithPretty() func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ConnectorSyncJobError) WithHuman() func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobError) WithErrorTrace() func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobError) WithFilterPath(v ...string) func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobError) WithHeader(h map[string]string) func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobError) WithOpaqueID(s string) func(*ConnectorSyncJobErrorRequest) { + return func(r *ConnectorSyncJobErrorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_get.go new file mode 100644 index 000000000..5e5599e46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_get.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorSyncJobGetFunc(t Transport) ConnectorSyncJobGet { + return func(connector_sync_job_id string, o ...func(*ConnectorSyncJobGetRequest)) (*Response, error) { + var r = ConnectorSyncJobGetRequest{ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobGet returns the details about a connector sync job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html. +type ConnectorSyncJobGet func(connector_sync_job_id string, o ...func(*ConnectorSyncJobGetRequest)) (*Response, error) + +// ConnectorSyncJobGetRequest configures the Connector Sync Job Get API request. 
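+//
+// A sketch of fetching and decoding a job document (assumes client "es";
+// the response body is raw JSON, so the decoding shown is illustrative):
+//
+//	res, err := es.Connector.SyncJobGet("my-sync-job-id")
+//	if err != nil {
+//		log.Fatalf("get failed: %s", err)
+//	}
+//	defer res.Body.Close()
+//
+//	var job map[string]interface{}
+//	if err := json.NewDecoder(res.Body).Decode(&job); err != nil {
+//		log.Fatalf("decoding failed: %s", err)
+//	}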
+type ConnectorSyncJobGetRequest struct { + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSyncJobGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobGet) WithContext(v context.Context) func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobGet) WithPretty() func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobGet) WithHuman() func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobGet) WithErrorTrace() func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorSyncJobGet) WithFilterPath(v ...string) func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobGet) WithHeader(h map[string]string) func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobGet) WithOpaqueID(s string) func(*ConnectorSyncJobGetRequest) { + return func(r *ConnectorSyncJobGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_list.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_list.go new file mode 100644 index 000000000..6d00f01d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_list.go @@ -0,0 +1,279 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newConnectorSyncJobListFunc(t Transport) ConnectorSyncJobList { + return func(o ...func(*ConnectorSyncJobListRequest)) (*Response, error) { + var r = ConnectorSyncJobListRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobList lists all connector sync jobs. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html. +type ConnectorSyncJobList func(o ...func(*ConnectorSyncJobListRequest)) (*Response, error) + +// ConnectorSyncJobListRequest configures the Connector Sync Job List API request. +type ConnectorSyncJobListRequest struct { + ConnectorID string + From *int + JobType []string + Size *int + Status string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
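+//
+// A paging sketch (assumes client "es"; WithFrom and WithSize map onto the
+// from and size query parameters built below):
+//
+//	res, err := es.Connector.SyncJobList(
+//		es.Connector.SyncJobList.WithConnectorID("my-connector"),
+//		es.Connector.SyncJobList.WithFrom(0),
+//		es.Connector.SyncJobList.WithSize(20),
+//	)
+//	if err != nil {
+//		log.Fatalf("list failed: %s", err)
+//	}
+//	defer res.Body.Close()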
+func (r ConnectorSyncJobListRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_connector/_sync_job")) + path.WriteString("http://") + path.WriteString("/_connector/_sync_job") + + params = make(map[string]string) + + if r.ConnectorID != "" { + params["connector_id"] = r.ConnectorID + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if len(r.JobType) > 0 { + params["job_type"] = strings.Join(r.JobType, ",") + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Status != "" { + params["status"] = r.Status + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_list") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_list") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobList) WithContext(v context.Context) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.ctx = v + } +} + +// WithConnectorID - ID of the connector to fetch the sync jobs for. +func (f ConnectorSyncJobList) WithConnectorID(v string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.ConnectorID = v + } +} + +// WithFrom - starting offset (default: 0). +func (f ConnectorSyncJobList) WithFrom(v int) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.From = &v + } +} + +// WithJobType - a list of job types. +func (f ConnectorSyncJobList) WithJobType(v ...string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.JobType = v + } +} + +// WithSize - specifies a max number of results to get (default: 100). +func (f ConnectorSyncJobList) WithSize(v int) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.Size = &v + } +} + +// WithStatus - sync job status, which sync jobs are fetched for. 
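+//
+// Only jobs whose status matches the given value are returned. The known
+// values (an assumption from the connector sync job documentation; they are
+// not enforced by this client) are: canceling, canceled, completed, error,
+// in_progress, pending and suspended, e.g.:
+//
+//	es.Connector.SyncJobList.WithStatus("pending")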
+func (f ConnectorSyncJobList) WithStatus(v string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.Status = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobList) WithPretty() func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobList) WithHuman() func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobList) WithErrorTrace() func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobList) WithFilterPath(v ...string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobList) WithHeader(h map[string]string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobList) WithOpaqueID(s string) func(*ConnectorSyncJobListRequest) { + return func(r *ConnectorSyncJobListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_post.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_post.go new file mode 100644 index 000000000..b1b5ac0e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_post.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSyncJobPostFunc(t Transport) ConnectorSyncJobPost { + return func(body io.Reader, o ...func(*ConnectorSyncJobPostRequest)) (*Response, error) { + var r = ConnectorSyncJobPostRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobPost creates a connector sync job. +// +// This API is experimental. 
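+//
+// The request body identifies the connector to sync; a minimal hypothetical
+// payload (field names assumed from the create connector sync job API, not
+// taken from this file; "es" is an assumed client):
+//
+//	body := strings.NewReader(`{"id": "my-connector", "job_type": "full"}`)
+//	res, err := es.Connector.SyncJobPost(body)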
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html. +type ConnectorSyncJobPost func(body io.Reader, o ...func(*ConnectorSyncJobPostRequest)) (*Response, error) + +// ConnectorSyncJobPostRequest configures the Connector Sync Job Post API request. +type ConnectorSyncJobPostRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSyncJobPostRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_connector/_sync_job")) + path.WriteString("http://") + path.WriteString("/_connector/_sync_job") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_post") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_post", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_post") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobPost) WithContext(v context.Context) func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobPost) WithPretty() func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorSyncJobPost) WithHuman() func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f ConnectorSyncJobPost) WithErrorTrace() func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobPost) WithFilterPath(v ...string) func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobPost) WithHeader(h map[string]string) func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobPost) WithOpaqueID(s string) func(*ConnectorSyncJobPostRequest) { + return func(r *ConnectorSyncJobPostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_update_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_update_stats.go new file mode 100644 index 000000000..bee74b0a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.sync_job_update_stats.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorSyncJobUpdateStatsFunc(t Transport) ConnectorSyncJobUpdateStats { + return func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobUpdateStatsRequest)) (*Response, error) { + var r = ConnectorSyncJobUpdateStatsRequest{Body: body, ConnectorSyncJobID: connector_sync_job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorSyncJobUpdateStats updates the stats fields in the connector sync job document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html. +type ConnectorSyncJobUpdateStats func(body io.Reader, connector_sync_job_id string, o ...func(*ConnectorSyncJobUpdateStatsRequest)) (*Response, error) + +// ConnectorSyncJobUpdateStatsRequest configures the Connector Sync Job Update Stats API request. 
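+//
+// A sketch of reporting progress. The counter fields are assumptions from
+// the set connector sync job stats API, not from this file; volumes are
+// byte counts there:
+//
+//	body := strings.NewReader(`{
+//		"deleted_document_count": 0,
+//		"indexed_document_count": 120,
+//		"indexed_document_volume": 52340
+//	}`)
+//	res, err := es.Connector.SyncJobUpdateStats(body, "my-sync-job-id")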
+type ConnectorSyncJobUpdateStatsRequest struct { + Body io.Reader + + ConnectorSyncJobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorSyncJobUpdateStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len("_sync_job") + 1 + len(r.ConnectorSyncJobID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + path.WriteString(r.ConnectorSyncJobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_sync_job_id", r.ConnectorSyncJobID) + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_update_stats") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_update_stats", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_update_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorSyncJobUpdateStats) WithContext(v context.Context) func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorSyncJobUpdateStats) WithPretty() func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ConnectorSyncJobUpdateStats) WithHuman() func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorSyncJobUpdateStats) WithErrorTrace() func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorSyncJobUpdateStats) WithFilterPath(v ...string) func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorSyncJobUpdateStats) WithHeader(h map[string]string) func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorSyncJobUpdateStats) WithOpaqueID(s string) func(*ConnectorSyncJobUpdateStatsRequest) { + return func(r *ConnectorSyncJobUpdateStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_active_filtering.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_active_filtering.go new file mode 100644 index 000000000..774a7e22e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_active_filtering.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newConnectorUpdateActiveFilteringFunc(t Transport) ConnectorUpdateActiveFiltering { + return func(connector_id string, o ...func(*ConnectorUpdateActiveFilteringRequest)) (*Response, error) { + var r = ConnectorUpdateActiveFilteringRequest{ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateActiveFiltering activates the draft filtering rules if they are in a validated state. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html. 
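+//
+// A sketch of activating a connector's validated draft rules (assumes an
+// esapi client "es"; note the argument is the connector ID, not a sync job
+// ID):
+//
+//	res, err := es.Connector.UpdateActiveFiltering("my-connector")
+//	if err != nil {
+//		log.Fatalf("activation failed: %s", err)
+//	}
+//	defer res.Body.Close()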
+type ConnectorUpdateActiveFiltering func(connector_id string, o ...func(*ConnectorUpdateActiveFilteringRequest)) (*Response, error) + +// ConnectorUpdateActiveFilteringRequest configures the Connector Update Active Filtering API request. +type ConnectorUpdateActiveFilteringRequest struct { + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateActiveFilteringRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_active_filtering") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_filtering") + 1 + len("_activate")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_filtering") + path.WriteString("/") + path.WriteString("_activate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_active_filtering") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_active_filtering") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateActiveFiltering) WithContext(v context.Context) func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateActiveFiltering) WithPretty() func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ConnectorUpdateActiveFiltering) WithHuman() func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateActiveFiltering) WithErrorTrace() func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateActiveFiltering) WithFilterPath(v ...string) func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateActiveFiltering) WithHeader(h map[string]string) func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateActiveFiltering) WithOpaqueID(s string) func(*ConnectorUpdateActiveFilteringRequest) { + return func(r *ConnectorUpdateActiveFilteringRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_api_key_id.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_api_key_id.go new file mode 100644 index 000000000..276fcac8b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_api_key_id.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateAPIKeyDocumentIDFunc(t Transport) ConnectorUpdateAPIKeyDocumentID { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateAPIKeyDocumentIDRequest)) (*Response, error) { + var r = ConnectorUpdateAPIKeyDocumentIDRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateAPIKeyDocumentID updates the API key id and/or API key secret id fields in the connector document. +// +// This API is experimental. 
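+//
+// A hypothetical body carrying both fields (names assumed from the update
+// connector API key ID documentation; either field may be sent on its own,
+// and "es" is an assumed client):
+//
+//	body := strings.NewReader(`{"api_key_id": "abc123", "api_key_secret_id": "def456"}`)
+//	res, err := es.Connector.UpdateAPIKeyDocumentID(body, "my-connector")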
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html. +type ConnectorUpdateAPIKeyDocumentID func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateAPIKeyDocumentIDRequest)) (*Response, error) + +// ConnectorUpdateAPIKeyDocumentIDRequest configures the Connector UpdateAPI Key DocumentI D API request. +type ConnectorUpdateAPIKeyDocumentIDRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateAPIKeyDocumentIDRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_api_key_id") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_api_key_id")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_api_key_id") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_api_key_id") + if reader := instrument.RecordRequestBody(ctx, "connector.update_api_key_id", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_api_key_id") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f ConnectorUpdateAPIKeyDocumentID) WithContext(v context.Context) func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateAPIKeyDocumentID) WithPretty() func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateAPIKeyDocumentID) WithHuman() func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateAPIKeyDocumentID) WithErrorTrace() func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateAPIKeyDocumentID) WithFilterPath(v ...string) func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateAPIKeyDocumentID) WithHeader(h map[string]string) func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateAPIKeyDocumentID) WithOpaqueID(s string) func(*ConnectorUpdateAPIKeyDocumentIDRequest) { + return func(r *ConnectorUpdateAPIKeyDocumentIDRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_configuration.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_configuration.go new file mode 100644 index 000000000..c35fc5644 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_configuration.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
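Since every file in this batch is generated from the same template, one end-to-end sketch covers the whole calling convention, using the request type added just below. The snippet is illustrative and not part of the diff: the connector ID and payload are made up, and it assumes the v9 package keeps the NewDefaultClient constructor familiar from v8. *elasticsearch.Client satisfies the esapi Transport interface through its Perform method, which is exactly what the generated Do methods expect.

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/esapi"
)

func main() {
	// Assumes ELASTICSEARCH_URL points at a reachable cluster.
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client error: %s", err)
	}

	// Hypothetical connector ID and configuration payload.
	req := esapi.ConnectorUpdateConfigurationRequest{
		ConnectorID: "my-connector",
		Body:        strings.NewReader(`{"values":{"host":"example.com"}}`),
	}

	// The client itself is passed as the Transport.
	res, err := req.Do(context.Background(), es)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()
	log.Printf("status: %d", res.StatusCode)
}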
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateConfigurationFunc(t Transport) ConnectorUpdateConfiguration { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateConfigurationRequest)) (*Response, error) { + var r = ConnectorUpdateConfigurationRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateConfiguration updates the connector configuration. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html. +type ConnectorUpdateConfiguration func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateConfigurationRequest)) (*Response, error) + +// ConnectorUpdateConfigurationRequest configures the Connector Update Configuration API request. +type ConnectorUpdateConfigurationRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateConfigurationRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_configuration") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_configuration")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_configuration") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_configuration") + if reader := instrument.RecordRequestBody(ctx, "connector.update_configuration", 
r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_configuration") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateConfiguration) WithContext(v context.Context) func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateConfiguration) WithPretty() func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateConfiguration) WithHuman() func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateConfiguration) WithErrorTrace() func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateConfiguration) WithFilterPath(v ...string) func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateConfiguration) WithHeader(h map[string]string) func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateConfiguration) WithOpaqueID(s string) func(*ConnectorUpdateConfigurationRequest) { + return func(r *ConnectorUpdateConfigurationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_error.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_error.go new file mode 100644 index 000000000..903bd8f16 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_error.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateErrorFunc(t Transport) ConnectorUpdateError { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateErrorRequest)) (*Response, error) { + var r = ConnectorUpdateErrorRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateError updates the error field in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html. +type ConnectorUpdateError func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateErrorRequest)) (*Response, error) + +// ConnectorUpdateErrorRequest configures the Connector Update Error API request. +type ConnectorUpdateErrorRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateErrorRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_error")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_error") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_error") + if reader := instrument.RecordRequestBody(ctx, "connector.update_error", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_error") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateError) WithContext(v context.Context) func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateError) WithPretty() func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateError) WithHuman() func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateError) WithErrorTrace() func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateError) WithFilterPath(v ...string) func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateError) WithHeader(h map[string]string) func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateError) WithOpaqueID(s string) func(*ConnectorUpdateErrorRequest) { + return func(r *ConnectorUpdateErrorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_features.go new file mode 100644 index 000000000..7038dc8b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_features.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
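The Instrument plumbing repeated in every Do method above follows one fixed sequence: Start/Close bracket the call, RecordPathPart fires while the path is built, RecordRequestBody may wrap the body, and BeforeRequest/AfterRequest/RecordError surround transport.Perform. A minimal logging implementation is sketched below; the method set and signatures are inferred from those call sites only (the library's real Instrumentation interface, such as the OpenTelemetry-backed one in elastic-transport-go, may require additional methods), so treat it as a shape illustration rather than a drop-in.

package main

import (
	"context"
	"io"
	"log"
	"net/http"
)

// logInstrumentation mirrors the calls made by the generated Do methods.
// Signatures are inferred from the call sites, not from the interface itself.
type logInstrumentation struct{}

func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
	log.Printf("start %s", name)
	return ctx
}

func (logInstrumentation) Close(ctx context.Context) {}

func (logInstrumentation) RecordError(ctx context.Context, err error) {
	log.Printf("error: %s", err)
}

func (logInstrumentation) RecordPathPart(ctx context.Context, part, value string) {
	log.Printf("path %s=%s", part, value)
}

// Returning nil leaves req.Body untouched; the generated code only swaps
// the body when a non-nil reader comes back.
func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, body io.Reader) io.ReadCloser {
	return nil
}

func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string) {}

func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {
	log.Printf("%s %s -> %s/%s", req.Method, req.URL.Path, system, endpoint)
}

func main() {
	var i logInstrumentation
	i.RecordPathPart(context.Background(), "connector_id", "my-connector")
}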
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateFeaturesFunc(t Transport) ConnectorUpdateFeatures { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFeaturesRequest)) (*Response, error) { + var r = ConnectorUpdateFeaturesRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateFeatures updates the connector features in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-features-api.html. +type ConnectorUpdateFeatures func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFeaturesRequest)) (*Response, error) + +// ConnectorUpdateFeaturesRequest configures the Connector Update Features API request. +type ConnectorUpdateFeaturesRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateFeaturesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_features")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_features") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_features") + if reader := instrument.RecordRequestBody(ctx, "connector.update_features", r.Body); reader != nil { + req.Body = reader + } + } + res, err 
:= transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_features") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateFeatures) WithContext(v context.Context) func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateFeatures) WithPretty() func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateFeatures) WithHuman() func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateFeatures) WithErrorTrace() func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateFeatures) WithFilterPath(v ...string) func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateFeatures) WithHeader(h map[string]string) func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateFeatures) WithOpaqueID(s string) func(*ConnectorUpdateFeaturesRequest) { + return func(r *ConnectorUpdateFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering.go new file mode 100644 index 000000000..2225de8a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
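Callers usually reach these endpoints through the function values the generator wires into the client's API root rather than by filling in the request structs by hand. The fragment below assumes, since the generated api._.go is not part of this hunk, that the connector functions are grouped under a Connector namespace the way other esapi groups are; the body and connector ID are placeholders, and the argument order mirrors the generated signature (body first, then connector_id, then options).

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client error: %s", err)
	}

	// Assumed namespace: es.Connector.UpdateFiltering, following the esapi
	// convention for grouped APIs.
	res, err := es.Connector.UpdateFiltering(
		strings.NewReader(`{"rules":[]}`), // hypothetical draft filtering rules
		"my-connector",                    // hypothetical connector ID
		es.Connector.UpdateFiltering.WithContext(context.Background()),
		es.Connector.UpdateFiltering.WithFilterPath("result"),
	)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.StatusCode)
}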
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateFilteringFunc(t Transport) ConnectorUpdateFiltering { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFilteringRequest)) (*Response, error) { + var r = ConnectorUpdateFilteringRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateFiltering updates the filtering field in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html. +type ConnectorUpdateFiltering func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFilteringRequest)) (*Response, error) + +// ConnectorUpdateFilteringRequest configures the Connector Update Filtering API request. +type ConnectorUpdateFilteringRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateFilteringRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_filtering") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_filtering")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_filtering") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_filtering") + if reader := instrument.RecordRequestBody(ctx, "connector.update_filtering", r.Body); reader != nil { + req.Body = reader + } + 
} + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_filtering") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateFiltering) WithContext(v context.Context) func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateFiltering) WithPretty() func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateFiltering) WithHuman() func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateFiltering) WithErrorTrace() func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateFiltering) WithFilterPath(v ...string) func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateFiltering) WithHeader(h map[string]string) func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateFiltering) WithOpaqueID(s string) func(*ConnectorUpdateFilteringRequest) { + return func(r *ConnectorUpdateFilteringRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering_validation.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering_validation.go new file mode 100644 index 000000000..1c6ab2b0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_filtering_validation.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateFilteringValidationFunc(t Transport) ConnectorUpdateFilteringValidation { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFilteringValidationRequest)) (*Response, error) { + var r = ConnectorUpdateFilteringValidationRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateFilteringValidation updates the validation info of the draft filtering rules. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html. +type ConnectorUpdateFilteringValidation func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateFilteringValidationRequest)) (*Response, error) + +// ConnectorUpdateFilteringValidationRequest configures the Connector Update Filtering Validation API request. +type ConnectorUpdateFilteringValidationRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateFilteringValidationRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_filtering_validation") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_filtering") + 1 + len("_validation")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_filtering") + path.WriteString("/") + path.WriteString("_validation") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_filtering_validation") + if reader := instrument.RecordRequestBody(ctx, "connector.update_filtering_validation", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_filtering_validation") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateFilteringValidation) WithContext(v context.Context) func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateFilteringValidation) WithPretty() func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateFilteringValidation) WithHuman() func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateFilteringValidation) WithErrorTrace() func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateFilteringValidation) WithFilterPath(v ...string) func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateFilteringValidation) WithHeader(h map[string]string) func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateFilteringValidation) WithOpaqueID(s string) func(*ConnectorUpdateFilteringValidationRequest) { + return func(r *ConnectorUpdateFilteringValidationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_index_name.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_index_name.go new file mode 100644 index 000000000..f67204f14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_index_name.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateIndexNameFunc(t Transport) ConnectorUpdateIndexName { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateIndexNameRequest)) (*Response, error) { + var r = ConnectorUpdateIndexNameRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateIndexName updates the index name of the connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html. +type ConnectorUpdateIndexName func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateIndexNameRequest)) (*Response, error) + +// ConnectorUpdateIndexNameRequest configures the Connector Update Index Name API request. +type ConnectorUpdateIndexNameRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ConnectorUpdateIndexNameRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_index_name") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_index_name")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_index_name") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_index_name") + if reader := instrument.RecordRequestBody(ctx, "connector.update_index_name", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_index_name") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateIndexName) WithContext(v context.Context) func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateIndexName) WithPretty() func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateIndexName) WithHuman() func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateIndexName) WithErrorTrace() func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorUpdateIndexName) WithFilterPath(v ...string) func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateIndexName) WithHeader(h map[string]string) func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateIndexName) WithOpaqueID(s string) func(*ConnectorUpdateIndexNameRequest) { + return func(r *ConnectorUpdateIndexNameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_name.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_name.go new file mode 100644 index 000000000..3e6fc52bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_name.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateNameFunc(t Transport) ConnectorUpdateName { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateNameRequest)) (*Response, error) { + var r = ConnectorUpdateNameRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateName updates the name and/or description fields in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html. +type ConnectorUpdateName func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateNameRequest)) (*Response, error) + +// ConnectorUpdateNameRequest configures the Connector Update Name API request. +type ConnectorUpdateNameRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ConnectorUpdateNameRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_name") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_name")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_name") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_name") + if reader := instrument.RecordRequestBody(ctx, "connector.update_name", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_name") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateName) WithContext(v context.Context) func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateName) WithPretty() func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateName) WithHuman() func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateName) WithErrorTrace() func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorUpdateName) WithFilterPath(v ...string) func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateName) WithHeader(h map[string]string) func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateName) WithOpaqueID(s string) func(*ConnectorUpdateNameRequest) { + return func(r *ConnectorUpdateNameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_native.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_native.go new file mode 100644 index 000000000..05b2b0c51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_native.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateNativeFunc(t Transport) ConnectorUpdateNative { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateNativeRequest)) (*Response, error) { + var r = ConnectorUpdateNativeRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateNative updates the is_native flag of the connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/connector-apis.html. +type ConnectorUpdateNative func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateNativeRequest)) (*Response, error) + +// ConnectorUpdateNativeRequest configures the Connector Update Native API request. +type ConnectorUpdateNativeRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ConnectorUpdateNativeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_native") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_native")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_native") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_native") + if reader := instrument.RecordRequestBody(ctx, "connector.update_native", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_native") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateNative) WithContext(v context.Context) func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateNative) WithPretty() func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateNative) WithHuman() func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateNative) WithErrorTrace() func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorUpdateNative) WithFilterPath(v ...string) func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateNative) WithHeader(h map[string]string) func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateNative) WithOpaqueID(s string) func(*ConnectorUpdateNativeRequest) { + return func(r *ConnectorUpdateNativeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_pipeline.go new file mode 100644 index 000000000..3a438cd0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_pipeline.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdatePipelineFunc(t Transport) ConnectorUpdatePipeline { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdatePipelineRequest)) (*Response, error) { + var r = ConnectorUpdatePipelineRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdatePipeline updates the pipeline field in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html. +type ConnectorUpdatePipeline func(body io.Reader, connector_id string, o ...func(*ConnectorUpdatePipelineRequest)) (*Response, error) + +// ConnectorUpdatePipelineRequest configures the Connector Update Pipeline API request. +type ConnectorUpdatePipelineRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
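+//
+// When a Body is supplied and no Content-Type header has been set, the
+// request defaults to JSON (see the headerContentType check below); a
+// value supplied through WithHeader takes precedence, because caller
+// headers are merged before that default is applied.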
+func (r ConnectorUpdatePipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_pipeline")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_pipeline") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_pipeline") + if reader := instrument.RecordRequestBody(ctx, "connector.update_pipeline", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdatePipeline) WithContext(v context.Context) func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdatePipeline) WithPretty() func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdatePipeline) WithHuman() func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdatePipeline) WithErrorTrace() func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorUpdatePipeline) WithFilterPath(v ...string) func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdatePipeline) WithHeader(h map[string]string) func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdatePipeline) WithOpaqueID(s string) func(*ConnectorUpdatePipelineRequest) { + return func(r *ConnectorUpdatePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_scheduling.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_scheduling.go new file mode 100644 index 000000000..9cd5219a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_scheduling.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateSchedulingFunc(t Transport) ConnectorUpdateScheduling { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateSchedulingRequest)) (*Response, error) { + var r = ConnectorUpdateSchedulingRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateScheduling updates the scheduling field in the connector document. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html. +type ConnectorUpdateScheduling func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateSchedulingRequest)) (*Response, error) + +// ConnectorUpdateSchedulingRequest configures the Connector Update Scheduling API request. +type ConnectorUpdateSchedulingRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
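+//
+// The context passed in (normally r.ctx, as set by WithContext) may be
+// wrapped by Instrumentation.Start when instrumentation is enabled;
+// otherwise providedCtx is used unchanged, so cancellation and deadlines
+// always propagate to the underlying HTTP request.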
+func (r ConnectorUpdateSchedulingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_scheduling") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_scheduling")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_scheduling") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_scheduling") + if reader := instrument.RecordRequestBody(ctx, "connector.update_scheduling", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_scheduling") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateScheduling) WithContext(v context.Context) func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateScheduling) WithPretty() func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ConnectorUpdateScheduling) WithHuman() func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateScheduling) WithErrorTrace() func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ConnectorUpdateScheduling) WithFilterPath(v ...string) func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateScheduling) WithHeader(h map[string]string) func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateScheduling) WithOpaqueID(s string) func(*ConnectorUpdateSchedulingRequest) { + return func(r *ConnectorUpdateSchedulingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_service_type.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_service_type.go new file mode 100644 index 000000000..98b912fcf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_service_type.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateServiceDocumentTypeFunc(t Transport) ConnectorUpdateServiceDocumentType { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateServiceDocumentTypeRequest)) (*Response, error) { + var r = ConnectorUpdateServiceDocumentTypeRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateServiceDocumentType updates the service type of the connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html. +type ConnectorUpdateServiceDocumentType func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateServiceDocumentTypeRequest)) (*Response, error) + +// ConnectorUpdateServiceDocumentTypeRequest configures the Connector Update Service Document Type API request. 
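+//
+// Note the Go naming: the REST endpoint is connector.update_service_type
+// (as the instrumentation labels below show), but the generator appears to
+// render the "type" token as "DocumentType", hence
+// ConnectorUpdateServiceDocumentType rather than ConnectorUpdateServiceType.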
+type ConnectorUpdateServiceDocumentTypeRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateServiceDocumentTypeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_service_type") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_service_type")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_service_type") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_service_type") + if reader := instrument.RecordRequestBody(ctx, "connector.update_service_type", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_service_type") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateServiceDocumentType) WithContext(v context.Context) func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateServiceDocumentType) WithPretty() func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ConnectorUpdateServiceDocumentType) WithHuman() func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateServiceDocumentType) WithErrorTrace() func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateServiceDocumentType) WithFilterPath(v ...string) func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateServiceDocumentType) WithHeader(h map[string]string) func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateServiceDocumentType) WithOpaqueID(s string) func(*ConnectorUpdateServiceDocumentTypeRequest) { + return func(r *ConnectorUpdateServiceDocumentTypeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_status.go new file mode 100644 index 000000000..fe3bd946e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.connector.update_status.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newConnectorUpdateStatusFunc(t Transport) ConnectorUpdateStatus { + return func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateStatusRequest)) (*Response, error) { + var r = ConnectorUpdateStatusRequest{Body: body, ConnectorID: connector_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ConnectorUpdateStatus updates the status of the connector. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html. 
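+//
+// A minimal sketch, assuming a client value named es and an illustrative
+// connector ID; the body shape here is an assumption, not taken from this
+// file:
+//
+//	res, err := es.Connector.UpdateStatus(
+//		strings.NewReader(`{"status":"configured"}`),
+//		"my-connector",
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}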
+type ConnectorUpdateStatus func(body io.Reader, connector_id string, o ...func(*ConnectorUpdateStatusRequest)) (*Response, error) + +// ConnectorUpdateStatusRequest configures the Connector Update Status API request. +type ConnectorUpdateStatusRequest struct { + Body io.Reader + + ConnectorID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ConnectorUpdateStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_connector") + 1 + len(r.ConnectorID) + 1 + len("_status")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString(r.ConnectorID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "connector_id", r.ConnectorID) + } + path.WriteString("/") + path.WriteString("_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_status") + if reader := instrument.RecordRequestBody(ctx, "connector.update_status", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ConnectorUpdateStatus) WithContext(v context.Context) func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ConnectorUpdateStatus) WithPretty() func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ConnectorUpdateStatus) WithHuman() func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ConnectorUpdateStatus) WithErrorTrace() func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ConnectorUpdateStatus) WithFilterPath(v ...string) func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ConnectorUpdateStatus) WithHeader(h map[string]string) func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ConnectorUpdateStatus) WithOpaqueID(s string) func(*ConnectorUpdateStatusRequest) { + return func(r *ConnectorUpdateStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.count.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.count.go new file mode 100644 index 000000000..cfeea1704 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.count.go @@ -0,0 +1,419 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newCountFunc(t Transport) Count { + return func(o ...func(*CountRequest)) (*Response, error) { + var r = CountRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Count returns number of documents matching a query. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html. +type Count func(o ...func(*CountRequest)) (*Response, error) + +// CountRequest configures the Count API request. 
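+//
+// The fields below are populated through the functional options further
+// down; a minimal sketch, assuming a client value named es:
+//
+//	res, err := es.Count(
+//		es.Count.WithIndex("my-index"),
+//		es.Count.WithBody(strings.NewReader(`{"query":{"match_all":{}}}`)),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}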
+type CountRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + Analyzer string + AnalyzeWildcard *bool + DefaultOperator string + Df string + ExpandWildcards string + IgnoreThrottled *bool + IgnoreUnavailable *bool + Lenient *bool + MinScore *int + Preference string + Query string + Routing []string + TerminateAfter *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CountRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "count") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_count")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_count") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.MinScore != nil { + params["min_score"] = strconv.FormatInt(int64(*r.MinScore), 10) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Query != "" { + params["q"] = r.Query + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.TerminateAfter != nil { + params["terminate_after"] = strconv.FormatInt(int64(*r.TerminateAfter), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + 
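+	// When instrumentation is enabled, RecordRequestBody may return a
+	// replacement reader so the span can capture the payload; the request
+	// body is swapped for that reader before the transport performs the
+	// call.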
+ if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "count") + if reader := instrument.RecordRequestBody(ctx, "count", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "count") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Count) WithContext(v context.Context) func(*CountRequest) { + return func(r *CountRequest) { + r.ctx = v + } +} + +// WithBody - A query to restrict the results specified with the Query DSL (optional). +func (f Count) WithBody(v io.Reader) func(*CountRequest) { + return func(r *CountRequest) { + r.Body = v + } +} + +// WithIndex - a list of indices to restrict the results. +func (f Count) WithIndex(v ...string) func(*CountRequest) { + return func(r *CountRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f Count) WithAllowNoIndices(v bool) func(*CountRequest) { + return func(r *CountRequest) { + r.AllowNoIndices = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f Count) WithAnalyzer(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f Count) WithAnalyzeWildcard(v bool) func(*CountRequest) { + return func(r *CountRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithDefaultOperator - the default operator for query string query (and or or). +func (f Count) WithDefaultOperator(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.DefaultOperator = v + } +} + +// WithDf - the field to use as default where no field prefix is given in the query string. +func (f Count) WithDf(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.Df = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f Count) WithExpandWildcards(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +func (f Count) WithIgnoreThrottled(v bool) func(*CountRequest) { + return func(r *CountRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f Count) WithIgnoreUnavailable(v bool) func(*CountRequest) { + return func(r *CountRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. +func (f Count) WithLenient(v bool) func(*CountRequest) { + return func(r *CountRequest) { + r.Lenient = &v + } +} + +// WithMinScore - include only documents with a specific `_score` value in the result. 
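+// Despite the wording, this acts as a lower bound: only documents whose
+// _score is at least v contribute to the count.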
+func (f Count) WithMinScore(v int) func(*CountRequest) { + return func(r *CountRequest) { + r.MinScore = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f Count) WithPreference(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.Preference = v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f Count) WithQuery(v string) func(*CountRequest) { + return func(r *CountRequest) { + r.Query = v + } +} + +// WithRouting - a list of specific routing values. +func (f Count) WithRouting(v ...string) func(*CountRequest) { + return func(r *CountRequest) { + r.Routing = v + } +} + +// WithTerminateAfter - the maximum count for each shard, upon reaching which the query execution will terminate early. +func (f Count) WithTerminateAfter(v int) func(*CountRequest) { + return func(r *CountRequest) { + r.TerminateAfter = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Count) WithPretty() func(*CountRequest) { + return func(r *CountRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Count) WithHuman() func(*CountRequest) { + return func(r *CountRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Count) WithErrorTrace() func(*CountRequest) { + return func(r *CountRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Count) WithFilterPath(v ...string) func(*CountRequest) { + return func(r *CountRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Count) WithHeader(h map[string]string) func(*CountRequest) { + return func(r *CountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Count) WithOpaqueID(s string) func(*CountRequest) { + return func(r *CountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.create.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.create.go new file mode 100644 index 000000000..63575133b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.create.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newCreateFunc(t Transport) Create { + return func(index string, id string, body io.Reader, o ...func(*CreateRequest)) (*Response, error) { + var r = CreateRequest{Index: index, DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Create creates a new document in the index. +// +// Returns a 409 response when a document with a same ID already exists in the index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html. +type Create func(index string, id string, body io.Reader, o ...func(*CreateRequest)) (*Response, error) + +// CreateRequest configures the Create API request. +type CreateRequest struct { + Index string + DocumentID string + + Body io.Reader + + IncludeSourceOnError *bool + Pipeline string + Refresh string + RequireAlias *bool + RequireDataStream *bool + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CreateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "create") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_create") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_create") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + + if r.Pipeline != "" { + params["pipeline"] = r.Pipeline + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.RequireAlias != nil { + params["require_alias"] = strconv.FormatBool(*r.RequireAlias) + } + + if r.RequireDataStream != nil { + params["require_data_stream"] = strconv.FormatBool(*r.RequireDataStream) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if 
len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "create") + if reader := instrument.RecordRequestBody(ctx, "create", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "create") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Create) WithContext(v context.Context) func(*CreateRequest) { + return func(r *CreateRequest) { + r.ctx = v + } +} + +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Create) WithIncludeSourceOnError(v bool) func(*CreateRequest) { + return func(r *CreateRequest) { + r.IncludeSourceOnError = &v + } +} + +// WithPipeline - the pipeline ID to preprocess incoming documents with. +func (f Create) WithPipeline(v string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.Pipeline = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.. +func (f Create) WithRefresh(v string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.Refresh = v + } +} + +// WithRequireAlias - when true, requires destination to be an alias. default is false. +func (f Create) WithRequireAlias(v bool) func(*CreateRequest) { + return func(r *CreateRequest) { + r.RequireAlias = &v + } +} + +// WithRequireDataStream - when true, requires destination to be a data stream (existing or to be created). default is false. +func (f Create) WithRequireDataStream(v bool) func(*CreateRequest) { + return func(r *CreateRequest) { + r.RequireDataStream = &v + } +} + +// WithRouting - specific routing value. +func (f Create) WithRouting(v string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.Routing = v + } +} + +// WithTimeout - explicit operation timeout. +func (f Create) WithTimeout(v time.Duration) func(*CreateRequest) { + return func(r *CreateRequest) { + r.Timeout = v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f Create) WithVersion(v int) func(*CreateRequest) { + return func(r *CreateRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. 
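+// Accepted values are the standard Elasticsearch version types, commonly
+// "external" or "external_gte"; the value is passed through unchanged as
+// the version_type query parameter.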
+func (f Create) WithVersionType(v string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.VersionType = v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the index operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f Create) WithWaitForActiveShards(v string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Create) WithPretty() func(*CreateRequest) { + return func(r *CreateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Create) WithHuman() func(*CreateRequest) { + return func(r *CreateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Create) WithErrorTrace() func(*CreateRequest) { + return func(r *CreateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Create) WithFilterPath(v ...string) func(*CreateRequest) { + return func(r *CreateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Create) WithHeader(h map[string]string) func(*CreateRequest) { + return func(r *CreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Create) WithOpaqueID(s string) func(*CreateRequest) { + return func(r *CreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.delete_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.delete_dangling_index.go new file mode 100644 index 000000000..4103f6349 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.delete_dangling_index.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newDanglingIndicesDeleteDanglingIndexFunc(t Transport) DanglingIndicesDeleteDanglingIndex { + return func(index_uuid string, o ...func(*DanglingIndicesDeleteDanglingIndexRequest)) (*Response, error) { + var r = DanglingIndicesDeleteDanglingIndexRequest{IndexUUID: index_uuid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DanglingIndicesDeleteDanglingIndex deletes the specified dangling index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html. +type DanglingIndicesDeleteDanglingIndex func(index_uuid string, o ...func(*DanglingIndicesDeleteDanglingIndexRequest)) (*Response, error) + +// DanglingIndicesDeleteDanglingIndexRequest configures the Dangling Indices Delete Dangling Index API request. +type DanglingIndicesDeleteDanglingIndexRequest struct { + IndexUUID string + + AcceptDataLoss *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r DanglingIndicesDeleteDanglingIndexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.delete_dangling_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_dangling") + 1 + len(r.IndexUUID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_dangling") + path.WriteString("/") + path.WriteString(r.IndexUUID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index_uuid", r.IndexUUID) + } + + params = make(map[string]string) + + if r.AcceptDataLoss != nil { + params["accept_data_loss"] = strconv.FormatBool(*r.AcceptDataLoss) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"dangling_indices.delete_dangling_index") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.delete_dangling_index") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f DanglingIndicesDeleteDanglingIndex) WithContext(v context.Context) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.ctx = v + } +} + +// WithAcceptDataLoss - must be set to true in order to delete the dangling index. +func (f DanglingIndicesDeleteDanglingIndex) WithAcceptDataLoss(v bool) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.AcceptDataLoss = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f DanglingIndicesDeleteDanglingIndex) WithMasterTimeout(v time.Duration) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f DanglingIndicesDeleteDanglingIndex) WithTimeout(v time.Duration) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f DanglingIndicesDeleteDanglingIndex) WithPretty() func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f DanglingIndicesDeleteDanglingIndex) WithHuman() func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f DanglingIndicesDeleteDanglingIndex) WithErrorTrace() func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f DanglingIndicesDeleteDanglingIndex) WithFilterPath(v ...string) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DanglingIndicesDeleteDanglingIndex) WithHeader(h map[string]string) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f DanglingIndicesDeleteDanglingIndex) WithOpaqueID(s string) func(*DanglingIndicesDeleteDanglingIndexRequest) { + return func(r *DanglingIndicesDeleteDanglingIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.import_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.import_dangling_index.go new file mode 100644 index 000000000..ececdef3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.import_dangling_index.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newDanglingIndicesImportDanglingIndexFunc(t Transport) DanglingIndicesImportDanglingIndex { + return func(index_uuid string, o ...func(*DanglingIndicesImportDanglingIndexRequest)) (*Response, error) { + var r = DanglingIndicesImportDanglingIndexRequest{IndexUUID: index_uuid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DanglingIndicesImportDanglingIndex imports the specified dangling index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html. +type DanglingIndicesImportDanglingIndex func(index_uuid string, o ...func(*DanglingIndicesImportDanglingIndexRequest)) (*Response, error) + +// DanglingIndicesImportDanglingIndexRequest configures the Dangling Indices Import Dangling Index API request. +type DanglingIndicesImportDanglingIndexRequest struct { + IndexUUID string + + AcceptDataLoss *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r DanglingIndicesImportDanglingIndexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.import_dangling_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_dangling") + 1 + len(r.IndexUUID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_dangling") + path.WriteString("/") + path.WriteString(r.IndexUUID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index_uuid", r.IndexUUID) + } + + params = make(map[string]string) + + if r.AcceptDataLoss != nil { + params["accept_data_loss"] = strconv.FormatBool(*r.AcceptDataLoss) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "dangling_indices.import_dangling_index") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.import_dangling_index") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f DanglingIndicesImportDanglingIndex) WithContext(v context.Context) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.ctx = v + } +} + +// WithAcceptDataLoss - must be set to true in order to import the dangling index. +func (f DanglingIndicesImportDanglingIndex) WithAcceptDataLoss(v bool) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.AcceptDataLoss = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f DanglingIndicesImportDanglingIndex) WithMasterTimeout(v time.Duration) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f DanglingIndicesImportDanglingIndex) WithTimeout(v time.Duration) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f DanglingIndicesImportDanglingIndex) WithPretty() func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f DanglingIndicesImportDanglingIndex) WithHuman() func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f DanglingIndicesImportDanglingIndex) WithErrorTrace() func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f DanglingIndicesImportDanglingIndex) WithFilterPath(v ...string) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DanglingIndicesImportDanglingIndex) WithHeader(h map[string]string) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f DanglingIndicesImportDanglingIndex) WithOpaqueID(s string) func(*DanglingIndicesImportDanglingIndexRequest) { + return func(r *DanglingIndicesImportDanglingIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.list_dangling_indices.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.list_dangling_indices.go new file mode 100644 index 000000000..fd6e88c77 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.dangling_indices.list_dangling_indices.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
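The list endpoint wired up in the next file is the read-only counterpart of the delete/import calls above; its response body carries the index UUIDs those calls consume. A minimal sketch under the same assumptions (default client, reachable cluster):

package main

import (
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("error creating client: %s", err)
	}

	// List dangling indices; pretty-print the JSON body for inspection.
	res, err := es.DanglingIndices.ListDanglingIndices(
		es.DanglingIndices.ListDanglingIndices.WithPretty(),
	)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}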
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newDanglingIndicesListDanglingIndicesFunc(t Transport) DanglingIndicesListDanglingIndices { + return func(o ...func(*DanglingIndicesListDanglingIndicesRequest)) (*Response, error) { + var r = DanglingIndicesListDanglingIndicesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DanglingIndicesListDanglingIndices returns all dangling indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html. +type DanglingIndicesListDanglingIndices func(o ...func(*DanglingIndicesListDanglingIndicesRequest)) (*Response, error) + +// DanglingIndicesListDanglingIndicesRequest configures the Dangling Indices List Dangling Indices API request. +type DanglingIndicesListDanglingIndicesRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r DanglingIndicesListDanglingIndicesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.list_dangling_indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_dangling")) + path.WriteString("http://") + path.WriteString("/_dangling") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "dangling_indices.list_dangling_indices") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.list_dangling_indices") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f DanglingIndicesListDanglingIndices) WithContext(v context.Context) func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f DanglingIndicesListDanglingIndices) WithPretty() func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f DanglingIndicesListDanglingIndices) WithHuman() func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f DanglingIndicesListDanglingIndices) WithErrorTrace() func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f DanglingIndicesListDanglingIndices) WithFilterPath(v ...string) func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DanglingIndicesListDanglingIndices) WithHeader(h map[string]string) func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f DanglingIndicesListDanglingIndices) WithOpaqueID(s string) func(*DanglingIndicesListDanglingIndicesRequest) { + return func(r *DanglingIndicesListDanglingIndicesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete.go new file mode 100644 index 000000000..fb7cf0590 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete.go @@ -0,0 +1,328 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
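The document Delete API added in the next file takes the index name and document ID as positional arguments, with everything else supplied as options. A minimal sketch, assuming a default client and placeholder index/ID values:

package main

import (
	"context"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("error creating client: %s", err)
	}

	// Delete document "1" from "my-index" (both placeholders), waiting
	// for a refresh so the deletion is visible to the next search.
	res, err := es.Delete(
		"my-index", "1",
		es.Delete.WithRefresh("wait_for"),
		es.Delete.WithContext(context.Background()),
	)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode == 404 {
		log.Println("document not found")
	}
	log.Println(res.String())
}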
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newDeleteFunc(t Transport) Delete { + return func(index string, id string, o ...func(*DeleteRequest)) (*Response, error) { + var r = DeleteRequest{Index: index, DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Delete removes a document from the index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html. +type Delete func(index string, id string, o ...func(*DeleteRequest)) (*Response, error) + +// DeleteRequest configures the Delete API request. +type DeleteRequest struct { + Index string + DocumentID string + + IfPrimaryTerm *int + IfSeqNo *int + Refresh string + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r DeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_doc") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.IfPrimaryTerm != nil { + params["if_primary_term"] = strconv.FormatInt(int64(*r.IfPrimaryTerm), 10) + } + + if r.IfSeqNo != nil { + params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) 
> 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "delete")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "delete")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f Delete) WithContext(v context.Context) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.ctx = v
+	}
+}
+
+// WithIfPrimaryTerm - only perform the delete operation if the last operation that has changed the document has the specified primary term.
+func (f Delete) WithIfPrimaryTerm(v int) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.IfPrimaryTerm = &v
+	}
+}
+
+// WithIfSeqNo - only perform the delete operation if the last operation that has changed the document has the specified sequence number.
+func (f Delete) WithIfSeqNo(v int) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.IfSeqNo = &v
+	}
+}
+
+// WithRefresh - if `true`, refresh the affected shards to make this operation visible to search; if `wait_for`, wait for a refresh to make this operation visible to search; if `false` (the default), do nothing with refreshes.
+func (f Delete) WithRefresh(v string) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Refresh = v
+	}
+}
+
+// WithRouting - specific routing value.
+func (f Delete) WithRouting(v string) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Routing = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f Delete) WithTimeout(v time.Duration) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithVersion - explicit version number for concurrency control.
+func (f Delete) WithVersion(v int) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Version = &v
+	}
+}
+
+// WithVersionType - specific version type.
+func (f Delete) WithVersionType(v string) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.VersionType = v
+	}
+}
+
+// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the delete operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1).
+func (f Delete) WithWaitForActiveShards(v string) func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.WaitForActiveShards = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f Delete) WithPretty() func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f Delete) WithHuman() func(*DeleteRequest) {
+	return func(r *DeleteRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f Delete) WithErrorTrace() func(*DeleteRequest) { + return func(r *DeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Delete) WithFilterPath(v ...string) func(*DeleteRequest) { + return func(r *DeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Delete) WithHeader(h map[string]string) func(*DeleteRequest) { + return func(r *DeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Delete) WithOpaqueID(s string) func(*DeleteRequest) { + return func(r *DeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query.go new file mode 100644 index 000000000..c72436a44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query.go @@ -0,0 +1,590 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newDeleteByQueryFunc(t Transport) DeleteByQuery { + return func(index []string, body io.Reader, o ...func(*DeleteByQueryRequest)) (*Response, error) { + var r = DeleteByQueryRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DeleteByQuery deletes documents matching the provided query. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html. +type DeleteByQuery func(index []string, body io.Reader, o ...func(*DeleteByQueryRequest)) (*Response, error) + +// DeleteByQueryRequest configures the Delete By Query API request. 
+type DeleteByQueryRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + Analyzer string + AnalyzeWildcard *bool + Conflicts string + DefaultOperator string + Df string + ExpandWildcards string + From *int + IgnoreUnavailable *bool + Lenient *bool + MaxDocs *int + Preference string + Query string + Refresh *bool + RequestCache *bool + RequestsPerSecond *int + Routing []string + Scroll time.Duration + ScrollSize *int + SearchTimeout time.Duration + SearchType string + Slices interface{} + Sort []string + Stats []string + TerminateAfter *int + Timeout time.Duration + Version *bool + WaitForActiveShards string + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r DeleteByQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_by_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_delete_by_query")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_delete_by_query") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.Conflicts != "" { + params["conflicts"] = r.Conflicts + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.MaxDocs != nil { + params["max_docs"] = strconv.FormatInt(int64(*r.MaxDocs), 10) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.RequestCache != nil { + params["request_cache"] = strconv.FormatBool(*r.RequestCache) + } + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.Scroll != 0 { + params["scroll"] = formatDuration(r.Scroll) + } + + if r.ScrollSize != nil { + params["scroll_size"] = strconv.FormatInt(int64(*r.ScrollSize), 10) + } + + if r.SearchTimeout != 0 { + params["search_timeout"] = formatDuration(r.SearchTimeout) + } + + if 
r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.Slices != nil { + params["slices"] = fmt.Sprintf("%v", r.Slices) + } + + if len(r.Sort) > 0 { + params["sort"] = strings.Join(r.Sort, ",") + } + + if len(r.Stats) > 0 { + params["stats"] = strings.Join(r.Stats, ",") + } + + if r.TerminateAfter != nil { + params["terminate_after"] = strconv.FormatInt(int64(*r.TerminateAfter), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Version != nil { + params["version"] = strconv.FormatBool(*r.Version) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "delete_by_query") + if reader := instrument.RecordRequestBody(ctx, "delete_by_query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "delete_by_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f DeleteByQuery) WithContext(v context.Context) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f DeleteByQuery) WithAllowNoIndices(v bool) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + r.AllowNoIndices = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f DeleteByQuery) WithAnalyzer(v string) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f DeleteByQuery) WithAnalyzeWildcard(v bool) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithConflicts - what to do when the delete by query hits version conflicts?. 
+func (f DeleteByQuery) WithConflicts(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Conflicts = v
+	}
+}
+
+// WithDefaultOperator - the default operator for query string query (AND or OR).
+func (f DeleteByQuery) WithDefaultOperator(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.DefaultOperator = v
+	}
+}
+
+// WithDf - the field to use as default where no field prefix is given in the query string.
+func (f DeleteByQuery) WithDf(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Df = v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.
+func (f DeleteByQuery) WithExpandWildcards(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithFrom - starting offset (default: 0).
+func (f DeleteByQuery) WithFrom(v int) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.From = &v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f DeleteByQuery) WithIgnoreUnavailable(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored.
+func (f DeleteByQuery) WithLenient(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Lenient = &v
+	}
+}
+
+// WithMaxDocs - maximum number of documents to process (default: all documents).
+func (f DeleteByQuery) WithMaxDocs(v int) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.MaxDocs = &v
+	}
+}
+
+// WithPreference - specify the node or shard the operation should be performed on (default: random).
+func (f DeleteByQuery) WithPreference(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Preference = v
+	}
+}
+
+// WithQuery - query in the Lucene query string syntax.
+func (f DeleteByQuery) WithQuery(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Query = v
+	}
+}
+
+// WithRefresh - should the affected indexes be refreshed?
+func (f DeleteByQuery) WithRefresh(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Refresh = &v
+	}
+}
+
+// WithRequestCache - specify if request cache should be used for this request or not, defaults to index level setting.
+func (f DeleteByQuery) WithRequestCache(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.RequestCache = &v
+	}
+}
+
+// WithRequestsPerSecond - the throttle for this request in sub-requests per second. -1 means no throttle.
+func (f DeleteByQuery) WithRequestsPerSecond(v int) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.RequestsPerSecond = &v
+	}
+}
+
+// WithRouting - a list of specific routing values.
+func (f DeleteByQuery) WithRouting(v ...string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Routing = v
+	}
+}
+
+// WithScroll - specify how long a consistent view of the index should be maintained for scrolled search.
+func (f DeleteByQuery) WithScroll(v time.Duration) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Scroll = v
+	}
+}
+
+// WithScrollSize - size on the scroll request powering the delete by query.
+func (f DeleteByQuery) WithScrollSize(v int) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.ScrollSize = &v
+	}
+}
+
+// WithSearchTimeout - explicit timeout for each search request. defaults to no timeout.
+func (f DeleteByQuery) WithSearchTimeout(v time.Duration) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.SearchTimeout = v
+	}
+}
+
+// WithSearchType - search operation type.
+func (f DeleteByQuery) WithSearchType(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.SearchType = v
+	}
+}
+
+// WithSlices - the number of slices this task should be divided into. defaults to 1, meaning the task isn't sliced into subtasks. can be set to `auto`.
+func (f DeleteByQuery) WithSlices(v interface{}) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Slices = v
+	}
+}
+
+// WithSort - a list of <field>:<direction> pairs.
+func (f DeleteByQuery) WithSort(v ...string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Sort = v
+	}
+}
+
+// WithStats - specific 'tag' of the request for logging and statistical purposes.
+func (f DeleteByQuery) WithStats(v ...string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Stats = v
+	}
+}
+
+// WithTerminateAfter - the maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
+func (f DeleteByQuery) WithTerminateAfter(v int) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.TerminateAfter = &v
+	}
+}
+
+// WithTimeout - time each individual bulk request should wait for shards that are unavailable.
+func (f DeleteByQuery) WithTimeout(v time.Duration) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithVersion - specify whether to return document version as part of a hit.
+func (f DeleteByQuery) WithVersion(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Version = &v
+	}
+}
+
+// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the delete by query operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1).
+func (f DeleteByQuery) WithWaitForActiveShards(v string) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.WaitForActiveShards = v
+	}
+}
+
+// WithWaitForCompletion - should the request block until the delete by query is complete.
+func (f DeleteByQuery) WithWaitForCompletion(v bool) func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.WaitForCompletion = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f DeleteByQuery) WithPretty() func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f DeleteByQuery) WithHuman() func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f DeleteByQuery) WithErrorTrace() func(*DeleteByQueryRequest) {
+	return func(r *DeleteByQueryRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f DeleteByQuery) WithFilterPath(v ...string) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DeleteByQuery) WithHeader(h map[string]string) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f DeleteByQuery) WithOpaqueID(s string) func(*DeleteByQueryRequest) { + return func(r *DeleteByQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query_rethrottle.go new file mode 100644 index 000000000..972e018ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_by_query_rethrottle.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newDeleteByQueryRethrottleFunc(t Transport) DeleteByQueryRethrottle { + return func(task_id string, requests_per_second *int, o ...func(*DeleteByQueryRethrottleRequest)) (*Response, error) { + var r = DeleteByQueryRethrottleRequest{TaskID: task_id, RequestsPerSecond: requests_per_second} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DeleteByQueryRethrottle changes the number of requests per second for a particular Delete By Query operation. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html. +type DeleteByQueryRethrottle func(task_id string, requests_per_second *int, o ...func(*DeleteByQueryRethrottleRequest)) (*Response, error) + +// DeleteByQueryRethrottleRequest configures the Delete By Query Rethrottle API request. +type DeleteByQueryRethrottleRequest struct { + TaskID string + + RequestsPerSecond *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r DeleteByQueryRethrottleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_delete_by_query") + 1 + len(r.TaskID) + 1 + len("_rethrottle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_delete_by_query") + path.WriteString("/") + path.WriteString(r.TaskID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_id", r.TaskID) + } + path.WriteString("/") + path.WriteString("_rethrottle") + + params = make(map[string]string) + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "delete_by_query_rethrottle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "delete_by_query_rethrottle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f DeleteByQueryRethrottle) WithContext(v context.Context) func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.ctx = v + } +} + +// WithRequestsPerSecond - the throttle to set on this request in floating sub-requests per second. -1 means set no throttle.. +func (f DeleteByQueryRethrottle) WithRequestsPerSecond(v int) func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.RequestsPerSecond = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f DeleteByQueryRethrottle) WithPretty() func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f DeleteByQueryRethrottle) WithHuman() func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f DeleteByQueryRethrottle) WithErrorTrace() func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f DeleteByQueryRethrottle) WithFilterPath(v ...string) func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DeleteByQueryRethrottle) WithHeader(h map[string]string) func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f DeleteByQueryRethrottle) WithOpaqueID(s string) func(*DeleteByQueryRethrottleRequest) { + return func(r *DeleteByQueryRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_script.go new file mode 100644 index 000000000..18a7e0d6f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.delete_script.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newDeleteScriptFunc(t Transport) DeleteScript { + return func(id string, o ...func(*DeleteScriptRequest)) (*Response, error) { + var r = DeleteScriptRequest{ScriptID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// DeleteScript deletes a script. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html. +type DeleteScript func(id string, o ...func(*DeleteScriptRequest)) (*Response, error) + +// DeleteScriptRequest configures the Delete Script API request. +type DeleteScriptRequest struct { + ScriptID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r DeleteScriptRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_scripts") + 1 + len(r.ScriptID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + path.WriteString(r.ScriptID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ScriptID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "delete_script") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "delete_script") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f DeleteScript) WithContext(v context.Context) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f DeleteScript) WithMasterTimeout(v time.Duration) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f DeleteScript) WithTimeout(v time.Duration) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f DeleteScript) WithPretty() func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f DeleteScript) WithHuman() func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f DeleteScript) WithErrorTrace() func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f DeleteScript) WithFilterPath(v ...string) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f DeleteScript) WithHeader(h map[string]string) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f DeleteScript) WithOpaqueID(s string) func(*DeleteScriptRequest) { + return func(r *DeleteScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists.go new file mode 100644 index 000000000..6d490bd9a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newExistsFunc(t Transport) Exists { + return func(index string, id string, o ...func(*ExistsRequest)) (*Response, error) { + var r = ExistsRequest{Index: index, DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Exists returns information about whether a document exists in an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html. +type Exists func(index string, id string, o ...func(*ExistsRequest)) (*Response, error) + +// ExistsRequest configures the Exists API request. +type ExistsRequest struct { + Index string + DocumentID string + + Preference string + Realtime *bool + Refresh *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + StoredFields []string + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ExistsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "exists") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_doc") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "exists") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "exists") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Exists) WithContext(v context.Context) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.ctx = v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). 
+func (f Exists) WithPreference(v string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Preference = v + } +} + +// WithRealtime - specify whether to perform the operation in realtime or search mode. +func (f Exists) WithRealtime(v bool) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Realtime = &v + } +} + +// WithRefresh - refresh the shard containing the document before performing the operation. +func (f Exists) WithRefresh(v bool) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Refresh = &v + } +} + +// WithRouting - specific routing value. +func (f Exists) WithRouting(v string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f Exists) WithSource(v ...string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f Exists) WithSourceExcludes(v ...string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f Exists) WithSourceIncludes(v ...string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.SourceIncludes = v + } +} + +// WithStoredFields - a list of stored fields to return in the response. +func (f Exists) WithStoredFields(v ...string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.StoredFields = v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f Exists) WithVersion(v int) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f Exists) WithVersionType(v string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Exists) WithPretty() func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Exists) WithHuman() func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Exists) WithErrorTrace() func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Exists) WithFilterPath(v ...string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Exists) WithHeader(h map[string]string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f Exists) WithOpaqueID(s string) func(*ExistsRequest) { + return func(r *ExistsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists_source.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists_source.go new file mode 100644 index 000000000..4da7afc11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.exists_source.go @@ -0,0 +1,339 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newExistsSourceFunc(t Transport) ExistsSource { + return func(index string, id string, o ...func(*ExistsSourceRequest)) (*Response, error) { + var r = ExistsSourceRequest{Index: index, DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ExistsSource returns information about whether a document source exists in an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html. +type ExistsSource func(index string, id string, o ...func(*ExistsSourceRequest)) (*Response, error) + +// ExistsSourceRequest configures the Exists Source API request. +type ExistsSourceRequest struct { + Index string + DocumentID string + + Preference string + Realtime *bool + Refresh *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
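+//
+// A minimal usage sketch (same assumptions as the Exists example: a
+// Transport-compatible client named es and a hypothetical index "my-index"):
+//
+//	req := esapi.ExistsSourceRequest{Index: "my-index", DocumentID: "1"}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()
+//	// Like Exists, this is a HEAD request; 404 typically covers both a
+//	// missing document and a mapping with _source disabled.
+//	fmt.Println(res.StatusCode == 200)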
+func (r ExistsSourceRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "exists_source") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_source") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_source") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "exists_source") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "exists_source") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ExistsSource) WithContext(v context.Context) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.ctx = v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). 
+func (f ExistsSource) WithPreference(v string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Preference = v + } +} + +// WithRealtime - specify whether to perform the operation in realtime or search mode. +func (f ExistsSource) WithRealtime(v bool) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Realtime = &v + } +} + +// WithRefresh - refresh the shard containing the document before performing the operation. +func (f ExistsSource) WithRefresh(v bool) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Refresh = &v + } +} + +// WithRouting - specific routing value. +func (f ExistsSource) WithRouting(v string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f ExistsSource) WithSource(v ...string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f ExistsSource) WithSourceExcludes(v ...string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f ExistsSource) WithSourceIncludes(v ...string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.SourceIncludes = v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f ExistsSource) WithVersion(v int) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f ExistsSource) WithVersionType(v string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ExistsSource) WithPretty() func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ExistsSource) WithHuman() func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ExistsSource) WithErrorTrace() func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ExistsSource) WithFilterPath(v ...string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ExistsSource) WithHeader(h map[string]string) func(*ExistsSourceRequest) { + return func(r *ExistsSourceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f ExistsSource) WithOpaqueID(s string) func(*ExistsSourceRequest) {
+	return func(r *ExistsSourceRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.explain.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.explain.go
new file mode 100644
index 000000000..dd35d2c66
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.explain.go
@@ -0,0 +1,392 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func newExplainFunc(t Transport) Explain {
+	return func(index string, id string, o ...func(*ExplainRequest)) (*Response, error) {
+		var r = ExplainRequest{Index: index, DocumentID: id}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// Explain returns information about why a specific document matches (or doesn't match) a query.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html.
+type Explain func(index string, id string, o ...func(*ExplainRequest)) (*Response, error)
+
+// ExplainRequest configures the Explain API request.
+type ExplainRequest struct {
+	Index      string
+	DocumentID string
+
+	Body io.Reader
+
+	Analyzer        string
+	AnalyzeWildcard *bool
+	DefaultOperator string
+	Df              string
+	Lenient         *bool
+	Preference      string
+	Query           string
+	Routing         string
+	Source          []string
+	SourceExcludes  []string
+	SourceIncludes  []string
+	StoredFields    []string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
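+//
+// A hypothetical usage sketch (assumes a Transport-compatible client named
+// es, an index "my-index", and the usual imports; the body is standard
+// Query DSL):
+//
+//	body := strings.NewReader(`{"query":{"match":{"title":"quick brown fox"}}}`)
+//	req := esapi.ExplainRequest{Index: "my-index", DocumentID: "1", Body: body}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()
+//	// The JSON response carries a boolean "matched" plus a nested
+//	// "explanation" tree describing how the score was computed.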
+func (r ExplainRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "explain") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_explain") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_explain") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "explain") + if reader := instrument.RecordRequestBody(ctx, "explain", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "explain") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f Explain) WithContext(v context.Context) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - The query definition using the Query DSL.
+func (f Explain) WithBody(v io.Reader) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Body = v
+	}
+}
+
+// WithAnalyzer - the analyzer for the query string query.
+func (f Explain) WithAnalyzer(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Analyzer = v
+	}
+}
+
+// WithAnalyzeWildcard - specify whether wildcards and prefix queries in the query string query should be analyzed (default: false).
+func (f Explain) WithAnalyzeWildcard(v bool) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.AnalyzeWildcard = &v
+	}
+}
+
+// WithDefaultOperator - the default operator for the query string query (AND or OR).
+func (f Explain) WithDefaultOperator(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.DefaultOperator = v
+	}
+}
+
+// WithDf - the default field for the query string query (default: _all).
+func (f Explain) WithDf(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Df = v
+	}
+}
+
+// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored.
+func (f Explain) WithLenient(v bool) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Lenient = &v
+	}
+}
+
+// WithPreference - specify the node or shard the operation should be performed on (default: random).
+func (f Explain) WithPreference(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Preference = v
+	}
+}
+
+// WithQuery - query in the Lucene query string syntax.
+func (f Explain) WithQuery(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Query = v
+	}
+}
+
+// WithRouting - specific routing value.
+func (f Explain) WithRouting(v string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Routing = v
+	}
+}
+
+// WithSource - true or false to return the _source field or not, or a list of fields to return.
+func (f Explain) WithSource(v ...string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Source = v
+	}
+}
+
+// WithSourceExcludes - a list of fields to exclude from the returned _source field.
+func (f Explain) WithSourceExcludes(v ...string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.SourceExcludes = v
+	}
+}
+
+// WithSourceIncludes - a list of fields to extract and return from the _source field.
+func (f Explain) WithSourceIncludes(v ...string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.SourceIncludes = v
+	}
+}
+
+// WithStoredFields - a list of stored fields to return in the response.
+func (f Explain) WithStoredFields(v ...string) func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.StoredFields = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f Explain) WithPretty() func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f Explain) WithHuman() func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f Explain) WithErrorTrace() func(*ExplainRequest) {
+	return func(r *ExplainRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f Explain) WithFilterPath(v ...string) func(*ExplainRequest) { + return func(r *ExplainRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Explain) WithHeader(h map[string]string) func(*ExplainRequest) { + return func(r *ExplainRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Explain) WithOpaqueID(s string) func(*ExplainRequest) { + return func(r *ExplainRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.get_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.get_features.go new file mode 100644 index 000000000..694ab3d50 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.get_features.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newFeaturesGetFeaturesFunc(t Transport) FeaturesGetFeatures { + return func(o ...func(*FeaturesGetFeaturesRequest)) (*Response, error) { + var r = FeaturesGetFeaturesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FeaturesGetFeatures gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html. +type FeaturesGetFeatures func(o ...func(*FeaturesGetFeaturesRequest)) (*Response, error) + +// FeaturesGetFeaturesRequest configures the Features Get Features API request. +type FeaturesGetFeaturesRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
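+//
+// A minimal usage sketch (assumes a Transport-compatible client named es):
+//
+//	req := esapi.FeaturesGetFeaturesRequest{MasterTimeout: 10 * time.Second}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()
+//	// The response enumerates feature names that may be listed in the
+//	// feature_states field of a create-snapshot request.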
+func (r FeaturesGetFeaturesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.get_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_features")) + path.WriteString("http://") + path.WriteString("/_features") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "features.get_features") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "features.get_features") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FeaturesGetFeatures) WithContext(v context.Context) func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f FeaturesGetFeatures) WithMasterTimeout(v time.Duration) func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FeaturesGetFeatures) WithPretty() func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FeaturesGetFeatures) WithHuman() func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FeaturesGetFeatures) WithErrorTrace() func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FeaturesGetFeatures) WithFilterPath(v ...string) func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f FeaturesGetFeatures) WithHeader(h map[string]string) func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FeaturesGetFeatures) WithOpaqueID(s string) func(*FeaturesGetFeaturesRequest) { + return func(r *FeaturesGetFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.reset_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.reset_features.go new file mode 100644 index 000000000..bb62b5bba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.features.reset_features.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newFeaturesResetFeaturesFunc(t Transport) FeaturesResetFeatures { + return func(o ...func(*FeaturesResetFeaturesRequest)) (*Response, error) { + var r = FeaturesResetFeaturesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FeaturesResetFeatures resets the internal state of features, usually by deleting system indices +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type FeaturesResetFeatures func(o ...func(*FeaturesResetFeaturesRequest)) (*Response, error) + +// FeaturesResetFeaturesRequest configures the Features Reset Features API request. +type FeaturesResetFeaturesRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
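+//
+// A minimal usage sketch (assumes a Transport-compatible client named es;
+// note this experimental API deletes system indices, so it is meant for
+// test and snapshot-restore scenarios rather than production clusters):
+//
+//	req := esapi.FeaturesResetFeaturesRequest{}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()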
+func (r FeaturesResetFeaturesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.reset_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_features/_reset")) + path.WriteString("http://") + path.WriteString("/_features/_reset") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "features.reset_features") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "features.reset_features") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FeaturesResetFeatures) WithContext(v context.Context) func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f FeaturesResetFeatures) WithMasterTimeout(v time.Duration) func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FeaturesResetFeatures) WithPretty() func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FeaturesResetFeatures) WithHuman() func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FeaturesResetFeatures) WithErrorTrace() func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FeaturesResetFeatures) WithFilterPath(v ...string) func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f FeaturesResetFeatures) WithHeader(h map[string]string) func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FeaturesResetFeatures) WithOpaqueID(s string) func(*FeaturesResetFeaturesRequest) { + return func(r *FeaturesResetFeaturesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.field_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.field_caps.go new file mode 100644 index 000000000..7e19e6a11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.field_caps.go @@ -0,0 +1,347 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newFieldCapsFunc(t Transport) FieldCaps { + return func(o ...func(*FieldCapsRequest)) (*Response, error) { + var r = FieldCapsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FieldCaps returns the information about the capabilities of fields among multiple indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html. +type FieldCaps func(o ...func(*FieldCapsRequest)) (*Response, error) + +// FieldCapsRequest configures the Field Caps API request. +type FieldCapsRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + ExpandWildcards string + Fields []string + Filters []string + IgnoreUnavailable *bool + IncludeEmptyFields *bool + IncludeUnmapped *bool + Types []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
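+//
+// A minimal usage sketch (assumes a Transport-compatible client named es and
+// hypothetical indices matching "my-index-*"):
+//
+//	req := esapi.FieldCapsRequest{
+//		Index:  []string{"my-index-*"},
+//		Fields: []string{"rating", "title"},
+//	}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()
+//	// The response maps each field to its capabilities per mapped type
+//	// (searchable, aggregatable, and which indices use that type).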
+func (r FieldCapsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "field_caps") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_field_caps")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_field_caps") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if len(r.Filters) > 0 { + params["filters"] = strings.Join(r.Filters, ",") + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeEmptyFields != nil { + params["include_empty_fields"] = strconv.FormatBool(*r.IncludeEmptyFields) + } + + if r.IncludeUnmapped != nil { + params["include_unmapped"] = strconv.FormatBool(*r.IncludeUnmapped) + } + + if len(r.Types) > 0 { + params["types"] = strings.Join(r.Types, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "field_caps") + if reader := instrument.RecordRequestBody(ctx, "field_caps", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "field_caps") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FieldCaps) WithContext(v context.Context) func(*FieldCapsRequest) { + return func(r *FieldCapsRequest) { + r.ctx = v + } +} + +// WithBody - An index filter specified with the Query DSL. 
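+//
+// A hypothetical sketch of such a filter (field names are illustrative;
+// only indices in which at least one document could match the filter
+// contribute to the reported capabilities):
+//
+//	body := strings.NewReader(`{"index_filter":{"range":{"@timestamp":{"gte":"now-7d"}}}}`)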
+func (f FieldCaps) WithBody(v io.Reader) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Body = v
+	}
+}
+
+// WithIndex - a list of index names; use _all to perform the operation on all indices.
+func (f FieldCaps) WithIndex(v ...string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Index = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices (this includes the `_all` string or when no indices have been specified).
+func (f FieldCaps) WithAllowNoIndices(v bool) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expressions to concrete indices that are open, closed or both.
+func (f FieldCaps) WithExpandWildcards(v string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithFields - a list of field names.
+func (f FieldCaps) WithFields(v ...string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Fields = v
+	}
+}
+
+// WithFilters - an optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent.
+func (f FieldCaps) WithFilters(v ...string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Filters = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f FieldCaps) WithIgnoreUnavailable(v bool) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithIncludeEmptyFields - include empty fields in the result.
+func (f FieldCaps) WithIncludeEmptyFields(v bool) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.IncludeEmptyFields = &v
+	}
+}
+
+// WithIncludeUnmapped - indicates whether unmapped fields should be included in the response.
+func (f FieldCaps) WithIncludeUnmapped(v bool) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.IncludeUnmapped = &v
+	}
+}
+
+// WithTypes - only return results for fields that have one of the types in the list.
+func (f FieldCaps) WithTypes(v ...string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Types = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f FieldCaps) WithPretty() func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f FieldCaps) WithHuman() func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f FieldCaps) WithErrorTrace() func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f FieldCaps) WithFilterPath(v ...string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f FieldCaps) WithHeader(h map[string]string) func(*FieldCapsRequest) {
+	return func(r *FieldCapsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f FieldCaps) WithOpaqueID(s string) func(*FieldCapsRequest) { + return func(r *FieldCapsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.delete_secret.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.delete_secret.go new file mode 100644 index 000000000..2d9525705 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.delete_secret.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newFleetDeleteSecretFunc(t Transport) FleetDeleteSecret { + return func(id string, o ...func(*FleetDeleteSecretRequest)) (*Response, error) { + var r = FleetDeleteSecretRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetDeleteSecret deletes a secret stored by Fleet. +// +// This API is experimental. +type FleetDeleteSecret func(id string, o ...func(*FleetDeleteSecretRequest)) (*Response, error) + +// FleetDeleteSecretRequest configures the Fleet Delete Secret API request. +type FleetDeleteSecretRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
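+//
+// A minimal usage sketch (assumes a Transport-compatible client named es;
+// "secret-id" stands in for the ID of a previously stored Fleet secret):
+//
+//	req := esapi.FleetDeleteSecretRequest{DocumentID: "secret-id"}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()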
+func (r FleetDeleteSecretRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.delete_secret") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_fleet") + 1 + len("secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.delete_secret") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.delete_secret") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetDeleteSecret) WithContext(v context.Context) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetDeleteSecret) WithPretty() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetDeleteSecret) WithHuman() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetDeleteSecret) WithErrorTrace() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetDeleteSecret) WithFilterPath(v ...string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f FleetDeleteSecret) WithHeader(h map[string]string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetDeleteSecret) WithOpaqueID(s string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.get_secret.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.get_secret.go new file mode 100644 index 000000000..e6a2e9d15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.get_secret.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newFleetGetSecretFunc(t Transport) FleetGetSecret { + return func(id string, o ...func(*FleetGetSecretRequest)) (*Response, error) { + var r = FleetGetSecretRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetGetSecret retrieves a secret stored by Fleet. +// +// This API is experimental. +type FleetGetSecret func(id string, o ...func(*FleetGetSecretRequest)) (*Response, error) + +// FleetGetSecretRequest configures the Fleet Get Secret API request. +type FleetGetSecretRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
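+//
+// A minimal usage sketch (assumes a Transport-compatible client named es;
+// the ID is illustrative):
+//
+//	req := esapi.FleetGetSecretRequest{DocumentID: "secret-id"}
+//	res, err := req.Do(context.Background(), es)
+//	if err != nil {
+//		log.Fatalf("transport error: %s", err)
+//	}
+//	defer res.Body.Close()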
+func (r FleetGetSecretRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.get_secret") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_fleet") + 1 + len("secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.get_secret") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.get_secret") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetGetSecret) WithContext(v context.Context) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetGetSecret) WithPretty() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetGetSecret) WithHuman() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetGetSecret) WithErrorTrace() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetGetSecret) WithFilterPath(v ...string) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f FleetGetSecret) WithHeader(h map[string]string) func(*FleetGetSecretRequest) {
+	return func(r *FleetGetSecretRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f FleetGetSecret) WithOpaqueID(s string) func(*FleetGetSecretRequest) {
+	return func(r *FleetGetSecretRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.global_checkpoints.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.global_checkpoints.go
new file mode 100644
index 000000000..dd39d96d4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.global_checkpoints.go
@@ -0,0 +1,276 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newFleetGlobalCheckpointsFunc(t Transport) FleetGlobalCheckpoints {
+	return func(index string, o ...func(*FleetGlobalCheckpointsRequest)) (*Response, error) {
+		var r = FleetGlobalCheckpointsRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// FleetGlobalCheckpoints returns the current global checkpoints for an index. This API is designed for internal use by the fleet server project.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html.
+type FleetGlobalCheckpoints func(index string, o ...func(*FleetGlobalCheckpointsRequest)) (*Response, error)
+
+// FleetGlobalCheckpointsRequest configures the Fleet Global Checkpoints API request.
+type FleetGlobalCheckpointsRequest struct {
+	Index string
+
+	Checkpoints    []string
+	Timeout        time.Duration
+	WaitForAdvance *bool
+	WaitForIndex   *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
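+//
+// A minimal usage sketch (editor's addition; "es" is an assumed Transport,
+// "ctx" an assumed context.Context, and the index name is hypothetical):
+//
+//	wait := true
+//	req := esapi.FleetGlobalCheckpointsRequest{
+//		Index:          "logs",
+//		WaitForAdvance: &wait,
+//		Timeout:        30 * time.Second,
+//	}
+//	res, err := req.Do(ctx, es)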
+func (r FleetGlobalCheckpointsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.global_checkpoints") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_fleet") + 1 + len("global_checkpoints")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("global_checkpoints") + + params = make(map[string]string) + + if len(r.Checkpoints) > 0 { + params["checkpoints"] = strings.Join(r.Checkpoints, ",") + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForAdvance != nil { + params["wait_for_advance"] = strconv.FormatBool(*r.WaitForAdvance) + } + + if r.WaitForIndex != nil { + params["wait_for_index"] = strconv.FormatBool(*r.WaitForIndex) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.global_checkpoints") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.global_checkpoints") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetGlobalCheckpoints) WithContext(v context.Context) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.ctx = v + } +} + +// WithCheckpoints - comma separated list of checkpoints. +func (f FleetGlobalCheckpoints) WithCheckpoints(v ...string) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.Checkpoints = v + } +} + +// WithTimeout - timeout to wait for global checkpoint to advance. +func (f FleetGlobalCheckpoints) WithTimeout(v time.Duration) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.Timeout = v + } +} + +// WithWaitForAdvance - whether to wait for the global checkpoint to advance past the specified current checkpoints. 
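+//
+// Option-style sketch (editor's note; "gcp" is an assumed
+// FleetGlobalCheckpoints function value taken from a client, and the index
+// and checkpoint values are hypothetical):
+//
+//	res, err := gcp("logs", gcp.WithWaitForAdvance(true), gcp.WithCheckpoints("42"))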
+func (f FleetGlobalCheckpoints) WithWaitForAdvance(v bool) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.WaitForAdvance = &v + } +} + +// WithWaitForIndex - whether to wait for the target index to exist and all primary shards be active. +func (f FleetGlobalCheckpoints) WithWaitForIndex(v bool) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.WaitForIndex = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetGlobalCheckpoints) WithPretty() func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetGlobalCheckpoints) WithHuman() func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetGlobalCheckpoints) WithErrorTrace() func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetGlobalCheckpoints) WithFilterPath(v ...string) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetGlobalCheckpoints) WithHeader(h map[string]string) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetGlobalCheckpoints) WithOpaqueID(s string) func(*FleetGlobalCheckpointsRequest) { + return func(r *FleetGlobalCheckpointsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.msearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.msearch.go new file mode 100644 index 000000000..f162520f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.msearch.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func newFleetMsearchFunc(t Transport) FleetMsearch {
+	return func(body io.Reader, o ...func(*FleetMsearchRequest)) (*Response, error) {
+		var r = FleetMsearchRequest{Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// FleetMsearch is a multi search API in which the search is only executed after the specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
+//
+// This API is experimental.
+type FleetMsearch func(body io.Reader, o ...func(*FleetMsearchRequest)) (*Response, error)
+
+// FleetMsearchRequest configures the Fleet Msearch API request.
+type FleetMsearchRequest struct {
+	Index string
+
+	Body io.Reader
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r FleetMsearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "fleet.msearch")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + 1 + len(r.Index) + 1 + len("_fleet") + 1 + len("_fleet_msearch"))
+	path.WriteString("http://")
+	if r.Index != "" {
+		path.WriteString("/")
+		path.WriteString(r.Index)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.Index)
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_fleet")
+	path.WriteString("/")
+	path.WriteString("_fleet_msearch")
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "fleet.msearch")
+		if reader := instrument.RecordRequestBody(ctx, "fleet.msearch", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "fleet.msearch")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+
instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetMsearch) WithContext(v context.Context) func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.ctx = v + } +} + +// WithIndex - the index name to use as the default. +func (f FleetMsearch) WithIndex(v string) func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.Index = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetMsearch) WithPretty() func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetMsearch) WithHuman() func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetMsearch) WithErrorTrace() func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetMsearch) WithFilterPath(v ...string) func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetMsearch) WithHeader(h map[string]string) func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetMsearch) WithOpaqueID(s string) func(*FleetMsearchRequest) { + return func(r *FleetMsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.post_secret.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.post_secret.go new file mode 100644 index 000000000..6478f9187 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.post_secret.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newFleetPostSecretFunc(t Transport) FleetPostSecret { + return func(body io.Reader, o ...func(*FleetPostSecretRequest)) (*Response, error) { + var r = FleetPostSecretRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetPostSecret creates a secret stored by Fleet. +// +// This API is experimental. +type FleetPostSecret func(body io.Reader, o ...func(*FleetPostSecretRequest)) (*Response, error) + +// FleetPostSecretRequest configures the Fleet Post Secret API request. +type FleetPostSecretRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r FleetPostSecretRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.post_secret") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_fleet/secret")) + path.WriteString("http://") + path.WriteString("/_fleet/secret") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.post_secret") + if reader := instrument.RecordRequestBody(ctx, "fleet.post_secret", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.post_secret") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
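+//
+// Editor's sketch of pairing a body with a caller-supplied context
+// (illustrative; "post" is an assumed FleetPostSecret function value, "ctx"
+// an assumed context.Context, and the payload shape is hypothetical):
+//
+//	body := strings.NewReader(`{"value":"s3cr3t"}`)
+//	res, err := post(body, post.WithContext(ctx))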
+func (f FleetPostSecret) WithContext(v context.Context) func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f FleetPostSecret) WithPretty() func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f FleetPostSecret) WithHuman() func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f FleetPostSecret) WithErrorTrace() func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f FleetPostSecret) WithFilterPath(v ...string) func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f FleetPostSecret) WithHeader(h map[string]string) func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f FleetPostSecret) WithOpaqueID(s string) func(*FleetPostSecretRequest) {
+	return func(r *FleetPostSecretRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.search.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.search.go
new file mode 100644
index 000000000..a763b3d2b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.fleet.search.go
@@ -0,0 +1,281 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newFleetSearchFunc(t Transport) FleetSearch {
+	return func(index string, o ...func(*FleetSearchRequest)) (*Response, error) {
+		var r = FleetSearchRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// FleetSearch is a search API in which the search is only executed after the specified checkpoints are available due to a refresh. This API is designed for internal use by the fleet server project.
+//
+// This API is experimental.
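+//
+// Editor's sketch (not authoritative): waiting on shard checkpoints before a
+// search, via the functional options defined below ("search" is an assumed
+// FleetSearch function value; the index, body, and checkpoint are
+// hypothetical):
+//
+//	res, err := search("logs",
+//		search.WithBody(strings.NewReader(`{"query":{"match_all":{}}}`)),
+//		search.WithWaitForCheckpoints("42"),
+//		search.WithWaitForCheckpointsTimeout(30*time.Second),
+//	)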
+type FleetSearch func(index string, o ...func(*FleetSearchRequest)) (*Response, error) + +// FleetSearchRequest configures the Fleet Search API request. +type FleetSearchRequest struct { + Index string + + Body io.Reader + + AllowPartialSearchResults *bool + WaitForCheckpoints []string + WaitForCheckpointsTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r FleetSearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_fleet") + 1 + len("_fleet_search")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_search") + + params = make(map[string]string) + + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if len(r.WaitForCheckpoints) > 0 { + params["wait_for_checkpoints"] = strings.Join(r.WaitForCheckpoints, ",") + } + + if r.WaitForCheckpointsTimeout != 0 { + params["wait_for_checkpoints_timeout"] = formatDuration(r.WaitForCheckpointsTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.search") + if reader := instrument.RecordRequestBody(ctx, "fleet.search", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.search") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f FleetSearch) WithContext(v context.Context) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.ctx = v + } +} + +// WithBody - The search definition using the Query DSL. +func (f FleetSearch) WithBody(v io.Reader) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.Body = v + } +} + +// WithAllowPartialSearchResults - indicate if an error should be returned if there is a partial search failure or timeout. +func (f FleetSearch) WithAllowPartialSearchResults(v bool) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithWaitForCheckpoints - comma separated list of checkpoints, one per shard. +func (f FleetSearch) WithWaitForCheckpoints(v ...string) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.WaitForCheckpoints = v + } +} + +// WithWaitForCheckpointsTimeout - explicit wait_for_checkpoints timeout. +func (f FleetSearch) WithWaitForCheckpointsTimeout(v time.Duration) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.WaitForCheckpointsTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetSearch) WithPretty() func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetSearch) WithHuman() func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetSearch) WithErrorTrace() func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetSearch) WithFilterPath(v ...string) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetSearch) WithHeader(h map[string]string) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetSearch) WithOpaqueID(s string) func(*FleetSearchRequest) { + return func(r *FleetSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get.go new file mode 100644 index 000000000..b8a900c39 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newGetFunc(t Transport) Get { + return func(index string, id string, o ...func(*GetRequest)) (*Response, error) { + var r = GetRequest{Index: index, DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Get returns a document. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html. +type Get func(index string, id string, o ...func(*GetRequest)) (*Response, error) + +// GetRequest configures the Get API request. +type GetRequest struct { + Index string + DocumentID string + + ForceSyntheticSource *bool + Preference string + Realtime *bool + Refresh *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + StoredFields []string + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r GetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_doc") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.ForceSyntheticSource != nil { + params["force_synthetic_source"] = strconv.FormatBool(*r.ForceSyntheticSource) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if 
len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "get")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "get")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f Get) WithContext(v context.Context) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.ctx = v
+	}
+}
+
+// WithForceSyntheticSource - should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst-case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+func (f Get) WithForceSyntheticSource(v bool) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.ForceSyntheticSource = &v
+	}
+}
+
+// WithPreference - specify the node or shard the operation should be performed on (default: random).
+func (f Get) WithPreference(v string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.Preference = v
+	}
+}
+
+// WithRealtime - specify whether to perform the operation in realtime or search mode.
+func (f Get) WithRealtime(v bool) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.Realtime = &v
+	}
+}
+
+// WithRefresh - refresh the shard containing the document before performing the operation.
+func (f Get) WithRefresh(v bool) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.Refresh = &v
+	}
+}
+
+// WithRouting - specific routing value.
+func (f Get) WithRouting(v string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.Routing = v
+	}
+}
+
+// WithSource - true or false to return the _source field or not, or a list of fields to return.
+func (f Get) WithSource(v ...string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.Source = v
+	}
+}
+
+// WithSourceExcludes - a list of fields to exclude from the returned _source field.
+func (f Get) WithSourceExcludes(v ...string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.SourceExcludes = v
+	}
+}
+
+// WithSourceIncludes - a list of fields to extract and return from the _source field.
+func (f Get) WithSourceIncludes(v ...string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.SourceIncludes = v
+	}
+}
+
+// WithStoredFields - a list of stored fields to return in the response.
+func (f Get) WithStoredFields(v ...string) func(*GetRequest) {
+	return func(r *GetRequest) {
+		r.StoredFields = v
+	}
+}
+
+// WithVersion - explicit version number for concurrency control.
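+//
+// Editor's sketch of a version-checked read (illustrative; "get" is an
+// assumed Get function value and the index, ID, and version are
+// hypothetical):
+//
+//	res, err := get("my-index", "doc-1", get.WithVersion(2), get.WithVersionType("external"))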
+func (f Get) WithVersion(v int) func(*GetRequest) { + return func(r *GetRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f Get) WithVersionType(v string) func(*GetRequest) { + return func(r *GetRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Get) WithPretty() func(*GetRequest) { + return func(r *GetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Get) WithHuman() func(*GetRequest) { + return func(r *GetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Get) WithErrorTrace() func(*GetRequest) { + return func(r *GetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Get) WithFilterPath(v ...string) func(*GetRequest) { + return func(r *GetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Get) WithHeader(h map[string]string) func(*GetRequest) { + return func(r *GetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Get) WithOpaqueID(s string) func(*GetRequest) { + return func(r *GetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script.go new file mode 100644 index 000000000..458416e50 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newGetScriptFunc(t Transport) GetScript { + return func(id string, o ...func(*GetScriptRequest)) (*Response, error) { + var r = GetScriptRequest{ScriptID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// GetScript returns a script. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html. +type GetScript func(id string, o ...func(*GetScriptRequest)) (*Response, error) + +// GetScriptRequest configures the Get Script API request. 
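+//
+// Illustrative construction (editor's note; the script ID and timeout are
+// hypothetical):
+//
+//	req := esapi.GetScriptRequest{ScriptID: "calculate-score", MasterTimeout: 10 * time.Second}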
+type GetScriptRequest struct { + ScriptID string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r GetScriptRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_scripts") + 1 + len(r.ScriptID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + path.WriteString(r.ScriptID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ScriptID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "get_script") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f GetScript) WithContext(v context.Context) func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f GetScript) WithMasterTimeout(v time.Duration) func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f GetScript) WithPretty() func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f GetScript) WithHuman() func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f GetScript) WithErrorTrace() func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
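+//
+// Editor's sketch (illustrative; "gs" is an assumed GetScript function value,
+// and the filter path assumes the response nests the source under "script"):
+//
+//	res, err := gs("calculate-score", gs.WithFilterPath("script.source"))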
+func (f GetScript) WithFilterPath(v ...string) func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f GetScript) WithHeader(h map[string]string) func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f GetScript) WithOpaqueID(s string) func(*GetScriptRequest) { + return func(r *GetScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_context.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_context.go new file mode 100644 index 000000000..a93a95a4e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_context.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newGetScriptContextFunc(t Transport) GetScriptContext { + return func(o ...func(*GetScriptContextRequest)) (*Response, error) { + var r = GetScriptContextRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// GetScriptContext returns all script contexts. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html. +type GetScriptContext func(o ...func(*GetScriptContextRequest)) (*Response, error) + +// GetScriptContextRequest configures the Get Script Context API request. +type GetScriptContextRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
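+//
+// Minimal sketch (editor's addition; "es" is an assumed Transport and "ctx"
+// an assumed context.Context):
+//
+//	res, err := esapi.GetScriptContextRequest{}.Do(ctx, es)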
+func (r GetScriptContextRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_context") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_script_context")) + path.WriteString("http://") + path.WriteString("/_script_context") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "get_script_context") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script_context") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f GetScriptContext) WithContext(v context.Context) func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f GetScriptContext) WithPretty() func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f GetScriptContext) WithHuman() func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f GetScriptContext) WithErrorTrace() func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f GetScriptContext) WithFilterPath(v ...string) func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f GetScriptContext) WithHeader(h map[string]string) func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
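+//
+// Editor's note: Elasticsearch echoes X-Opaque-Id in places such as slow logs
+// and task information, which makes it useful for correlating requests.
+// Sketch ("gsc" is an assumed GetScriptContext function value):
+//
+//	res, err := gsc(gsc.WithOpaqueID("req-1234"))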
+func (f GetScriptContext) WithOpaqueID(s string) func(*GetScriptContextRequest) { + return func(r *GetScriptContextRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_languages.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_languages.go new file mode 100644 index 000000000..88b1b9af7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_script_languages.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newGetScriptLanguagesFunc(t Transport) GetScriptLanguages { + return func(o ...func(*GetScriptLanguagesRequest)) (*Response, error) { + var r = GetScriptLanguagesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// GetScriptLanguages returns available script types, languages and contexts +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html. +type GetScriptLanguages func(o ...func(*GetScriptLanguagesRequest)) (*Response, error) + +// GetScriptLanguagesRequest configures the Get Script Languages API request. +type GetScriptLanguagesRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
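+//
+// Minimal sketch (editor's addition; "es" and "ctx" are assumptions, as in
+// the other notes in this section):
+//
+//	res, err := esapi.GetScriptLanguagesRequest{Pretty: true}.Do(ctx, es)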
+func (r GetScriptLanguagesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_languages") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_script_language")) + path.WriteString("http://") + path.WriteString("/_script_language") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "get_script_languages") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script_languages") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f GetScriptLanguages) WithContext(v context.Context) func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f GetScriptLanguages) WithPretty() func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f GetScriptLanguages) WithHuman() func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f GetScriptLanguages) WithErrorTrace() func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f GetScriptLanguages) WithFilterPath(v ...string) func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f GetScriptLanguages) WithHeader(h map[string]string) func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f GetScriptLanguages) WithOpaqueID(s string) func(*GetScriptLanguagesRequest) { + return func(r *GetScriptLanguagesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_source.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_source.go new file mode 100644 index 000000000..524161023 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.get_source.go @@ -0,0 +1,339 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newGetSourceFunc(t Transport) GetSource { + return func(index string, id string, o ...func(*GetSourceRequest)) (*Response, error) { + var r = GetSourceRequest{Index: index, DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// GetSource returns the source of a document. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html. +type GetSource func(index string, id string, o ...func(*GetSourceRequest)) (*Response, error) + +// GetSourceRequest configures the Get Source API request. +type GetSourceRequest struct { + Index string + DocumentID string + + Preference string + Realtime *bool + Refresh *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
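Usage sketch for the generated Get Script Languages API above (illustrative only, not part of the vendored files; it assumes es is a *elasticsearch.Client from github.com/elastic/go-elasticsearch/v9, which embeds this generated esapi API the way earlier major client versions do, and ctx is a caller-supplied context):

// Sketch only; imports assumed: context, fmt, io, os, and the v9 client package.
func exampleGetScriptLanguages(ctx context.Context, es *elasticsearch.Client) error {
	// Every generated endpoint is a callable plus With* functional options.
	res, err := es.GetScriptLanguages(
		es.GetScriptLanguages.WithContext(ctx), // propagate cancellation/deadlines
		es.GetScriptLanguages.WithPretty(),     // pretty-print the JSON response
	)
	if err != nil {
		return err // transport-level failure (connection, context, ...)
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get script languages: %s", res.Status())
	}
	_, err = io.Copy(os.Stdout, res.Body)
	return err
}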
+func (r GetSourceRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_source") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_source") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_source") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "get_source") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_source") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f GetSource) WithContext(v context.Context) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.ctx = v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). 
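A matching sketch for Get Source (illustrative; the index "articles", document ID "1", and field names are placeholders, with es and ctx assumed as in the previous sketch):

// Sketch only; imports assumed: context, fmt, and the v9 client package.
func exampleGetSource(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.GetSource("articles", "1",
		es.GetSource.WithContext(ctx),
		es.GetSource.WithSourceIncludes("title", "author"), // trim _source to two fields
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get source: %s", res.Status())
	}
	// Unlike a plain document GET, the body here is the raw _source JSON only.
	return nil
}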
+func (f GetSource) WithPreference(v string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Preference = v + } +} + +// WithRealtime - specify whether to perform the operation in realtime or search mode. +func (f GetSource) WithRealtime(v bool) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Realtime = &v + } +} + +// WithRefresh - refresh the shard containing the document before performing the operation. +func (f GetSource) WithRefresh(v bool) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Refresh = &v + } +} + +// WithRouting - specific routing value. +func (f GetSource) WithRouting(v string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f GetSource) WithSource(v ...string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f GetSource) WithSourceExcludes(v ...string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f GetSource) WithSourceIncludes(v ...string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.SourceIncludes = v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f GetSource) WithVersion(v int) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f GetSource) WithVersionType(v string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f GetSource) WithPretty() func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f GetSource) WithHuman() func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f GetSource) WithErrorTrace() func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f GetSource) WithFilterPath(v ...string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f GetSource) WithHeader(h map[string]string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f GetSource) WithOpaqueID(s string) func(*GetSourceRequest) { + return func(r *GetSourceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.health_report.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.health_report.go new file mode 100644 index 000000000..acf8ae7ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.health_report.go @@ -0,0 +1,271 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newHealthReportFunc(t Transport) HealthReport { + return func(o ...func(*HealthReportRequest)) (*Response, error) { + var r = HealthReportRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// HealthReport returns the health of the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html. +type HealthReport func(o ...func(*HealthReportRequest)) (*Response, error) + +// HealthReportRequest configures the Health Report API request. +type HealthReportRequest struct { + Feature string + + Size *int + Timeout time.Duration + Verbose *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r HealthReportRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "health_report") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_health_report") + 1 + len(r.Feature)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_health_report") + if r.Feature != "" { + path.WriteString("/") + path.WriteString(r.Feature) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "feature", r.Feature) + } + } + + params = make(map[string]string) + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "health_report") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "health_report") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f HealthReport) WithContext(v context.Context) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.ctx = v + } +} + +// WithFeature - a feature of the cluster, as returned by the top-level health api. +func (f HealthReport) WithFeature(v string) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Feature = v + } +} + +// WithSize - limit the number of affected resources the health api returns. +func (f HealthReport) WithSize(v int) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Size = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f HealthReport) WithTimeout(v time.Duration) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Timeout = v + } +} + +// WithVerbose - opt in for more information about the health of the system. +func (f HealthReport) WithVerbose(v bool) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Verbose = &v + } +} + +// WithPretty makes the response body pretty-printed. 
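Usage sketch for the Health Report API above (illustrative; "shards_availability" is one of the indicators the Elasticsearch health API documents, and es/ctx are assumed as before):

// Sketch only; imports assumed: context, time, and the v9 client package.
func exampleHealthReport(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.HealthReport(
		es.HealthReport.WithContext(ctx),
		es.HealthReport.WithFeature("shards_availability"), // restrict to one indicator
		es.HealthReport.WithVerbose(true),                  // include per-indicator details
		es.HealthReport.WithTimeout(10*time.Second),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}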
+func (f HealthReport) WithPretty() func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f HealthReport) WithHuman() func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f HealthReport) WithErrorTrace() func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f HealthReport) WithFilterPath(v ...string) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f HealthReport) WithHeader(h map[string]string) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f HealthReport) WithOpaqueID(s string) func(*HealthReportRequest) { + return func(r *HealthReportRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.index.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.index.go new file mode 100644 index 000000000..54529ec54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.index.go @@ -0,0 +1,411 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndexFunc(t Transport) Index { + return func(index string, body io.Reader, o ...func(*IndexRequest)) (*Response, error) { + var r = IndexRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Index creates or updates a document in an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html. +type Index func(index string, body io.Reader, o ...func(*IndexRequest)) (*Response, error) + +// IndexRequest configures the Index API request. 
+type IndexRequest struct { + Index string + DocumentID string + + Body io.Reader + + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + OpType string + Pipeline string + Refresh string + RequireAlias *bool + RequireDataStream *bool + Routing string + Timeout time.Duration + Version *int + VersionType string + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + if r.DocumentID != "" { + method = "PUT" + } else { + method = "POST" + } + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_doc") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_doc") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + + params = make(map[string]string) + + if r.IfPrimaryTerm != nil { + params["if_primary_term"] = strconv.FormatInt(int64(*r.IfPrimaryTerm), 10) + } + + if r.IfSeqNo != nil { + params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) + } + + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + + if r.OpType != "" { + params["op_type"] = r.OpType + } + + if r.Pipeline != "" { + params["pipeline"] = r.Pipeline + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.RequireAlias != nil { + params["require_alias"] = strconv.FormatBool(*r.RequireAlias) + } + + if r.RequireDataStream != nil { + params["require_data_stream"] = strconv.FormatBool(*r.RequireDataStream) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" 
{ + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "index") + if reader := instrument.RecordRequestBody(ctx, "index", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "index") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Index) WithContext(v context.Context) func(*IndexRequest) { + return func(r *IndexRequest) { + r.ctx = v + } +} + +// WithDocumentID - document ID. +func (f Index) WithDocumentID(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.DocumentID = v + } +} + +// WithIfPrimaryTerm - only perform the index operation if the last operation that has changed the document has the specified primary term. +func (f Index) WithIfPrimaryTerm(v int) func(*IndexRequest) { + return func(r *IndexRequest) { + r.IfPrimaryTerm = &v + } +} + +// WithIfSeqNo - only perform the index operation if the last operation that has changed the document has the specified sequence number. +func (f Index) WithIfSeqNo(v int) func(*IndexRequest) { + return func(r *IndexRequest) { + r.IfSeqNo = &v + } +} + +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Index) WithIncludeSourceOnError(v bool) func(*IndexRequest) { + return func(r *IndexRequest) { + r.IncludeSourceOnError = &v + } +} + +// WithOpType - explicit operation type. defaults to `index` for requests with an explicit document ID, and to `create`for requests without an explicit document ID. +func (f Index) WithOpType(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.OpType = v + } +} + +// WithPipeline - the pipeline ID to preprocess incoming documents with. +func (f Index) WithPipeline(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.Pipeline = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.. +func (f Index) WithRefresh(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.Refresh = v + } +} + +// WithRequireAlias - when true, requires destination to be an alias. default is false. +func (f Index) WithRequireAlias(v bool) func(*IndexRequest) { + return func(r *IndexRequest) { + r.RequireAlias = &v + } +} + +// WithRequireDataStream - when true, requires the destination to be a data stream (existing or to-be-created). default is false. +func (f Index) WithRequireDataStream(v bool) func(*IndexRequest) { + return func(r *IndexRequest) { + r.RequireDataStream = &v + } +} + +// WithRouting - specific routing value. +func (f Index) WithRouting(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.Routing = v + } +} + +// WithTimeout - explicit operation timeout. 
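Usage sketch for the Index API above (illustrative; index name, document ID, and body are placeholders). Note how the generated Do method picks PUT when a document ID is set and POST otherwise:

// Sketch only; imports assumed: context, strings, and the v9 client package.
func exampleIndex(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.Index(
		"articles",
		strings.NewReader(`{"title":"Hello"}`),
		es.Index.WithDocumentID("1"),     // yields PUT /articles/_doc/1 instead of POST /articles/_doc
		es.Index.WithRefresh("wait_for"), // return only once the doc is visible to search
		es.Index.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}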
+func (f Index) WithTimeout(v time.Duration) func(*IndexRequest) { + return func(r *IndexRequest) { + r.Timeout = v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f Index) WithVersion(v int) func(*IndexRequest) { + return func(r *IndexRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f Index) WithVersionType(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.VersionType = v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the index operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f Index) WithWaitForActiveShards(v string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Index) WithPretty() func(*IndexRequest) { + return func(r *IndexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Index) WithHuman() func(*IndexRequest) { + return func(r *IndexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Index) WithErrorTrace() func(*IndexRequest) { + return func(r *IndexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Index) WithFilterPath(v ...string) func(*IndexRequest) { + return func(r *IndexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Index) WithHeader(h map[string]string) func(*IndexRequest) { + return func(r *IndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Index) WithOpaqueID(s string) func(*IndexRequest) { + return func(r *IndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.add_block.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.add_block.go new file mode 100644 index 000000000..30cae1cda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.add_block.go @@ -0,0 +1,298 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesAddBlockFunc(t Transport) IndicesAddBlock { + return func(index []string, block string, o ...func(*IndicesAddBlockRequest)) (*Response, error) { + var r = IndicesAddBlockRequest{Index: index, Block: block} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesAddBlock adds a block to an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html. +type IndicesAddBlock func(index []string, block string, o ...func(*IndicesAddBlockRequest)) (*Response, error) + +// IndicesAddBlockRequest configures the Indices Add Block API request. +type IndicesAddBlockRequest struct { + Index []string + + Block string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesAddBlockRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.add_block") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_block") + 1 + len(r.Block)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_block") + path.WriteString("/") + path.WriteString(r.Block) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "block", r.Block) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + 
req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.add_block") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.add_block") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesAddBlock) WithContext(v context.Context) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesAddBlock) WithAllowNoIndices(v bool) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesAddBlock) WithExpandWildcards(v string) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesAddBlock) WithIgnoreUnavailable(v bool) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesAddBlock) WithMasterTimeout(v time.Duration) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesAddBlock) WithTimeout(v time.Duration) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesAddBlock) WithPretty() func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesAddBlock) WithHuman() func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesAddBlock) WithErrorTrace() func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesAddBlock) WithFilterPath(v ...string) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
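Usage sketch for the Indices Add Block API above (illustrative; per the Elasticsearch docs the block argument is one of metadata, read, read_only, or write, and the index name is a placeholder):

// Sketch only; imports assumed: context, time, and the v9 client package.
func exampleAddBlock(ctx context.Context, es *elasticsearch.Client) error {
	// Make "articles" reject writes: PUT /articles/_block/write.
	res, err := es.Indices.AddBlock([]string{"articles"}, "write",
		es.Indices.AddBlock.WithContext(ctx),
		es.Indices.AddBlock.WithTimeout(30*time.Second),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}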
+func (f IndicesAddBlock) WithHeader(h map[string]string) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesAddBlock) WithOpaqueID(s string) func(*IndicesAddBlockRequest) { + return func(r *IndicesAddBlockRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.analyze.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.analyze.go new file mode 100644 index 000000000..5362b939d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.analyze.go @@ -0,0 +1,253 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesAnalyzeFunc(t Transport) IndicesAnalyze { + return func(o ...func(*IndicesAnalyzeRequest)) (*Response, error) { + var r = IndicesAnalyzeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesAnalyze performs the analysis process on a text and return the tokens breakdown of the text. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html. +type IndicesAnalyze func(o ...func(*IndicesAnalyzeRequest)) (*Response, error) + +// IndicesAnalyzeRequest configures the Indices Analyze API request. +type IndicesAnalyzeRequest struct { + Index string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesAnalyzeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_analyze")) + path.WriteString("http://") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_analyze") + + params = make(map[string]string) + + if r.Index != "" { + params["index"] = r.Index + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.analyze") + if reader := instrument.RecordRequestBody(ctx, "indices.analyze", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.analyze") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesAnalyze) WithContext(v context.Context) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.ctx = v + } +} + +// WithBody - Define analyzer/tokenizer parameters and the text on which the analysis should be performed. +func (f IndicesAnalyze) WithBody(v io.Reader) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.Body = v + } +} + +// WithIndex - the name of the index to scope the operation. +func (f IndicesAnalyze) WithIndex(v string) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.Index = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesAnalyze) WithPretty() func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
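Usage sketch for the Indices Analyze API above (illustrative; the analyzer, text, and index name are placeholders, and the body shape follows the _analyze request documented by Elasticsearch):

// Sketch only; imports assumed: context, strings, and the v9 client package.
func exampleAnalyze(ctx context.Context, es *elasticsearch.Client) error {
	body := strings.NewReader(`{"analyzer":"standard","text":"Quick brown foxes"}`)
	res, err := es.Indices.Analyze(
		es.Indices.Analyze.WithIndex("articles"), // optional: scope to an index's analyzers
		es.Indices.Analyze.WithBody(body),
		es.Indices.Analyze.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}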
+func (f IndicesAnalyze) WithHuman() func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesAnalyze) WithErrorTrace() func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesAnalyze) WithFilterPath(v ...string) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesAnalyze) WithHeader(h map[string]string) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesAnalyze) WithOpaqueID(s string) func(*IndicesAnalyzeRequest) { + return func(r *IndicesAnalyzeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.cancel_migrate_reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.cancel_migrate_reindex.go new file mode 100644 index 000000000..cb50796d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.cancel_migrate_reindex.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesCancelMigrateReindexFunc(t Transport) IndicesCancelMigrateReindex { + return func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error) { + var r = IndicesCancelMigrateReindexRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCancelMigrateReindex this API returns the status of a migration reindex attempt for a data stream or index +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-cancel-api.html. +type IndicesCancelMigrateReindex func(index string, o ...func(*IndicesCancelMigrateReindexRequest)) (*Response, error) + +// IndicesCancelMigrateReindexRequest configures the Indices Cancel Migrate Reindex API request. 
+type IndicesCancelMigrateReindexRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesCancelMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_cancel")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_cancel") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCancelMigrateReindex) WithContext(v context.Context) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCancelMigrateReindex) WithPretty() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCancelMigrateReindex) WithHuman() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
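Usage sketch for the Cancel Migrate Reindex API above, which the generated comment marks experimental (illustrative; the data stream name is a placeholder, and the Indices.CancelMigrateReindex spelling follows the client's usual namespacing convention):

// Sketch only; imports assumed: context and the v9 client package.
func exampleCancelMigrateReindex(ctx context.Context, es *elasticsearch.Client) error {
	// Issues POST /_migration/reindex/my-data-stream/_cancel.
	res, err := es.Indices.CancelMigrateReindex("my-data-stream",
		es.Indices.CancelMigrateReindex.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}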
+func (f IndicesCancelMigrateReindex) WithErrorTrace() func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCancelMigrateReindex) WithFilterPath(v ...string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCancelMigrateReindex) WithHeader(h map[string]string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCancelMigrateReindex) WithOpaqueID(s string) func(*IndicesCancelMigrateReindexRequest) { + return func(r *IndicesCancelMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clear_cache.go new file mode 100644 index 000000000..6b0e65f00 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clear_cache.go @@ -0,0 +1,324 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesClearCacheFunc(t Transport) IndicesClearCache { + return func(o ...func(*IndicesClearCacheRequest)) (*Response, error) { + var r = IndicesClearCacheRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesClearCache clears all or specific caches for one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html. +type IndicesClearCache func(o ...func(*IndicesClearCacheRequest)) (*Response, error) + +// IndicesClearCacheRequest configures the Indices Clear Cache API request. 
+type IndicesClearCacheRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + Fielddata *bool + Fields []string + IgnoreUnavailable *bool + Query *bool + Request *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesClearCacheRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_cache") + 1 + len("clear")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_cache") + path.WriteString("/") + path.WriteString("clear") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Fielddata != nil { + params["fielddata"] = strconv.FormatBool(*r.Fielddata) + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if len(r.Index) > 0 { + params["index"] = strings.Join(r.Index, ",") + } + + if r.Query != nil { + params["query"] = strconv.FormatBool(*r.Query) + } + + if r.Request != nil { + params["request"] = strconv.FormatBool(*r.Request) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.clear_cache") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.clear_cache") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f IndicesClearCache) WithContext(v context.Context) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index name to limit the operation. +func (f IndicesClearCache) WithIndex(v ...string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesClearCache) WithAllowNoIndices(v bool) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesClearCache) WithExpandWildcards(v string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.ExpandWildcards = v + } +} + +// WithFielddata - clear field data. +func (f IndicesClearCache) WithFielddata(v bool) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Fielddata = &v + } +} + +// WithFields - a list of fields to clear when using the `fielddata` parameter (default: all). +func (f IndicesClearCache) WithFields(v ...string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Fields = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesClearCache) WithIgnoreUnavailable(v bool) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithQuery - clear query caches. +func (f IndicesClearCache) WithQuery(v bool) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Query = &v + } +} + +// WithRequest - clear request cache. +func (f IndicesClearCache) WithRequest(v bool) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Request = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesClearCache) WithPretty() func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesClearCache) WithHuman() func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesClearCache) WithErrorTrace() func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesClearCache) WithFilterPath(v ...string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesClearCache) WithHeader(h map[string]string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f IndicesClearCache) WithOpaqueID(s string) func(*IndicesClearCacheRequest) { + return func(r *IndicesClearCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clone.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clone.go new file mode 100644 index 000000000..4cd395929 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.clone.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesCloneFunc(t Transport) IndicesClone { + return func(index string, target string, o ...func(*IndicesCloneRequest)) (*Response, error) { + var r = IndicesCloneRequest{Index: index, Target: target} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesClone clones an index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html. +type IndicesClone func(index string, target string, o ...func(*IndicesCloneRequest)) (*Response, error) + +// IndicesCloneRequest configures the Indices Clone API request. +type IndicesCloneRequest struct { + Index string + + Body io.Reader + + Target string + + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
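+//
+// Do is normally reached through the client function (e.g.
+// es.Indices.Clone("my-index", "my-clone")); calling it directly requires
+// supplying a Transport yourself, and the Instrument hooks below are active
+// only when that Transport also implements the Instrumented interface.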
+func (r IndicesCloneRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.clone") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_clone") + 1 + len(r.Target)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_clone") + path.WriteString("/") + path.WriteString(r.Target) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.Target) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.clone") + if reader := instrument.RecordRequestBody(ctx, "indices.clone", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.clone") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesClone) WithContext(v context.Context) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.ctx = v + } +} + +// WithBody - The configuration for the target index (`settings` and `aliases`). +func (f IndicesClone) WithBody(v io.Reader) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.Body = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesClone) WithMasterTimeout(v time.Duration) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f IndicesClone) WithTimeout(v time.Duration) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - set the number of active shards to wait for on the cloned index before the operation returns.. +func (f IndicesClone) WithWaitForActiveShards(v string) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesClone) WithPretty() func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesClone) WithHuman() func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesClone) WithErrorTrace() func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesClone) WithFilterPath(v ...string) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesClone) WithHeader(h map[string]string) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesClone) WithOpaqueID(s string) func(*IndicesCloneRequest) { + return func(r *IndicesCloneRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.close.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.close.go new file mode 100644 index 000000000..fc944a069 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.close.go @@ -0,0 +1,303 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesCloseFunc(t Transport) IndicesClose {
+	return func(index []string, o ...func(*IndicesCloseRequest)) (*Response, error) {
+		var r = IndicesCloseRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesClose closes an index.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html.
+type IndicesClose func(index []string, o ...func(*IndicesCloseRequest)) (*Response, error)
+
+// IndicesCloseRequest configures the Indices Close API request.
+type IndicesCloseRequest struct {
+	Index []string
+
+	AllowNoIndices      *bool
+	ExpandWildcards     string
+	IgnoreUnavailable   *bool
+	MasterTimeout       time.Duration
+	Timeout             time.Duration
+	WaitForActiveShards string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesCloseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.close")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	if len(r.Index) == 0 {
+		return nil, errors.New("index is required and cannot be nil or empty")
+	}
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_close"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString(strings.Join(r.Index, ","))
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+	}
+	path.WriteString("/")
+	path.WriteString("_close")
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.WaitForActiveShards != "" {
+		params["wait_for_active_shards"] = r.WaitForActiveShards
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.close")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.close")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesClose) WithContext(v context.Context) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesClose) WithAllowNoIndices(v bool) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesClose) WithExpandWildcards(v string) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesClose) WithIgnoreUnavailable(v bool) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesClose) WithMasterTimeout(v time.Duration) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f IndicesClose) WithTimeout(v time.Duration) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithWaitForActiveShards - sets the number of active shards to wait for before the operation returns..
+func (f IndicesClose) WithWaitForActiveShards(v string) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.WaitForActiveShards = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesClose) WithPretty() func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesClose) WithHuman() func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesClose) WithErrorTrace() func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesClose) WithFilterPath(v ...string) func(*IndicesCloseRequest) {
+	return func(r *IndicesCloseRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
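+//
+// For example (illustrative):
+//
+//	es.Indices.Close(
+//		[]string{"my-index"},
+//		es.Indices.Close.WithHeader(map[string]string{"X-Request-Id": "abc-123"}),
+//	)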
+func (f IndicesClose) WithHeader(h map[string]string) func(*IndicesCloseRequest) { + return func(r *IndicesCloseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesClose) WithOpaqueID(s string) func(*IndicesCloseRequest) { + return func(r *IndicesCloseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create.go new file mode 100644 index 000000000..e6036ed6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create.go @@ -0,0 +1,276 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesCreateFunc(t Transport) IndicesCreate { + return func(index string, o ...func(*IndicesCreateRequest)) (*Response, error) { + var r = IndicesCreateRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCreate creates an index with optional settings and mappings. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html. +type IndicesCreate func(index string, o ...func(*IndicesCreateRequest)) (*Response, error) + +// IndicesCreateRequest configures the Indices Create API request. +type IndicesCreateRequest struct { + Index string + + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
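+//
+// A typical invocation goes through the client rather than Do itself, for
+// example (illustrative; the body is any reader yielding the JSON settings
+// and mappings):
+//
+//	res, err := es.Indices.Create(
+//		"my-index",
+//		es.Indices.Create.WithBody(strings.NewReader(`{"settings":{"number_of_shards":1}}`)),
+//		es.Indices.Create.WithWaitForActiveShards("all"),
+//	)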
+func (r IndicesCreateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create") + if reader := instrument.RecordRequestBody(ctx, "indices.create", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCreate) WithContext(v context.Context) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.ctx = v + } +} + +// WithBody - The configuration for the index (`settings` and `mappings`). +func (f IndicesCreate) WithBody(v io.Reader) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.Body = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesCreate) WithMasterTimeout(v time.Duration) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesCreate) WithTimeout(v time.Duration) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - set the number of active shards to wait for before the operation returns.. 
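+//
+// The value is a shard count or "all", mirroring the wait_for_active_shards
+// query parameter, e.g. (illustrative):
+//
+//	es.Indices.Create.WithWaitForActiveShards("2")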
+func (f IndicesCreate) WithWaitForActiveShards(v string) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCreate) WithPretty() func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCreate) WithHuman() func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCreate) WithErrorTrace() func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCreate) WithFilterPath(v ...string) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCreate) WithHeader(h map[string]string) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCreate) WithOpaqueID(s string) func(*IndicesCreateRequest) { + return func(r *IndicesCreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create_from.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create_from.go new file mode 100644 index 000000000..02ef4896e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.create_from.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesCreateFromFunc(t Transport) IndicesCreateFrom { + return func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) { + var r = IndicesCreateFromRequest{Dest: dest, Source: source} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCreateFrom this API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values. 
+// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index-from-source.html. +type IndicesCreateFrom func(dest string, source string, o ...func(*IndicesCreateFromRequest)) (*Response, error) + +// IndicesCreateFromRequest configures the Indices Create From API request. +type IndicesCreateFromRequest struct { + Body io.Reader + + Dest string + Source string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesCreateFromRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_create_from") + 1 + len(r.Source) + 1 + len(r.Dest)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + path.WriteString(r.Source) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.Source) + } + path.WriteString("/") + path.WriteString(r.Dest) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "dest", r.Dest) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
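+//
+// The context supplied here is attached to the outgoing *http.Request via
+// req.WithContext in Do above, so cancelling it aborts the in-flight call.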
+func (f IndicesCreateFrom) WithContext(v context.Context) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ctx = v + } +} + +// WithBody - The body contains the fields `mappings_override`, `settings_override`, and `remove_index_blocks`.. +func (f IndicesCreateFrom) WithBody(v io.Reader) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCreateFrom) WithPretty() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesCreateFrom) WithHuman() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCreateFrom) WithErrorTrace() func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCreateFrom) WithFilterPath(v ...string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCreateFrom) WithHeader(h map[string]string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCreateFrom) WithOpaqueID(s string) func(*IndicesCreateFromRequest) { + return func(r *IndicesCreateFromRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete.go new file mode 100644 index 000000000..0cc6e27b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete.go @@ -0,0 +1,289 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesDeleteFunc(t Transport) IndicesDelete {
+	return func(index []string, o ...func(*IndicesDeleteRequest)) (*Response, error) {
+		var r = IndicesDeleteRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesDelete deletes an index.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html.
+type IndicesDelete func(index []string, o ...func(*IndicesDeleteRequest)) (*Response, error)
+
+// IndicesDeleteRequest configures the Indices Delete API request.
+type IndicesDeleteRequest struct {
+	Index []string
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	IgnoreUnavailable *bool
+	MasterTimeout     time.Duration
+	Timeout           time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.delete")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "DELETE"
+
+	if len(r.Index) == 0 {
+		return nil, errors.New("index is required and cannot be nil or empty")
+	}
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString(strings.Join(r.Index, ","))
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+	}
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.delete")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.delete")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesDelete) WithContext(v context.Context) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoIndices - ignore if a wildcard expression resolves to no concrete indices (default: false).
+func (f IndicesDelete) WithAllowNoIndices(v bool) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether wildcard expressions should get expanded to open, closed, or hidden indices.
+func (f IndicesDelete) WithExpandWildcards(v string) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - ignore unavailable indexes (default: false).
+func (f IndicesDelete) WithIgnoreUnavailable(v bool) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesDelete) WithMasterTimeout(v time.Duration) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f IndicesDelete) WithTimeout(v time.Duration) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesDelete) WithPretty() func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesDelete) WithHuman() func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesDelete) WithErrorTrace() func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesDelete) WithFilterPath(v ...string) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesDelete) WithHeader(h map[string]string) func(*IndicesDeleteRequest) {
+	return func(r *IndicesDeleteRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesDelete) WithOpaqueID(s string) func(*IndicesDeleteRequest) { + return func(r *IndicesDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_alias.go new file mode 100644 index 000000000..28be6f038 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_alias.go @@ -0,0 +1,264 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteAliasFunc(t Transport) IndicesDeleteAlias { + return func(index []string, name []string, o ...func(*IndicesDeleteAliasRequest)) (*Response, error) { + var r = IndicesDeleteAliasRequest{Index: index, Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteAlias deletes an alias. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type IndicesDeleteAlias func(index []string, name []string, o ...func(*IndicesDeleteAliasRequest)) (*Response, error) + +// IndicesDeleteAliasRequest configures the Indices Delete Alias API request. +type IndicesDeleteAliasRequest struct { + Index []string + + Name []string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
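+//
+// Both the index list and the alias name list are required here; a sketch of
+// the usual client-side call (illustrative):
+//
+//	res, err := es.Indices.DeleteAlias([]string{"my-index"}, []string{"my-alias"})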
+func (r IndicesDeleteAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_aliases") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_aliases") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_alias") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_alias") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDeleteAlias) WithContext(v context.Context) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteAlias) WithMasterTimeout(v time.Duration) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesDeleteAlias) WithTimeout(v time.Duration) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f IndicesDeleteAlias) WithPretty() func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteAlias) WithHuman() func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteAlias) WithErrorTrace() func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteAlias) WithFilterPath(v ...string) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteAlias) WithHeader(h map[string]string) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteAlias) WithOpaqueID(s string) func(*IndicesDeleteAliasRequest) { + return func(r *IndicesDeleteAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_data_lifecycle.go new file mode 100644 index 000000000..f91c78c5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_data_lifecycle.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteDataLifecycleFunc(t Transport) IndicesDeleteDataLifecycle { + return func(name []string, o ...func(*IndicesDeleteDataLifecycleRequest)) (*Response, error) { + var r = IndicesDeleteDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteDataLifecycle deletes the data stream lifecycle of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html. 
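+//
+// A usage sketch (illustrative):
+//
+//	res, err := es.Indices.DeleteDataLifecycle([]string{"my-data-stream"})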
+type IndicesDeleteDataLifecycle func(name []string, o ...func(*IndicesDeleteDataLifecycleRequest)) (*Response, error) + +// IndicesDeleteDataLifecycleRequest configures the Indices Delete Data Lifecycle API request. +type IndicesDeleteDataLifecycleRequest struct { + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDeleteDataLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f IndicesDeleteDataLifecycle) WithContext(v context.Context) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesDeleteDataLifecycle) WithExpandWildcards(v string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesDeleteDataLifecycle) WithTimeout(v time.Duration) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDeleteDataLifecycle) WithPretty() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteDataLifecycle) WithHuman() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteDataLifecycle) WithErrorTrace() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteDataLifecycle) WithFilterPath(v ...string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteDataLifecycle) WithHeader(h map[string]string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteDataLifecycle) WithOpaqueID(s string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_index_template.go new file mode 100644 index 000000000..0c756f79a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_index_template.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteIndexTemplateFunc(t Transport) IndicesDeleteIndexTemplate { + return func(name string, o ...func(*IndicesDeleteIndexTemplateRequest)) (*Response, error) { + var r = IndicesDeleteIndexTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteIndexTemplate deletes an index template. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html. +type IndicesDeleteIndexTemplate func(name string, o ...func(*IndicesDeleteIndexTemplateRequest)) (*Response, error) + +// IndicesDeleteIndexTemplateRequest configures the Indices Delete Index Template API request. +type IndicesDeleteIndexTemplateRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
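+//
+// The request targets the composable-template endpoint /_index_template/{name};
+// a sketch of the client-side call (illustrative):
+//
+//	res, err := es.Indices.DeleteIndexTemplate("my-template")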
+func (r IndicesDeleteIndexTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_index_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_index_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_index_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDeleteIndexTemplate) WithContext(v context.Context) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesDeleteIndexTemplate) WithTimeout(v time.Duration) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDeleteIndexTemplate) WithPretty() func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
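+//
+// Usage, as a minimal sketch: it assumes a configured go-elasticsearch
+// client named es and a hypothetical composable template "logs-template".
+//
+//   res, err := es.Indices.DeleteIndexTemplate(
+//       "logs-template",
+//       es.Indices.DeleteIndexTemplate.WithMasterTimeout(30*time.Second),
+//   )
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer res.Body.Close() // a 200 response indicates the template was deleted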
+func (f IndicesDeleteIndexTemplate) WithHuman() func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteIndexTemplate) WithErrorTrace() func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteIndexTemplate) WithFilterPath(v ...string) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteIndexTemplate) WithHeader(h map[string]string) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteIndexTemplate) WithOpaqueID(s string) func(*IndicesDeleteIndexTemplateRequest) { + return func(r *IndicesDeleteIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_template.go new file mode 100644 index 000000000..fd353af0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.delete_template.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteTemplateFunc(t Transport) IndicesDeleteTemplate { + return func(name string, o ...func(*IndicesDeleteTemplateRequest)) (*Response, error) { + var r = IndicesDeleteTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteTemplate deletes an index template. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html. +type IndicesDeleteTemplate func(name string, o ...func(*IndicesDeleteTemplateRequest)) (*Response, error) + +// IndicesDeleteTemplateRequest configures the Indices Delete Template API request. 
+type IndicesDeleteTemplateRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDeleteTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDeleteTemplate) WithContext(v context.Context) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteTemplate) WithMasterTimeout(v time.Duration) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesDeleteTemplate) WithTimeout(v time.Duration) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
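+//
+// Usage, as a minimal sketch for this legacy (v1) template API: it assumes a
+// configured client es and a hypothetical template name "old-logs-template".
+//
+//   res, err := es.Indices.DeleteTemplate(
+//       "old-logs-template",
+//       es.Indices.DeleteTemplate.WithTimeout(10*time.Second),
+//   )
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer res.Body.Close()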
+func (f IndicesDeleteTemplate) WithPretty() func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteTemplate) WithHuman() func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteTemplate) WithErrorTrace() func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteTemplate) WithFilterPath(v ...string) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteTemplate) WithHeader(h map[string]string) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteTemplate) WithOpaqueID(s string) func(*IndicesDeleteTemplateRequest) { + return func(r *IndicesDeleteTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.disk_usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.disk_usage.go new file mode 100644 index 000000000..c1efafcf0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.disk_usage.go @@ -0,0 +1,287 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesDiskUsageFunc(t Transport) IndicesDiskUsage { + return func(index string, o ...func(*IndicesDiskUsageRequest)) (*Response, error) { + var r = IndicesDiskUsageRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDiskUsage analyzes the disk usage of each field of an index or data stream +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html. 
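+//
+// A minimal call sketch, assuming a configured client es and an index named
+// "my-index"; run_expensive_tasks must be set explicitly, as the server
+// otherwise rejects the request:
+//
+//   res, err := es.Indices.DiskUsage(
+//       "my-index",
+//       es.Indices.DiskUsage.WithRunExpensiveTasks(true),
+//   )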
+type IndicesDiskUsage func(index string, o ...func(*IndicesDiskUsageRequest)) (*Response, error) + +// IndicesDiskUsageRequest configures the Indices Disk Usage API request. +type IndicesDiskUsageRequest struct { + Index string + + AllowNoIndices *bool + ExpandWildcards string + Flush *bool + IgnoreUnavailable *bool + RunExpensiveTasks *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDiskUsageRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.disk_usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_disk_usage")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_disk_usage") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Flush != nil { + params["flush"] = strconv.FormatBool(*r.Flush) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.RunExpensiveTasks != nil { + params["run_expensive_tasks"] = strconv.FormatBool(*r.RunExpensiveTasks) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.disk_usage") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.disk_usage") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDiskUsage) WithContext(v context.Context) func(*IndicesDiskUsageRequest) { + return func(r *IndicesDiskUsageRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(this includes `_all` string or when no indices have been specified).
+func (f IndicesDiskUsage) WithAllowNoIndices(v bool) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.AllowNoIndices = &v
+ }
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.
+func (f IndicesDiskUsage) WithExpandWildcards(v string) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.ExpandWildcards = v
+ }
+}
+
+// WithFlush - whether to flush or not before analyzing the index disk usage. defaults to true.
+func (f IndicesDiskUsage) WithFlush(v bool) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.Flush = &v
+ }
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesDiskUsage) WithIgnoreUnavailable(v bool) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.IgnoreUnavailable = &v
+ }
+}
+
+// WithRunExpensiveTasks - must be set to [true] in order for the task to be performed. defaults to false.
+func (f IndicesDiskUsage) WithRunExpensiveTasks(v bool) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.RunExpensiveTasks = &v
+ }
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesDiskUsage) WithPretty() func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.Pretty = true
+ }
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesDiskUsage) WithHuman() func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.Human = true
+ }
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesDiskUsage) WithErrorTrace() func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.ErrorTrace = true
+ }
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesDiskUsage) WithFilterPath(v ...string) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ r.FilterPath = v
+ }
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesDiskUsage) WithHeader(h map[string]string) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ for k, v := range h {
+ r.Header.Add(k, v)
+ }
+ }
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesDiskUsage) WithOpaqueID(s string) func(*IndicesDiskUsageRequest) {
+ return func(r *IndicesDiskUsageRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set("X-Opaque-Id", s)
+ }
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.downsample.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.downsample.go
new file mode 100644
index 000000000..76bff9ef8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.downsample.go
@@ -0,0 +1,242 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT

+package esapi
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func newIndicesDownsampleFunc(t Transport) IndicesDownsample {
+ return func(index string, body io.Reader, target_index string, o ...func(*IndicesDownsampleRequest)) (*Response, error) {
+ var r = IndicesDownsampleRequest{Index: index, Body: body, TargetIndex: target_index}
+ for _, f := range o {
+ f(&r)
+ }
+
+ if transport, ok := t.(Instrumented); ok {
+ r.Instrument = transport.InstrumentationEnabled()
+ }
+
+ return r.Do(r.ctx, t)
+ }
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesDownsample downsamples an index.
+//
+// This API is experimental.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html.
+type IndicesDownsample func(index string, body io.Reader, target_index string, o ...func(*IndicesDownsampleRequest)) (*Response, error)
+
+// IndicesDownsampleRequest configures the Indices Downsample API request.
+type IndicesDownsampleRequest struct {
+ Index string
+
+ Body io.Reader
+
+ TargetIndex string
+
+ Pretty bool
+ Human bool
+ ErrorTrace bool
+ FilterPath []string
+
+ Header http.Header
+
+ ctx context.Context
+
+ Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
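+//
+// A minimal call sketch, assuming a configured client es, a time-series
+// source index "metrics-src", a hypothetical target "metrics-downsampled",
+// and a JSON body carrying the downsample config (field name per the server
+// docs):
+//
+//   body := strings.NewReader(`{"fixed_interval":"1h"}`)
+//   res, err := es.Indices.Downsample("metrics-src", body, "metrics-downsampled")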
+func (r IndicesDownsampleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.downsample") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_downsample") + 1 + len(r.TargetIndex)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_downsample") + path.WriteString("/") + path.WriteString(r.TargetIndex) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target_index", r.TargetIndex) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.downsample") + if reader := instrument.RecordRequestBody(ctx, "indices.downsample", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.downsample") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDownsample) WithContext(v context.Context) func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDownsample) WithPretty() func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDownsample) WithHuman() func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDownsample) WithErrorTrace() func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f IndicesDownsample) WithFilterPath(v ...string) func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDownsample) WithHeader(h map[string]string) func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDownsample) WithOpaqueID(s string) func(*IndicesDownsampleRequest) { + return func(r *IndicesDownsampleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists.go new file mode 100644 index 000000000..3b1488a4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists.go @@ -0,0 +1,300 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newIndicesExistsFunc(t Transport) IndicesExists { + return func(index []string, o ...func(*IndicesExistsRequest)) (*Response, error) { + var r = IndicesExistsRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExists returns information about whether a particular index exists. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html. +type IndicesExists func(index []string, o ...func(*IndicesExistsRequest)) (*Response, error) + +// IndicesExistsRequest configures the Indices Exists API request. +type IndicesExistsRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + FlatSettings *bool + IgnoreUnavailable *bool + IncludeDefaults *bool + Local *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesExistsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesExists) WithContext(v context.Context) func(*IndicesExistsRequest) { + return func(r *IndicesExistsRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - ignore if a wildcard expression resolves to no concrete indices (default: false). +func (f IndicesExists) WithAllowNoIndices(v bool) func(*IndicesExistsRequest) { + return func(r *IndicesExistsRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). 
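+//
+// Existence is signalled by the status code of the HEAD response, as in this
+// minimal sketch (assuming a configured client es):
+//
+//   res, err := es.Indices.Exists([]string{"my-index"})
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   defer res.Body.Close()
+//   exists := res.StatusCode == 200 // 404 means the index does not exist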
+func (f IndicesExists) WithExpandWildcards(v string) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.ExpandWildcards = v
+ }
+}
+
+// WithFlatSettings - return settings in flat format (default: false).
+func (f IndicesExists) WithFlatSettings(v bool) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.FlatSettings = &v
+ }
+}
+
+// WithIgnoreUnavailable - ignore unavailable indexes (default: false).
+func (f IndicesExists) WithIgnoreUnavailable(v bool) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.IgnoreUnavailable = &v
+ }
+}
+
+// WithIncludeDefaults - whether to return all default settings for each of the indices.
+func (f IndicesExists) WithIncludeDefaults(v bool) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.IncludeDefaults = &v
+ }
+}
+
+// WithLocal - return local information, do not retrieve the state from master node (default: false).
+func (f IndicesExists) WithLocal(v bool) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.Local = &v
+ }
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesExists) WithPretty() func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.Pretty = true
+ }
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesExists) WithHuman() func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.Human = true
+ }
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesExists) WithErrorTrace() func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.ErrorTrace = true
+ }
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesExists) WithFilterPath(v ...string) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ r.FilterPath = v
+ }
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesExists) WithHeader(h map[string]string) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ for k, v := range h {
+ r.Header.Add(k, v)
+ }
+ }
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesExists) WithOpaqueID(s string) func(*IndicesExistsRequest) {
+ return func(r *IndicesExistsRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set("X-Opaque-Id", s)
+ }
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_alias.go
new file mode 100644
index 000000000..600e195cd
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_alias.go
@@ -0,0 +1,295 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesExistsAliasFunc(t Transport) IndicesExistsAlias { + return func(name []string, o ...func(*IndicesExistsAliasRequest)) (*Response, error) { + var r = IndicesExistsAliasRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExistsAlias returns information about whether a particular alias exists. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type IndicesExistsAlias func(name []string, o ...func(*IndicesExistsAliasRequest)) (*Response, error) + +// IndicesExistsAliasRequest configures the Indices Exists Alias API request. +type IndicesExistsAliasRequest struct { + Index []string + + Name []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesExistsAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_alias") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, 
err
+ }
+
+ if len(params) > 0 {
+ q := req.URL.Query()
+ for k, v := range params {
+ q.Set(k, v)
+ }
+ req.URL.RawQuery = q.Encode()
+ }
+
+ if len(r.Header) > 0 {
+ if len(req.Header) == 0 {
+ req.Header = r.Header
+ } else {
+ for k, vv := range r.Header {
+ for _, v := range vv {
+ req.Header.Add(k, v)
+ }
+ }
+ }
+ }
+
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
+
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.BeforeRequest(req, "indices.exists_alias")
+ }
+ res, err := transport.Perform(req)
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.AfterRequest(req, "elasticsearch", "indices.exists_alias")
+ }
+ if err != nil {
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.RecordError(ctx, err)
+ }
+ return nil, err
+ }
+
+ response := Response{
+ StatusCode: res.StatusCode,
+ Body: res.Body,
+ Header: res.Header,
+ }
+
+ return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesExistsAlias) WithContext(v context.Context) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.ctx = v
+ }
+}
+
+// WithIndex - a list of index names to filter aliases.
+func (f IndicesExistsAlias) WithIndex(v ...string) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.Index = v
+ }
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesExistsAlias) WithAllowNoIndices(v bool) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.AllowNoIndices = &v
+ }
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.
+func (f IndicesExistsAlias) WithExpandWildcards(v string) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.ExpandWildcards = v
+ }
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesExistsAlias) WithIgnoreUnavailable(v bool) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.IgnoreUnavailable = &v
+ }
+}
+
+// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked.
+func (f IndicesExistsAlias) WithMasterTimeout(v time.Duration) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.MasterTimeout = v
+ }
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesExistsAlias) WithPretty() func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.Pretty = true
+ }
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesExistsAlias) WithHuman() func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.Human = true
+ }
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesExistsAlias) WithErrorTrace() func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.ErrorTrace = true
+ }
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesExistsAlias) WithFilterPath(v ...string) func(*IndicesExistsAliasRequest) {
+ return func(r *IndicesExistsAliasRequest) {
+ r.FilterPath = v
+ }
+}
+
+// WithHeader adds the headers to the HTTP request.
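+//
+// A minimal sketch, assuming a configured client es and a hypothetical alias
+// "logs-current" scoped to the "logs-*" indices:
+//
+//   res, err := es.Indices.ExistsAlias(
+//       []string{"logs-current"},
+//       es.Indices.ExistsAlias.WithIndex("logs-*"),
+//   )
+//   // as with the exists API, 200 means found and 404 means not found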
+func (f IndicesExistsAlias) WithHeader(h map[string]string) func(*IndicesExistsAliasRequest) { + return func(r *IndicesExistsAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesExistsAlias) WithOpaqueID(s string) func(*IndicesExistsAliasRequest) { + return func(r *IndicesExistsAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_index_template.go new file mode 100644 index 000000000..a37812da7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_index_template.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesExistsIndexTemplateFunc(t Transport) IndicesExistsIndexTemplate { + return func(name string, o ...func(*IndicesExistsIndexTemplateRequest)) (*Response, error) { + var r = IndicesExistsIndexTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExistsIndexTemplate returns information about whether a particular index template exists. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html. +type IndicesExistsIndexTemplate func(name string, o ...func(*IndicesExistsIndexTemplateRequest)) (*Response, error) + +// IndicesExistsIndexTemplateRequest configures the Indices Exists Index Template API request. +type IndicesExistsIndexTemplateRequest struct { + Name string + + FlatSettings *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesExistsIndexTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + path.Grow(7 + 1 + len("_index_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists_index_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists_index_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesExistsIndexTemplate) WithContext(v context.Context) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.ctx = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesExistsIndexTemplate) WithFlatSettings(v bool) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.FlatSettings = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesExistsIndexTemplate) WithLocal(v bool) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
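+//
+// A minimal sketch, assuming a configured client es and a hypothetical
+// composable template name "logs-template":
+//
+//   res, err := es.Indices.ExistsIndexTemplate("logs-template")
+//   if err == nil {
+//       defer res.Body.Close()
+//       found := res.StatusCode == 200
+//       _ = found
+//   }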
+func (f IndicesExistsIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesExistsIndexTemplate) WithPretty() func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesExistsIndexTemplate) WithHuman() func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesExistsIndexTemplate) WithErrorTrace() func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesExistsIndexTemplate) WithFilterPath(v ...string) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesExistsIndexTemplate) WithHeader(h map[string]string) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesExistsIndexTemplate) WithOpaqueID(s string) func(*IndicesExistsIndexTemplateRequest) { + return func(r *IndicesExistsIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_template.go new file mode 100644 index 000000000..9c8cc9ed5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.exists_template.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesExistsTemplateFunc(t Transport) IndicesExistsTemplate { + return func(name []string, o ...func(*IndicesExistsTemplateRequest)) (*Response, error) { + var r = IndicesExistsTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExistsTemplate returns information about whether a particular index template exists. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html. +type IndicesExistsTemplate func(name []string, o ...func(*IndicesExistsTemplateRequest)) (*Response, error) + +// IndicesExistsTemplateRequest configures the Indices Exists Template API request. +type IndicesExistsTemplateRequest struct { + Name []string + + FlatSettings *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesExistsTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_template") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists_template") + } 
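+
+ // Perform hands the assembled *http.Request to the configured transport;
+ // instrumentation hooks, when enabled, run immediately before and after it.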
+ res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesExistsTemplate) WithContext(v context.Context) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.ctx = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesExistsTemplate) WithFlatSettings(v bool) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.FlatSettings = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesExistsTemplate) WithLocal(v bool) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesExistsTemplate) WithMasterTimeout(v time.Duration) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesExistsTemplate) WithPretty() func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesExistsTemplate) WithHuman() func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesExistsTemplate) WithErrorTrace() func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesExistsTemplate) WithFilterPath(v ...string) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesExistsTemplate) WithHeader(h map[string]string) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesExistsTemplate) WithOpaqueID(s string) func(*IndicesExistsTemplateRequest) { + return func(r *IndicesExistsTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.explain_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.explain_data_lifecycle.go new file mode 100644 index 000000000..e77b63393 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.explain_data_lifecycle.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesExplainDataLifecycleFunc(t Transport) IndicesExplainDataLifecycle { + return func(index string, o ...func(*IndicesExplainDataLifecycleRequest)) (*Response, error) { + var r = IndicesExplainDataLifecycleRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExplainDataLifecycle retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html. +type IndicesExplainDataLifecycle func(index string, o ...func(*IndicesExplainDataLifecycleRequest)) (*Response, error) + +// IndicesExplainDataLifecycleRequest configures the Indices Explain Data Lifecycle API request. +type IndicesExplainDataLifecycleRequest struct { + Index string + + IncludeDefaults *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
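+//
+// The returned Response wraps the status code, headers and body of the
+// underlying *http.Response; closing the response body is left to the caller.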
+func (r IndicesExplainDataLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.explain_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_lifecycle") + 1 + len("explain")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("explain") + + params = make(map[string]string) + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.explain_data_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.explain_data_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesExplainDataLifecycle) WithContext(v context.Context) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.ctx = v + } +} + +// WithIncludeDefaults - indicates if the api should return the default values the system uses for the index's lifecycle. +func (f IndicesExplainDataLifecycle) WithIncludeDefaults(v bool) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesExplainDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f IndicesExplainDataLifecycle) WithPretty() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesExplainDataLifecycle) WithHuman() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesExplainDataLifecycle) WithErrorTrace() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesExplainDataLifecycle) WithFilterPath(v ...string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesExplainDataLifecycle) WithHeader(h map[string]string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesExplainDataLifecycle) WithOpaqueID(s string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.field_usage_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.field_usage_stats.go new file mode 100644 index 000000000..c8944d3c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.field_usage_stats.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesFieldUsageStatsFunc(t Transport) IndicesFieldUsageStats { + return func(index string, o ...func(*IndicesFieldUsageStatsRequest)) (*Response, error) { + var r = IndicesFieldUsageStatsRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesFieldUsageStats returns the field usage stats for each field of an index +// +// This API is experimental. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html. +type IndicesFieldUsageStats func(index string, o ...func(*IndicesFieldUsageStatsRequest)) (*Response, error) + +// IndicesFieldUsageStatsRequest configures the Indices Field Usage Stats API request. +type IndicesFieldUsageStatsRequest struct { + Index string + + AllowNoIndices *bool + ExpandWildcards string + Fields []string + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesFieldUsageStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.field_usage_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_field_usage_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_field_usage_stats") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.field_usage_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.field_usage_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
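+//
+// The context is attached to the outgoing request via req.WithContext and is
+// also handed to the instrumentation Start hook, so cancellation and
+// deadlines propagate to the HTTP round trip.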
+func (f IndicesFieldUsageStats) WithContext(v context.Context) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesFieldUsageStats) WithAllowNoIndices(v bool) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesFieldUsageStats) WithExpandWildcards(v string) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.ExpandWildcards = v + } +} + +// WithFields - a list of fields to include in the stats if only a subset of fields should be returned (supports wildcards). +func (f IndicesFieldUsageStats) WithFields(v ...string) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.Fields = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesFieldUsageStats) WithIgnoreUnavailable(v bool) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesFieldUsageStats) WithPretty() func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesFieldUsageStats) WithHuman() func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesFieldUsageStats) WithErrorTrace() func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesFieldUsageStats) WithFilterPath(v ...string) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesFieldUsageStats) WithHeader(h map[string]string) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesFieldUsageStats) WithOpaqueID(s string) func(*IndicesFieldUsageStatsRequest) { + return func(r *IndicesFieldUsageStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.flush.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.flush.go new file mode 100644 index 000000000..546476bb8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.flush.go @@ -0,0 +1,294 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesFlushFunc(t Transport) IndicesFlush { + return func(o ...func(*IndicesFlushRequest)) (*Response, error) { + var r = IndicesFlushRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesFlush performs the flush operation on one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html. +type IndicesFlush func(o ...func(*IndicesFlushRequest)) (*Response, error) + +// IndicesFlushRequest configures the Indices Flush API request. +type IndicesFlushRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + Force *bool + IgnoreUnavailable *bool + WaitIfOngoing *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesFlushRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.flush") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_flush")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_flush") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.WaitIfOngoing != nil { + params["wait_if_ongoing"] = strconv.FormatBool(*r.WaitIfOngoing) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.flush") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.flush") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesFlush) WithContext(v context.Context) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all for all indices. +func (f IndicesFlush) WithIndex(v ...string) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesFlush) WithAllowNoIndices(v bool) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. 
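+//
+// The value is passed through verbatim as the expand_wildcards parameter;
+// Elasticsearch typically accepts "open", "closed", "hidden", "none", "all",
+// or a comma-separated combination of these.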
+func (f IndicesFlush) WithExpandWildcards(v string) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.ExpandWildcards = v + } +} + +// WithForce - whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. this is useful if transaction log ids should be incremented even if no uncommitted changes are present. (this setting can be considered as internal). +func (f IndicesFlush) WithForce(v bool) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.Force = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesFlush) WithIgnoreUnavailable(v bool) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithWaitIfOngoing - if set to true the flush operation will block until the flush can be executed if another flush operation is already executing. the default is true. if set to false the flush will be skipped iff if another flush operation is already running.. +func (f IndicesFlush) WithWaitIfOngoing(v bool) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.WaitIfOngoing = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesFlush) WithPretty() func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesFlush) WithHuman() func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesFlush) WithErrorTrace() func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesFlush) WithFilterPath(v ...string) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesFlush) WithHeader(h map[string]string) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesFlush) WithOpaqueID(s string) func(*IndicesFlushRequest) { + return func(r *IndicesFlushRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.forcemerge.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.forcemerge.go new file mode 100644 index 000000000..36e4a3b81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.forcemerge.go @@ -0,0 +1,318 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesForcemergeFunc(t Transport) IndicesForcemerge { + return func(o ...func(*IndicesForcemergeRequest)) (*Response, error) { + var r = IndicesForcemergeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesForcemerge performs the force merge operation on one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html. +type IndicesForcemerge func(o ...func(*IndicesForcemergeRequest)) (*Response, error) + +// IndicesForcemergeRequest configures the Indices Forcemerge API request. +type IndicesForcemergeRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + Flush *bool + IgnoreUnavailable *bool + MaxNumSegments *int + OnlyExpungeDeletes *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesForcemergeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.forcemerge") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_forcemerge")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_forcemerge") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Flush != nil { + params["flush"] = strconv.FormatBool(*r.Flush) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MaxNumSegments != nil { + params["max_num_segments"] = strconv.FormatInt(int64(*r.MaxNumSegments), 10) + } + + if r.OnlyExpungeDeletes != nil { + params["only_expunge_deletes"] = strconv.FormatBool(*r.OnlyExpungeDeletes) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.forcemerge") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.forcemerge") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesForcemerge) WithContext(v context.Context) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. +func (f IndicesForcemerge) WithIndex(v ...string) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(this includes `_all` string or when no indices have been specified). +func (f IndicesForcemerge) WithAllowNoIndices(v bool) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesForcemerge) WithExpandWildcards(v string) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.ExpandWildcards = v + } +} + +// WithFlush - specify whether the index should be flushed after performing the operation (default: true). +func (f IndicesForcemerge) WithFlush(v bool) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.Flush = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesForcemerge) WithIgnoreUnavailable(v bool) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMaxNumSegments - the number of segments the index should be merged into (default: dynamic). +func (f IndicesForcemerge) WithMaxNumSegments(v int) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.MaxNumSegments = &v + } +} + +// WithOnlyExpungeDeletes - specify whether the operation should only expunge deleted documents. +func (f IndicesForcemerge) WithOnlyExpungeDeletes(v bool) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.OnlyExpungeDeletes = &v + } +} + +// WithWaitForCompletion - should the request wait until the force merge is completed.. +func (f IndicesForcemerge) WithWaitForCompletion(v bool) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesForcemerge) WithPretty() func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesForcemerge) WithHuman() func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesForcemerge) WithErrorTrace() func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesForcemerge) WithFilterPath(v ...string) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesForcemerge) WithHeader(h map[string]string) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
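+//
+// Elasticsearch echoes the X-Opaque-Id value in server-side records such as
+// slow logs and the task management API, which makes it useful for
+// correlating client calls with cluster activity.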
+func (f IndicesForcemerge) WithOpaqueID(s string) func(*IndicesForcemergeRequest) { + return func(r *IndicesForcemergeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get.go new file mode 100644 index 000000000..1a6266633 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetFunc(t Transport) IndicesGet { + return func(index []string, o ...func(*IndicesGetRequest)) (*Response, error) { + var r = IndicesGetRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGet returns information about one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html. +type IndicesGet func(index []string, o ...func(*IndicesGetRequest)) (*Response, error) + +// IndicesGetRequest configures the Indices Get API request. +type IndicesGetRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + Features string + FlatSettings *bool + IgnoreUnavailable *bool + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Features != "" { + params["features"] = r.Features + } + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGet) WithContext(v context.Context) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - ignore if a wildcard expression resolves to no concrete indices (default: false). +func (f IndicesGet) WithAllowNoIndices(v bool) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). 
+func (f IndicesGet) WithExpandWildcards(v string) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.ExpandWildcards = v + } +} + +// WithFeatures - return only information on specified index features. +func (f IndicesGet) WithFeatures(v string) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.Features = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesGet) WithFlatSettings(v bool) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.FlatSettings = &v + } +} + +// WithIgnoreUnavailable - ignore unavailable indexes (default: false). +func (f IndicesGet) WithIgnoreUnavailable(v bool) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeDefaults - whether to return all default setting for each of the indices.. +func (f IndicesGet) WithIncludeDefaults(v bool) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.IncludeDefaults = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesGet) WithLocal(v bool) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesGet) WithMasterTimeout(v time.Duration) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGet) WithPretty() func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGet) WithHuman() func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGet) WithErrorTrace() func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGet) WithFilterPath(v ...string) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGet) WithHeader(h map[string]string) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGet) WithOpaqueID(s string) func(*IndicesGetRequest) { + return func(r *IndicesGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_alias.go new file mode 100644 index 000000000..656d2ce0c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_alias.go @@ -0,0 +1,299 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetAliasFunc(t Transport) IndicesGetAlias { + return func(o ...func(*IndicesGetAliasRequest)) (*Response, error) { + var r = IndicesGetAliasRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetAlias returns an alias. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type IndicesGetAlias func(o ...func(*IndicesGetAliasRequest)) (*Response, error) + +// IndicesGetAliasRequest configures the Indices Get Alias API request. +type IndicesGetAliasRequest struct { + Index []string + + Name []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_alias") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_alias") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != 
nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_alias") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_alias") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetAlias) WithContext(v context.Context) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to filter aliases. +func (f IndicesGetAlias) WithIndex(v ...string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.Index = v + } +} + +// WithName - a list of alias names to return. +func (f IndicesGetAlias) WithName(v ...string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.Name = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesGetAlias) WithAllowNoIndices(v bool) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesGetAlias) WithExpandWildcards(v string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesGetAlias) WithIgnoreUnavailable(v bool) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesGetAlias) WithMasterTimeout(v time.Duration) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetAlias) WithPretty() func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetAlias) WithHuman() func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetAlias) WithErrorTrace() func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
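+//
+// The values are joined with commas into the filter_path parameter;
+// Elasticsearch accepts dot-separated paths with wildcards, for example
+// "*.aliases" or "took,hits.hits._id".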
+func (f IndicesGetAlias) WithFilterPath(v ...string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetAlias) WithHeader(h map[string]string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetAlias) WithOpaqueID(s string) func(*IndicesGetAliasRequest) { + return func(r *IndicesGetAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle.go new file mode 100644 index 000000000..b5bf410f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle.go @@ -0,0 +1,269 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetDataLifecycleFunc(t Transport) IndicesGetDataLifecycle { + return func(name []string, o ...func(*IndicesGetDataLifecycleRequest)) (*Response, error) { + var r = IndicesGetDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataLifecycle returns the data stream lifecycle of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html. +type IndicesGetDataLifecycle func(name []string, o ...func(*IndicesGetDataLifecycleRequest)) (*Response, error) + +// IndicesGetDataLifecycleRequest configures the Indices Get Data Lifecycle API request. +type IndicesGetDataLifecycleRequest struct { + Name []string + + ExpandWildcards string + IncludeDefaults *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetDataLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataLifecycle) WithContext(v context.Context) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesGetDataLifecycle) WithExpandWildcards(v string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithIncludeDefaults - return all relevant default configurations for the data stream (default: false). 
+func (f IndicesGetDataLifecycle) WithIncludeDefaults(v bool) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesGetDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataLifecycle) WithPretty() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataLifecycle) WithHuman() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataLifecycle) WithErrorTrace() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataLifecycle) WithFilterPath(v ...string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataLifecycle) WithHeader(h map[string]string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataLifecycle) WithOpaqueID(s string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle_stats.go new file mode 100644 index 000000000..76a9ed2ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_data_lifecycle_stats.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetDataLifecycleStatsFunc(t Transport) IndicesGetDataLifecycleStats { + return func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) { + var r = IndicesGetDataLifecycleStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataLifecycleStats get data stream lifecycle statistics. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html. +type IndicesGetDataLifecycleStats func(o ...func(*IndicesGetDataLifecycleStatsRequest)) (*Response, error) + +// IndicesGetDataLifecycleStatsRequest configures the Indices Get Data Lifecycle Stats API request. +type IndicesGetDataLifecycleStatsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetDataLifecycleStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_lifecycle/stats")) + path.WriteString("http://") + path.WriteString("/_lifecycle/stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
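+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es"): the stats endpoint takes no required arguments, and a context
+// can be attached for cancellation or deadlines:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	res, err := es.Indices.GetDataLifecycleStats(
+//		es.Indices.GetDataLifecycleStats.WithContext(ctx),
+//	)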
+func (f IndicesGetDataLifecycleStats) WithContext(v context.Context) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataLifecycleStats) WithPretty() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataLifecycleStats) WithHuman() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataLifecycleStats) WithErrorTrace() func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataLifecycleStats) WithFilterPath(v ...string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataLifecycleStats) WithHeader(h map[string]string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataLifecycleStats) WithOpaqueID(s string) func(*IndicesGetDataLifecycleStatsRequest) { + return func(r *IndicesGetDataLifecycleStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_field_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_field_mapping.go new file mode 100644 index 000000000..72ae95edc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_field_mapping.go @@ -0,0 +1,296 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newIndicesGetFieldMappingFunc(t Transport) IndicesGetFieldMapping { + return func(fields []string, o ...func(*IndicesGetFieldMappingRequest)) (*Response, error) { + var r = IndicesGetFieldMappingRequest{Fields: fields} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetFieldMapping returns mapping for one or more fields. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html. +type IndicesGetFieldMapping func(fields []string, o ...func(*IndicesGetFieldMappingRequest)) (*Response, error) + +// IndicesGetFieldMappingRequest configures the Indices Get Field Mapping API request. +type IndicesGetFieldMappingRequest struct { + Index []string + + Fields []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + IncludeDefaults *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetFieldMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_field_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Fields) == 0 { + return nil, errors.New("fields is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_mapping") + 1 + len("field") + 1 + len(strings.Join(r.Fields, ","))) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_mapping") + path.WriteString("/") + path.WriteString("field") + path.WriteString("/") + path.WriteString(strings.Join(r.Fields, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "fields", strings.Join(r.Fields, ",")) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_field_mapping") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_field_mapping") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetFieldMapping) WithContext(v context.Context) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names. +func (f IndicesGetFieldMapping) WithIndex(v ...string) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesGetFieldMapping) WithAllowNoIndices(v bool) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesGetFieldMapping) WithExpandWildcards(v string) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesGetFieldMapping) WithIgnoreUnavailable(v bool) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeDefaults - whether the default mapping values should be returned as well. +func (f IndicesGetFieldMapping) WithIncludeDefaults(v bool) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.IncludeDefaults = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetFieldMapping) WithPretty() func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetFieldMapping) WithHuman() func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetFieldMapping) WithErrorTrace() func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
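+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es", and the filter_path expression is only an example): fields are
+// required, while the index list is optional:
+//
+//	res, err := es.Indices.GetFieldMapping(
+//		[]string{"message", "@timestamp"},
+//		es.Indices.GetFieldMapping.WithIndex("my-index"),
+//		es.Indices.GetFieldMapping.WithFilterPath("*.mappings.*.full_name"),
+//	)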
+func (f IndicesGetFieldMapping) WithFilterPath(v ...string) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetFieldMapping) WithHeader(h map[string]string) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetFieldMapping) WithOpaqueID(s string) func(*IndicesGetFieldMappingRequest) { + return func(r *IndicesGetFieldMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_index_template.go new file mode 100644 index 000000000..58c8773b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_index_template.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetIndexTemplateFunc(t Transport) IndicesGetIndexTemplate { + return func(o ...func(*IndicesGetIndexTemplateRequest)) (*Response, error) { + var r = IndicesGetIndexTemplateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetIndexTemplate returns an index template. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html. +type IndicesGetIndexTemplate func(o ...func(*IndicesGetIndexTemplateRequest)) (*Response, error) + +// IndicesGetIndexTemplateRequest configures the Indices Get Index Template API request. +type IndicesGetIndexTemplateRequest struct { + Name string + + FlatSettings *bool + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetIndexTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_index_template") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_index_template") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_index_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_index_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetIndexTemplate) WithContext(v context.Context) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.ctx = v + } +} + +// WithName - a pattern that returned template names must match. +func (f IndicesGetIndexTemplate) WithName(v string) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.Name = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesGetIndexTemplate) WithFlatSettings(v bool) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.FlatSettings = &v + } +} + +// WithIncludeDefaults - return all relevant default configurations for the index template (default: false). 
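+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es"): the name is an optional wildcard pattern:
+//
+//	res, err := es.Indices.GetIndexTemplate(
+//		es.Indices.GetIndexTemplate.WithName("logs-*"),
+//		es.Indices.GetIndexTemplate.WithIncludeDefaults(true),
+//	)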
+func (f IndicesGetIndexTemplate) WithIncludeDefaults(v bool) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.IncludeDefaults = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesGetIndexTemplate) WithLocal(v bool) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesGetIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetIndexTemplate) WithPretty() func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetIndexTemplate) WithHuman() func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetIndexTemplate) WithErrorTrace() func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetIndexTemplate) WithFilterPath(v ...string) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetIndexTemplate) WithHeader(h map[string]string) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetIndexTemplate) WithOpaqueID(s string) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_mapping.go new file mode 100644 index 000000000..af256a108 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_mapping.go @@ -0,0 +1,295 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetMappingFunc(t Transport) IndicesGetMapping { + return func(o ...func(*IndicesGetMappingRequest)) (*Response, error) { + var r = IndicesGetMappingRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetMapping returns mappings for one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html. +type IndicesGetMapping func(o ...func(*IndicesGetMappingRequest)) (*Response, error) + +// IndicesGetMappingRequest configures the Indices Get Mapping API request. +type IndicesGetMappingRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_mapping")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_mapping") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "indices.get_mapping") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_mapping") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetMapping) WithContext(v context.Context) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names. +func (f IndicesGetMapping) WithIndex(v ...string) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesGetMapping) WithAllowNoIndices(v bool) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesGetMapping) WithExpandWildcards(v string) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesGetMapping) WithIgnoreUnavailable(v bool) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesGetMapping) WithLocal(v bool) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f IndicesGetMapping) WithMasterTimeout(v time.Duration) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetMapping) WithPretty() func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetMapping) WithHuman() func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetMapping) WithErrorTrace() func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetMapping) WithFilterPath(v ...string) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
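+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es", and the header name is only an example): headers are passed as a
+// plain map and merged into the outgoing http.Header:
+//
+//	res, err := es.Indices.GetMapping(
+//		es.Indices.GetMapping.WithIndex("my-index"),
+//		es.Indices.GetMapping.WithHeader(map[string]string{"X-Request-Source": "audit-job"}),
+//	)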
+func (f IndicesGetMapping) WithHeader(h map[string]string) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetMapping) WithOpaqueID(s string) func(*IndicesGetMappingRequest) { + return func(r *IndicesGetMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_migrate_reindex_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_migrate_reindex_status.go new file mode 100644 index 000000000..459a37c3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_migrate_reindex_status.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesGetMigrateReindexStatusFunc(t Transport) IndicesGetMigrateReindexStatus { + return func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) { + var r = IndicesGetMigrateReindexStatusRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetMigrateReindexStatus this API returns the status of a migration reindex attempt for a data stream or index +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-status-api.html. +type IndicesGetMigrateReindexStatus func(index string, o ...func(*IndicesGetMigrateReindexStatusRequest)) (*Response, error) + +// IndicesGetMigrateReindexStatusRequest configures the Indices Get Migrate Reindex Status API request. +type IndicesGetMigrateReindexStatusRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetMigrateReindexStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_migration") + 1 + len("reindex") + 1 + len(r.Index) + 1 + len("_status")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetMigrateReindexStatus) WithContext(v context.Context) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetMigrateReindexStatus) WithPretty() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetMigrateReindexStatus) WithHuman() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetMigrateReindexStatus) WithErrorTrace() func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
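+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es", and the filtered field name is only an example): the status of a
+// running migration reindex is polled by data stream or index name:
+//
+//	res, err := es.Indices.GetMigrateReindexStatus(
+//		"my-data-stream",
+//		es.Indices.GetMigrateReindexStatus.WithFilterPath("complete"),
+//	)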
+func (f IndicesGetMigrateReindexStatus) WithFilterPath(v ...string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetMigrateReindexStatus) WithHeader(h map[string]string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetMigrateReindexStatus) WithOpaqueID(s string) func(*IndicesGetMigrateReindexStatusRequest) { + return func(r *IndicesGetMigrateReindexStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_settings.go new file mode 100644 index 000000000..3b69abff7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_settings.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetSettingsFunc(t Transport) IndicesGetSettings { + return func(o ...func(*IndicesGetSettingsRequest)) (*Response, error) { + var r = IndicesGetSettingsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetSettings returns settings for one or more indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html. +type IndicesGetSettings func(o ...func(*IndicesGetSettingsRequest)) (*Response, error) + +// IndicesGetSettingsRequest configures the Indices Get Settings API request. +type IndicesGetSettingsRequest struct { + Index []string + + Name []string + + AllowNoIndices *bool + ExpandWildcards string + FlatSettings *bool + IgnoreUnavailable *bool + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_settings") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_settings") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_settings") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetSettings) WithContext(v context.Context) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. 
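+//
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es"): both the index list and the setting names are optional filters:
+//
+//	res, err := es.Indices.GetSettings(
+//		es.Indices.GetSettings.WithIndex("my-index"),
+//		es.Indices.GetSettings.WithName("index.number_of_replicas"),
+//	)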
+func (f IndicesGetSettings) WithIndex(v ...string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.Index = v + } +} + +// WithName - the name of the settings that should be included. +func (f IndicesGetSettings) WithName(v ...string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.Name = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesGetSettings) WithAllowNoIndices(v bool) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesGetSettings) WithExpandWildcards(v string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.ExpandWildcards = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesGetSettings) WithFlatSettings(v bool) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.FlatSettings = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesGetSettings) WithIgnoreUnavailable(v bool) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeDefaults - whether to return all default setting for each of the indices.. +func (f IndicesGetSettings) WithIncludeDefaults(v bool) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.IncludeDefaults = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesGetSettings) WithLocal(v bool) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesGetSettings) WithMasterTimeout(v time.Duration) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetSettings) WithPretty() func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetSettings) WithHuman() func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetSettings) WithErrorTrace() func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetSettings) WithFilterPath(v ...string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesGetSettings) WithHeader(h map[string]string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetSettings) WithOpaqueID(s string) func(*IndicesGetSettingsRequest) { + return func(r *IndicesGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_template.go new file mode 100644 index 000000000..73f9e7209 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.get_template.go @@ -0,0 +1,271 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetTemplateFunc(t Transport) IndicesGetTemplate { + return func(o ...func(*IndicesGetTemplateRequest)) (*Response, error) { + var r = IndicesGetTemplateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetTemplate returns an index template. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html. +type IndicesGetTemplate func(o ...func(*IndicesGetTemplateRequest)) (*Response, error) + +// IndicesGetTemplateRequest configures the Indices Get Template API request. +type IndicesGetTemplateRequest struct { + Name []string + + FlatSettings *bool + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
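+//
+// Do is normally reached through the callable IndicesGetTemplate type.
+// Illustrative usage sketch (not generated code; assumes a client value
+// "es"): check the transport error and the HTTP-level error separately:
+//
+//	res, err := es.Indices.GetTemplate(es.Indices.GetTemplate.WithName("legacy-logs"))
+//	if err != nil {
+//		// transport-level failure
+//	} else if res.IsError() {
+//		// HTTP-level failure, e.g. 404 for a missing template
+//	}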
+func (r IndicesGetTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_template") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_template") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.FlatSettings != nil { + params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_template") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetTemplate) WithContext(v context.Context) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.ctx = v + } +} + +// WithName - the comma separated names of the index templates. +func (f IndicesGetTemplate) WithName(v ...string) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.Name = v + } +} + +// WithFlatSettings - return settings in flat format (default: false). +func (f IndicesGetTemplate) WithFlatSettings(v bool) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.FlatSettings = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f IndicesGetTemplate) WithLocal(v bool) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. 
+func (f IndicesGetTemplate) WithMasterTimeout(v time.Duration) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetTemplate) WithPretty() func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetTemplate) WithHuman() func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetTemplate) WithErrorTrace() func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetTemplate) WithFilterPath(v ...string) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetTemplate) WithHeader(h map[string]string) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetTemplate) WithOpaqueID(s string) func(*IndicesGetTemplateRequest) { + return func(r *IndicesGetTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.migrate_reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.migrate_reindex.go new file mode 100644 index 000000000..caaaa9a6c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.migrate_reindex.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesMigrateReindexFunc(t Transport) IndicesMigrateReindex { + return func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) { + var r = IndicesMigrateReindexRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesMigrateReindex this API reindexes all legacy backing indices for a data stream. 
It does this in a persistent task. The persistent task id is returned immediately, and the reindexing work is completed in that task +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex-api.html. +type IndicesMigrateReindex func(body io.Reader, o ...func(*IndicesMigrateReindexRequest)) (*Response, error) + +// IndicesMigrateReindexRequest configures the Indices Migrate Reindex API request. +type IndicesMigrateReindexRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesMigrateReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_migration/reindex")) + path.WriteString("http://") + path.WriteString("/_migration/reindex") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_reindex", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesMigrateReindex) WithContext(v context.Context) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesMigrateReindex) WithPretty() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
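Reusing the `es` client from the sketch above, starting a migration reindex could look as follows; the `mode`/`source` body shape follows the data stream reindex documentation, and the stream name is hypothetical:

```go
// Kicks off a persistent task; the response carries the task id immediately,
// while the actual reindexing continues in the background.
res, err := es.Indices.MigrateReindex(
	strings.NewReader(`{"mode": "upgrade", "source": {"index": "my-data-stream"}}`),
)
if err != nil {
	log.Fatalf("migrate reindex: %s", err)
}
defer res.Body.Close()
log.Println(res.Status())
```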
+func (f IndicesMigrateReindex) WithHuman() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesMigrateReindex) WithErrorTrace() func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesMigrateReindex) WithFilterPath(v ...string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesMigrateReindex) WithHeader(h map[string]string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesMigrateReindex) WithOpaqueID(s string) func(*IndicesMigrateReindexRequest) { + return func(r *IndicesMigrateReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.modify_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.modify_data_stream.go new file mode 100644 index 000000000..0777d22b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.modify_data_stream.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newIndicesModifyDataStreamFunc(t Transport) IndicesModifyDataStream { + return func(body io.Reader, o ...func(*IndicesModifyDataStreamRequest)) (*Response, error) { + var r = IndicesModifyDataStreamRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesModifyDataStream modifies a data stream +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesModifyDataStream func(body io.Reader, o ...func(*IndicesModifyDataStreamRequest)) (*Response, error) + +// IndicesModifyDataStreamRequest configures the Indices Modify Data Stream API request. 
+type IndicesModifyDataStreamRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesModifyDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.modify_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_data_stream/_modify")) + path.WriteString("http://") + path.WriteString("/_data_stream/_modify") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.modify_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.modify_data_stream", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.modify_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesModifyDataStream) WithContext(v context.Context) func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesModifyDataStream) WithPretty() func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesModifyDataStream) WithHuman() func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesModifyDataStream) WithErrorTrace() func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
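A sketch of the companion Modify Data Stream call under the same assumptions; the request body mirrors the documented `actions` array, and both the stream and backing index names are invented for illustration:

```go
// Atomically removes a backing index from a data stream.
res, err := es.Indices.ModifyDataStream(strings.NewReader(`{
  "actions": [
    {"remove_backing_index": {"data_stream": "my-stream", "index": ".ds-my-stream-000001"}}
  ]
}`))
if err != nil {
	log.Fatalf("modify data stream: %s", err)
}
defer res.Body.Close()
```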
+func (f IndicesModifyDataStream) WithFilterPath(v ...string) func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesModifyDataStream) WithHeader(h map[string]string) func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesModifyDataStream) WithOpaqueID(s string) func(*IndicesModifyDataStreamRequest) { + return func(r *IndicesModifyDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.open.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.open.go new file mode 100644 index 000000000..85251e1ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.open.go @@ -0,0 +1,303 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesOpenFunc(t Transport) IndicesOpen { + return func(index []string, o ...func(*IndicesOpenRequest)) (*Response, error) { + var r = IndicesOpenRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesOpen opens an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html. +type IndicesOpen func(index []string, o ...func(*IndicesOpenRequest)) (*Response, error) + +// IndicesOpenRequest configures the Indices Open API request. +type IndicesOpenRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesOpenRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.open") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_open")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_open") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.open") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.open") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesOpen) WithContext(v context.Context) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesOpen) WithAllowNoIndices(v bool) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. 
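Opening a closed index through the same client is a plain positional call, matching the `func(index []string, ...)` signature above; the index name, timeout, and shard count are illustrative:

```go
// Opens the index and waits up to 30s for at least one active shard copy.
res, err := es.Indices.Open(
	[]string{"my-index"},
	es.Indices.Open.WithTimeout(30*time.Second),
	es.Indices.Open.WithWaitForActiveShards("1"),
)
if err != nil {
	log.Fatalf("open: %s", err)
}
defer res.Body.Close()
```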
+func (f IndicesOpen) WithExpandWildcards(v string) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesOpen) WithIgnoreUnavailable(v bool) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesOpen) WithMasterTimeout(v time.Duration) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesOpen) WithTimeout(v time.Duration) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - sets the number of active shards to wait for before the operation returns.. +func (f IndicesOpen) WithWaitForActiveShards(v string) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesOpen) WithPretty() func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesOpen) WithHuman() func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesOpen) WithErrorTrace() func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesOpen) WithFilterPath(v ...string) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesOpen) WithHeader(h map[string]string) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesOpen) WithOpaqueID(s string) func(*IndicesOpenRequest) { + return func(r *IndicesOpenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_alias.go new file mode 100644 index 000000000..49f31875f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_alias.go @@ -0,0 +1,278 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesPutAliasFunc(t Transport) IndicesPutAlias { + return func(index []string, name string, o ...func(*IndicesPutAliasRequest)) (*Response, error) { + var r = IndicesPutAliasRequest{Index: index, Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPutAlias creates or updates an alias. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type IndicesPutAlias func(index []string, name string, o ...func(*IndicesPutAliasRequest)) (*Response, error) + +// IndicesPutAliasRequest configures the Indices Put Alias API request. +type IndicesPutAliasRequest struct { + Index []string + + Body io.Reader + + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesPutAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_aliases") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_aliases") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && 
req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_alias") + if reader := instrument.RecordRequestBody(ctx, "indices.put_alias", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_alias") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesPutAlias) WithContext(v context.Context) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.ctx = v + } +} + +// WithBody - The settings for the alias, such as `routing` or `filter`. +func (f IndicesPutAlias) WithBody(v io.Reader) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.Body = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesPutAlias) WithMasterTimeout(v time.Duration) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesPutAlias) WithTimeout(v time.Duration) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPutAlias) WithPretty() func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPutAlias) WithHuman() func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesPutAlias) WithErrorTrace() func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPutAlias) WithFilterPath(v ...string) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesPutAlias) WithHeader(h map[string]string) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPutAlias) WithOpaqueID(s string) func(*IndicesPutAliasRequest) { + return func(r *IndicesPutAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_lifecycle.go new file mode 100644 index 000000000..bb1ad59e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_lifecycle.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesPutDataLifecycleFunc(t Transport) IndicesPutDataLifecycle { + return func(name []string, o ...func(*IndicesPutDataLifecycleRequest)) (*Response, error) { + var r = IndicesPutDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPutDataLifecycle updates the data stream lifecycle of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html. +type IndicesPutDataLifecycle func(name []string, o ...func(*IndicesPutDataLifecycleRequest)) (*Response, error) + +// IndicesPutDataLifecycleRequest configures the Indices Put Data Lifecycle API request. +type IndicesPutDataLifecycleRequest struct { + Body io.Reader + + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesPutDataLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_lifecycle", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesPutDataLifecycle) WithContext(v context.Context) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ctx = v + } +} + +// WithBody - The data stream lifecycle configuration that consist of the data retention. +func (f IndicesPutDataLifecycle) WithBody(v io.Reader) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Body = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). 
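Under the same assumptions, a retention-based lifecycle can be set on matching data streams as sketched below; the `data_retention` key comes from the put-lifecycle documentation, and the stream name is hypothetical:

```go
// Applies a 7-day retention policy to every data stream matching the name.
res, err := es.Indices.PutDataLifecycle(
	[]string{"my-stream"},
	es.Indices.PutDataLifecycle.WithBody(strings.NewReader(`{"data_retention": "7d"}`)),
)
if err != nil {
	log.Fatalf("put data lifecycle: %s", err)
}
defer res.Body.Close()
```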
+func (f IndicesPutDataLifecycle) WithExpandWildcards(v string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesPutDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesPutDataLifecycle) WithTimeout(v time.Duration) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPutDataLifecycle) WithPretty() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPutDataLifecycle) WithHuman() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesPutDataLifecycle) WithErrorTrace() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPutDataLifecycle) WithFilterPath(v ...string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesPutDataLifecycle) WithHeader(h map[string]string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPutDataLifecycle) WithOpaqueID(s string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_options.go new file mode 100644 index 000000000..5214a3160 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_options.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesPutDataStreamOptionsFunc(t Transport) IndicesPutDataStreamOptions { + return func(name []string, o ...func(*IndicesPutDataStreamOptionsRequest)) (*Response, error) { + var r = IndicesPutDataStreamOptionsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPutDataStreamOptions updates the data stream options of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html. +type IndicesPutDataStreamOptions func(name []string, o ...func(*IndicesPutDataStreamOptionsRequest)) (*Response, error) + +// IndicesPutDataStreamOptionsRequest configures the Indices Put Data Stream Options API request. +type IndicesPutDataStreamOptionsRequest struct { + Body io.Reader + + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesPutDataStreamOptionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_options")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_options") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && 
req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_stream_options") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_stream_options", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_stream_options") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesPutDataStreamOptions) WithContext(v context.Context) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.ctx = v + } +} + +// WithBody - The data stream options configuration that consist of the failure store configuration. +func (f IndicesPutDataStreamOptions) WithBody(v io.Reader) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.Body = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesPutDataStreamOptions) WithExpandWildcards(v string) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesPutDataStreamOptions) WithMasterTimeout(v time.Duration) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesPutDataStreamOptions) WithTimeout(v time.Duration) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPutDataStreamOptions) WithPretty() func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPutDataStreamOptions) WithHuman() func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesPutDataStreamOptions) WithErrorTrace() func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPutDataStreamOptions) WithFilterPath(v ...string) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
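The new data stream options endpoint follows the same pattern; note that the `failure_store` body below is an assumption based on the failure store documentation rather than anything in this diff:

```go
// Enables the failure store on the named data stream (body shape assumed).
res, err := es.Indices.PutDataStreamOptions(
	[]string{"my-stream"},
	es.Indices.PutDataStreamOptions.WithBody(strings.NewReader(`{"failure_store": {"enabled": true}}`)),
)
if err != nil {
	log.Fatalf("put data stream options: %s", err)
}
defer res.Body.Close()
```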
+func (f IndicesPutDataStreamOptions) WithHeader(h map[string]string) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPutDataStreamOptions) WithOpaqueID(s string) func(*IndicesPutDataStreamOptionsRequest) { + return func(r *IndicesPutDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_settings.go new file mode 100644 index 000000000..7fd81804a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_data_stream_settings.go @@ -0,0 +1,274 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesPutDataStreamSettingsFunc(t Transport) IndicesPutDataStreamSettings { + return func(name string, body io.Reader, o ...func(*IndicesPutDataStreamSettingsRequest)) (*Response, error) { + var r = IndicesPutDataStreamSettingsRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPutDataStreamSettings updates a data stream's settings +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesPutDataStreamSettings func(name string, body io.Reader, o ...func(*IndicesPutDataStreamSettingsRequest)) (*Response, error) + +// IndicesPutDataStreamSettingsRequest configures the Indices Put Data Stream Settings API request. +type IndicesPutDataStreamSettingsRequest struct { + Body io.Reader + + Name string + + DryRun *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesPutDataStreamSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_stream_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(r.Name) + 1 + len("_settings")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("_settings") + + params = make(map[string]string) + + if r.DryRun != nil { + params["dry_run"] = strconv.FormatBool(*r.DryRun) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_stream_settings") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_stream_settings", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_stream_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesPutDataStreamSettings) WithContext(v context.Context) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.ctx = v + } +} + +// WithDryRun - whether this request should only be a dry run rather than actually applying settings. +func (f IndicesPutDataStreamSettings) WithDryRun(v bool) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.DryRun = &v + } +} + +// WithMasterTimeout - period to wait for a connection to the master node. 
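Because this request takes the name and body positionally, a dry-run settings change could be sketched as follows; the setting key is a plausible example, not taken from the diff:

```go
// Previews a settings change without applying it, via the dry_run flag.
res, err := es.Indices.PutDataStreamSettings(
	"my-stream",
	strings.NewReader(`{"index.lifecycle.name": "my-policy"}`),
	es.Indices.PutDataStreamSettings.WithDryRun(true),
)
if err != nil {
	log.Fatalf("put data stream settings: %s", err)
}
defer res.Body.Close()
```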
+func (f IndicesPutDataStreamSettings) WithMasterTimeout(v time.Duration) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - period to wait for a response. +func (f IndicesPutDataStreamSettings) WithTimeout(v time.Duration) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPutDataStreamSettings) WithPretty() func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPutDataStreamSettings) WithHuman() func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesPutDataStreamSettings) WithErrorTrace() func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPutDataStreamSettings) WithFilterPath(v ...string) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesPutDataStreamSettings) WithHeader(h map[string]string) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPutDataStreamSettings) WithOpaqueID(s string) func(*IndicesPutDataStreamSettingsRequest) { + return func(r *IndicesPutDataStreamSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_index_template.go new file mode 100644 index 000000000..722dfe0b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_index_template.go @@ -0,0 +1,272 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesPutIndexTemplateFunc(t Transport) IndicesPutIndexTemplate {
+	return func(name string, body io.Reader, o ...func(*IndicesPutIndexTemplateRequest)) (*Response, error) {
+		var r = IndicesPutIndexTemplateRequest{Name: name, Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesPutIndexTemplate creates or updates an index template.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html.
+type IndicesPutIndexTemplate func(name string, body io.Reader, o ...func(*IndicesPutIndexTemplateRequest)) (*Response, error)
+
+// IndicesPutIndexTemplateRequest configures the Indices Put Index Template API request.
+type IndicesPutIndexTemplateRequest struct {
+	Body io.Reader
+
+	Name string
+
+	Cause         string
+	Create        *bool
+	MasterTimeout time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesPutIndexTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.put_index_template")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "PUT"
+
+	path.Grow(7 + 1 + len("_index_template") + 1 + len(r.Name))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_index_template")
+	path.WriteString("/")
+	path.WriteString(r.Name)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "name", r.Name)
+	}
+
+	params = make(map[string]string)
+
+	if r.Cause != "" {
+		params["cause"] = r.Cause
+	}
+
+	if r.Create != nil {
+		params["create"] = strconv.FormatBool(*r.Create)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.put_index_template")
+		if reader := instrument.RecordRequestBody(ctx, "indices.put_index_template", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.put_index_template")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesPutIndexTemplate) WithContext(v context.Context) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.ctx = v
+	}
+}
+
+// WithCause - user defined reason for creating/updating the index template.
+func (f IndicesPutIndexTemplate) WithCause(v string) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.Cause = v
+	}
+}
+
+// WithCreate - whether the index template should only be added if new or can also replace an existing one.
+func (f IndicesPutIndexTemplate) WithCreate(v bool) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.Create = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesPutIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesPutIndexTemplate) WithPretty() func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesPutIndexTemplate) WithHuman() func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesPutIndexTemplate) WithErrorTrace() func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesPutIndexTemplate) WithFilterPath(v ...string) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesPutIndexTemplate) WithHeader(h map[string]string) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesPutIndexTemplate) WithOpaqueID(s string) func(*IndicesPutIndexTemplateRequest) {
+	return func(r *IndicesPutIndexTemplateRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_mapping.go
new file mode 100644
index 000000000..047868638
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_mapping.go
@@ -0,0 +1,306 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesPutMappingFunc(t Transport) IndicesPutMapping {
+	return func(index []string, body io.Reader, o ...func(*IndicesPutMappingRequest)) (*Response, error) {
+		var r = IndicesPutMappingRequest{Index: index, Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesPutMapping updates the index mappings.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html.
+type IndicesPutMapping func(index []string, body io.Reader, o ...func(*IndicesPutMappingRequest)) (*Response, error)
+
+// IndicesPutMappingRequest configures the Indices Put Mapping API request.
+type IndicesPutMappingRequest struct {
+	Index []string
+
+	Body io.Reader
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	IgnoreUnavailable *bool
+	MasterTimeout     time.Duration
+	Timeout           time.Duration
+	WriteIndexOnly    *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesPutMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.put_mapping")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "PUT"
+
+	path.Grow(len(strings.Join(r.Index, ",")) + len("/_mapping") + 1)
+	if len(r.Index) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Index, ","))
+	}
+	path.WriteString("/")
+	path.WriteString("_mapping")
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.WriteIndexOnly != nil {
+		params["write_index_only"] = strconv.FormatBool(*r.WriteIndexOnly)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.put_mapping")
+		if reader := instrument.RecordRequestBody(ctx, "indices.put_mapping", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.put_mapping")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesPutMapping) WithContext(v context.Context) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesPutMapping) WithAllowNoIndices(v bool) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesPutMapping) WithExpandWildcards(v string) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesPutMapping) WithIgnoreUnavailable(v bool) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesPutMapping) WithMasterTimeout(v time.Duration) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f IndicesPutMapping) WithTimeout(v time.Duration) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithWriteIndexOnly - when true, applies mappings only to the write index of an alias or data stream.
+func (f IndicesPutMapping) WithWriteIndexOnly(v bool) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.WriteIndexOnly = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesPutMapping) WithPretty() func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesPutMapping) WithHuman() func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesPutMapping) WithErrorTrace() func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesPutMapping) WithFilterPath(v ...string) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesPutMapping) WithHeader(h map[string]string) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesPutMapping) WithOpaqueID(s string) func(*IndicesPutMappingRequest) {
+	return func(r *IndicesPutMappingRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_settings.go
new file mode 100644
index 000000000..c604a6ffc
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_settings.go
@@ -0,0 +1,341 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesPutSettingsFunc(t Transport) IndicesPutSettings {
+	return func(body io.Reader, o ...func(*IndicesPutSettingsRequest)) (*Response, error) {
+		var r = IndicesPutSettingsRequest{Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesPutSettings updates the index settings.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html.
+type IndicesPutSettings func(body io.Reader, o ...func(*IndicesPutSettingsRequest)) (*Response, error)
+
+// IndicesPutSettingsRequest configures the Indices Put Settings API request.
+type IndicesPutSettingsRequest struct {
+	Index []string
+
+	Body io.Reader
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	FlatSettings      *bool
+	IgnoreUnavailable *bool
+	MasterTimeout     time.Duration
+	PreserveExisting  *bool
+	Reopen            *bool
+	Timeout           time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesPutSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.put_settings")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "PUT"
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_settings"))
+	path.WriteString("http://")
+	if len(r.Index) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Index, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_settings")
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.FlatSettings != nil {
+		params["flat_settings"] = strconv.FormatBool(*r.FlatSettings)
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.PreserveExisting != nil {
+		params["preserve_existing"] = strconv.FormatBool(*r.PreserveExisting)
+	}
+
+	if r.Reopen != nil {
+		params["reopen"] = strconv.FormatBool(*r.Reopen)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.put_settings")
+		if reader := instrument.RecordRequestBody(ctx, "indices.put_settings", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.put_settings")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesPutSettings) WithContext(v context.Context) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithIndex - a list of index names; use _all to perform the operation on all indices.
+func (f IndicesPutSettings) WithIndex(v ...string) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.Index = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesPutSettings) WithAllowNoIndices(v bool) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesPutSettings) WithExpandWildcards(v string) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithFlatSettings - return settings in flat format (default: false).
+func (f IndicesPutSettings) WithFlatSettings(v bool) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.FlatSettings = &v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesPutSettings) WithIgnoreUnavailable(v bool) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesPutSettings) WithMasterTimeout(v time.Duration) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithPreserveExisting - whether to update existing settings. if set to `true` existing settings on an index remain unchanged, the default is `false`.
+func (f IndicesPutSettings) WithPreserveExisting(v bool) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.PreserveExisting = &v
+	}
+}
+
+// WithReopen - whether to close and reopen the index to apply non-dynamic settings. if set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. the default is `false`.
+func (f IndicesPutSettings) WithReopen(v bool) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.Reopen = &v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f IndicesPutSettings) WithTimeout(v time.Duration) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesPutSettings) WithPretty() func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesPutSettings) WithHuman() func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesPutSettings) WithErrorTrace() func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesPutSettings) WithFilterPath(v ...string) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesPutSettings) WithHeader(h map[string]string) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesPutSettings) WithOpaqueID(s string) func(*IndicesPutSettingsRequest) {
+	return func(r *IndicesPutSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_template.go
new file mode 100644
index 000000000..875b0896e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.put_template.go
@@ -0,0 +1,284 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesPutTemplateFunc(t Transport) IndicesPutTemplate {
+	return func(name string, body io.Reader, o ...func(*IndicesPutTemplateRequest)) (*Response, error) {
+		var r = IndicesPutTemplateRequest{Name: name, Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesPutTemplate creates or updates an index template.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html.
+type IndicesPutTemplate func(name string, body io.Reader, o ...func(*IndicesPutTemplateRequest)) (*Response, error)
+
+// IndicesPutTemplateRequest configures the Indices Put Template API request.
+type IndicesPutTemplateRequest struct {
+	Body io.Reader
+
+	Name string
+
+	Cause         string
+	Create        *bool
+	MasterTimeout time.Duration
+	Order         *int
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesPutTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.put_template")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "PUT"
+
+	path.Grow(7 + 1 + len("_template") + 1 + len(r.Name))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_template")
+	path.WriteString("/")
+	path.WriteString(r.Name)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "name", r.Name)
+	}
+
+	params = make(map[string]string)
+
+	if r.Cause != "" {
+		params["cause"] = r.Cause
+	}
+
+	if r.Create != nil {
+		params["create"] = strconv.FormatBool(*r.Create)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Order != nil {
+		params["order"] = strconv.FormatInt(int64(*r.Order), 10)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.put_template")
+		if reader := instrument.RecordRequestBody(ctx, "indices.put_template", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.put_template")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesPutTemplate) WithContext(v context.Context) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.ctx = v
+	}
+}
+
+// WithCause - user defined reason for creating/updating the index template.
+func (f IndicesPutTemplate) WithCause(v string) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.Cause = v
+	}
+}
+
+// WithCreate - whether the index template should only be added if new or can also replace an existing one.
+func (f IndicesPutTemplate) WithCreate(v bool) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.Create = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesPutTemplate) WithMasterTimeout(v time.Duration) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithOrder - the order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers).
+func (f IndicesPutTemplate) WithOrder(v int) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.Order = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesPutTemplate) WithPretty() func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesPutTemplate) WithHuman() func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesPutTemplate) WithErrorTrace() func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesPutTemplate) WithFilterPath(v ...string) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesPutTemplate) WithHeader(h map[string]string) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesPutTemplate) WithOpaqueID(s string) func(*IndicesPutTemplateRequest) {
+	return func(r *IndicesPutTemplateRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.recovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.recovery.go
new file mode 100644
index 000000000..f87008e9d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.recovery.go
@@ -0,0 +1,294 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func newIndicesRecoveryFunc(t Transport) IndicesRecovery {
+	return func(o ...func(*IndicesRecoveryRequest)) (*Response, error) {
+		var r = IndicesRecoveryRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesRecovery returns information about ongoing index shard recoveries.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html.
+type IndicesRecovery func(o ...func(*IndicesRecoveryRequest)) (*Response, error)
+
+// IndicesRecoveryRequest configures the Indices Recovery API request.
+type IndicesRecoveryRequest struct {
+	Index []string
+
+	ActiveOnly        *bool
+	AllowNoIndices    *bool
+	Detailed          *bool
+	ExpandWildcards   string
+	IgnoreUnavailable *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesRecoveryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.recovery")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_recovery"))
+	path.WriteString("http://")
+	if len(r.Index) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Index, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_recovery")
+
+	params = make(map[string]string)
+
+	if r.ActiveOnly != nil {
+		params["active_only"] = strconv.FormatBool(*r.ActiveOnly)
+	}
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.Detailed != nil {
+		params["detailed"] = strconv.FormatBool(*r.Detailed)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.recovery")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.recovery")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesRecovery) WithContext(v context.Context) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.ctx = v
+	}
+}
+
+// WithIndex - a list of index names; use _all to perform the operation on all indices.
+func (f IndicesRecovery) WithIndex(v ...string) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.Index = v
+	}
+}
+
+// WithActiveOnly - display only those recoveries that are currently on-going.
+func (f IndicesRecovery) WithActiveOnly(v bool) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.ActiveOnly = &v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesRecovery) WithAllowNoIndices(v bool) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithDetailed - whether to display detailed information about shard recovery.
+func (f IndicesRecovery) WithDetailed(v bool) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.Detailed = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesRecovery) WithExpandWildcards(v string) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesRecovery) WithIgnoreUnavailable(v bool) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesRecovery) WithPretty() func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesRecovery) WithHuman() func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesRecovery) WithErrorTrace() func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesRecovery) WithFilterPath(v ...string) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesRecovery) WithHeader(h map[string]string) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesRecovery) WithOpaqueID(s string) func(*IndicesRecoveryRequest) {
+	return func(r *IndicesRecoveryRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.refresh.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.refresh.go
new file mode 100644
index 000000000..be6e23038
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.refresh.go
@@ -0,0 +1,270 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func newIndicesRefreshFunc(t Transport) IndicesRefresh {
+	return func(o ...func(*IndicesRefreshRequest)) (*Response, error) {
+		var r = IndicesRefreshRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesRefresh performs the refresh operation in one or more indices.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html.
+type IndicesRefresh func(o ...func(*IndicesRefreshRequest)) (*Response, error)
+
+// IndicesRefreshRequest configures the Indices Refresh API request.
+type IndicesRefreshRequest struct {
+	Index []string
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	IgnoreUnavailable *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesRefreshRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.refresh")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_refresh"))
+	path.WriteString("http://")
+	if len(r.Index) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Index, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_refresh")
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.refresh")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.refresh")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesRefresh) WithContext(v context.Context) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.ctx = v
+	}
+}
+
+// WithIndex - a list of index names; use _all to perform the operation on all indices.
+func (f IndicesRefresh) WithIndex(v ...string) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.Index = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesRefresh) WithAllowNoIndices(v bool) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesRefresh) WithExpandWildcards(v string) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesRefresh) WithIgnoreUnavailable(v bool) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesRefresh) WithPretty() func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesRefresh) WithHuman() func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesRefresh) WithErrorTrace() func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesRefresh) WithFilterPath(v ...string) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesRefresh) WithHeader(h map[string]string) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesRefresh) WithOpaqueID(s string) func(*IndicesRefreshRequest) {
+	return func(r *IndicesRefreshRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.remove_block.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.remove_block.go
new file mode 100644
index 000000000..e348d8ef7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.remove_block.go
@@ -0,0 +1,298 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesRemoveBlockFunc(t Transport) IndicesRemoveBlock {
+	return func(index []string, block string, o ...func(*IndicesRemoveBlockRequest)) (*Response, error) {
+		var r = IndicesRemoveBlockRequest{Index: index, Block: block}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesRemoveBlock removes a block from an index.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html.
+type IndicesRemoveBlock func(index []string, block string, o ...func(*IndicesRemoveBlockRequest)) (*Response, error)
+
+// IndicesRemoveBlockRequest configures the Indices Remove Block API request.
+type IndicesRemoveBlockRequest struct {
+	Index []string
+
+	Block string
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	IgnoreUnavailable *bool
+	MasterTimeout     time.Duration
+	Timeout           time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesRemoveBlockRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.remove_block")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "DELETE"
+
+	if len(r.Index) == 0 {
+		return nil, errors.New("index is required and cannot be nil or empty")
+	}
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_block") + 1 + len(r.Block))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString(strings.Join(r.Index, ","))
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+	}
+	path.WriteString("/")
+	path.WriteString("_block")
+	path.WriteString("/")
+	path.WriteString(r.Block)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "block", r.Block)
+	}
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.remove_block")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.remove_block")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesRemoveBlock) WithContext(v context.Context) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
+func (f IndicesRemoveBlock) WithAllowNoIndices(v bool) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
+func (f IndicesRemoveBlock) WithExpandWildcards(v string) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (f IndicesRemoveBlock) WithIgnoreUnavailable(v bool) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithMasterTimeout - specify timeout for connection to master.
+func (f IndicesRemoveBlock) WithMasterTimeout(v time.Duration) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f IndicesRemoveBlock) WithTimeout(v time.Duration) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesRemoveBlock) WithPretty() func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesRemoveBlock) WithHuman() func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesRemoveBlock) WithErrorTrace() func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesRemoveBlock) WithFilterPath(v ...string) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesRemoveBlock) WithHeader(h map[string]string) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesRemoveBlock) WithOpaqueID(s string) func(*IndicesRemoveBlockRequest) {
+	return func(r *IndicesRemoveBlockRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_cluster.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_cluster.go
new file mode 100644
index 000000000..3ead8fc06
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_cluster.go
@@ -0,0 +1,297 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newIndicesResolveClusterFunc(t Transport) IndicesResolveCluster {
+	return func(o ...func(*IndicesResolveClusterRequest)) (*Response, error) {
+		var r = IndicesResolveClusterRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// IndicesResolveCluster resolves the specified index expressions to return information about each cluster. If no index expression is provided, this endpoint will return information about all the remote clusters that are configured on the local cluster.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html.
+type IndicesResolveCluster func(o ...func(*IndicesResolveClusterRequest)) (*Response, error)
+
+// IndicesResolveClusterRequest configures the Indices Resolve Cluster API request.
+type IndicesResolveClusterRequest struct {
+	Name []string
+
+	AllowNoIndices    *bool
+	ExpandWildcards   string
+	IgnoreThrottled   *bool
+	IgnoreUnavailable *bool
+	Timeout           time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r IndicesResolveClusterRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.resolve_cluster")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_resolve") + 1 + len("cluster") + 1 + len(strings.Join(r.Name, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_resolve")
+	path.WriteString("/")
+	path.WriteString("cluster")
+	if len(r.Name) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Name, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.AllowNoIndices != nil {
+		params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
+	}
+
+	if r.ExpandWildcards != "" {
+		params["expand_wildcards"] = r.ExpandWildcards
+	}
+
+	if r.IgnoreThrottled != nil {
+		params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled)
+	}
+
+	if r.IgnoreUnavailable != nil {
+		params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.resolve_cluster")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.resolve_cluster")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f IndicesResolveCluster) WithContext(v context.Context) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.ctx = v
+	}
+}
+
+// WithName - a list of cluster:index names or wildcard expressions.
+func (f IndicesResolveCluster) WithName(v ...string) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.Name = v
+	}
+}
+
+// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). only allowed when providing an index expression..
+func (f IndicesResolveCluster) WithAllowNoIndices(v bool) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). only allowed when providing an index expression..
+func (f IndicesResolveCluster) WithExpandWildcards(v string) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. only allowed when providing an index expression..
+func (f IndicesResolveCluster) WithIgnoreThrottled(v bool) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.IgnoreThrottled = &v
+	}
+}
+
+// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). only allowed when providing an index expression..
+func (f IndicesResolveCluster) WithIgnoreUnavailable(v bool) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithTimeout - the maximum time to wait for remote clusters to respond.
+func (f IndicesResolveCluster) WithTimeout(v time.Duration) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f IndicesResolveCluster) WithPretty() func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f IndicesResolveCluster) WithHuman() func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f IndicesResolveCluster) WithErrorTrace() func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f IndicesResolveCluster) WithFilterPath(v ...string) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f IndicesResolveCluster) WithHeader(h map[string]string) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f IndicesResolveCluster) WithOpaqueID(s string) func(*IndicesResolveClusterRequest) {
+	return func(r *IndicesResolveClusterRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_index.go
new file mode 100644
index 000000000..6cb0be31d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.resolve_index.go
@@ -0,0 +1,268 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newIndicesResolveIndexFunc(t Transport) IndicesResolveIndex { + return func(name []string, o ...func(*IndicesResolveIndexRequest)) (*Response, error) { + var r = IndicesResolveIndexRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesResolveIndex returns information about any matching indices, aliases, and data streams +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html. +type IndicesResolveIndex func(name []string, o ...func(*IndicesResolveIndexRequest)) (*Response, error) + +// IndicesResolveIndexRequest configures the Indices Resolve Index API request. +type IndicesResolveIndexRequest struct { + Name []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
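`IndicesResolveIndex` differs from the cluster variant in that the name list is a required positional argument, which the `Do` method below enforces with an explicit error. A hedged sketch of resolving a wildcard to concrete indices, aliases, and data streams, assuming the same client wiring as the earlier example:

```go
package examples

import "github.com/elastic/go-elasticsearch/v9"

// resolveWildcard passes the required name slice plus one option; "open"
// mirrors the expand_wildcards default noted in the generated comment.
func resolveWildcard(es *elasticsearch.Client) error {
	res, err := es.Indices.ResolveIndex([]string{"logs-*"},
		es.Indices.ResolveIndex.WithExpandWildcards("open"),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```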
+func (r IndicesResolveIndexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.resolve_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_resolve") + 1 + len("index") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("index") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.resolve_index") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.resolve_index") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesResolveIndex) WithContext(v context.Context) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesResolveIndex) WithAllowNoIndices(v bool) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). 
+func (f IndicesResolveIndex) WithExpandWildcards(v string) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesResolveIndex) WithIgnoreUnavailable(v bool) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesResolveIndex) WithPretty() func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesResolveIndex) WithHuman() func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesResolveIndex) WithErrorTrace() func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesResolveIndex) WithFilterPath(v ...string) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesResolveIndex) WithHeader(h map[string]string) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesResolveIndex) WithOpaqueID(s string) func(*IndicesResolveIndexRequest) { + return func(r *IndicesResolveIndexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.rollover.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.rollover.go new file mode 100644 index 000000000..21bb0c2bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.rollover.go @@ -0,0 +1,319 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesRolloverFunc(t Transport) IndicesRollover { + return func(alias string, o ...func(*IndicesRolloverRequest)) (*Response, error) { + var r = IndicesRolloverRequest{Alias: alias} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesRollover updates an alias to point to a new index when the existing index +// is considered to be too large or too old. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html. +type IndicesRollover func(alias string, o ...func(*IndicesRolloverRequest)) (*Response, error) + +// IndicesRolloverRequest configures the Indices Rollover API request. +type IndicesRolloverRequest struct { + Body io.Reader + + Alias string + NewIndex string + + DryRun *bool + Lazy *bool + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesRolloverRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.rollover") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Alias) + 1 + len("_rollover") + 1 + len(r.NewIndex)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Alias) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "alias", r.Alias) + } + path.WriteString("/") + path.WriteString("_rollover") + if r.NewIndex != "" { + path.WriteString("/") + path.WriteString(r.NewIndex) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "new_index", r.NewIndex) + } + } + + params = make(map[string]string) + + if r.DryRun != nil { + params["dry_run"] = strconv.FormatBool(*r.DryRun) + } + + if r.Lazy != nil { + params["lazy"] = strconv.FormatBool(*r.Lazy) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + 
for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.rollover") + if reader := instrument.RecordRequestBody(ctx, "indices.rollover", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.rollover") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesRollover) WithContext(v context.Context) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.ctx = v + } +} + +// WithBody - The conditions that needs to be met for executing rollover. +func (f IndicesRollover) WithBody(v io.Reader) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.Body = v + } +} + +// WithNewIndex - the name of the rollover index. +func (f IndicesRollover) WithNewIndex(v string) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.NewIndex = v + } +} + +// WithDryRun - if set to true the rollover action will only be validated but not actually performed even if a condition matches. the default is false. +func (f IndicesRollover) WithDryRun(v bool) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.DryRun = &v + } +} + +// WithLazy - if set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. only allowed on data streams.. +func (f IndicesRollover) WithLazy(v bool) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.Lazy = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesRollover) WithMasterTimeout(v time.Duration) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IndicesRollover) WithTimeout(v time.Duration) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - set the number of active shards to wait for on the newly created rollover index before the operation returns.. +func (f IndicesRollover) WithWaitForActiveShards(v string) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesRollover) WithPretty() func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesRollover) WithHuman() func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
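Rollover takes the alias as its one required argument, while the conditions travel in the request body. A sketch of a dry-run call; the `conditions` JSON is an assumption based on the documented rollover body shape, not something this diff defines:

```go
package examples

import (
	"strings"

	"github.com/elastic/go-elasticsearch/v9"
)

// rolloverDryRun validates rollover conditions for the "logs-write" alias
// without actually rolling over (dry_run=true).
func rolloverDryRun(es *elasticsearch.Client) error {
	body := `{"conditions":{"max_age":"7d","max_primary_shard_size":"50gb"}}`
	res, err := es.Indices.Rollover("logs-write",
		es.Indices.Rollover.WithBody(strings.NewReader(body)),
		es.Indices.Rollover.WithDryRun(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```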
+func (f IndicesRollover) WithErrorTrace() func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesRollover) WithFilterPath(v ...string) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesRollover) WithHeader(h map[string]string) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesRollover) WithOpaqueID(s string) func(*IndicesRolloverRequest) { + return func(r *IndicesRolloverRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.segments.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.segments.go new file mode 100644 index 000000000..b1c3b4a3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.segments.go @@ -0,0 +1,270 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesSegmentsFunc(t Transport) IndicesSegments { + return func(o ...func(*IndicesSegmentsRequest)) (*Response, error) { + var r = IndicesSegmentsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesSegments provides low-level information about segments in a Lucene index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html. +type IndicesSegments func(o ...func(*IndicesSegmentsRequest)) (*Response, error) + +// IndicesSegmentsRequest configures the Indices Segments API request. +type IndicesSegmentsRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesSegmentsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.segments") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_segments")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_segments") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.segments") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.segments") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesSegments) WithContext(v context.Context) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. +func (f IndicesSegments) WithIndex(v ...string) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesSegments) WithAllowNoIndices(v bool) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. 
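Segments is index-scoped only through options: with no `WithIndex`, the call reports on all indices. A short sketch under the same assumed client wiring:

```go
package examples

import "github.com/elastic/go-elasticsearch/v9"

// segmentsReport fetches Lucene segment details for one index, asking for
// human-readable sizes in the response.
func segmentsReport(es *elasticsearch.Client) error {
	res, err := es.Indices.Segments(
		es.Indices.Segments.WithIndex("my-index"),
		es.Indices.Segments.WithHuman(),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```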
+func (f IndicesSegments) WithExpandWildcards(v string) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesSegments) WithIgnoreUnavailable(v bool) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesSegments) WithPretty() func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesSegments) WithHuman() func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesSegments) WithErrorTrace() func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesSegments) WithFilterPath(v ...string) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesSegments) WithHeader(h map[string]string) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesSegments) WithOpaqueID(s string) func(*IndicesSegmentsRequest) { + return func(r *IndicesSegmentsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shard_stores.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shard_stores.go new file mode 100644 index 000000000..a44e92cd2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shard_stores.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesShardStoresFunc(t Transport) IndicesShardStores { + return func(o ...func(*IndicesShardStoresRequest)) (*Response, error) { + var r = IndicesShardStoresRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesShardStores provides store information for shard copies of indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html. +type IndicesShardStores func(o ...func(*IndicesShardStoresRequest)) (*Response, error) + +// IndicesShardStoresRequest configures the Indices Shard Stores API request. +type IndicesShardStoresRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + Status []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesShardStoresRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.shard_stores") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_shard_stores")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_shard_stores") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if len(r.Status) > 0 { + params["status"] = strings.Join(r.Status, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.shard_stores") + } + res, err := transport.Perform(req) + 
if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.shard_stores") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesShardStores) WithContext(v context.Context) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. +func (f IndicesShardStores) WithIndex(v ...string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesShardStores) WithAllowNoIndices(v bool) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesShardStores) WithExpandWildcards(v string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesShardStores) WithIgnoreUnavailable(v bool) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithStatus - a list of statuses used to filter on shards to get store information for. +func (f IndicesShardStores) WithStatus(v ...string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.Status = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesShardStores) WithPretty() func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesShardStores) WithHuman() func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesShardStores) WithErrorTrace() func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesShardStores) WithFilterPath(v ...string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesShardStores) WithHeader(h map[string]string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
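Shard-store reporting is usually filtered by health status via the `status` list parameter shown above. A sketch; the `"yellow"`/`"red"` values are the conventional status filters and an assumption here, not values this diff defines:

```go
package examples

import "github.com/elastic/go-elasticsearch/v9"

// troubledShardStores lists store information only for shard copies of
// indices reporting yellow or red health.
func troubledShardStores(es *elasticsearch.Client) error {
	res, err := es.Indices.ShardStores(
		es.Indices.ShardStores.WithIndex("my-index"),
		es.Indices.ShardStores.WithStatus("yellow", "red"),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```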
+func (f IndicesShardStores) WithOpaqueID(s string) func(*IndicesShardStoresRequest) { + return func(r *IndicesShardStoresRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shrink.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shrink.go new file mode 100644 index 000000000..40acbfce5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.shrink.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesShrinkFunc(t Transport) IndicesShrink { + return func(index string, target string, o ...func(*IndicesShrinkRequest)) (*Response, error) { + var r = IndicesShrinkRequest{Index: index, Target: target} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesShrink allow to shrink an existing index into a new index with fewer primary shards. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html. +type IndicesShrink func(index string, target string, o ...func(*IndicesShrinkRequest)) (*Response, error) + +// IndicesShrinkRequest configures the Indices Shrink API request. +type IndicesShrinkRequest struct { + Index string + + Body io.Reader + + Target string + + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesShrinkRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.shrink") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_shrink") + 1 + len(r.Target)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_shrink") + path.WriteString("/") + path.WriteString(r.Target) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.Target) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.shrink") + if reader := instrument.RecordRequestBody(ctx, "indices.shrink", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.shrink") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesShrink) WithContext(v context.Context) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.ctx = v + } +} + +// WithBody - The configuration for the target index (`settings` and `aliases`). +func (f IndicesShrink) WithBody(v io.Reader) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.Body = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesShrink) WithMasterTimeout(v time.Duration) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
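Shrink takes both the source index and the target as required arguments, with the target's `settings` and `aliases` carried in the body. A sketch; the one-shard settings body is illustrative:

```go
package examples

import (
	"strings"

	"github.com/elastic/go-elasticsearch/v9"
)

// shrinkToOneShard shrinks "source-index" into a new "target-index" with a
// single primary shard, waiting for one active shard before returning.
func shrinkToOneShard(es *elasticsearch.Client) error {
	body := `{"settings":{"index.number_of_shards":1}}`
	res, err := es.Indices.Shrink("source-index", "target-index",
		es.Indices.Shrink.WithBody(strings.NewReader(body)),
		es.Indices.Shrink.WithWaitForActiveShards("1"),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```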
+func (f IndicesShrink) WithTimeout(v time.Duration) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - set the number of active shards to wait for on the shrunken index before the operation returns.. +func (f IndicesShrink) WithWaitForActiveShards(v string) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesShrink) WithPretty() func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesShrink) WithHuman() func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesShrink) WithErrorTrace() func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesShrink) WithFilterPath(v ...string) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesShrink) WithHeader(h map[string]string) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesShrink) WithOpaqueID(s string) func(*IndicesShrinkRequest) { + return func(r *IndicesShrinkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_index_template.go new file mode 100644 index 000000000..139d6fc5d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_index_template.go @@ -0,0 +1,293 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesSimulateIndexTemplateFunc(t Transport) IndicesSimulateIndexTemplate { + return func(name string, o ...func(*IndicesSimulateIndexTemplateRequest)) (*Response, error) { + var r = IndicesSimulateIndexTemplateRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesSimulateIndexTemplate simulate matching the given index name against the index templates in the system +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html. +type IndicesSimulateIndexTemplate func(name string, o ...func(*IndicesSimulateIndexTemplateRequest)) (*Response, error) + +// IndicesSimulateIndexTemplateRequest configures the Indices Simulate Index Template API request. +type IndicesSimulateIndexTemplateRequest struct { + Body io.Reader + + Name string + + Cause string + Create *bool + IncludeDefaults *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesSimulateIndexTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.simulate_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_index_template") + 1 + len("_simulate_index") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString("_simulate_index") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Cause != "" { + params["cause"] = r.Cause + } + + if r.Create != nil { + params["create"] = strconv.FormatBool(*r.Create) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && 
req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.simulate_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.simulate_index_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.simulate_index_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesSimulateIndexTemplate) WithContext(v context.Context) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.ctx = v + } +} + +// WithBody - New index template definition, which will be included in the simulation, as if it already exists in the system. +func (f IndicesSimulateIndexTemplate) WithBody(v io.Reader) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.Body = v + } +} + +// WithCause - user defined reason for dry-run creating the new template for simulation purposes. +func (f IndicesSimulateIndexTemplate) WithCause(v string) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.Cause = v + } +} + +// WithCreate - whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one. +func (f IndicesSimulateIndexTemplate) WithCreate(v bool) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.Create = &v + } +} + +// WithIncludeDefaults - return all relevant default configurations for this index template simulation (default: false). +func (f IndicesSimulateIndexTemplate) WithIncludeDefaults(v bool) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesSimulateIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesSimulateIndexTemplate) WithPretty() func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesSimulateIndexTemplate) WithHuman() func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesSimulateIndexTemplate) WithErrorTrace() func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
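Note that for this API the required `name` argument is an index name to match against installed templates, not a template name. A sketch with a hypothetical index name:

```go
package examples

import "github.com/elastic/go-elasticsearch/v9"

// simulateForIndex shows which index template would apply to a hypothetical
// index name, including default configurations in the rendered result.
func simulateForIndex(es *elasticsearch.Client) error {
	res, err := es.Indices.SimulateIndexTemplate("logs-2025-01-01",
		es.Indices.SimulateIndexTemplate.WithIncludeDefaults(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
```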
+func (f IndicesSimulateIndexTemplate) WithFilterPath(v ...string) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesSimulateIndexTemplate) WithHeader(h map[string]string) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesSimulateIndexTemplate) WithOpaqueID(s string) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_template.go new file mode 100644 index 000000000..3c48d17ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.simulate_template.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesSimulateTemplateFunc(t Transport) IndicesSimulateTemplate { + return func(o ...func(*IndicesSimulateTemplateRequest)) (*Response, error) { + var r = IndicesSimulateTemplateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesSimulateTemplate simulate resolving the given template name or body +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html. +type IndicesSimulateTemplate func(o ...func(*IndicesSimulateTemplateRequest)) (*Response, error) + +// IndicesSimulateTemplateRequest configures the Indices Simulate Template API request. +type IndicesSimulateTemplateRequest struct { + Body io.Reader + + Name string + + Cause string + Create *bool + IncludeDefaults *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesSimulateTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.simulate_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_index_template") + 1 + len("_simulate") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString("_simulate") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.Cause != "" { + params["cause"] = r.Cause + } + + if r.Create != nil { + params["create"] = strconv.FormatBool(*r.Create) + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.simulate_template") + if reader := instrument.RecordRequestBody(ctx, "indices.simulate_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.simulate_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesSimulateTemplate) WithContext(v context.Context) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.ctx = v + } +} + +// WithBody - New index template definition to be simulated, if no index template name is specified. +func (f IndicesSimulateTemplate) WithBody(v io.Reader) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Body = v + } +} + +// WithName - the name of the index template. 
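+// Note that when no name is set the path stays /_index_template/_simulate
+// and the definition supplied via WithBody is what gets simulated.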
+func (f IndicesSimulateTemplate) WithName(v string) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Name = v + } +} + +// WithCause - user defined reason for dry-run creating the new template for simulation purposes. +func (f IndicesSimulateTemplate) WithCause(v string) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Cause = v + } +} + +// WithCreate - whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one. +func (f IndicesSimulateTemplate) WithCreate(v bool) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Create = &v + } +} + +// WithIncludeDefaults - return all relevant default configurations for this template simulation (default: false). +func (f IndicesSimulateTemplate) WithIncludeDefaults(v bool) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesSimulateTemplate) WithMasterTimeout(v time.Duration) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesSimulateTemplate) WithPretty() func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesSimulateTemplate) WithHuman() func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesSimulateTemplate) WithErrorTrace() func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesSimulateTemplate) WithFilterPath(v ...string) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesSimulateTemplate) WithHeader(h map[string]string) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesSimulateTemplate) WithOpaqueID(s string) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.split.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.split.go new file mode 100644 index 000000000..64b71b9b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.split.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesSplitFunc(t Transport) IndicesSplit { + return func(index string, target string, o ...func(*IndicesSplitRequest)) (*Response, error) { + var r = IndicesSplitRequest{Index: index, Target: target} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesSplit allows you to split an existing index into a new index with more primary shards. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html. +type IndicesSplit func(index string, target string, o ...func(*IndicesSplitRequest)) (*Response, error) + +// IndicesSplitRequest configures the Indices Split API request. +type IndicesSplitRequest struct { + Index string + + Body io.Reader + + Target string + + MasterTimeout time.Duration + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
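+//
+// Illustrative usage sketch (assumes an initialized *elasticsearch.Client
+// named es; index names and shard count are made up). The source index must
+// be made read-only first, and the target's number_of_shards must be a
+// multiple of the source's:
+//
+//	res, err := es.Indices.Split(
+//		"logs-v1", "logs-v2",
+//		es.Indices.Split.WithBody(strings.NewReader(
+//			`{"settings":{"index.number_of_shards":4}}`,
+//		)),
+//		es.Indices.Split.WithWaitForActiveShards("all"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//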
+func (r IndicesSplitRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.split") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_split") + 1 + len(r.Target)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_split") + path.WriteString("/") + path.WriteString(r.Target) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.Target) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.split") + if reader := instrument.RecordRequestBody(ctx, "indices.split", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.split") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesSplit) WithContext(v context.Context) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.ctx = v + } +} + +// WithBody - The configuration for the target index (`settings` and `aliases`). +func (f IndicesSplit) WithBody(v io.Reader) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.Body = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesSplit) WithMasterTimeout(v time.Duration) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
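+// Note the distinction from MasterTimeout above: master_timeout bounds the
+// wait for the elected master, while timeout bounds the wait for the split
+// to be acknowledged. A zero value omits the parameter, leaving the
+// server-side default (30s) in effect.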
+func (f IndicesSplit) WithTimeout(v time.Duration) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - set the number of active shards to wait for on the shrunken index before the operation returns.. +func (f IndicesSplit) WithWaitForActiveShards(v string) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesSplit) WithPretty() func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesSplit) WithHuman() func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesSplit) WithErrorTrace() func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesSplit) WithFilterPath(v ...string) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesSplit) WithHeader(h map[string]string) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesSplit) WithOpaqueID(s string) func(*IndicesSplitRequest) { + return func(r *IndicesSplitRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.stats.go new file mode 100644 index 000000000..89fb37336 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.stats.go @@ -0,0 +1,358 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newIndicesStatsFunc(t Transport) IndicesStats { + return func(o ...func(*IndicesStatsRequest)) (*Response, error) { + var r = IndicesStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesStats provides statistics on operations happening in an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html. +type IndicesStats func(o ...func(*IndicesStatsRequest)) (*Response, error) + +// IndicesStatsRequest configures the Indices Stats API request. +type IndicesStatsRequest struct { + Index []string + + Metric []string + + CompletionFields []string + ExpandWildcards string + FielddataFields []string + Fields []string + ForbidClosedIndices *bool + Groups []string + IncludeSegmentFileSizes *bool + IncludeUnloadedSegments *bool + Level string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_stats") + 1 + len(strings.Join(r.Metric, ","))) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_stats") + if len(r.Metric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Metric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ",")) + } + } + + params = make(map[string]string) + + if len(r.CompletionFields) > 0 { + params["completion_fields"] = strings.Join(r.CompletionFields, ",") + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if len(r.FielddataFields) > 0 { + params["fielddata_fields"] = strings.Join(r.FielddataFields, ",") + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.ForbidClosedIndices != nil { + params["forbid_closed_indices"] = strconv.FormatBool(*r.ForbidClosedIndices) + } + + if len(r.Groups) > 0 { + params["groups"] = strings.Join(r.Groups, ",") + } + + if r.IncludeSegmentFileSizes != nil { + params["include_segment_file_sizes"] = strconv.FormatBool(*r.IncludeSegmentFileSizes) + } + + if r.IncludeUnloadedSegments != nil { + params["include_unloaded_segments"] = strconv.FormatBool(*r.IncludeUnloadedSegments) + } + + if r.Level != "" { + params["level"] = r.Level + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { 
+ params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesStats) WithContext(v context.Context) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names; use _all to perform the operation on all indices. +func (f IndicesStats) WithIndex(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Index = v + } +} + +// WithMetric - limit the information returned the specific metrics.. +func (f IndicesStats) WithMetric(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Metric = v + } +} + +// WithCompletionFields - a list of fields for the `completion` index metric (supports wildcards). +func (f IndicesStats) WithCompletionFields(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.CompletionFields = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesStats) WithExpandWildcards(v string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.ExpandWildcards = v + } +} + +// WithFielddataFields - a list of fields for the `fielddata` index metric (supports wildcards). +func (f IndicesStats) WithFielddataFields(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.FielddataFields = v + } +} + +// WithFields - a list of fields for `fielddata` and `completion` index metric (supports wildcards). +func (f IndicesStats) WithFields(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Fields = v + } +} + +// WithForbidClosedIndices - if set to false stats will also collected from closed indices if explicitly specified or if expand_wildcards expands to closed indices. +func (f IndicesStats) WithForbidClosedIndices(v bool) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.ForbidClosedIndices = &v + } +} + +// WithGroups - a list of search groups for `search` index metric. 
+func (f IndicesStats) WithGroups(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Groups = v + } +} + +// WithIncludeSegmentFileSizes - whether to report the aggregated disk usage of each one of the lucene index files (only applies if segment stats are requested). +func (f IndicesStats) WithIncludeSegmentFileSizes(v bool) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.IncludeSegmentFileSizes = &v + } +} + +// WithIncludeUnloadedSegments - if set to true segment stats will include stats for segments that are not currently loaded into memory. +func (f IndicesStats) WithIncludeUnloadedSegments(v bool) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.IncludeUnloadedSegments = &v + } +} + +// WithLevel - return stats aggregated at cluster, index or shard level. +func (f IndicesStats) WithLevel(v string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Level = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesStats) WithPretty() func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesStats) WithHuman() func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesStats) WithErrorTrace() func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesStats) WithFilterPath(v ...string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesStats) WithHeader(h map[string]string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesStats) WithOpaqueID(s string) func(*IndicesStatsRequest) { + return func(r *IndicesStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.update_aliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.update_aliases.go new file mode 100644 index 000000000..61cb93538 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.update_aliases.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesUpdateAliasesFunc(t Transport) IndicesUpdateAliases { + return func(body io.Reader, o ...func(*IndicesUpdateAliasesRequest)) (*Response, error) { + var r = IndicesUpdateAliasesRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesUpdateAliases updates index aliases. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html. +type IndicesUpdateAliases func(body io.Reader, o ...func(*IndicesUpdateAliasesRequest)) (*Response, error) + +// IndicesUpdateAliasesRequest configures the Indices Update Aliases API request. +type IndicesUpdateAliasesRequest struct { + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesUpdateAliasesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.update_aliases") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_aliases")) + path.WriteString("http://") + path.WriteString("/_aliases") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.update_aliases") + if reader := instrument.RecordRequestBody(ctx, "indices.update_aliases", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.update_aliases") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + 
} + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesUpdateAliases) WithContext(v context.Context) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesUpdateAliases) WithMasterTimeout(v time.Duration) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - request timeout. +func (f IndicesUpdateAliases) WithTimeout(v time.Duration) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesUpdateAliases) WithPretty() func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesUpdateAliases) WithHuman() func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesUpdateAliases) WithErrorTrace() func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesUpdateAliases) WithFilterPath(v ...string) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesUpdateAliases) WithHeader(h map[string]string) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesUpdateAliases) WithOpaqueID(s string) func(*IndicesUpdateAliasesRequest) { + return func(r *IndicesUpdateAliasesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.validate_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.validate_query.go new file mode 100644 index 000000000..82f163cd8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.indices.validate_query.go @@ -0,0 +1,397 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newIndicesValidateQueryFunc(t Transport) IndicesValidateQuery { + return func(o ...func(*IndicesValidateQueryRequest)) (*Response, error) { + var r = IndicesValidateQueryRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesValidateQuery allows a user to validate a potentially expensive query without executing it. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html. +type IndicesValidateQuery func(o ...func(*IndicesValidateQueryRequest)) (*Response, error) + +// IndicesValidateQueryRequest configures the Indices Validate Query API request. +type IndicesValidateQueryRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + AllShards *bool + Analyzer string + AnalyzeWildcard *bool + DefaultOperator string + Df string + ExpandWildcards string + Explain *bool + IgnoreUnavailable *bool + Lenient *bool + Query string + Rewrite *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesValidateQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.validate_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_validate") + 1 + len("query")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_validate") + path.WriteString("/") + path.WriteString("query") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.AllShards != nil { + params["all_shards"] = strconv.FormatBool(*r.AllShards) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Rewrite != nil { + params["rewrite"] = strconv.FormatBool(*r.Rewrite) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = 
"true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.validate_query") + if reader := instrument.RecordRequestBody(ctx, "indices.validate_query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.validate_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesValidateQuery) WithContext(v context.Context) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.ctx = v + } +} + +// WithBody - The query definition specified with the Query DSL. +func (f IndicesValidateQuery) WithBody(v io.Reader) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Body = v + } +} + +// WithIndex - a list of index names to restrict the operation; use _all to perform the operation on all indices. +func (f IndicesValidateQuery) WithIndex(v ...string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesValidateQuery) WithAllowNoIndices(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.AllowNoIndices = &v + } +} + +// WithAllShards - execute validation on all shards instead of one random shard per index. +func (f IndicesValidateQuery) WithAllShards(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.AllShards = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f IndicesValidateQuery) WithAnalyzer(v string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f IndicesValidateQuery) WithAnalyzeWildcard(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithDefaultOperator - the default operator for query string query (and or or). 
+func (f IndicesValidateQuery) WithDefaultOperator(v string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.DefaultOperator = v + } +} + +// WithDf - the field to use as default where no field prefix is given in the query string. +func (f IndicesValidateQuery) WithDf(v string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Df = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesValidateQuery) WithExpandWildcards(v string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.ExpandWildcards = v + } +} + +// WithExplain - return detailed information about the error. +func (f IndicesValidateQuery) WithExplain(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Explain = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesValidateQuery) WithIgnoreUnavailable(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. +func (f IndicesValidateQuery) WithLenient(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Lenient = &v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f IndicesValidateQuery) WithQuery(v string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Query = v + } +} + +// WithRewrite - provide a more detailed explanation showing the actual lucene query that will be executed.. +func (f IndicesValidateQuery) WithRewrite(v bool) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Rewrite = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesValidateQuery) WithPretty() func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesValidateQuery) WithHuman() func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesValidateQuery) WithErrorTrace() func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesValidateQuery) WithFilterPath(v ...string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesValidateQuery) WithHeader(h map[string]string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
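+// The opaque ID is echoed back by the tasks API and recorded in slow logs,
+// which makes it a cheap way to trace expensive validations to their caller,
+// e.g. WithOpaqueID("reporting-service-42").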
+func (f IndicesValidateQuery) WithOpaqueID(s string) func(*IndicesValidateQueryRequest) { + return func(r *IndicesValidateQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.chat_completion_unified.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.chat_completion_unified.go new file mode 100644 index 000000000..f7c25a3b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.chat_completion_unified.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceChatCompletionUnifiedFunc(t Transport) InferenceChatCompletionUnified { + return func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) { + var r = InferenceChatCompletionUnifiedRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceChatCompletionUnified perform chat completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/chat-completion-inference.html. +type InferenceChatCompletionUnified func(inference_id string, o ...func(*InferenceChatCompletionUnifiedRequest)) (*Response, error) + +// InferenceChatCompletionUnifiedRequest configures the Inference Chat Completion Unified API request. +type InferenceChatCompletionUnifiedRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
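+//
+// Illustrative usage sketch (assumes an initialized *elasticsearch.Client
+// named es, an existing chat_completion inference endpoint, and this
+// generator's usual accessor naming). Since the generated path ends in
+// /_stream, the response body is a stream and should be read incrementally:
+//
+//	res, err := es.Inference.ChatCompletionUnified(
+//		"my-chat-endpoint",
+//		es.Inference.ChatCompletionUnified.WithBody(strings.NewReader(
+//			`{"messages":[{"role":"user","content":"Hello"}]}`,
+//		)),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//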
+func (r InferenceChatCompletionUnifiedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("chat_completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceChatCompletionUnified) WithContext(v context.Context) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceChatCompletionUnified) WithBody(v io.Reader) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceChatCompletionUnified) WithPretty() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferenceChatCompletionUnified) WithHuman() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceChatCompletionUnified) WithErrorTrace() func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceChatCompletionUnified) WithFilterPath(v ...string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceChatCompletionUnified) WithHeader(h map[string]string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceChatCompletionUnified) WithOpaqueID(s string) func(*InferenceChatCompletionUnifiedRequest) { + return func(r *InferenceChatCompletionUnifiedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.completion.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.completion.go new file mode 100644 index 000000000..86f289ee3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.completion.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceCompletionFunc(t Transport) InferenceCompletion { + return func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) { + var r = InferenceCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceCompletion perform completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceCompletion func(inference_id string, o ...func(*InferenceCompletionRequest)) (*Response, error) + +// InferenceCompletionRequest configures the Inference Completion API request. 
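+//
+// Illustrative usage sketch (assumes an initialized *elasticsearch.Client
+// named es and an existing completion inference endpoint; the ID and payload
+// are made up):
+//
+//	res, err := es.Inference.Completion(
+//		"my-completion-endpoint",
+//		es.Inference.Completion.WithBody(strings.NewReader(
+//			`{"input":"Summarize this paragraph"}`,
+//		)),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//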
+type InferenceCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, "inference.completion", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceCompletion) WithContext(v context.Context) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceCompletion) WithBody(v io.Reader) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceCompletion) WithPretty() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferenceCompletion) WithHuman() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceCompletion) WithErrorTrace() func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceCompletion) WithFilterPath(v ...string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceCompletion) WithHeader(h map[string]string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceCompletion) WithOpaqueID(s string) func(*InferenceCompletionRequest) { + return func(r *InferenceCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.delete.go new file mode 100644 index 000000000..a1b92adc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.delete.go @@ -0,0 +1,264 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newInferenceDeleteFunc(t Transport) InferenceDelete { + return func(inference_id string, o ...func(*InferenceDeleteRequest)) (*Response, error) { + var r = InferenceDeleteRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceDelete delete an inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html. +type InferenceDelete func(inference_id string, o ...func(*InferenceDeleteRequest)) (*Response, error) + +// InferenceDeleteRequest configures the Inference Delete API request. 
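+//
+// Illustrative usage sketch (assumes an initialized *elasticsearch.Client
+// named es; the endpoint ID is made up):
+//
+//	res, err := es.Inference.Delete(
+//		"my-embedding-endpoint",
+//		es.Inference.Delete.WithTaskType("text_embedding"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//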
+type InferenceDeleteRequest struct {
+ InferenceID string
+ TaskType string
+
+ DryRun *bool
+ Force *bool
+
+ Pretty bool
+ Human bool
+ ErrorTrace bool
+ FilterPath []string
+
+ Header http.Header
+
+ ctx context.Context
+
+ Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r InferenceDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+ var (
+ method string
+ path strings.Builder
+ params map[string]string
+ ctx context.Context
+ )
+
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ ctx = instrument.Start(providedCtx, "inference.delete")
+ defer instrument.Close(ctx)
+ }
+ if ctx == nil {
+ ctx = providedCtx
+ }
+
+ method = "DELETE"
+
+ path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID))
+ path.WriteString("http://")
+ path.WriteString("/")
+ path.WriteString("_inference")
+ if r.TaskType != "" {
+ path.WriteString("/")
+ path.WriteString(r.TaskType)
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.RecordPathPart(ctx, "task_type", r.TaskType)
+ }
+ }
+ path.WriteString("/")
+ path.WriteString(r.InferenceID)
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.RecordPathPart(ctx, "inference_id", r.InferenceID)
+ }
+
+ params = make(map[string]string)
+
+ if r.DryRun != nil {
+ params["dry_run"] = strconv.FormatBool(*r.DryRun)
+ }
+
+ if r.Force != nil {
+ params["force"] = strconv.FormatBool(*r.Force)
+ }
+
+ if r.Pretty {
+ params["pretty"] = "true"
+ }
+
+ if r.Human {
+ params["human"] = "true"
+ }
+
+ if r.ErrorTrace {
+ params["error_trace"] = "true"
+ }
+
+ if len(r.FilterPath) > 0 {
+ params["filter_path"] = strings.Join(r.FilterPath, ",")
+ }
+
+ req, err := newRequest(method, path.String(), nil)
+ if err != nil {
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.RecordError(ctx, err)
+ }
+ return nil, err
+ }
+
+ if len(params) > 0 {
+ q := req.URL.Query()
+ for k, v := range params {
+ q.Set(k, v)
+ }
+ req.URL.RawQuery = q.Encode()
+ }
+
+ if len(r.Header) > 0 {
+ if len(req.Header) == 0 {
+ req.Header = r.Header
+ } else {
+ for k, vv := range r.Header {
+ for _, v := range vv {
+ req.Header.Add(k, v)
+ }
+ }
+ }
+ }
+
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
+
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.BeforeRequest(req, "inference.delete")
+ }
+ res, err := transport.Perform(req)
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.AfterRequest(req, "elasticsearch", "inference.delete")
+ }
+ if err != nil {
+ if instrument, ok := r.Instrument.(Instrumentation); ok {
+ instrument.RecordError(ctx, err)
+ }
+ return nil, err
+ }
+
+ response := Response{
+ StatusCode: res.StatusCode,
+ Body: res.Body,
+ Header: res.Header,
+ }
+
+ return &response, nil
+}
+
+// WithContext sets the request context.
+func (f InferenceDelete) WithContext(v context.Context) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.ctx = v
+ }
+}
+
+// WithTaskType - the task type.
+func (f InferenceDelete) WithTaskType(v string) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.TaskType = v
+ }
+}
+
+// WithDryRun - if true, the endpoint will not be deleted and a list of ingest processors which reference this endpoint will be returned.
+func (f InferenceDelete) WithDryRun(v bool) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.DryRun = &v
+ }
+}
+
+// WithForce - if true, the endpoint will be forcefully stopped (regardless of whether or not it is referenced by any ingest processors or semantic text fields).
+func (f InferenceDelete) WithForce(v bool) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.Force = &v
+ }
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f InferenceDelete) WithPretty() func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.Pretty = true
+ }
+}
+
+// WithHuman makes statistical values human-readable.
+func (f InferenceDelete) WithHuman() func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.Human = true
+ }
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f InferenceDelete) WithErrorTrace() func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.ErrorTrace = true
+ }
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f InferenceDelete) WithFilterPath(v ...string) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ r.FilterPath = v
+ }
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f InferenceDelete) WithHeader(h map[string]string) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ for k, v := range h {
+ r.Header.Add(k, v)
+ }
+ }
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f InferenceDelete) WithOpaqueID(s string) func(*InferenceDeleteRequest) {
+ return func(r *InferenceDeleteRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set("X-Opaque-Id", s)
+ }
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.get.go
new file mode 100644
index 000000000..4750bf64b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.get.go
@@ -0,0 +1,247 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
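+//
+// Illustrative usage, not part of the generated file: assuming the esapi
+// root client exposes this endpoint as es.Inference.Get (with es an
+// initialized *elasticsearch.Client), retrieving an endpoint looks like:
+//
+//	res, err := es.Inference.Get(
+//	    es.Inference.Get.WithTaskType("completion"),
+//	    es.Inference.Get.WithInferenceID("my-endpoint"),
+//	)
+//	if err == nil {
+//	    defer res.Body.Close() // the caller owns the response body
+//	}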
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newInferenceGetFunc(t Transport) InferenceGet { + return func(o ...func(*InferenceGetRequest)) (*Response, error) { + var r = InferenceGetRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceGet get an inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html. +type InferenceGet func(o ...func(*InferenceGetRequest)) (*Response, error) + +// InferenceGetRequest configures the Inference Get API request. +type InferenceGetRequest struct { + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + if r.InferenceID != "" { + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil 
+} + +// WithContext sets the request context. +func (f InferenceGet) WithContext(v context.Context) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.ctx = v + } +} + +// WithInferenceID - the inference ID. +func (f InferenceGet) WithInferenceID(v string) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.InferenceID = v + } +} + +// WithTaskType - the task type. +func (f InferenceGet) WithTaskType(v string) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceGet) WithPretty() func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceGet) WithHuman() func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceGet) WithErrorTrace() func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceGet) WithFilterPath(v ...string) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceGet) WithHeader(h map[string]string) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceGet) WithOpaqueID(s string) func(*InferenceGetRequest) { + return func(r *InferenceGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.inference.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.inference.go new file mode 100644 index 000000000..ad9205669 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.inference.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
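+//
+// Illustrative usage, not part of the generated file: assuming the esapi
+// root client exposes this endpoint as es.Inference.Inference (with es an
+// initialized *elasticsearch.Client), performing inference against an
+// existing endpoint looks like:
+//
+//	res, err := es.Inference.Inference(
+//	    "my-endpoint",
+//	    es.Inference.Inference.WithBody(strings.NewReader(`{"input":"some text"}`)),
+//	)
+//	if err == nil {
+//	    defer res.Body.Close() // the caller owns the response body
+//	}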
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceInferenceFunc(t Transport) InferenceInference { + return func(inference_id string, o ...func(*InferenceInferenceRequest)) (*Response, error) { + var r = InferenceInferenceRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceInference perform inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceInference func(inference_id string, o ...func(*InferenceInferenceRequest)) (*Response, error) + +// InferenceInferenceRequest configures the Inference Inference API request. +type InferenceInferenceRequest struct { + Body io.Reader + + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceInferenceRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.inference") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.inference") + if reader := instrument.RecordRequestBody(ctx, "inference.inference", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if 
instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.inference") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceInference) WithContext(v context.Context) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceInference) WithBody(v io.Reader) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Body = v + } +} + +// WithTaskType - the task type. +func (f InferenceInference) WithTaskType(v string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceInference) WithPretty() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceInference) WithHuman() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceInference) WithErrorTrace() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceInference) WithFilterPath(v ...string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceInference) WithHeader(h map[string]string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceInference) WithOpaqueID(s string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put.go new file mode 100644 index 000000000..8146f46d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutFunc(t Transport) InferencePut { + return func(inference_id string, o ...func(*InferencePutRequest)) (*Response, error) { + var r = InferencePutRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePut configure an inference endpoint for use in the Inference API +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html. +type InferencePut func(inference_id string, o ...func(*InferencePutRequest)) (*Response, error) + +// InferencePutRequest configures the Inference Put API request. +type InferencePutRequest struct { + Body io.Reader + + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put") + if reader := instrument.RecordRequestBody(ctx, "inference.put", r.Body); reader != nil { + req.Body 
= reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePut) WithContext(v context.Context) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePut) WithBody(v io.Reader) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.Body = v + } +} + +// WithTaskType - the task type. +func (f InferencePut) WithTaskType(v string) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePut) WithPretty() func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePut) WithHuman() func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePut) WithErrorTrace() func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePut) WithFilterPath(v ...string) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePut) WithHeader(h map[string]string) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePut) WithOpaqueID(s string) func(*InferencePutRequest) { + return func(r *InferencePutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_alibabacloud.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_alibabacloud.go new file mode 100644 index 000000000..79daa0b4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_alibabacloud.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
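+//
+// Illustrative usage, not part of the generated file: assuming the esapi
+// root client exposes this endpoint as es.Inference.PutAlibabacloud and
+// body is an io.Reader holding the JSON service configuration, creating
+// an endpoint looks like:
+//
+//	res, err := es.Inference.PutAlibabacloud(
+//	    "my-alibabacloud-endpoint",
+//	    "text_embedding",
+//	    es.Inference.PutAlibabacloud.WithBody(body),
+//	)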
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutAlibabacloudFunc(t Transport) InferencePutAlibabacloud { + return func(alibabacloud_inference_id string, task_type string, o ...func(*InferencePutAlibabacloudRequest)) (*Response, error) { + var r = InferencePutAlibabacloudRequest{AlibabacloudInferenceID: alibabacloud_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutAlibabacloud configure an AlibabaCloud AI Search inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-alibabacloud-ai-search.html. +type InferencePutAlibabacloud func(alibabacloud_inference_id string, task_type string, o ...func(*InferencePutAlibabacloudRequest)) (*Response, error) + +// InferencePutAlibabacloudRequest configures the Inference Put Alibabacloud API request. +type InferencePutAlibabacloudRequest struct { + Body io.Reader + + AlibabacloudInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutAlibabacloudRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_alibabacloud") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AlibabacloudInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AlibabacloudInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "alibabacloud_inference_id", r.AlibabacloudInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, 
ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_alibabacloud") + if reader := instrument.RecordRequestBody(ctx, "inference.put_alibabacloud", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_alibabacloud") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutAlibabacloud) WithContext(v context.Context) func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAlibabacloud) WithBody(v io.Reader) func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAlibabacloud) WithPretty() func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutAlibabacloud) WithHuman() func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutAlibabacloud) WithErrorTrace() func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutAlibabacloud) WithFilterPath(v ...string) func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutAlibabacloud) WithHeader(h map[string]string) func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutAlibabacloud) WithOpaqueID(s string) func(*InferencePutAlibabacloudRequest) { + return func(r *InferencePutAlibabacloudRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonbedrock.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonbedrock.go new file mode 100644 index 000000000..5fc782259 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonbedrock.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutAmazonbedrockFunc(t Transport) InferencePutAmazonbedrock { + return func(amazonbedrock_inference_id string, task_type string, o ...func(*InferencePutAmazonbedrockRequest)) (*Response, error) { + var r = InferencePutAmazonbedrockRequest{AmazonbedrockInferenceID: amazonbedrock_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutAmazonbedrock configure an Amazon Bedrock inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-bedrock.html. +type InferencePutAmazonbedrock func(amazonbedrock_inference_id string, task_type string, o ...func(*InferencePutAmazonbedrockRequest)) (*Response, error) + +// InferencePutAmazonbedrockRequest configures the Inference Put Amazonbedrock API request. +type InferencePutAmazonbedrockRequest struct { + Body io.Reader + + AmazonbedrockInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferencePutAmazonbedrockRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_amazonbedrock") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AmazonbedrockInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AmazonbedrockInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "amazonbedrock_inference_id", r.AmazonbedrockInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_amazonbedrock") + if reader := instrument.RecordRequestBody(ctx, "inference.put_amazonbedrock", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_amazonbedrock") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutAmazonbedrock) WithContext(v context.Context) func(*InferencePutAmazonbedrockRequest) { + return func(r *InferencePutAmazonbedrockRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAmazonbedrock) WithBody(v io.Reader) func(*InferencePutAmazonbedrockRequest) { + return func(r *InferencePutAmazonbedrockRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAmazonbedrock) WithPretty() func(*InferencePutAmazonbedrockRequest) { + return func(r *InferencePutAmazonbedrockRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferencePutAmazonbedrock) WithHuman() func(*InferencePutAmazonbedrockRequest) {
+ return func(r *InferencePutAmazonbedrockRequest) {
+ r.Human = true
+ }
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f InferencePutAmazonbedrock) WithErrorTrace() func(*InferencePutAmazonbedrockRequest) {
+ return func(r *InferencePutAmazonbedrockRequest) {
+ r.ErrorTrace = true
+ }
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f InferencePutAmazonbedrock) WithFilterPath(v ...string) func(*InferencePutAmazonbedrockRequest) {
+ return func(r *InferencePutAmazonbedrockRequest) {
+ r.FilterPath = v
+ }
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f InferencePutAmazonbedrock) WithHeader(h map[string]string) func(*InferencePutAmazonbedrockRequest) {
+ return func(r *InferencePutAmazonbedrockRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ for k, v := range h {
+ r.Header.Add(k, v)
+ }
+ }
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f InferencePutAmazonbedrock) WithOpaqueID(s string) func(*InferencePutAmazonbedrockRequest) {
+ return func(r *InferencePutAmazonbedrockRequest) {
+ if r.Header == nil {
+ r.Header = make(http.Header)
+ }
+ r.Header.Set("X-Opaque-Id", s)
+ }
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonsagemaker.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonsagemaker.go
new file mode 100644
index 000000000..4dc2eb09c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_amazonsagemaker.go
@@ -0,0 +1,246 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func newInferencePutAmazonsagemakerFunc(t Transport) InferencePutAmazonsagemaker {
+ return func(amazonsagemaker_inference_id string, task_type string, o ...func(*InferencePutAmazonsagemakerRequest)) (*Response, error) {
+ var r = InferencePutAmazonsagemakerRequest{AmazonsagemakerInferenceID: amazonsagemaker_inference_id, TaskType: task_type}
+ for _, f := range o {
+ f(&r)
+ }
+
+ if transport, ok := t.(Instrumented); ok {
+ r.Instrument = transport.InstrumentationEnabled()
+ }
+
+ return r.Do(r.ctx, t)
+ }
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// InferencePutAmazonsagemaker configure an Amazon SageMaker inference endpoint
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-sagemaker.html.
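+//
+// Illustrative call shape, not part of the generated file (assumes es is
+// an initialized *elasticsearch.Client and body is an io.Reader with the
+// JSON service and task settings):
+//
+//	res, err := es.Inference.PutAmazonsagemaker(
+//	    "my-sagemaker-endpoint",
+//	    "text_embedding",
+//	    es.Inference.PutAmazonsagemaker.WithBody(body),
+//	)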
+type InferencePutAmazonsagemaker func(amazonsagemaker_inference_id string, task_type string, o ...func(*InferencePutAmazonsagemakerRequest)) (*Response, error) + +// InferencePutAmazonsagemakerRequest configures the Inference Put Amazonsagemaker API request. +type InferencePutAmazonsagemakerRequest struct { + Body io.Reader + + AmazonsagemakerInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutAmazonsagemakerRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_amazonsagemaker") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AmazonsagemakerInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AmazonsagemakerInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "amazonsagemaker_inference_id", r.AmazonsagemakerInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_amazonsagemaker") + if reader := instrument.RecordRequestBody(ctx, "inference.put_amazonsagemaker", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_amazonsagemaker") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f InferencePutAmazonsagemaker) WithContext(v context.Context) func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAmazonsagemaker) WithBody(v io.Reader) func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAmazonsagemaker) WithPretty() func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutAmazonsagemaker) WithHuman() func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutAmazonsagemaker) WithErrorTrace() func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutAmazonsagemaker) WithFilterPath(v ...string) func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutAmazonsagemaker) WithHeader(h map[string]string) func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutAmazonsagemaker) WithOpaqueID(s string) func(*InferencePutAmazonsagemakerRequest) { + return func(r *InferencePutAmazonsagemakerRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_anthropic.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_anthropic.go new file mode 100644 index 000000000..427ae680b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_anthropic.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
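+//
+// Illustrative usage, not part of the generated file: assuming the esapi
+// root client exposes this endpoint as es.Inference.PutAnthropic and body
+// is an io.Reader with the JSON service configuration, an Anthropic
+// completion endpoint is created like:
+//
+//	res, err := es.Inference.PutAnthropic(
+//	    "my-anthropic-endpoint",
+//	    "completion",
+//	    es.Inference.PutAnthropic.WithBody(body),
+//	)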
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutAnthropicFunc(t Transport) InferencePutAnthropic { + return func(anthropic_inference_id string, task_type string, o ...func(*InferencePutAnthropicRequest)) (*Response, error) { + var r = InferencePutAnthropicRequest{AnthropicInferenceID: anthropic_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutAnthropic configure an Anthropic inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-anthropic.html. +type InferencePutAnthropic func(anthropic_inference_id string, task_type string, o ...func(*InferencePutAnthropicRequest)) (*Response, error) + +// InferencePutAnthropicRequest configures the Inference Put Anthropic API request. +type InferencePutAnthropicRequest struct { + Body io.Reader + + AnthropicInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutAnthropicRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_anthropic") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AnthropicInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AnthropicInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "anthropic_inference_id", r.AnthropicInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"inference.put_anthropic") + if reader := instrument.RecordRequestBody(ctx, "inference.put_anthropic", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_anthropic") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutAnthropic) WithContext(v context.Context) func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAnthropic) WithBody(v io.Reader) func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAnthropic) WithPretty() func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutAnthropic) WithHuman() func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutAnthropic) WithErrorTrace() func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutAnthropic) WithFilterPath(v ...string) func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutAnthropic) WithHeader(h map[string]string) func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutAnthropic) WithOpaqueID(s string) func(*InferencePutAnthropicRequest) { + return func(r *InferencePutAnthropicRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureaistudio.go new file mode 100644 index 000000000..187125862 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureaistudio.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutAzureaistudioFunc(t Transport) InferencePutAzureaistudio { + return func(azureaistudio_inference_id string, task_type string, o ...func(*InferencePutAzureaistudioRequest)) (*Response, error) { + var r = InferencePutAzureaistudioRequest{AzureaistudioInferenceID: azureaistudio_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutAzureaistudio configure an Azure AI Studio inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-ai-studio.html. +type InferencePutAzureaistudio func(azureaistudio_inference_id string, task_type string, o ...func(*InferencePutAzureaistudioRequest)) (*Response, error) + +// InferencePutAzureaistudioRequest configures the Inference Put Azureaistudio API request. +type InferencePutAzureaistudioRequest struct { + Body io.Reader + + AzureaistudioInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferencePutAzureaistudioRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_azureaistudio") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AzureaistudioInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AzureaistudioInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "azureaistudio_inference_id", r.AzureaistudioInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_azureaistudio") + if reader := instrument.RecordRequestBody(ctx, "inference.put_azureaistudio", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_azureaistudio") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutAzureaistudio) WithContext(v context.Context) func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAzureaistudio) WithBody(v io.Reader) func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAzureaistudio) WithPretty() func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
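Since each With* helper is nothing more than a function returning a closure over the exported request struct, callers can mix options of their own in with the generated ones. A small sketch; the helper name is mine, not the library's:

    // import "github.com/elastic/go-elasticsearch/v9/esapi"

    // withDebugOutput is a hypothetical user-defined option; it is legal
    // because options are plain functions over the exported request struct.
    func withDebugOutput() func(*esapi.InferencePutAzureaistudioRequest) {
        return func(r *esapi.InferencePutAzureaistudioRequest) {
            r.Pretty = true // pretty-print the response body
            r.Human = true  // human-readable statistical values
        }
    }

    // Usage: es.Inference.PutAzureaistudio(id, taskType, withDebugOutput(), ...)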
+func (f InferencePutAzureaistudio) WithHuman() func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutAzureaistudio) WithErrorTrace() func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutAzureaistudio) WithFilterPath(v ...string) func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutAzureaistudio) WithHeader(h map[string]string) func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutAzureaistudio) WithOpaqueID(s string) func(*InferencePutAzureaistudioRequest) { + return func(r *InferencePutAzureaistudioRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureopenai.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureopenai.go new file mode 100644 index 000000000..7b370c124 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_azureopenai.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutAzureopenaiFunc(t Transport) InferencePutAzureopenai { + return func(azureopenai_inference_id string, task_type string, o ...func(*InferencePutAzureopenaiRequest)) (*Response, error) { + var r = InferencePutAzureopenaiRequest{AzureopenaiInferenceID: azureopenai_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutAzureopenai configure an Azure OpenAI inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-openai.html. 
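For the Azure OpenAI variant declared just below, only the request body changes relative to the sketch above. The service name and service_settings fields here follow the Elasticsearch inference documentation and are illustrative; the exact fields can vary by stack version:

    body := strings.NewReader(`{
      "service": "azureopenai",
      "service_settings": {
        "api_key": "<key>",
        "resource_name": "<azure resource>",
        "deployment_id": "<deployment>",
        "api_version": "2024-02-01"
      }
    }`)
    res, err := es.Inference.PutAzureopenai("my-azure-openai-endpoint", "text_embedding",
        es.Inference.PutAzureopenai.WithBody(body))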
+type InferencePutAzureopenai func(azureopenai_inference_id string, task_type string, o ...func(*InferencePutAzureopenaiRequest)) (*Response, error) + +// InferencePutAzureopenaiRequest configures the Inference Put Azureopenai API request. +type InferencePutAzureopenaiRequest struct { + Body io.Reader + + AzureopenaiInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutAzureopenaiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_azureopenai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.AzureopenaiInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.AzureopenaiInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "azureopenai_inference_id", r.AzureopenaiInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_azureopenai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_azureopenai", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_azureopenai") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f InferencePutAzureopenai) WithContext(v context.Context) func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutAzureopenai) WithBody(v io.Reader) func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutAzureopenai) WithPretty() func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutAzureopenai) WithHuman() func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutAzureopenai) WithErrorTrace() func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutAzureopenai) WithFilterPath(v ...string) func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutAzureopenai) WithHeader(h map[string]string) func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutAzureopenai) WithOpaqueID(s string) func(*InferencePutAzureopenaiRequest) { + return func(r *InferencePutAzureopenaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_cohere.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_cohere.go new file mode 100644 index 000000000..732509ee5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_cohere.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
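A reading note for the Do bodies repeated throughout this diff: instrumentation is entirely opt-in. The constructor asks the transport whether it is Instrumented and stores the result, and Do then threads one lifecycle through every request: Start opens an observation scope, RecordPathPart tags the two path parameters, BeforeRequest and RecordRequestBody fire just before Perform, AfterRequest fires just after, RecordError captures failures, and the deferred Close ends the scope. Wiring it up is a transport concern; a sketch under the assumption that the client config accepts an elastic-transport Instrumentation implementation:

    // Assumption: the v9 client exposes an Instrumentation hook in its Config,
    // mirroring elastic-transport-go; substitute your own implementation.
    var myInstrumentation elastictransport.Instrumentation // e.g. an OpenTelemetry-backed one

    cfg := elasticsearch.Config{
        Instrumentation: myInstrumentation,
    }
    es, err := elasticsearch.NewClient(cfg)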
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutCohereFunc(t Transport) InferencePutCohere { + return func(cohere_inference_id string, task_type string, o ...func(*InferencePutCohereRequest)) (*Response, error) { + var r = InferencePutCohereRequest{CohereInferenceID: cohere_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutCohere configure a Cohere inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-cohere.html. +type InferencePutCohere func(cohere_inference_id string, task_type string, o ...func(*InferencePutCohereRequest)) (*Response, error) + +// InferencePutCohereRequest configures the Inference Put Cohere API request. +type InferencePutCohereRequest struct { + Body io.Reader + + CohereInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutCohereRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_cohere") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.CohereInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.CohereInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "cohere_inference_id", r.CohereInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_cohere") + if reader := instrument.RecordRequestBody(ctx, 
"inference.put_cohere", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_cohere") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutCohere) WithContext(v context.Context) func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutCohere) WithBody(v io.Reader) func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutCohere) WithPretty() func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutCohere) WithHuman() func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutCohere) WithErrorTrace() func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutCohere) WithFilterPath(v ...string) func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutCohere) WithHeader(h map[string]string) func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutCohere) WithOpaqueID(s string) func(*InferencePutCohereRequest) { + return func(r *InferencePutCohereRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_custom.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_custom.go new file mode 100644 index 000000000..46cc73357 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_custom.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutCustomFunc(t Transport) InferencePutCustom { + return func(custom_inference_id string, task_type string, o ...func(*InferencePutCustomRequest)) (*Response, error) { + var r = InferencePutCustomRequest{CustomInferenceID: custom_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutCustom configure a custom inference endpoint +// +// See full documentation at https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom. +type InferencePutCustom func(custom_inference_id string, task_type string, o ...func(*InferencePutCustomRequest)) (*Response, error) + +// InferencePutCustomRequest configures the Inference Put Custom API request. +type InferencePutCustomRequest struct { + Body io.Reader + + CustomInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutCustomRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_custom") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.CustomInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.CustomInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "custom_inference_id", r.CustomInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "inference.put_custom") + if reader := instrument.RecordRequestBody(ctx, "inference.put_custom", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_custom") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutCustom) WithContext(v context.Context) func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutCustom) WithBody(v io.Reader) func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutCustom) WithPretty() func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutCustom) WithHuman() func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutCustom) WithErrorTrace() func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutCustom) WithFilterPath(v ...string) func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutCustom) WithHeader(h map[string]string) func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutCustom) WithOpaqueID(s string) func(*InferencePutCustomRequest) { + return func(r *InferencePutCustomRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_deepseek.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_deepseek.go new file mode 100644 index 000000000..dc1c45857 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_deepseek.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutDeepseekFunc(t Transport) InferencePutDeepseek { + return func(deepseek_inference_id string, task_type string, o ...func(*InferencePutDeepseekRequest)) (*Response, error) { + var r = InferencePutDeepseekRequest{DeepseekInferenceID: deepseek_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutDeepseek configure a DeepSeek inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-deepseek.html. +type InferencePutDeepseek func(deepseek_inference_id string, task_type string, o ...func(*InferencePutDeepseekRequest)) (*Response, error) + +// InferencePutDeepseekRequest configures the Inference Put Deepseek API request. +type InferencePutDeepseekRequest struct { + Body io.Reader + + DeepseekInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
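Continuing the earlier sketch, a compact call against the DeepSeek endpoint defined above, this time with the response-side checks the first example omitted. IsError and String are the usual esapi response helpers; the task type and body are illustrative:

    res, err := es.Inference.PutDeepseek("my-deepseek-endpoint", "chat_completion",
        es.Inference.PutDeepseek.WithBody(strings.NewReader(
            `{"service": "deepseek", "service_settings": {"api_key": "<key>", "model_id": "deepseek-chat"}}`)),
        es.Inference.PutDeepseek.WithContext(ctx),
    )
    if err != nil {
        return err // transport-level failure
    }
    defer res.Body.Close()
    if res.IsError() {
        return fmt.Errorf("inference.put_deepseek: %s", res.String()) // HTTP-level failure
    }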
+func (r InferencePutDeepseekRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_deepseek") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.DeepseekInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.DeepseekInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "deepseek_inference_id", r.DeepseekInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_deepseek") + if reader := instrument.RecordRequestBody(ctx, "inference.put_deepseek", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_deepseek") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutDeepseek) WithContext(v context.Context) func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutDeepseek) WithBody(v io.Reader) func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutDeepseek) WithPretty() func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
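As the Do body above makes explicit, the four common flags map one-to-one onto query parameters: Pretty becomes pretty=true, Human becomes human=true, ErrorTrace becomes error_trace=true, and FilterPath is comma-joined into filter_path. So trimming a response to a couple of fields is just:

    // The filter_path values here are illustrative response fields.
    res, err := es.Inference.PutDeepseek(id, taskType,
        es.Inference.PutDeepseek.WithFilterPath("inference_id", "task_type"),
        es.Inference.PutDeepseek.WithErrorTrace(),
    )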
+func (f InferencePutDeepseek) WithHuman() func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutDeepseek) WithErrorTrace() func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutDeepseek) WithFilterPath(v ...string) func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutDeepseek) WithHeader(h map[string]string) func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutDeepseek) WithOpaqueID(s string) func(*InferencePutDeepseekRequest) { + return func(r *InferencePutDeepseekRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elasticsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elasticsearch.go new file mode 100644 index 000000000..9acf9bb4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elasticsearch.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutElasticsearchFunc(t Transport) InferencePutElasticsearch { + return func(elasticsearch_inference_id string, task_type string, o ...func(*InferencePutElasticsearchRequest)) (*Response, error) { + var r = InferencePutElasticsearchRequest{ElasticsearchInferenceID: elasticsearch_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutElasticsearch configure an Elasticsearch inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-elasticsearch.html. 
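The elasticsearch service declared just below is the one variant that provisions endpoints backed by models deployed inside the cluster rather than by a third-party API. An illustrative body, with settings borrowed from the inference documentation:

    // .multilingual-e5-small is a built-in text-embedding model; the
    // allocation settings are placeholders.
    body := strings.NewReader(`{
      "service": "elasticsearch",
      "service_settings": {
        "model_id": ".multilingual-e5-small",
        "num_allocations": 1,
        "num_threads": 1
      }
    }`)
    res, err := es.Inference.PutElasticsearch("my-e5-endpoint", "text_embedding",
        es.Inference.PutElasticsearch.WithBody(body))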
+type InferencePutElasticsearch func(elasticsearch_inference_id string, task_type string, o ...func(*InferencePutElasticsearchRequest)) (*Response, error) + +// InferencePutElasticsearchRequest configures the Inference Put Elasticsearch API request. +type InferencePutElasticsearchRequest struct { + Body io.Reader + + ElasticsearchInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutElasticsearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_elasticsearch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ElasticsearchInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.ElasticsearchInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "elasticsearch_inference_id", r.ElasticsearchInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_elasticsearch") + if reader := instrument.RecordRequestBody(ctx, "inference.put_elasticsearch", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_elasticsearch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f InferencePutElasticsearch) WithContext(v context.Context) func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutElasticsearch) WithBody(v io.Reader) func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutElasticsearch) WithPretty() func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutElasticsearch) WithHuman() func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutElasticsearch) WithErrorTrace() func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutElasticsearch) WithFilterPath(v ...string) func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutElasticsearch) WithHeader(h map[string]string) func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutElasticsearch) WithOpaqueID(s string) func(*InferencePutElasticsearchRequest) { + return func(r *InferencePutElasticsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elser.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elser.go new file mode 100644 index 000000000..6536e3a17 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_elser.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
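Next comes the ELSER variant. ELSER is Elastic's sparse-vector retrieval model, so the natural task type is sparse_embedding; note that recent inference docs steer new endpoints toward the generic elasticsearch service and keep elser for compatibility. Illustrative:

    res, err := es.Inference.PutElser("my-elser-endpoint", "sparse_embedding",
        es.Inference.PutElser.WithBody(strings.NewReader(
            `{"service": "elser", "service_settings": {"num_allocations": 1, "num_threads": 1}}`)),
    )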
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutElserFunc(t Transport) InferencePutElser { + return func(elser_inference_id string, task_type string, o ...func(*InferencePutElserRequest)) (*Response, error) { + var r = InferencePutElserRequest{ElserInferenceID: elser_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutElser configure an ELSER inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-elser.html. +type InferencePutElser func(elser_inference_id string, task_type string, o ...func(*InferencePutElserRequest)) (*Response, error) + +// InferencePutElserRequest configures the Inference Put Elser API request. +type InferencePutElserRequest struct { + Body io.Reader + + ElserInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutElserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_elser") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ElserInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.ElserInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "elser_inference_id", r.ElserInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_elser") + if reader := instrument.RecordRequestBody(ctx, "inference.put_elser", r.Body); 
reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_elser") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutElser) WithContext(v context.Context) func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutElser) WithBody(v io.Reader) func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutElser) WithPretty() func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutElser) WithHuman() func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutElser) WithErrorTrace() func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutElser) WithFilterPath(v ...string) func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutElser) WithHeader(h map[string]string) func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutElser) WithOpaqueID(s string) func(*InferencePutElserRequest) { + return func(r *InferencePutElserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googleaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googleaistudio.go new file mode 100644 index 000000000..05c849b14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googleaistudio.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutGoogleaistudioFunc(t Transport) InferencePutGoogleaistudio { + return func(googleaistudio_inference_id string, task_type string, o ...func(*InferencePutGoogleaistudioRequest)) (*Response, error) { + var r = InferencePutGoogleaistudioRequest{GoogleaistudioInferenceID: googleaistudio_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutGoogleaistudio configure a Google AI Studio inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-google-ai-studio.html. +type InferencePutGoogleaistudio func(googleaistudio_inference_id string, task_type string, o ...func(*InferencePutGoogleaistudioRequest)) (*Response, error) + +// InferencePutGoogleaistudioRequest configures the Inference Put Googleaistudio API request. +type InferencePutGoogleaistudioRequest struct { + Body io.Reader + + GoogleaistudioInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutGoogleaistudioRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_googleaistudio") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.GoogleaistudioInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.GoogleaistudioInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "googleaistudio_inference_id", r.GoogleaistudioInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = 
req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_googleaistudio") + if reader := instrument.RecordRequestBody(ctx, "inference.put_googleaistudio", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_googleaistudio") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutGoogleaistudio) WithContext(v context.Context) func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutGoogleaistudio) WithBody(v io.Reader) func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutGoogleaistudio) WithPretty() func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutGoogleaistudio) WithHuman() func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutGoogleaistudio) WithErrorTrace() func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutGoogleaistudio) WithFilterPath(v ...string) func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutGoogleaistudio) WithHeader(h map[string]string) func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutGoogleaistudio) WithOpaqueID(s string) func(*InferencePutGoogleaistudioRequest) { + return func(r *InferencePutGoogleaistudioRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googlevertexai.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googlevertexai.go new file mode 100644 index 000000000..a34714a4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_googlevertexai.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutGooglevertexaiFunc(t Transport) InferencePutGooglevertexai { + return func(googlevertexai_inference_id string, task_type string, o ...func(*InferencePutGooglevertexaiRequest)) (*Response, error) { + var r = InferencePutGooglevertexaiRequest{GooglevertexaiInferenceID: googlevertexai_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutGooglevertexai configure a Google Vertex AI inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-google-vertex-ai.html. +type InferencePutGooglevertexai func(googlevertexai_inference_id string, task_type string, o ...func(*InferencePutGooglevertexaiRequest)) (*Response, error) + +// InferencePutGooglevertexaiRequest configures the Inference Put Googlevertexai API request. +type InferencePutGooglevertexaiRequest struct { + Body io.Reader + + GooglevertexaiInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
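One recurring detail in these generated Do bodies deserves a comment: every path builder is seeded with path.Grow(7 + ...) followed by path.WriteString("http://"), seven bytes, before the real /_inference/... segments, and the resulting string goes to newRequest as-is. Reading the code, the scheme-only prefix looks like a generator convention, with the real scheme and host substituted later by elastic-transport's node selection, so callers never observe it; treat that as an inference from this hunk, not a documented contract.

    // What the builder assembles for, e.g., PutGooglevertexai:
    //   "http://" + "/" + "_inference" + "/" + task_type + "/" + id
    //   -> "http:///_inference/{task_type}/{googlevertexai_inference_id}"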
+func (r InferencePutGooglevertexaiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_googlevertexai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.GooglevertexaiInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.GooglevertexaiInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "googlevertexai_inference_id", r.GooglevertexaiInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_googlevertexai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_googlevertexai", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_googlevertexai") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutGooglevertexai) WithContext(v context.Context) func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutGooglevertexai) WithBody(v io.Reader) func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutGooglevertexai) WithPretty() func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferencePutGooglevertexai) WithHuman() func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutGooglevertexai) WithErrorTrace() func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutGooglevertexai) WithFilterPath(v ...string) func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutGooglevertexai) WithHeader(h map[string]string) func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutGooglevertexai) WithOpaqueID(s string) func(*InferencePutGooglevertexaiRequest) { + return func(r *InferencePutGooglevertexaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_hugging_face.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_hugging_face.go new file mode 100644 index 000000000..5cc5ff1a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_hugging_face.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutHuggingFaceFunc(t Transport) InferencePutHuggingFace { + return func(huggingface_inference_id string, task_type string, o ...func(*InferencePutHuggingFaceRequest)) (*Response, error) { + var r = InferencePutHuggingFaceRequest{HuggingfaceInferenceID: huggingface_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutHuggingFace configure a HuggingFace inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-hugging-face.html. 
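For orientation: these per-service PUT helpers are thin variations on one another. Each issues PUT /_inference/{task_type}/{inference_id} and differs only in parameter naming and instrumentation labels, so swapping providers is a one-line change at the call site. An illustrative pair, assuming the usual esapi namespacing and hypothetical endpoint IDs, with settings an io.Reader holding the service config:

	res, err := es.Inference.PutHuggingFace("my-hf-endpoint", "text_embedding",
		es.Inference.PutHuggingFace.WithBody(settings))
	// ...versus...
	res, err = es.Inference.PutMistral("my-mistral-endpoint", "text_embedding",
		es.Inference.PutMistral.WithBody(settings))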
+type InferencePutHuggingFace func(huggingface_inference_id string, task_type string, o ...func(*InferencePutHuggingFaceRequest)) (*Response, error) + +// InferencePutHuggingFaceRequest configures the Inference Put Hugging Face API request. +type InferencePutHuggingFaceRequest struct { + Body io.Reader + + HuggingfaceInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutHuggingFaceRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_hugging_face") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.HuggingfaceInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.HuggingfaceInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "huggingface_inference_id", r.HuggingfaceInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_hugging_face") + if reader := instrument.RecordRequestBody(ctx, "inference.put_hugging_face", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_hugging_face") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f InferencePutHuggingFace) WithContext(v context.Context) func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutHuggingFace) WithBody(v io.Reader) func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutHuggingFace) WithPretty() func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutHuggingFace) WithHuman() func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutHuggingFace) WithErrorTrace() func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutHuggingFace) WithFilterPath(v ...string) func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutHuggingFace) WithHeader(h map[string]string) func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutHuggingFace) WithOpaqueID(s string) func(*InferencePutHuggingFaceRequest) { + return func(r *InferencePutHuggingFaceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_jinaai.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_jinaai.go new file mode 100644 index 000000000..8c25e3f5a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_jinaai.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
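The api.inference.put_jinaai.go file added here follows the same generated shape as its siblings. As a usage reference, a minimal sketch — assuming the usual esapi namespacing exposes this as es.Inference.PutJinaai, and treating the endpoint ID, API key, and model ID as illustrative placeholders (the jinaai service_settings fields follow the Elasticsearch inference docs, not this diff):

	package main

	import (
		"context"
		"log"
		"strings"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		es, err := elasticsearch.NewDefaultClient()
		if err != nil {
			log.Fatalf("client: %s", err)
		}

		// PUT /_inference/{task_type}/{jinaai_inference_id}; the generated
		// signature takes the inference ID first, then the task type.
		res, err := es.Inference.PutJinaai(
			"my-jinaai-embeddings", // hypothetical endpoint ID
			"text_embedding",
			es.Inference.PutJinaai.WithContext(context.Background()),
			es.Inference.PutJinaai.WithBody(strings.NewReader(`{
			  "service": "jinaai",
			  "service_settings": {"api_key": "<JINA_API_KEY>", "model_id": "jina-embeddings-v3"}
			}`)),
		)
		if err != nil {
			log.Fatalf("request: %s", err)
		}
		defer res.Body.Close()
		log.Println(res.String())
	}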
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutJinaaiFunc(t Transport) InferencePutJinaai { + return func(jinaai_inference_id string, task_type string, o ...func(*InferencePutJinaaiRequest)) (*Response, error) { + var r = InferencePutJinaaiRequest{JinaaiInferenceID: jinaai_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutJinaai configure a JinaAI inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-jinaai.html. +type InferencePutJinaai func(jinaai_inference_id string, task_type string, o ...func(*InferencePutJinaaiRequest)) (*Response, error) + +// InferencePutJinaaiRequest configures the Inference Put Jinaai API request. +type InferencePutJinaaiRequest struct { + Body io.Reader + + JinaaiInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutJinaaiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_jinaai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.JinaaiInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.JinaaiInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "jinaai_inference_id", r.JinaaiInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_jinaai") + if reader := instrument.RecordRequestBody(ctx, 
"inference.put_jinaai", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_jinaai") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutJinaai) WithContext(v context.Context) func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutJinaai) WithBody(v io.Reader) func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutJinaai) WithPretty() func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutJinaai) WithHuman() func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutJinaai) WithErrorTrace() func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutJinaai) WithFilterPath(v ...string) func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutJinaai) WithHeader(h map[string]string) func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutJinaai) WithOpaqueID(s string) func(*InferencePutJinaaiRequest) { + return func(r *InferencePutJinaaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_mistral.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_mistral.go new file mode 100644 index 000000000..2e3c452d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_mistral.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutMistralFunc(t Transport) InferencePutMistral { + return func(mistral_inference_id string, task_type string, o ...func(*InferencePutMistralRequest)) (*Response, error) { + var r = InferencePutMistralRequest{MistralInferenceID: mistral_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutMistral configure a Mistral inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-mistral.html. +type InferencePutMistral func(mistral_inference_id string, task_type string, o ...func(*InferencePutMistralRequest)) (*Response, error) + +// InferencePutMistralRequest configures the Inference Put Mistral API request. +type InferencePutMistralRequest struct { + Body io.Reader + + MistralInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutMistralRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_mistral") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.MistralInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.MistralInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "mistral_inference_id", r.MistralInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_mistral") + if reader := instrument.RecordRequestBody(ctx, "inference.put_mistral", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_mistral") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutMistral) WithContext(v context.Context) func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutMistral) WithBody(v io.Reader) func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutMistral) WithPretty() func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutMistral) WithHuman() func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutMistral) WithErrorTrace() func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutMistral) WithFilterPath(v ...string) func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutMistral) WithHeader(h map[string]string) func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutMistral) WithOpaqueID(s string) func(*InferencePutMistralRequest) { + return func(r *InferencePutMistralRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_openai.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_openai.go new file mode 100644 index 000000000..b74f0b4f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_openai.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutOpenaiFunc(t Transport) InferencePutOpenai { + return func(openai_inference_id string, task_type string, o ...func(*InferencePutOpenaiRequest)) (*Response, error) { + var r = InferencePutOpenaiRequest{OpenaiInferenceID: openai_inference_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutOpenai configure an OpenAI inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-openai.html. +type InferencePutOpenai func(openai_inference_id string, task_type string, o ...func(*InferencePutOpenaiRequest)) (*Response, error) + +// InferencePutOpenaiRequest configures the Inference Put Openai API request. +type InferencePutOpenaiRequest struct { + Body io.Reader + + OpenaiInferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r InferencePutOpenaiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.OpenaiInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.OpenaiInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "openai_inference_id", r.OpenaiInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_openai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_openai", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_openai") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutOpenai) WithContext(v context.Context) func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutOpenai) WithBody(v io.Reader) func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutOpenai) WithPretty() func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferencePutOpenai) WithHuman() func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutOpenai) WithErrorTrace() func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutOpenai) WithFilterPath(v ...string) func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutOpenai) WithHeader(h map[string]string) func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutOpenai) WithOpaqueID(s string) func(*InferencePutOpenaiRequest) { + return func(r *InferencePutOpenaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_voyageai.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_voyageai.go new file mode 100644 index 000000000..66f4e7f36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_voyageai.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutVoyageaiFunc(t Transport) InferencePutVoyageai { + return func(task_type string, voyageai_inference_id string, o ...func(*InferencePutVoyageaiRequest)) (*Response, error) { + var r = InferencePutVoyageaiRequest{TaskType: task_type, VoyageaiInferenceID: voyageai_inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutVoyageai configure a VoyageAI inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html. +type InferencePutVoyageai func(task_type string, voyageai_inference_id string, o ...func(*InferencePutVoyageaiRequest)) (*Response, error) + +// InferencePutVoyageaiRequest configures the Inference Put Voyageai API request. 
+type InferencePutVoyageaiRequest struct { + Body io.Reader + + TaskType string + VoyageaiInferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutVoyageaiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_voyageai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.VoyageaiInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.VoyageaiInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "voyageai_inference_id", r.VoyageaiInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_voyageai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_voyageai", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_voyageai") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutVoyageai) WithContext(v context.Context) func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferencePutVoyageai) WithBody(v io.Reader) func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f InferencePutVoyageai) WithPretty() func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutVoyageai) WithHuman() func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutVoyageai) WithErrorTrace() func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutVoyageai) WithFilterPath(v ...string) func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutVoyageai) WithHeader(h map[string]string) func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutVoyageai) WithOpaqueID(s string) func(*InferencePutVoyageaiRequest) { + return func(r *InferencePutVoyageaiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_watsonx.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_watsonx.go new file mode 100644 index 000000000..3c2a75422 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.put_watsonx.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutWatsonxFunc(t Transport) InferencePutWatsonx { + return func(task_type string, watsonx_inference_id string, o ...func(*InferencePutWatsonxRequest)) (*Response, error) { + var r = InferencePutWatsonxRequest{TaskType: task_type, WatsonxInferenceID: watsonx_inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutWatsonx configure a Watsonx inference endpoint +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-watsonx-ai.html. 
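One wrinkle worth noting before the Watsonx definitions below: like the Voyage AI constructor above, the Watsonx one takes task_type as its first argument and the inference ID second, mirroring the /_inference/{task_type}/{inference_id} path order, whereas the Google Vertex AI, Hugging Face, JinaAI, Mistral, and OpenAI variants earlier in this diff take the inference ID first. An illustrative call (hypothetical names; settings is an io.Reader with the service config):

	res, err := es.Inference.PutWatsonx("text_embedding", "my-watsonx-endpoint",
		es.Inference.PutWatsonx.WithBody(settings))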
+type InferencePutWatsonx func(task_type string, watsonx_inference_id string, o ...func(*InferencePutWatsonxRequest)) (*Response, error) + +// InferencePutWatsonxRequest configures the Inference Put Watsonx API request. +type InferencePutWatsonxRequest struct { + Body io.Reader + + TaskType string + WatsonxInferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferencePutWatsonxRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.WatsonxInferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + path.WriteString("/") + path.WriteString(r.WatsonxInferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "watsonx_inference_id", r.WatsonxInferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_watsonx") + if reader := instrument.RecordRequestBody(ctx, "inference.put_watsonx", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_watsonx") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutWatsonx) WithContext(v context.Context) func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. 
+func (f InferencePutWatsonx) WithBody(v io.Reader) func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutWatsonx) WithPretty() func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutWatsonx) WithHuman() func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutWatsonx) WithErrorTrace() func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutWatsonx) WithFilterPath(v ...string) func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutWatsonx) WithHeader(h map[string]string) func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferencePutWatsonx) WithOpaqueID(s string) func(*InferencePutWatsonxRequest) { + return func(r *InferencePutWatsonxRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.rerank.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.rerank.go new file mode 100644 index 000000000..5c516a85c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.rerank.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
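The rerank endpoint added below takes only an inference ID; the task type is fixed in the path (POST /_inference/rerank/{inference_id}). A minimal sketch of calling it, assuming the usual esapi namespacing, a configured *elasticsearch.Client, and a previously created rerank endpoint; the query/input body shape follows the Elasticsearch inference API, and all IDs are placeholders:

	package inferdemo

	import (
		"context"
		"fmt"
		"io"
		"os"
		"strings"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	// rerankDocs scores candidate passages against a query with a
	// hypothetical "my-rerank-endpoint" and prints the raw response.
	func rerankDocs(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Inference.Rerank(
			"my-rerank-endpoint",
			es.Inference.Rerank.WithContext(ctx),
			es.Inference.Rerank.WithBody(strings.NewReader(`{
			  "query": "how does vector search work?",
			  "input": ["Postgres is a relational database.", "Vector search ranks passages by embedding similarity."]
			}`)),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("rerank failed: %s", res.String())
		}
		_, err = io.Copy(os.Stdout, res.Body)
		return err
	}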
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceRerankFunc(t Transport) InferenceRerank { + return func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) { + var r = InferenceRerankRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceRerank perform reranking inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceRerank func(inference_id string, o ...func(*InferenceRerankRequest)) (*Response, error) + +// InferenceRerankRequest configures the Inference Rerank API request. +type InferenceRerankRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceRerankRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("rerank") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceRerank) WithContext(v context.Context) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceRerank) WithBody(v io.Reader) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceRerank) WithPretty() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceRerank) WithHuman() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceRerank) WithErrorTrace() func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceRerank) WithFilterPath(v ...string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceRerank) WithHeader(h map[string]string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceRerank) WithOpaqueID(s string) func(*InferenceRerankRequest) { + return func(r *InferenceRerankRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.sparse_embedding.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.sparse_embedding.go new file mode 100644 index 000000000..fd258eef9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.sparse_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
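Every Do method in these generated files brackets transport.Perform with optional observability hooks: when the transport implements Instrumented, the request type-asserts r.Instrument and calls Start/Close around the span, RecordPathPart for each URL component, RecordRequestBody and BeforeRequest before sending, AfterRequest after, and RecordError on failure. A minimal logging implementation — the method set below is inferred from those call sites, so treat it as a sketch rather than the authoritative esapi interface:

	package inferdemo

	import (
		"context"
		"io"
		"log"
		"net/http"
	)

	// logInstr logs each hook; returning nil from RecordRequestBody tells
	// the caller to keep the original request body untouched.
	type logInstr struct{}

	func (logInstr) Start(ctx context.Context, name string) context.Context {
		log.Printf("span start: %s", name)
		return ctx
	}

	func (logInstr) Close(ctx context.Context) { log.Print("span end") }

	func (logInstr) RecordError(ctx context.Context, err error) { log.Printf("span error: %v", err) }

	func (logInstr) RecordPathPart(ctx context.Context, name, value string) {
		log.Printf("path part %s=%s", name, value)
	}

	func (logInstr) RecordRequestBody(ctx context.Context, endpoint string, body io.Reader) io.ReadCloser {
		return nil
	}

	func (logInstr) BeforeRequest(req *http.Request, endpoint string) { log.Printf("-> %s", endpoint) }

	func (logInstr) AfterRequest(req *http.Request, system, endpoint string) {
		log.Printf("<- %s %s", system, endpoint)
	}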
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceSparseEmbeddingFunc(t Transport) InferenceSparseEmbedding { + return func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) { + var r = InferenceSparseEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceSparseEmbedding perform sparse embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceSparseEmbedding func(inference_id string, o ...func(*InferenceSparseEmbeddingRequest)) (*Response, error) + +// InferenceSparseEmbeddingRequest configures the Inference Sparse Embedding API request. +type InferenceSparseEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceSparseEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("sparse_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok 
{ + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceSparseEmbedding) WithContext(v context.Context) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceSparseEmbedding) WithBody(v io.Reader) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceSparseEmbedding) WithPretty() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceSparseEmbedding) WithHuman() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceSparseEmbedding) WithErrorTrace() func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceSparseEmbedding) WithFilterPath(v ...string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceSparseEmbedding) WithHeader(h map[string]string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceSparseEmbedding) WithOpaqueID(s string) func(*InferenceSparseEmbeddingRequest) { + return func(r *InferenceSparseEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.stream_completion.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.stream_completion.go new file mode 100644 index 000000000..a5f7c2490 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.stream_completion.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
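// Illustrative usage sketch (not part of the generated sources): calling the
// new sparse embedding endpoint through an *elasticsearch.Client, which
// satisfies esapi.Transport and embeds the generated API. The client wiring
// (es.Inference.SparseEmbedding), the "my-elser-endpoint" inference ID, and
// the request body are assumptions for illustration; assumes imports of
// context, log, strings, and github.com/elastic/go-elasticsearch/v9.
//
//	es, err := elasticsearch.NewDefaultClient()
//	if err != nil {
//		log.Fatal(err)
//	}
//	res, err := es.Inference.SparseEmbedding(
//		"my-elser-endpoint",
//		es.Inference.SparseEmbedding.WithBody(strings.NewReader(`{"input":"sample text"}`)),
//		es.Inference.SparseEmbedding.WithContext(context.Background()),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()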
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceStreamCompletionFunc(t Transport) InferenceStreamCompletion { + return func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) { + var r = InferenceStreamCompletionRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceStreamCompletion perform streaming completion inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html. +type InferenceStreamCompletion func(inference_id string, o ...func(*InferenceStreamCompletionRequest)) (*Response, error) + +// InferenceStreamCompletionRequest configures the Inference Stream Completion API request. +type InferenceStreamCompletionRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceStreamCompletionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("completion") + 1 + len(r.InferenceID) + 1 + len("_stream")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_stream") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.Body); reader != nil { + req.Body = reader + } + } + res, 
err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceStreamCompletion) WithContext(v context.Context) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceStreamCompletion) WithBody(v io.Reader) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceStreamCompletion) WithPretty() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceStreamCompletion) WithHuman() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceStreamCompletion) WithErrorTrace() func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceStreamCompletion) WithFilterPath(v ...string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceStreamCompletion) WithHeader(h map[string]string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceStreamCompletion) WithOpaqueID(s string) func(*InferenceStreamCompletionRequest) { + return func(r *InferenceStreamCompletionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.text_embedding.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.text_embedding.go new file mode 100644 index 000000000..3de2cf359 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.text_embedding.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceTextEmbeddingFunc(t Transport) InferenceTextEmbedding { + return func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) { + var r = InferenceTextEmbeddingRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceTextEmbedding perform text embedding inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceTextEmbedding func(inference_id string, o ...func(*InferenceTextEmbeddingRequest)) (*Response, error) + +// InferenceTextEmbeddingRequest configures the Inference Text Embedding API request. +type InferenceTextEmbeddingRequest struct { + Body io.Reader + + InferenceID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceTextEmbeddingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.text_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len("text_embedding") + 1 + len(r.InferenceID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("text_embedding") + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.text_embedding", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceTextEmbedding) WithContext(v context.Context) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. +func (f InferenceTextEmbedding) WithBody(v io.Reader) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceTextEmbedding) WithPretty() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceTextEmbedding) WithHuman() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceTextEmbedding) WithErrorTrace() func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceTextEmbedding) WithFilterPath(v ...string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceTextEmbedding) WithHeader(h map[string]string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceTextEmbedding) WithOpaqueID(s string) func(*InferenceTextEmbeddingRequest) { + return func(r *InferenceTextEmbeddingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.update.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.update.go new file mode 100644 index 000000000..98e4e4dc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.inference.update.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
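// Illustrative usage sketch (not generated code): the text embedding variant
// follows the same pattern as sparse embedding above, differing only in the
// "text_embedding" path segment. The endpoint ID and request body are
// assumptions for illustration.
//
//	res, err := es.Inference.TextEmbedding(
//		"my-e5-endpoint",
//		es.Inference.TextEmbedding.WithBody(strings.NewReader(`{"input":"sample text"}`)),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()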
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceUpdateFunc(t Transport) InferenceUpdate { + return func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) { + var r = InferenceUpdateRequest{InferenceID: inference_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceUpdate update inference +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html. +type InferenceUpdate func(inference_id string, o ...func(*InferenceUpdateRequest)) (*Response, error) + +// InferenceUpdateRequest configures the Inference Update API request. +type InferenceUpdateRequest struct { + Body io.Reader + + InferenceID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InferenceUpdateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.InferenceID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + if r.TaskType != "" { + path.WriteString("/") + path.WriteString(r.TaskType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_type", r.TaskType) + } + } + path.WriteString("/") + path.WriteString(r.InferenceID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "inference_id", r.InferenceID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceUpdate) WithContext(v context.Context) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ctx = v + } +} + +// WithBody - The inference endpoint's task and service settings. +func (f InferenceUpdate) WithBody(v io.Reader) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Body = v + } +} + +// WithTaskType - the task type. +func (f InferenceUpdate) WithTaskType(v string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.TaskType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceUpdate) WithPretty() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceUpdate) WithHuman() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceUpdate) WithErrorTrace() func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceUpdate) WithFilterPath(v ...string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceUpdate) WithHeader(h map[string]string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceUpdate) WithOpaqueID(s string) func(*InferenceUpdateRequest) { + return func(r *InferenceUpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.info.go new file mode 100644 index 000000000..6f4cc9ad1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.info.go @@ -0,0 +1,208 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
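// Illustrative usage sketch (not generated code): updating an inference
// endpoint. The generated Do builds PUT /_inference/{task_type}/{id}/_update,
// including the task type segment only when WithTaskType is set. The body
// fields shown are assumptions for illustration.
//
//	res, err := es.Inference.Update(
//		"my-elser-endpoint",
//		es.Inference.Update.WithTaskType("sparse_embedding"),
//		es.Inference.Update.WithBody(strings.NewReader(`{"service_settings":{"num_allocations":2}}`)),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()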
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newInfoFunc(t Transport) Info { + return func(o ...func(*InfoRequest)) (*Response, error) { + var r = InfoRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Info returns basic information about the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html. +type Info func(o ...func(*InfoRequest)) (*Response, error) + +// InfoRequest configures the Info API request. +type InfoRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r InfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/")) + path.WriteString("http://") + path.WriteString("/") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Info) WithContext(v context.Context) func(*InfoRequest) { + return func(r *InfoRequest) { + r.ctx = v + } +} + +// WithHuman makes statistical values human-readable. +func (f Info) WithHuman() func(*InfoRequest) { + return func(r *InfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Info) WithErrorTrace() func(*InfoRequest) { + return func(r *InfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
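// Illustrative usage sketch (not generated code): Info is the simplest of
// these endpoints (GET / with no body) and pairs naturally with filter_path
// to trim the response. Client construction is assumed as in the earlier
// sketches; assumes fmt and log imports.
//
//	res, err := es.Info(es.Info.WithFilterPath("version.number"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()
//	fmt.Println(res.String())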
+func (f Info) WithFilterPath(v ...string) func(*InfoRequest) { + return func(r *InfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Info) WithHeader(h map[string]string) func(*InfoRequest) { + return func(r *InfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Info) WithOpaqueID(s string) func(*InfoRequest) { + return func(r *InfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_geoip_database.go new file mode 100644 index 000000000..07af96fad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_geoip_database.go @@ -0,0 +1,258 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIngestDeleteGeoipDatabaseFunc(t Transport) IngestDeleteGeoipDatabase { + return func(id []string, o ...func(*IngestDeleteGeoipDatabaseRequest)) (*Response, error) { + var r = IngestDeleteGeoipDatabaseRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestDeleteGeoipDatabase deletes a geoip database configuration +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-geoip-database-api.html. +type IngestDeleteGeoipDatabase func(id []string, o ...func(*IngestDeleteGeoipDatabaseRequest)) (*Response, error) + +// IngestDeleteGeoipDatabaseRequest configures the Ingest Delete Geoip Database API request. +type IngestDeleteGeoipDatabaseRequest struct { + DocumentID []string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestDeleteGeoipDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.DocumentID) == 0 { + return nil, errors.New("id is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_ingest") + 1 + len("geoip") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_geoip_database") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_geoip_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestDeleteGeoipDatabase) WithContext(v context.Context) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestDeleteGeoipDatabase) WithMasterTimeout(v time.Duration) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestDeleteGeoipDatabase) WithTimeout(v time.Duration) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
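// Illustrative usage sketch (not generated code): deleting a geoip database
// configuration. DocumentID is a []string, so several configurations can be
// removed in one call; the ID and the timeout value are assumptions, and a
// time import is assumed for the duration option.
//
//	res, err := es.Ingest.DeleteGeoipDatabase(
//		[]string{"my-geoip-db"},
//		es.Ingest.DeleteGeoipDatabase.WithMasterTimeout(30*time.Second),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()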
+func (f IngestDeleteGeoipDatabase) WithPretty() func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestDeleteGeoipDatabase) WithHuman() func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestDeleteGeoipDatabase) WithErrorTrace() func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestDeleteGeoipDatabase) WithFilterPath(v ...string) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestDeleteGeoipDatabase) WithHeader(h map[string]string) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestDeleteGeoipDatabase) WithOpaqueID(s string) func(*IngestDeleteGeoipDatabaseRequest) { + return func(r *IngestDeleteGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_ip_location_database.go new file mode 100644 index 000000000..8986e23ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_ip_location_database.go @@ -0,0 +1,258 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIngestDeleteIPLocationDatabaseFunc(t Transport) IngestDeleteIPLocationDatabase { + return func(id []string, o ...func(*IngestDeleteIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestDeleteIPLocationDatabaseRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestDeleteIPLocationDatabase deletes an ip location database configuration +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-ip-location-database-api.html. +type IngestDeleteIPLocationDatabase func(id []string, o ...func(*IngestDeleteIPLocationDatabaseRequest)) (*Response, error) + +// IngestDeleteIPLocationDatabaseRequest configures the Ingest DeleteIP Location Database API request. +type IngestDeleteIPLocationDatabaseRequest struct { + DocumentID []string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IngestDeleteIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.DocumentID) == 0 { + return nil, errors.New("id is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req 
= req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_ip_location_database") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_ip_location_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestDeleteIPLocationDatabase) WithContext(v context.Context) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestDeleteIPLocationDatabase) WithMasterTimeout(v time.Duration) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestDeleteIPLocationDatabase) WithTimeout(v time.Duration) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestDeleteIPLocationDatabase) WithPretty() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestDeleteIPLocationDatabase) WithHuman() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestDeleteIPLocationDatabase) WithErrorTrace() func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestDeleteIPLocationDatabase) WithFilterPath(v ...string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestDeleteIPLocationDatabase) WithHeader(h map[string]string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestDeleteIPLocationDatabase) WithOpaqueID(s string) func(*IngestDeleteIPLocationDatabaseRequest) { + return func(r *IngestDeleteIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_pipeline.go new file mode 100644 index 000000000..6e90d29b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.delete_pipeline.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIngestDeletePipelineFunc(t Transport) IngestDeletePipeline { + return func(id string, o ...func(*IngestDeletePipelineRequest)) (*Response, error) { + var r = IngestDeletePipelineRequest{PipelineID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestDeletePipeline deletes a pipeline. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html. +type IngestDeletePipeline func(id string, o ...func(*IngestDeletePipelineRequest)) (*Response, error) + +// IngestDeletePipelineRequest configures the Ingest Delete Pipeline API request. +type IngestDeletePipelineRequest struct { + PipelineID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestDeletePipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("pipeline") + 1 + len(r.PipelineID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + path.WriteString(r.PipelineID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.PipelineID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_pipeline") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestDeletePipeline) WithContext(v context.Context) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestDeletePipeline) WithMasterTimeout(v time.Duration) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestDeletePipeline) WithTimeout(v time.Duration) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestDeletePipeline) WithPretty() func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
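// Illustrative usage sketch (not generated code): deleting an ingest pipeline
// by ID and checking for an error status in the response; the pipeline name
// is an assumption.
//
//	res, err := es.Ingest.DeletePipeline("my-pipeline")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()
//	if res.IsError() {
//		log.Fatalf("delete pipeline failed: %s", res.String())
//	}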
+func (f IngestDeletePipeline) WithHuman() func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestDeletePipeline) WithErrorTrace() func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestDeletePipeline) WithFilterPath(v ...string) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestDeletePipeline) WithHeader(h map[string]string) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestDeletePipeline) WithOpaqueID(s string) func(*IngestDeletePipelineRequest) { + return func(r *IngestDeletePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.geo_ip_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.geo_ip_stats.go new file mode 100644 index 000000000..847d7953a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.geo_ip_stats.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIngestGeoIPStatsFunc(t Transport) IngestGeoIPStats { + return func(o ...func(*IngestGeoIPStatsRequest)) (*Response, error) { + var r = IngestGeoIPStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestGeoIPStats returns statistical information about geoip databases +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-stats-api.html. +type IngestGeoIPStats func(o ...func(*IngestGeoIPStatsRequest)) (*Response, error) + +// IngestGeoIPStatsRequest configures the Ingest GeoIP Stats API request. 
+type IngestGeoIPStatsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IngestGeoIPStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.geo_ip_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_ingest/geoip/stats")) + path.WriteString("http://") + path.WriteString("/_ingest/geoip/stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.geo_ip_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.geo_ip_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestGeoIPStats) WithContext(v context.Context) func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestGeoIPStats) WithPretty() func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestGeoIPStats) WithHuman() func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestGeoIPStats) WithErrorTrace() func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestGeoIPStats) WithFilterPath(v ...string) func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
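// Illustrative usage sketch (not generated code): fetching geoip download
// statistics. The endpoint takes no required arguments; exposing it as
// es.Ingest.GeoIPStats is an assumption about the generated client wiring.
//
//	res, err := es.Ingest.GeoIPStats(es.Ingest.GeoIPStats.WithHuman())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()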
+func (f IngestGeoIPStats) WithHeader(h map[string]string) func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestGeoIPStats) WithOpaqueID(s string) func(*IngestGeoIPStatsRequest) { + return func(r *IngestGeoIPStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_geoip_database.go new file mode 100644 index 000000000..fc6ed06f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_geoip_database.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIngestGetGeoipDatabaseFunc(t Transport) IngestGetGeoipDatabase { + return func(o ...func(*IngestGetGeoipDatabaseRequest)) (*Response, error) { + var r = IngestGetGeoipDatabaseRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestGetGeoipDatabase returns geoip database configuration. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-geoip-database-api.html. +type IngestGetGeoipDatabase func(o ...func(*IngestGetGeoipDatabaseRequest)) (*Response, error) + +// IngestGetGeoipDatabaseRequest configures the Ingest Get Geoip Database API request. +type IngestGetGeoipDatabaseRequest struct { + DocumentID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestGetGeoipDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("geoip") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + if len(r.DocumentID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_geoip_database") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_geoip_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestGetGeoipDatabase) WithContext(v context.Context) func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.ctx = v + } +} + +// WithDocumentID - a list of geoip database configurations to get; use `*` to get all geoip database configurations. +func (f IngestGetGeoipDatabase) WithDocumentID(v ...string) func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestGetGeoipDatabase) WithPretty() func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestGetGeoipDatabase) WithHuman() func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
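WithDocumentID is variadic; as the Do method below shows, the IDs are comma-joined into /_ingest/geoip/database/{ids}, and omitting them lists every configuration. A short fragment reusing the hypothetical es client from the first sketch:

// "*" is the documented wildcard for all geoip database configurations.
res, err := es.Ingest.GetGeoipDatabase(
	es.Ingest.GetGeoipDatabase.WithDocumentID("*"),
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()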
+func (f IngestGetGeoipDatabase) WithErrorTrace() func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestGetGeoipDatabase) WithFilterPath(v ...string) func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestGetGeoipDatabase) WithHeader(h map[string]string) func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestGetGeoipDatabase) WithOpaqueID(s string) func(*IngestGetGeoipDatabaseRequest) { + return func(r *IngestGetGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_ip_location_database.go new file mode 100644 index 000000000..b42300cf8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_ip_location_database.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIngestGetIPLocationDatabaseFunc(t Transport) IngestGetIPLocationDatabase { + return func(o ...func(*IngestGetIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestGetIPLocationDatabaseRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestGetIPLocationDatabase returns the specified ip location database configuration +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-ip-location-database-api.html. +type IngestGetIPLocationDatabase func(o ...func(*IngestGetIPLocationDatabaseRequest)) (*Response, error) + +// IngestGetIPLocationDatabaseRequest configures the Ingest GetIP Location Database API request. +type IngestGetIPLocationDatabaseRequest struct { + DocumentID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestGetIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(strings.Join(r.DocumentID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + if len(r.DocumentID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.DocumentID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", strings.Join(r.DocumentID, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_ip_location_database") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_ip_location_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestGetIPLocationDatabase) WithContext(v context.Context) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithDocumentID - a list of ip location database configurations to get; use `*` to get all ip location database configurations. +func (f IngestGetIPLocationDatabase) WithDocumentID(v ...string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestGetIPLocationDatabase) WithPretty() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestGetIPLocationDatabase) WithHuman() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
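The ip_location variant is call-for-call identical to the geoip one; only the path segment assembled in Do differs (/_ingest/ip_location/database). A fragment under the same client assumption, with a made-up configuration ID:

res, err := es.Ingest.GetIPLocationDatabase(
	// Hypothetical configuration ID; omit the option to list all.
	es.Ingest.GetIPLocationDatabase.WithDocumentID("my-ip-db"),
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()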
+func (f IngestGetIPLocationDatabase) WithErrorTrace() func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestGetIPLocationDatabase) WithFilterPath(v ...string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestGetIPLocationDatabase) WithHeader(h map[string]string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestGetIPLocationDatabase) WithOpaqueID(s string) func(*IngestGetIPLocationDatabaseRequest) { + return func(r *IngestGetIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_pipeline.go new file mode 100644 index 000000000..17faf7282 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.get_pipeline.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIngestGetPipelineFunc(t Transport) IngestGetPipeline { + return func(o ...func(*IngestGetPipelineRequest)) (*Response, error) { + var r = IngestGetPipelineRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestGetPipeline returns a pipeline. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html. +type IngestGetPipeline func(o ...func(*IngestGetPipelineRequest)) (*Response, error) + +// IngestGetPipelineRequest configures the Ingest Get Pipeline API request. +type IngestGetPipelineRequest struct { + PipelineID string + + MasterTimeout time.Duration + Summary *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestGetPipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.get_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("pipeline") + 1 + len(r.PipelineID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + if r.PipelineID != "" { + path.WriteString("/") + path.WriteString(r.PipelineID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.PipelineID) + } + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Summary != nil { + params["summary"] = strconv.FormatBool(*r.Summary) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.get_pipeline") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.get_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestGetPipeline) WithContext(v context.Context) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.ctx = v + } +} + +// WithPipelineID - comma separated list of pipeline ids. wildcards supported. +func (f IngestGetPipeline) WithPipelineID(v string) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.PipelineID = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestGetPipeline) WithMasterTimeout(v time.Duration) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.MasterTimeout = v + } +} + +// WithSummary - return pipelines without their definitions (default: false). +func (f IngestGetPipeline) WithSummary(v bool) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.Summary = &v + } +} + +// WithPretty makes the response body pretty-printed. 
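Because PipelineID is optional, the same function serves both "get one" and "list all", and WithSummary(true) asks the server to strip processor definitions from the response. A fragment with a hypothetical pipeline ID, reusing the es client from the first sketch:

res, err := es.Ingest.GetPipeline(
	es.Ingest.GetPipeline.WithPipelineID("my-pipeline"), // hypothetical ID; omit to list all
	es.Ingest.GetPipeline.WithSummary(true),             // names and versions only
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()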
+func (f IngestGetPipeline) WithPretty() func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestGetPipeline) WithHuman() func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestGetPipeline) WithErrorTrace() func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestGetPipeline) WithFilterPath(v ...string) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestGetPipeline) WithHeader(h map[string]string) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestGetPipeline) WithOpaqueID(s string) func(*IngestGetPipelineRequest) { + return func(r *IngestGetPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.processor_grok.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.processor_grok.go new file mode 100644 index 000000000..b5eabd915 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.processor_grok.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIngestProcessorGrokFunc(t Transport) IngestProcessorGrok { + return func(o ...func(*IngestProcessorGrokRequest)) (*Response, error) { + var r = IngestProcessorGrokRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestProcessorGrok returns a list of the built-in patterns. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get. +type IngestProcessorGrok func(o ...func(*IngestProcessorGrokRequest)) (*Response, error) + +// IngestProcessorGrokRequest configures the Ingest Processor Grok API request. 
+type IngestProcessorGrokRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IngestProcessorGrokRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.processor_grok") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_ingest/processor/grok")) + path.WriteString("http://") + path.WriteString("/_ingest/processor/grok") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.processor_grok") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.processor_grok") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestProcessorGrok) WithContext(v context.Context) func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestProcessorGrok) WithPretty() func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestProcessorGrok) WithHuman() func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestProcessorGrok) WithErrorTrace() func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestProcessorGrok) WithFilterPath(v ...string) func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
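This endpoint simply dumps the grok patterns built into the cluster, so the only interesting options are the response-shaping ones. With the same hypothetical client:

// GET /_ingest/processor/grok, pretty-printed for inspection.
res, err := es.Ingest.ProcessorGrok(
	es.Ingest.ProcessorGrok.WithPretty(),
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()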
+func (f IngestProcessorGrok) WithHeader(h map[string]string) func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestProcessorGrok) WithOpaqueID(s string) func(*IngestProcessorGrokRequest) { + return func(r *IngestProcessorGrokRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_geoip_database.go new file mode 100644 index 000000000..234ca48a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_geoip_database.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIngestPutGeoipDatabaseFunc(t Transport) IngestPutGeoipDatabase { + return func(id string, body io.Reader, o ...func(*IngestPutGeoipDatabaseRequest)) (*Response, error) { + var r = IngestPutGeoipDatabaseRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestPutGeoipDatabase puts the configuration for a geoip database to be downloaded +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-geoip-database-api.html. +type IngestPutGeoipDatabase func(id string, body io.Reader, o ...func(*IngestPutGeoipDatabaseRequest)) (*Response, error) + +// IngestPutGeoipDatabaseRequest configures the Ingest Put Geoip Database API request. +type IngestPutGeoipDatabaseRequest struct { + DocumentID string + + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestPutGeoipDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("geoip") + 1 + len("database") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_geoip_database", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_geoip_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestPutGeoipDatabase) WithContext(v context.Context) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestPutGeoipDatabase) WithMasterTimeout(v time.Duration) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestPutGeoipDatabase) WithTimeout(v time.Duration) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
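Unlike the read-only endpoints, the constructor above takes the configuration ID and body positionally. The JSON shape below follows the Elasticsearch put-geoip-database documentation (a database name plus a MaxMind account ID), but the ID and account value are illustrative only; strings and time imports are assumed added to the earlier sketch:

res, err := es.Ingest.PutGeoipDatabase(
	"my-geoip-db", // hypothetical configuration ID
	strings.NewReader(`{"name":"GeoLite2-City","maxmind":{"account_id":"123456"}}`),
	es.Ingest.PutGeoipDatabase.WithTimeout(30*time.Second),
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()

The put_ip_location_database endpoint vendored next has exactly the same calling convention, with ip_location in place of geoip in the path.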
+func (f IngestPutGeoipDatabase) WithPretty() func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestPutGeoipDatabase) WithHuman() func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestPutGeoipDatabase) WithErrorTrace() func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestPutGeoipDatabase) WithFilterPath(v ...string) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestPutGeoipDatabase) WithHeader(h map[string]string) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestPutGeoipDatabase) WithOpaqueID(s string) func(*IngestPutGeoipDatabaseRequest) { + return func(r *IngestPutGeoipDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_ip_location_database.go new file mode 100644 index 000000000..4c2ae516d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_ip_location_database.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newIngestPutIPLocationDatabaseFunc(t Transport) IngestPutIPLocationDatabase { + return func(id string, body io.Reader, o ...func(*IngestPutIPLocationDatabaseRequest)) (*Response, error) { + var r = IngestPutIPLocationDatabaseRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestPutIPLocationDatabase puts the configuration for a ip location database to be downloaded +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-ip-location-database-api.html. +type IngestPutIPLocationDatabase func(id string, body io.Reader, o ...func(*IngestPutIPLocationDatabaseRequest)) (*Response, error) + +// IngestPutIPLocationDatabaseRequest configures the Ingest PutIP Location Database API request. +type IngestPutIPLocationDatabaseRequest struct { + DocumentID string + + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IngestPutIPLocationDatabaseRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("ip_location") + 1 + len("database") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = 
req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_ip_location_database", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_ip_location_database") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestPutIPLocationDatabase) WithContext(v context.Context) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestPutIPLocationDatabase) WithMasterTimeout(v time.Duration) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f IngestPutIPLocationDatabase) WithTimeout(v time.Duration) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestPutIPLocationDatabase) WithPretty() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestPutIPLocationDatabase) WithHuman() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestPutIPLocationDatabase) WithErrorTrace() func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestPutIPLocationDatabase) WithFilterPath(v ...string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestPutIPLocationDatabase) WithHeader(h map[string]string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestPutIPLocationDatabase) WithOpaqueID(s string) func(*IngestPutIPLocationDatabaseRequest) { + return func(r *IngestPutIPLocationDatabaseRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_pipeline.go new file mode 100644 index 000000000..361c1517f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.put_pipeline.go @@ -0,0 +1,274 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newIngestPutPipelineFunc(t Transport) IngestPutPipeline { + return func(id string, body io.Reader, o ...func(*IngestPutPipelineRequest)) (*Response, error) { + var r = IngestPutPipelineRequest{PipelineID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestPutPipeline creates or updates a pipeline. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html. +type IngestPutPipeline func(id string, body io.Reader, o ...func(*IngestPutPipelineRequest)) (*Response, error) + +// IngestPutPipelineRequest configures the Ingest Put Pipeline API request. +type IngestPutPipelineRequest struct { + PipelineID string + + Body io.Reader + + IfVersion *int + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IngestPutPipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("pipeline") + 1 + len(r.PipelineID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + path.WriteString(r.PipelineID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.PipelineID) + } + + params = make(map[string]string) + + if r.IfVersion != nil { + params["if_version"] = strconv.FormatInt(int64(*r.IfVersion), 10) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_pipeline") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_pipeline", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestPutPipeline) WithContext(v context.Context) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.ctx = v + } +} + +// WithIfVersion - required version for optimistic concurrency control for pipeline updates. +func (f IngestPutPipeline) WithIfVersion(v int) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.IfVersion = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f IngestPutPipeline) WithMasterTimeout(v time.Duration) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
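PutPipeline pairs naturally with WithIfVersion for optimistic concurrency: the update is rejected unless the stored pipeline still carries the given version. A fragment under the same client assumption (pipeline name and processor are illustrative):

res, err := es.Ingest.PutPipeline(
	"my-pipeline", // hypothetical pipeline ID
	strings.NewReader(`{"description":"add env tag","processors":[{"set":{"field":"env","value":"prod"}}]}`),
	es.Ingest.PutPipeline.WithIfVersion(3), // fails if the stored version is no longer 3
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()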
+func (f IngestPutPipeline) WithTimeout(v time.Duration) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestPutPipeline) WithPretty() func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestPutPipeline) WithHuman() func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestPutPipeline) WithErrorTrace() func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestPutPipeline) WithFilterPath(v ...string) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestPutPipeline) WithHeader(h map[string]string) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestPutPipeline) WithOpaqueID(s string) func(*IngestPutPipelineRequest) { + return func(r *IngestPutPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.simulate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.simulate.go new file mode 100644 index 000000000..890761c84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ingest.simulate.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newIngestSimulateFunc(t Transport) IngestSimulate { + return func(body io.Reader, o ...func(*IngestSimulateRequest)) (*Response, error) { + var r = IngestSimulateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IngestSimulate allows to simulate a pipeline with example documents. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html. +type IngestSimulate func(body io.Reader, o ...func(*IngestSimulateRequest)) (*Response, error) + +// IngestSimulateRequest configures the Ingest Simulate API request. +type IngestSimulateRequest struct { + PipelineID string + + Body io.Reader + + Verbose *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IngestSimulateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.simulate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ingest") + 1 + len("pipeline") + 1 + len(r.PipelineID) + 1 + len("_simulate")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + if r.PipelineID != "" { + path.WriteString("/") + path.WriteString(r.PipelineID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.PipelineID) + } + } + path.WriteString("/") + path.WriteString("_simulate") + + params = make(map[string]string) + + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.simulate") + if reader := instrument.RecordRequestBody(ctx, "ingest.simulate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.simulate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IngestSimulate) WithContext(v context.Context) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.ctx = v + } +} + +// WithPipelineID - pipeline ID. 
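As the Do method shows, the request posts to /_ingest/pipeline/{id}/_simulate when a pipeline ID is set, and to the bare _simulate endpoint (with the pipeline inlined in the body) otherwise. A fragment running a sample document through a hypothetical stored pipeline, same client assumption as before:

res, err := es.Ingest.Simulate(
	strings.NewReader(`{"docs":[{"_source":{"message":"hello"}}]}`),
	es.Ingest.Simulate.WithPipelineID("my-pipeline"), // hypothetical stored pipeline
	es.Ingest.Simulate.WithVerbose(true),             // per-processor output
)
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()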
+func (f IngestSimulate) WithPipelineID(v string) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.PipelineID = v + } +} + +// WithVerbose - verbose mode. display data output for each processor in executed pipeline. +func (f IngestSimulate) WithVerbose(v bool) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.Verbose = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IngestSimulate) WithPretty() func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IngestSimulate) WithHuman() func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IngestSimulate) WithErrorTrace() func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IngestSimulate) WithFilterPath(v ...string) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IngestSimulate) WithHeader(h map[string]string) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IngestSimulate) WithOpaqueID(s string) func(*IngestSimulateRequest) { + return func(r *IngestSimulateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mget.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mget.go new file mode 100644 index 000000000..0c7c41713 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mget.go @@ -0,0 +1,352 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMgetFunc(t Transport) Mget { + return func(body io.Reader, o ...func(*MgetRequest)) (*Response, error) { + var r = MgetRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Mget allows to get multiple documents in one request. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html. +type Mget func(body io.Reader, o ...func(*MgetRequest)) (*Response, error) + +// MgetRequest configures the Mget API request. +type MgetRequest struct { + Index string + + Body io.Reader + + ForceSyntheticSource *bool + Preference string + Realtime *bool + Refresh *bool + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + StoredFields []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MgetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "mget") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_mget")) + path.WriteString("http://") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_mget") + + params = make(map[string]string) + + if r.ForceSyntheticSource != nil { + params["force_synthetic_source"] = strconv.FormatBool(*r.ForceSyntheticSource) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "mget") + if reader := instrument.RecordRequestBody(ctx, "mget", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.AfterRequest(req, "elasticsearch", "mget") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Mget) WithContext(v context.Context) func(*MgetRequest) { + return func(r *MgetRequest) { + r.ctx = v + } +} + +// WithIndex - the name of the index. +func (f Mget) WithIndex(v string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Index = v + } +} + +// WithForceSyntheticSource - should this request force synthetic _source? use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. fetches with this enabled will be slower the enabling synthetic source natively in the index.. +func (f Mget) WithForceSyntheticSource(v bool) func(*MgetRequest) { + return func(r *MgetRequest) { + r.ForceSyntheticSource = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f Mget) WithPreference(v string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Preference = v + } +} + +// WithRealtime - specify whether to perform the operation in realtime or search mode. +func (f Mget) WithRealtime(v bool) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Realtime = &v + } +} + +// WithRefresh - refresh the shard containing the document before performing the operation. +func (f Mget) WithRefresh(v bool) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Refresh = &v + } +} + +// WithRouting - specific routing value. +func (f Mget) WithRouting(v string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f Mget) WithSource(v ...string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f Mget) WithSourceExcludes(v ...string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f Mget) WithSourceIncludes(v ...string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.SourceIncludes = v + } +} + +// WithStoredFields - a list of stored fields to return in the response. +func (f Mget) WithStoredFields(v ...string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.StoredFields = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Mget) WithPretty() func(*MgetRequest) { + return func(r *MgetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Mget) WithHuman() func(*MgetRequest) { + return func(r *MgetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Mget) WithErrorTrace() func(*MgetRequest) { + return func(r *MgetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Mget) WithFilterPath(v ...string) func(*MgetRequest) { + return func(r *MgetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f Mget) WithHeader(h map[string]string) func(*MgetRequest) { + return func(r *MgetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Mget) WithOpaqueID(s string) func(*MgetRequest) { + return func(r *MgetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch.go new file mode 100644 index 000000000..ddcafc932 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch.go @@ -0,0 +1,404 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMsearchFunc(t Transport) Msearch { + return func(body io.Reader, o ...func(*MsearchRequest)) (*Response, error) { + var r = MsearchRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Msearch allows to execute several search operations in one request. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html. +type Msearch func(body io.Reader, o ...func(*MsearchRequest)) (*Response, error) + +// MsearchRequest configures the Msearch API request. +type MsearchRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + CcsMinimizeRoundtrips *bool + ExpandWildcards string + IgnoreThrottled *bool + IgnoreUnavailable *bool + IncludeNamedQueriesScore *bool + MaxConcurrentSearches *int + MaxConcurrentShardRequests *int + PreFilterShardSize *int + RestTotalHitsAsInt *bool + Routing []string + SearchType string + TypedKeys *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
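+//
+// An illustrative invocation through the Msearch callable (a sketch assuming
+// a configured *elasticsearch.Client named "es"; the body must be
+// newline-delimited JSON with one header line and one body line per search):
+//
+//    body := strings.NewReader(
+//        `{"index":"my-index"}` + "\n" +
+//            `{"query":{"match_all":{}}}` + "\n")
+//    res, err := es.Msearch(body, es.Msearch.WithMaxConcurrentSearches(4))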
+func (r MsearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "msearch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_msearch")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_msearch") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeNamedQueriesScore != nil { + params["include_named_queries_score"] = strconv.FormatBool(*r.IncludeNamedQueriesScore) + } + + if len(r.Index) > 0 { + params["index"] = strings.Join(r.Index, ",") + } + + if r.MaxConcurrentSearches != nil { + params["max_concurrent_searches"] = strconv.FormatInt(int64(*r.MaxConcurrentSearches), 10) + } + + if r.MaxConcurrentShardRequests != nil { + params["max_concurrent_shard_requests"] = strconv.FormatInt(int64(*r.MaxConcurrentShardRequests), 10) + } + + if r.PreFilterShardSize != nil { + params["pre_filter_shard_size"] = strconv.FormatInt(int64(*r.PreFilterShardSize), 10) + } + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "msearch") + if reader := instrument.RecordRequestBody(ctx, "msearch", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "msearch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Msearch) WithContext(v context.Context) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to use as default. +func (f Msearch) WithIndex(v ...string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f Msearch) WithAllowNoIndices(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.AllowNoIndices = &v + } +} + +// WithCcsMinimizeRoundtrips - indicates whether network round-trips should be minimized as part of cross-cluster search requests execution. +func (f Msearch) WithCcsMinimizeRoundtrips(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f Msearch) WithExpandWildcards(v string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +func (f Msearch) WithIgnoreThrottled(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f Msearch) WithIgnoreUnavailable(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeNamedQueriesScore - indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). +func (f Msearch) WithIncludeNamedQueriesScore(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.IncludeNamedQueriesScore = &v + } +} + +// WithMaxConcurrentSearches - controls the maximum number of concurrent searches the multi search api will execute. +func (f Msearch) WithMaxConcurrentSearches(v int) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.MaxConcurrentSearches = &v + } +} + +// WithMaxConcurrentShardRequests - the number of concurrent shard requests each sub search executes concurrently per node. this value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +func (f Msearch) WithMaxConcurrentShardRequests(v int) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.MaxConcurrentShardRequests = &v + } +} + +// WithPreFilterShardSize - a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
this filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.. +func (f Msearch) WithPreFilterShardSize(v int) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.PreFilterShardSize = &v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f Msearch) WithRestTotalHitsAsInt(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithRouting - a list of specific routing values. +func (f Msearch) WithRouting(v ...string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.Routing = v + } +} + +// WithSearchType - search operation type. +func (f Msearch) WithSearchType(v string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.SearchType = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f Msearch) WithTypedKeys(v bool) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.TypedKeys = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Msearch) WithPretty() func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Msearch) WithHuman() func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Msearch) WithErrorTrace() func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Msearch) WithFilterPath(v ...string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Msearch) WithHeader(h map[string]string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Msearch) WithOpaqueID(s string) func(*MsearchRequest) { + return func(r *MsearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch_template.go new file mode 100644 index 000000000..bbcd13640 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.msearch_template.go @@ -0,0 +1,306 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMsearchTemplateFunc(t Transport) MsearchTemplate { + return func(body io.Reader, o ...func(*MsearchTemplateRequest)) (*Response, error) { + var r = MsearchTemplateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MsearchTemplate allows to execute several search template operations in one request. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html. +type MsearchTemplate func(body io.Reader, o ...func(*MsearchTemplateRequest)) (*Response, error) + +// MsearchTemplateRequest configures the Msearch Template API request. +type MsearchTemplateRequest struct { + Index []string + + Body io.Reader + + CcsMinimizeRoundtrips *bool + MaxConcurrentSearches *int + RestTotalHitsAsInt *bool + SearchType string + TypedKeys *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
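+//
+// An illustrative invocation (a sketch assuming a client "es" and a
+// hypothetical stored search template "my-template"; the body follows the
+// same newline-delimited header/body convention as Msearch):
+//
+//    body := strings.NewReader(
+//        `{"index":"my-index"}` + "\n" +
+//            `{"id":"my-template","params":{"q":"user"}}` + "\n")
+//    res, err := es.MsearchTemplate(body)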
+func (r MsearchTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "msearch_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_msearch") + 1 + len("template")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_msearch") + path.WriteString("/") + path.WriteString("template") + + params = make(map[string]string) + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.MaxConcurrentSearches != nil { + params["max_concurrent_searches"] = strconv.FormatInt(int64(*r.MaxConcurrentSearches), 10) + } + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "msearch_template") + if reader := instrument.RecordRequestBody(ctx, "msearch_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "msearch_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MsearchTemplate) WithContext(v context.Context) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to use as default. 
+func (f MsearchTemplate) WithIndex(v ...string) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.Index = v + } +} + +// WithCcsMinimizeRoundtrips - indicates whether network round-trips should be minimized as part of cross-cluster search requests execution. +func (f MsearchTemplate) WithCcsMinimizeRoundtrips(v bool) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithMaxConcurrentSearches - controls the maximum number of concurrent searches the multi search api will execute. +func (f MsearchTemplate) WithMaxConcurrentSearches(v int) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.MaxConcurrentSearches = &v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f MsearchTemplate) WithRestTotalHitsAsInt(v bool) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithSearchType - search operation type. +func (f MsearchTemplate) WithSearchType(v string) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.SearchType = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f MsearchTemplate) WithTypedKeys(v bool) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.TypedKeys = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MsearchTemplate) WithPretty() func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MsearchTemplate) WithHuman() func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MsearchTemplate) WithErrorTrace() func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MsearchTemplate) WithFilterPath(v ...string) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MsearchTemplate) WithHeader(h map[string]string) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MsearchTemplate) WithOpaqueID(s string) func(*MsearchTemplateRequest) { + return func(r *MsearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mtermvectors.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mtermvectors.go new file mode 100644 index 000000000..7c4caa17c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.mtermvectors.go @@ -0,0 +1,395 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMtermvectorsFunc(t Transport) Mtermvectors { + return func(o ...func(*MtermvectorsRequest)) (*Response, error) { + var r = MtermvectorsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Mtermvectors returns multiple termvectors in one request. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html. +type Mtermvectors func(o ...func(*MtermvectorsRequest)) (*Response, error) + +// MtermvectorsRequest configures the Mtermvectors API request. +type MtermvectorsRequest struct { + Index string + + Body io.Reader + + Fields []string + FieldStatistics *bool + Ids []string + Offsets *bool + Payloads *bool + Positions *bool + Preference string + Realtime *bool + Routing string + TermStatistics *bool + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
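+//
+// An illustrative invocation (a sketch assuming a client "es"): the callable
+// takes no positional body, so document IDs and field selections travel
+// either through options, as below, or through WithBody:
+//
+//    res, err := es.Mtermvectors(
+//        es.Mtermvectors.WithIndex("my-index"),
+//        es.Mtermvectors.WithIds("1", "2"),
+//        es.Mtermvectors.WithFields("text"),
+//    )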
+func (r MtermvectorsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "mtermvectors") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_mtermvectors")) + path.WriteString("http://") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_mtermvectors") + + params = make(map[string]string) + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.FieldStatistics != nil { + params["field_statistics"] = strconv.FormatBool(*r.FieldStatistics) + } + + if len(r.Ids) > 0 { + params["ids"] = strings.Join(r.Ids, ",") + } + + if r.Offsets != nil { + params["offsets"] = strconv.FormatBool(*r.Offsets) + } + + if r.Payloads != nil { + params["payloads"] = strconv.FormatBool(*r.Payloads) + } + + if r.Positions != nil { + params["positions"] = strconv.FormatBool(*r.Positions) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.TermStatistics != nil { + params["term_statistics"] = strconv.FormatBool(*r.TermStatistics) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "mtermvectors") + if reader := instrument.RecordRequestBody(ctx, "mtermvectors", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "mtermvectors") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f Mtermvectors) WithContext(v context.Context) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.ctx = v + } +} + +// WithBody - Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (f Mtermvectors) WithBody(v io.Reader) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Body = v + } +} + +// WithIndex - the index in which the document resides.. +func (f Mtermvectors) WithIndex(v string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Index = v + } +} + +// WithFields - a list of fields to return. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithFields(v ...string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Fields = v + } +} + +// WithFieldStatistics - specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithFieldStatistics(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.FieldStatistics = &v + } +} + +// WithIds - a list of documents ids. you must define ids as parameter or set "ids" or "docs" in the request body. +func (f Mtermvectors) WithIds(v ...string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Ids = v + } +} + +// WithOffsets - specifies if term offsets should be returned. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithOffsets(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Offsets = &v + } +} + +// WithPayloads - specifies if term payloads should be returned. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithPayloads(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Payloads = &v + } +} + +// WithPositions - specifies if term positions should be returned. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithPositions(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Positions = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random) .applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithPreference(v string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Preference = v + } +} + +// WithRealtime - specifies if requests are real-time as opposed to near-real-time (default: true).. +func (f Mtermvectors) WithRealtime(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Realtime = &v + } +} + +// WithRouting - specific routing value. applies to all returned documents unless otherwise specified in body "params" or "docs".. +func (f Mtermvectors) WithRouting(v string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Routing = v + } +} + +// WithTermStatistics - specifies if total term frequency and document frequency should be returned. applies to all returned documents unless otherwise specified in body "params" or "docs".. 
+func (f Mtermvectors) WithTermStatistics(v bool) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.TermStatistics = &v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f Mtermvectors) WithVersion(v int) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f Mtermvectors) WithVersionType(v string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Mtermvectors) WithPretty() func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Mtermvectors) WithHuman() func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Mtermvectors) WithErrorTrace() func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Mtermvectors) WithFilterPath(v ...string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Mtermvectors) WithHeader(h map[string]string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Mtermvectors) WithOpaqueID(s string) func(*MtermvectorsRequest) { + return func(r *MtermvectorsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.clear_repositories_metering_archive.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.clear_repositories_metering_archive.go new file mode 100644 index 000000000..1b13fde66 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.clear_repositories_metering_archive.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newNodesClearRepositoriesMeteringArchiveFunc(t Transport) NodesClearRepositoriesMeteringArchive { + return func(max_archive_version *int, node_id []string, o ...func(*NodesClearRepositoriesMeteringArchiveRequest)) (*Response, error) { + var r = NodesClearRepositoriesMeteringArchiveRequest{MaxArchiveVersion: max_archive_version, NodeID: node_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesClearRepositoriesMeteringArchive removes the archived repositories metering information present in the cluster. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html. +type NodesClearRepositoriesMeteringArchive func(max_archive_version *int, node_id []string, o ...func(*NodesClearRepositoriesMeteringArchiveRequest)) (*Response, error) + +// NodesClearRepositoriesMeteringArchiveRequest configures the Nodes Clear Repositories Metering Archive API request. +type NodesClearRepositoriesMeteringArchiveRequest struct { + MaxArchiveVersion *int + NodeID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r NodesClearRepositoriesMeteringArchiveRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.clear_repositories_metering_archive") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.NodeID) == 0 { + return nil, errors.New("node_id is required and cannot be nil or empty") + } + if r.MaxArchiveVersion == nil { + return nil, errors.New("max_archive_version is required and cannot be nil") + } + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("_repositories_metering") + 1 + len(strconv.Itoa(*r.MaxArchiveVersion))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + path.WriteString("/") + path.WriteString("_repositories_metering") + path.WriteString("/") + path.WriteString(strconv.Itoa(*r.MaxArchiveVersion)) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "max_archive_version", strconv.Itoa(*r.MaxArchiveVersion)) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.clear_repositories_metering_archive") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.clear_repositories_metering_archive") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f NodesClearRepositoriesMeteringArchive) WithContext(v context.Context) func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f NodesClearRepositoriesMeteringArchive) WithPretty() func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesClearRepositoriesMeteringArchive) WithHuman() func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f NodesClearRepositoriesMeteringArchive) WithErrorTrace() func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesClearRepositoriesMeteringArchive) WithFilterPath(v ...string) func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesClearRepositoriesMeteringArchive) WithHeader(h map[string]string) func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f NodesClearRepositoriesMeteringArchive) WithOpaqueID(s string) func(*NodesClearRepositoriesMeteringArchiveRequest) { + return func(r *NodesClearRepositoriesMeteringArchiveRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.get_repositories_metering_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.get_repositories_metering_info.go new file mode 100644 index 000000000..452ce675d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.get_repositories_metering_info.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newNodesGetRepositoriesMeteringInfoFunc(t Transport) NodesGetRepositoriesMeteringInfo { + return func(node_id []string, o ...func(*NodesGetRepositoriesMeteringInfoRequest)) (*Response, error) { + var r = NodesGetRepositoriesMeteringInfoRequest{NodeID: node_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesGetRepositoriesMeteringInfo returns cluster repositories metering information. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html. +type NodesGetRepositoriesMeteringInfo func(node_id []string, o ...func(*NodesGetRepositoriesMeteringInfoRequest)) (*Response, error) + +// NodesGetRepositoriesMeteringInfoRequest configures the Nodes Get Repositories Metering Info API request. +type NodesGetRepositoriesMeteringInfoRequest struct { + NodeID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
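+//
+// Unlike the purely option-driven APIs, this callable takes its required
+// node IDs as a positional argument; a minimal sketch (assuming a client
+// "es" and an illustrative node selector):
+//
+//    res, err := es.Nodes.GetRepositoriesMeteringInfo([]string{"_all"})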
+func (r NodesGetRepositoriesMeteringInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.get_repositories_metering_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.NodeID) == 0 { + return nil, errors.New("node_id is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("_repositories_metering")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + path.WriteString("/") + path.WriteString("_repositories_metering") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.get_repositories_metering_info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.get_repositories_metering_info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f NodesGetRepositoriesMeteringInfo) WithContext(v context.Context) func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f NodesGetRepositoriesMeteringInfo) WithPretty() func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesGetRepositoriesMeteringInfo) WithHuman() func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f NodesGetRepositoriesMeteringInfo) WithErrorTrace() func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesGetRepositoriesMeteringInfo) WithFilterPath(v ...string) func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesGetRepositoriesMeteringInfo) WithHeader(h map[string]string) func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f NodesGetRepositoriesMeteringInfo) WithOpaqueID(s string) func(*NodesGetRepositoriesMeteringInfoRequest) { + return func(r *NodesGetRepositoriesMeteringInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.hot_threads.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.hot_threads.go new file mode 100644 index 000000000..a0f495b8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.hot_threads.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newNodesHotThreadsFunc(t Transport) NodesHotThreads { + return func(o ...func(*NodesHotThreadsRequest)) (*Response, error) { + var r = NodesHotThreadsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesHotThreads returns information about hot threads on each node in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html. +type NodesHotThreads func(o ...func(*NodesHotThreadsRequest)) (*Response, error) + +// NodesHotThreadsRequest configures the Nodes Hot Threads API request. 
+type NodesHotThreadsRequest struct { + NodeID []string + + IgnoreIdleThreads *bool + Interval time.Duration + Snapshots *int + Sort string + Threads *int + Timeout time.Duration + DocumentType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r NodesHotThreadsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.hot_threads") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("hot_threads")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + path.WriteString("/") + path.WriteString("hot_threads") + + params = make(map[string]string) + + if r.IgnoreIdleThreads != nil { + params["ignore_idle_threads"] = strconv.FormatBool(*r.IgnoreIdleThreads) + } + + if r.Interval != 0 { + params["interval"] = formatDuration(r.Interval) + } + + if r.Snapshots != nil { + params["snapshots"] = strconv.FormatInt(int64(*r.Snapshots), 10) + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if r.Threads != nil { + params["threads"] = strconv.FormatInt(int64(*r.Threads), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.DocumentType != "" { + params["type"] = r.DocumentType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.hot_threads") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.hot_threads") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
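+//
+// A minimal sketch combining WithContext with the sampling options (assuming
+// a configured client "es"):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    res, err := es.Nodes.HotThreads(
+//        es.Nodes.HotThreads.WithContext(ctx),
+//        es.Nodes.HotThreads.WithThreads(3),
+//        es.Nodes.HotThreads.WithInterval(500*time.Millisecond),
+//    )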
+func (f NodesHotThreads) WithContext(v context.Context) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.ctx = v + } +} + +// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (f NodesHotThreads) WithNodeID(v ...string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.NodeID = v + } +} + +// WithIgnoreIdleThreads - don't show threads that are in known-idle places, such as waiting on a socket select or pulling from an empty task queue (default: true). +func (f NodesHotThreads) WithIgnoreIdleThreads(v bool) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.IgnoreIdleThreads = &v + } +} + +// WithInterval - the interval for the second sampling of threads. +func (f NodesHotThreads) WithInterval(v time.Duration) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Interval = v + } +} + +// WithSnapshots - number of samples of thread stacktrace (default: 10). +func (f NodesHotThreads) WithSnapshots(v int) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Snapshots = &v + } +} + +// WithSort - the sort order for 'cpu' type (default: total). +func (f NodesHotThreads) WithSort(v string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Sort = v + } +} + +// WithThreads - specify the number of threads to provide information for (default: 3). +func (f NodesHotThreads) WithThreads(v int) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Threads = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f NodesHotThreads) WithTimeout(v time.Duration) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Timeout = v + } +} + +// WithDocumentType - the type to sample (default: cpu). +func (f NodesHotThreads) WithDocumentType(v string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.DocumentType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f NodesHotThreads) WithPretty() func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesHotThreads) WithHuman() func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f NodesHotThreads) WithErrorTrace() func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesHotThreads) WithFilterPath(v ...string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesHotThreads) WithHeader(h map[string]string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f NodesHotThreads) WithOpaqueID(s string) func(*NodesHotThreadsRequest) { + return func(r *NodesHotThreadsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.info.go new file mode 100644 index 000000000..e2cc0d0d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.info.go @@ -0,0 +1,274 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newNodesInfoFunc(t Transport) NodesInfo { + return func(o ...func(*NodesInfoRequest)) (*Response, error) { + var r = NodesInfoRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesInfo returns information about nodes in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html. +type NodesInfo func(o ...func(*NodesInfoRequest)) (*Response, error) + +// NodesInfoRequest configures the Nodes Info API request. +type NodesInfoRequest struct { + Metric []string + NodeID []string + + FlatSettings *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
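+//
+// The returned Response wraps the transport's HTTP response; callers are
+// expected to consume and close its Body:
+//
+//	res, err := r.Do(ctx, transport)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}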
+func (r NodesInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "nodes.info")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len(strings.Join(r.Metric, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_nodes")
+	if len(r.NodeID) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.NodeID, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ","))
+		}
+	}
+	if len(r.Metric) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Metric, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.FlatSettings != nil {
+		params["flat_settings"] = strconv.FormatBool(*r.FlatSettings)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "nodes.info")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "nodes.info")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f NodesInfo) WithContext(v context.Context) func(*NodesInfoRequest) {
+	return func(r *NodesInfoRequest) {
+		r.ctx = v
+	}
+}
+
+// WithMetric - a list of metrics you wish returned; leave empty to return all metrics.
+func (f NodesInfo) WithMetric(v ...string) func(*NodesInfoRequest) {
+	return func(r *NodesInfoRequest) {
+		r.Metric = v
+	}
+}
+
+// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (f NodesInfo) WithNodeID(v ...string) func(*NodesInfoRequest) {
+	return func(r *NodesInfoRequest) {
+		r.NodeID = v
+	}
+}
+
+// WithFlatSettings - return settings in flat format (default: false).
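+//
+// For example (illustrative sketch, assumes client `es`):
+//
+//	res, err := es.Nodes.Info(
+//		es.Nodes.Info.WithMetric("jvm", "os"),
+//		es.Nodes.Info.WithFlatSettings(true),
+//	)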
+func (f NodesInfo) WithFlatSettings(v bool) func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.FlatSettings = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f NodesInfo) WithTimeout(v time.Duration) func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f NodesInfo) WithPretty() func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesInfo) WithHuman() func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f NodesInfo) WithErrorTrace() func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesInfo) WithFilterPath(v ...string) func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesInfo) WithHeader(h map[string]string) func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f NodesInfo) WithOpaqueID(s string) func(*NodesInfoRequest) { + return func(r *NodesInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.reload_secure_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.reload_secure_settings.go new file mode 100644 index 000000000..db4399828 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.reload_secure_settings.go @@ -0,0 +1,265 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newNodesReloadSecureSettingsFunc(t Transport) NodesReloadSecureSettings { + return func(o ...func(*NodesReloadSecureSettingsRequest)) (*Response, error) { + var r = NodesReloadSecureSettingsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesReloadSecureSettings reloads secure settings. 
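+//
+// The optional JSON body carries the keystore password, for example
+// {"secure_settings_password":"..."} (see WithBody below).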
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings. +type NodesReloadSecureSettings func(o ...func(*NodesReloadSecureSettingsRequest)) (*Response, error) + +// NodesReloadSecureSettingsRequest configures the Nodes Reload Secure Settings API request. +type NodesReloadSecureSettingsRequest struct { + Body io.Reader + + NodeID []string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r NodesReloadSecureSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.reload_secure_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("reload_secure_settings")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + path.WriteString("/") + path.WriteString("reload_secure_settings") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.reload_secure_settings") + if reader := instrument.RecordRequestBody(ctx, "nodes.reload_secure_settings", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.reload_secure_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f NodesReloadSecureSettings) WithContext(v context.Context) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - an object containing the password for the Elasticsearch keystore.
+func (f NodesReloadSecureSettings) WithBody(v io.Reader) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.Body = v
+	}
+}
+
+// WithNodeID - a list of node ids to span the reload/reinit call; should stay empty, because reloading usually involves all cluster nodes.
+func (f NodesReloadSecureSettings) WithNodeID(v ...string) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.NodeID = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f NodesReloadSecureSettings) WithTimeout(v time.Duration) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f NodesReloadSecureSettings) WithPretty() func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f NodesReloadSecureSettings) WithHuman() func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f NodesReloadSecureSettings) WithErrorTrace() func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f NodesReloadSecureSettings) WithFilterPath(v ...string) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f NodesReloadSecureSettings) WithHeader(h map[string]string) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f NodesReloadSecureSettings) WithOpaqueID(s string) func(*NodesReloadSecureSettingsRequest) {
+	return func(r *NodesReloadSecureSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.stats.go
new file mode 100644
index 000000000..fffc7645a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.stats.go
@@ -0,0 +1,375 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newNodesStatsFunc(t Transport) NodesStats { + return func(o ...func(*NodesStatsRequest)) (*Response, error) { + var r = NodesStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesStats returns statistical information about nodes in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html. +type NodesStats func(o ...func(*NodesStatsRequest)) (*Response, error) + +// NodesStatsRequest configures the Nodes Stats API request. +type NodesStatsRequest struct { + IndexMetric []string + Metric []string + NodeID []string + + CompletionFields []string + FielddataFields []string + Fields []string + Groups *bool + IncludeSegmentFileSizes *bool + IncludeUnloadedSegments *bool + Level string + Timeout time.Duration + Types []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
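+//
+// Path segments are assembled from the options; the sketch below issues
+// GET /_nodes/_local/stats/indices/docs,store (assumes client `es`):
+//
+//	res, err := es.Nodes.Stats(
+//		es.Nodes.Stats.WithNodeID("_local"),
+//		es.Nodes.Stats.WithMetric("indices"),
+//		es.Nodes.Stats.WithIndexMetric("docs", "store"),
+//	)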
+func (r NodesStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("stats") + 1 + len(strings.Join(r.Metric, ",")) + 1 + len(strings.Join(r.IndexMetric, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + path.WriteString("/") + path.WriteString("stats") + if len(r.Metric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Metric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ",")) + } + } + if len(r.IndexMetric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.IndexMetric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index_metric", strings.Join(r.IndexMetric, ",")) + } + } + + params = make(map[string]string) + + if len(r.CompletionFields) > 0 { + params["completion_fields"] = strings.Join(r.CompletionFields, ",") + } + + if len(r.FielddataFields) > 0 { + params["fielddata_fields"] = strings.Join(r.FielddataFields, ",") + } + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.Groups != nil { + params["groups"] = strconv.FormatBool(*r.Groups) + } + + if r.IncludeSegmentFileSizes != nil { + params["include_segment_file_sizes"] = strconv.FormatBool(*r.IncludeSegmentFileSizes) + } + + if r.IncludeUnloadedSegments != nil { + params["include_unloaded_segments"] = strconv.FormatBool(*r.IncludeUnloadedSegments) + } + + if r.Level != "" { + params["level"] = r.Level + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if len(r.Types) > 0 { + params["types"] = strings.Join(r.Types, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + 
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f NodesStats) WithContext(v context.Context) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithIndexMetric - limit the information returned for the `indices` metric to the specific index metrics; not used unless the `indices` (or `all`) metric is specified.
+func (f NodesStats) WithIndexMetric(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.IndexMetric = v
+	}
+}
+
+// WithMetric - limit the information returned to the specified metrics.
+func (f NodesStats) WithMetric(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Metric = v
+	}
+}
+
+// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (f NodesStats) WithNodeID(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.NodeID = v
+	}
+}
+
+// WithCompletionFields - a list of fields for the `completion` index metric (supports wildcards).
+func (f NodesStats) WithCompletionFields(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.CompletionFields = v
+	}
+}
+
+// WithFielddataFields - a list of fields for the `fielddata` index metric (supports wildcards).
+func (f NodesStats) WithFielddataFields(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.FielddataFields = v
+	}
+}
+
+// WithFields - a list of fields for the `fielddata` and `completion` index metrics (supports wildcards).
+func (f NodesStats) WithFields(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Fields = v
+	}
+}
+
+// WithGroups - a list of search groups for the `search` index metric.
+func (f NodesStats) WithGroups(v bool) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Groups = &v
+	}
+}
+
+// WithIncludeSegmentFileSizes - whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+func (f NodesStats) WithIncludeSegmentFileSizes(v bool) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.IncludeSegmentFileSizes = &v
+	}
+}
+
+// WithIncludeUnloadedSegments - if set to true, segment stats will include stats for segments that are not currently loaded into memory.
+func (f NodesStats) WithIncludeUnloadedSegments(v bool) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.IncludeUnloadedSegments = &v
+	}
+}
+
+// WithLevel - return indices stats aggregated at index, node or shard level.
+func (f NodesStats) WithLevel(v string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Level = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f NodesStats) WithTimeout(v time.Duration) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithTypes - a list of document types for the `indexing` index metric.
+func (f NodesStats) WithTypes(v ...string) func(*NodesStatsRequest) {
+	return func(r *NodesStatsRequest) {
+		r.Types = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f NodesStats) WithPretty() func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesStats) WithHuman() func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f NodesStats) WithErrorTrace() func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesStats) WithFilterPath(v ...string) func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesStats) WithHeader(h map[string]string) func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f NodesStats) WithOpaqueID(s string) func(*NodesStatsRequest) { + return func(r *NodesStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.usage.go new file mode 100644 index 000000000..9842b0868 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.nodes.usage.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newNodesUsageFunc(t Transport) NodesUsage { + return func(o ...func(*NodesUsageRequest)) (*Response, error) { + var r = NodesUsageRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// NodesUsage returns low-level information about REST actions usage on nodes. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html. +type NodesUsage func(o ...func(*NodesUsageRequest)) (*Response, error) + +// NodesUsageRequest configures the Nodes Usage API request. 
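+//
+// Usage sketch (illustrative, assumes client `es`); the `rest_actions`
+// metric reports per-endpoint call counts:
+//
+//	res, err := es.Nodes.Usage(es.Nodes.Usage.WithMetric("rest_actions"))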
+type NodesUsageRequest struct { + Metric []string + NodeID []string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r NodesUsageRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("usage") + 1 + len(strings.Join(r.Metric, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + path.WriteString("/") + path.WriteString("usage") + if len(r.Metric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Metric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ",")) + } + } + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.usage") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.usage") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f NodesUsage) WithContext(v context.Context) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.ctx = v + } +} + +// WithMetric - limit the information returned to the specified metrics. +func (f NodesUsage) WithMetric(v ...string) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.Metric = v + } +} + +// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. 
+func (f NodesUsage) WithNodeID(v ...string) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.NodeID = v + } +} + +// WithTimeout - explicit operation timeout. +func (f NodesUsage) WithTimeout(v time.Duration) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f NodesUsage) WithPretty() func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f NodesUsage) WithHuman() func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f NodesUsage) WithErrorTrace() func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f NodesUsage) WithFilterPath(v ...string) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f NodesUsage) WithHeader(h map[string]string) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f NodesUsage) WithOpaqueID(s string) func(*NodesUsageRequest) { + return func(r *NodesUsageRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ping.go new file mode 100644 index 000000000..faa8fb02c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.ping.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newPingFunc(t Transport) Ping { + return func(o ...func(*PingRequest)) (*Response, error) { + var r = PingRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Ping returns whether the cluster is running. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html. +type Ping func(o ...func(*PingRequest)) (*Response, error) + +// PingRequest configures the Ping API request. 
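+//
+// Ping issues HEAD / and returns no body, which makes it a cheap health
+// probe (illustrative sketch, assumes client `es`):
+//
+//	res, err := es.Ping(es.Ping.WithContext(ctx))
+//	healthy := err == nil && !res.IsError()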
+type PingRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r PingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "HEAD" + + path.Grow(7 + len("/")) + path.WriteString("http://") + path.WriteString("/") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ping") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ping") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Ping) WithContext(v context.Context) func(*PingRequest) { + return func(r *PingRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Ping) WithPretty() func(*PingRequest) { + return func(r *PingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Ping) WithHuman() func(*PingRequest) { + return func(r *PingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Ping) WithErrorTrace() func(*PingRequest) { + return func(r *PingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Ping) WithFilterPath(v ...string) func(*PingRequest) { + return func(r *PingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Ping) WithHeader(h map[string]string) func(*PingRequest) { + return func(r *PingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f Ping) WithOpaqueID(s string) func(*PingRequest) { + return func(r *PingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.stacktraces.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.stacktraces.go new file mode 100644 index 000000000..4bc11a41b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.stacktraces.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newProfilingStacktracesFunc(t Transport) ProfilingStacktraces { + return func(body io.Reader, o ...func(*ProfilingStacktracesRequest)) (*Response, error) { + var r = ProfilingStacktracesRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ProfilingStacktraces extracts raw stacktrace information from Universal Profiling. +// +// See full documentation at https://www.elastic.co/guide/en/observability/current/universal-profiling.html. +type ProfilingStacktraces func(body io.Reader, o ...func(*ProfilingStacktracesRequest)) (*Response, error) + +// ProfilingStacktracesRequest configures the Profiling Stacktraces API request. +type ProfilingStacktracesRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
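+//
+// The body is a required positional argument. Illustrative sketch only: the
+// field names follow the Universal Profiling docs, and the es.Profiling
+// namespace is assumed from the generated API surface:
+//
+//	body := strings.NewReader(`{"sample_size":20000,"query":{"match_all":{}}}`)
+//	res, err := es.Profiling.Stacktraces(body)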
+func (r ProfilingStacktracesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.stacktraces") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_profiling/stacktraces")) + path.WriteString("http://") + path.WriteString("/_profiling/stacktraces") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.stacktraces") + if reader := instrument.RecordRequestBody(ctx, "profiling.stacktraces", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.stacktraces") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ProfilingStacktraces) WithContext(v context.Context) func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ProfilingStacktraces) WithPretty() func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ProfilingStacktraces) WithHuman() func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ProfilingStacktraces) WithErrorTrace() func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ProfilingStacktraces) WithFilterPath(v ...string) func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
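+//
+// Values are merged into the request with http.Header.Add, so repeated
+// calls accumulate header values rather than overwrite them.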
+func (f ProfilingStacktraces) WithHeader(h map[string]string) func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ProfilingStacktraces) WithOpaqueID(s string) func(*ProfilingStacktracesRequest) { + return func(r *ProfilingStacktracesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.status.go new file mode 100644 index 000000000..40e212bea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.status.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newProfilingStatusFunc(t Transport) ProfilingStatus { + return func(o ...func(*ProfilingStatusRequest)) (*Response, error) { + var r = ProfilingStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ProfilingStatus returns basic information about the status of Universal Profiling. +// +// See full documentation at https://www.elastic.co/guide/en/observability/current/universal-profiling.html. +type ProfilingStatus func(o ...func(*ProfilingStatusRequest)) (*Response, error) + +// ProfilingStatusRequest configures the Profiling Status API request. +type ProfilingStatusRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + WaitForResourcesCreated *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
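+//
+// Illustrative sketch (assumes client `es` and the es.Profiling namespace):
+//
+//	res, err := es.Profiling.Status(
+//		es.Profiling.Status.WithWaitForResourcesCreated(true),
+//	)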
+func (r ProfilingStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_profiling/status")) + path.WriteString("http://") + path.WriteString("/_profiling/status") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForResourcesCreated != nil { + params["wait_for_resources_created"] = strconv.FormatBool(*r.WaitForResourcesCreated) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ProfilingStatus) WithContext(v context.Context) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ProfilingStatus) WithMasterTimeout(v time.Duration) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ProfilingStatus) WithTimeout(v time.Duration) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.Timeout = v + } +} + +// WithWaitForResourcesCreated - whether to return immediately or wait until resources have been created. +func (f ProfilingStatus) WithWaitForResourcesCreated(v bool) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.WaitForResourcesCreated = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ProfilingStatus) WithPretty() func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ProfilingStatus) WithHuman() func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ProfilingStatus) WithErrorTrace() func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ProfilingStatus) WithFilterPath(v ...string) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ProfilingStatus) WithHeader(h map[string]string) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ProfilingStatus) WithOpaqueID(s string) func(*ProfilingStatusRequest) { + return func(r *ProfilingStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.topn_functions.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.topn_functions.go new file mode 100644 index 000000000..1b9562e21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.profiling.topn_functions.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newProfilingTopnFunctionsFunc(t Transport) ProfilingTopnFunctions { + return func(body io.Reader, o ...func(*ProfilingTopnFunctionsRequest)) (*Response, error) { + var r = ProfilingTopnFunctionsRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ProfilingTopnFunctions extracts a list of topN functions from Universal Profiling. +// +// See full documentation at https://www.elastic.co/guide/en/observability/current/universal-profiling.html. +type ProfilingTopnFunctions func(body io.Reader, o ...func(*ProfilingTopnFunctionsRequest)) (*Response, error) + +// ProfilingTopnFunctionsRequest configures the Profiling Topn Functions API request. 
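+//
+// As with ProfilingStacktraces, the JSON body is the first positional
+// argument of the API function rather than an option, because the request
+// cannot be built without it.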
+type ProfilingTopnFunctionsRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ProfilingTopnFunctionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.topn_functions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_profiling/topn/functions")) + path.WriteString("http://") + path.WriteString("/_profiling/topn/functions") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.topn_functions") + if reader := instrument.RecordRequestBody(ctx, "profiling.topn_functions", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.topn_functions") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ProfilingTopnFunctions) WithContext(v context.Context) func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ProfilingTopnFunctions) WithPretty() func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ProfilingTopnFunctions) WithHuman() func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ProfilingTopnFunctions) WithErrorTrace() func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f ProfilingTopnFunctions) WithFilterPath(v ...string) func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ProfilingTopnFunctions) WithHeader(h map[string]string) func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ProfilingTopnFunctions) WithOpaqueID(s string) func(*ProfilingTopnFunctionsRequest) { + return func(r *ProfilingTopnFunctionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.put_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.put_script.go new file mode 100644 index 000000000..e3722cb26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.put_script.go @@ -0,0 +1,279 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newPutScriptFunc(t Transport) PutScript { + return func(id string, body io.Reader, o ...func(*PutScriptRequest)) (*Response, error) { + var r = PutScriptRequest{ScriptID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// PutScript creates or updates a script. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html. +type PutScript func(id string, body io.Reader, o ...func(*PutScriptRequest)) (*Response, error) + +// PutScriptRequest configures the Put Script API request. +type PutScriptRequest struct { + ScriptID string + + Body io.Reader + + ScriptContext string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
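Editor's note: PutScriptRequest is one of the few requests here with time.Duration fields; Do serializes them through formatDuration into the master_timeout and timeout query parameters only when non-zero. A sketch of registering a stored script with both, reusing the es client from the first sketch (the script body is illustrative; error handling mirrors the earlier example):

	ctx := context.Background()
	res, err := es.PutScript(
		"calculate-score",
		strings.NewReader(`{"script": {"lang": "painless", "source": "Math.log(_score * 2)"}}`),
		es.PutScript.WithMasterTimeout(30*time.Second), // emitted as the master_timeout parameter
		es.PutScript.WithTimeout(10*time.Second),       // emitted as the timeout parameter
		es.PutScript.WithContext(ctx),
	)
	if err != nil {
		log.Fatalf("put script: %s", err)
	}
	defer res.Body.Close()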
+func (r PutScriptRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "put_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_scripts") + 1 + len(r.ScriptID) + 1 + len(r.ScriptContext)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + path.WriteString(r.ScriptID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ScriptID) + } + if r.ScriptContext != "" { + path.WriteString("/") + path.WriteString(r.ScriptContext) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "context", r.ScriptContext) + } + } + + params = make(map[string]string) + + if r.ScriptContext != "" { + params["context"] = r.ScriptContext + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "put_script") + if reader := instrument.RecordRequestBody(ctx, "put_script", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "put_script") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f PutScript) WithContext(v context.Context) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.ctx = v + } +} + +// WithScriptContext - script context. +func (f PutScript) WithScriptContext(v string) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.ScriptContext = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f PutScript) WithMasterTimeout(v time.Duration) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. 
+func (f PutScript) WithTimeout(v time.Duration) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f PutScript) WithPretty() func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f PutScript) WithHuman() func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f PutScript) WithErrorTrace() func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f PutScript) WithFilterPath(v ...string) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f PutScript) WithHeader(h map[string]string) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f PutScript) WithOpaqueID(s string) func(*PutScriptRequest) { + return func(r *PutScriptRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_rule.go new file mode 100644 index 000000000..622b34333 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_rule.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesDeleteRuleFunc(t Transport) QueryRulesDeleteRule { + return func(rule_id string, ruleset_id string, o ...func(*QueryRulesDeleteRuleRequest)) (*Response, error) { + var r = QueryRulesDeleteRuleRequest{RuleID: rule_id, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesDeleteRule deletes an individual query rule within a ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html. 
+type QueryRulesDeleteRule func(rule_id string, ruleset_id string, o ...func(*QueryRulesDeleteRuleRequest)) (*Response, error) + +// QueryRulesDeleteRuleRequest configures the Query Rules Delete Rule API request. +type QueryRulesDeleteRuleRequest struct { + RuleID string + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r QueryRulesDeleteRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID) + 1 + len("_rule") + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.delete_rule") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.delete_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesDeleteRule) WithContext(v context.Context) func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesDeleteRule) WithPretty() func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
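Editor's note: every Do above returns the same thin esapi.Response wrapper (StatusCode, Body, Header) rather than a decoded payload, and never closes the body. The checking pattern below uses Response.IsError and Response.String, which are part of esapi but defined outside this diff; note that the positional arguments follow the generated order, rule_id before ruleset_id.

	ctx := context.Background()
	res, err := es.QueryRules.DeleteRule("my-rule", "my-ruleset",
		es.QueryRules.DeleteRule.WithContext(ctx),
	)
	if err != nil {
		log.Fatalf("transport: %s", err) // connection or context error
	}
	defer res.Body.Close() // Do never closes res.Body; the caller owns it
	if res.IsError() {     // HTTP 4xx/5xx responses still return err == nil
		log.Fatalf("elasticsearch: %s", res.String())
	}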
+func (f QueryRulesDeleteRule) WithHuman() func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesDeleteRule) WithErrorTrace() func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesDeleteRule) WithFilterPath(v ...string) func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesDeleteRule) WithHeader(h map[string]string) func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesDeleteRule) WithOpaqueID(s string) func(*QueryRulesDeleteRuleRequest) { + return func(r *QueryRulesDeleteRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_ruleset.go new file mode 100644 index 000000000..6b76bfedc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.delete_ruleset.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesDeleteRulesetFunc(t Transport) QueryRulesDeleteRuleset { + return func(ruleset_id string, o ...func(*QueryRulesDeleteRulesetRequest)) (*Response, error) { + var r = QueryRulesDeleteRulesetRequest{RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesDeleteRuleset deletes a query ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html. +type QueryRulesDeleteRuleset func(ruleset_id string, o ...func(*QueryRulesDeleteRulesetRequest)) (*Response, error) + +// QueryRulesDeleteRulesetRequest configures the Query Rules Delete Ruleset API request. 
+type QueryRulesDeleteRulesetRequest struct { + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r QueryRulesDeleteRulesetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.delete_ruleset") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.delete_ruleset") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesDeleteRuleset) WithContext(v context.Context) func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesDeleteRuleset) WithPretty() func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesDeleteRuleset) WithHuman() func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesDeleteRuleset) WithErrorTrace() func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f QueryRulesDeleteRuleset) WithFilterPath(v ...string) func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesDeleteRuleset) WithHeader(h map[string]string) func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesDeleteRuleset) WithOpaqueID(s string) func(*QueryRulesDeleteRulesetRequest) { + return func(r *QueryRulesDeleteRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_rule.go new file mode 100644 index 000000000..1da4aa03b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_rule.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesGetRuleFunc(t Transport) QueryRulesGetRule { + return func(rule_id string, ruleset_id string, o ...func(*QueryRulesGetRuleRequest)) (*Response, error) { + var r = QueryRulesGetRuleRequest{RuleID: rule_id, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesGetRule returns the details about an individual query rule within a ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-rule.html. +type QueryRulesGetRule func(rule_id string, ruleset_id string, o ...func(*QueryRulesGetRuleRequest)) (*Response, error) + +// QueryRulesGetRuleRequest configures the Query Rules Get Rule API request. +type QueryRulesGetRuleRequest struct { + RuleID string + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesGetRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID) + 1 + len("_rule") + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.get_rule") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.get_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesGetRule) WithContext(v context.Context) func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesGetRule) WithPretty() func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesGetRule) WithHuman() func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesGetRule) WithErrorTrace() func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
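Editor's note: the instrumentation hooks threaded through every Do above imply the shape of the instrumentation contract even though its declaration is not part of this diff. The transport is asserted to Instrumented once per call, and r.Instrument is asserted to Instrumentation at each hook, so an uninstrumented client only pays for failed type assertions. Reconstructed from the call sites, with signatures inferred rather than copied:

	// Inferred from usage in the generated Do methods; the authoritative
	// definitions live elsewhere in esapi / elastic-transport-go.
	type Instrumented interface {
		InstrumentationEnabled() Instrumentation
	}

	type Instrumentation interface {
		Start(ctx context.Context, endpoint string) context.Context
		Close(ctx context.Context)
		RecordError(ctx context.Context, err error)
		RecordPathPart(ctx context.Context, pathPart, value string)
		RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser
		BeforeRequest(req *http.Request, endpoint string)
		AfterRequest(req *http.Request, system, endpoint string)
	}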
+func (f QueryRulesGetRule) WithFilterPath(v ...string) func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesGetRule) WithHeader(h map[string]string) func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesGetRule) WithOpaqueID(s string) func(*QueryRulesGetRuleRequest) { + return func(r *QueryRulesGetRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_ruleset.go new file mode 100644 index 000000000..9624967da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.get_ruleset.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesGetRulesetFunc(t Transport) QueryRulesGetRuleset { + return func(ruleset_id string, o ...func(*QueryRulesGetRulesetRequest)) (*Response, error) { + var r = QueryRulesGetRulesetRequest{RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesGetRuleset returns the details about a query ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html. +type QueryRulesGetRuleset func(ruleset_id string, o ...func(*QueryRulesGetRulesetRequest)) (*Response, error) + +// QueryRulesGetRulesetRequest configures the Query Rules Get Ruleset API request. +type QueryRulesGetRulesetRequest struct { + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesGetRulesetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.get_ruleset") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.get_ruleset") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesGetRuleset) WithContext(v context.Context) func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesGetRuleset) WithPretty() func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesGetRuleset) WithHuman() func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesGetRuleset) WithErrorTrace() func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesGetRuleset) WithFilterPath(v ...string) func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
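Editor's note: WithFilterPath joins its variadic arguments with commas into the standard filter_path query parameter, which trims the response JSON server-side before it crosses the wire. A small sketch against the get-ruleset endpoint, reusing the es client from above; the field paths are illustrative, and error handling is elided:

	// Return only the ruleset id and the ids of its rules.
	res, err := es.QueryRules.GetRuleset("my-ruleset",
		es.QueryRules.GetRuleset.WithFilterPath("ruleset_id", "rules.rule_id"),
		es.QueryRules.GetRuleset.WithPretty(),
	)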
+func (f QueryRulesGetRuleset) WithHeader(h map[string]string) func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesGetRuleset) WithOpaqueID(s string) func(*QueryRulesGetRulesetRequest) { + return func(r *QueryRulesGetRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.list_rulesets.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.list_rulesets.go new file mode 100644 index 000000000..12eb99cd8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.list_rulesets.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newQueryRulesListRulesetsFunc(t Transport) QueryRulesListRulesets { + return func(o ...func(*QueryRulesListRulesetsRequest)) (*Response, error) { + var r = QueryRulesListRulesetsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesListRulesets lists query rulesets. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html. +type QueryRulesListRulesets func(o ...func(*QueryRulesListRulesetsRequest)) (*Response, error) + +// QueryRulesListRulesetsRequest configures the Query Rules List Rulesets API request. +type QueryRulesListRulesetsRequest struct { + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesListRulesetsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.list_rulesets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_query_rules")) + path.WriteString("http://") + path.WriteString("/_query_rules") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.list_rulesets") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.list_rulesets") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesListRulesets) WithContext(v context.Context) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset (default: 0). +func (f QueryRulesListRulesets) WithFrom(v int) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get (default: 100). +func (f QueryRulesListRulesets) WithSize(v int) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesListRulesets) WithPretty() func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesListRulesets) WithHuman() func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesListRulesets) WithErrorTrace() func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
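Editor's note: From and Size are *int so that an explicit zero is distinguishable from "unset"; WithFrom and WithSize store the address of their argument, and Do only emits the parameter when the pointer is non-nil. A paging sketch, error handling elided:

	// Fetch the second page of twenty rulesets (offset 20).
	res, err := es.QueryRules.ListRulesets(
		es.QueryRules.ListRulesets.WithFrom(20),
		es.QueryRules.ListRulesets.WithSize(20),
	)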
+func (f QueryRulesListRulesets) WithFilterPath(v ...string) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesListRulesets) WithHeader(h map[string]string) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesListRulesets) WithOpaqueID(s string) func(*QueryRulesListRulesetsRequest) { + return func(r *QueryRulesListRulesetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_rule.go new file mode 100644 index 000000000..74d4b8c1b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_rule.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newQueryRulesPutRuleFunc(t Transport) QueryRulesPutRule { + return func(body io.Reader, rule_id string, ruleset_id string, o ...func(*QueryRulesPutRuleRequest)) (*Response, error) { + var r = QueryRulesPutRuleRequest{Body: body, RuleID: rule_id, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesPutRule creates or updates a query rule within a ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html. +type QueryRulesPutRule func(body io.Reader, rule_id string, ruleset_id string, o ...func(*QueryRulesPutRuleRequest)) (*Response, error) + +// QueryRulesPutRuleRequest configures the Query Rules Put Rule API request. +type QueryRulesPutRuleRequest struct { + Body io.Reader + + RuleID string + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesPutRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.put_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID) + 1 + len("_rule") + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.put_rule") + if reader := instrument.RecordRequestBody(ctx, "query_rules.put_rule", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.put_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesPutRule) WithContext(v context.Context) func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesPutRule) WithPretty() func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesPutRule) WithHuman() func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f QueryRulesPutRule) WithErrorTrace() func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesPutRule) WithFilterPath(v ...string) func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesPutRule) WithHeader(h map[string]string) func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesPutRule) WithOpaqueID(s string) func(*QueryRulesPutRuleRequest) { + return func(r *QueryRulesPutRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_ruleset.go new file mode 100644 index 000000000..29f53f0e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.put_ruleset.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newQueryRulesPutRulesetFunc(t Transport) QueryRulesPutRuleset { + return func(body io.Reader, ruleset_id string, o ...func(*QueryRulesPutRulesetRequest)) (*Response, error) { + var r = QueryRulesPutRulesetRequest{Body: body, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesPutRuleset creates or updates a query ruleset. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html. +type QueryRulesPutRuleset func(body io.Reader, ruleset_id string, o ...func(*QueryRulesPutRulesetRequest)) (*Response, error) + +// QueryRulesPutRulesetRequest configures the Query Rules Put Ruleset API request. +type QueryRulesPutRulesetRequest struct { + Body io.Reader + + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesPutRulesetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.put_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.put_ruleset") + if reader := instrument.RecordRequestBody(ctx, "query_rules.put_ruleset", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.put_ruleset") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesPutRuleset) WithContext(v context.Context) func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesPutRuleset) WithPretty() func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesPutRuleset) WithHuman() func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesPutRuleset) WithErrorTrace() func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
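Editor's note: both put endpoints take the JSON body as the first argument, followed by the path identifiers in the generated order (rule_id before ruleset_id for PutRule). The sketch below pins a document for a given user query; the rule JSON follows the query-rules schema as documented upstream, but the ids and values here are invented.

	ruleJSON := strings.NewReader(`{
	  "type": "pinned",
	  "criteria": [{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
	  "actions": {"ids": ["doc-1"]}
	}`)
	res, err := es.QueryRules.PutRule(ruleJSON, "my-rule", "my-ruleset",
		es.QueryRules.PutRule.WithContext(ctx),
	)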
+func (f QueryRulesPutRuleset) WithFilterPath(v ...string) func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesPutRuleset) WithHeader(h map[string]string) func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesPutRuleset) WithOpaqueID(s string) func(*QueryRulesPutRulesetRequest) { + return func(r *QueryRulesPutRulesetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.test.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.test.go new file mode 100644 index 000000000..8d7bf9f28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.query_rules.test.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newQueryRulesTestFunc(t Transport) QueryRulesTest { + return func(body io.Reader, ruleset_id string, o ...func(*QueryRulesTestRequest)) (*Response, error) { + var r = QueryRulesTestRequest{Body: body, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesTest tests a query ruleset to identify the rules that would match input criteria +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/test-query-ruleset.html. +type QueryRulesTest func(body io.Reader, ruleset_id string, o ...func(*QueryRulesTestRequest)) (*Response, error) + +// QueryRulesTestRequest configures the Query Rules Test API request. +type QueryRulesTestRequest struct { + Body io.Reader + + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesTestRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID) + 1 + len("_test")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleset_id", r.RulesetID) + } + path.WriteString("/") + path.WriteString("_test") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.test") + if reader := instrument.RecordRequestBody(ctx, "query_rules.test", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.test") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesTest) WithContext(v context.Context) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesTest) WithPretty() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesTest) WithHuman() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesTest) WithErrorTrace() func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesTest) WithFilterPath(v ...string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f QueryRulesTest) WithHeader(h map[string]string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesTest) WithOpaqueID(s string) func(*QueryRulesTestRequest) { + return func(r *QueryRulesTestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.rank_eval.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.rank_eval.go new file mode 100644 index 000000000..25e99041d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.rank_eval.go @@ -0,0 +1,292 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newRankEvalFunc(t Transport) RankEval { + return func(body io.Reader, o ...func(*RankEvalRequest)) (*Response, error) { + var r = RankEvalRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RankEval allows to evaluate the quality of ranked search results over a set of typical search queries +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html. +type RankEval func(body io.Reader, o ...func(*RankEvalRequest)) (*Response, error) + +// RankEvalRequest configures the Rank Eval API request. +type RankEvalRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + SearchType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
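+//
+// [Editor's usage sketch, not generated code] A minimal rank evaluation against
+// a hypothetical index, assuming an initialized client named "es":
+//
+//	res, err := es.RankEval(
+//		strings.NewReader(`{
+//		  "requests": [
+//		    {"id": "q1", "request": {"query": {"match": {"title": "go"}}}, "ratings": []}
+//		  ],
+//		  "metric": {"recall": {"k": 10}}
+//		}`),
+//		es.RankEval.WithIndex("my-index"),
+//	)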
+func (r RankEvalRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rank_eval") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_rank_eval")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_rank_eval") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rank_eval") + if reader := instrument.RecordRequestBody(ctx, "rank_eval", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rank_eval") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RankEval) WithContext(v context.Context) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to search; use _all to perform the operation on all indices. +func (f RankEval) WithIndex(v ...string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). 
+func (f RankEval) WithAllowNoIndices(v bool) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f RankEval) WithExpandWildcards(v string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f RankEval) WithIgnoreUnavailable(v bool) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithSearchType - search operation type. +func (f RankEval) WithSearchType(v string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.SearchType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RankEval) WithPretty() func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RankEval) WithHuman() func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RankEval) WithErrorTrace() func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RankEval) WithFilterPath(v ...string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RankEval) WithHeader(h map[string]string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RankEval) WithOpaqueID(s string) func(*RankEvalRequest) { + return func(r *RankEvalRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex.go new file mode 100644 index 000000000..ebea311a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex.go @@ -0,0 +1,339 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newReindexFunc(t Transport) Reindex { + return func(body io.Reader, o ...func(*ReindexRequest)) (*Response, error) { + var r = ReindexRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Reindex allows to copy documents from one index to another, optionally filtering the source +// documents by a query, changing the destination index settings, or fetching the +// documents from a remote cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html. +type Reindex func(body io.Reader, o ...func(*ReindexRequest)) (*Response, error) + +// ReindexRequest configures the Reindex API request. +type ReindexRequest struct { + Body io.Reader + + MaxDocs *int + Refresh *bool + RequestsPerSecond *int + RequireAlias *bool + Scroll time.Duration + Slices interface{} + Timeout time.Duration + WaitForActiveShards string + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ReindexRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_reindex")) + path.WriteString("http://") + path.WriteString("/_reindex") + + params = make(map[string]string) + + if r.MaxDocs != nil { + params["max_docs"] = strconv.FormatInt(int64(*r.MaxDocs), 10) + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if r.RequireAlias != nil { + params["require_alias"] = strconv.FormatBool(*r.RequireAlias) + } + + if r.Scroll != 0 { + params["scroll"] = formatDuration(r.Scroll) + } + + if r.Slices != nil { + params["slices"] = fmt.Sprintf("%v", r.Slices) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header 
= r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "reindex")
+		if reader := instrument.RecordRequestBody(ctx, "reindex", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "reindex")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f Reindex) WithContext(v context.Context) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.ctx = v
+	}
+}
+
+// WithMaxDocs - maximum number of documents to process (default: all documents).
+func (f Reindex) WithMaxDocs(v int) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.MaxDocs = &v
+	}
+}
+
+// WithRefresh - should the affected indexes be refreshed?
+func (f Reindex) WithRefresh(v bool) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.Refresh = &v
+	}
+}
+
+// WithRequestsPerSecond - the throttle to set on this request in sub-requests per second. -1 means no throttle.
+func (f Reindex) WithRequestsPerSecond(v int) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.RequestsPerSecond = &v
+	}
+}
+
+// WithRequireAlias - when true, requires destination to be an alias.
+func (f Reindex) WithRequireAlias(v bool) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.RequireAlias = &v
+	}
+}
+
+// WithScroll - control how long to keep the search context alive.
+func (f Reindex) WithScroll(v time.Duration) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.Scroll = v
+	}
+}
+
+// WithSlices - the number of slices this task should be divided into. defaults to 1, meaning the task isn't sliced into subtasks. can be set to `auto`.
+func (f Reindex) WithSlices(v interface{}) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.Slices = v
+	}
+}
+
+// WithTimeout - time each individual bulk request should wait for shards that are unavailable.
+func (f Reindex) WithTimeout(v time.Duration) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the reindex operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1).
+func (f Reindex) WithWaitForActiveShards(v string) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.WaitForActiveShards = v
+	}
+}
+
+// WithWaitForCompletion - whether the request should block until the reindex is complete.
+func (f Reindex) WithWaitForCompletion(v bool) func(*ReindexRequest) {
+	return func(r *ReindexRequest) {
+		r.WaitForCompletion = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
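+//
+// [Editor's usage sketch, not generated code] Combining the options above, a
+// sliced, fire-and-forget reindex could look like (assuming a client "es"):
+//
+//	res, err := es.Reindex(
+//		strings.NewReader(`{"source":{"index":"old-index"},"dest":{"index":"new-index"}}`),
+//		es.Reindex.WithSlices("auto"),
+//		es.Reindex.WithWaitForCompletion(false),
+//	)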
+func (f Reindex) WithPretty() func(*ReindexRequest) { + return func(r *ReindexRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Reindex) WithHuman() func(*ReindexRequest) { + return func(r *ReindexRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Reindex) WithErrorTrace() func(*ReindexRequest) { + return func(r *ReindexRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Reindex) WithFilterPath(v ...string) func(*ReindexRequest) { + return func(r *ReindexRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Reindex) WithHeader(h map[string]string) func(*ReindexRequest) { + return func(r *ReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Reindex) WithOpaqueID(s string) func(*ReindexRequest) { + return func(r *ReindexRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex_rethrottle.go new file mode 100644 index 000000000..db5275f66 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.reindex_rethrottle.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newReindexRethrottleFunc(t Transport) ReindexRethrottle { + return func(task_id string, requests_per_second *int, o ...func(*ReindexRethrottleRequest)) (*Response, error) { + var r = ReindexRethrottleRequest{TaskID: task_id, RequestsPerSecond: requests_per_second} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ReindexRethrottle changes the number of requests per second for a particular Reindex operation. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html. +type ReindexRethrottle func(task_id string, requests_per_second *int, o ...func(*ReindexRethrottleRequest)) (*Response, error) + +// ReindexRethrottleRequest configures the Reindex Rethrottle API request. 
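+//
+// [Editor's usage sketch, not generated code] The task ID typically comes from
+// a reindex started with WithWaitForCompletion(false); assuming a client "es":
+//
+//	rps := 500
+//	res, err := es.ReindexRethrottle(
+//		"oTUltX4IQMOUUVeiohTt8A:12345", // hypothetical task ID
+//		&rps,
+//	)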
+type ReindexRethrottleRequest struct { + TaskID string + + RequestsPerSecond *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ReindexRethrottleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "reindex_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_reindex") + 1 + len(r.TaskID) + 1 + len("_rethrottle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_reindex") + path.WriteString("/") + path.WriteString(r.TaskID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_id", r.TaskID) + } + path.WriteString("/") + path.WriteString("_rethrottle") + + params = make(map[string]string) + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "reindex_rethrottle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "reindex_rethrottle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ReindexRethrottle) WithContext(v context.Context) func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.ctx = v + } +} + +// WithRequestsPerSecond - the throttle to set on this request in floating sub-requests per second. -1 means set no throttle.. +func (f ReindexRethrottle) WithRequestsPerSecond(v int) func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.RequestsPerSecond = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ReindexRethrottle) WithPretty() func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ReindexRethrottle) WithHuman() func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ReindexRethrottle) WithErrorTrace() func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ReindexRethrottle) WithFilterPath(v ...string) func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ReindexRethrottle) WithHeader(h map[string]string) func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ReindexRethrottle) WithOpaqueID(s string) func(*ReindexRethrottleRequest) { + return func(r *ReindexRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.render_search_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.render_search_template.go new file mode 100644 index 000000000..15a6f4fe3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.render_search_template.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newRenderSearchTemplateFunc(t Transport) RenderSearchTemplate { + return func(o ...func(*RenderSearchTemplateRequest)) (*Response, error) { + var r = RenderSearchTemplateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RenderSearchTemplate allows to use the Mustache language to pre-render a search definition. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html. +type RenderSearchTemplate func(o ...func(*RenderSearchTemplateRequest)) (*Response, error) + +// RenderSearchTemplateRequest configures the Render Search Template API request. 
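+//
+// [Editor's usage sketch, not generated code] Rendering a stored template by ID
+// with parameters supplied in the body, assuming a client "es":
+//
+//	res, err := es.RenderSearchTemplate(
+//		es.RenderSearchTemplate.WithTemplateID("my-template"), // hypothetical template ID
+//		es.RenderSearchTemplate.WithBody(strings.NewReader(`{"params":{"query_string":"hello"}}`)),
+//	)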
+type RenderSearchTemplateRequest struct { + TemplateID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r RenderSearchTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "render_search_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_render") + 1 + len("template") + 1 + len(r.TemplateID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_render") + path.WriteString("/") + path.WriteString("template") + if r.TemplateID != "" { + path.WriteString("/") + path.WriteString(r.TemplateID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.TemplateID) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "render_search_template") + if reader := instrument.RecordRequestBody(ctx, "render_search_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "render_search_template") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RenderSearchTemplate) WithContext(v context.Context) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.ctx = v + } +} + +// WithBody - The search definition template and its params. +func (f RenderSearchTemplate) WithBody(v io.Reader) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.Body = v + } +} + +// WithTemplateID - the ID of the stored search template. +func (f RenderSearchTemplate) WithTemplateID(v string) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.TemplateID = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f RenderSearchTemplate) WithPretty() func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RenderSearchTemplate) WithHuman() func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RenderSearchTemplate) WithErrorTrace() func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RenderSearchTemplate) WithFilterPath(v ...string) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RenderSearchTemplate) WithHeader(h map[string]string) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RenderSearchTemplate) WithOpaqueID(s string) func(*RenderSearchTemplateRequest) { + return func(r *RenderSearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scripts_painless_execute.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scripts_painless_execute.go new file mode 100644 index 000000000..aed66e300 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scripts_painless_execute.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newScriptsPainlessExecuteFunc(t Transport) ScriptsPainlessExecute { + return func(o ...func(*ScriptsPainlessExecuteRequest)) (*Response, error) { + var r = ScriptsPainlessExecuteRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ScriptsPainlessExecute allows an arbitrary script to be executed and a result to be returned +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html. 
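+//
+// [Editor's usage sketch, not generated code] The body carries the script and
+// its parameters; assuming a client "es":
+//
+//	res, err := es.ScriptsPainlessExecute(
+//		es.ScriptsPainlessExecute.WithBody(strings.NewReader(
+//			`{"script":{"source":"params.count / params.total","params":{"count":100.0,"total":1000.0}}}`,
+//		)),
+//	)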
+type ScriptsPainlessExecute func(o ...func(*ScriptsPainlessExecuteRequest)) (*Response, error) + +// ScriptsPainlessExecuteRequest configures the Scripts Painless Execute API request. +type ScriptsPainlessExecuteRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ScriptsPainlessExecuteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "scripts_painless_execute") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_scripts/painless/_execute")) + path.WriteString("http://") + path.WriteString("/_scripts/painless/_execute") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "scripts_painless_execute") + if reader := instrument.RecordRequestBody(ctx, "scripts_painless_execute", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "scripts_painless_execute") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ScriptsPainlessExecute) WithContext(v context.Context) func(*ScriptsPainlessExecuteRequest) { + return func(r *ScriptsPainlessExecuteRequest) { + r.ctx = v + } +} + +// WithBody - The script to execute. +func (f ScriptsPainlessExecute) WithBody(v io.Reader) func(*ScriptsPainlessExecuteRequest) { + return func(r *ScriptsPainlessExecuteRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ScriptsPainlessExecute) WithPretty() func(*ScriptsPainlessExecuteRequest) { + return func(r *ScriptsPainlessExecuteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ScriptsPainlessExecute) WithHuman() func(*ScriptsPainlessExecuteRequest) {
	return func(r *ScriptsPainlessExecuteRequest) {
		r.Human = true
	}
}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ScriptsPainlessExecute) WithErrorTrace() func(*ScriptsPainlessExecuteRequest) {
+	return func(r *ScriptsPainlessExecuteRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ScriptsPainlessExecute) WithFilterPath(v ...string) func(*ScriptsPainlessExecuteRequest) {
+	return func(r *ScriptsPainlessExecuteRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ScriptsPainlessExecute) WithHeader(h map[string]string) func(*ScriptsPainlessExecuteRequest) {
+	return func(r *ScriptsPainlessExecuteRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ScriptsPainlessExecute) WithOpaqueID(s string) func(*ScriptsPainlessExecuteRequest) {
+	return func(r *ScriptsPainlessExecuteRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scroll.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scroll.go
new file mode 100644
index 000000000..f13365eaa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.scroll.go
@@ -0,0 +1,272 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newScrollFunc(t Transport) Scroll {
+	return func(o ...func(*ScrollRequest)) (*Response, error) {
+		var r = ScrollRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// Scroll allows to retrieve a large number of results from a single search request.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll.
+type Scroll func(o ...func(*ScrollRequest)) (*Response, error)
+
+// ScrollRequest configures the Scroll API request.
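+//
+// [Editor's usage sketch, not generated code] A typical pagination loop issues
+// an initial search with a scroll window, then keeps calling this API with the
+// returned _scroll_id until no hits remain; assuming a client "es":
+//
+//	res, err := es.Scroll(
+//		es.Scroll.WithScrollID(scrollID), // hypothetical variable holding the previous _scroll_id
+//		es.Scroll.WithScroll(time.Minute),
+//	)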
+type ScrollRequest struct {
+	Body io.Reader
+
+	ScrollID string
+
+	RestTotalHitsAsInt *bool
+	Scroll             time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r ScrollRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "scroll")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + len("/_search/scroll"))
+	path.WriteString("http://")
+	path.WriteString("/_search/scroll")
+
+	params = make(map[string]string)
+
+	if r.RestTotalHitsAsInt != nil {
+		params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt)
+	}
+
+	if r.Scroll != 0 {
+		params["scroll"] = formatDuration(r.Scroll)
+	}
+
+	if r.ScrollID != "" {
+		params["scroll_id"] = r.ScrollID
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "scroll")
+		if reader := instrument.RecordRequestBody(ctx, "scroll", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "scroll")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f Scroll) WithContext(v context.Context) func(*ScrollRequest) {
+	return func(r *ScrollRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - The scroll ID if not passed by URL or query parameter.
+func (f Scroll) WithBody(v io.Reader) func(*ScrollRequest) {
+	return func(r *ScrollRequest) {
+		r.Body = v
+	}
+}
+
+// WithScrollID - the scroll ID.
+func (f Scroll) WithScrollID(v string) func(*ScrollRequest) {
+	return func(r *ScrollRequest) {
+		r.ScrollID = v
+	}
+}
+
+// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response.
+func (f Scroll) WithRestTotalHitsAsInt(v bool) func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithScroll - specify how long a consistent view of the index should be maintained for scrolled search. +func (f Scroll) WithScroll(v time.Duration) func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.Scroll = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Scroll) WithPretty() func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Scroll) WithHuman() func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Scroll) WithErrorTrace() func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Scroll) WithFilterPath(v ...string) func(*ScrollRequest) { + return func(r *ScrollRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Scroll) WithHeader(h map[string]string) func(*ScrollRequest) { + return func(r *ScrollRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Scroll) WithOpaqueID(s string) func(*ScrollRequest) { + return func(r *ScrollRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search.go new file mode 100644 index 000000000..9d943a4a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search.go @@ -0,0 +1,781 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSearchFunc(t Transport) Search { + return func(o ...func(*SearchRequest)) (*Response, error) { + var r = SearchRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Search returns results matching a query. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html. 
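+//
+// [Editor's usage sketch, not generated code] A basic match query against one
+// index, assuming an initialized client named "es":
+//
+//	res, err := es.Search(
+//		es.Search.WithContext(context.Background()),
+//		es.Search.WithIndex("my-index"),
+//		es.Search.WithBody(strings.NewReader(`{"query":{"match":{"title":"go"}}}`)),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}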
+type Search func(o ...func(*SearchRequest)) (*Response, error) + +// SearchRequest configures the Search API request. +type SearchRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + AllowPartialSearchResults *bool + Analyzer string + AnalyzeWildcard *bool + BatchedReduceSize *int + CcsMinimizeRoundtrips *bool + DefaultOperator string + Df string + DocvalueFields []string + ExpandWildcards string + Explain *bool + ForceSyntheticSource *bool + From *int + IgnoreThrottled *bool + IgnoreUnavailable *bool + IncludeNamedQueriesScore *bool + Lenient *bool + MaxConcurrentShardRequests *int + Preference string + PreFilterShardSize *int + Query string + RequestCache *bool + RestTotalHitsAsInt *bool + Routing []string + Scroll time.Duration + SearchType string + SeqNoPrimaryTerm *bool + Size *int + Sort []string + Source []string + SourceExcludes []string + SourceIncludes []string + Stats []string + StoredFields []string + SuggestField string + SuggestMode string + SuggestSize *int + SuggestText string + TerminateAfter *int + Timeout time.Duration + TrackScores *bool + TrackTotalHits interface{} + TypedKeys *bool + Version *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_search")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_search") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.BatchedReduceSize != nil { + params["batched_reduce_size"] = strconv.FormatInt(int64(*r.BatchedReduceSize), 10) + } + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if len(r.DocvalueFields) > 0 { + params["docvalue_fields"] = strings.Join(r.DocvalueFields, ",") + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.ForceSyntheticSource != nil { + params["force_synthetic_source"] = strconv.FormatBool(*r.ForceSyntheticSource) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.IgnoreThrottled != nil { + 
params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeNamedQueriesScore != nil { + params["include_named_queries_score"] = strconv.FormatBool(*r.IncludeNamedQueriesScore) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.MaxConcurrentShardRequests != nil { + params["max_concurrent_shard_requests"] = strconv.FormatInt(int64(*r.MaxConcurrentShardRequests), 10) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.PreFilterShardSize != nil { + params["pre_filter_shard_size"] = strconv.FormatInt(int64(*r.PreFilterShardSize), 10) + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.RequestCache != nil { + params["request_cache"] = strconv.FormatBool(*r.RequestCache) + } + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.Scroll != 0 { + params["scroll"] = formatDuration(r.Scroll) + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.SeqNoPrimaryTerm != nil { + params["seq_no_primary_term"] = strconv.FormatBool(*r.SeqNoPrimaryTerm) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if len(r.Sort) > 0 { + params["sort"] = strings.Join(r.Sort, ",") + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.Stats) > 0 { + params["stats"] = strings.Join(r.Stats, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.SuggestField != "" { + params["suggest_field"] = r.SuggestField + } + + if r.SuggestMode != "" { + params["suggest_mode"] = r.SuggestMode + } + + if r.SuggestSize != nil { + params["suggest_size"] = strconv.FormatInt(int64(*r.SuggestSize), 10) + } + + if r.SuggestText != "" { + params["suggest_text"] = r.SuggestText + } + + if r.TerminateAfter != nil { + params["terminate_after"] = strconv.FormatInt(int64(*r.TerminateAfter), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.TrackScores != nil { + params["track_scores"] = strconv.FormatBool(*r.TrackScores) + } + + if r.TrackTotalHits != nil { + params["track_total_hits"] = fmt.Sprintf("%v", r.TrackTotalHits) + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Version != nil { + params["version"] = strconv.FormatBool(*r.Version) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } 
else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search") + if reader := instrument.RecordRequestBody(ctx, "search", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Search) WithContext(v context.Context) func(*SearchRequest) { + return func(r *SearchRequest) { + r.ctx = v + } +} + +// WithBody - The search definition using the Query DSL. +func (f Search) WithBody(v io.Reader) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Body = v + } +} + +// WithIndex - a list of index names to search; use _all to perform the operation on all indices. +func (f Search) WithIndex(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f Search) WithAllowNoIndices(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.AllowNoIndices = &v + } +} + +// WithAllowPartialSearchResults - indicate if an error should be returned if there is a partial search failure or timeout. +func (f Search) WithAllowPartialSearchResults(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f Search) WithAnalyzer(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f Search) WithAnalyzeWildcard(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithBatchedReduceSize - the number of shard results that should be reduced at once on the coordinating node. this value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.. +func (f Search) WithBatchedReduceSize(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.BatchedReduceSize = &v + } +} + +// WithCcsMinimizeRoundtrips - indicates whether network round-trips should be minimized as part of cross-cluster search requests execution. +func (f Search) WithCcsMinimizeRoundtrips(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithDefaultOperator - the default operator for query string query (and or or). +func (f Search) WithDefaultOperator(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.DefaultOperator = v + } +} + +// WithDf - the field to use as default where no field prefix is given in the query string. 
+func (f Search) WithDf(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Df = v + } +} + +// WithDocvalueFields - a list of fields to return as the docvalue representation of a field for each hit. +func (f Search) WithDocvalueFields(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.DocvalueFields = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f Search) WithExpandWildcards(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.ExpandWildcards = v + } +} + +// WithExplain - specify whether to return detailed information about score computation as part of a hit. +func (f Search) WithExplain(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Explain = &v + } +} + +// WithForceSyntheticSource - should this request force synthetic _source? use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. fetches with this enabled will be slower the enabling synthetic source natively in the index.. +func (f Search) WithForceSyntheticSource(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.ForceSyntheticSource = &v + } +} + +// WithFrom - starting offset (default: 0). +func (f Search) WithFrom(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.From = &v + } +} + +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +func (f Search) WithIgnoreThrottled(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f Search) WithIgnoreUnavailable(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeNamedQueriesScore - indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). +func (f Search) WithIncludeNamedQueriesScore(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.IncludeNamedQueriesScore = &v + } +} + +// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. +func (f Search) WithLenient(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Lenient = &v + } +} + +// WithMaxConcurrentShardRequests - the number of concurrent shard requests per node this search executes concurrently. this value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +func (f Search) WithMaxConcurrentShardRequests(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.MaxConcurrentShardRequests = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f Search) WithPreference(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Preference = v + } +} + +// WithPreFilterShardSize - a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
this filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.. +func (f Search) WithPreFilterShardSize(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.PreFilterShardSize = &v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f Search) WithQuery(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Query = v + } +} + +// WithRequestCache - specify if request cache should be used for this request or not, defaults to index level setting. +func (f Search) WithRequestCache(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.RequestCache = &v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f Search) WithRestTotalHitsAsInt(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithRouting - a list of specific routing values. +func (f Search) WithRouting(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Routing = v + } +} + +// WithScroll - specify how long a consistent view of the index should be maintained for scrolled search. +func (f Search) WithScroll(v time.Duration) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Scroll = v + } +} + +// WithSearchType - search operation type. +func (f Search) WithSearchType(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SearchType = v + } +} + +// WithSeqNoPrimaryTerm - specify whether to return sequence number and primary term of the last modification of each hit. +func (f Search) WithSeqNoPrimaryTerm(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SeqNoPrimaryTerm = &v + } +} + +// WithSize - number of hits to return (default: 10). +func (f Search) WithSize(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Size = &v + } +} + +// WithSort - a list of <field>:<direction> pairs. +func (f Search) WithSort(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Sort = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f Search) WithSource(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f Search) WithSourceExcludes(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f Search) WithSourceIncludes(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SourceIncludes = v + } +} + +// WithStats - specific 'tag' of the request for logging and statistical purposes. +func (f Search) WithStats(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Stats = v + } +} + +// WithStoredFields - a list of stored fields to return as part of a hit. +func (f Search) WithStoredFields(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.StoredFields = v + } +} + +// WithSuggestField - specify which field to use for suggestions.
+func (f Search) WithSuggestField(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SuggestField = v + } +} + +// WithSuggestMode - specify suggest mode. +func (f Search) WithSuggestMode(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SuggestMode = v + } +} + +// WithSuggestSize - how many suggestions to return in response. +func (f Search) WithSuggestSize(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SuggestSize = &v + } +} + +// WithSuggestText - the source text for which the suggestions should be returned. +func (f Search) WithSuggestText(v string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.SuggestText = v + } +} + +// WithTerminateAfter - the maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.. +func (f Search) WithTerminateAfter(v int) func(*SearchRequest) { + return func(r *SearchRequest) { + r.TerminateAfter = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f Search) WithTimeout(v time.Duration) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Timeout = v + } +} + +// WithTrackScores - whether to calculate and return scores even if they are not used for sorting. +func (f Search) WithTrackScores(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.TrackScores = &v + } +} + +// WithTrackTotalHits - indicate if the number of documents that match the query should be tracked. a number can also be specified, to accurately track the total hit count up to the number.. +func (f Search) WithTrackTotalHits(v interface{}) func(*SearchRequest) { + return func(r *SearchRequest) { + r.TrackTotalHits = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f Search) WithTypedKeys(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.TypedKeys = &v + } +} + +// WithVersion - specify whether to return document version as part of a hit. +func (f Search) WithVersion(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.Version = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Search) WithPretty() func(*SearchRequest) { + return func(r *SearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Search) WithHuman() func(*SearchRequest) { + return func(r *SearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Search) WithErrorTrace() func(*SearchRequest) { + return func(r *SearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Search) WithFilterPath(v ...string) func(*SearchRequest) { + return func(r *SearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Search) WithHeader(h map[string]string) func(*SearchRequest) { + return func(r *SearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f Search) WithOpaqueID(s string) func(*SearchRequest) { + return func(r *SearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete.go new file mode 100644 index 000000000..15c58d20c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationDeleteFunc(t Transport) SearchApplicationDelete { + return func(name string, o ...func(*SearchApplicationDeleteRequest)) (*Response, error) { + var r = SearchApplicationDeleteRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationDelete deletes a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html. +type SearchApplicationDelete func(name string, o ...func(*SearchApplicationDeleteRequest)) (*Response, error) + +// SearchApplicationDeleteRequest configures the Search Application Delete API request. +type SearchApplicationDeleteRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
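A minimal usage sketch of the Search API defined above. The client construction, index name, and query body are illustrative assumptions; es.Search is the Search function type wired up by the v9 client, and each With* option fills a SearchRequest field that Do then serializes onto the URL, as the generated code shows.

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// NewDefaultClient targets http://localhost:9200 unless ELASTICSEARCH_URL is set.
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	res, err := es.Search(
		es.Search.WithContext(context.Background()),
		es.Search.WithIndex("my-index"), // hypothetical index name
		es.Search.WithBody(strings.NewReader(`{"query":{"match_all":{}}}`)),
		es.Search.WithSize(10),
		es.Search.WithTrackTotalHits(true), // bool or a number, per the interface{} field
	)
	if err != nil {
		log.Fatalf("search: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.Status())
}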
+func (r SearchApplicationDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationDelete) WithContext(v context.Context) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationDelete) WithPretty() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationDelete) WithHuman() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationDelete) WithErrorTrace() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationDelete) WithFilterPath(v ...string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SearchApplicationDelete) WithHeader(h map[string]string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationDelete) WithOpaqueID(s string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete_behavioral_analytics.go new file mode 100644 index 000000000..c825547e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.delete_behavioral_analytics.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationDeleteBehavioralAnalyticsFunc(t Transport) SearchApplicationDeleteBehavioralAnalytics { + return func(name string, o ...func(*SearchApplicationDeleteBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationDeleteBehavioralAnalyticsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationDeleteBehavioralAnalytics delete a behavioral analytics collection. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html. +type SearchApplicationDeleteBehavioralAnalytics func(name string, o ...func(*SearchApplicationDeleteBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationDeleteBehavioralAnalyticsRequest configures the Search Application Delete Behavioral Analytics API request. +type SearchApplicationDeleteBehavioralAnalyticsRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
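The Do method above can also be driven through the request struct directly. A fragment, assuming ctx and es from the Search sketch earlier (the client's Perform method satisfies the Transport parameter) plus the esapi and fmt imports; "my-app" is a placeholder name.

res, err := esapi.SearchApplicationDeleteRequest{Name: "my-app"}.Do(ctx, es)
if err != nil {
	return err // transport-level failure
}
defer res.Body.Close()
if res.IsError() { // non-2xx status from the cluster
	return fmt.Errorf("delete failed: %s", res.Status())
}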
+func (r SearchApplicationDeleteBehavioralAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.delete_behavioral_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.delete_behavioral_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationDeleteBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationDeleteBehavioralAnalytics) WithPretty() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationDeleteBehavioralAnalytics) WithHuman() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationDeleteBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SearchApplicationDeleteBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationDeleteBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationDeleteBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get.go new file mode 100644 index 000000000..fc2c96c5a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationGetFunc(t Transport) SearchApplicationGet { + return func(name string, o ...func(*SearchApplicationGetRequest)) (*Response, error) { + var r = SearchApplicationGetRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationGet returns the details about a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html. +type SearchApplicationGet func(name string, o ...func(*SearchApplicationGetRequest)) (*Response, error) + +// SearchApplicationGetRequest configures the Search Application Get API request. +type SearchApplicationGetRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
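For the SearchApplicationDeleteBehavioralAnalytics API above, the same request-struct form applies; a fragment with the collection name as a placeholder, reusing ctx and es from the earlier sketches.

res, err := esapi.SearchApplicationDeleteBehavioralAnalyticsRequest{
	Name: "my-collection", // hypothetical analytics collection
}.Do(ctx, es)
if err != nil {
	return err
}
defer res.Body.Close()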
+func (r SearchApplicationGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationGet) WithContext(v context.Context) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationGet) WithPretty() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationGet) WithHuman() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationGet) WithErrorTrace() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationGet) WithFilterPath(v ...string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SearchApplicationGet) WithHeader(h map[string]string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationGet) WithOpaqueID(s string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get_behavioral_analytics.go new file mode 100644 index 000000000..0e522025f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.get_behavioral_analytics.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationGetBehavioralAnalyticsFunc(t Transport) SearchApplicationGetBehavioralAnalytics { + return func(o ...func(*SearchApplicationGetBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationGetBehavioralAnalyticsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationGetBehavioralAnalytics returns the existing behavioral analytics collections. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html. +type SearchApplicationGetBehavioralAnalytics func(o ...func(*SearchApplicationGetBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationGetBehavioralAnalyticsRequest configures the Search Application Get Behavioral Analytics API request. +type SearchApplicationGetBehavioralAnalyticsRequest struct { + Name []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
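A fragment for the SearchApplicationGet API above, showing how the common response-shaping fields map onto query parameters in Do; names and filter paths are illustrative, with ctx and es as before.

req := esapi.SearchApplicationGetRequest{
	Name:       "my-app",         // placeholder application name
	FilterPath: []string{"name"}, // sent as ?filter_path=name
	Pretty:     true,             // sent as ?pretty=true
}
res, err := req.Do(ctx, es)
if err != nil {
	return err
}
defer res.Body.Close()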
+func (r SearchApplicationGetBehavioralAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.get_behavioral_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.get_behavioral_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationGetBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithName - a list of analytics collections to limit the returned information. +func (f SearchApplicationGetBehavioralAnalytics) WithName(v ...string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Name = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationGetBehavioralAnalytics) WithPretty() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SearchApplicationGetBehavioralAnalytics) WithHuman() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationGetBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationGetBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationGetBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationGetBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.list.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.list.go new file mode 100644 index 000000000..820bcd1f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.list.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationListFunc(t Transport) SearchApplicationList { + return func(o ...func(*SearchApplicationListRequest)) (*Response, error) { + var r = SearchApplicationListRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationList returns the existing search applications. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html. 
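For the SearchApplicationGetBehavioralAnalytics API above, Name is an optional slice: the path logic in Do only appends it when non-empty. A fragment, reusing ctx and es, with a placeholder collection name.

// Omit Name to list every analytics collection.
res, err := esapi.SearchApplicationGetBehavioralAnalyticsRequest{
	Name: []string{"my-collection"},
}.Do(ctx, es)
if err != nil {
	return err
}
defer res.Body.Close()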
+type SearchApplicationList func(o ...func(*SearchApplicationListRequest)) (*Response, error) + +// SearchApplicationListRequest configures the Search Application List API request. +type SearchApplicationListRequest struct { + From *int + Query string + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchApplicationListRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_application/search_application")) + path.WriteString("http://") + path.WriteString("/_application/search_application") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.list") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.list") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationList) WithContext(v context.Context) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset (default: 0). +func (f SearchApplicationList) WithFrom(v int) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.From = &v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f SearchApplicationList) WithQuery(v string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Query = v + } +} + +// WithSize - specifies a max number of results to get. 
+func (f SearchApplicationList) WithSize(v int) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationList) WithPretty() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationList) WithHuman() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationList) WithErrorTrace() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationList) WithFilterPath(v ...string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationList) WithHeader(h map[string]string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationList) WithOpaqueID(s string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.post_behavioral_analytics_event.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.post_behavioral_analytics_event.go new file mode 100644 index 000000000..e78491b27 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.post_behavioral_analytics_event.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
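A pagination fragment for the SearchApplicationList API above, with placeholder values and ctx and es as before. The pointer fields let Do distinguish "unset" from an explicit zero, which is why the With* helpers take plain values and store their addresses.

from, size := 0, 20
res, err := esapi.SearchApplicationListRequest{
	From:  &from,     // sent as ?from=0
	Size:  &size,     // sent as ?size=20
	Query: "my-app*", // Lucene query string, sent as ?q=
}.Do(ctx, es)
if err != nil {
	return err
}
defer res.Body.Close()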
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationPostBehavioralAnalyticsEventFunc(t Transport) SearchApplicationPostBehavioralAnalyticsEvent { + return func(body io.Reader, collection_name string, event_type string, o ...func(*SearchApplicationPostBehavioralAnalyticsEventRequest)) (*Response, error) { + var r = SearchApplicationPostBehavioralAnalyticsEventRequest{Body: body, CollectionName: collection_name, EventType: event_type} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPostBehavioralAnalyticsEvent creates a behavioral analytics event for existing collection. +// +// This API is experimental. +// +// See full documentation at http://todo.com/tbd. +type SearchApplicationPostBehavioralAnalyticsEvent func(body io.Reader, collection_name string, event_type string, o ...func(*SearchApplicationPostBehavioralAnalyticsEventRequest)) (*Response, error) + +// SearchApplicationPostBehavioralAnalyticsEventRequest configures the Search Application Post Behavioral Analytics Event API request. +type SearchApplicationPostBehavioralAnalyticsEventRequest struct { + Body io.Reader + + CollectionName string + EventType string + + Debug *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchApplicationPostBehavioralAnalyticsEventRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.CollectionName) + 1 + len("event") + 1 + len(r.EventType)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.CollectionName) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "collection_name", r.CollectionName) + } + path.WriteString("/") + path.WriteString("event") + path.WriteString("/") + path.WriteString(r.EventType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "event_type", r.EventType) + } + + params = make(map[string]string) + + if r.Debug != nil { + params["debug"] = strconv.FormatBool(*r.Debug) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params 
{ + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.post_behavioral_analytics_event") + if reader := instrument.RecordRequestBody(ctx, "search_application.post_behavioral_analytics_event", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.post_behavioral_analytics_event") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithContext(v context.Context) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.ctx = v + } +} + +// WithDebug - if true, returns event information that will be stored. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithDebug(v bool) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Debug = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithPretty() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithHuman() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithErrorTrace() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithFilterPath(v ...string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithHeader(h map[string]string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
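A fragment for the SearchApplicationPostBehavioralAnalyticsEvent API defined above. The event payload and event type are illustrative assumptions, not values taken from this diff; ctx, es, and the strings import are as in the earlier sketches.

debug := true
res, err := esapi.SearchApplicationPostBehavioralAnalyticsEventRequest{
	Body:           strings.NewReader(`{"session":{"id":"s1"}}`), // hypothetical event payload
	CollectionName: "my-collection",
	EventType:      "search_click",
	Debug:          &debug, // ask the server to echo the stored event back
}.Do(ctx, es)
if err != nil {
	return err
}
defer res.Body.Close()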
+func (f SearchApplicationPostBehavioralAnalyticsEvent) WithOpaqueID(s string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put.go new file mode 100644 index 000000000..a8bedc92f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationPutFunc(t Transport) SearchApplicationPut { + return func(name string, body io.Reader, o ...func(*SearchApplicationPutRequest)) (*Response, error) { + var r = SearchApplicationPutRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPut creates or updates a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html. +type SearchApplicationPut func(name string, body io.Reader, o ...func(*SearchApplicationPutRequest)) (*Response, error) + +// SearchApplicationPutRequest configures the Search Application Put API request. +type SearchApplicationPutRequest struct { + Body io.Reader + + Name string + + Create *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationPutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Create != nil { + params["create"] = strconv.FormatBool(*r.Create) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.put") + if reader := instrument.RecordRequestBody(ctx, "search_application.put", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.put") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationPut) WithContext(v context.Context) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.ctx = v + } +} + +// WithCreate - if true, requires that a search application with the specified resource_id does not already exist. (default: false). +func (f SearchApplicationPut) WithCreate(v bool) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Create = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationPut) WithPretty() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
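// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): creating or
// updating a search application with the SearchApplicationPut signature shown
// above (name plus a JSON body). The es.SearchApplication.Put accessor and
// the application/index names are assumptions; error handling is elided for
// brevity. Assumes es, _ := elasticsearch.NewDefaultClient() and the strings
// and context imports.
//
//    body := strings.NewReader(`{
//      "indices": ["products"],
//      "template": {
//        "script": {
//          "source": {"query": {"query_string": {"query": "{{query_string}}"}}},
//          "params": {"query_string": "*"}
//        }
//      }
//    }`)
//    res, err := es.SearchApplication.Put(
//        "my-search-app", body,
//        es.SearchApplication.Put.WithContext(context.Background()),
//        es.SearchApplication.Put.WithCreate(true), // fail instead of overwriting an existing app
//    )
//    if err == nil {
//        defer res.Body.Close()
//    }
// ---------------------------------------------------------------------------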
+func (f SearchApplicationPut) WithHuman() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPut) WithErrorTrace() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPut) WithFilterPath(v ...string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationPut) WithHeader(h map[string]string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationPut) WithOpaqueID(s string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put_behavioral_analytics.go new file mode 100644 index 000000000..4db751f7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.put_behavioral_analytics.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationPutBehavioralAnalyticsFunc(t Transport) SearchApplicationPutBehavioralAnalytics { + return func(name string, o ...func(*SearchApplicationPutBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationPutBehavioralAnalyticsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPutBehavioralAnalytics creates a behavioral analytics collection. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html. 
+type SearchApplicationPutBehavioralAnalytics func(name string, o ...func(*SearchApplicationPutBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationPutBehavioralAnalyticsRequest configures the Search Application Put Behavioral Analytics API request. +type SearchApplicationPutBehavioralAnalyticsRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchApplicationPutBehavioralAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.put_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.put_behavioral_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.put_behavioral_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationPutBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationPutBehavioralAnalytics) WithPretty() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
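// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): creating a
// behavioral analytics collection. Per the signature above, the only
// positional argument is the collection name; there is no request body. The
// es.SearchApplication.PutBehavioralAnalytics accessor and the collection
// name are assumptions.
//
//    res, err := es.SearchApplication.PutBehavioralAnalytics(
//        "website-analytics",
//        es.SearchApplication.PutBehavioralAnalytics.WithContext(context.Background()),
//    )
//    if err == nil && !res.IsError() {
//        // collection created (or already present, depending on server semantics)
//    }
// ---------------------------------------------------------------------------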
+func (f SearchApplicationPutBehavioralAnalytics) WithHuman() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPutBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPutBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationPutBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationPutBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.render_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.render_query.go new file mode 100644 index 000000000..13e659021 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.render_query.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSearchApplicationRenderQueryFunc(t Transport) SearchApplicationRenderQuery { + return func(name string, o ...func(*SearchApplicationRenderQueryRequest)) (*Response, error) { + var r = SearchApplicationRenderQueryRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationRenderQuery renders a query for given search application search parameters +// +// This API is experimental. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html. +type SearchApplicationRenderQuery func(name string, o ...func(*SearchApplicationRenderQueryRequest)) (*Response, error) + +// SearchApplicationRenderQueryRequest configures the Search Application Render Query API request. +type SearchApplicationRenderQueryRequest struct { + Body io.Reader + + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchApplicationRenderQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.render_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name) + 1 + len("_render_query")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("_render_query") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.render_query") + if reader := instrument.RecordRequestBody(ctx, "search_application.render_query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.render_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
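// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): rendering the
// final query a search application would run, without executing it, which is
// handy when debugging a template. The application name and parameters are
// hypothetical, and the es.SearchApplication.RenderQuery accessor is an
// assumption.
//
//    res, err := es.SearchApplication.RenderQuery(
//        "my-search-app",
//        es.SearchApplication.RenderQuery.WithContext(context.Background()),
//        es.SearchApplication.RenderQuery.WithBody(
//            strings.NewReader(`{"params":{"query_string":"tv"}}`),
//        ),
//    )
//    if err == nil {
//        defer res.Body.Close() // body holds the rendered query as JSON
//    }
// ---------------------------------------------------------------------------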
+func (f SearchApplicationRenderQuery) WithContext(v context.Context) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.ctx = v + } +} + +// WithBody - Search parameters, which will override any default search parameters defined in the search application template. +func (f SearchApplicationRenderQuery) WithBody(v io.Reader) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationRenderQuery) WithPretty() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationRenderQuery) WithHuman() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationRenderQuery) WithErrorTrace() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationRenderQuery) WithFilterPath(v ...string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationRenderQuery) WithHeader(h map[string]string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationRenderQuery) WithOpaqueID(s string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.search.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.search.go new file mode 100644 index 000000000..549df9b53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_application.search.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
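// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): the generated
// file that follows defines SearchApplicationSearch, which executes a search
// against a named search application. The application name and template
// parameters below are hypothetical, and the es.SearchApplication.Search
// accessor is an assumption.
//
//    res, err := es.SearchApplication.Search(
//        "my-search-app",
//        es.SearchApplication.Search.WithContext(context.Background()),
//        es.SearchApplication.Search.WithBody(
//            strings.NewReader(`{"params":{"query_string":"wireless headphones"}}`),
//        ),
//        es.SearchApplication.Search.WithTypedKeys(true),
//    )
//    if err == nil {
//        defer res.Body.Close()
//    }
// ---------------------------------------------------------------------------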
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationSearchFunc(t Transport) SearchApplicationSearch { + return func(name string, o ...func(*SearchApplicationSearchRequest)) (*Response, error) { + var r = SearchApplicationSearchRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationSearch perform a search against a search application +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html. +type SearchApplicationSearch func(name string, o ...func(*SearchApplicationSearchRequest)) (*Response, error) + +// SearchApplicationSearchRequest configures the Search Application Search API request. +type SearchApplicationSearchRequest struct { + Body io.Reader + + Name string + + TypedKeys *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchApplicationSearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name) + 1 + len("_search")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("_search") + + params = make(map[string]string) + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.search") + if reader := instrument.RecordRequestBody(ctx, 
"search_application.search", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.search") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationSearch) WithContext(v context.Context) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.ctx = v + } +} + +// WithBody - Search parameters, including template parameters that override defaults. +func (f SearchApplicationSearch) WithBody(v io.Reader) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Body = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f SearchApplicationSearch) WithTypedKeys(v bool) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.TypedKeys = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationSearch) WithPretty() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationSearch) WithHuman() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationSearch) WithErrorTrace() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationSearch) WithFilterPath(v ...string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationSearch) WithHeader(h map[string]string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationSearch) WithOpaqueID(s string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_mvt.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_mvt.go new file mode 100644 index 000000000..84b72dbcb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_mvt.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchMvtFunc(t Transport) SearchMvt { + return func(index []string, field string, x *int, y *int, zoom *int, o ...func(*SearchMvtRequest)) (*Response, error) { + var r = SearchMvtRequest{Index: index, Field: field, X: x, Y: y, Zoom: zoom} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchMvt searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html. +type SearchMvt func(index []string, field string, x *int, y *int, zoom *int, o ...func(*SearchMvtRequest)) (*Response, error) + +// SearchMvtRequest configures the Search Mvt API request. +type SearchMvtRequest struct { + Index []string + + Body io.Reader + + Field string + X *int + Y *int + Zoom *int + + ExactBounds *bool + Extent *int + GridAgg string + GridPrecision *int + GridType string + Size *int + TrackTotalHits interface{} + WithLabels *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SearchMvtRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_mvt") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + if r.Zoom == nil { + return nil, errors.New("zoom is required and cannot be nil") + } + if r.X == nil { + return nil, errors.New("x is required and cannot be nil") + } + if r.Y == nil { + return nil, errors.New("y is required and cannot be nil") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_mvt") + 1 + len(r.Field) + 1 + len(strconv.Itoa(*r.Zoom)) + 1 + len(strconv.Itoa(*r.X)) + 1 + len(strconv.Itoa(*r.Y))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_mvt") + path.WriteString("/") + path.WriteString(r.Field) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "field", r.Field) + } + path.WriteString("/") + path.WriteString(strconv.Itoa(*r.Zoom)) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "zoom", strconv.Itoa(*r.Zoom)) + } + path.WriteString("/") + path.WriteString(strconv.Itoa(*r.X)) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "x", strconv.Itoa(*r.X)) + } + path.WriteString("/") + path.WriteString(strconv.Itoa(*r.Y)) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "y", strconv.Itoa(*r.Y)) + } + + params = make(map[string]string) + + if r.ExactBounds != nil { + params["exact_bounds"] = strconv.FormatBool(*r.ExactBounds) + } + + if r.Extent != nil { + params["extent"] = strconv.FormatInt(int64(*r.Extent), 10) + } + + if r.GridAgg != "" { + params["grid_agg"] = r.GridAgg + } + + if r.GridPrecision != nil { + params["grid_precision"] = strconv.FormatInt(int64(*r.GridPrecision), 10) + } + + if r.GridType != "" { + params["grid_type"] = r.GridType + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.TrackTotalHits != nil { + params["track_total_hits"] = fmt.Sprintf("%v", r.TrackTotalHits) + } + + if r.WithLabels != nil { + params["with_labels"] = strconv.FormatBool(*r.WithLabels) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + 
req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_mvt") + if reader := instrument.RecordRequestBody(ctx, "search_mvt", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_mvt") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchMvt) WithContext(v context.Context) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.ctx = v + } +} + +// WithBody - Search request body.. +func (f SearchMvt) WithBody(v io.Reader) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.Body = v + } +} + +// WithExactBounds - if false, the meta layer's feature is the bounding box of the tile. if true, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation.. +func (f SearchMvt) WithExactBounds(v bool) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.ExactBounds = &v + } +} + +// WithExtent - size, in pixels, of a side of the vector tile.. +func (f SearchMvt) WithExtent(v int) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.Extent = &v + } +} + +// WithGridAgg - aggregation used to create a grid for `field`.. +func (f SearchMvt) WithGridAgg(v string) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.GridAgg = v + } +} + +// WithGridPrecision - additional zoom levels available through the aggs layer. accepts 0-8.. +func (f SearchMvt) WithGridPrecision(v int) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.GridPrecision = &v + } +} + +// WithGridType - determines the geometry type for features in the aggs layer.. +func (f SearchMvt) WithGridType(v string) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.GridType = v + } +} + +// WithSize - maximum number of features to return in the hits layer. accepts 0-10000.. +func (f SearchMvt) WithSize(v int) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.Size = &v + } +} + +// WithTrackTotalHits - indicate if the number of documents that match the query should be tracked. a number can also be specified, to accurately track the total hit count up to the number.. +func (f SearchMvt) WithTrackTotalHits(v interface{}) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.TrackTotalHits = v + } +} + +// WithWithLabels - if true, the hits and aggs layers will contain additional point features with suggested label positions for the original features.. +func (f SearchMvt) WithWithLabels(v bool) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.WithLabels = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchMvt) WithPretty() func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchMvt) WithHuman() func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
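// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): SearchMvt
// takes the tile coordinates as *int, and all three are required, as the nil
// checks in Do above enforce. The index and field names are hypothetical;
// note that the response body is a binary Mapbox vector tile, not JSON.
//
//    x, y, zoom := 0, 0, 0
//    res, err := es.SearchMvt(
//        []string{"museums"}, "location", &x, &y, &zoom,
//        es.SearchMvt.WithContext(context.Background()),
//        es.SearchMvt.WithGridPrecision(2),
//        es.SearchMvt.WithExactBounds(false),
//    )
//    if err == nil {
//        defer res.Body.Close() // decode with an MVT library, not encoding/json
//    }
// ---------------------------------------------------------------------------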
+func (f SearchMvt) WithErrorTrace() func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchMvt) WithFilterPath(v ...string) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchMvt) WithHeader(h map[string]string) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchMvt) WithOpaqueID(s string) func(*SearchMvtRequest) { + return func(r *SearchMvtRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_shards.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_shards.go new file mode 100644 index 000000000..993021b16 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_shards.go @@ -0,0 +1,319 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newSearchShardsFunc(t Transport) SearchShards { + return func(o ...func(*SearchShardsRequest)) (*Response, error) { + var r = SearchShardsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchShards returns information about the indices and shards that a search request would be executed against. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html. +type SearchShards func(o ...func(*SearchShardsRequest)) (*Response, error) + +// SearchShardsRequest configures the Search Shards API request. +type SearchShardsRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + Local *bool + MasterTimeout time.Duration + Preference string + Routing string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SearchShardsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_shards") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_search_shards")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_search_shards") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_shards") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_shards") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchShards) WithContext(v context.Context) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to search; use _all to perform the operation on all indices. +func (f SearchShards) WithIndex(v ...string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). 
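// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): SearchShards
// takes no positional arguments; the target indices come in via WithIndex, as
// defined above. The index name is hypothetical.
//
//    res, err := es.SearchShards(
//        es.SearchShards.WithContext(context.Background()),
//        es.SearchShards.WithIndex("products"),
//        es.SearchShards.WithLocal(true), // answer from local cluster state, skip the master
//    )
//    if err == nil {
//        defer res.Body.Close() // JSON listing the shards a search would hit
//    }
// ---------------------------------------------------------------------------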
+func (f SearchShards) WithAllowNoIndices(v bool) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f SearchShards) WithExpandWildcards(v string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f SearchShards) WithIgnoreUnavailable(v bool) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f SearchShards) WithLocal(v bool) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SearchShards) WithMasterTimeout(v time.Duration) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.MasterTimeout = v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f SearchShards) WithPreference(v string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Preference = v + } +} + +// WithRouting - specific routing value. +func (f SearchShards) WithRouting(v string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Routing = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchShards) WithPretty() func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchShards) WithHuman() func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchShards) WithErrorTrace() func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchShards) WithFilterPath(v ...string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchShards) WithHeader(h map[string]string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchShards) WithOpaqueID(s string) func(*SearchShardsRequest) { + return func(r *SearchShardsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_template.go new file mode 100644 index 000000000..78d8a5857 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.search_template.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSearchTemplateFunc(t Transport) SearchTemplate { + return func(body io.Reader, o ...func(*SearchTemplateRequest)) (*Response, error) { + var r = SearchTemplateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchTemplate allows to use the Mustache language to pre-render a search definition. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html. +type SearchTemplate func(body io.Reader, o ...func(*SearchTemplateRequest)) (*Response, error) + +// SearchTemplateRequest configures the Search Template API request. +type SearchTemplateRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + CcsMinimizeRoundtrips *bool + ExpandWildcards string + Explain *bool + IgnoreThrottled *bool + IgnoreUnavailable *bool + Preference string + Profile *bool + RestTotalHitsAsInt *bool + Routing []string + Scroll time.Duration + SearchType string + TypedKeys *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SearchTemplateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_search") + 1 + len("template")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("template") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Profile != nil { + params["profile"] = strconv.FormatBool(*r.Profile) + } + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.Scroll != 0 { + params["scroll"] = formatDuration(r.Scroll) + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "search_template") + if reader := instrument.RecordRequestBody(ctx, "search_template", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_template") + } + if err != nil { + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchTemplate) WithContext(v context.Context) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names to search; use _all to perform the operation on all indices. +func (f SearchTemplate) WithIndex(v ...string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f SearchTemplate) WithAllowNoIndices(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.AllowNoIndices = &v + } +} + +// WithCcsMinimizeRoundtrips - indicates whether network round-trips should be minimized as part of cross-cluster search requests execution. +func (f SearchTemplate) WithCcsMinimizeRoundtrips(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f SearchTemplate) WithExpandWildcards(v string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.ExpandWildcards = v + } +} + +// WithExplain - specify whether to return detailed information about score computation as part of a hit. +func (f SearchTemplate) WithExplain(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Explain = &v + } +} + +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +func (f SearchTemplate) WithIgnoreThrottled(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f SearchTemplate) WithIgnoreUnavailable(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f SearchTemplate) WithPreference(v string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Preference = v + } +} + +// WithProfile - specify whether to profile the query execution. +func (f SearchTemplate) WithProfile(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Profile = &v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f SearchTemplate) WithRestTotalHitsAsInt(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithRouting - a list of specific routing values. +func (f SearchTemplate) WithRouting(v ...string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Routing = v + } +} + +// WithScroll - specify how long a consistent view of the index should be maintained for scrolled search. 
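// ---------------------------------------------------------------------------
// Usage sketch (editor's note, not part of the vendored file): per the
// signature above, SearchTemplate takes the body as its positional argument,
// so a stored template ID and its params go in the body while the target
// index is an option. The template ID and index name are hypothetical.
//
//    res, err := es.SearchTemplate(
//        strings.NewReader(`{"id":"my-stored-template","params":{"query_string":"hello"}}`),
//        es.SearchTemplate.WithContext(context.Background()),
//        es.SearchTemplate.WithIndex("products"),
//        es.SearchTemplate.WithTypedKeys(true),
//    )
//    if err == nil {
//        defer res.Body.Close()
//    }
// ---------------------------------------------------------------------------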
+func (f SearchTemplate) WithScroll(v time.Duration) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Scroll = v + } +} + +// WithSearchType - search operation type. +func (f SearchTemplate) WithSearchType(v string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.SearchType = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f SearchTemplate) WithTypedKeys(v bool) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.TypedKeys = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchTemplate) WithPretty() func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchTemplate) WithHuman() func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchTemplate) WithErrorTrace() func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchTemplate) WithFilterPath(v ...string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchTemplate) WithHeader(h map[string]string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchTemplate) WithOpaqueID(s string) func(*SearchTemplateRequest) { + return func(r *SearchTemplateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.delete_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.delete_node.go new file mode 100644 index 000000000..3d257910a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.delete_node.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newShutdownDeleteNodeFunc(t Transport) ShutdownDeleteNode { + return func(node_id string, o ...func(*ShutdownDeleteNodeRequest)) (*Response, error) { + var r = ShutdownDeleteNodeRequest{NodeID: node_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ShutdownDeleteNode removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current. +type ShutdownDeleteNode func(node_id string, o ...func(*ShutdownDeleteNodeRequest)) (*Response, error) + +// ShutdownDeleteNodeRequest configures the Shutdown Delete Node API request. +type ShutdownDeleteNodeRequest struct { + NodeID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ShutdownDeleteNodeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.delete_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(r.NodeID) + 1 + len("shutdown")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString(r.NodeID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", r.NodeID) + } + path.WriteString("/") + path.WriteString("shutdown") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "shutdown.delete_node") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "shutdown.delete_node") + } + if err != nil { + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ShutdownDeleteNode) WithContext(v context.Context) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownDeleteNode) WithMasterTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownDeleteNode) WithTimeout(v time.Duration) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ShutdownDeleteNode) WithPretty() func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ShutdownDeleteNode) WithHuman() func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ShutdownDeleteNode) WithErrorTrace() func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ShutdownDeleteNode) WithFilterPath(v ...string) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ShutdownDeleteNode) WithHeader(h map[string]string) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ShutdownDeleteNode) WithOpaqueID(s string) func(*ShutdownDeleteNodeRequest) { + return func(r *ShutdownDeleteNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.get_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.get_node.go new file mode 100644 index 000000000..63a09d2cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.get_node.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
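// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// The ShutdownDeleteNode API above can also be driven through the exported
// request struct this file defines. A minimal sketch; the node ID and the
// transport wiring (es.Transport from an existing client) are assumptions.
//
//	req := esapi.ShutdownDeleteNodeRequest{
//		NodeID:  "node-1",
//		Timeout: 30 * time.Second, // rendered as the "timeout" query param
//	}
//	res, err := req.Do(context.Background(), es.Transport)
//	if err == nil {
//		defer res.Body.Close()
//	}
// -----------------------------------------------------------------------------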
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newShutdownGetNodeFunc(t Transport) ShutdownGetNode { + return func(o ...func(*ShutdownGetNodeRequest)) (*Response, error) { + var r = ShutdownGetNodeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ShutdownGetNode retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current. +type ShutdownGetNode func(o ...func(*ShutdownGetNodeRequest)) (*Response, error) + +// ShutdownGetNodeRequest configures the Shutdown Get Node API request. +type ShutdownGetNodeRequest struct { + NodeID string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ShutdownGetNodeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.get_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(r.NodeID) + 1 + len("shutdown")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + if r.NodeID != "" { + path.WriteString("/") + path.WriteString(r.NodeID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", r.NodeID) + } + } + path.WriteString("/") + path.WriteString("shutdown") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "shutdown.get_node") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "shutdown.get_node") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: 
res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ShutdownGetNode) WithContext(v context.Context) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.ctx = v + } +} + +// WithNodeID - which node for which to retrieve the shutdown status. +func (f ShutdownGetNode) WithNodeID(v string) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.NodeID = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f ShutdownGetNode) WithMasterTimeout(v time.Duration) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ShutdownGetNode) WithPretty() func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ShutdownGetNode) WithHuman() func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ShutdownGetNode) WithErrorTrace() func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ShutdownGetNode) WithFilterPath(v ...string) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ShutdownGetNode) WithHeader(h map[string]string) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ShutdownGetNode) WithOpaqueID(s string) func(*ShutdownGetNodeRequest) { + return func(r *ShutdownGetNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.put_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.put_node.go new file mode 100644 index 000000000..b32f9fd1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.shutdown.put_node.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
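// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// Unlike the delete variant, ShutdownGetNode takes no required arguments: when
// NodeID is empty the Do method above builds plain /_nodes/shutdown, returning
// the status of every node marked for shutdown. Sketch as before, with the
// same assumed transport wiring.
//
//	req := esapi.ShutdownGetNodeRequest{} // or set NodeID for a single node
//	res, err := req.Do(context.Background(), es.Transport)
//	if err == nil {
//		defer res.Body.Close()
//	}
// -----------------------------------------------------------------------------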
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newShutdownPutNodeFunc(t Transport) ShutdownPutNode { + return func(body io.Reader, node_id string, o ...func(*ShutdownPutNodeRequest)) (*Response, error) { + var r = ShutdownPutNodeRequest{Body: body, NodeID: node_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ShutdownPutNode adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current. +type ShutdownPutNode func(body io.Reader, node_id string, o ...func(*ShutdownPutNodeRequest)) (*Response, error) + +// ShutdownPutNodeRequest configures the Shutdown Put Node API request. +type ShutdownPutNodeRequest struct { + Body io.Reader + + NodeID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ShutdownPutNodeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.put_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_nodes") + 1 + len(r.NodeID) + 1 + len("shutdown")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString(r.NodeID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", r.NodeID) + } + path.WriteString("/") + path.WriteString("shutdown") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "shutdown.put_node") + if reader := instrument.RecordRequestBody(ctx, "shutdown.put_node", r.Body); reader 
!= nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "shutdown.put_node") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ShutdownPutNode) WithContext(v context.Context) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ShutdownPutNode) WithMasterTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ShutdownPutNode) WithTimeout(v time.Duration) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ShutdownPutNode) WithPretty() func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ShutdownPutNode) WithHuman() func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ShutdownPutNode) WithErrorTrace() func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ShutdownPutNode) WithFilterPath(v ...string) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ShutdownPutNode) WithHeader(h map[string]string) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ShutdownPutNode) WithOpaqueID(s string) func(*ShutdownPutNodeRequest) { + return func(r *ShutdownPutNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.simulate.ingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.simulate.ingest.go new file mode 100644 index 000000000..4835d0f0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.simulate.ingest.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
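// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// ShutdownPutNode is the only shutdown API above with a required body. A
// minimal sketch; the body shape ("type" and "reason") follows the
// Elasticsearch node-shutdown API docs and, like the node ID and transport, is
// an assumption here rather than something this diff defines.
//
//	body := strings.NewReader(`{"type":"restart","reason":"planned maintenance"}`)
//	req := esapi.ShutdownPutNodeRequest{NodeID: "node-1", Body: body}
//	res, err := req.Do(context.Background(), es.Transport)
//	if err == nil {
//		defer res.Body.Close()
//	}
// -----------------------------------------------------------------------------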
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSimulateIngestFunc(t Transport) SimulateIngest { + return func(body io.Reader, o ...func(*SimulateIngestRequest)) (*Response, error) { + var r = SimulateIngestRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SimulateIngest simulates running ingest with example documents. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html. +type SimulateIngest func(body io.Reader, o ...func(*SimulateIngestRequest)) (*Response, error) + +// SimulateIngestRequest configures the Simulate Ingest API request. +type SimulateIngestRequest struct { + Index string + + Body io.Reader + + Pipeline string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SimulateIngestRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "simulate.ingest") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ingest") + 1 + len(r.Index) + 1 + len("_simulate")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ingest") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_simulate") + + params = make(map[string]string) + + if r.Pipeline != "" { + params["pipeline"] = r.Pipeline + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != 
nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "simulate.ingest") + if reader := instrument.RecordRequestBody(ctx, "simulate.ingest", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "simulate.ingest") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SimulateIngest) WithContext(v context.Context) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.ctx = v + } +} + +// WithIndex - default index for docs which don't provide one. +func (f SimulateIngest) WithIndex(v string) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.Index = v + } +} + +// WithPipeline - the pipeline ID to preprocess incoming documents with if no pipeline is given for a particular document. +func (f SimulateIngest) WithPipeline(v string) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.Pipeline = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SimulateIngest) WithPretty() func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SimulateIngest) WithHuman() func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SimulateIngest) WithErrorTrace() func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SimulateIngest) WithFilterPath(v ...string) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SimulateIngest) WithHeader(h map[string]string) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SimulateIngest) WithOpaqueID(s string) func(*SimulateIngestRequest) { + return func(r *SimulateIngestRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.cleanup_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.cleanup_repository.go new file mode 100644 index 000000000..b84293997 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.cleanup_repository.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
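// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// SimulateIngest (experimental, per its doc comment above) posts example
// documents to /_ingest/_simulate. A minimal sketch; the document body follows
// the simulate-ingest API docs, and the pipeline name and transport wiring are
// hypothetical.
//
//	body := strings.NewReader(`{"docs":[{"_source":{"message":"hello"}}]}`)
//	req := esapi.SimulateIngestRequest{Body: body, Pipeline: "my-pipeline"}
//	res, err := req.Do(context.Background(), es.Transport)
//	if err == nil {
//		defer res.Body.Close()
//	}
// -----------------------------------------------------------------------------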
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSnapshotCleanupRepositoryFunc(t Transport) SnapshotCleanupRepository { + return func(repository string, o ...func(*SnapshotCleanupRepositoryRequest)) (*Response, error) { + var r = SnapshotCleanupRepositoryRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotCleanupRepository removes stale data from repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html. +type SnapshotCleanupRepository func(repository string, o ...func(*SnapshotCleanupRepositoryRequest)) (*Response, error) + +// SnapshotCleanupRepositoryRequest configures the Snapshot Cleanup Repository API request. +type SnapshotCleanupRepositoryRequest struct { + Repository string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SnapshotCleanupRepositoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.cleanup_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len("_cleanup")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString("_cleanup") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.cleanup_repository") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.cleanup_repository") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotCleanupRepository) WithContext(v context.Context) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotCleanupRepository) WithMasterTimeout(v time.Duration) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SnapshotCleanupRepository) WithTimeout(v time.Duration) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotCleanupRepository) WithPretty() func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SnapshotCleanupRepository) WithHuman() func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotCleanupRepository) WithErrorTrace() func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotCleanupRepository) WithFilterPath(v ...string) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotCleanupRepository) WithHeader(h map[string]string) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotCleanupRepository) WithOpaqueID(s string) func(*SnapshotCleanupRepositoryRequest) { + return func(r *SnapshotCleanupRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.clone.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.clone.go new file mode 100644 index 000000000..905ac1622 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.clone.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newSnapshotCloneFunc(t Transport) SnapshotClone { + return func(repository string, snapshot string, body io.Reader, target_snapshot string, o ...func(*SnapshotCloneRequest)) (*Response, error) { + var r = SnapshotCloneRequest{Repository: repository, Snapshot: snapshot, Body: body, TargetSnapshot: target_snapshot} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotClone clones indices from one snapshot into another snapshot in the same repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. 
+type SnapshotClone func(repository string, snapshot string, body io.Reader, target_snapshot string, o ...func(*SnapshotCloneRequest)) (*Response, error) + +// SnapshotCloneRequest configures the Snapshot Clone API request. +type SnapshotCloneRequest struct { + Body io.Reader + + Repository string + Snapshot string + TargetSnapshot string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotCloneRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.clone") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(r.Snapshot) + 1 + len("_clone") + 1 + len(r.TargetSnapshot)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(r.Snapshot) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.Snapshot) + } + path.WriteString("/") + path.WriteString("_clone") + path.WriteString("/") + path.WriteString(r.TargetSnapshot) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "target_snapshot", r.TargetSnapshot) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.clone") + if reader := instrument.RecordRequestBody(ctx, "snapshot.clone", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.clone") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return 
&response, nil +} + +// WithContext sets the request context. +func (f SnapshotClone) WithContext(v context.Context) func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotClone) WithMasterTimeout(v time.Duration) func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotClone) WithPretty() func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotClone) WithHuman() func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotClone) WithErrorTrace() func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotClone) WithFilterPath(v ...string) func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotClone) WithHeader(h map[string]string) func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotClone) WithOpaqueID(s string) func(*SnapshotCloneRequest) { + return func(r *SnapshotCloneRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create.go new file mode 100644 index 000000000..32a8f9317 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
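// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// SnapshotClone takes four positional arguments, matching the function type
// above. A minimal sketch, assuming the client groups these under es.Snapshot
// as in earlier client majors; the repository, snapshot names, and "indices"
// body are hypothetical.
//
//	res, err := es.Snapshot.Clone(
//		"my_repo", "snap-1",
//		strings.NewReader(`{"indices":"logs-*"}`),
//		"snap-1-clone",
//	)
//
// The cleanup call diffed earlier takes only the repository:
//
//	res, err = es.Snapshot.CleanupRepository("my_repo")
// -----------------------------------------------------------------------------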
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotCreateFunc(t Transport) SnapshotCreate { + return func(repository string, snapshot string, o ...func(*SnapshotCreateRequest)) (*Response, error) { + var r = SnapshotCreateRequest{Repository: repository, Snapshot: snapshot} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotCreate creates a snapshot in a repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotCreate func(repository string, snapshot string, o ...func(*SnapshotCreateRequest)) (*Response, error) + +// SnapshotCreateRequest configures the Snapshot Create API request. +type SnapshotCreateRequest struct { + Body io.Reader + + Repository string + Snapshot string + + MasterTimeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotCreateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.create") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(r.Snapshot)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(r.Snapshot) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.Snapshot) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok 
:= r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.create") + if reader := instrument.RecordRequestBody(ctx, "snapshot.create", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.create") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotCreate) WithContext(v context.Context) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.ctx = v + } +} + +// WithBody - The snapshot definition. +func (f SnapshotCreate) WithBody(v io.Reader) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.Body = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotCreate) WithMasterTimeout(v time.Duration) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. +func (f SnapshotCreate) WithWaitForCompletion(v bool) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotCreate) WithPretty() func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotCreate) WithHuman() func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotCreate) WithErrorTrace() func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotCreate) WithFilterPath(v ...string) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotCreate) WithHeader(h map[string]string) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotCreate) WithOpaqueID(s string) func(*SnapshotCreateRequest) { + return func(r *SnapshotCreateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create_repository.go new file mode 100644 index 000000000..38f89b06e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.create_repository.go @@ -0,0 +1,272 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotCreateRepositoryFunc(t Transport) SnapshotCreateRepository { + return func(repository string, body io.Reader, o ...func(*SnapshotCreateRepositoryRequest)) (*Response, error) { + var r = SnapshotCreateRepositoryRequest{Repository: repository, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotCreateRepository creates a repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotCreateRepository func(repository string, body io.Reader, o ...func(*SnapshotCreateRepositoryRequest)) (*Response, error) + +// SnapshotCreateRepositoryRequest configures the Snapshot Create Repository API request. +type SnapshotCreateRepositoryRequest struct { + Body io.Reader + + Repository string + + MasterTimeout time.Duration + Timeout time.Duration + Verify *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SnapshotCreateRepositoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.create_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Verify != nil { + params["verify"] = strconv.FormatBool(*r.Verify) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.create_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.create_repository", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.create_repository") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotCreateRepository) WithContext(v context.Context) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotCreateRepository) WithMasterTimeout(v time.Duration) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SnapshotCreateRepository) WithTimeout(v time.Duration) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.Timeout = v + } +} + +// WithVerify - whether to verify the repository after creation. 
+func (f SnapshotCreateRepository) WithVerify(v bool) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.Verify = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotCreateRepository) WithPretty() func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotCreateRepository) WithHuman() func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotCreateRepository) WithErrorTrace() func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotCreateRepository) WithFilterPath(v ...string) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotCreateRepository) WithHeader(h map[string]string) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotCreateRepository) WithOpaqueID(s string) func(*SnapshotCreateRepositoryRequest) { + return func(r *SnapshotCreateRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete.go new file mode 100644 index 000000000..78a1374d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
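// ----- Editor's note: usage sketch (not part of the vendored file) ----------
// CreateRepository and Create are typically paired: register a repository,
// then snapshot into it. A minimal sketch with the same assumed es.Snapshot
// grouping; the "fs" repository settings and all names are hypothetical, while
// WithVerify and WithWaitForCompletion are the options defined in this diff.
//
//	_, err := es.Snapshot.CreateRepository("my_repo",
//		strings.NewReader(`{"type":"fs","settings":{"location":"/backups"}}`),
//		es.Snapshot.CreateRepository.WithVerify(true),
//	)
//	res, err := es.Snapshot.Create("my_repo", "snap-1",
//		es.Snapshot.Create.WithWaitForCompletion(true), // block until done
//	)
//	if err == nil {
//		defer res.Body.Close()
//	}
// -----------------------------------------------------------------------------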
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotDeleteFunc(t Transport) SnapshotDelete { + return func(repository string, snapshot []string, o ...func(*SnapshotDeleteRequest)) (*Response, error) { + var r = SnapshotDeleteRequest{Repository: repository, Snapshot: snapshot} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotDelete deletes one or more snapshots. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotDelete func(repository string, snapshot []string, o ...func(*SnapshotDeleteRequest)) (*Response, error) + +// SnapshotDeleteRequest configures the Snapshot Delete API request. +type SnapshotDeleteRequest struct { + Repository string + Snapshot []string + + MasterTimeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Snapshot) == 0 { + return nil, errors.New("snapshot is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(strings.Join(r.Snapshot, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(strings.Join(r.Snapshot, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", strings.Join(r.Snapshot, ",")) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) 
+ } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotDelete) WithContext(v context.Context) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotDelete) WithMasterTimeout(v time.Duration) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. +func (f SnapshotDelete) WithWaitForCompletion(v bool) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotDelete) WithPretty() func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotDelete) WithHuman() func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotDelete) WithErrorTrace() func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotDelete) WithFilterPath(v ...string) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotDelete) WithHeader(h map[string]string) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotDelete) WithOpaqueID(s string) func(*SnapshotDeleteRequest) { + return func(r *SnapshotDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete_repository.go new file mode 100644 index 000000000..a886af986 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.delete_repository.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newSnapshotDeleteRepositoryFunc(t Transport) SnapshotDeleteRepository { + return func(repository []string, o ...func(*SnapshotDeleteRepositoryRequest)) (*Response, error) { + var r = SnapshotDeleteRepositoryRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotDeleteRepository deletes a repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotDeleteRepository func(repository []string, o ...func(*SnapshotDeleteRepositoryRequest)) (*Response, error) + +// SnapshotDeleteRepositoryRequest configures the Snapshot Delete Repository API request. +type SnapshotDeleteRepositoryRequest struct { + Repository []string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
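+//
+// Most callers go through the SnapshotDeleteRepository function type, which
+// builds this struct from its options and invokes Do. A direct sketch,
+// assuming some configured Transport named "tp" (a hypothetical name):
+//
+//	req := SnapshotDeleteRepositoryRequest{Repository: []string{"my_backup"}}
+//	res, err := req.Do(context.Background(), tp)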
+func (r SnapshotDeleteRepositoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.delete_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Repository) == 0 { + return nil, errors.New("repository is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(strings.Join(r.Repository, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(strings.Join(r.Repository, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", strings.Join(r.Repository, ",")) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.delete_repository") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.delete_repository") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotDeleteRepository) WithContext(v context.Context) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotDeleteRepository) WithMasterTimeout(v time.Duration) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SnapshotDeleteRepository) WithTimeout(v time.Duration) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotDeleteRepository) WithPretty() func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
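+//
+// Concretely, this adds "human=true" to the query string; a sketch assuming
+// an *elasticsearch.Client named "es":
+//
+//	res, err := es.Snapshot.DeleteRepository(
+//	    []string{"my_backup"},
+//	    es.Snapshot.DeleteRepository.WithHuman(),
+//	)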
+func (f SnapshotDeleteRepository) WithHuman() func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotDeleteRepository) WithErrorTrace() func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotDeleteRepository) WithFilterPath(v ...string) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotDeleteRepository) WithHeader(h map[string]string) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotDeleteRepository) WithOpaqueID(s string) func(*SnapshotDeleteRepositoryRequest) { + return func(r *SnapshotDeleteRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get.go new file mode 100644 index 000000000..0a66843fd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get.go @@ -0,0 +1,405 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotGetFunc(t Transport) SnapshotGet { + return func(repository string, snapshot []string, o ...func(*SnapshotGetRequest)) (*Response, error) { + var r = SnapshotGetRequest{Repository: repository, Snapshot: snapshot} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotGet returns information about a snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotGet func(repository string, snapshot []string, o ...func(*SnapshotGetRequest)) (*Response, error) + +// SnapshotGetRequest configures the Snapshot Get API request. 
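+//
+// The exported fields map one-to-one onto query-string parameters; a sorted,
+// paginated listing might look like this (sketch, assuming an
+// *elasticsearch.Client named "es"):
+//
+//	res, err := es.Snapshot.Get(
+//	    "my_backup",
+//	    []string{"_all"},
+//	    es.Snapshot.Get.WithSort("start_time"),
+//	    es.Snapshot.Get.WithSize(50),
+//	)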
+type SnapshotGetRequest struct { + Repository string + Snapshot []string + + After string + FromSortValue string + IgnoreUnavailable *bool + IncludeRepository *bool + IndexDetails *bool + IndexNames *bool + MasterTimeout time.Duration + Offset *int + Order string + Size *int + SlmPolicyFilter string + Sort string + State []string + Verbose *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Snapshot) == 0 { + return nil, errors.New("snapshot is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(strings.Join(r.Snapshot, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(strings.Join(r.Snapshot, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", strings.Join(r.Snapshot, ",")) + } + + params = make(map[string]string) + + if r.After != "" { + params["after"] = r.After + } + + if r.FromSortValue != "" { + params["from_sort_value"] = r.FromSortValue + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.IncludeRepository != nil { + params["include_repository"] = strconv.FormatBool(*r.IncludeRepository) + } + + if r.IndexDetails != nil { + params["index_details"] = strconv.FormatBool(*r.IndexDetails) + } + + if r.IndexNames != nil { + params["index_names"] = strconv.FormatBool(*r.IndexNames) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Offset != nil { + params["offset"] = strconv.FormatInt(int64(*r.Offset), 10) + } + + if r.Order != "" { + params["order"] = r.Order + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.SlmPolicyFilter != "" { + params["slm_policy_filter"] = r.SlmPolicyFilter + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if len(r.State) > 0 { + params["state"] = strings.Join(r.State, ",") + } + + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + 
for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotGet) WithContext(v context.Context) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.ctx = v + } +} + +// WithAfter - offset identifier to start pagination from as returned by the 'next' field in the response body.. +func (f SnapshotGet) WithAfter(v string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.After = v + } +} + +// WithFromSortValue - value of the current sort column at which to start retrieval.. +func (f SnapshotGet) WithFromSortValue(v string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.FromSortValue = v + } +} + +// WithIgnoreUnavailable - whether to ignore unavailable snapshots, defaults to false which means a snapshotmissingexception is thrown. +func (f SnapshotGet) WithIgnoreUnavailable(v bool) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithIncludeRepository - whether to include the repository name in the snapshot info. defaults to true.. +func (f SnapshotGet) WithIncludeRepository(v bool) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.IncludeRepository = &v + } +} + +// WithIndexDetails - whether to include details of each index in the snapshot, if those details are available. defaults to false.. +func (f SnapshotGet) WithIndexDetails(v bool) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.IndexDetails = &v + } +} + +// WithIndexNames - whether to include the name of each index in the snapshot. defaults to true.. +func (f SnapshotGet) WithIndexNames(v bool) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.IndexNames = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotGet) WithMasterTimeout(v time.Duration) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.MasterTimeout = v + } +} + +// WithOffset - numeric offset to start pagination based on the snapshots matching the request. defaults to 0. +func (f SnapshotGet) WithOffset(v int) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Offset = &v + } +} + +// WithOrder - sort order. +func (f SnapshotGet) WithOrder(v string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Order = v + } +} + +// WithSize - maximum number of snapshots to return. defaults to 0 which means return all that match without limit.. +func (f SnapshotGet) WithSize(v int) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Size = &v + } +} + +// WithSlmPolicyFilter - filter snapshots by a list of slm policy names that snapshots belong to. accepts wildcards. use the special pattern '_none' to match snapshots without an slm policy. 
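+//
+// For example, to list only snapshots that were not created by any SLM policy
+// (a sketch; "es" is an assumed *elasticsearch.Client):
+//
+//	es.Snapshot.Get.WithSlmPolicyFilter("_none")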
+func (f SnapshotGet) WithSlmPolicyFilter(v string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.SlmPolicyFilter = v + } +} + +// WithSort - allows setting a sort order for the result. defaults to start_time. +func (f SnapshotGet) WithSort(v string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Sort = v + } +} + +// WithState - filter snapshots by a list of states. valid state values are 'success', 'in_progress', 'failed', 'partial', or 'incompatible'.. +func (f SnapshotGet) WithState(v ...string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.State = v + } +} + +// WithVerbose - whether to show verbose snapshot info or only show the basic info found in the repository index blob. +func (f SnapshotGet) WithVerbose(v bool) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Verbose = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotGet) WithPretty() func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotGet) WithHuman() func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotGet) WithErrorTrace() func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotGet) WithFilterPath(v ...string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotGet) WithHeader(h map[string]string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotGet) WithOpaqueID(s string) func(*SnapshotGetRequest) { + return func(r *SnapshotGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get_repository.go new file mode 100644 index 000000000..282c1b722 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.get_repository.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotGetRepositoryFunc(t Transport) SnapshotGetRepository { + return func(o ...func(*SnapshotGetRepositoryRequest)) (*Response, error) { + var r = SnapshotGetRepositoryRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotGetRepository returns information about a repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotGetRepository func(o ...func(*SnapshotGetRepositoryRequest)) (*Response, error) + +// SnapshotGetRepositoryRequest configures the Snapshot Get Repository API request. +type SnapshotGetRepositoryRequest struct { + Repository []string + + Local *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotGetRepositoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(strings.Join(r.Repository, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + if len(r.Repository) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Repository, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", strings.Join(r.Repository, ",")) + } + } + + params = make(map[string]string) + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.get_repository") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.get_repository") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); 
ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotGetRepository) WithContext(v context.Context) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.ctx = v + } +} + +// WithRepository - a list of repository names. +func (f SnapshotGetRepository) WithRepository(v ...string) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.Repository = v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f SnapshotGetRepository) WithLocal(v bool) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.Local = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotGetRepository) WithMasterTimeout(v time.Duration) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotGetRepository) WithPretty() func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotGetRepository) WithHuman() func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotGetRepository) WithErrorTrace() func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotGetRepository) WithFilterPath(v ...string) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotGetRepository) WithHeader(h map[string]string) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotGetRepository) WithOpaqueID(s string) func(*SnapshotGetRepositoryRequest) { + return func(r *SnapshotGetRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_analyze.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_analyze.go new file mode 100644 index 000000000..672c0bb8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_analyze.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotRepositoryAnalyzeFunc(t Transport) SnapshotRepositoryAnalyze { + return func(repository string, o ...func(*SnapshotRepositoryAnalyzeRequest)) (*Response, error) { + var r = SnapshotRepositoryAnalyzeRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotRepositoryAnalyze analyzes a repository for correctness and performance +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotRepositoryAnalyze func(repository string, o ...func(*SnapshotRepositoryAnalyzeRequest)) (*Response, error) + +// SnapshotRepositoryAnalyzeRequest configures the Snapshot Repository Analyze API request. +type SnapshotRepositoryAnalyzeRequest struct { + Repository string + + BlobCount *int + Concurrency *int + Detailed *bool + EarlyReadNodeCount *int + MaxBlobSize string + MaxTotalDataSize string + RareActionProbability *int + RarelyAbortWrites *bool + ReadNodeCount *int + RegisterOperationCount *int + Seed *int + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
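+//
+// A typical invocation goes through the SnapshotRepositoryAnalyze function
+// type, which fills this request from its options and calls Do. A sketch,
+// assuming an *elasticsearch.Client named "es":
+//
+//	res, err := es.Snapshot.RepositoryAnalyze(
+//	    "my_backup",
+//	    es.Snapshot.RepositoryAnalyze.WithBlobCount(100),
+//	    es.Snapshot.RepositoryAnalyze.WithConcurrency(10),
+//	)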
+func (r SnapshotRepositoryAnalyzeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len("_analyze")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString("_analyze") + + params = make(map[string]string) + + if r.BlobCount != nil { + params["blob_count"] = strconv.FormatInt(int64(*r.BlobCount), 10) + } + + if r.Concurrency != nil { + params["concurrency"] = strconv.FormatInt(int64(*r.Concurrency), 10) + } + + if r.Detailed != nil { + params["detailed"] = strconv.FormatBool(*r.Detailed) + } + + if r.EarlyReadNodeCount != nil { + params["early_read_node_count"] = strconv.FormatInt(int64(*r.EarlyReadNodeCount), 10) + } + + if r.MaxBlobSize != "" { + params["max_blob_size"] = r.MaxBlobSize + } + + if r.MaxTotalDataSize != "" { + params["max_total_data_size"] = r.MaxTotalDataSize + } + + if r.RareActionProbability != nil { + params["rare_action_probability"] = strconv.FormatInt(int64(*r.RareActionProbability), 10) + } + + if r.RarelyAbortWrites != nil { + params["rarely_abort_writes"] = strconv.FormatBool(*r.RarelyAbortWrites) + } + + if r.ReadNodeCount != nil { + params["read_node_count"] = strconv.FormatInt(int64(*r.ReadNodeCount), 10) + } + + if r.RegisterOperationCount != nil { + params["register_operation_count"] = strconv.FormatInt(int64(*r.RegisterOperationCount), 10) + } + + if r.Seed != nil { + params["seed"] = strconv.FormatInt(int64(*r.Seed), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_analyze") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_analyze") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets 
the request context. +func (f SnapshotRepositoryAnalyze) WithContext(v context.Context) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.ctx = v + } +} + +// WithBlobCount - number of blobs to create during the test. defaults to 100.. +func (f SnapshotRepositoryAnalyze) WithBlobCount(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.BlobCount = &v + } +} + +// WithConcurrency - number of operations to run concurrently during the test. defaults to 10.. +func (f SnapshotRepositoryAnalyze) WithConcurrency(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Concurrency = &v + } +} + +// WithDetailed - whether to return detailed results or a summary. defaults to 'false' so that only the summary is returned.. +func (f SnapshotRepositoryAnalyze) WithDetailed(v bool) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Detailed = &v + } +} + +// WithEarlyReadNodeCount - number of nodes on which to perform an early read on a blob, i.e. before writing has completed. early reads are rare actions so the 'rare_action_probability' parameter is also relevant. defaults to 2.. +func (f SnapshotRepositoryAnalyze) WithEarlyReadNodeCount(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.EarlyReadNodeCount = &v + } +} + +// WithMaxBlobSize - maximum size of a blob to create during the test, e.g '1gb' or '100mb'. defaults to '10mb'.. +func (f SnapshotRepositoryAnalyze) WithMaxBlobSize(v string) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.MaxBlobSize = v + } +} + +// WithMaxTotalDataSize - maximum total size of all blobs to create during the test, e.g '1tb' or '100gb'. defaults to '1gb'.. +func (f SnapshotRepositoryAnalyze) WithMaxTotalDataSize(v string) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.MaxTotalDataSize = v + } +} + +// WithRareActionProbability - probability of taking a rare action such as an early read or an overwrite. defaults to 0.02.. +func (f SnapshotRepositoryAnalyze) WithRareActionProbability(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.RareActionProbability = &v + } +} + +// WithRarelyAbortWrites - whether to rarely abort writes before they complete. defaults to 'true'.. +func (f SnapshotRepositoryAnalyze) WithRarelyAbortWrites(v bool) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.RarelyAbortWrites = &v + } +} + +// WithReadNodeCount - number of nodes on which to read a blob after writing. defaults to 10.. +func (f SnapshotRepositoryAnalyze) WithReadNodeCount(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.ReadNodeCount = &v + } +} + +// WithRegisterOperationCount - the minimum number of linearizable register operations to perform in total. defaults to 10.. +func (f SnapshotRepositoryAnalyze) WithRegisterOperationCount(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.RegisterOperationCount = &v + } +} + +// WithSeed - seed for the random number generator used to create the test workload. defaults to a random value.. 
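+//
+// Passing a fixed seed makes repeated analyses of the same repository
+// reproducible, e.g.:
+//
+//	es.Snapshot.RepositoryAnalyze.WithSeed(42)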
+func (f SnapshotRepositoryAnalyze) WithSeed(v int) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Seed = &v + } +} + +// WithTimeout - explicit operation timeout. defaults to '30s'.. +func (f SnapshotRepositoryAnalyze) WithTimeout(v time.Duration) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotRepositoryAnalyze) WithPretty() func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotRepositoryAnalyze) WithHuman() func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotRepositoryAnalyze) WithErrorTrace() func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotRepositoryAnalyze) WithFilterPath(v ...string) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotRepositoryAnalyze) WithHeader(h map[string]string) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotRepositoryAnalyze) WithOpaqueID(s string) func(*SnapshotRepositoryAnalyzeRequest) { + return func(r *SnapshotRepositoryAnalyzeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_verify_integrity.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_verify_integrity.go new file mode 100644 index 000000000..815e82a86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.repository_verify_integrity.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSnapshotRepositoryVerifyIntegrityFunc(t Transport) SnapshotRepositoryVerifyIntegrity { + return func(repository string, o ...func(*SnapshotRepositoryVerifyIntegrityRequest)) (*Response, error) { + var r = SnapshotRepositoryVerifyIntegrityRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotRepositoryVerifyIntegrity verifies the integrity of the contents of a snapshot repository +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotRepositoryVerifyIntegrity func(repository string, o ...func(*SnapshotRepositoryVerifyIntegrityRequest)) (*Response, error) + +// SnapshotRepositoryVerifyIntegrityRequest configures the Snapshot Repository Verify Integrity API request. +type SnapshotRepositoryVerifyIntegrityRequest struct { + Repository string + + BlobThreadPoolConcurrency *int + IndexSnapshotVerificationConcurrency *int + IndexVerificationConcurrency *int + MaxBytesPerSec string + MaxFailedShardSnapshots *int + MetaThreadPoolConcurrency *int + SnapshotVerificationConcurrency *int + VerifyBlobContents *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotRepositoryVerifyIntegrityRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len("_verify_integrity")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString("_verify_integrity") + + params = make(map[string]string) + + if r.BlobThreadPoolConcurrency != nil { + params["blob_thread_pool_concurrency"] = strconv.FormatInt(int64(*r.BlobThreadPoolConcurrency), 10) + } + + if r.IndexSnapshotVerificationConcurrency != nil { + params["index_snapshot_verification_concurrency"] = strconv.FormatInt(int64(*r.IndexSnapshotVerificationConcurrency), 10) + } + + if r.IndexVerificationConcurrency != nil { + params["index_verification_concurrency"] = strconv.FormatInt(int64(*r.IndexVerificationConcurrency), 10) + } + + if r.MaxBytesPerSec != "" { + params["max_bytes_per_sec"] = r.MaxBytesPerSec + } + + if r.MaxFailedShardSnapshots != nil { + params["max_failed_shard_snapshots"] = strconv.FormatInt(int64(*r.MaxFailedShardSnapshots), 10) + } + + if r.MetaThreadPoolConcurrency != nil { + params["meta_thread_pool_concurrency"] = strconv.FormatInt(int64(*r.MetaThreadPoolConcurrency), 10) + } + + if 
r.SnapshotVerificationConcurrency != nil { + params["snapshot_verification_concurrency"] = strconv.FormatInt(int64(*r.SnapshotVerificationConcurrency), 10) + } + + if r.VerifyBlobContents != nil { + params["verify_blob_contents"] = strconv.FormatBool(*r.VerifyBlobContents) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_verify_integrity") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_verify_integrity") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotRepositoryVerifyIntegrity) WithContext(v context.Context) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.ctx = v + } +} + +// WithBlobThreadPoolConcurrency - number of threads to use for reading blob contents. +func (f SnapshotRepositoryVerifyIntegrity) WithBlobThreadPoolConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.BlobThreadPoolConcurrency = &v + } +} + +// WithIndexSnapshotVerificationConcurrency - number of snapshots to verify concurrently within each index. +func (f SnapshotRepositoryVerifyIntegrity) WithIndexSnapshotVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.IndexSnapshotVerificationConcurrency = &v + } +} + +// WithIndexVerificationConcurrency - number of indices to verify concurrently. +func (f SnapshotRepositoryVerifyIntegrity) WithIndexVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.IndexVerificationConcurrency = &v + } +} + +// WithMaxBytesPerSec - rate limit for individual blob verification. +func (f SnapshotRepositoryVerifyIntegrity) WithMaxBytesPerSec(v string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MaxBytesPerSec = v + } +} + +// WithMaxFailedShardSnapshots - maximum permitted number of failed shard snapshots. 
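+//
+// A sketch combining it with a full verification call ("es" is an assumed
+// *elasticsearch.Client; note the type comment marks this API experimental):
+//
+//	res, err := es.Snapshot.RepositoryVerifyIntegrity(
+//	    "my_backup",
+//	    es.Snapshot.RepositoryVerifyIntegrity.WithMaxFailedShardSnapshots(10),
+//	)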
+func (f SnapshotRepositoryVerifyIntegrity) WithMaxFailedShardSnapshots(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MaxFailedShardSnapshots = &v + } +} + +// WithMetaThreadPoolConcurrency - number of threads to use for reading metadata. +func (f SnapshotRepositoryVerifyIntegrity) WithMetaThreadPoolConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.MetaThreadPoolConcurrency = &v + } +} + +// WithSnapshotVerificationConcurrency - number of snapshots to verify concurrently. +func (f SnapshotRepositoryVerifyIntegrity) WithSnapshotVerificationConcurrency(v int) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.SnapshotVerificationConcurrency = &v + } +} + +// WithVerifyBlobContents - whether to verify the contents of individual blobs. +func (f SnapshotRepositoryVerifyIntegrity) WithVerifyBlobContents(v bool) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.VerifyBlobContents = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotRepositoryVerifyIntegrity) WithPretty() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotRepositoryVerifyIntegrity) WithHuman() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotRepositoryVerifyIntegrity) WithErrorTrace() func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotRepositoryVerifyIntegrity) WithFilterPath(v ...string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotRepositoryVerifyIntegrity) WithHeader(h map[string]string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotRepositoryVerifyIntegrity) WithOpaqueID(s string) func(*SnapshotRepositoryVerifyIntegrityRequest) { + return func(r *SnapshotRepositoryVerifyIntegrityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.restore.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.restore.go new file mode 100644 index 000000000..fef2df59d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.restore.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotRestoreFunc(t Transport) SnapshotRestore { + return func(repository string, snapshot string, o ...func(*SnapshotRestoreRequest)) (*Response, error) { + var r = SnapshotRestoreRequest{Repository: repository, Snapshot: snapshot} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotRestore restores a snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotRestore func(repository string, snapshot string, o ...func(*SnapshotRestoreRequest)) (*Response, error) + +// SnapshotRestoreRequest configures the Snapshot Restore API request. +type SnapshotRestoreRequest struct { + Body io.Reader + + Repository string + Snapshot string + + MasterTimeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
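+//
+// Restores are usually issued through the SnapshotRestore function type with
+// a JSON body describing what to restore; a sketch, assuming an
+// *elasticsearch.Client named "es":
+//
+//	res, err := es.Snapshot.Restore(
+//	    "my_backup", "snapshot_1",
+//	    es.Snapshot.Restore.WithBody(strings.NewReader(`{"indices":"my-index"}`)),
+//	    es.Snapshot.Restore.WithWaitForCompletion(true),
+//	)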
+func (r SnapshotRestoreRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.restore") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(r.Snapshot) + 1 + len("_restore")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(r.Snapshot) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.Snapshot) + } + path.WriteString("/") + path.WriteString("_restore") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.restore") + if reader := instrument.RecordRequestBody(ctx, "snapshot.restore", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.restore") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotRestore) WithContext(v context.Context) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.ctx = v + } +} + +// WithBody - Details of what to restore. +func (f SnapshotRestore) WithBody(v io.Reader) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.Body = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
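+//
+// A minimal usage sketch (editor's addition; es, ctx and the repository and
+// snapshot names are placeholders, with es.Snapshot.Restore wired up by the
+// esapi client as usual):
+//
+//	res, err := es.Snapshot.Restore(
+//		"my_repository", "my_snapshot",
+//		es.Snapshot.Restore.WithContext(ctx),
+//		es.Snapshot.Restore.WithBody(strings.NewReader(`{"indices":"my-index"}`)),
+//		es.Snapshot.Restore.WithWaitForCompletion(true),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}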
+func (f SnapshotRestore) WithMasterTimeout(v time.Duration) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. +func (f SnapshotRestore) WithWaitForCompletion(v bool) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotRestore) WithPretty() func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotRestore) WithHuman() func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotRestore) WithErrorTrace() func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotRestore) WithFilterPath(v ...string) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotRestore) WithHeader(h map[string]string) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotRestore) WithOpaqueID(s string) func(*SnapshotRestoreRequest) { + return func(r *SnapshotRestoreRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.status.go new file mode 100644 index 000000000..8e1ea3aa1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.status.go @@ -0,0 +1,276 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newSnapshotStatusFunc(t Transport) SnapshotStatus { + return func(o ...func(*SnapshotStatusRequest)) (*Response, error) { + var r = SnapshotStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotStatus returns information about the status of a snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotStatus func(o ...func(*SnapshotStatusRequest)) (*Response, error) + +// SnapshotStatusRequest configures the Snapshot Status API request. +type SnapshotStatusRequest struct { + Repository string + Snapshot []string + + IgnoreUnavailable *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SnapshotStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(strings.Join(r.Snapshot, ",")) + 1 + len("_status")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + if r.Repository != "" { + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + } + if len(r.Snapshot) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Snapshot, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", strings.Join(r.Snapshot, ",")) + } + } + path.WriteString("/") + path.WriteString("_status") + + params = make(map[string]string) + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.BeforeRequest(req, "snapshot.status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotStatus) WithContext(v context.Context) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.ctx = v + } +} + +// WithRepository - a repository name. +func (f SnapshotStatus) WithRepository(v string) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.Repository = v + } +} + +// WithSnapshot - a list of snapshot names. +func (f SnapshotStatus) WithSnapshot(v ...string) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.Snapshot = v + } +} + +// WithIgnoreUnavailable - whether to ignore unavailable snapshots, defaults to false which means a snapshotmissingexception is thrown. +func (f SnapshotStatus) WithIgnoreUnavailable(v bool) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotStatus) WithMasterTimeout(v time.Duration) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotStatus) WithPretty() func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SnapshotStatus) WithHuman() func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotStatus) WithErrorTrace() func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotStatus) WithFilterPath(v ...string) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotStatus) WithHeader(h map[string]string) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotStatus) WithOpaqueID(s string) func(*SnapshotStatusRequest) { + return func(r *SnapshotStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.verify_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.verify_repository.go new file mode 100644 index 000000000..cbe1dbdd4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.snapshot.verify_repository.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSnapshotVerifyRepositoryFunc(t Transport) SnapshotVerifyRepository { + return func(repository string, o ...func(*SnapshotVerifyRepositoryRequest)) (*Response, error) { + var r = SnapshotVerifyRepositoryRequest{Repository: repository} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SnapshotVerifyRepository verifies a repository. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html. +type SnapshotVerifyRepository func(repository string, o ...func(*SnapshotVerifyRepositoryRequest)) (*Response, error) + +// SnapshotVerifyRepositoryRequest configures the Snapshot Verify Repository API request. +type SnapshotVerifyRepositoryRequest struct { + Repository string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
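+//
+// NOTE(editor): like the other generated Do methods in this change, it starts
+// an instrumentation span when the transport provides one, assembles the URL
+// in a strings.Builder (the leading "http://" fills the 7 bytes reserved by
+// path.Grow), encodes the query parameters, merges per-request headers, and
+// brackets transport.Perform with BeforeRequest/AfterRequest so an
+// Instrumentation implementation can time the wire round trip. The response
+// body is returned unread.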
+func (r SnapshotVerifyRepositoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.verify_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len("_verify")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString("_verify") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.verify_repository") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.verify_repository") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SnapshotVerifyRepository) WithContext(v context.Context) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SnapshotVerifyRepository) WithMasterTimeout(v time.Duration) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SnapshotVerifyRepository) WithTimeout(v time.Duration) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SnapshotVerifyRepository) WithPretty() func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
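+//
+// Editor's sketch (placeholder client es): verify the repository, then poll
+// the snapshot status endpoint defined earlier in this change:
+//
+//	res, err := es.Snapshot.VerifyRepository(
+//		"my_repository",
+//		es.Snapshot.VerifyRepository.WithTimeout(30*time.Second),
+//	)
+//	// ...check err and res, then:
+//	st, err := es.Snapshot.Status(
+//		es.Snapshot.Status.WithRepository("my_repository"),
+//		es.Snapshot.Status.WithSnapshot("my_snapshot"),
+//	)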
+func (f SnapshotVerifyRepository) WithHuman() func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SnapshotVerifyRepository) WithErrorTrace() func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SnapshotVerifyRepository) WithFilterPath(v ...string) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SnapshotVerifyRepository) WithHeader(h map[string]string) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SnapshotVerifyRepository) WithOpaqueID(s string) func(*SnapshotVerifyRepositoryRequest) { + return func(r *SnapshotVerifyRepositoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_disable.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_disable.go new file mode 100644 index 000000000..e7e91da47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_disable.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newStreamsLogsDisableFunc(t Transport) StreamsLogsDisable { + return func(o ...func(*StreamsLogsDisableRequest)) (*Response, error) { + var r = StreamsLogsDisableRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// StreamsLogsDisable disable the Logs Streams feature for this cluster +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-disable.html. +type StreamsLogsDisable func(o ...func(*StreamsLogsDisableRequest)) (*Response, error) + +// StreamsLogsDisableRequest configures the Streams Logs Disable API request. 
+type StreamsLogsDisableRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r StreamsLogsDisableRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.logs_disable") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_streams/logs/_disable")) + path.WriteString("http://") + path.WriteString("/_streams/logs/_disable") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "streams.logs_disable") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.logs_disable") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f StreamsLogsDisable) WithContext(v context.Context) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - period to wait for a connection to the master node. if no response is received before the timeout expires, the request fails and returns an error.. +func (f StreamsLogsDisable) WithMasterTimeout(v time.Duration) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f StreamsLogsDisable) WithTimeout(v time.Duration) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
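+//
+// Editor's sketch, assuming the client exposes this namespace as es.Streams
+// in line with the other esapi namespaces:
+//
+//	res, err := es.Streams.LogsDisable(
+//		es.Streams.LogsDisable.WithContext(ctx),
+//		es.Streams.LogsDisable.WithTimeout(30*time.Second),
+//	)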
+func (f StreamsLogsDisable) WithPretty() func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f StreamsLogsDisable) WithHuman() func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f StreamsLogsDisable) WithErrorTrace() func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f StreamsLogsDisable) WithFilterPath(v ...string) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f StreamsLogsDisable) WithHeader(h map[string]string) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f StreamsLogsDisable) WithOpaqueID(s string) func(*StreamsLogsDisableRequest) { + return func(r *StreamsLogsDisableRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_enable.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_enable.go new file mode 100644 index 000000000..b883b0ba3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.logs_enable.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newStreamsLogsEnableFunc(t Transport) StreamsLogsEnable { + return func(o ...func(*StreamsLogsEnableRequest)) (*Response, error) { + var r = StreamsLogsEnableRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// StreamsLogsEnable enable the Logs Streams feature for this cluster +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-logs-enable.html. +type StreamsLogsEnable func(o ...func(*StreamsLogsEnableRequest)) (*Response, error) + +// StreamsLogsEnableRequest configures the Streams Logs Enable API request. 
+type StreamsLogsEnableRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r StreamsLogsEnableRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.logs_enable") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_streams/logs/_enable")) + path.WriteString("http://") + path.WriteString("/_streams/logs/_enable") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "streams.logs_enable") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.logs_enable") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f StreamsLogsEnable) WithContext(v context.Context) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - period to wait for a connection to the master node. if no response is received before the timeout expires, the request fails and returns an error.. +func (f StreamsLogsEnable) WithMasterTimeout(v time.Duration) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f StreamsLogsEnable) WithTimeout(v time.Duration) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f StreamsLogsEnable) WithPretty() func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
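+//
+// Editor's sketch, symmetric with the logs_disable endpoint above (es is a
+// placeholder client):
+//
+//	res, err := es.Streams.LogsEnable(
+//		es.Streams.LogsEnable.WithMasterTimeout(30*time.Second),
+//	)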
+func (f StreamsLogsEnable) WithHuman() func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f StreamsLogsEnable) WithErrorTrace() func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f StreamsLogsEnable) WithFilterPath(v ...string) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f StreamsLogsEnable) WithHeader(h map[string]string) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f StreamsLogsEnable) WithOpaqueID(s string) func(*StreamsLogsEnableRequest) { + return func(r *StreamsLogsEnableRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.status.go new file mode 100644 index 000000000..91ddf87a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.streams.status.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newStreamsStatusFunc(t Transport) StreamsStatus { + return func(o ...func(*StreamsStatusRequest)) (*Response, error) { + var r = StreamsStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// StreamsStatus return the current status of the streams feature for each streams type +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/streams-status.html. +type StreamsStatus func(o ...func(*StreamsStatusRequest)) (*Response, error) + +// StreamsStatusRequest configures the Streams Status API request. +type StreamsStatusRequest struct { + MaterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
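+//
+// NOTE(editor): the MaterTimeout field and "mater_timeout" parameter below
+// are spelled as generated; the misspelling appears to originate in the API
+// specification this file was generated from, so it is preserved here rather
+// than hand-edited in a DO NOT EDIT file.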
+func (r StreamsStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_streams/status")) + path.WriteString("http://") + path.WriteString("/_streams/status") + + params = make(map[string]string) + + if r.MaterTimeout != 0 { + params["mater_timeout"] = formatDuration(r.MaterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "streams.status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f StreamsStatus) WithContext(v context.Context) func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.ctx = v + } +} + +// WithMaterTimeout - period to wait for a response. if no response is received before the timeout expires, the request fails and returns an error.. +func (f StreamsStatus) WithMaterTimeout(v time.Duration) func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.MaterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f StreamsStatus) WithPretty() func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f StreamsStatus) WithHuman() func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f StreamsStatus) WithErrorTrace() func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f StreamsStatus) WithFilterPath(v ...string) func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
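+//
+// Editor's sketch (placeholder values):
+//
+//	res, err := es.Streams.Status(
+//		es.Streams.Status.WithHuman(),
+//		es.Streams.Status.WithHeader(map[string]string{"X-Request-Source": "docs"}),
+//	)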
+func (f StreamsStatus) WithHeader(h map[string]string) func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f StreamsStatus) WithOpaqueID(s string) func(*StreamsStatusRequest) { + return func(r *StreamsStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym.go new file mode 100644 index 000000000..9f4ba9ef8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSynonymsDeleteSynonymFunc(t Transport) SynonymsDeleteSynonym { + return func(id string, o ...func(*SynonymsDeleteSynonymRequest)) (*Response, error) { + var r = SynonymsDeleteSynonymRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsDeleteSynonym deletes a synonym set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html. +type SynonymsDeleteSynonym func(id string, o ...func(*SynonymsDeleteSynonymRequest)) (*Response, error) + +// SynonymsDeleteSynonymRequest configures the Synonyms Delete Synonym API request. +type SynonymsDeleteSynonymRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SynonymsDeleteSynonymRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.delete_synonym") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.delete_synonym") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsDeleteSynonym) WithContext(v context.Context) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsDeleteSynonym) WithPretty() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsDeleteSynonym) WithHuman() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsDeleteSynonym) WithErrorTrace() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsDeleteSynonym) WithFilterPath(v ...string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
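+//
+// Editor's sketch: deleting a synonym set by id (placeholder values):
+//
+//	res, err := es.Synonyms.DeleteSynonym(
+//		"my-synonyms-set",
+//		es.Synonyms.DeleteSynonym.WithContext(ctx),
+//	)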
+func (f SynonymsDeleteSynonym) WithHeader(h map[string]string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsDeleteSynonym) WithOpaqueID(s string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym_rule.go new file mode 100644 index 000000000..b2c79cd9c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.delete_synonym_rule.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSynonymsDeleteSynonymRuleFunc(t Transport) SynonymsDeleteSynonymRule { + return func(rule_id string, set_id string, o ...func(*SynonymsDeleteSynonymRuleRequest)) (*Response, error) { + var r = SynonymsDeleteSynonymRuleRequest{RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsDeleteSynonymRule deletes a synonym rule in a synonym set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html. +type SynonymsDeleteSynonymRule func(rule_id string, set_id string, o ...func(*SynonymsDeleteSynonymRuleRequest)) (*Response, error) + +// SynonymsDeleteSynonymRuleRequest configures the Synonyms Delete Synonym Rule API request. +type SynonymsDeleteSynonymRuleRequest struct { + RuleID string + SetID string + + Refresh *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SynonymsDeleteSynonymRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "set_id", r.SetID) + } + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.delete_synonym_rule") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.delete_synonym_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsDeleteSynonymRule) WithContext(v context.Context) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.ctx = v + } +} + +// WithRefresh - refresh search analyzers to update synonyms. +func (f SynonymsDeleteSynonymRule) WithRefresh(v bool) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.Refresh = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsDeleteSynonymRule) WithPretty() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsDeleteSynonymRule) WithHuman() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
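+//
+// Editor's sketch: note the generated argument order (rule id first, then the
+// set id) and WithRefresh, which refreshes search analyzers on delete:
+//
+//	res, err := es.Synonyms.DeleteSynonymRule(
+//		"rule-1", "my-synonyms-set",
+//		es.Synonyms.DeleteSynonymRule.WithRefresh(true),
+//	)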
+func (f SynonymsDeleteSynonymRule) WithErrorTrace() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsDeleteSynonymRule) WithFilterPath(v ...string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsDeleteSynonymRule) WithHeader(h map[string]string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsDeleteSynonymRule) WithOpaqueID(s string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym.go new file mode 100644 index 000000000..e180aff55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSynonymsGetSynonymFunc(t Transport) SynonymsGetSynonym { + return func(id string, o ...func(*SynonymsGetSynonymRequest)) (*Response, error) { + var r = SynonymsGetSynonymRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonym retrieves a synonym set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html. +type SynonymsGetSynonym func(id string, o ...func(*SynonymsGetSynonymRequest)) (*Response, error) + +// SynonymsGetSynonymRequest configures the Synonyms Get Synonym API request. +type SynonymsGetSynonymRequest struct { + DocumentID string + + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SynonymsGetSynonymRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonym") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonym") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonym) WithContext(v context.Context) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset. +func (f SynonymsGetSynonym) WithFrom(v int) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get. +func (f SynonymsGetSynonym) WithSize(v int) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonym) WithPretty() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsGetSynonym) WithHuman() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
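+//
+// Editor's sketch: paging through a synonym set with the From/Size options:
+//
+//	res, err := es.Synonyms.GetSynonym(
+//		"my-synonyms-set",
+//		es.Synonyms.GetSynonym.WithFrom(0),
+//		es.Synonyms.GetSynonym.WithSize(100),
+//	)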
+func (f SynonymsGetSynonym) WithErrorTrace() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsGetSynonym) WithFilterPath(v ...string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonym) WithHeader(h map[string]string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsGetSynonym) WithOpaqueID(s string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym_rule.go new file mode 100644 index 000000000..28807bc92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonym_rule.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSynonymsGetSynonymRuleFunc(t Transport) SynonymsGetSynonymRule { + return func(rule_id string, set_id string, o ...func(*SynonymsGetSynonymRuleRequest)) (*Response, error) { + var r = SynonymsGetSynonymRuleRequest{RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonymRule retrieves a synonym rule from a synonym set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html. +type SynonymsGetSynonymRule func(rule_id string, set_id string, o ...func(*SynonymsGetSynonymRuleRequest)) (*Response, error) + +// SynonymsGetSynonymRuleRequest configures the Synonyms Get Synonym Rule API request. +type SynonymsGetSynonymRuleRequest struct { + RuleID string + SetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
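+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client;
+// note that the rule ID comes before the set ID (both names below are
+// hypothetical).
+//
+//	res, err := es.Synonyms.GetSynonymRule("rule-1", "my-synonyms")
+//	if err == nil {
+//		defer res.Body.Close()
+//	}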
+func (r SynonymsGetSynonymRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "set_id", r.SetID) + } + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonym_rule") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonym_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonymRule) WithContext(v context.Context) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonymRule) WithPretty() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsGetSynonymRule) WithHuman() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsGetSynonymRule) WithErrorTrace() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SynonymsGetSynonymRule) WithFilterPath(v ...string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonymRule) WithHeader(h map[string]string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsGetSynonymRule) WithOpaqueID(s string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonyms_sets.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonyms_sets.go new file mode 100644 index 000000000..b90a15375 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.get_synonyms_sets.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSynonymsGetSynonymsSetsFunc(t Transport) SynonymsGetSynonymsSets { + return func(o ...func(*SynonymsGetSynonymsSetsRequest)) (*Response, error) { + var r = SynonymsGetSynonymsSetsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonymsSets retrieves a summary of all defined synonym sets +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html. +type SynonymsGetSynonymsSets func(o ...func(*SynonymsGetSynonymsSetsRequest)) (*Response, error) + +// SynonymsGetSynonymsSetsRequest configures the Synonyms Get Synonyms Sets API request. +type SynonymsGetSynonymsSetsRequest struct { + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
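+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client;
+// WithFrom and WithSize page through the set summaries.
+//
+//	res, err := es.Synonyms.GetSynonymsSets(
+//		es.Synonyms.GetSynonymsSets.WithFrom(0),
+//		es.Synonyms.GetSynonymsSets.WithSize(20),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}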
+func (r SynonymsGetSynonymsSetsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonyms_sets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_synonyms")) + path.WriteString("http://") + path.WriteString("/_synonyms") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonyms_sets") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonyms_sets") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonymsSets) WithContext(v context.Context) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset. +func (f SynonymsGetSynonymsSets) WithFrom(v int) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get. +func (f SynonymsGetSynonymsSets) WithSize(v int) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonymsSets) WithPretty() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsGetSynonymsSets) WithHuman() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsGetSynonymsSets) WithErrorTrace() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SynonymsGetSynonymsSets) WithFilterPath(v ...string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonymsSets) WithHeader(h map[string]string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsGetSynonymsSets) WithOpaqueID(s string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym.go new file mode 100644 index 000000000..00bf711b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym.go @@ -0,0 +1,247 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSynonymsPutSynonymFunc(t Transport) SynonymsPutSynonym { + return func(id string, body io.Reader, o ...func(*SynonymsPutSynonymRequest)) (*Response, error) { + var r = SynonymsPutSynonymRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsPutSynonym creates or updates a synonyms set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html. +type SynonymsPutSynonym func(id string, body io.Reader, o ...func(*SynonymsPutSynonymRequest)) (*Response, error) + +// SynonymsPutSynonymRequest configures the Synonyms Put Synonym API request. +type SynonymsPutSynonymRequest struct { + DocumentID string + + Body io.Reader + + Refresh *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
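+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client
+// and a body in the synonyms-set JSON format (the set name and rule below are
+// hypothetical).
+//
+//	body := strings.NewReader(`{"synonyms_set":[{"id":"rule-1","synonyms":"hello, hi"}]}`)
+//	res, err := es.Synonyms.PutSynonym("my-synonyms", body)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}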
+func (r SynonymsPutSynonymRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.put_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.put_synonym") + if reader := instrument.RecordRequestBody(ctx, "synonyms.put_synonym", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.put_synonym") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsPutSynonym) WithContext(v context.Context) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.ctx = v + } +} + +// WithRefresh - refresh search analyzers to update synonyms. +func (f SynonymsPutSynonym) WithRefresh(v bool) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.Refresh = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsPutSynonym) WithPretty() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsPutSynonym) WithHuman() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SynonymsPutSynonym) WithErrorTrace() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsPutSynonym) WithFilterPath(v ...string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsPutSynonym) WithHeader(h map[string]string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsPutSynonym) WithOpaqueID(s string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym_rule.go new file mode 100644 index 000000000..eb28efc52 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.synonyms.put_synonym_rule.go @@ -0,0 +1,253 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSynonymsPutSynonymRuleFunc(t Transport) SynonymsPutSynonymRule { + return func(body io.Reader, rule_id string, set_id string, o ...func(*SynonymsPutSynonymRuleRequest)) (*Response, error) { + var r = SynonymsPutSynonymRuleRequest{Body: body, RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsPutSynonymRule creates or updates a synonym rule in a synonym set +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html. +type SynonymsPutSynonymRule func(body io.Reader, rule_id string, set_id string, o ...func(*SynonymsPutSynonymRuleRequest)) (*Response, error) + +// SynonymsPutSynonymRuleRequest configures the Synonyms Put Synonym Rule API request. 
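+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client;
+// note the argument order of the function type: body first, then rule ID, then
+// set ID (all names below are hypothetical).
+//
+//	body := strings.NewReader(`{"synonyms": "hello, hi"}`)
+//	res, err := es.Synonyms.PutSynonymRule(body, "rule-1", "my-synonyms")
+//	if err == nil {
+//		defer res.Body.Close()
+//	}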
+type SynonymsPutSynonymRuleRequest struct { + Body io.Reader + + RuleID string + SetID string + + Refresh *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SynonymsPutSynonymRuleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.put_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "set_id", r.SetID) + } + path.WriteString("/") + path.WriteString(r.RuleID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "rule_id", r.RuleID) + } + + params = make(map[string]string) + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.put_synonym_rule") + if reader := instrument.RecordRequestBody(ctx, "synonyms.put_synonym_rule", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.put_synonym_rule") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsPutSynonymRule) WithContext(v context.Context) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.ctx = v + } +} + +// WithRefresh - refresh search analyzers to update synonyms. +func (f SynonymsPutSynonymRule) WithRefresh(v bool) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.Refresh = &v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SynonymsPutSynonymRule) WithPretty() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsPutSynonymRule) WithHuman() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsPutSynonymRule) WithErrorTrace() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsPutSynonymRule) WithFilterPath(v ...string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsPutSynonymRule) WithHeader(h map[string]string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsPutSynonymRule) WithOpaqueID(s string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.cancel.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.cancel.go new file mode 100644 index 000000000..ab08e5456 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.cancel.go @@ -0,0 +1,286 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newTasksCancelFunc(t Transport) TasksCancel { + return func(o ...func(*TasksCancelRequest)) (*Response, error) { + var r = TasksCancelRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TasksCancel cancels a task, if it can be cancelled through an API. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html. +type TasksCancel func(o ...func(*TasksCancelRequest)) (*Response, error) + +// TasksCancelRequest configures the Tasks Cancel API request. 
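+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client;
+// the task ID below is a hypothetical node_id:task_number value.
+//
+//	res, err := es.Tasks.Cancel(
+//		es.Tasks.Cancel.WithTaskID("oTUltX4IQMOUUVeiohTt8A:12345"),
+//		es.Tasks.Cancel.WithWaitForCompletion(true),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}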
+type TasksCancelRequest struct {
+	TaskID string
+
+	Actions           []string
+	Nodes             []string
+	ParentTaskID      string
+	WaitForCompletion *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r TasksCancelRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "tasks.cancel")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + 1 + len("_tasks") + 1 + len(r.TaskID) + 1 + len("_cancel"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_tasks")
+	if r.TaskID != "" {
+		path.WriteString("/")
+		path.WriteString(r.TaskID)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "task_id", r.TaskID)
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_cancel")
+
+	params = make(map[string]string)
+
+	if len(r.Actions) > 0 {
+		params["actions"] = strings.Join(r.Actions, ",")
+	}
+
+	if len(r.Nodes) > 0 {
+		params["nodes"] = strings.Join(r.Nodes, ",")
+	}
+
+	if r.ParentTaskID != "" {
+		params["parent_task_id"] = r.ParentTaskID
+	}
+
+	if r.WaitForCompletion != nil {
+		params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "tasks.cancel")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "tasks.cancel")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f TasksCancel) WithContext(v context.Context) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.ctx = v
+	}
+}
+
+// WithTaskID - cancel the task with specified task ID (node_id:task_number).
+func (f TasksCancel) WithTaskID(v string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.TaskID = v
+	}
+}
+
+// WithActions - a list of actions that should be cancelled; leave empty to cancel all.
+func (f TasksCancel) WithActions(v ...string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.Actions = v
+	}
+}
+
+// WithNodes - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (f TasksCancel) WithNodes(v ...string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.Nodes = v
+	}
+}
+
+// WithParentTaskID - cancel tasks with specified parent task ID (node_id:task_number); set to -1 to cancel all.
+func (f TasksCancel) WithParentTaskID(v string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.ParentTaskID = v
+	}
+}
+
+// WithWaitForCompletion - should the request block until the cancellation of the task and its descendant tasks is completed; defaults to false.
+func (f TasksCancel) WithWaitForCompletion(v bool) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.WaitForCompletion = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f TasksCancel) WithPretty() func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f TasksCancel) WithHuman() func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f TasksCancel) WithErrorTrace() func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f TasksCancel) WithFilterPath(v ...string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f TasksCancel) WithHeader(h map[string]string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f TasksCancel) WithOpaqueID(s string) func(*TasksCancelRequest) {
+	return func(r *TasksCancelRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.get.go
new file mode 100644
index 000000000..f75bb13a3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.get.go
@@ -0,0 +1,252 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTasksGetFunc(t Transport) TasksGet { + return func(task_id string, o ...func(*TasksGetRequest)) (*Response, error) { + var r = TasksGetRequest{TaskID: task_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TasksGet returns information about a task. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html. +type TasksGet func(task_id string, o ...func(*TasksGetRequest)) (*Response, error) + +// TasksGetRequest configures the Tasks Get API request. +type TasksGetRequest struct { + TaskID string + + Timeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TasksGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_tasks") + 1 + len(r.TaskID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_tasks") + path.WriteString("/") + path.WriteString(r.TaskID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_id", r.TaskID) + } + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "tasks.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "tasks.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext 
sets the request context. +func (f TasksGet) WithContext(v context.Context) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.ctx = v + } +} + +// WithTimeout - explicit operation timeout. +func (f TasksGet) WithTimeout(v time.Duration) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.Timeout = v + } +} + +// WithWaitForCompletion - wait for the matching tasks to complete (default: false). +func (f TasksGet) WithWaitForCompletion(v bool) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TasksGet) WithPretty() func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TasksGet) WithHuman() func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TasksGet) WithErrorTrace() func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TasksGet) WithFilterPath(v ...string) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TasksGet) WithHeader(h map[string]string) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TasksGet) WithOpaqueID(s string) func(*TasksGetRequest) { + return func(r *TasksGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.list.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.list.go new file mode 100644 index 000000000..87d367586 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.tasks.list.go @@ -0,0 +1,304 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTasksListFunc(t Transport) TasksList { + return func(o ...func(*TasksListRequest)) (*Response, error) { + var r = TasksListRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TasksList returns a list of tasks. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html. +type TasksList func(o ...func(*TasksListRequest)) (*Response, error) + +// TasksListRequest configures the Tasks List API request. +type TasksListRequest struct { + Actions []string + Detailed *bool + GroupBy string + Nodes []string + ParentTaskID string + Timeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TasksListRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_tasks")) + path.WriteString("http://") + path.WriteString("/_tasks") + + params = make(map[string]string) + + if len(r.Actions) > 0 { + params["actions"] = strings.Join(r.Actions, ",") + } + + if r.Detailed != nil { + params["detailed"] = strconv.FormatBool(*r.Detailed) + } + + if r.GroupBy != "" { + params["group_by"] = r.GroupBy + } + + if len(r.Nodes) > 0 { + params["nodes"] = strings.Join(r.Nodes, ",") + } + + if r.ParentTaskID != "" { + params["parent_task_id"] = r.ParentTaskID + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "tasks.list") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "tasks.list") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f TasksList) WithContext(v context.Context) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.ctx = v
+	}
+}
+
+// WithActions - a list of actions that should be returned; leave empty to return all.
+func (f TasksList) WithActions(v ...string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Actions = v
+	}
+}
+
+// WithDetailed - return detailed task information (default: false).
+func (f TasksList) WithDetailed(v bool) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Detailed = &v
+	}
+}
+
+// WithGroupBy - group tasks by nodes or parent/child relationships.
+func (f TasksList) WithGroupBy(v string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.GroupBy = v
+	}
+}
+
+// WithNodes - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
+func (f TasksList) WithNodes(v ...string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Nodes = v
+	}
+}
+
+// WithParentTaskID - return tasks with specified parent task ID (node_id:task_number); set to -1 to return all.
+func (f TasksList) WithParentTaskID(v string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.ParentTaskID = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f TasksList) WithTimeout(v time.Duration) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithWaitForCompletion - wait for the matching tasks to complete (default: false).
+func (f TasksList) WithWaitForCompletion(v bool) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.WaitForCompletion = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f TasksList) WithPretty() func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f TasksList) WithHuman() func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f TasksList) WithErrorTrace() func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f TasksList) WithFilterPath(v ...string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f TasksList) WithHeader(h map[string]string) func(*TasksListRequest) {
+	return func(r *TasksListRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
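+//
+// For instance (a hedged sketch, assuming "es" is a configured
+// *elasticsearch.Client), tagging a task listing so the call can later be
+// identified by its X-Opaque-Id:
+//
+//	res, err := es.Tasks.List(
+//		es.Tasks.List.WithDetailed(true),
+//		es.Tasks.List.WithOpaqueID("my-request-123"),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}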
+func (f TasksList) WithOpaqueID(s string) func(*TasksListRequest) { + return func(r *TasksListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.terms_enum.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.terms_enum.go new file mode 100644 index 000000000..b3c9ebc87 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.terms_enum.go @@ -0,0 +1,245 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strings" +) + +func newTermsEnumFunc(t Transport) TermsEnum { + return func(index []string, o ...func(*TermsEnumRequest)) (*Response, error) { + var r = TermsEnumRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TermsEnum the terms enum API can be used to discover terms in the index that begin with the provided string. It is designed for low-latency look-ups used in auto-complete scenarios. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html. +type TermsEnum func(index []string, o ...func(*TermsEnumRequest)) (*Response, error) + +// TermsEnumRequest configures the Terms Enum API request. +type TermsEnumRequest struct { + Index []string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
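+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client;
+// the index and field names are hypothetical, and the body uses the
+// documented "field"/"string" request shape.
+//
+//	body := strings.NewReader(`{"field":"title","string":"ki"}`)
+//	res, err := es.TermsEnum([]string{"my-index"}, es.TermsEnum.WithBody(body))
+//	if err == nil {
+//		defer res.Body.Close()
+//	}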
+func (r TermsEnumRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "terms_enum") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_terms_enum")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_terms_enum") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "terms_enum") + if reader := instrument.RecordRequestBody(ctx, "terms_enum", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "terms_enum") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TermsEnum) WithContext(v context.Context) func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.ctx = v + } +} + +// WithBody - field name, string which is the prefix expected in matching terms, timeout and size for max number of results. +func (f TermsEnum) WithBody(v io.Reader) func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TermsEnum) WithPretty() func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TermsEnum) WithHuman() func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f TermsEnum) WithErrorTrace() func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TermsEnum) WithFilterPath(v ...string) func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TermsEnum) WithHeader(h map[string]string) func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TermsEnum) WithOpaqueID(s string) func(*TermsEnumRequest) { + return func(r *TermsEnumRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.termvectors.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.termvectors.go new file mode 100644 index 000000000..0bf926319 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.termvectors.go @@ -0,0 +1,389 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newTermvectorsFunc(t Transport) Termvectors { + return func(index string, o ...func(*TermvectorsRequest)) (*Response, error) { + var r = TermvectorsRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Termvectors returns information and statistics about terms in the fields of a particular document. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html. +type Termvectors func(index string, o ...func(*TermvectorsRequest)) (*Response, error) + +// TermvectorsRequest configures the Termvectors API request. +type TermvectorsRequest struct { + Index string + DocumentID string + + Body io.Reader + + Fields []string + FieldStatistics *bool + Offsets *bool + Payloads *bool + Positions *bool + Preference string + Realtime *bool + Routing string + TermStatistics *bool + Version *int + VersionType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
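+//
+// A minimal usage sketch, assuming "es" is a configured *elasticsearch.Client
+// ("my-index", the document ID, and the field name are hypothetical).
+//
+//	res, err := es.Termvectors(
+//		"my-index",
+//		es.Termvectors.WithDocumentID("1"),
+//		es.Termvectors.WithFields("title"),
+//		es.Termvectors.WithTermStatistics(true),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}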
+func (r TermvectorsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "termvectors") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_termvectors") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_termvectors") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + + params = make(map[string]string) + + if len(r.Fields) > 0 { + params["fields"] = strings.Join(r.Fields, ",") + } + + if r.FieldStatistics != nil { + params["field_statistics"] = strconv.FormatBool(*r.FieldStatistics) + } + + if r.Offsets != nil { + params["offsets"] = strconv.FormatBool(*r.Offsets) + } + + if r.Payloads != nil { + params["payloads"] = strconv.FormatBool(*r.Payloads) + } + + if r.Positions != nil { + params["positions"] = strconv.FormatBool(*r.Positions) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Realtime != nil { + params["realtime"] = strconv.FormatBool(*r.Realtime) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.TermStatistics != nil { + params["term_statistics"] = strconv.FormatBool(*r.TermStatistics) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.VersionType != "" { + params["version_type"] = r.VersionType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "termvectors") + if reader := instrument.RecordRequestBody(ctx, "termvectors", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "termvectors") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext 
sets the request context. +func (f Termvectors) WithContext(v context.Context) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.ctx = v + } +} + +// WithBody - Define parameters and or supply a document to get termvectors for. See documentation.. +func (f Termvectors) WithBody(v io.Reader) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Body = v + } +} + +// WithDocumentID - the ID of the document, when not specified a doc param should be supplied.. +func (f Termvectors) WithDocumentID(v string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.DocumentID = v + } +} + +// WithFields - a list of fields to return.. +func (f Termvectors) WithFields(v ...string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Fields = v + } +} + +// WithFieldStatistics - specifies if document count, sum of document frequencies and sum of total term frequencies should be returned.. +func (f Termvectors) WithFieldStatistics(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.FieldStatistics = &v + } +} + +// WithOffsets - specifies if term offsets should be returned.. +func (f Termvectors) WithOffsets(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Offsets = &v + } +} + +// WithPayloads - specifies if term payloads should be returned.. +func (f Termvectors) WithPayloads(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Payloads = &v + } +} + +// WithPositions - specifies if term positions should be returned.. +func (f Termvectors) WithPositions(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Positions = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random).. +func (f Termvectors) WithPreference(v string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Preference = v + } +} + +// WithRealtime - specifies if request is real-time as opposed to near-real-time (default: true).. +func (f Termvectors) WithRealtime(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Realtime = &v + } +} + +// WithRouting - specific routing value.. +func (f Termvectors) WithRouting(v string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Routing = v + } +} + +// WithTermStatistics - specifies if total term frequency and document frequency should be returned.. +func (f Termvectors) WithTermStatistics(v bool) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.TermStatistics = &v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f Termvectors) WithVersion(v int) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Version = &v + } +} + +// WithVersionType - specific version type. +func (f Termvectors) WithVersionType(v string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.VersionType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Termvectors) WithPretty() func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Termvectors) WithHuman() func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
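The pointer-typed fields in TermvectorsRequest (FieldStatistics, Offsets, Positions, and so on) let Do distinguish "option never set" from "explicitly false": the query parameter is emitted only when the pointer is non-nil. A short sketch under the same assumptions as the previous example (v9 client; index and document ID are placeholders):

package example

import (
	"context"
	"io"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func termvectors(es *elasticsearch.Client) error {
	res, err := es.Termvectors(
		"articles", // placeholder index
		es.Termvectors.WithDocumentID("42"),
		es.Termvectors.WithFields("body"),
		es.Termvectors.WithTermStatistics(true), // stored in a *bool, so an explicit false is also sent
		es.Termvectors.WithContext(context.Background()),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	_, err = io.Copy(io.Discard, res.Body) // drain the body so the connection can be reused
	return err
}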
+func (f Termvectors) WithErrorTrace() func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Termvectors) WithFilterPath(v ...string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Termvectors) WithHeader(h map[string]string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Termvectors) WithOpaqueID(s string) func(*TermvectorsRequest) { + return func(r *TermvectorsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update.go new file mode 100644 index 000000000..3adf1df18 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update.go @@ -0,0 +1,398 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newUpdateFunc(t Transport) Update { + return func(index string, id string, body io.Reader, o ...func(*UpdateRequest)) (*Response, error) { + var r = UpdateRequest{Index: index, DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// Update updates a document with a script or partial document. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html. +type Update func(index string, id string, body io.Reader, o ...func(*UpdateRequest)) (*Response, error) + +// UpdateRequest configures the Update API request. 
+type UpdateRequest struct { + Index string + DocumentID string + + Body io.Reader + + IfPrimaryTerm *int + IfSeqNo *int + IncludeSourceOnError *bool + Lang string + Refresh string + RequireAlias *bool + RetryOnConflict *int + Routing string + Source []string + SourceExcludes []string + SourceIncludes []string + Timeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r UpdateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_update") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_update") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.IfPrimaryTerm != nil { + params["if_primary_term"] = strconv.FormatInt(int64(*r.IfPrimaryTerm), 10) + } + + if r.IfSeqNo != nil { + params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) + } + + if r.IncludeSourceOnError != nil { + params["include_source_on_error"] = strconv.FormatBool(*r.IncludeSourceOnError) + } + + if r.Lang != "" { + params["lang"] = r.Lang + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.RequireAlias != nil { + params["require_alias"] = strconv.FormatBool(*r.RequireAlias) + } + + if r.RetryOnConflict != nil { + params["retry_on_conflict"] = strconv.FormatInt(int64(*r.RetryOnConflict), 10) + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + 
req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "update") + if reader := instrument.RecordRequestBody(ctx, "update", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "update") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f Update) WithContext(v context.Context) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.ctx = v + } +} + +// WithIfPrimaryTerm - only perform the update operation if the last operation that has changed the document has the specified primary term. +func (f Update) WithIfPrimaryTerm(v int) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.IfPrimaryTerm = &v + } +} + +// WithIfSeqNo - only perform the update operation if the last operation that has changed the document has the specified sequence number. +func (f Update) WithIfSeqNo(v int) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.IfSeqNo = &v + } +} + +// WithIncludeSourceOnError - true or false if to include the document source in the error message in case of parsing errors. defaults to true.. +func (f Update) WithIncludeSourceOnError(v bool) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.IncludeSourceOnError = &v + } +} + +// WithLang - the script language (default: painless). +func (f Update) WithLang(v string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Lang = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.. +func (f Update) WithRefresh(v string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Refresh = v + } +} + +// WithRequireAlias - when true, requires destination is an alias. default is false. +func (f Update) WithRequireAlias(v bool) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.RequireAlias = &v + } +} + +// WithRetryOnConflict - specify how many times should the operation be retried when a conflict occurs (default: 0). +func (f Update) WithRetryOnConflict(v int) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.RetryOnConflict = &v + } +} + +// WithRouting - specific routing value. +func (f Update) WithRouting(v string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Routing = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f Update) WithSource(v ...string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f Update) WithSourceExcludes(v ...string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. 
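WithIfSeqNo and WithIfPrimaryTerm above are the hooks for optimistic concurrency control; both land in pointer fields, so the parameters reach the wire only when the caller opts in. A hedged sketch with placeholder index, ID, and sequence values that would normally come from a prior read of the document:

package example

import (
	"fmt"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func updateWithOCC(es *elasticsearch.Client) error {
	res, err := es.Update(
		"articles", "42", // placeholder index and document ID
		strings.NewReader(`{"doc":{"title":"updated title"}}`),
		es.Update.WithIfSeqNo(3),          // reject the update if another writer got there first
		es.Update.WithIfPrimaryTerm(1),
		es.Update.WithRefresh("wait_for"), // block until the change is visible to search
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() { // a 409 here signals a version conflict
		return fmt.Errorf("update failed: %s", res.String())
	}
	return nil
}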
+func (f Update) WithSourceIncludes(v ...string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.SourceIncludes = v + } +} + +// WithTimeout - explicit operation timeout. +func (f Update) WithTimeout(v time.Duration) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Timeout = v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the update operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f Update) WithWaitForActiveShards(v string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f Update) WithPretty() func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f Update) WithHuman() func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f Update) WithErrorTrace() func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f Update) WithFilterPath(v ...string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f Update) WithHeader(h map[string]string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f Update) WithOpaqueID(s string) func(*UpdateRequest) { + return func(r *UpdateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query.go new file mode 100644 index 000000000..c61b42c8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query.go @@ -0,0 +1,622 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newUpdateByQueryFunc(t Transport) UpdateByQuery { + return func(index []string, o ...func(*UpdateByQueryRequest)) (*Response, error) { + var r = UpdateByQueryRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// UpdateByQuery performs an update on every document in the index without changing the source, +// for example to pick up a mapping change. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html. +type UpdateByQuery func(index []string, o ...func(*UpdateByQueryRequest)) (*Response, error) + +// UpdateByQueryRequest configures the Update By Query API request. +type UpdateByQueryRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + Analyzer string + AnalyzeWildcard *bool + Conflicts string + DefaultOperator string + Df string + ExpandWildcards string + From *int + IgnoreUnavailable *bool + Lenient *bool + MaxDocs *int + Pipeline string + Preference string + Query string + Refresh *bool + RequestCache *bool + RequestsPerSecond *int + Routing []string + Scroll time.Duration + ScrollSize *int + SearchTimeout time.Duration + SearchType string + Slices interface{} + Sort []string + Stats []string + TerminateAfter *int + Timeout time.Duration + Version *bool + VersionType *bool + WaitForActiveShards string + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r UpdateByQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update_by_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_update_by_query")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_update_by_query") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.Conflicts != "" { + params["conflicts"] = r.Conflicts + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.MaxDocs != nil { + params["max_docs"] = strconv.FormatInt(int64(*r.MaxDocs), 10) + } + + if r.Pipeline != "" { + params["pipeline"] = r.Pipeline + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Refresh != nil { + params["refresh"] = strconv.FormatBool(*r.Refresh) + } + + if r.RequestCache != nil { + params["request_cache"] = strconv.FormatBool(*r.RequestCache) + } + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.Scroll != 0 { + params["scroll"] = formatDuration(r.Scroll) + } + + if r.ScrollSize != nil { + params["scroll_size"] = strconv.FormatInt(int64(*r.ScrollSize), 10) + } + + if r.SearchTimeout != 0 { + params["search_timeout"] = formatDuration(r.SearchTimeout) + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.Slices != nil { + params["slices"] = fmt.Sprintf("%v", r.Slices) + } + + if len(r.Sort) > 0 { + params["sort"] = strings.Join(r.Sort, ",") + } + + if len(r.Stats) > 0 { + params["stats"] = strings.Join(r.Stats, ",") + } + + if r.TerminateAfter != nil { + params["terminate_after"] = strconv.FormatInt(int64(*r.TerminateAfter), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Version != nil { + params["version"] = strconv.FormatBool(*r.Version) + } + + if r.VersionType != nil { + params["version_type"] = strconv.FormatBool(*r.VersionType) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.WaitForCompletion != nil { + 
params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "update_by_query") + if reader := instrument.RecordRequestBody(ctx, "update_by_query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "update_by_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f UpdateByQuery) WithContext(v context.Context) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.ctx = v + } +} + +// WithBody - The search definition using the Query DSL. +func (f UpdateByQuery) WithBody(v io.Reader) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Body = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f UpdateByQuery) WithAllowNoIndices(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.AllowNoIndices = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f UpdateByQuery) WithAnalyzer(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f UpdateByQuery) WithAnalyzeWildcard(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithConflicts - what to do when the update by query hits version conflicts?. +func (f UpdateByQuery) WithConflicts(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Conflicts = v + } +} + +// WithDefaultOperator - the default operator for query string query (and or or). +func (f UpdateByQuery) WithDefaultOperator(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.DefaultOperator = v + } +} + +// WithDf - the field to use as default where no field prefix is given in the query string. 
+func (f UpdateByQuery) WithDf(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Df = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f UpdateByQuery) WithExpandWildcards(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.ExpandWildcards = v + } +} + +// WithFrom - starting offset (default: 0). +func (f UpdateByQuery) WithFrom(v int) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.From = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f UpdateByQuery) WithIgnoreUnavailable(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. +func (f UpdateByQuery) WithLenient(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Lenient = &v + } +} + +// WithMaxDocs - maximum number of documents to process (default: all documents). +func (f UpdateByQuery) WithMaxDocs(v int) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.MaxDocs = &v + } +} + +// WithPipeline - ingest pipeline to set on index requests made by this action. (default: none). +func (f UpdateByQuery) WithPipeline(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Pipeline = v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f UpdateByQuery) WithPreference(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Preference = v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f UpdateByQuery) WithQuery(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Query = v + } +} + +// WithRefresh - should the affected indexes be refreshed?. +func (f UpdateByQuery) WithRefresh(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Refresh = &v + } +} + +// WithRequestCache - specify if request cache should be used for this request or not, defaults to index level setting. +func (f UpdateByQuery) WithRequestCache(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.RequestCache = &v + } +} + +// WithRequestsPerSecond - the throttle to set on this request in sub-requests per second. -1 means no throttle.. +func (f UpdateByQuery) WithRequestsPerSecond(v int) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.RequestsPerSecond = &v + } +} + +// WithRouting - a list of specific routing values. +func (f UpdateByQuery) WithRouting(v ...string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Routing = v + } +} + +// WithScroll - specify how long a consistent view of the index should be maintained for scrolled search. +func (f UpdateByQuery) WithScroll(v time.Duration) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Scroll = v + } +} + +// WithScrollSize - size on the scroll request powering the update by query. +func (f UpdateByQuery) WithScrollSize(v int) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.ScrollSize = &v + } +} + +// WithSearchTimeout - explicit timeout for each search request. 
defaults to no timeout.. +func (f UpdateByQuery) WithSearchTimeout(v time.Duration) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.SearchTimeout = v + } +} + +// WithSearchType - search operation type. +func (f UpdateByQuery) WithSearchType(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.SearchType = v + } +} + +// WithSlices - the number of slices this task should be divided into. defaults to 1, meaning the task isn't sliced into subtasks. can be set to `auto`.. +func (f UpdateByQuery) WithSlices(v interface{}) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Slices = v + } +} + +// WithSort - a list of : pairs. +func (f UpdateByQuery) WithSort(v ...string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Sort = v + } +} + +// WithStats - specific 'tag' of the request for logging and statistical purposes. +func (f UpdateByQuery) WithStats(v ...string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Stats = v + } +} + +// WithTerminateAfter - the maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.. +func (f UpdateByQuery) WithTerminateAfter(v int) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.TerminateAfter = &v + } +} + +// WithTimeout - time each individual bulk request should wait for shards that are unavailable.. +func (f UpdateByQuery) WithTimeout(v time.Duration) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Timeout = v + } +} + +// WithVersion - specify whether to return document version as part of a hit. +func (f UpdateByQuery) WithVersion(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Version = &v + } +} + +// WithVersionType - should the document increment the version number (internal) on hit or not (reindex). +func (f UpdateByQuery) WithVersionType(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.VersionType = &v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before proceeding with the update by query operation. defaults to 1, meaning the primary shard only. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f UpdateByQuery) WithWaitForActiveShards(v string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.WaitForActiveShards = v + } +} + +// WithWaitForCompletion - should the request should block until the update by query operation is complete.. +func (f UpdateByQuery) WithWaitForCompletion(v bool) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f UpdateByQuery) WithPretty() func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f UpdateByQuery) WithHuman() func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f UpdateByQuery) WithErrorTrace() func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
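A sketch of a scripted update-by-query under the same assumptions (placeholder index and script). The Slices field is interface{} precisely so callers can pass either an integer slice count or the string "auto", and WithWaitForCompletion(false) turns the call into a fire-and-forget task:

package example

import (
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func resetViewCounts(es *elasticsearch.Client) error {
	res, err := es.UpdateByQuery(
		[]string{"articles"}, // required: Do rejects a nil or empty index list
		es.UpdateByQuery.WithBody(strings.NewReader(`{"script":{"source":"ctx._source.views = 0"}}`)),
		es.UpdateByQuery.WithConflicts("proceed"),     // continue past version conflicts
		es.UpdateByQuery.WithSlices("auto"),           // or an int, e.g. 4
		es.UpdateByQuery.WithWaitForCompletion(false), // respond with a task ID instead of blocking
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}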
+func (f UpdateByQuery) WithFilterPath(v ...string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f UpdateByQuery) WithHeader(h map[string]string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f UpdateByQuery) WithOpaqueID(s string) func(*UpdateByQueryRequest) { + return func(r *UpdateByQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query_rethrottle.go new file mode 100644 index 000000000..299d0e13c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.update_by_query_rethrottle.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newUpdateByQueryRethrottleFunc(t Transport) UpdateByQueryRethrottle { + return func(task_id string, requests_per_second *int, o ...func(*UpdateByQueryRethrottleRequest)) (*Response, error) { + var r = UpdateByQueryRethrottleRequest{TaskID: task_id, RequestsPerSecond: requests_per_second} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// UpdateByQueryRethrottle changes the number of requests per second for a particular Update By Query operation. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html. +type UpdateByQueryRethrottle func(task_id string, requests_per_second *int, o ...func(*UpdateByQueryRethrottleRequest)) (*Response, error) + +// UpdateByQueryRethrottleRequest configures the Update By Query Rethrottle API request. +type UpdateByQueryRethrottleRequest struct { + TaskID string + + RequestsPerSecond *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r UpdateByQueryRethrottleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_update_by_query") + 1 + len(r.TaskID) + 1 + len("_rethrottle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_update_by_query") + path.WriteString("/") + path.WriteString(r.TaskID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "task_id", r.TaskID) + } + path.WriteString("/") + path.WriteString("_rethrottle") + + params = make(map[string]string) + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "update_by_query_rethrottle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "update_by_query_rethrottle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f UpdateByQueryRethrottle) WithContext(v context.Context) func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.ctx = v + } +} + +// WithRequestsPerSecond - the throttle to set on this request in floating sub-requests per second. -1 means set no throttle.. +func (f UpdateByQueryRethrottle) WithRequestsPerSecond(v int) func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.RequestsPerSecond = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f UpdateByQueryRethrottle) WithPretty() func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f UpdateByQueryRethrottle) WithHuman() func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
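Unusually for this package, the rethrottle call takes its new rate as a required positional *int rather than a functional option, mirroring the generated constructor above. A sketch with a placeholder task ID (the node:id value that a wait_for_completion=false update-by-query returns):

package example

import (
	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func rethrottle(es *elasticsearch.Client) error {
	rps := 1000 // new throttle in sub-requests per second; -1 removes the throttle
	res, err := es.UpdateByQueryRethrottle("oTUltX4IQMOUUVeiohTt8A:12345", &rps)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}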
+func (f UpdateByQueryRethrottle) WithErrorTrace() func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f UpdateByQueryRethrottle) WithFilterPath(v ...string) func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f UpdateByQueryRethrottle) WithHeader(h map[string]string) func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f UpdateByQueryRethrottle) WithOpaqueID(s string) func(*UpdateByQueryRethrottleRequest) { + return func(r *UpdateByQueryRethrottleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.delete.go new file mode 100644 index 000000000..362e9a512 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.delete.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newAsyncSearchDeleteFunc(t Transport) AsyncSearchDelete { + return func(id string, o ...func(*AsyncSearchDeleteRequest)) (*Response, error) { + var r = AsyncSearchDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AsyncSearchDelete - Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html. +type AsyncSearchDelete func(id string, o ...func(*AsyncSearchDeleteRequest)) (*Response, error) + +// AsyncSearchDeleteRequest configures the Async Search Delete API request. +type AsyncSearchDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r AsyncSearchDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "async_search.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_async_search") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_async_search") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "async_search.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "async_search.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AsyncSearchDelete) WithContext(v context.Context) func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AsyncSearchDelete) WithPretty() func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AsyncSearchDelete) WithHuman() func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AsyncSearchDelete) WithErrorTrace() func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AsyncSearchDelete) WithFilterPath(v ...string) func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
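A sketch of cancelling an async search, assuming the async-search calls are exposed through the client's AsyncSearch namespace as in released go-elasticsearch versions; the ID argument is whatever a prior submit call returned. As the API comment above notes, deleting a still-running search cancels it.

package example

import (
	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func cancelSearch(es *elasticsearch.Client, searchID string) error {
	res, err := es.AsyncSearch.Delete(searchID) // DELETE /_async_search/<id>
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}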
+func (f AsyncSearchDelete) WithHeader(h map[string]string) func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AsyncSearchDelete) WithOpaqueID(s string) func(*AsyncSearchDeleteRequest) { + return func(r *AsyncSearchDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.get.go new file mode 100644 index 000000000..0c28944e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.get.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newAsyncSearchGetFunc(t Transport) AsyncSearchGet { + return func(id string, o ...func(*AsyncSearchGetRequest)) (*Response, error) { + var r = AsyncSearchGetRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AsyncSearchGet - Retrieves the results of a previously submitted async search request given its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html. +type AsyncSearchGet func(id string, o ...func(*AsyncSearchGetRequest)) (*Response, error) + +// AsyncSearchGetRequest configures the Async Search Get API request. +type AsyncSearchGetRequest struct { + DocumentID string + + KeepAlive time.Duration + TypedKeys *bool + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r AsyncSearchGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "async_search.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_async_search") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_async_search") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "async_search.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "async_search.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AsyncSearchGet) WithContext(v context.Context) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.ctx = v + } +} + +// WithKeepAlive - specify the time interval in which the results (partial or final) for this search will be available. +func (f AsyncSearchGet) WithKeepAlive(v time.Duration) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.KeepAlive = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f AsyncSearchGet) WithTypedKeys(v bool) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.TypedKeys = &v + } +} + +// WithWaitForCompletionTimeout - specify the time that the request should block waiting for the final response. 
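WithKeepAlive and WithWaitForCompletionTimeout above both serialize through the package's formatDuration helper, so plain time.Duration values work at the call site. A sketch under the same AsyncSearch-namespace assumption as the delete example:

package example

import (
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func fetchResults(es *elasticsearch.Client, searchID string) error {
	res, err := es.AsyncSearch.Get(
		searchID,
		es.AsyncSearch.Get.WithWaitForCompletionTimeout(2*time.Second), // block briefly for a final result
		es.AsyncSearch.Get.WithKeepAlive(5*time.Minute),                // extend how long results are retained
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}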
+func (f AsyncSearchGet) WithWaitForCompletionTimeout(v time.Duration) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AsyncSearchGet) WithPretty() func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AsyncSearchGet) WithHuman() func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AsyncSearchGet) WithErrorTrace() func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AsyncSearchGet) WithFilterPath(v ...string) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AsyncSearchGet) WithHeader(h map[string]string) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AsyncSearchGet) WithOpaqueID(s string) func(*AsyncSearchGetRequest) { + return func(r *AsyncSearchGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.status.go new file mode 100644 index 000000000..964a4d706 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.status.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newAsyncSearchStatusFunc(t Transport) AsyncSearchStatus { + return func(id string, o ...func(*AsyncSearchStatusRequest)) (*Response, error) { + var r = AsyncSearchStatusRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AsyncSearchStatus - Retrieves the status of a previously submitted async search request given its ID. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html. +type AsyncSearchStatus func(id string, o ...func(*AsyncSearchStatusRequest)) (*Response, error) + +// AsyncSearchStatusRequest configures the Async Search Status API request. +type AsyncSearchStatusRequest struct { + DocumentID string + + KeepAlive time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r AsyncSearchStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "async_search.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_async_search") + 1 + len("status") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_async_search") + path.WriteString("/") + path.WriteString("status") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "async_search.status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "async_search.status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AsyncSearchStatus) WithContext(v context.Context) func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.ctx = v + } +} + +// WithKeepAlive - specify the time interval in which the results (partial or final) for this search will be available. +func (f AsyncSearchStatus) WithKeepAlive(v time.Duration) func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.KeepAlive = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f AsyncSearchStatus) WithPretty() func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AsyncSearchStatus) WithHuman() func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AsyncSearchStatus) WithErrorTrace() func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AsyncSearchStatus) WithFilterPath(v ...string) func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AsyncSearchStatus) WithHeader(h map[string]string) func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AsyncSearchStatus) WithOpaqueID(s string) func(*AsyncSearchStatusRequest) { + return func(r *AsyncSearchStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.submit.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.submit.go new file mode 100644 index 000000000..5efc99c4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.async_search.submit.go @@ -0,0 +1,769 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newAsyncSearchSubmitFunc(t Transport) AsyncSearchSubmit { + return func(o ...func(*AsyncSearchSubmitRequest)) (*Response, error) { + var r = AsyncSearchSubmitRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AsyncSearchSubmit - Executes a search request asynchronously. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html. +type AsyncSearchSubmit func(o ...func(*AsyncSearchSubmitRequest)) (*Response, error) + +// AsyncSearchSubmitRequest configures the Async Search Submit API request. 
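+//
+// The struct is normally populated via the With... options below rather than
+// directly; a minimal sketch (client, index pattern, and query JSON are
+// illustrative assumptions):
+//
+//	res, err := es.AsyncSearch.Submit(
+//		es.AsyncSearch.Submit.WithIndex("logs-*"),
+//		es.AsyncSearch.Submit.WithBody(strings.NewReader(`{"query":{"match_all":{}}}`)),
+//		es.AsyncSearch.Submit.WithKeepOnCompletion(true),
+//	)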
+type AsyncSearchSubmitRequest struct { + Index []string + + Body io.Reader + + AllowNoIndices *bool + AllowPartialSearchResults *bool + Analyzer string + AnalyzeWildcard *bool + BatchedReduceSize *int + CcsMinimizeRoundtrips *bool + DefaultOperator string + Df string + DocvalueFields []string + ExpandWildcards string + Explain *bool + From *int + IgnoreThrottled *bool + IgnoreUnavailable *bool + KeepAlive time.Duration + KeepOnCompletion *bool + Lenient *bool + MaxConcurrentShardRequests *int + Preference string + Query string + RequestCache *bool + RestTotalHitsAsInt *bool + Routing []string + SearchType string + SeqNoPrimaryTerm *bool + Size *int + Sort []string + Source []string + SourceExcludes []string + SourceIncludes []string + Stats []string + StoredFields []string + SuggestField string + SuggestMode string + SuggestSize *int + SuggestText string + TerminateAfter *int + Timeout time.Duration + TrackScores *bool + TrackTotalHits interface{} + TypedKeys *bool + Version *bool + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r AsyncSearchSubmitRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "async_search.submit") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_async_search")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_async_search") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.Analyzer != "" { + params["analyzer"] = r.Analyzer + } + + if r.AnalyzeWildcard != nil { + params["analyze_wildcard"] = strconv.FormatBool(*r.AnalyzeWildcard) + } + + if r.BatchedReduceSize != nil { + params["batched_reduce_size"] = strconv.FormatInt(int64(*r.BatchedReduceSize), 10) + } + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.DefaultOperator != "" { + params["default_operator"] = r.DefaultOperator + } + + if r.Df != "" { + params["df"] = r.Df + } + + if len(r.DocvalueFields) > 0 { + params["docvalue_fields"] = strings.Join(r.DocvalueFields, ",") + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.KeepAlive != 0 { + params["keep_alive"] = 
formatDuration(r.KeepAlive) + } + + if r.KeepOnCompletion != nil { + params["keep_on_completion"] = strconv.FormatBool(*r.KeepOnCompletion) + } + + if r.Lenient != nil { + params["lenient"] = strconv.FormatBool(*r.Lenient) + } + + if r.MaxConcurrentShardRequests != nil { + params["max_concurrent_shard_requests"] = strconv.FormatInt(int64(*r.MaxConcurrentShardRequests), 10) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.RequestCache != nil { + params["request_cache"] = strconv.FormatBool(*r.RequestCache) + } + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if len(r.Routing) > 0 { + params["routing"] = strings.Join(r.Routing, ",") + } + + if r.SearchType != "" { + params["search_type"] = r.SearchType + } + + if r.SeqNoPrimaryTerm != nil { + params["seq_no_primary_term"] = strconv.FormatBool(*r.SeqNoPrimaryTerm) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if len(r.Sort) > 0 { + params["sort"] = strings.Join(r.Sort, ",") + } + + if len(r.Source) > 0 { + params["_source"] = strings.Join(r.Source, ",") + } + + if len(r.SourceExcludes) > 0 { + params["_source_excludes"] = strings.Join(r.SourceExcludes, ",") + } + + if len(r.SourceIncludes) > 0 { + params["_source_includes"] = strings.Join(r.SourceIncludes, ",") + } + + if len(r.Stats) > 0 { + params["stats"] = strings.Join(r.Stats, ",") + } + + if len(r.StoredFields) > 0 { + params["stored_fields"] = strings.Join(r.StoredFields, ",") + } + + if r.SuggestField != "" { + params["suggest_field"] = r.SuggestField + } + + if r.SuggestMode != "" { + params["suggest_mode"] = r.SuggestMode + } + + if r.SuggestSize != nil { + params["suggest_size"] = strconv.FormatInt(int64(*r.SuggestSize), 10) + } + + if r.SuggestText != "" { + params["suggest_text"] = r.SuggestText + } + + if r.TerminateAfter != nil { + params["terminate_after"] = strconv.FormatInt(int64(*r.TerminateAfter), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.TrackScores != nil { + params["track_scores"] = strconv.FormatBool(*r.TrackScores) + } + + if r.TrackTotalHits != nil { + params["track_total_hits"] = fmt.Sprintf("%v", r.TrackTotalHits) + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Version != nil { + params["version"] = strconv.FormatBool(*r.Version) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req 
= req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "async_search.submit") + if reader := instrument.RecordRequestBody(ctx, "async_search.submit", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "async_search.submit") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AsyncSearchSubmit) WithContext(v context.Context) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.ctx = v + } +} + +// WithBody - The search definition using the Query DSL. +func (f AsyncSearchSubmit) WithBody(v io.Reader) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Body = v + } +} + +// WithIndex - a list of index names to search; use _all to perform the operation on all indices. +func (f AsyncSearchSubmit) WithIndex(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f AsyncSearchSubmit) WithAllowNoIndices(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.AllowNoIndices = &v + } +} + +// WithAllowPartialSearchResults - indicate if an error should be returned if there is a partial search failure or timeout. +func (f AsyncSearchSubmit) WithAllowPartialSearchResults(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithAnalyzer - the analyzer to use for the query string. +func (f AsyncSearchSubmit) WithAnalyzer(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Analyzer = v + } +} + +// WithAnalyzeWildcard - specify whether wildcard and prefix queries should be analyzed (default: false). +func (f AsyncSearchSubmit) WithAnalyzeWildcard(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.AnalyzeWildcard = &v + } +} + +// WithBatchedReduceSize - the number of shard results that should be reduced at once on the coordinating node. this value should be used as the granularity at which progress results will be made available.. +func (f AsyncSearchSubmit) WithBatchedReduceSize(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.BatchedReduceSize = &v + } +} + +// WithCcsMinimizeRoundtrips - when doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. however, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.. +func (f AsyncSearchSubmit) WithCcsMinimizeRoundtrips(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithDefaultOperator - the default operator for query string query (and or or). 
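+//
+// Only meaningful together with the lightweight `q` query-string parameter;
+// sketch (values are illustrative):
+//
+//	es.AsyncSearch.Submit.WithQuery("error timeout"),
+//	es.AsyncSearch.Submit.WithDefaultOperator("AND"),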
+func (f AsyncSearchSubmit) WithDefaultOperator(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.DefaultOperator = v + } +} + +// WithDf - the field to use as default where no field prefix is given in the query string. +func (f AsyncSearchSubmit) WithDf(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Df = v + } +} + +// WithDocvalueFields - a list of fields to return as the docvalue representation of a field for each hit. +func (f AsyncSearchSubmit) WithDocvalueFields(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.DocvalueFields = v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f AsyncSearchSubmit) WithExpandWildcards(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.ExpandWildcards = v + } +} + +// WithExplain - specify whether to return detailed information about score computation as part of a hit. +func (f AsyncSearchSubmit) WithExplain(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Explain = &v + } +} + +// WithFrom - starting offset (default: 0). +func (f AsyncSearchSubmit) WithFrom(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.From = &v + } +} + +// WithIgnoreThrottled - whether specified concrete, expanded or aliased indices should be ignored when throttled. +func (f AsyncSearchSubmit) WithIgnoreThrottled(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f AsyncSearchSubmit) WithIgnoreUnavailable(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithKeepAlive - update the time interval in which the results (partial or final) for this search will be available. +func (f AsyncSearchSubmit) WithKeepAlive(v time.Duration) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.KeepAlive = v + } +} + +// WithKeepOnCompletion - control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false). +func (f AsyncSearchSubmit) WithKeepOnCompletion(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.KeepOnCompletion = &v + } +} + +// WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. +func (f AsyncSearchSubmit) WithLenient(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Lenient = &v + } +} + +// WithMaxConcurrentShardRequests - the number of concurrent shard requests per node this search executes concurrently. this value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +func (f AsyncSearchSubmit) WithMaxConcurrentShardRequests(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.MaxConcurrentShardRequests = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). 
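+//
+// For example (sketch): prefer local shard copies with
+//
+//	es.AsyncSearch.Submit.WithPreference("_local")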
+func (f AsyncSearchSubmit) WithPreference(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Preference = v + } +} + +// WithQuery - query in the lucene query string syntax. +func (f AsyncSearchSubmit) WithQuery(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Query = v + } +} + +// WithRequestCache - specify if request cache should be used for this request or not, defaults to true. +func (f AsyncSearchSubmit) WithRequestCache(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.RequestCache = &v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f AsyncSearchSubmit) WithRestTotalHitsAsInt(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithRouting - a list of specific routing values. +func (f AsyncSearchSubmit) WithRouting(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Routing = v + } +} + +// WithSearchType - search operation type. +func (f AsyncSearchSubmit) WithSearchType(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SearchType = v + } +} + +// WithSeqNoPrimaryTerm - specify whether to return sequence number and primary term of the last modification of each hit. +func (f AsyncSearchSubmit) WithSeqNoPrimaryTerm(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SeqNoPrimaryTerm = &v + } +} + +// WithSize - number of hits to return (default: 10). +func (f AsyncSearchSubmit) WithSize(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Size = &v + } +} + +// WithSort - a list of : pairs. +func (f AsyncSearchSubmit) WithSort(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Sort = v + } +} + +// WithSource - true or false to return the _source field or not, or a list of fields to return. +func (f AsyncSearchSubmit) WithSource(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Source = v + } +} + +// WithSourceExcludes - a list of fields to exclude from the returned _source field. +func (f AsyncSearchSubmit) WithSourceExcludes(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SourceExcludes = v + } +} + +// WithSourceIncludes - a list of fields to extract and return from the _source field. +func (f AsyncSearchSubmit) WithSourceIncludes(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SourceIncludes = v + } +} + +// WithStats - specific 'tag' of the request for logging and statistical purposes. +func (f AsyncSearchSubmit) WithStats(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Stats = v + } +} + +// WithStoredFields - a list of stored fields to return as part of a hit. +func (f AsyncSearchSubmit) WithStoredFields(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.StoredFields = v + } +} + +// WithSuggestField - specify which field to use for suggestions. 
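+//
+// Pairs with the other suggest_* options; sketch with deliberately
+// misspelled input text (field name is illustrative):
+//
+//	es.AsyncSearch.Submit.WithSuggestField("title"),
+//	es.AsyncSearch.Submit.WithSuggestText("kibana dashbord"),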
+func (f AsyncSearchSubmit) WithSuggestField(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SuggestField = v + } +} + +// WithSuggestMode - specify suggest mode. +func (f AsyncSearchSubmit) WithSuggestMode(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SuggestMode = v + } +} + +// WithSuggestSize - how many suggestions to return in response. +func (f AsyncSearchSubmit) WithSuggestSize(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SuggestSize = &v + } +} + +// WithSuggestText - the source text for which the suggestions should be returned. +func (f AsyncSearchSubmit) WithSuggestText(v string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.SuggestText = v + } +} + +// WithTerminateAfter - the maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.. +func (f AsyncSearchSubmit) WithTerminateAfter(v int) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.TerminateAfter = &v + } +} + +// WithTimeout - explicit operation timeout. +func (f AsyncSearchSubmit) WithTimeout(v time.Duration) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Timeout = v + } +} + +// WithTrackScores - whether to calculate and return scores even if they are not used for sorting. +func (f AsyncSearchSubmit) WithTrackScores(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.TrackScores = &v + } +} + +// WithTrackTotalHits - indicate if the number of documents that match the query should be tracked. a number can also be specified, to accurately track the total hit count up to the number.. +func (f AsyncSearchSubmit) WithTrackTotalHits(v interface{}) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.TrackTotalHits = v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. +func (f AsyncSearchSubmit) WithTypedKeys(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.TypedKeys = &v + } +} + +// WithVersion - specify whether to return document version as part of a hit. +func (f AsyncSearchSubmit) WithVersion(v bool) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Version = &v + } +} + +// WithWaitForCompletionTimeout - specify the time that the request should block waiting for the final response. +func (f AsyncSearchSubmit) WithWaitForCompletionTimeout(v time.Duration) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AsyncSearchSubmit) WithPretty() func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AsyncSearchSubmit) WithHuman() func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AsyncSearchSubmit) WithErrorTrace() func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
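+//
+// For example (sketch; the paths name fields of the async search response):
+//
+//	es.AsyncSearch.Submit.WithFilterPath("id", "is_running", "is_partial")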
+func (f AsyncSearchSubmit) WithFilterPath(v ...string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AsyncSearchSubmit) WithHeader(h map[string]string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AsyncSearchSubmit) WithOpaqueID(s string) func(*AsyncSearchSubmitRequest) { + return func(r *AsyncSearchSubmitRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go new file mode 100644 index 000000000..805e7c9ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newAutoscalingDeleteAutoscalingPolicyFunc(t Transport) AutoscalingDeleteAutoscalingPolicy { + return func(name string, o ...func(*AutoscalingDeleteAutoscalingPolicyRequest)) (*Response, error) { + var r = AutoscalingDeleteAutoscalingPolicyRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AutoscalingDeleteAutoscalingPolicy - Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html. +type AutoscalingDeleteAutoscalingPolicy func(name string, o ...func(*AutoscalingDeleteAutoscalingPolicyRequest)) (*Response, error) + +// AutoscalingDeleteAutoscalingPolicyRequest configures the Autoscaling Delete Autoscaling Policy API request. +type AutoscalingDeleteAutoscalingPolicyRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
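+//
+// Callers are expected to close the returned Response body. Sketch, assuming
+// the wrapper is exposed as es.Autoscaling.DeleteAutoscalingPolicy and with
+// an illustrative policy name:
+//
+//	res, err := es.Autoscaling.DeleteAutoscalingPolicy("my_policy")
+//	if err == nil {
+//		defer res.Body.Close()
+//	}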
+func (r AutoscalingDeleteAutoscalingPolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.delete_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_autoscaling") + 1 + len("policy") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.delete_autoscaling_policy") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.delete_autoscaling_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AutoscalingDeleteAutoscalingPolicy) WithContext(v context.Context) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f AutoscalingDeleteAutoscalingPolicy) WithMasterTimeout(v time.Duration) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. +func (f AutoscalingDeleteAutoscalingPolicy) WithTimeout(v time.Duration) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f AutoscalingDeleteAutoscalingPolicy) WithPretty() func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AutoscalingDeleteAutoscalingPolicy) WithHuman() func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AutoscalingDeleteAutoscalingPolicy) WithErrorTrace() func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AutoscalingDeleteAutoscalingPolicy) WithFilterPath(v ...string) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AutoscalingDeleteAutoscalingPolicy) WithHeader(h map[string]string) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AutoscalingDeleteAutoscalingPolicy) WithOpaqueID(s string) func(*AutoscalingDeleteAutoscalingPolicyRequest) { + return func(r *AutoscalingDeleteAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go new file mode 100644 index 000000000..f0d0f2509 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newAutoscalingGetAutoscalingCapacityFunc(t Transport) AutoscalingGetAutoscalingCapacity { + return func(o ...func(*AutoscalingGetAutoscalingCapacityRequest)) (*Response, error) { + var r = AutoscalingGetAutoscalingCapacityRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AutoscalingGetAutoscalingCapacity - Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html. +type AutoscalingGetAutoscalingCapacity func(o ...func(*AutoscalingGetAutoscalingCapacityRequest)) (*Response, error) + +// AutoscalingGetAutoscalingCapacityRequest configures the Autoscaling Get Autoscaling Capacity API request. +type AutoscalingGetAutoscalingCapacityRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r AutoscalingGetAutoscalingCapacityRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_capacity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_autoscaling/capacity")) + path.WriteString("http://") + path.WriteString("/_autoscaling/capacity") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.get_autoscaling_capacity") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.get_autoscaling_capacity") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: 
res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AutoscalingGetAutoscalingCapacity) WithContext(v context.Context) func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f AutoscalingGetAutoscalingCapacity) WithMasterTimeout(v time.Duration) func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AutoscalingGetAutoscalingCapacity) WithPretty() func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AutoscalingGetAutoscalingCapacity) WithHuman() func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AutoscalingGetAutoscalingCapacity) WithErrorTrace() func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AutoscalingGetAutoscalingCapacity) WithFilterPath(v ...string) func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AutoscalingGetAutoscalingCapacity) WithHeader(h map[string]string) func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AutoscalingGetAutoscalingCapacity) WithOpaqueID(s string) func(*AutoscalingGetAutoscalingCapacityRequest) { + return func(r *AutoscalingGetAutoscalingCapacityRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_policy.go new file mode 100644 index 000000000..7484e20c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.get_autoscaling_policy.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newAutoscalingGetAutoscalingPolicyFunc(t Transport) AutoscalingGetAutoscalingPolicy { + return func(name string, o ...func(*AutoscalingGetAutoscalingPolicyRequest)) (*Response, error) { + var r = AutoscalingGetAutoscalingPolicyRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AutoscalingGetAutoscalingPolicy - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html. +type AutoscalingGetAutoscalingPolicy func(name string, o ...func(*AutoscalingGetAutoscalingPolicyRequest)) (*Response, error) + +// AutoscalingGetAutoscalingPolicyRequest configures the Autoscaling Get Autoscaling Policy API request. +type AutoscalingGetAutoscalingPolicyRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r AutoscalingGetAutoscalingPolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_autoscaling") + 1 + len("policy") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.get_autoscaling_policy") + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.get_autoscaling_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AutoscalingGetAutoscalingPolicy) WithContext(v context.Context) func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f AutoscalingGetAutoscalingPolicy) WithMasterTimeout(v time.Duration) func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AutoscalingGetAutoscalingPolicy) WithPretty() func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AutoscalingGetAutoscalingPolicy) WithHuman() func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AutoscalingGetAutoscalingPolicy) WithErrorTrace() func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AutoscalingGetAutoscalingPolicy) WithFilterPath(v ...string) func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AutoscalingGetAutoscalingPolicy) WithHeader(h map[string]string) func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AutoscalingGetAutoscalingPolicy) WithOpaqueID(s string) func(*AutoscalingGetAutoscalingPolicyRequest) { + return func(r *AutoscalingGetAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.put_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.put_autoscaling_policy.go new file mode 100644 index 000000000..da8f366bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.autoscaling.put_autoscaling_policy.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newAutoscalingPutAutoscalingPolicyFunc(t Transport) AutoscalingPutAutoscalingPolicy { + return func(name string, body io.Reader, o ...func(*AutoscalingPutAutoscalingPolicyRequest)) (*Response, error) { + var r = AutoscalingPutAutoscalingPolicyRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// AutoscalingPutAutoscalingPolicy - Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html. +type AutoscalingPutAutoscalingPolicy func(name string, body io.Reader, o ...func(*AutoscalingPutAutoscalingPolicyRequest)) (*Response, error) + +// AutoscalingPutAutoscalingPolicyRequest configures the Autoscaling Put Autoscaling Policy API request. +type AutoscalingPutAutoscalingPolicyRequest struct { + Body io.Reader + + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
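+//
+// Normally reached through the functional wrapper rather than by calling Do
+// directly; sketch with an illustrative policy body (roles and deciders
+// depend on the deployment):
+//
+//	res, err := es.Autoscaling.PutAutoscalingPolicy("my_policy",
+//		strings.NewReader(`{"roles":["data_hot"],"deciders":{"fixed":{}}}`),
+//	)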
+func (r AutoscalingPutAutoscalingPolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.put_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_autoscaling") + 1 + len("policy") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.put_autoscaling_policy") + if reader := instrument.RecordRequestBody(ctx, "autoscaling.put_autoscaling_policy", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.put_autoscaling_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f AutoscalingPutAutoscalingPolicy) WithContext(v context.Context) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f AutoscalingPutAutoscalingPolicy) WithMasterTimeout(v time.Duration) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. 
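+//
+// For example (sketch): give a slow cluster more time to acknowledge,
+// typically alongside WithMasterTimeout:
+//
+//	es.Autoscaling.PutAutoscalingPolicy.WithTimeout(30*time.Second)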
+func (f AutoscalingPutAutoscalingPolicy) WithTimeout(v time.Duration) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f AutoscalingPutAutoscalingPolicy) WithPretty() func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f AutoscalingPutAutoscalingPolicy) WithHuman() func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f AutoscalingPutAutoscalingPolicy) WithErrorTrace() func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f AutoscalingPutAutoscalingPolicy) WithFilterPath(v ...string) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f AutoscalingPutAutoscalingPolicy) WithHeader(h map[string]string) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f AutoscalingPutAutoscalingPolicy) WithOpaqueID(s string) func(*AutoscalingPutAutoscalingPolicyRequest) { + return func(r *AutoscalingPutAutoscalingPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_data_frame_analytics.go new file mode 100644 index 000000000..703d6f49c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_data_frame_analytics.go @@ -0,0 +1,336 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatMLDataFrameAnalyticsFunc(t Transport) CatMLDataFrameAnalytics { + return func(o ...func(*CatMLDataFrameAnalyticsRequest)) (*Response, error) { + var r = CatMLDataFrameAnalyticsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatMLDataFrameAnalytics - Gets configuration and usage information about data frame analytics jobs. +// +// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html. +type CatMLDataFrameAnalytics func(o ...func(*CatMLDataFrameAnalyticsRequest)) (*Response, error) + +// CatMLDataFrameAnalyticsRequest configures the CatML Data Frame Analytics API request. +type CatMLDataFrameAnalyticsRequest struct { + DocumentID string + + AllowNoMatch *bool + Bytes string + Format string + H []string + Help *bool + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatMLDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = 
q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_data_frame_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatMLDataFrameAnalytics) WithContext(v context.Context) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithDocumentID - the ID of the data frame analytics to fetch. +func (f CatMLDataFrameAnalytics) WithDocumentID(v string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.DocumentID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no configs. (this includes `_all` string or when no configs have been specified). +func (f CatMLDataFrameAnalytics) WithAllowNoMatch(v bool) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.AllowNoMatch = &v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatMLDataFrameAnalytics) WithBytes(v string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatMLDataFrameAnalytics) WithFormat(v string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatMLDataFrameAnalytics) WithH(v ...string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatMLDataFrameAnalytics) WithHelp(v bool) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatMLDataFrameAnalytics) WithS(v ...string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatMLDataFrameAnalytics) WithTime(v string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatMLDataFrameAnalytics) WithV(v bool) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatMLDataFrameAnalytics) WithPretty() func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
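+//
+// Editorial aside (not upstream-generated): the With* helpers above are
+// functional options passed to the API function itself. A minimal sketch,
+// assuming es is an *elasticsearch.Client built on this module:
+//
+//    res, err := es.Cat.MLDataFrameAnalytics(
+//        es.Cat.MLDataFrameAnalytics.WithFormat("json"),
+//        es.Cat.MLDataFrameAnalytics.WithV(true),
+//    )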
+func (f CatMLDataFrameAnalytics) WithHuman() func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatMLDataFrameAnalytics) WithErrorTrace() func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatMLDataFrameAnalytics) WithFilterPath(v ...string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatMLDataFrameAnalytics) WithHeader(h map[string]string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatMLDataFrameAnalytics) WithOpaqueID(s string) func(*CatMLDataFrameAnalyticsRequest) { + return func(r *CatMLDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_datafeeds.go new file mode 100644 index 000000000..41e689e03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_datafeeds.go @@ -0,0 +1,322 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatMLDatafeedsFunc(t Transport) CatMLDatafeeds { + return func(o ...func(*CatMLDatafeedsRequest)) (*Response, error) { + var r = CatMLDatafeedsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatMLDatafeeds - Gets configuration and usage information about datafeeds. +// +// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html. +type CatMLDatafeeds func(o ...func(*CatMLDatafeedsRequest)) (*Response, error) + +// CatMLDatafeedsRequest configures the CatML Datafeeds API request. 
+type CatMLDatafeedsRequest struct { + DatafeedID string + + AllowNoMatch *bool + Format string + H []string + Help *bool + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CatMLDatafeedsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_datafeeds") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("datafeeds") + if r.DatafeedID != "" { + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_datafeeds") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_datafeeds") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatMLDatafeeds) WithContext(v context.Context) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.ctx = v + } +} + +// WithDatafeedID - the ID of the datafeeds stats to fetch. 
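+//
+// Editorial aside (not upstream-generated): because the datafeed ID is
+// optional for this cat API, it is supplied as an option rather than a
+// positional argument. A minimal sketch, assuming es is an
+// *elasticsearch.Client built on this module:
+//
+//    res, err := es.Cat.MLDatafeeds(
+//        es.Cat.MLDatafeeds.WithDatafeedID("my-datafeed"),
+//        es.Cat.MLDatafeeds.WithFormat("json"),
+//    )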
+func (f CatMLDatafeeds) WithDatafeedID(v string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.DatafeedID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). +func (f CatMLDatafeeds) WithAllowNoMatch(v bool) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.AllowNoMatch = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatMLDatafeeds) WithFormat(v string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatMLDatafeeds) WithH(v ...string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatMLDatafeeds) WithHelp(v bool) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatMLDatafeeds) WithS(v ...string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatMLDatafeeds) WithTime(v string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatMLDatafeeds) WithV(v bool) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatMLDatafeeds) WithPretty() func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatMLDatafeeds) WithHuman() func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatMLDatafeeds) WithErrorTrace() func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatMLDatafeeds) WithFilterPath(v ...string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatMLDatafeeds) WithHeader(h map[string]string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatMLDatafeeds) WithOpaqueID(s string) func(*CatMLDatafeedsRequest) { + return func(r *CatMLDatafeedsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_jobs.go new file mode 100644 index 000000000..4933b449a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_jobs.go @@ -0,0 +1,334 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatMLJobsFunc(t Transport) CatMLJobs { + return func(o ...func(*CatMLJobsRequest)) (*Response, error) { + var r = CatMLJobsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatMLJobs - Gets configuration and usage information about anomaly detection jobs. +// +// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html. +type CatMLJobs func(o ...func(*CatMLJobsRequest)) (*Response, error) + +// CatMLJobsRequest configures the CatML Jobs API request. +type CatMLJobsRequest struct { + JobID string + + AllowNoMatch *bool + Bytes string + Format string + H []string + Help *bool + S []string + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
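+//
+// Editorial aside (not upstream-generated): a minimal sketch of a typical
+// call, assuming es is an *elasticsearch.Client built on this module and ctx
+// is a context.Context:
+//
+//    res, err := es.Cat.MLJobs(
+//        es.Cat.MLJobs.WithContext(ctx),
+//        es.Cat.MLJobs.WithH("id", "state"),
+//        es.Cat.MLJobs.WithFormat("json"),
+//    )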
+func (r CatMLJobsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + if r.JobID != "" { + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_jobs") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_jobs") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatMLJobs) WithContext(v context.Context) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.ctx = v + } +} + +// WithJobID - the ID of the jobs stats to fetch. +func (f CatMLJobs) WithJobID(v string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.JobID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs. (this includes `_all` string or when no jobs have been specified). +func (f CatMLJobs) WithAllowNoMatch(v bool) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.AllowNoMatch = &v + } +} + +// WithBytes - the unit in which to display byte values. 
+func (f CatMLJobs) WithBytes(v string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatMLJobs) WithFormat(v string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Format = v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatMLJobs) WithH(v ...string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatMLJobs) WithHelp(v bool) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatMLJobs) WithS(v ...string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.S = v + } +} + +// WithTime - the unit in which to display time values. +func (f CatMLJobs) WithTime(v string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatMLJobs) WithV(v bool) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatMLJobs) WithPretty() func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatMLJobs) WithHuman() func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatMLJobs) WithErrorTrace() func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatMLJobs) WithFilterPath(v ...string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatMLJobs) WithHeader(h map[string]string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatMLJobs) WithOpaqueID(s string) func(*CatMLJobsRequest) { + return func(r *CatMLJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_trained_models.go new file mode 100644 index 000000000..aa97b55e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.ml_trained_models.go @@ -0,0 +1,358 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatMLTrainedModelsFunc(t Transport) CatMLTrainedModels { + return func(o ...func(*CatMLTrainedModelsRequest)) (*Response, error) { + var r = CatMLTrainedModelsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatMLTrainedModels - Gets configuration and usage information about inference trained models. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html. +type CatMLTrainedModels func(o ...func(*CatMLTrainedModelsRequest)) (*Response, error) + +// CatMLTrainedModelsRequest configures the CatML Trained Models API request. +type CatMLTrainedModelsRequest struct { + ModelID string + + AllowNoMatch *bool + Bytes string + Format string + From *int + H []string + Help *bool + S []string + Size *int + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
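+//
+// Editorial aside (not upstream-generated): this endpoint also supports
+// paging through From and Size, mirroring the query parameters below. A
+// minimal sketch, assuming es is an *elasticsearch.Client built on this
+// module:
+//
+//    res, err := es.Cat.MLTrainedModels(
+//        es.Cat.MLTrainedModels.WithSize(50),
+//        es.Cat.MLTrainedModels.WithFormat("json"),
+//    )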
+func (r CatMLTrainedModelsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_trained_models") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("ml") + 1 + len("trained_models") + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("trained_models") + if r.ModelID != "" { + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Bytes != "" { + params["bytes"] = r.Bytes + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_trained_models") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_trained_models") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatMLTrainedModels) WithContext(v context.Context) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.ctx = v + } +} + +// WithModelID - the ID of the trained models stats to fetch. 
+func (f CatMLTrainedModels) WithModelID(v string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.ModelID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no trained models. (this includes `_all` string or when no trained models have been specified). +func (f CatMLTrainedModels) WithAllowNoMatch(v bool) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.AllowNoMatch = &v + } +} + +// WithBytes - the unit in which to display byte values. +func (f CatMLTrainedModels) WithBytes(v string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Bytes = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatMLTrainedModels) WithFormat(v string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Format = v + } +} + +// WithFrom - skips a number of trained models. +func (f CatMLTrainedModels) WithFrom(v int) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.From = &v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatMLTrainedModels) WithH(v ...string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatMLTrainedModels) WithHelp(v bool) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatMLTrainedModels) WithS(v ...string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.S = v + } +} + +// WithSize - specifies a max number of trained models to get. +func (f CatMLTrainedModels) WithSize(v int) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Size = &v + } +} + +// WithTime - the unit in which to display time values. +func (f CatMLTrainedModels) WithTime(v string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatMLTrainedModels) WithV(v bool) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatMLTrainedModels) WithPretty() func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatMLTrainedModels) WithHuman() func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatMLTrainedModels) WithErrorTrace() func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatMLTrainedModels) WithFilterPath(v ...string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f CatMLTrainedModels) WithHeader(h map[string]string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatMLTrainedModels) WithOpaqueID(s string) func(*CatMLTrainedModelsRequest) { + return func(r *CatMLTrainedModelsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.transforms.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.transforms.go new file mode 100644 index 000000000..bcaac8734 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.cat.transforms.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newCatTransformsFunc(t Transport) CatTransforms { + return func(o ...func(*CatTransformsRequest)) (*Response, error) { + var r = CatTransformsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CatTransforms - Gets configuration and usage information about transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html. +type CatTransforms func(o ...func(*CatTransformsRequest)) (*Response, error) + +// CatTransformsRequest configures the Cat Transforms API request. +type CatTransformsRequest struct { + TransformID string + + AllowNoMatch *bool + Format string + From *int + H []string + Help *bool + S []string + Size *int + Time string + V *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
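+//
+// Editorial aside (not upstream-generated): a minimal sketch of a typical
+// call, assuming es is an *elasticsearch.Client built on this module:
+//
+//    res, err := es.Cat.Transforms(
+//        es.Cat.Transforms.WithTransformID("*"),
+//        es.Cat.Transforms.WithAllowNoMatch(true),
+//        es.Cat.Transforms.WithFormat("json"),
+//    )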
+func (r CatTransformsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.transforms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_cat") + 1 + len("transforms") + 1 + len(r.TransformID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("transforms") + if r.TransformID != "" { + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if len(r.H) > 0 { + params["h"] = strings.Join(r.H, ",") + } + + if r.Help != nil { + params["help"] = strconv.FormatBool(*r.Help) + } + + if len(r.S) > 0 { + params["s"] = strings.Join(r.S, ",") + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Time != "" { + params["time"] = r.Time + } + + if r.V != nil { + params["v"] = strconv.FormatBool(*r.V) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "cat.transforms") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.transforms") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CatTransforms) WithContext(v context.Context) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.ctx = v + } +} + +// WithTransformID - the ID of the transform for which to get stats. '_all' or '*' implies all transforms. +func (f CatTransforms) WithTransformID(v string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.TransformID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no transforms. (this includes `_all` string or when no transforms have been specified). 
+func (f CatTransforms) WithAllowNoMatch(v bool) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.AllowNoMatch = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f CatTransforms) WithFormat(v string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Format = v + } +} + +// WithFrom - skips a number of transform configs, defaults to 0. +func (f CatTransforms) WithFrom(v int) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.From = &v + } +} + +// WithH - comma-separated list of column names to display. +func (f CatTransforms) WithH(v ...string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.H = v + } +} + +// WithHelp - return help information. +func (f CatTransforms) WithHelp(v bool) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Help = &v + } +} + +// WithS - comma-separated list of column names or column aliases to sort by. +func (f CatTransforms) WithS(v ...string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.S = v + } +} + +// WithSize - specifies a max number of transforms to get, defaults to 100. +func (f CatTransforms) WithSize(v int) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Size = &v + } +} + +// WithTime - the unit in which to display time values. +func (f CatTransforms) WithTime(v string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Time = v + } +} + +// WithV - verbose mode. display column headers. +func (f CatTransforms) WithV(v bool) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.V = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CatTransforms) WithPretty() func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CatTransforms) WithHuman() func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CatTransforms) WithErrorTrace() func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CatTransforms) WithFilterPath(v ...string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CatTransforms) WithHeader(h map[string]string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CatTransforms) WithOpaqueID(s string) func(*CatTransformsRequest) { + return func(r *CatTransformsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.delete_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.delete_auto_follow_pattern.go new file mode 100644 index 000000000..72e4dcf2d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.delete_auto_follow_pattern.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRDeleteAutoFollowPatternFunc(t Transport) CCRDeleteAutoFollowPattern { + return func(name string, o ...func(*CCRDeleteAutoFollowPatternRequest)) (*Response, error) { + var r = CCRDeleteAutoFollowPatternRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRDeleteAutoFollowPattern - Deletes auto-follow patterns. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html. +type CCRDeleteAutoFollowPattern func(name string, o ...func(*CCRDeleteAutoFollowPatternRequest)) (*Response, error) + +// CCRDeleteAutoFollowPatternRequest configures the CCR Delete Auto Follow Pattern API request. +type CCRDeleteAutoFollowPatternRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
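+//
+// Editorial aside (not upstream-generated): the pattern name is positional;
+// everything else is a functional option. A minimal sketch, assuming es is an
+// *elasticsearch.Client built on this module:
+//
+//    res, err := es.CCR.DeleteAutoFollowPattern(
+//        "my-auto-follow-pattern",
+//        es.CCR.DeleteAutoFollowPattern.WithMasterTimeout(30*time.Second),
+//    )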
+func (r CCRDeleteAutoFollowPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.delete_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ccr") + 1 + len("auto_follow") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.delete_auto_follow_pattern") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.delete_auto_follow_pattern") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRDeleteAutoFollowPattern) WithContext(v context.Context) func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRDeleteAutoFollowPattern) WithMasterTimeout(v time.Duration) func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRDeleteAutoFollowPattern) WithPretty() func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRDeleteAutoFollowPattern) WithHuman() func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f CCRDeleteAutoFollowPattern) WithErrorTrace() func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRDeleteAutoFollowPattern) WithFilterPath(v ...string) func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRDeleteAutoFollowPattern) WithHeader(h map[string]string) func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRDeleteAutoFollowPattern) WithOpaqueID(s string) func(*CCRDeleteAutoFollowPatternRequest) { + return func(r *CCRDeleteAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow.go new file mode 100644 index 000000000..f1717bca7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newCCRFollowFunc(t Transport) CCRFollow { + return func(index string, body io.Reader, o ...func(*CCRFollowRequest)) (*Response, error) { + var r = CCRFollowRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRFollow - Creates a new follower index configured to follow the referenced leader index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html. +type CCRFollow func(index string, body io.Reader, o ...func(*CCRFollowRequest)) (*Response, error) + +// CCRFollowRequest configures the CCR Follow API request. +type CCRFollowRequest struct { + Index string + + Body io.Reader + + MasterTimeout time.Duration + WaitForActiveShards string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
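+//
+// Editorial aside (not upstream-generated): the request body carries the
+// leader reference. A minimal sketch, assuming es is an
+// *elasticsearch.Client built on this module:
+//
+//    res, err := es.CCR.Follow(
+//        "follower-index",
+//        strings.NewReader(`{"remote_cluster":"leader","leader_index":"leader-index"}`),
+//        es.CCR.Follow.WithWaitForActiveShards("1"),
+//    )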
+func (r CCRFollowRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ccr") + 1 + len("follow")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("follow") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForActiveShards != "" { + params["wait_for_active_shards"] = r.WaitForActiveShards + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow") + if reader := instrument.RecordRequestBody(ctx, "ccr.follow", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRFollow) WithContext(v context.Context) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRFollow) WithMasterTimeout(v time.Duration) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForActiveShards - sets the number of shard copies that must be active before returning. defaults to 0. set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1). +func (f CCRFollow) WithWaitForActiveShards(v string) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.WaitForActiveShards = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f CCRFollow) WithPretty() func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRFollow) WithHuman() func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRFollow) WithErrorTrace() func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRFollow) WithFilterPath(v ...string) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRFollow) WithHeader(h map[string]string) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRFollow) WithOpaqueID(s string) func(*CCRFollowRequest) { + return func(r *CCRFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_info.go new file mode 100644 index 000000000..e7df5f8de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_info.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newCCRFollowInfoFunc(t Transport) CCRFollowInfo { + return func(index []string, o ...func(*CCRFollowInfoRequest)) (*Response, error) { + var r = CCRFollowInfoRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRFollowInfo - Retrieves information about all follower indices, including parameters and status for each follower index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html. +type CCRFollowInfo func(index []string, o ...func(*CCRFollowInfoRequest)) (*Response, error) + +// CCRFollowInfoRequest configures the CCR Follow Info API request. 
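+//
+// A minimal sketch (illustrative, not generated; assumes the es.CCR wiring
+// described earlier): follower metadata for one or more indices can be
+// fetched with the callable wrapper rather than the struct directly:
+//
+//	res, err := es.CCR.FollowInfo([]string{"logs-follower"},
+//		es.CCR.FollowInfo.WithMasterTimeout(30*time.Second),
+//	)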
+type CCRFollowInfoRequest struct { + Index []string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRFollowInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_ccr") + 1 + len("info")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("info") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow_info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow_info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRFollowInfo) WithContext(v context.Context) func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRFollowInfo) WithMasterTimeout(v time.Duration) func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRFollowInfo) WithPretty() func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CCRFollowInfo) WithHuman() func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRFollowInfo) WithErrorTrace() func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRFollowInfo) WithFilterPath(v ...string) func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRFollowInfo) WithHeader(h map[string]string) func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRFollowInfo) WithOpaqueID(s string) func(*CCRFollowInfoRequest) { + return func(r *CCRFollowInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_stats.go new file mode 100644 index 000000000..c06d77bfb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.follow_stats.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newCCRFollowStatsFunc(t Transport) CCRFollowStats { + return func(index []string, o ...func(*CCRFollowStatsRequest)) (*Response, error) { + var r = CCRFollowStatsRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRFollowStats - Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html. +type CCRFollowStats func(index []string, o ...func(*CCRFollowStatsRequest)) (*Response, error) + +// CCRFollowStatsRequest configures the CCR Follow Stats API request. 
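+//
+// Illustrative sketch (assumes the es.CCR wiring described above; index
+// names are hypothetical): because the first argument is a slice, shard-level
+// stats for several followers can be requested in one call:
+//
+//	res, err := es.CCR.FollowStats([]string{"logs-a-follower", "logs-b-follower"},
+//		es.CCR.FollowStats.WithTimeout(10*time.Second),
+//	)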
+type CCRFollowStatsRequest struct { + Index []string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRFollowStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_ccr") + 1 + len("stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("stats") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRFollowStats) WithContext(v context.Context) func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.ctx = v + } +} + +// WithTimeout - explicit operation timeout. +func (f CCRFollowStats) WithTimeout(v time.Duration) func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRFollowStats) WithPretty() func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CCRFollowStats) WithHuman() func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRFollowStats) WithErrorTrace() func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRFollowStats) WithFilterPath(v ...string) func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRFollowStats) WithHeader(h map[string]string) func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRFollowStats) WithOpaqueID(s string) func(*CCRFollowStatsRequest) { + return func(r *CCRFollowStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.forget_follower.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.forget_follower.go new file mode 100644 index 000000000..5511c686c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.forget_follower.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newCCRForgetFollowerFunc(t Transport) CCRForgetFollower { + return func(index string, body io.Reader, o ...func(*CCRForgetFollowerRequest)) (*Response, error) { + var r = CCRForgetFollowerRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRForgetFollower - Removes the follower retention leases from the leader. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html. +type CCRForgetFollower func(index string, body io.Reader, o ...func(*CCRForgetFollowerRequest)) (*Response, error) + +// CCRForgetFollowerRequest configures the CCR Forget Follower API request. 
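+//
+// Illustrative sketch only; this call runs against the leader index, the
+// body fields follow the forget-follower request documented at the URL
+// above, and every value here is a placeholder:
+//
+//	body := strings.NewReader(`{
+//		"follower_cluster": "follower",
+//		"follower_index": "logs-follower",
+//		"follower_index_uuid": "<uuid from follow info>",
+//		"leader_remote_cluster": "leader"
+//	}`)
+//	res, err := es.CCR.ForgetFollower("logs", body)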
+type CCRForgetFollowerRequest struct { + Index string + + Body io.Reader + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRForgetFollowerRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.forget_follower") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ccr") + 1 + len("forget_follower")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("forget_follower") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.forget_follower") + if reader := instrument.RecordRequestBody(ctx, "ccr.forget_follower", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.forget_follower") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRForgetFollower) WithContext(v context.Context) func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.ctx = v + } +} + +// WithTimeout - explicit operation timeout. +func (f CCRForgetFollower) WithTimeout(v time.Duration) func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRForgetFollower) WithPretty() func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CCRForgetFollower) WithHuman() func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRForgetFollower) WithErrorTrace() func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRForgetFollower) WithFilterPath(v ...string) func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRForgetFollower) WithHeader(h map[string]string) func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRForgetFollower) WithOpaqueID(s string) func(*CCRForgetFollowerRequest) { + return func(r *CCRForgetFollowerRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.get_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.get_auto_follow_pattern.go new file mode 100644 index 000000000..6645b337a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.get_auto_follow_pattern.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRGetAutoFollowPatternFunc(t Transport) CCRGetAutoFollowPattern { + return func(o ...func(*CCRGetAutoFollowPatternRequest)) (*Response, error) { + var r = CCRGetAutoFollowPatternRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRGetAutoFollowPattern - Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html. +type CCRGetAutoFollowPattern func(o ...func(*CCRGetAutoFollowPatternRequest)) (*Response, error) + +// CCRGetAutoFollowPatternRequest configures the CCR Get Auto Follow Pattern API request. 
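+//
+// Since the pattern name is optional, both forms below are valid; the
+// sketch assumes an embedding client es and is illustrative only:
+//
+//	res, err := es.CCR.GetAutoFollowPattern() // all patterns
+//	res, err = es.CCR.GetAutoFollowPattern(
+//		es.CCR.GetAutoFollowPattern.WithName("logs-pattern"), // one pattern
+//	)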
+type CCRGetAutoFollowPatternRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRGetAutoFollowPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.get_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ccr") + 1 + len("auto_follow") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.get_auto_follow_pattern") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.get_auto_follow_pattern") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRGetAutoFollowPattern) WithContext(v context.Context) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.ctx = v + } +} + +// WithName - the name of the auto follow pattern.. +func (f CCRGetAutoFollowPattern) WithName(v string) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.Name = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRGetAutoFollowPattern) WithMasterTimeout(v time.Duration) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f CCRGetAutoFollowPattern) WithPretty() func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRGetAutoFollowPattern) WithHuman() func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRGetAutoFollowPattern) WithErrorTrace() func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRGetAutoFollowPattern) WithFilterPath(v ...string) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRGetAutoFollowPattern) WithHeader(h map[string]string) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRGetAutoFollowPattern) WithOpaqueID(s string) func(*CCRGetAutoFollowPatternRequest) { + return func(r *CCRGetAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_auto_follow_pattern.go new file mode 100644 index 000000000..7028ffb7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_auto_follow_pattern.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRPauseAutoFollowPatternFunc(t Transport) CCRPauseAutoFollowPattern { + return func(name string, o ...func(*CCRPauseAutoFollowPatternRequest)) (*Response, error) { + var r = CCRPauseAutoFollowPatternRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRPauseAutoFollowPattern - Pauses an auto-follow pattern +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html. 
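+//
+// Illustrative sketch (the pattern name and client wiring are assumptions,
+// as in the sketches above); note that the esapi Response exposes IsError
+// for non-2xx replies:
+//
+//	res, err := es.CCR.PauseAutoFollowPattern("logs-pattern")
+//	if err == nil && res.IsError() {
+//		// the cluster rejected the request; inspect res.String()
+//	}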
+type CCRPauseAutoFollowPattern func(name string, o ...func(*CCRPauseAutoFollowPatternRequest)) (*Response, error) + +// CCRPauseAutoFollowPatternRequest configures the CCR Pause Auto Follow Pattern API request. +type CCRPauseAutoFollowPatternRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRPauseAutoFollowPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ccr") + 1 + len("auto_follow") + 1 + len(r.Name) + 1 + len("pause")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("pause") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.pause_auto_follow_pattern") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.pause_auto_follow_pattern") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRPauseAutoFollowPattern) WithContext(v context.Context) func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRPauseAutoFollowPattern) WithMasterTimeout(v time.Duration) func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f CCRPauseAutoFollowPattern) WithPretty() func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRPauseAutoFollowPattern) WithHuman() func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRPauseAutoFollowPattern) WithErrorTrace() func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRPauseAutoFollowPattern) WithFilterPath(v ...string) func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRPauseAutoFollowPattern) WithHeader(h map[string]string) func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRPauseAutoFollowPattern) WithOpaqueID(s string) func(*CCRPauseAutoFollowPatternRequest) { + return func(r *CCRPauseAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_follow.go new file mode 100644 index 000000000..23e5c469e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.pause_follow.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRPauseFollowFunc(t Transport) CCRPauseFollow { + return func(index string, o ...func(*CCRPauseFollowRequest)) (*Response, error) { + var r = CCRPauseFollowRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRPauseFollow - Pauses a follower index. The follower index will not fetch any additional operations from the leader index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html. 
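+//
+// Illustrative sketch: pausing replication for a single follower index
+// (names and wiring are assumptions, as in the sketches above):
+//
+//	res, err := es.CCR.PauseFollow("logs-follower",
+//		es.CCR.PauseFollow.WithMasterTimeout(30*time.Second),
+//	)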
+type CCRPauseFollow func(index string, o ...func(*CCRPauseFollowRequest)) (*Response, error) + +// CCRPauseFollowRequest configures the CCR Pause Follow API request. +type CCRPauseFollowRequest struct { + Index string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRPauseFollowRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ccr") + 1 + len("pause_follow")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("pause_follow") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.pause_follow") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.pause_follow") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRPauseFollow) WithContext(v context.Context) func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRPauseFollow) WithMasterTimeout(v time.Duration) func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRPauseFollow) WithPretty() func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CCRPauseFollow) WithHuman() func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRPauseFollow) WithErrorTrace() func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRPauseFollow) WithFilterPath(v ...string) func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRPauseFollow) WithHeader(h map[string]string) func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRPauseFollow) WithOpaqueID(s string) func(*CCRPauseFollowRequest) { + return func(r *CCRPauseFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.put_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.put_auto_follow_pattern.go new file mode 100644 index 000000000..08e145356 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.put_auto_follow_pattern.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newCCRPutAutoFollowPatternFunc(t Transport) CCRPutAutoFollowPattern { + return func(name string, body io.Reader, o ...func(*CCRPutAutoFollowPatternRequest)) (*Response, error) { + var r = CCRPutAutoFollowPatternRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRPutAutoFollowPattern - Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html. 
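+//
+// Illustrative sketch; the JSON fields mirror the auto-follow settings in
+// the documentation linked above, and every value is a placeholder:
+//
+//	body := strings.NewReader(`{
+//		"remote_cluster": "leader",
+//		"leader_index_patterns": ["logs-*"],
+//		"follow_index_pattern": "{{leader_index}}-follower"
+//	}`)
+//	res, err := es.CCR.PutAutoFollowPattern("logs-pattern", body)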
+type CCRPutAutoFollowPattern func(name string, body io.Reader, o ...func(*CCRPutAutoFollowPatternRequest)) (*Response, error) + +// CCRPutAutoFollowPatternRequest configures the CCR Put Auto Follow Pattern API request. +type CCRPutAutoFollowPatternRequest struct { + Body io.Reader + + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRPutAutoFollowPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.put_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ccr") + 1 + len("auto_follow") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.put_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.put_auto_follow_pattern", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.put_auto_follow_pattern") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRPutAutoFollowPattern) WithContext(v context.Context) func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f CCRPutAutoFollowPattern) WithMasterTimeout(v time.Duration) func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRPutAutoFollowPattern) WithPretty() func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRPutAutoFollowPattern) WithHuman() func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRPutAutoFollowPattern) WithErrorTrace() func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRPutAutoFollowPattern) WithFilterPath(v ...string) func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRPutAutoFollowPattern) WithHeader(h map[string]string) func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRPutAutoFollowPattern) WithOpaqueID(s string) func(*CCRPutAutoFollowPatternRequest) { + return func(r *CCRPutAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_auto_follow_pattern.go new file mode 100644 index 000000000..014264505 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_auto_follow_pattern.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRResumeAutoFollowPatternFunc(t Transport) CCRResumeAutoFollowPattern { + return func(name string, o ...func(*CCRResumeAutoFollowPatternRequest)) (*Response, error) { + var r = CCRResumeAutoFollowPatternRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRResumeAutoFollowPattern - Resumes an auto-follow pattern that has been paused +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html. +type CCRResumeAutoFollowPattern func(name string, o ...func(*CCRResumeAutoFollowPatternRequest)) (*Response, error) + +// CCRResumeAutoFollowPatternRequest configures the CCR Resume Auto Follow Pattern API request. +type CCRResumeAutoFollowPatternRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRResumeAutoFollowPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.resume_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ccr") + 1 + len("auto_follow") + 1 + len(r.Name) + 1 + len("resume")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("resume") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.resume_auto_follow_pattern") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.resume_auto_follow_pattern") + } + if err 
!= nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRResumeAutoFollowPattern) WithContext(v context.Context) func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRResumeAutoFollowPattern) WithMasterTimeout(v time.Duration) func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRResumeAutoFollowPattern) WithPretty() func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRResumeAutoFollowPattern) WithHuman() func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRResumeAutoFollowPattern) WithErrorTrace() func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRResumeAutoFollowPattern) WithFilterPath(v ...string) func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRResumeAutoFollowPattern) WithHeader(h map[string]string) func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRResumeAutoFollowPattern) WithOpaqueID(s string) func(*CCRResumeAutoFollowPatternRequest) { + return func(r *CCRResumeAutoFollowPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_follow.go new file mode 100644 index 000000000..4c9a2b18c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.resume_follow.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newCCRResumeFollowFunc(t Transport) CCRResumeFollow { + return func(index string, o ...func(*CCRResumeFollowRequest)) (*Response, error) { + var r = CCRResumeFollowRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRResumeFollow - Resumes a follower index that has been paused +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html. +type CCRResumeFollow func(index string, o ...func(*CCRResumeFollowRequest)) (*Response, error) + +// CCRResumeFollowRequest configures the CCR Resume Follow API request. +type CCRResumeFollowRequest struct { + Index string + + Body io.Reader + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRResumeFollowRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.resume_follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ccr") + 1 + len("resume_follow")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("resume_follow") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.resume_follow") + if reader := instrument.RecordRequestBody(ctx, "ccr.resume_follow", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, 
"elasticsearch", "ccr.resume_follow") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRResumeFollow) WithContext(v context.Context) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.ctx = v + } +} + +// WithBody - The name of the leader index and other optional ccr related parameters. +func (f CCRResumeFollow) WithBody(v io.Reader) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.Body = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRResumeFollow) WithMasterTimeout(v time.Duration) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRResumeFollow) WithPretty() func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRResumeFollow) WithHuman() func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRResumeFollow) WithErrorTrace() func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRResumeFollow) WithFilterPath(v ...string) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRResumeFollow) WithHeader(h map[string]string) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRResumeFollow) WithOpaqueID(s string) func(*CCRResumeFollowRequest) { + return func(r *CCRResumeFollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.stats.go new file mode 100644 index 000000000..27dcea2f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.stats.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRStatsFunc(t Transport) CCRStats { + return func(o ...func(*CCRStatsRequest)) (*Response, error) { + var r = CCRStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRStats - Gets all stats related to cross-cluster replication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html. +type CCRStats func(o ...func(*CCRStatsRequest)) (*Response, error) + +// CCRStatsRequest configures the CCR Stats API request. +type CCRStatsRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_ccr/stats")) + path.WriteString("http://") + path.WriteString("/_ccr/stats") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRStats) WithContext(v context.Context) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
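+//
+// For example (a hypothetical sketch; assumes an initialized
+// *elasticsearch.Client named es):
+//
+//	res, err := es.CCR.Stats(es.CCR.Stats.WithMasterTimeout(10 * time.Second))
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()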
+func (f CCRStats) WithMasterTimeout(v time.Duration) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f CCRStats) WithTimeout(v time.Duration) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRStats) WithPretty() func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f CCRStats) WithHuman() func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRStats) WithErrorTrace() func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRStats) WithFilterPath(v ...string) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRStats) WithHeader(h map[string]string) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRStats) WithOpaqueID(s string) func(*CCRStatsRequest) { + return func(r *CCRStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.unfollow.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.unfollow.go new file mode 100644 index 000000000..ea133d4a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ccr.unfollow.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newCCRUnfollowFunc(t Transport) CCRUnfollow { + return func(index string, o ...func(*CCRUnfollowRequest)) (*Response, error) { + var r = CCRUnfollowRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// CCRUnfollow - Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. 
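+//
+// Example (a hypothetical sketch; assumes an initialized *elasticsearch.Client
+// named es, and that the follower index has already been paused and closed):
+//
+//	res, err := es.CCR.Unfollow("follower-index")
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()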
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html. +type CCRUnfollow func(index string, o ...func(*CCRUnfollowRequest)) (*Response, error) + +// CCRUnfollowRequest configures the CCR Unfollow API request. +type CCRUnfollowRequest struct { + Index string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r CCRUnfollowRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.unfollow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ccr") + 1 + len("unfollow")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("unfollow") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.unfollow") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.unfollow") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f CCRUnfollow) WithContext(v context.Context) func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f CCRUnfollow) WithMasterTimeout(v time.Duration) func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f CCRUnfollow) WithPretty() func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f CCRUnfollow) WithHuman() func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f CCRUnfollow) WithErrorTrace() func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f CCRUnfollow) WithFilterPath(v ...string) func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f CCRUnfollow) WithHeader(h map[string]string) func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f CCRUnfollow) WithOpaqueID(s string) func(*CCRUnfollowRequest) { + return func(r *CCRUnfollowRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.close_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.close_point_in_time.go new file mode 100644 index 000000000..dcb6a948c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.close_point_in_time.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newClosePointInTimeFunc(t Transport) ClosePointInTime { + return func(o ...func(*ClosePointInTimeRequest)) (*Response, error) { + var r = ClosePointInTimeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClosePointInTime - Close a point in time +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html. +type ClosePointInTime func(o ...func(*ClosePointInTimeRequest)) (*Response, error) + +// ClosePointInTimeRequest configures the Close Point In Time API request. +type ClosePointInTimeRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
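+//
+// For example, closing a previously opened point in time by ID (a hypothetical
+// sketch; assumes an initialized *elasticsearch.Client named es and the PIT ID
+// in a string named pitID):
+//
+//	res, err := es.ClosePointInTime(
+//		es.ClosePointInTime.WithBody(strings.NewReader(`{"id":"` + pitID + `"}`)),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()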
+func (r ClosePointInTimeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "close_point_in_time") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_pit")) + path.WriteString("http://") + path.WriteString("/_pit") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "close_point_in_time") + if reader := instrument.RecordRequestBody(ctx, "close_point_in_time", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "close_point_in_time") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClosePointInTime) WithContext(v context.Context) func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.ctx = v + } +} + +// WithBody - a point-in-time id to close. +func (f ClosePointInTime) WithBody(v io.Reader) func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClosePointInTime) WithPretty() func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ClosePointInTime) WithHuman() func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClosePointInTime) WithErrorTrace() func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClosePointInTime) WithFilterPath(v ...string) func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f ClosePointInTime) WithHeader(h map[string]string) func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClosePointInTime) WithOpaqueID(s string) func(*ClosePointInTimeRequest) { + return func(r *ClosePointInTimeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.delete_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.delete_policy.go new file mode 100644 index 000000000..978bb7807 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.delete_policy.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newEnrichDeletePolicyFunc(t Transport) EnrichDeletePolicy { + return func(name string, o ...func(*EnrichDeletePolicyRequest)) (*Response, error) { + var r = EnrichDeletePolicyRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EnrichDeletePolicy - Deletes an existing enrich policy and its enrich index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html. +type EnrichDeletePolicy func(name string, o ...func(*EnrichDeletePolicyRequest)) (*Response, error) + +// EnrichDeletePolicyRequest configures the Enrich Delete Policy API request. +type EnrichDeletePolicyRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
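+//
+// For example (a hypothetical sketch; assumes an initialized
+// *elasticsearch.Client named es and an existing policy named "users-policy"):
+//
+//	res, err := es.Enrich.DeletePolicy("users-policy")
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()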
+func (r EnrichDeletePolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.delete_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_enrich") + 1 + len("policy") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.delete_policy") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.delete_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EnrichDeletePolicy) WithContext(v context.Context) func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f EnrichDeletePolicy) WithMasterTimeout(v time.Duration) func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EnrichDeletePolicy) WithPretty() func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EnrichDeletePolicy) WithHuman() func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EnrichDeletePolicy) WithErrorTrace() func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f EnrichDeletePolicy) WithFilterPath(v ...string) func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EnrichDeletePolicy) WithHeader(h map[string]string) func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EnrichDeletePolicy) WithOpaqueID(s string) func(*EnrichDeletePolicyRequest) { + return func(r *EnrichDeletePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.execute_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.execute_policy.go new file mode 100644 index 000000000..185237a55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.execute_policy.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newEnrichExecutePolicyFunc(t Transport) EnrichExecutePolicy { + return func(name string, o ...func(*EnrichExecutePolicyRequest)) (*Response, error) { + var r = EnrichExecutePolicyRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EnrichExecutePolicy - Creates the enrich index for an existing enrich policy. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html. +type EnrichExecutePolicy func(name string, o ...func(*EnrichExecutePolicyRequest)) (*Response, error) + +// EnrichExecutePolicyRequest configures the Enrich Execute Policy API request. +type EnrichExecutePolicyRequest struct { + Name string + + MasterTimeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
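+//
+// For example, executing a policy and blocking until its enrich index has been
+// built (a hypothetical sketch; assumes an initialized *elasticsearch.Client
+// named es):
+//
+//	res, err := es.Enrich.ExecutePolicy(
+//		"users-policy",
+//		es.Enrich.ExecutePolicy.WithWaitForCompletion(true),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()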
+func (r EnrichExecutePolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.execute_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_enrich") + 1 + len("policy") + 1 + len(r.Name) + 1 + len("_execute")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("_execute") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.execute_policy") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.execute_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EnrichExecutePolicy) WithContext(v context.Context) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f EnrichExecutePolicy) WithMasterTimeout(v time.Duration) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.MasterTimeout = v + } +} + +// WithWaitForCompletion - whether the request should block until the execution is complete. +func (f EnrichExecutePolicy) WithWaitForCompletion(v bool) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EnrichExecutePolicy) WithPretty() func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable.
+func (f EnrichExecutePolicy) WithHuman() func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EnrichExecutePolicy) WithErrorTrace() func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EnrichExecutePolicy) WithFilterPath(v ...string) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EnrichExecutePolicy) WithHeader(h map[string]string) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EnrichExecutePolicy) WithOpaqueID(s string) func(*EnrichExecutePolicyRequest) { + return func(r *EnrichExecutePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.get_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.get_policy.go new file mode 100644 index 000000000..afbf3820a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.get_policy.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newEnrichGetPolicyFunc(t Transport) EnrichGetPolicy { + return func(o ...func(*EnrichGetPolicyRequest)) (*Response, error) { + var r = EnrichGetPolicyRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EnrichGetPolicy - Gets information about an enrich policy. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html. +type EnrichGetPolicy func(o ...func(*EnrichGetPolicyRequest)) (*Response, error) + +// EnrichGetPolicyRequest configures the Enrich Get Policy API request. 
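+//
+// A hypothetical usage sketch (assumes an initialized *elasticsearch.Client
+// named es; omitting WithName lists all enrich policies):
+//
+//	res, err := es.Enrich.GetPolicy(
+//		es.Enrich.GetPolicy.WithName("users-policy"),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()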
+type EnrichGetPolicyRequest struct { + Name []string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r EnrichGetPolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.get_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_enrich") + 1 + len("policy") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.get_policy") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.get_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EnrichGetPolicy) WithContext(v context.Context) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.ctx = v + } +} + +// WithName - a list of enrich policy names. +func (f EnrichGetPolicy) WithName(v ...string) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.Name = v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f EnrichGetPolicy) WithMasterTimeout(v time.Duration) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f EnrichGetPolicy) WithPretty() func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EnrichGetPolicy) WithHuman() func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EnrichGetPolicy) WithErrorTrace() func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EnrichGetPolicy) WithFilterPath(v ...string) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EnrichGetPolicy) WithHeader(h map[string]string) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EnrichGetPolicy) WithOpaqueID(s string) func(*EnrichGetPolicyRequest) { + return func(r *EnrichGetPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.put_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.put_policy.go new file mode 100644 index 000000000..52ca1d7d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.put_policy.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newEnrichPutPolicyFunc(t Transport) EnrichPutPolicy { + return func(name string, body io.Reader, o ...func(*EnrichPutPolicyRequest)) (*Response, error) { + var r = EnrichPutPolicyRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EnrichPutPolicy - Creates a new enrich policy. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html. +type EnrichPutPolicy func(name string, body io.Reader, o ...func(*EnrichPutPolicyRequest)) (*Response, error) + +// EnrichPutPolicyRequest configures the Enrich Put Policy API request. 
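+//
+// A hypothetical usage sketch (assumes an initialized *elasticsearch.Client
+// named es; the body below follows the documented match-policy shape):
+//
+//	body := strings.NewReader(`{
+//	  "match": {
+//	    "indices": "users",
+//	    "match_field": "email",
+//	    "enrich_fields": ["first_name", "last_name"]
+//	  }
+//	}`)
+//	res, err := es.Enrich.PutPolicy("users-policy", body)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()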
+type EnrichPutPolicyRequest struct { + Body io.Reader + + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r EnrichPutPolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.put_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_enrich") + 1 + len("policy") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.put_policy") + if reader := instrument.RecordRequestBody(ctx, "enrich.put_policy", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.put_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EnrichPutPolicy) WithContext(v context.Context) func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f EnrichPutPolicy) WithMasterTimeout(v time.Duration) func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EnrichPutPolicy) WithPretty() func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f EnrichPutPolicy) WithHuman() func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EnrichPutPolicy) WithErrorTrace() func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EnrichPutPolicy) WithFilterPath(v ...string) func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EnrichPutPolicy) WithHeader(h map[string]string) func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EnrichPutPolicy) WithOpaqueID(s string) func(*EnrichPutPolicyRequest) { + return func(r *EnrichPutPolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.stats.go new file mode 100644 index 000000000..950dbc6d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.enrich.stats.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newEnrichStatsFunc(t Transport) EnrichStats { + return func(o ...func(*EnrichStatsRequest)) (*Response, error) { + var r = EnrichStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EnrichStats - Gets enrich coordinator statistics and information about enrich policies that are currently executing. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html. +type EnrichStats func(o ...func(*EnrichStatsRequest)) (*Response, error) + +// EnrichStatsRequest configures the Enrich Stats API request. +type EnrichStatsRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
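+//
+// For example (a hypothetical sketch; assumes an initialized
+// *elasticsearch.Client named es):
+//
+//	res, err := es.Enrich.Stats(es.Enrich.Stats.WithHuman())
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()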
+func (r EnrichStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_enrich/_stats")) + path.WriteString("http://") + path.WriteString("/_enrich/_stats") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EnrichStats) WithContext(v context.Context) func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for waiting for new cluster state in case it is blocked. +func (f EnrichStats) WithMasterTimeout(v time.Duration) func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EnrichStats) WithPretty() func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EnrichStats) WithHuman() func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EnrichStats) WithErrorTrace() func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EnrichStats) WithFilterPath(v ...string) func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f EnrichStats) WithHeader(h map[string]string) func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EnrichStats) WithOpaqueID(s string) func(*EnrichStatsRequest) { + return func(r *EnrichStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.delete.go new file mode 100644 index 000000000..8c815611e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.delete.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEqlDeleteFunc(t Transport) EqlDelete { + return func(id string, o ...func(*EqlDeleteRequest)) (*Response, error) { + var r = EqlDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EqlDelete - Deletes an async EQL search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html. +type EqlDelete func(id string, o ...func(*EqlDeleteRequest)) (*Response, error) + +// EqlDeleteRequest configures the Eql Delete API request. +type EqlDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EqlDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_eql") + 1 + len("search") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "eql.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EqlDelete) WithContext(v context.Context) func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EqlDelete) WithPretty() func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EqlDelete) WithHuman() func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EqlDelete) WithErrorTrace() func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EqlDelete) WithFilterPath(v ...string) func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EqlDelete) WithHeader(h map[string]string) func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
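A sketch of cancelling or deleting a stored async EQL search by its ID, following the semantics documented above (the ID is a placeholder; `es` is an *elasticsearch.Client from this module):

package main

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// deleteAsyncEQL cancels a running async EQL search, or removes its stored
// results if it has already finished.
func deleteAsyncEQL(es *elasticsearch.Client, id string) error {
	res, err := es.Eql.Delete(id, es.Eql.Delete.WithContext(context.Background()))
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("eql delete %q: %s", id, res.Status())
	}
	return nil
}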
+func (f EqlDelete) WithOpaqueID(s string) func(*EqlDeleteRequest) { + return func(r *EqlDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get.go new file mode 100644 index 000000000..1f2c854a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newEqlGetFunc(t Transport) EqlGet { + return func(id string, o ...func(*EqlGetRequest)) (*Response, error) { + var r = EqlGetRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EqlGet - Returns async results from previously executed Event Query Language (EQL) search +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html. +type EqlGet func(id string, o ...func(*EqlGetRequest)) (*Response, error) + +// EqlGetRequest configures the Eql Get API request. +type EqlGetRequest struct { + DocumentID string + + KeepAlive time.Duration + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EqlGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_eql") + 1 + len("search") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "eql.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EqlGet) WithContext(v context.Context) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.ctx = v + } +} + +// WithKeepAlive - update the time interval in which the results (partial or final) for this search will be available. +func (f EqlGet) WithKeepAlive(v time.Duration) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.KeepAlive = v + } +} + +// WithWaitForCompletionTimeout - specify the time that the request should block waiting for the final response. +func (f EqlGet) WithWaitForCompletionTimeout(v time.Duration) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EqlGet) WithPretty() func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EqlGet) WithHuman() func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
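A sketch of fetching the results of a previously started async EQL search, blocking briefly for completion; the ID and durations are placeholders:

package main

import (
	"context"
	"log"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func fetchAsyncEQL(es *elasticsearch.Client, id string) {
	res, err := es.Eql.Get(id,
		es.Eql.Get.WithContext(context.Background()),
		// Block up to 2s for the final response; otherwise the async state returns.
		es.Eql.Get.WithWaitForCompletionTimeout(2*time.Second),
		// Extend how long the results stay retrievable.
		es.Eql.Get.WithKeepAlive(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("eql get: %s", err)
	}
	defer res.Body.Close()
	log.Println(res)
}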
+func (f EqlGet) WithErrorTrace() func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EqlGet) WithFilterPath(v ...string) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EqlGet) WithHeader(h map[string]string) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EqlGet) WithOpaqueID(s string) func(*EqlGetRequest) { + return func(r *EqlGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get_status.go new file mode 100644 index 000000000..303d982bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.get_status.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEqlGetStatusFunc(t Transport) EqlGetStatus { + return func(id string, o ...func(*EqlGetStatusRequest)) (*Response, error) { + var r = EqlGetStatusRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EqlGetStatus - Returns the status of a previously submitted async or stored Event Query Language (EQL) search +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html. +type EqlGetStatus func(id string, o ...func(*EqlGetStatusRequest)) (*Response, error) + +// EqlGetStatusRequest configures the Eql Get Status API request. +type EqlGetStatusRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EqlGetStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_eql") + 1 + len("search") + 1 + len("status") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + path.WriteString("status") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "eql.get_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.get_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EqlGetStatus) WithContext(v context.Context) func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EqlGetStatus) WithPretty() func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EqlGetStatus) WithHuman() func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EqlGetStatus) WithErrorTrace() func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EqlGetStatus) WithFilterPath(v ...string) func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
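A sketch of a cheap status poll, decoding only the lifecycle fields of the response (field names follow the EQL status API; the ID is a placeholder):

package main

import (
	"context"
	"encoding/json"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func pollEQLStatus(es *elasticsearch.Client, id string) (running bool, err error) {
	res, err := es.Eql.GetStatus(id, es.Eql.GetStatus.WithContext(context.Background()))
	if err != nil {
		return false, err
	}
	defer res.Body.Close()

	// Decode just the fields needed to decide whether to keep polling.
	var status struct {
		ID        string `json:"id"`
		IsRunning bool   `json:"is_running"`
		IsPartial bool   `json:"is_partial"`
	}
	if err := json.NewDecoder(res.Body).Decode(&status); err != nil {
		return false, err
	}
	log.Printf("search %s running=%v partial=%v", status.ID, status.IsRunning, status.IsPartial)
	return status.IsRunning, nil
}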
+func (f EqlGetStatus) WithHeader(h map[string]string) func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EqlGetStatus) WithOpaqueID(s string) func(*EqlGetStatusRequest) { + return func(r *EqlGetStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.search.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.search.go new file mode 100644 index 000000000..c467e17d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.eql.search.go @@ -0,0 +1,346 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newEqlSearchFunc(t Transport) EqlSearch { + return func(index string, body io.Reader, o ...func(*EqlSearchRequest)) (*Response, error) { + var r = EqlSearchRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EqlSearch - Returns results matching a query expressed in Event Query Language (EQL) +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html. +type EqlSearch func(index string, body io.Reader, o ...func(*EqlSearchRequest)) (*Response, error) + +// EqlSearchRequest configures the Eql Search API request. +type EqlSearchRequest struct { + Index string + + Body io.Reader + + AllowNoIndices *bool + AllowPartialSearchResults *bool + AllowPartialSequenceResults *bool + CcsMinimizeRoundtrips *bool + ExpandWildcards string + IgnoreUnavailable *bool + KeepAlive time.Duration + KeepOnCompletion *bool + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EqlSearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_eql") + 1 + len("search")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.AllowPartialSequenceResults != nil { + params["allow_partial_sequence_results"] = strconv.FormatBool(*r.AllowPartialSequenceResults) + } + + if r.CcsMinimizeRoundtrips != nil { + params["ccs_minimize_roundtrips"] = strconv.FormatBool(*r.CcsMinimizeRoundtrips) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.KeepOnCompletion != nil { + params["keep_on_completion"] = strconv.FormatBool(*r.KeepOnCompletion) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "eql.search") + if reader := instrument.RecordRequestBody(ctx, "eql.search", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.search") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f EqlSearch) WithContext(v context.Context) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f EqlSearch) WithAllowNoIndices(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowNoIndices = &v + } +} + +// WithAllowPartialSearchResults - control whether the query should keep running in case of shard failures, and return partial results. +func (f EqlSearch) WithAllowPartialSearchResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithAllowPartialSequenceResults - control whether a sequence query should return partial results or no results at all in case of shard failures. this option has effect only if [allow_partial_search_results] is true.. +func (f EqlSearch) WithAllowPartialSequenceResults(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.AllowPartialSequenceResults = &v + } +} + +// WithCcsMinimizeRoundtrips - indicates whether network round-trips should be minimized as part of cross-cluster search requests execution. +func (f EqlSearch) WithCcsMinimizeRoundtrips(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.CcsMinimizeRoundtrips = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f EqlSearch) WithExpandWildcards(v string) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f EqlSearch) WithIgnoreUnavailable(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithKeepAlive - update the time interval in which the results (partial or final) for this search will be available. +func (f EqlSearch) WithKeepAlive(v time.Duration) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.KeepAlive = v + } +} + +// WithKeepOnCompletion - control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false). +func (f EqlSearch) WithKeepOnCompletion(v bool) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.KeepOnCompletion = &v + } +} + +// WithWaitForCompletionTimeout - specify the time that the request should block waiting for the final response. +func (f EqlSearch) WithWaitForCompletionTimeout(v time.Duration) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EqlSearch) WithPretty() func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EqlSearch) WithHuman() func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EqlSearch) WithErrorTrace() func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
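A sketch of submitting an EQL query and keeping the result retrievable by ID; index name, query, and timeouts are placeholders. keep_on_completion pairs with the EqlGet/EqlGetStatus/EqlDelete calls above:

package main

import (
	"context"
	"log"
	"strings"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader(`{
	  "query": "process where process.name == \"regsvr32.exe\""
	}`)

	res, err := es.Eql.Search("my-index", body,
		es.Eql.Search.WithContext(context.Background()),
		// Return within 2s; if not finished, the response carries an async search ID.
		es.Eql.Search.WithWaitForCompletionTimeout(2*time.Second),
		// Keep results stored even if the search completes within the timeout.
		es.Eql.Search.WithKeepOnCompletion(true),
		es.Eql.Search.WithKeepAlive(10*time.Minute),
	)
	if err != nil {
		log.Fatalf("eql search: %s", err)
	}
	defer res.Body.Close()
	log.Println(res)
}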
+func (f EqlSearch) WithFilterPath(v ...string) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EqlSearch) WithHeader(h map[string]string) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EqlSearch) WithOpaqueID(s string) func(*EqlSearchRequest) { + return func(r *EqlSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query.go new file mode 100644 index 000000000..57787a0ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newEsqlAsyncQueryFunc(t Transport) EsqlAsyncQuery { + return func(body io.Reader, o ...func(*EsqlAsyncQueryRequest)) (*Response, error) { + var r = EsqlAsyncQueryRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQuery - Executes an ESQL request asynchronously +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-api.html. +type EsqlAsyncQuery func(body io.Reader, o ...func(*EsqlAsyncQueryRequest)) (*Response, error) + +// EsqlAsyncQueryRequest configures the Esql Async Query API request. +type EsqlAsyncQueryRequest struct { + Body io.Reader + + AllowPartialResults *bool + Delimiter string + DropNullColumns *bool + Format string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_query/async")) + path.WriteString("http://") + path.WriteString("/_query/async") + + params = make(map[string]string) + + if r.AllowPartialResults != nil { + params["allow_partial_results"] = strconv.FormatBool(*r.AllowPartialResults) + } + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.DropNullColumns != nil { + params["drop_null_columns"] = strconv.FormatBool(*r.DropNullColumns) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQuery) WithContext(v context.Context) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.ctx = v + } +} + +// WithAllowPartialResults - if `true`, partial results will be returned if there are shard failures, butthe query can continue to execute on other clusters and shards.if `false`, the entire query will fail if there areany failures.. +func (f EsqlAsyncQuery) WithAllowPartialResults(v bool) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.AllowPartialResults = &v + } +} + +// WithDelimiter - the character to use between values within a csv row. only valid for the csv format.. +func (f EsqlAsyncQuery) WithDelimiter(v string) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.Delimiter = v + } +} + +// WithDropNullColumns - should entirely null columns be removed from the results? their name and type will be returning in a new `all_columns` section.. 
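A sketch of starting an ES|QL query asynchronously; the query string and index are placeholders, and the `es.Esql.AsyncQuery` field name assumes the generator's usual namespace wiring. While the query is still running, the JSON body carries an `id` to use with the async-query get/stop/delete endpoints below:

package main

import (
	"context"
	"encoding/json"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader(`{"query": "FROM my-index | STATS n = COUNT(*) BY host | LIMIT 10"}`)

	res, err := es.Esql.AsyncQuery(body,
		es.Esql.AsyncQuery.WithContext(context.Background()),
		es.Esql.AsyncQuery.WithFormat("json"),
	)
	if err != nil {
		log.Fatalf("esql async query: %s", err)
	}
	defer res.Body.Close()

	// Pull out the async ID (present while the query is still running).
	var out struct {
		ID        string `json:"id"`
		IsRunning bool   `json:"is_running"`
	}
	if err := json.NewDecoder(res.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	log.Printf("id=%s running=%v", out.ID, out.IsRunning)
}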
+func (f EsqlAsyncQuery) WithDropNullColumns(v bool) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.DropNullColumns = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f EsqlAsyncQuery) WithFormat(v string) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.Format = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQuery) WithPretty() func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQuery) WithHuman() func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQuery) WithErrorTrace() func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQuery) WithFilterPath(v ...string) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQuery) WithHeader(h map[string]string) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EsqlAsyncQuery) WithOpaqueID(s string) func(*EsqlAsyncQueryRequest) { + return func(r *EsqlAsyncQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_delete.go new file mode 100644 index 000000000..19c77c978 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_delete.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryDeleteFunc(t Transport) EsqlAsyncQueryDelete { + return func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) { + var r = EsqlAsyncQueryDeleteRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryDelete - Delete an async query request given its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html. +type EsqlAsyncQueryDelete func(id string, o ...func(*EsqlAsyncQueryDeleteRequest)) (*Response, error) + +// EsqlAsyncQueryDeleteRequest configures the Esql Async Query Delete API request. +type EsqlAsyncQueryDeleteRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r EsqlAsyncQueryDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request 
context. +func (f EsqlAsyncQueryDelete) WithContext(v context.Context) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryDelete) WithPretty() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryDelete) WithHuman() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryDelete) WithErrorTrace() func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryDelete) WithFilterPath(v ...string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryDelete) WithHeader(h map[string]string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EsqlAsyncQueryDelete) WithOpaqueID(s string) func(*EsqlAsyncQueryDeleteRequest) { + return func(r *EsqlAsyncQueryDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_get.go new file mode 100644 index 000000000..69c2ec8cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_get.go @@ -0,0 +1,276 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newEsqlAsyncQueryGetFunc(t Transport) EsqlAsyncQueryGet { + return func(id string, o ...func(*EsqlAsyncQueryGetRequest)) (*Response, error) { + var r = EsqlAsyncQueryGetRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryGet - Retrieves the results of a previously submitted async query request given its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-get-api.html. +type EsqlAsyncQueryGet func(id string, o ...func(*EsqlAsyncQueryGetRequest)) (*Response, error) + +// EsqlAsyncQueryGetRequest configures the Esql Async Query Get API request. +type EsqlAsyncQueryGetRequest struct { + DocumentID string + + DropNullColumns *bool + Format string + KeepAlive time.Duration + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r EsqlAsyncQueryGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.DropNullColumns != nil { + params["drop_null_columns"] = strconv.FormatBool(*r.DropNullColumns) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"esql.async_query_get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryGet) WithContext(v context.Context) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.ctx = v + } +} + +// WithDropNullColumns - should entirely null columns be removed from the results? their name and type will be returning in a new `all_columns` section.. +func (f EsqlAsyncQueryGet) WithDropNullColumns(v bool) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.DropNullColumns = &v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f EsqlAsyncQueryGet) WithFormat(v string) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.Format = v + } +} + +// WithKeepAlive - specify the time interval in which the results (partial or final) for this search will be available. +func (f EsqlAsyncQueryGet) WithKeepAlive(v time.Duration) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.KeepAlive = v + } +} + +// WithWaitForCompletionTimeout - specify the time that the request should block waiting for the final response. +func (f EsqlAsyncQueryGet) WithWaitForCompletionTimeout(v time.Duration) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryGet) WithPretty() func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryGet) WithHuman() func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryGet) WithErrorTrace() func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryGet) WithFilterPath(v ...string) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlAsyncQueryGet) WithHeader(h map[string]string) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f EsqlAsyncQueryGet) WithOpaqueID(s string) func(*EsqlAsyncQueryGetRequest) { + return func(r *EsqlAsyncQueryGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_stop.go new file mode 100644 index 000000000..2482148b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.async_query_stop.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlAsyncQueryStopFunc(t Transport) EsqlAsyncQueryStop { + return func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) { + var r = EsqlAsyncQueryStopRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlAsyncQueryStop - Stops a previously submitted async query request given its ID and collects the results. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html. +type EsqlAsyncQueryStop func(id string, o ...func(*EsqlAsyncQueryStopRequest)) (*Response, error) + +// EsqlAsyncQueryStopRequest configures the Esql Async Query Stop API request. +type EsqlAsyncQueryStopRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlAsyncQueryStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_query") + 1 + len("async") + 1 + len(r.DocumentID) + 1 + len("stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + path.WriteString("/") + path.WriteString("stop") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_stop") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlAsyncQueryStop) WithContext(v context.Context) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlAsyncQueryStop) WithPretty() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlAsyncQueryStop) WithHuman() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlAsyncQueryStop) WithErrorTrace() func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlAsyncQueryStop) WithFilterPath(v ...string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
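A sketch of cutting a long-running ES|QL query short: stop collects whatever has been computed so far and returns it as the final response (the ID is a placeholder):

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func stopEarly(es *elasticsearch.Client, id string) {
	res, err := es.Esql.AsyncQueryStop(id,
		es.Esql.AsyncQueryStop.WithContext(context.Background()),
	)
	if err != nil {
		log.Fatalf("esql async query stop: %s", err)
	}
	defer res.Body.Close()
	log.Println(res) // results collected so far; the body flags any partial state
}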
+func (f EsqlAsyncQueryStop) WithHeader(h map[string]string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EsqlAsyncQueryStop) WithOpaqueID(s string) func(*EsqlAsyncQueryStopRequest) { + return func(r *EsqlAsyncQueryStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.get_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.get_query.go new file mode 100644 index 000000000..4616e78a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.get_query.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlGetQueryFunc(t Transport) EsqlGetQuery { + return func(id string, o ...func(*EsqlGetQueryRequest)) (*Response, error) { + var r = EsqlGetQueryRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlGetQuery - Executes a get ESQL query request +// +// This API is experimental. +type EsqlGetQuery func(id string, o ...func(*EsqlGetQueryRequest)) (*Response, error) + +// EsqlGetQueryRequest configures the Esql Get Query API request. +type EsqlGetQueryRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlGetQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.get_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_query") + 1 + len("queries") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("queries") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.get_query") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.get_query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlGetQuery) WithContext(v context.Context) func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlGetQuery) WithPretty() func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlGetQuery) WithHuman() func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlGetQuery) WithErrorTrace() func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlGetQuery) WithFilterPath(v ...string) func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f EsqlGetQuery) WithHeader(h map[string]string) func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EsqlGetQuery) WithOpaqueID(s string) func(*EsqlGetQueryRequest) { + return func(r *EsqlGetQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.list_queries.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.list_queries.go new file mode 100644 index 000000000..2b94eb0c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.list_queries.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newEsqlListQueriesFunc(t Transport) EsqlListQueries { + return func(o ...func(*EsqlListQueriesRequest)) (*Response, error) { + var r = EsqlListQueriesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlListQueries - Executes a list ESQL queries request +// +// This API is experimental. +type EsqlListQueries func(o ...func(*EsqlListQueriesRequest)) (*Response, error) + +// EsqlListQueriesRequest configures the Esql List Queries API request. +type EsqlListQueriesRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r EsqlListQueriesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.list_queries") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_query/queries")) + path.WriteString("http://") + path.WriteString("/_query/queries") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "esql.list_queries") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.list_queries") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlListQueries) WithContext(v context.Context) func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlListQueries) WithPretty() func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlListQueries) WithHuman() func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlListQueries) WithErrorTrace() func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlListQueries) WithFilterPath(v ...string) func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlListQueries) WithHeader(h map[string]string) func(*EsqlListQueriesRequest) { + return func(r *EsqlListQueriesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
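Editor's note: the ES|QL helpers added in this diff all follow the usual esapi calling convention, where the generated func type is invoked directly and its With* methods produce option closures. A minimal usage sketch follows, assuming the client wires these up under an Esql namespace (es.Esql.ListQueries and es.Esql.AsyncQueryStop, inferred from the constructor names above); the async query ID is a placeholder, not a real value:

package main

import (
	"context"
	"io"
	"log"
	"os"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// List currently running ES|QL queries (experimental API).
	res, err := es.Esql.ListQueries(es.Esql.ListQueries.WithContext(ctx))
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	io.Copy(os.Stdout, res.Body)

	// Stop a previously submitted async query ("FkF1" is a placeholder ID)
	// and collect whatever results are already available.
	stop, err := es.Esql.AsyncQueryStop("FkF1", es.Esql.AsyncQueryStop.WithContext(ctx))
	if err != nil {
		log.Fatal(err)
	}
	defer stop.Body.Close()
}
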
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f EsqlListQueries) WithOpaqueID(s string) func(*EsqlListQueriesRequest) {
+	return func(r *EsqlListQueriesRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.query.go
new file mode 100644
index 000000000..e0b1869bc
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.esql.query.go
@@ -0,0 +1,275 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func newEsqlQueryFunc(t Transport) EsqlQuery {
+	return func(body io.Reader, o ...func(*EsqlQueryRequest)) (*Response, error) {
+		var r = EsqlQueryRequest{Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// EsqlQuery - Executes an ESQL request
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-query-api.html.
+type EsqlQuery func(body io.Reader, o ...func(*EsqlQueryRequest)) (*Response, error)
+
+// EsqlQueryRequest configures the Esql Query API request.
+type EsqlQueryRequest struct {
+	Body io.Reader
+
+	AllowPartialResults *bool
+	Delimiter           string
+	DropNullColumns     *bool
+	Format              string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r EsqlQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "esql.query")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + len("/_query"))
+	path.WriteString("http://")
+	path.WriteString("/_query")
+
+	params = make(map[string]string)
+
+	if r.AllowPartialResults != nil {
+		params["allow_partial_results"] = strconv.FormatBool(*r.AllowPartialResults)
+	}
+
+	if r.Delimiter != "" {
+		params["delimiter"] = r.Delimiter
+	}
+
+	if r.DropNullColumns != nil {
+		params["drop_null_columns"] = strconv.FormatBool(*r.DropNullColumns)
+	}
+
+	if r.Format != "" {
+		params["format"] = r.Format
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "esql.query")
+		if reader := instrument.RecordRequestBody(ctx, "esql.query", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "esql.query")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f EsqlQuery) WithContext(v context.Context) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowPartialResults - if `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. if `false`, the entire query will fail if there are any failures.
+func (f EsqlQuery) WithAllowPartialResults(v bool) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.AllowPartialResults = &v
+	}
+}
+
+// WithDelimiter - the character to use between values within a csv row. only valid for the csv format.
+func (f EsqlQuery) WithDelimiter(v string) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.Delimiter = v
+	}
+}
+
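Editor's note: since EsqlQueryRequest carries its parameters as typed fields, callers compose them through the option methods above. A short sketch of a synchronous ES|QL query (assuming the es.Esql.Query wiring implied by newEsqlQueryFunc; the index name and query are illustrative):

package main

import (
	"context"
	"io"
	"log"
	"os"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// The request body holds the ES|QL query itself; the options map onto the
	// typed fields of EsqlQueryRequest (Format, DropNullColumns, and so on).
	res, err := es.Esql.Query(
		strings.NewReader(`{"query": "FROM my-index | LIMIT 10"}`),
		es.Esql.Query.WithContext(context.Background()),
		es.Esql.Query.WithFormat("json"),
		es.Esql.Query.WithDropNullColumns(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	io.Copy(os.Stdout, res.Body)
}
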
+// WithDropNullColumns - should entirely null columns be removed from the results? their name and type will be returned in a new `all_columns` section.
+func (f EsqlQuery) WithDropNullColumns(v bool) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.DropNullColumns = &v
+	}
+}
+
+// WithFormat - a short version of the accept header, e.g. json, yaml.
+func (f EsqlQuery) WithFormat(v string) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.Format = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f EsqlQuery) WithPretty() func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f EsqlQuery) WithHuman() func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f EsqlQuery) WithErrorTrace() func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f EsqlQuery) WithFilterPath(v ...string) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f EsqlQuery) WithHeader(h map[string]string) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f EsqlQuery) WithOpaqueID(s string) func(*EsqlQueryRequest) {
+	return func(r *EsqlQueryRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.graph.explore.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.graph.explore.go
new file mode 100644
index 000000000..36b5b2cd1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.graph.explore.go
@@ -0,0 +1,273 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+)
+
+func newGraphExploreFunc(t Transport) GraphExplore {
+	return func(index []string, o ...func(*GraphExploreRequest)) (*Response, error) {
+		var r = GraphExploreRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// GraphExplore - Explore extracted and summarized information about the documents and terms in an index.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html.
+type GraphExplore func(index []string, o ...func(*GraphExploreRequest)) (*Response, error)
+
+// GraphExploreRequest configures the Graph Explore API request.
+type GraphExploreRequest struct {
+	Index []string
+
+	Body io.Reader
+
+	Routing string
+	Timeout time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r GraphExploreRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "graph.explore")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	if len(r.Index) == 0 {
+		return nil, errors.New("index is required and cannot be nil or empty")
+	}
+
+	path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_graph") + 1 + len("explore"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString(strings.Join(r.Index, ","))
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ","))
+	}
+	path.WriteString("/")
+	path.WriteString("_graph")
+	path.WriteString("/")
+	path.WriteString("explore")
+
+	params = make(map[string]string)
+
+	if r.Routing != "" {
+		params["routing"] = r.Routing
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "graph.explore")
+		if reader := instrument.RecordRequestBody(ctx, "graph.explore", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "graph.explore")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f GraphExplore) WithContext(v context.Context) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - Graph Query DSL.
+func (f GraphExplore) WithBody(v io.Reader) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.Body = v
+	}
+}
+
+// WithRouting - specific routing value.
+func (f GraphExplore) WithRouting(v string) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.Routing = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f GraphExplore) WithTimeout(v time.Duration) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f GraphExplore) WithPretty() func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f GraphExplore) WithHuman() func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f GraphExplore) WithErrorTrace() func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f GraphExplore) WithFilterPath(v ...string) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f GraphExplore) WithHeader(h map[string]string) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f GraphExplore) WithOpaqueID(s string) func(*GraphExploreRequest) {
+	return func(r *GraphExploreRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.delete_lifecycle.go
new file mode 100644
index 000000000..0198983f8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.delete_lifecycle.go
@@ -0,0 +1,251 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+)
+
+func newILMDeleteLifecycleFunc(t Transport) ILMDeleteLifecycle {
+	return func(policy string, o ...func(*ILMDeleteLifecycleRequest)) (*Response, error) {
+		var r = ILMDeleteLifecycleRequest{Policy: policy}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ILMDeleteLifecycle - Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html.
+type ILMDeleteLifecycle func(policy string, o ...func(*ILMDeleteLifecycleRequest)) (*Response, error)
+
+// ILMDeleteLifecycleRequest configures the ILM Delete Lifecycle API request.
+type ILMDeleteLifecycleRequest struct {
+	Policy string
+
+	MasterTimeout time.Duration
+	Timeout       time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r ILMDeleteLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ilm.delete_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "DELETE"
+
+	path.Grow(7 + 1 + len("_ilm") + 1 + len("policy") + 1 + len(r.Policy))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_ilm")
+	path.WriteString("/")
+	path.WriteString("policy")
+	path.WriteString("/")
+	path.WriteString(r.Policy)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "policy", r.Policy)
+	}
+
+	params = make(map[string]string)
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ilm.delete_lifecycle")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ilm.delete_lifecycle")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ILMDeleteLifecycle) WithContext(v context.Context) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.ctx = v
+	}
+}
+
+// WithMasterTimeout - explicit operation timeout for connection to master node.
+func (f ILMDeleteLifecycle) WithMasterTimeout(v time.Duration) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f ILMDeleteLifecycle) WithTimeout(v time.Duration) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ILMDeleteLifecycle) WithPretty() func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ILMDeleteLifecycle) WithHuman() func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ILMDeleteLifecycle) WithErrorTrace() func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ILMDeleteLifecycle) WithFilterPath(v ...string) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ILMDeleteLifecycle) WithHeader(h map[string]string) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ILMDeleteLifecycle) WithOpaqueID(s string) func(*ILMDeleteLifecycleRequest) {
+	return func(r *ILMDeleteLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
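Editor's note: the ILM helpers use the same functional-options pattern, and esapi has long grouped them under an ILM namespace on the client. A minimal sketch of deleting an unused policy (the policy name is illustrative; Elasticsearch rejects deletion of a policy that is still attached to an index):

package main

import (
	"log"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// Delete a lifecycle policy that is no longer attached to any index.
	res, err := es.ILM.DeleteLifecycle(
		"old-logs-policy", // illustrative policy name
		es.ILM.DeleteLifecycle.WithMasterTimeout(30*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.Status())
}
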
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.explain_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.explain_lifecycle.go
new file mode 100644
index 000000000..16fba1eab
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.explain_lifecycle.go
@@ -0,0 +1,264 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newILMExplainLifecycleFunc(t Transport) ILMExplainLifecycle {
+	return func(index string, o ...func(*ILMExplainLifecycleRequest)) (*Response, error) {
+		var r = ILMExplainLifecycleRequest{Index: index}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ILMExplainLifecycle - Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html.
+type ILMExplainLifecycle func(index string, o ...func(*ILMExplainLifecycleRequest)) (*Response, error)
+
+// ILMExplainLifecycleRequest configures the ILM Explain Lifecycle API request.
+type ILMExplainLifecycleRequest struct {
+	Index string
+
+	MasterTimeout time.Duration
+	OnlyErrors    *bool
+	OnlyManaged   *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r ILMExplainLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ilm.explain_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len(r.Index) + 1 + len("_ilm") + 1 + len("explain"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString(r.Index)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "index", r.Index)
+	}
+	path.WriteString("/")
+	path.WriteString("_ilm")
+	path.WriteString("/")
+	path.WriteString("explain")
+
+	params = make(map[string]string)
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.OnlyErrors != nil {
+		params["only_errors"] = strconv.FormatBool(*r.OnlyErrors)
+	}
+
+	if r.OnlyManaged != nil {
+		params["only_managed"] = strconv.FormatBool(*r.OnlyManaged)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ilm.explain_lifecycle")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ilm.explain_lifecycle")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ILMExplainLifecycle) WithContext(v context.Context) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.ctx = v
+	}
+}
+
+// WithMasterTimeout - explicit operation timeout for connection to master node.
+func (f ILMExplainLifecycle) WithMasterTimeout(v time.Duration) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithOnlyErrors - filters the indices included in the response to ones in an ilm error state, implies only_managed.
+func (f ILMExplainLifecycle) WithOnlyErrors(v bool) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.OnlyErrors = &v
+	}
+}
+
+// WithOnlyManaged - filters the indices included in the response to ones managed by ilm.
+func (f ILMExplainLifecycle) WithOnlyManaged(v bool) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.OnlyManaged = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ILMExplainLifecycle) WithPretty() func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ILMExplainLifecycle) WithHuman() func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ILMExplainLifecycle) WithErrorTrace() func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ILMExplainLifecycle) WithFilterPath(v ...string) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ILMExplainLifecycle) WithHeader(h map[string]string) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ILMExplainLifecycle) WithOpaqueID(s string) func(*ILMExplainLifecycleRequest) {
+	return func(r *ILMExplainLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_lifecycle.go
new file mode 100644
index 000000000..5f0177d65
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_lifecycle.go
@@ -0,0 +1,260 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+)
+
+func newILMGetLifecycleFunc(t Transport) ILMGetLifecycle {
+	return func(o ...func(*ILMGetLifecycleRequest)) (*Response, error) {
+		var r = ILMGetLifecycleRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ILMGetLifecycle - Returns the specified policy definition. Includes the policy version and last modified date.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html.
+type ILMGetLifecycle func(o ...func(*ILMGetLifecycleRequest)) (*Response, error)
+
+// ILMGetLifecycleRequest configures the ILM Get Lifecycle API request.
+type ILMGetLifecycleRequest struct {
+	Policy string
+
+	MasterTimeout time.Duration
+	Timeout       time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
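Editor's note: explain and get pair naturally when debugging a policy. A minimal sketch, assuming the same es.ILM namespace as above (index pattern and policy name are illustrative):

package main

import (
	"io"
	"log"
	"os"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// Show lifecycle state only for managed indices that are in an error state.
	res, err := es.ILM.ExplainLifecycle(
		"logs-*", // illustrative index pattern
		es.ILM.ExplainLifecycle.WithOnlyErrors(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	io.Copy(os.Stdout, res.Body)

	// Fetch a single policy definition; without WithPolicy, all policies are returned.
	pol, err := es.ILM.GetLifecycle(es.ILM.GetLifecycle.WithPolicy("logs-policy"))
	if err != nil {
		log.Fatal(err)
	}
	defer pol.Body.Close()
}
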
+// Do executes the request and returns response or error.
+func (r ILMGetLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ilm.get_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_ilm") + 1 + len("policy") + 1 + len(r.Policy))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_ilm")
+	path.WriteString("/")
+	path.WriteString("policy")
+	if r.Policy != "" {
+		path.WriteString("/")
+		path.WriteString(r.Policy)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "policy", r.Policy)
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ilm.get_lifecycle")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ilm.get_lifecycle")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ILMGetLifecycle) WithContext(v context.Context) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPolicy - the name of the index lifecycle policy.
+func (f ILMGetLifecycle) WithPolicy(v string) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.Policy = v
+	}
+}
+
+// WithMasterTimeout - explicit operation timeout for connection to master node.
+func (f ILMGetLifecycle) WithMasterTimeout(v time.Duration) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f ILMGetLifecycle) WithTimeout(v time.Duration) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ILMGetLifecycle) WithPretty() func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ILMGetLifecycle) WithHuman() func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ILMGetLifecycle) WithErrorTrace() func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ILMGetLifecycle) WithFilterPath(v ...string) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ILMGetLifecycle) WithHeader(h map[string]string) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ILMGetLifecycle) WithOpaqueID(s string) func(*ILMGetLifecycleRequest) {
+	return func(r *ILMGetLifecycleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_status.go
new file mode 100644
index 000000000..f20c93b40
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.get_status.go
@@ -0,0 +1,215 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newILMGetStatusFunc(t Transport) ILMGetStatus {
+	return func(o ...func(*ILMGetStatusRequest)) (*Response, error) {
+		var r = ILMGetStatusRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ILMGetStatus - Retrieves the current index lifecycle management (ILM) status.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html.
+type ILMGetStatus func(o ...func(*ILMGetStatusRequest)) (*Response, error)
+
+// ILMGetStatusRequest configures the ILM Get Status API request.
+type ILMGetStatusRequest struct {
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r ILMGetStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ilm.get_status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + len("/_ilm/status"))
+	path.WriteString("http://")
+	path.WriteString("/_ilm/status")
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ilm.get_status")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ilm.get_status")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f ILMGetStatus) WithContext(v context.Context) func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f ILMGetStatus) WithPretty() func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f ILMGetStatus) WithHuman() func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f ILMGetStatus) WithErrorTrace() func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f ILMGetStatus) WithFilterPath(v ...string) func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f ILMGetStatus) WithHeader(h map[string]string) func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f ILMGetStatus) WithOpaqueID(s string) func(*ILMGetStatusRequest) {
+	return func(r *ILMGetStatusRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.migrate_to_data_tiers.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.migrate_to_data_tiers.go
new file mode 100644
index 000000000..e0912f440
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.migrate_to_data_tiers.go
@@ -0,0 +1,259 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func newILMMigrateToDataTiersFunc(t Transport) ILMMigrateToDataTiers {
+	return func(o ...func(*ILMMigrateToDataTiersRequest)) (*Response, error) {
+		var r = ILMMigrateToDataTiersRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// ILMMigrateToDataTiers - Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html.
+type ILMMigrateToDataTiers func(o ...func(*ILMMigrateToDataTiersRequest)) (*Response, error)
+
+// ILMMigrateToDataTiersRequest configures the ILM Migrate To Data Tiers API request.
+type ILMMigrateToDataTiersRequest struct {
+	Body io.Reader
+
+	DryRun        *bool
+	MasterTimeout time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
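Editor's note: migrate-to-data-tiers is the one destructive ILM call in this batch, and its DryRun option maps onto the `dry_run` query parameter handled in Do below. A minimal sketch of the recommended dry run first (same assumed es.ILM wiring as above):

package main

import (
	"io"
	"log"
	"os"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// Dry run: report which indices and ILM policies would be migrated from
	// custom node-attribute routing to data tiers, without changing anything.
	res, err := es.ILM.MigrateToDataTiers(
		es.ILM.MigrateToDataTiers.WithDryRun(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	io.Copy(os.Stdout, res.Body)
}
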
+func (r ILMMigrateToDataTiersRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.migrate_to_data_tiers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ilm/migrate_to_data_tiers")) + path.WriteString("http://") + path.WriteString("/_ilm/migrate_to_data_tiers") + + params = make(map[string]string) + + if r.DryRun != nil { + params["dry_run"] = strconv.FormatBool(*r.DryRun) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.migrate_to_data_tiers") + if reader := instrument.RecordRequestBody(ctx, "ilm.migrate_to_data_tiers", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.migrate_to_data_tiers") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMMigrateToDataTiers) WithContext(v context.Context) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.ctx = v + } +} + +// WithBody - Optionally specify a legacy index template name to delete and optionally specify a node attribute name used for index shard routing (defaults to "data"). +func (f ILMMigrateToDataTiers) WithBody(v io.Reader) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.Body = v + } +} + +// WithDryRun - if set to true it will simulate the migration, providing a way to retrieve the ilm policies and indices that need to be migrated. the default is false. +func (f ILMMigrateToDataTiers) WithDryRun(v bool) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.DryRun = &v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
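+//
+// A hedged sketch of combining these options (es and ctx assumed as above):
+// a dry run of the migration, bounded by a master-node timeout:
+//
+//	res, err := es.ILM.MigrateToDataTiers(
+//		es.ILM.MigrateToDataTiers.WithContext(ctx),
+//		es.ILM.MigrateToDataTiers.WithDryRun(true),
+//		es.ILM.MigrateToDataTiers.WithMasterTimeout(30*time.Second),
+//	)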
+func (f ILMMigrateToDataTiers) WithMasterTimeout(v time.Duration) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMMigrateToDataTiers) WithPretty() func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMMigrateToDataTiers) WithHuman() func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMMigrateToDataTiers) WithErrorTrace() func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMMigrateToDataTiers) WithFilterPath(v ...string) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ILMMigrateToDataTiers) WithHeader(h map[string]string) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMMigrateToDataTiers) WithOpaqueID(s string) func(*ILMMigrateToDataTiersRequest) { + return func(r *ILMMigrateToDataTiersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.move_to_step.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.move_to_step.go new file mode 100644 index 000000000..7b6360ef4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.move_to_step.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newILMMoveToStepFunc(t Transport) ILMMoveToStep { + return func(index string, o ...func(*ILMMoveToStepRequest)) (*Response, error) { + var r = ILMMoveToStepRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMMoveToStep - Manually moves an index into the specified step and executes that step. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html. +type ILMMoveToStep func(index string, o ...func(*ILMMoveToStepRequest)) (*Response, error) + +// ILMMoveToStepRequest configures the ILM Move To Step API request. +type ILMMoveToStepRequest struct { + Index string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ILMMoveToStepRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.move_to_step") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ilm") + 1 + len("move") + 1 + len(r.Index)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("move") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.move_to_step") + if reader := instrument.RecordRequestBody(ctx, "ilm.move_to_step", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.move_to_step") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMMoveToStep) WithContext(v context.Context) func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.ctx = v + } +} + +// WithBody - The new lifecycle step to move to. +func (f ILMMoveToStep) WithBody(v io.Reader) func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. 
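+//
+// An illustrative sketch (es and ctx assumed; the step coordinates below are
+// examples only and must match the index's real current step, per the
+// ilm-move-to-step documentation linked above):
+//
+//	body := strings.NewReader(`{
+//	  "current_step": {"phase": "new", "action": "complete", "name": "complete"},
+//	  "next_step":    {"phase": "warm"}
+//	}`)
+//	res, err := es.ILM.MoveToStep("my-index",
+//		es.ILM.MoveToStep.WithContext(ctx),
+//		es.ILM.MoveToStep.WithBody(body),
+//		es.ILM.MoveToStep.WithPretty(),
+//	)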
+func (f ILMMoveToStep) WithPretty() func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMMoveToStep) WithHuman() func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMMoveToStep) WithErrorTrace() func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMMoveToStep) WithFilterPath(v ...string) func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ILMMoveToStep) WithHeader(h map[string]string) func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMMoveToStep) WithOpaqueID(s string) func(*ILMMoveToStepRequest) { + return func(r *ILMMoveToStepRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.put_lifecycle.go new file mode 100644 index 000000000..058df7ebb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.put_lifecycle.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newILMPutLifecycleFunc(t Transport) ILMPutLifecycle { + return func(policy string, o ...func(*ILMPutLifecycleRequest)) (*Response, error) { + var r = ILMPutLifecycleRequest{Policy: policy} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMPutLifecycle - Creates a lifecycle policy +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html. +type ILMPutLifecycle func(policy string, o ...func(*ILMPutLifecycleRequest)) (*Response, error) + +// ILMPutLifecycleRequest configures the ILM Put Lifecycle API request. 
+type ILMPutLifecycleRequest struct { + Body io.Reader + + Policy string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ILMPutLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.put_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ilm") + 1 + len("policy") + 1 + len(r.Policy)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.Policy) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy", r.Policy) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.put_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "ilm.put_lifecycle", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.put_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMPutLifecycle) WithContext(v context.Context) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.ctx = v + } +} + +// WithBody - The lifecycle policy definition to register. +func (f ILMPutLifecycle) WithBody(v io.Reader) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.Body = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
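+//
+// A usage sketch (es and ctx assumed): registering a small hot/delete policy;
+// the JSON shape follows the put-lifecycle documentation linked above:
+//
+//	policy := strings.NewReader(`{"policy": {"phases": {
+//	  "hot":    {"actions": {"rollover": {"max_age": "7d"}}},
+//	  "delete": {"min_age": "30d", "actions": {"delete": {}}}
+//	}}}`)
+//	res, err := es.ILM.PutLifecycle("logs-policy",
+//		es.ILM.PutLifecycle.WithContext(ctx),
+//		es.ILM.PutLifecycle.WithBody(policy),
+//		es.ILM.PutLifecycle.WithMasterTimeout(30*time.Second),
+//	)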
+func (f ILMPutLifecycle) WithMasterTimeout(v time.Duration) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMPutLifecycle) WithTimeout(v time.Duration) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMPutLifecycle) WithPretty() func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMPutLifecycle) WithHuman() func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMPutLifecycle) WithErrorTrace() func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMPutLifecycle) WithFilterPath(v ...string) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ILMPutLifecycle) WithHeader(h map[string]string) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMPutLifecycle) WithOpaqueID(s string) func(*ILMPutLifecycleRequest) { + return func(r *ILMPutLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.remove_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.remove_policy.go new file mode 100644 index 000000000..1f75ddf10 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.remove_policy.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newILMRemovePolicyFunc(t Transport) ILMRemovePolicy { + return func(index string, o ...func(*ILMRemovePolicyRequest)) (*Response, error) { + var r = ILMRemovePolicyRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMRemovePolicy - Removes the assigned lifecycle policy and stops managing the specified index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html. +type ILMRemovePolicy func(index string, o ...func(*ILMRemovePolicyRequest)) (*Response, error) + +// ILMRemovePolicyRequest configures the ILM Remove Policy API request. +type ILMRemovePolicyRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ILMRemovePolicyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.remove_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ilm") + 1 + len("remove")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("remove") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.remove_policy") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.remove_policy") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
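+//
+// A sketch (es and ctx assumed): detaching the lifecycle policy from a single
+// index; the index and its data are untouched, ILM simply stops managing it:
+//
+//	res, err := es.ILM.RemovePolicy("my-index",
+//		es.ILM.RemovePolicy.WithContext(ctx),
+//	)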
+func (f ILMRemovePolicy) WithContext(v context.Context) func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMRemovePolicy) WithPretty() func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMRemovePolicy) WithHuman() func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMRemovePolicy) WithErrorTrace() func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMRemovePolicy) WithFilterPath(v ...string) func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ILMRemovePolicy) WithHeader(h map[string]string) func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMRemovePolicy) WithOpaqueID(s string) func(*ILMRemovePolicyRequest) { + return func(r *ILMRemovePolicyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.retry.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.retry.go new file mode 100644 index 000000000..ceda7e10e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.retry.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newILMRetryFunc(t Transport) ILMRetry { + return func(index string, o ...func(*ILMRetryRequest)) (*Response, error) { + var r = ILMRetryRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMRetry - Retries executing the policy for an index that is in the ERROR step. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html. 
+type ILMRetry func(index string, o ...func(*ILMRetryRequest)) (*Response, error) + +// ILMRetryRequest configures the ILM Retry API request. +type ILMRetryRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ILMRetryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.retry") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_ilm") + 1 + len("retry")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("retry") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.retry") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.retry") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMRetry) WithContext(v context.Context) func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMRetry) WithPretty() func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMRetry) WithHuman() func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMRetry) WithErrorTrace() func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMRetry) WithFilterPath(v ...string) func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
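+//
+// A sketch (es and ctx assumed): retrying a stuck index, with an extra header
+// so the attempt can be traced in proxy or audit logs (the header name is an
+// example, not a reserved Elasticsearch header):
+//
+//	res, err := es.ILM.Retry("my-index",
+//		es.ILM.Retry.WithContext(ctx),
+//		es.ILM.Retry.WithHeader(map[string]string{"X-Request-Source": "ops-runbook"}),
+//	)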
+func (f ILMRetry) WithHeader(h map[string]string) func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMRetry) WithOpaqueID(s string) func(*ILMRetryRequest) { + return func(r *ILMRetryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.start.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.start.go new file mode 100644 index 000000000..51e33f96f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.start.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newILMStartFunc(t Transport) ILMStart { + return func(o ...func(*ILMStartRequest)) (*Response, error) { + var r = ILMStartRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMStart - Start the index lifecycle management (ILM) plugin. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html. +type ILMStart func(o ...func(*ILMStartRequest)) (*Response, error) + +// ILMStartRequest configures the ILM Start API request. +type ILMStartRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ILMStartRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ilm/start")) + path.WriteString("http://") + path.WriteString("/_ilm/start") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.start") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.start") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMStart) WithContext(v context.Context) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMStart) WithMasterTimeout(v time.Duration) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMStart) WithTimeout(v time.Duration) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMStart) WithPretty() func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMStart) WithHuman() func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMStart) WithErrorTrace() func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMStart) WithFilterPath(v ...string) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
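+//
+// A sketch (es and ctx assumed): resuming ILM after maintenance, with
+// explicit timeouts rather than the server defaults:
+//
+//	res, err := es.ILM.Start(
+//		es.ILM.Start.WithContext(ctx),
+//		es.ILM.Start.WithMasterTimeout(30*time.Second),
+//		es.ILM.Start.WithTimeout(30*time.Second),
+//	)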
+func (f ILMStart) WithHeader(h map[string]string) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMStart) WithOpaqueID(s string) func(*ILMStartRequest) { + return func(r *ILMStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.stop.go new file mode 100644 index 000000000..3d6f11e20 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ilm.stop.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newILMStopFunc(t Transport) ILMStop { + return func(o ...func(*ILMStopRequest)) (*Response, error) { + var r = ILMStopRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ILMStop - Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html. +type ILMStop func(o ...func(*ILMStopRequest)) (*Response, error) + +// ILMStopRequest configures the ILM Stop API request. +type ILMStopRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r ILMStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ilm/stop")) + path.WriteString("http://") + path.WriteString("/_ilm/stop") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.stop") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ILMStop) WithContext(v context.Context) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f ILMStop) WithMasterTimeout(v time.Duration) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f ILMStop) WithTimeout(v time.Duration) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ILMStop) WithPretty() func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ILMStop) WithHuman() func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ILMStop) WithErrorTrace() func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ILMStop) WithFilterPath(v ...string) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
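+//
+// A sketch (es and ctx assumed). The stop request returns before ILM has
+// fully halted, so callers typically poll es.ILM.GetStatus afterwards until
+// the reported operation_mode is STOPPED:
+//
+//	res, err := es.ILM.Stop(
+//		es.ILM.Stop.WithContext(ctx),
+//		es.ILM.Stop.WithMasterTimeout(30*time.Second),
+//	)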
+func (f ILMStop) WithHeader(h map[string]string) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ILMStop) WithOpaqueID(s string) func(*ILMStopRequest) { + return func(r *ILMStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.create_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.create_data_stream.go new file mode 100644 index 000000000..69787a067 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.create_data_stream.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesCreateDataStreamFunc(t Transport) IndicesCreateDataStream { + return func(name string, o ...func(*IndicesCreateDataStreamRequest)) (*Response, error) { + var r = IndicesCreateDataStreamRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesCreateDataStream - Creates a data stream +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesCreateDataStream func(name string, o ...func(*IndicesCreateDataStreamRequest)) (*Response, error) + +// IndicesCreateDataStreamRequest configures the Indices Create Data Stream API request. +type IndicesCreateDataStreamRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesCreateDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_data_stream") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesCreateDataStream) WithContext(v context.Context) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesCreateDataStream) WithMasterTimeout(v time.Duration) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - specify timeout for acknowledging the cluster state update. +func (f IndicesCreateDataStream) WithTimeout(v time.Duration) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesCreateDataStream) WithPretty() func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
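+//
+// A sketch (es and ctx assumed). Creating a data stream requires that a
+// matching index template with a data_stream section already exists for the
+// name being created:
+//
+//	res, err := es.Indices.CreateDataStream("logs-myapp-default",
+//		es.Indices.CreateDataStream.WithContext(ctx),
+//	)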
+func (f IndicesCreateDataStream) WithHuman() func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesCreateDataStream) WithErrorTrace() func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesCreateDataStream) WithFilterPath(v ...string) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesCreateDataStream) WithHeader(h map[string]string) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesCreateDataStream) WithOpaqueID(s string) func(*IndicesCreateDataStreamRequest) { + return func(r *IndicesCreateDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.data_streams_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.data_streams_stats.go new file mode 100644 index 000000000..6d0709121 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.data_streams_stats.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newIndicesDataStreamsStatsFunc(t Transport) IndicesDataStreamsStats { + return func(o ...func(*IndicesDataStreamsStatsRequest)) (*Response, error) { + var r = IndicesDataStreamsStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDataStreamsStats - Provides statistics on operations happening in a data stream. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesDataStreamsStats func(o ...func(*IndicesDataStreamsStatsRequest)) (*Response, error) + +// IndicesDataStreamsStatsRequest configures the Indices Data Streams Stats API request. 
+type IndicesDataStreamsStatsRequest struct { + Name []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDataStreamsStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.data_streams_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.data_streams_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.data_streams_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDataStreamsStats) WithContext(v context.Context) func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.ctx = v + } +} + +// WithName - a list of data stream names; use _all to perform the operation on all data streams. +func (f IndicesDataStreamsStats) WithName(v ...string) func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.Name = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDataStreamsStats) WithPretty() func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
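+//
+// A sketch (es and ctx assumed): human-readable stats across every stream
+// matching logs-*:
+//
+//	res, err := es.Indices.DataStreamsStats(
+//		es.Indices.DataStreamsStats.WithContext(ctx),
+//		es.Indices.DataStreamsStats.WithName("logs-*"),
+//		es.Indices.DataStreamsStats.WithHuman(),
+//	)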
+func (f IndicesDataStreamsStats) WithHuman() func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDataStreamsStats) WithErrorTrace() func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDataStreamsStats) WithFilterPath(v ...string) func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDataStreamsStats) WithHeader(h map[string]string) func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDataStreamsStats) WithOpaqueID(s string) func(*IndicesDataStreamsStatsRequest) { + return func(r *IndicesDataStreamsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream.go new file mode 100644 index 000000000..9005c88a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteDataStreamFunc(t Transport) IndicesDeleteDataStream { + return func(name []string, o ...func(*IndicesDeleteDataStreamRequest)) (*Response, error) { + var r = IndicesDeleteDataStreamRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteDataStream - Deletes a data stream. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesDeleteDataStream func(name []string, o ...func(*IndicesDeleteDataStreamRequest)) (*Response, error) + +// IndicesDeleteDataStreamRequest configures the Indices Delete Data Stream API request. 
+type IndicesDeleteDataStreamRequest struct { + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDeleteDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_stream") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDeleteDataStream) WithContext(v context.Context) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesDeleteDataStream) WithExpandWildcards(v string) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. 
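Annotation: a usage sketch for the delete endpoint, under the same client assumptions as the stats example. Note that name is positional and required (Do rejects an empty slice before building a request), and that a missing stream surfaces as an HTTP error response, not a Go error. "logs-app" is a placeholder:

	// deleteDataStream issues DELETE /_data_stream/logs-app.
	func deleteDataStream(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.DeleteDataStream(
			[]string{"logs-app"},
			es.Indices.DeleteDataStream.WithContext(ctx),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() { // e.g. 404 for an unknown stream
			return fmt.Errorf("delete data stream: %s", res.Status())
		}
		return nil
	}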
+func (f IndicesDeleteDataStream) WithMasterTimeout(v time.Duration) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDeleteDataStream) WithPretty() func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteDataStream) WithHuman() func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteDataStream) WithErrorTrace() func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteDataStream) WithFilterPath(v ...string) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteDataStream) WithHeader(h map[string]string) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteDataStream) WithOpaqueID(s string) func(*IndicesDeleteDataStreamRequest) { + return func(r *IndicesDeleteDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream_options.go new file mode 100644 index 000000000..eeb703555 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.delete_data_stream_options.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
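Annotation: the WithHeader/WithOpaqueID pair above behaves identically on every endpoint in this package: WithHeader uses Add semantics (repeated keys accumulate on the outgoing request), WithOpaqueID uses Set (the last write wins), and X-Opaque-Id is what ties a call to its server-side tasks and slow-log entries. A sketch, assuming the earlier client plus an import of github.com/elastic/go-elasticsearch/v9/esapi; the header name and ID values are placeholders:

	func deleteWithTracing(ctx context.Context, es *elasticsearch.Client) (*esapi.Response, error) {
		return es.Indices.DeleteDataStream(
			[]string{"logs-app"},
			es.Indices.DeleteDataStream.WithContext(ctx),
			// Add semantics: merged into the request header by key.
			es.Indices.DeleteDataStream.WithHeader(map[string]string{
				"X-Request-Source": "cleanup-job", // hypothetical header
			}),
			// Set semantics: a later WithOpaqueID overwrites this value.
			es.Indices.DeleteDataStream.WithOpaqueID("cleanup-2024-07"),
		)
	}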
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteDataStreamOptionsFunc(t Transport) IndicesDeleteDataStreamOptions { + return func(name []string, o ...func(*IndicesDeleteDataStreamOptionsRequest)) (*Response, error) { + var r = IndicesDeleteDataStreamOptionsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteDataStreamOptions - Deletes the data stream options of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html. +type IndicesDeleteDataStreamOptions func(name []string, o ...func(*IndicesDeleteDataStreamOptionsRequest)) (*Response, error) + +// IndicesDeleteDataStreamOptionsRequest configures the Indices Delete Data Stream Options API request. +type IndicesDeleteDataStreamOptionsRequest struct { + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesDeleteDataStreamOptionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_options")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_options") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { 
+ req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_stream_options") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_stream_options") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesDeleteDataStreamOptions) WithContext(v context.Context) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesDeleteDataStreamOptions) WithExpandWildcards(v string) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteDataStreamOptions) WithMasterTimeout(v time.Duration) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesDeleteDataStreamOptions) WithTimeout(v time.Duration) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDeleteDataStreamOptions) WithPretty() func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteDataStreamOptions) WithHuman() func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteDataStreamOptions) WithErrorTrace() func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesDeleteDataStreamOptions) WithFilterPath(v ...string) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteDataStreamOptions) WithHeader(h map[string]string) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
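Annotation, one nit worth flagging in the file above: the generated doc comment on WithTimeout ("explicit timestamp for the document") reads like a stray description carried over from the API specification; judging from Do, the value is simply rendered into the timeout query parameter, exactly like master_timeout. A usage sketch, assuming the client also wires this new endpoint up as es.Indices.DeleteDataStreamOptions (and a time import); the stream name is a placeholder:

	// deleteDataStreamOptions issues DELETE /_data_stream/logs-app/_options,
	// with both durations rendered into query parameters by formatDuration.
	func deleteDataStreamOptions(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.DeleteDataStreamOptions(
			[]string{"logs-app"},
			es.Indices.DeleteDataStreamOptions.WithContext(ctx),
			es.Indices.DeleteDataStreamOptions.WithMasterTimeout(30*time.Second),
			es.Indices.DeleteDataStreamOptions.WithTimeout(10*time.Second),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("delete data stream options: %s", res.Status())
		}
		return nil
	}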
+func (f IndicesDeleteDataStreamOptions) WithOpaqueID(s string) func(*IndicesDeleteDataStreamOptionsRequest) { + return func(r *IndicesDeleteDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream.go new file mode 100644 index 000000000..9ccb27967 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesGetDataStreamFunc(t Transport) IndicesGetDataStream { + return func(o ...func(*IndicesGetDataStreamRequest)) (*Response, error) { + var r = IndicesGetDataStreamRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataStream - Returns data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesGetDataStream func(o ...func(*IndicesGetDataStreamRequest)) (*Response, error) + +// IndicesGetDataStreamRequest configures the Indices Get Data Stream API request. +type IndicesGetDataStreamRequest struct { + Name []string + + ExpandWildcards string + IncludeDefaults *bool + MasterTimeout time.Duration + Verbose *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r IndicesGetDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + } + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_stream") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataStream) WithContext(v context.Context) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.ctx = v + } +} + +// WithName - a list of data streams to get; use `*` to get all data streams. +func (f IndicesGetDataStream) WithName(v ...string) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.Name = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesGetDataStream) WithExpandWildcards(v string) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.ExpandWildcards = v + } +} + +// WithIncludeDefaults - return all relevant default configurations for the data stream (default: false). 
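Annotation: a sketch of the read side for this endpoint, decoding the body rather than streaming it. The data_streams/name fields follow the documented response shape, trimmed to one field; the wildcard is a placeholder (same client assumptions as before, plus encoding/json):

	// listDataStreams issues GET /_data_stream/logs-* and returns the names.
	func listDataStreams(ctx context.Context, es *elasticsearch.Client) ([]string, error) {
		res, err := es.Indices.GetDataStream(
			es.Indices.GetDataStream.WithContext(ctx),
			es.Indices.GetDataStream.WithName("logs-*"), // omit to list everything
		)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()
		if res.IsError() {
			return nil, fmt.Errorf("get data stream: %s", res.Status())
		}
		var body struct {
			DataStreams []struct {
				Name string `json:"name"`
			} `json:"data_streams"`
		}
		if err := json.NewDecoder(res.Body).Decode(&body); err != nil {
			return nil, err
		}
		names := make([]string, 0, len(body.DataStreams))
		for _, ds := range body.DataStreams {
			names = append(names, ds.Name)
		}
		return names, nil
	}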
+func (f IndicesGetDataStream) WithIncludeDefaults(v bool) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesGetDataStream) WithMasterTimeout(v time.Duration) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.MasterTimeout = v + } +} + +// WithVerbose - whether the maximum timestamp for each data stream should be calculated and returned (default: false). +func (f IndicesGetDataStream) WithVerbose(v bool) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.Verbose = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataStream) WithPretty() func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataStream) WithHuman() func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataStream) WithErrorTrace() func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataStream) WithFilterPath(v ...string) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataStream) WithHeader(h map[string]string) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataStream) WithOpaqueID(s string) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_options.go new file mode 100644 index 000000000..8cc68ce2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_options.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
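Annotation: the WithIncludeDefaults/WithVerbose helpers above store through *bool fields on purpose: a nil pointer means "unset, emit no query parameter", which a plain bool cannot distinguish from an explicit false. The helpers hide the pointer-taking from callers. The same tri-state pattern in isolation, hypothetical names:

	type getRequest struct{ Verbose *bool } // mirrors the generated field

	// withVerbose takes a value and stores its address, exactly like the
	// generated WithVerbose/WithIncludeDefaults option builders.
	func withVerbose(v bool) func(*getRequest) {
		return func(r *getRequest) { r.Verbose = &v }
	}

	// verboseParam reports the serialized value and whether to emit it.
	func verboseParam(r getRequest) (string, bool) {
		if r.Verbose == nil {
			return "", false // unset: no query parameter at all
		}
		return strconv.FormatBool(*r.Verbose), true // "true" or "false"
	}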
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesGetDataStreamOptionsFunc(t Transport) IndicesGetDataStreamOptions { + return func(name []string, o ...func(*IndicesGetDataStreamOptionsRequest)) (*Response, error) { + var r = IndicesGetDataStreamOptionsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataStreamOptions - Returns the data stream options of the selected data streams. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html. +type IndicesGetDataStreamOptions func(name []string, o ...func(*IndicesGetDataStreamOptionsRequest)) (*Response, error) + +// IndicesGetDataStreamOptionsRequest configures the Indices Get Data Stream Options API request. +type IndicesGetDataStreamOptionsRequest struct { + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesGetDataStreamOptionsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_options")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_options") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"indices.get_data_stream_options") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream_options") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataStreamOptions) WithContext(v context.Context) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesGetDataStreamOptions) WithExpandWildcards(v string) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesGetDataStreamOptions) WithMasterTimeout(v time.Duration) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataStreamOptions) WithPretty() func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataStreamOptions) WithHuman() func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataStreamOptions) WithErrorTrace() func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataStreamOptions) WithFilterPath(v ...string) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataStreamOptions) WithHeader(h map[string]string) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataStreamOptions) WithOpaqueID(s string) func(*IndicesGetDataStreamOptionsRequest) { + return func(r *IndicesGetDataStreamOptionsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_settings.go new file mode 100644 index 000000000..931399b06 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.get_data_stream_settings.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesGetDataStreamSettingsFunc(t Transport) IndicesGetDataStreamSettings { + return func(name string, o ...func(*IndicesGetDataStreamSettingsRequest)) (*Response, error) { + var r = IndicesGetDataStreamSettingsRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataStreamSettings - Gets a data stream's settings +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesGetDataStreamSettings func(name string, o ...func(*IndicesGetDataStreamSettingsRequest)) (*Response, error) + +// IndicesGetDataStreamSettingsRequest configures the Indices Get Data Stream Settings API request. +type IndicesGetDataStreamSettingsRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
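Annotation: note the signature shift relative to the neighbouring endpoints: Name here is a single string rather than a []string, so multiple streams travel as one comma-separated expression. A sketch under the same client assumptions, with the wiring for this new endpoint (es.Indices.GetDataStreamSettings) assumed; stream names are placeholders:

	// dataStreamSettings issues GET /_data_stream/logs-app,logs-db/_settings.
	func dataStreamSettings(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.GetDataStreamSettings(
			"logs-app,logs-db",
			es.Indices.GetDataStreamSettings.WithContext(ctx),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		fmt.Println(res.String()) // status line plus raw body; handy when exploring
		return nil
	}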
+func (r IndicesGetDataStreamSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(r.Name) + 1 + len("_settings")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + path.WriteString("/") + path.WriteString("_settings") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_stream_settings") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataStreamSettings) WithContext(v context.Context) func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - period to wait for a connection to the master node. +func (f IndicesGetDataStreamSettings) WithMasterTimeout(v time.Duration) func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataStreamSettings) WithPretty() func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataStreamSettings) WithHuman() func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
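Annotation: Pretty, Human, ErrorTrace and FilterPath recur verbatim on every endpoint in this package and only ever contribute query parameters; FilterPath is the one that keeps verbose debug responses small. Combined on the settings call (same assumptions, again returning the raw *esapi.Response):

	func debugSettings(ctx context.Context, es *elasticsearch.Client) (*esapi.Response, error) {
		// Adds pretty, human, error_trace and filter_path to the query string;
		// url.Values.Encode sorts the keys, so the ordering here is cosmetic.
		return es.Indices.GetDataStreamSettings(
			"logs-app",
			es.Indices.GetDataStreamSettings.WithContext(ctx),
			es.Indices.GetDataStreamSettings.WithPretty(),
			es.Indices.GetDataStreamSettings.WithHuman(),
			es.Indices.GetDataStreamSettings.WithErrorTrace(),
			es.Indices.GetDataStreamSettings.WithFilterPath("*.settings"),
		)
	}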
+func (f IndicesGetDataStreamSettings) WithErrorTrace() func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesGetDataStreamSettings) WithFilterPath(v ...string) func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataStreamSettings) WithHeader(h map[string]string) func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataStreamSettings) WithOpaqueID(s string) func(*IndicesGetDataStreamSettingsRequest) { + return func(r *IndicesGetDataStreamSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.migrate_to_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.migrate_to_data_stream.go new file mode 100644 index 000000000..7743e3921 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.migrate_to_data_stream.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesMigrateToDataStreamFunc(t Transport) IndicesMigrateToDataStream { + return func(name string, o ...func(*IndicesMigrateToDataStreamRequest)) (*Response, error) { + var r = IndicesMigrateToDataStreamRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesMigrateToDataStream - Migrates an alias to a data stream +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesMigrateToDataStream func(name string, o ...func(*IndicesMigrateToDataStreamRequest)) (*Response, error) + +// IndicesMigrateToDataStreamRequest configures the Indices Migrate To Data Stream API request. 
+type IndicesMigrateToDataStreamRequest struct { + Name string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesMigrateToDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_to_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len("_migrate") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_migrate") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_to_data_stream") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_to_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesMigrateToDataStream) WithContext(v context.Context) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesMigrateToDataStream) WithMasterTimeout(v time.Duration) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - specify timeout for acknowledging the cluster state update. 
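Annotation: a sketch for the migration endpoint, same assumptions as before; "my-logs-alias" is a placeholder. Note the path order in Do above: the fixed _migrate segment precedes the name:

	// migrateAlias issues POST /_data_stream/_migrate/my-logs-alias, turning
	// the alias's backing indices into backing indices of a new data stream.
	func migrateAlias(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.MigrateToDataStream(
			"my-logs-alias",
			es.Indices.MigrateToDataStream.WithContext(ctx),
			es.Indices.MigrateToDataStream.WithTimeout(30*time.Second),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("migrate to data stream: %s", res.Status())
		}
		return nil
	}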
+func (f IndicesMigrateToDataStream) WithTimeout(v time.Duration) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesMigrateToDataStream) WithPretty() func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesMigrateToDataStream) WithHuman() func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesMigrateToDataStream) WithErrorTrace() func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesMigrateToDataStream) WithFilterPath(v ...string) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesMigrateToDataStream) WithHeader(h map[string]string) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesMigrateToDataStream) WithOpaqueID(s string) func(*IndicesMigrateToDataStreamRequest) { + return func(r *IndicesMigrateToDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.promote_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.promote_data_stream.go new file mode 100644 index 000000000..516841a8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.promote_data_stream.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newIndicesPromoteDataStreamFunc(t Transport) IndicesPromoteDataStream { + return func(name string, o ...func(*IndicesPromoteDataStreamRequest)) (*Response, error) { + var r = IndicesPromoteDataStreamRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPromoteDataStream - Promotes a data stream from a replicated data stream managed by CCR to a regular data stream +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html. +type IndicesPromoteDataStream func(name string, o ...func(*IndicesPromoteDataStreamRequest)) (*Response, error) + +// IndicesPromoteDataStreamRequest configures the Indices Promote Data Stream API request. +type IndicesPromoteDataStreamRequest struct { + Name string + + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesPromoteDataStreamRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.promote_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_data_stream") + 1 + len("_promote") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_promote") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.promote_data_stream") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.promote_data_stream") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesPromoteDataStream) WithContext(v context.Context) func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesPromoteDataStream) WithMasterTimeout(v time.Duration) func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPromoteDataStream) WithPretty() func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPromoteDataStream) WithHuman() func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesPromoteDataStream) WithErrorTrace() func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPromoteDataStream) WithFilterPath(v ...string) func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesPromoteDataStream) WithHeader(h map[string]string) func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPromoteDataStream) WithOpaqueID(s string) func(*IndicesPromoteDataStreamRequest) { + return func(r *IndicesPromoteDataStreamRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.reload_search_analyzers.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.reload_search_analyzers.go new file mode 100644 index 000000000..59b057f37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.indices.reload_search_analyzers.go @@ -0,0 +1,278 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
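Annotation: promotion is the cross-cluster-replication escape hatch: it converts a follower's replicated data stream into a locally writable one once replication from the leader has stopped. A sketch, same assumptions; "logs-app" is a placeholder:

	// promoteReplicatedStream issues POST /_data_stream/_promote/logs-app,
	// run against the follower cluster.
	func promoteReplicatedStream(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.PromoteDataStream(
			"logs-app",
			es.Indices.PromoteDataStream.WithContext(ctx),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("promote data stream: %s", res.Status())
		}
		return nil
	}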
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newIndicesReloadSearchAnalyzersFunc(t Transport) IndicesReloadSearchAnalyzers { + return func(index []string, o ...func(*IndicesReloadSearchAnalyzersRequest)) (*Response, error) { + var r = IndicesReloadSearchAnalyzersRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesReloadSearchAnalyzers - Reloads an index's search analyzers and their resources. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html. +type IndicesReloadSearchAnalyzers func(index []string, o ...func(*IndicesReloadSearchAnalyzersRequest)) (*Response, error) + +// IndicesReloadSearchAnalyzersRequest configures the Indices Reload Search Analyzers API request. +type IndicesReloadSearchAnalyzersRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + Resource string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r IndicesReloadSearchAnalyzersRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.reload_search_analyzers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_reload_search_analyzers")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_reload_search_analyzers") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Resource != "" { + params["resource"] = r.Resource + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v 
:= range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "indices.reload_search_analyzers") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.reload_search_analyzers") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesReloadSearchAnalyzers) WithContext(v context.Context) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f IndicesReloadSearchAnalyzers) WithAllowNoIndices(v bool) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f IndicesReloadSearchAnalyzers) WithExpandWildcards(v string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f IndicesReloadSearchAnalyzers) WithIgnoreUnavailable(v bool) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithResource - changed resource to reload analyzers from if applicable. +func (f IndicesReloadSearchAnalyzers) WithResource(v string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.Resource = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesReloadSearchAnalyzers) WithPretty() func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesReloadSearchAnalyzers) WithHuman() func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesReloadSearchAnalyzers) WithErrorTrace() func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesReloadSearchAnalyzers) WithFilterPath(v ...string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
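Annotation: this endpoint addresses indices rather than data streams, the index list is required, and the pointer-bool pattern from earlier reappears as AllowNoIndices/IgnoreUnavailable. A sketch, same assumptions; the index name is a placeholder:

	// reloadAnalyzers issues POST /logs-write-000001/_reload_search_analyzers.
	func reloadAnalyzers(ctx context.Context, es *elasticsearch.Client) error {
		res, err := es.Indices.ReloadSearchAnalyzers(
			[]string{"logs-write-000001"},
			es.Indices.ReloadSearchAnalyzers.WithContext(ctx),
			es.Indices.ReloadSearchAnalyzers.WithIgnoreUnavailable(true),
		)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("reload search analyzers: %s", res.Status())
		}
		return nil
	}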
+func (f IndicesReloadSearchAnalyzers) WithHeader(h map[string]string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesReloadSearchAnalyzers) WithOpaqueID(s string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.delete.go new file mode 100644 index 000000000..b862a5aa9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.delete.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newLicenseDeleteFunc(t Transport) LicenseDelete { + return func(o ...func(*LicenseDeleteRequest)) (*Response, error) { + var r = LicenseDeleteRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicenseDelete - Deletes licensing information for the cluster +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html. +type LicenseDelete func(o ...func(*LicenseDeleteRequest)) (*Response, error) + +// LicenseDeleteRequest configures the License Delete API request. +type LicenseDeleteRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r LicenseDeleteRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_license")) + path.WriteString("http://") + path.WriteString("/_license") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.delete") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.delete") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicenseDelete) WithContext(v context.Context) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f LicenseDelete) WithMasterTimeout(v time.Duration) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. +func (f LicenseDelete) WithTimeout(v time.Duration) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicenseDelete) WithPretty() func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicenseDelete) WithHuman() func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicenseDelete) WithErrorTrace() func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
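A sketch of driving the delete endpoint from application code; it assumes the client setup and imports of the earlier sketch, plus fmt and time, and the function name is illustrative.

// deleteLicense reverts the cluster to its default license via
// DELETE /_license.
func deleteLicense(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.License.Delete(
		es.License.Delete.WithContext(ctx),
		es.License.Delete.WithMasterTimeout(30*time.Second),
	)
	if err != nil {
		return err // transport-level failure
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("license delete: %s", res.String())
	}
	return nil
}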
+func (f LicenseDelete) WithFilterPath(v ...string) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicenseDelete) WithHeader(h map[string]string) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LicenseDelete) WithOpaqueID(s string) func(*LicenseDeleteRequest) { + return func(r *LicenseDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get.go new file mode 100644 index 000000000..03c037464 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newLicenseGetFunc(t Transport) LicenseGet { + return func(o ...func(*LicenseGetRequest)) (*Response, error) { + var r = LicenseGetRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicenseGet - Retrieves licensing information for the cluster +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html. +type LicenseGet func(o ...func(*LicenseGetRequest)) (*Response, error) + +// LicenseGetRequest configures the License Get API request. +type LicenseGetRequest struct { + AcceptEnterprise *bool + Local *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r LicenseGetRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_license")) + path.WriteString("http://") + path.WriteString("/_license") + + params = make(map[string]string) + + if r.AcceptEnterprise != nil { + params["accept_enterprise"] = strconv.FormatBool(*r.AcceptEnterprise) + } + + if r.Local != nil { + params["local"] = strconv.FormatBool(*r.Local) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.get") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicenseGet) WithContext(v context.Context) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.ctx = v + } +} + +// WithAcceptEnterprise - supported for backwards compatibility with 7.x. if this param is used it must be set to true. +func (f LicenseGet) WithAcceptEnterprise(v bool) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.AcceptEnterprise = &v + } +} + +// WithLocal - return local information, do not retrieve the state from master node (default: false). +func (f LicenseGet) WithLocal(v bool) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.Local = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicenseGet) WithPretty() func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicenseGet) WithHuman() func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicenseGet) WithErrorTrace() func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
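The pointer-typed fields above (AcceptEnterprise, Local) are deliberately tri-state: a nil pointer means the parameter is not sent at all, which is why each setter stores the address of its argument. A sketch of reading the current license type under the same client assumptions; the {"license": {...}} envelope follows the documented response shape, and encoding/json is assumed among the imports.

// licenseType returns the license type ("basic", "trial", ...) reported
// by GET /_license.
func licenseType(ctx context.Context, es *elasticsearch.Client) (string, error) {
	res, err := es.License.Get(
		es.License.Get.WithContext(ctx),
		es.License.Get.WithLocal(true), // answer from the local node
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.IsError() {
		return "", fmt.Errorf("license get: %s", res.String())
	}
	// Decode only the fields we need from the response envelope.
	var payload struct {
		License struct {
			Type   string `json:"type"`
			Status string `json:"status"`
		} `json:"license"`
	}
	if err := json.NewDecoder(res.Body).Decode(&payload); err != nil {
		return "", err
	}
	return payload.License.Type, nil
}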
+func (f LicenseGet) WithFilterPath(v ...string) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicenseGet) WithHeader(h map[string]string) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LicenseGet) WithOpaqueID(s string) func(*LicenseGetRequest) { + return func(r *LicenseGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_basic_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_basic_status.go new file mode 100644 index 000000000..8179486b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_basic_status.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newLicenseGetBasicStatusFunc(t Transport) LicenseGetBasicStatus { + return func(o ...func(*LicenseGetBasicStatusRequest)) (*Response, error) { + var r = LicenseGetBasicStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicenseGetBasicStatus - Retrieves information about the status of the basic license. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html. +type LicenseGetBasicStatus func(o ...func(*LicenseGetBasicStatusRequest)) (*Response, error) + +// LicenseGetBasicStatusRequest configures the License Get Basic Status API request. +type LicenseGetBasicStatusRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r LicenseGetBasicStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_basic_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_license/basic_status")) + path.WriteString("http://") + path.WriteString("/_license/basic_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.get_basic_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get_basic_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicenseGetBasicStatus) WithContext(v context.Context) func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicenseGetBasicStatus) WithPretty() func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicenseGetBasicStatus) WithHuman() func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicenseGetBasicStatus) WithErrorTrace() func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LicenseGetBasicStatus) WithFilterPath(v ...string) func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicenseGetBasicStatus) WithHeader(h map[string]string) func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
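A sketch of checking downgrade eligibility with this endpoint, under the same assumptions as the previous examples; the JSON field name follows the documented GET /_license/basic_status response.

// eligibleToStartBasic reports whether the cluster may switch to a
// basic license.
func eligibleToStartBasic(ctx context.Context, es *elasticsearch.Client) (bool, error) {
	res, err := es.License.GetBasicStatus(
		es.License.GetBasicStatus.WithContext(ctx),
	)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	if res.IsError() {
		return false, fmt.Errorf("basic status: %s", res.String())
	}
	var payload struct {
		Eligible bool `json:"eligible_to_start_basic"`
	}
	if err := json.NewDecoder(res.Body).Decode(&payload); err != nil {
		return false, err
	}
	return payload.Eligible, nil
}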
+func (f LicenseGetBasicStatus) WithOpaqueID(s string) func(*LicenseGetBasicStatusRequest) { + return func(r *LicenseGetBasicStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_trial_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_trial_status.go new file mode 100644 index 000000000..7983dbf29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.get_trial_status.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newLicenseGetTrialStatusFunc(t Transport) LicenseGetTrialStatus { + return func(o ...func(*LicenseGetTrialStatusRequest)) (*Response, error) { + var r = LicenseGetTrialStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicenseGetTrialStatus - Retrieves information about the status of the trial license. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html. +type LicenseGetTrialStatus func(o ...func(*LicenseGetTrialStatusRequest)) (*Response, error) + +// LicenseGetTrialStatusRequest configures the License Get Trial Status API request. +type LicenseGetTrialStatusRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r LicenseGetTrialStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_trial_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_license/trial_status")) + path.WriteString("http://") + path.WriteString("/_license/trial_status") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.get_trial_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get_trial_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicenseGetTrialStatus) WithContext(v context.Context) func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicenseGetTrialStatus) WithPretty() func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicenseGetTrialStatus) WithHuman() func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicenseGetTrialStatus) WithErrorTrace() func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LicenseGetTrialStatus) WithFilterPath(v ...string) func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicenseGetTrialStatus) WithHeader(h map[string]string) func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
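The trial-status endpoint is symmetric to the basic-status one; a condensed sketch under the same assumptions, with the field name taken from the documented GET /_license/trial_status response.

// eligibleToStartTrial reports whether a trial license may be started.
func eligibleToStartTrial(ctx context.Context, es *elasticsearch.Client) (bool, error) {
	res, err := es.License.GetTrialStatus(
		es.License.GetTrialStatus.WithContext(ctx),
	)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	if res.IsError() {
		return false, fmt.Errorf("trial status: %s", res.String())
	}
	var payload struct {
		Eligible bool `json:"eligible_to_start_trial"`
	}
	if err := json.NewDecoder(res.Body).Decode(&payload); err != nil {
		return false, err
	}
	return payload.Eligible, nil
}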
+func (f LicenseGetTrialStatus) WithOpaqueID(s string) func(*LicenseGetTrialStatusRequest) { + return func(r *LicenseGetTrialStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post.go new file mode 100644 index 000000000..dbb13face --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post.go @@ -0,0 +1,271 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newLicensePostFunc(t Transport) LicensePost { + return func(o ...func(*LicensePostRequest)) (*Response, error) { + var r = LicensePostRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicensePost - Updates the license for the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html. +type LicensePost func(o ...func(*LicensePostRequest)) (*Response, error) + +// LicensePostRequest configures the License Post API request. +type LicensePostRequest struct { + Body io.Reader + + Acknowledge *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r LicensePostRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_license")) + path.WriteString("http://") + path.WriteString("/_license") + + params = make(map[string]string) + + if r.Acknowledge != nil { + params["acknowledge"] = strconv.FormatBool(*r.Acknowledge) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.post") + if reader := instrument.RecordRequestBody(ctx, "license.post", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicensePost) WithContext(v context.Context) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.ctx = v + } +} + +// WithBody - licenses to be installed. +func (f LicensePost) WithBody(v io.Reader) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.Body = v + } +} + +// WithAcknowledge - whether the user has acknowledged acknowledge messages (default: false). +func (f LicensePost) WithAcknowledge(v bool) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.Acknowledge = &v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f LicensePost) WithMasterTimeout(v time.Duration) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. +func (f LicensePost) WithTimeout(v time.Duration) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
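This is the first license endpoint to carry a request body; note in Do above that the JSON content type is set only when the caller has not supplied one, and that the instrumentation hook may wrap the body reader. A sketch of installing a license, where licJSON stands in for a caller-supplied {"licenses": [...]} document and strings is assumed among the imports.

// installLicense uploads a license document via PUT /_license.
func installLicense(ctx context.Context, es *elasticsearch.Client, licJSON string) error {
	res, err := es.License.Post(
		es.License.Post.WithContext(ctx),
		es.License.Post.WithBody(strings.NewReader(licJSON)),
		// Acknowledge accepts the feature changes the API would
		// otherwise only warn about.
		es.License.Post.WithAcknowledge(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("license post: %s", res.String())
	}
	return nil
}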
+func (f LicensePost) WithPretty() func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicensePost) WithHuman() func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicensePost) WithErrorTrace() func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LicensePost) WithFilterPath(v ...string) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicensePost) WithHeader(h map[string]string) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LicensePost) WithOpaqueID(s string) func(*LicensePostRequest) { + return func(r *LicensePostRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_basic.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_basic.go new file mode 100644 index 000000000..fda37250e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_basic.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newLicensePostStartBasicFunc(t Transport) LicensePostStartBasic { + return func(o ...func(*LicensePostStartBasicRequest)) (*Response, error) { + var r = LicensePostStartBasicRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicensePostStartBasic - Starts an indefinite basic license. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html. +type LicensePostStartBasic func(o ...func(*LicensePostStartBasicRequest)) (*Response, error) + +// LicensePostStartBasicRequest configures the License Post Start Basic API request. 
+type LicensePostStartBasicRequest struct { + Acknowledge *bool + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r LicensePostStartBasicRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_basic") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_license/start_basic")) + path.WriteString("http://") + path.WriteString("/_license/start_basic") + + params = make(map[string]string) + + if r.Acknowledge != nil { + params["acknowledge"] = strconv.FormatBool(*r.Acknowledge) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.post_start_basic") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post_start_basic") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LicensePostStartBasic) WithContext(v context.Context) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.ctx = v + } +} + +// WithAcknowledge - whether the user has acknowledged acknowledge messages (default: false). +func (f LicensePostStartBasic) WithAcknowledge(v bool) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.Acknowledge = &v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f LicensePostStartBasic) WithMasterTimeout(v time.Duration) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. 
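The two self-service license transitions share this calling shape; start_trial, defined in the next file, differs only in its endpoint and in mapping DocumentType onto the type query parameter. A combined sketch under the same client assumptions, with both function names illustrative:

// startBasic switches the cluster to a basic license.
func startBasic(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.License.PostStartBasic(
		es.License.PostStartBasic.WithContext(ctx),
		es.License.PostStartBasic.WithAcknowledge(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("start basic: %s", res.String())
	}
	return nil
}

// startTrial activates a time-limited trial license.
func startTrial(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.License.PostStartTrial(
		es.License.PostStartTrial.WithContext(ctx),
		es.License.PostStartTrial.WithAcknowledge(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("start trial: %s", res.String())
	}
	return nil
}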
+func (f LicensePostStartBasic) WithTimeout(v time.Duration) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicensePostStartBasic) WithPretty() func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicensePostStartBasic) WithHuman() func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicensePostStartBasic) WithErrorTrace() func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LicensePostStartBasic) WithFilterPath(v ...string) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicensePostStartBasic) WithHeader(h map[string]string) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LicensePostStartBasic) WithOpaqueID(s string) func(*LicensePostStartBasicRequest) { + return func(r *LicensePostStartBasicRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_trial.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_trial.go new file mode 100644 index 000000000..af68a61a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.license.post_start_trial.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newLicensePostStartTrialFunc(t Transport) LicensePostStartTrial { + return func(o ...func(*LicensePostStartTrialRequest)) (*Response, error) { + var r = LicensePostStartTrialRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LicensePostStartTrial - starts a limited time trial license. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html. +type LicensePostStartTrial func(o ...func(*LicensePostStartTrialRequest)) (*Response, error) + +// LicensePostStartTrialRequest configures the License Post Start Trial API request. +type LicensePostStartTrialRequest struct { + Acknowledge *bool + MasterTimeout time.Duration + DocumentType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r LicensePostStartTrialRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_trial") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_license/start_trial")) + path.WriteString("http://") + path.WriteString("/_license/start_trial") + + params = make(map[string]string) + + if r.Acknowledge != nil { + params["acknowledge"] = strconv.FormatBool(*r.Acknowledge) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.DocumentType != "" { + params["type"] = r.DocumentType + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "license.post_start_trial") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post_start_trial") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext 
sets the request context. +func (f LicensePostStartTrial) WithContext(v context.Context) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.ctx = v + } +} + +// WithAcknowledge - whether the user has acknowledged acknowledge messages (default: false). +func (f LicensePostStartTrial) WithAcknowledge(v bool) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.Acknowledge = &v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f LicensePostStartTrial) WithMasterTimeout(v time.Duration) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.MasterTimeout = v + } +} + +// WithDocumentType - the type of trial license to generate (default: "trial"). +func (f LicensePostStartTrial) WithDocumentType(v string) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.DocumentType = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LicensePostStartTrial) WithPretty() func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LicensePostStartTrial) WithHuman() func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LicensePostStartTrial) WithErrorTrace() func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LicensePostStartTrial) WithFilterPath(v ...string) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LicensePostStartTrial) WithHeader(h map[string]string) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LicensePostStartTrial) WithOpaqueID(s string) func(*LicensePostStartTrialRequest) { + return func(r *LicensePostStartTrialRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.delete_pipeline.go new file mode 100644 index 000000000..a63506959 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.delete_pipeline.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newLogstashDeletePipelineFunc(t Transport) LogstashDeletePipeline { + return func(id string, o ...func(*LogstashDeletePipelineRequest)) (*Response, error) { + var r = LogstashDeletePipelineRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LogstashDeletePipeline - Deletes Logstash Pipelines used by Central Management +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html. +type LogstashDeletePipeline func(id string, o ...func(*LogstashDeletePipelineRequest)) (*Response, error) + +// LogstashDeletePipelineRequest configures the Logstash Delete Pipeline API request. +type LogstashDeletePipelineRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r LogstashDeletePipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.delete_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_logstash") + 1 + len("pipeline") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.delete_pipeline") + } + 
res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "logstash.delete_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LogstashDeletePipeline) WithContext(v context.Context) func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LogstashDeletePipeline) WithPretty() func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LogstashDeletePipeline) WithHuman() func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LogstashDeletePipeline) WithErrorTrace() func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LogstashDeletePipeline) WithFilterPath(v ...string) func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LogstashDeletePipeline) WithHeader(h map[string]string) func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LogstashDeletePipeline) WithOpaqueID(s string) func(*LogstashDeletePipelineRequest) { + return func(r *LogstashDeletePipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.get_pipeline.go new file mode 100644 index 000000000..20b69e6ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.get_pipeline.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
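Unlike the license endpoints, the pipeline ID here is a required positional argument rather than an option, mirroring the generated function signature. A sketch, with the ID purely illustrative and the Logstash namespace exposed on the client as es.Logstash:

// deleteLogstashPipeline removes a centrally managed pipeline via
// DELETE /_logstash/pipeline/{id}.
func deleteLogstashPipeline(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.Logstash.DeletePipeline(
		"my-pipeline", // hypothetical pipeline ID
		es.Logstash.DeletePipeline.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("delete pipeline: %s", res.String())
	}
	return nil
}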
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newLogstashGetPipelineFunc(t Transport) LogstashGetPipeline { + return func(o ...func(*LogstashGetPipelineRequest)) (*Response, error) { + var r = LogstashGetPipelineRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LogstashGetPipeline - Retrieves Logstash Pipelines used by Central Management +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html. +type LogstashGetPipeline func(o ...func(*LogstashGetPipelineRequest)) (*Response, error) + +// LogstashGetPipelineRequest configures the Logstash Get Pipeline API request. +type LogstashGetPipelineRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r LogstashGetPipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.get_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_logstash") + 1 + len("pipeline") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.get_pipeline") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "logstash.get_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request 
context. +func (f LogstashGetPipeline) WithContext(v context.Context) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.ctx = v + } +} + +// WithDocumentID - a list of pipeline ids. +func (f LogstashGetPipeline) WithDocumentID(v string) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LogstashGetPipeline) WithPretty() func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LogstashGetPipeline) WithHuman() func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LogstashGetPipeline) WithErrorTrace() func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LogstashGetPipeline) WithFilterPath(v ...string) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LogstashGetPipeline) WithHeader(h map[string]string) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LogstashGetPipeline) WithOpaqueID(s string) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.put_pipeline.go new file mode 100644 index 000000000..520097b3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.logstash.put_pipeline.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
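WithDocumentID is the only endpoint-specific option on the get-pipeline request; leaving DocumentID empty makes the generated path resolve to /_logstash/pipeline, which lists every pipeline. A hedged sketch, reusing a client built as in the earlier example (the id is an assumption):

package esexamples

import (
	"context"
	"fmt"
	"io"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// getPipeline fetches one pipeline by id, or all pipelines when id is "".
func getPipeline(ctx context.Context, es *elasticsearch.Client, id string) ([]byte, error) {
	res, err := es.Logstash.GetPipeline(
		es.Logstash.GetPipeline.WithContext(ctx),
		es.Logstash.GetPipeline.WithDocumentID(id), // "" lists all pipelines
		es.Logstash.GetPipeline.WithPretty(),
	)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.IsError() {
		return nil, fmt.Errorf("get pipeline: %s", res.String())
	}
	return io.ReadAll(res.Body)
}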
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newLogstashPutPipelineFunc(t Transport) LogstashPutPipeline { + return func(id string, body io.Reader, o ...func(*LogstashPutPipelineRequest)) (*Response, error) { + var r = LogstashPutPipelineRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// LogstashPutPipeline - Adds and updates Logstash Pipelines used for Central Management +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html. +type LogstashPutPipeline func(id string, body io.Reader, o ...func(*LogstashPutPipelineRequest)) (*Response, error) + +// LogstashPutPipelineRequest configures the Logstash Put Pipeline API request. +type LogstashPutPipelineRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r LogstashPutPipelineRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.put_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_logstash") + 1 + len("pipeline") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.put_pipeline") + if reader := instrument.RecordRequestBody(ctx, "logstash.put_pipeline", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", 
"logstash.put_pipeline") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f LogstashPutPipeline) WithContext(v context.Context) func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f LogstashPutPipeline) WithPretty() func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f LogstashPutPipeline) WithHuman() func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f LogstashPutPipeline) WithErrorTrace() func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f LogstashPutPipeline) WithFilterPath(v ...string) func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f LogstashPutPipeline) WithHeader(h map[string]string) func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f LogstashPutPipeline) WithOpaqueID(s string) func(*LogstashPutPipelineRequest) { + return func(r *LogstashPutPipelineRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.deprecations.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.deprecations.go new file mode 100644 index 000000000..2db7de447 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.deprecations.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMigrationDeprecationsFunc(t Transport) MigrationDeprecations { + return func(o ...func(*MigrationDeprecationsRequest)) (*Response, error) { + var r = MigrationDeprecationsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MigrationDeprecations - Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html. +type MigrationDeprecations func(o ...func(*MigrationDeprecationsRequest)) (*Response, error) + +// MigrationDeprecationsRequest configures the Migration Deprecations API request. +type MigrationDeprecationsRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MigrationDeprecationsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.deprecations") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_migration") + 1 + len("deprecations")) + path.WriteString("http://") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + } + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("deprecations") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "migration.deprecations") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "migration.deprecations") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: 
res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MigrationDeprecations) WithContext(v context.Context) func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.ctx = v + } +} + +// WithIndex - index pattern. +func (f MigrationDeprecations) WithIndex(v string) func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.Index = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MigrationDeprecations) WithPretty() func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MigrationDeprecations) WithHuman() func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MigrationDeprecations) WithErrorTrace() func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MigrationDeprecations) WithFilterPath(v ...string) func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MigrationDeprecations) WithHeader(h map[string]string) func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MigrationDeprecations) WithOpaqueID(s string) func(*MigrationDeprecationsRequest) { + return func(r *MigrationDeprecationsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.get_feature_upgrade_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.get_feature_upgrade_status.go new file mode 100644 index 000000000..6115f7a55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.get_feature_upgrade_status.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
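The deprecations call takes an optional index pattern; without WithIndex the generated path is /_migration/deprecations and the report is cluster-wide. A sketch that decodes the response into a generic map (the index pattern is illustrative):

package esexamples

import (
	"context"
	"encoding/json"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// deprecations lists deprecated settings in use, optionally scoped by index pattern.
func deprecations(ctx context.Context, es *elasticsearch.Client) (map[string]any, error) {
	res, err := es.Migration.Deprecations(
		es.Migration.Deprecations.WithContext(ctx),
		es.Migration.Deprecations.WithIndex("logs-*"), // illustrative pattern
	)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.IsError() {
		return nil, fmt.Errorf("deprecations: %s", res.String())
	}

	var report map[string]any
	if err := json.NewDecoder(res.Body).Decode(&report); err != nil {
		return nil, err
	}
	return report, nil
}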
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMigrationGetFeatureUpgradeStatusFunc(t Transport) MigrationGetFeatureUpgradeStatus { + return func(o ...func(*MigrationGetFeatureUpgradeStatusRequest)) (*Response, error) { + var r = MigrationGetFeatureUpgradeStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MigrationGetFeatureUpgradeStatus - Find out whether system features need to be upgraded or not +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html. +type MigrationGetFeatureUpgradeStatus func(o ...func(*MigrationGetFeatureUpgradeStatusRequest)) (*Response, error) + +// MigrationGetFeatureUpgradeStatusRequest configures the Migration Get Feature Upgrade Status API request. +type MigrationGetFeatureUpgradeStatusRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MigrationGetFeatureUpgradeStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.get_feature_upgrade_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_migration/system_features")) + path.WriteString("http://") + path.WriteString("/_migration/system_features") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "migration.get_feature_upgrade_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "migration.get_feature_upgrade_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f MigrationGetFeatureUpgradeStatus) WithContext(v context.Context) func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MigrationGetFeatureUpgradeStatus) WithPretty() func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MigrationGetFeatureUpgradeStatus) WithHuman() func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MigrationGetFeatureUpgradeStatus) WithErrorTrace() func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MigrationGetFeatureUpgradeStatus) WithFilterPath(v ...string) func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MigrationGetFeatureUpgradeStatus) WithHeader(h map[string]string) func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MigrationGetFeatureUpgradeStatus) WithOpaqueID(s string) func(*MigrationGetFeatureUpgradeStatusRequest) { + return func(r *MigrationGetFeatureUpgradeStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.post_feature_upgrade.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.post_feature_upgrade.go new file mode 100644 index 000000000..5fcc61375 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.migration.post_feature_upgrade.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
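Feature-upgrade status is a fixed-path GET (/_migration/system_features) with no endpoint-specific options, so a call reduces to the shared context and formatting flags. A minimal sketch under the same client assumptions as above:

package esexamples

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// featureUpgradeStatus reports whether system features still need migration.
func featureUpgradeStatus(ctx context.Context, es *elasticsearch.Client) (string, error) {
	res, err := es.Migration.GetFeatureUpgradeStatus(
		es.Migration.GetFeatureUpgradeStatus.WithContext(ctx),
		es.Migration.GetFeatureUpgradeStatus.WithHuman(),
	)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.IsError() {
		return "", fmt.Errorf("feature upgrade status: %s", res.String())
	}
	return res.String(), nil
}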
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMigrationPostFeatureUpgradeFunc(t Transport) MigrationPostFeatureUpgrade { + return func(o ...func(*MigrationPostFeatureUpgradeRequest)) (*Response, error) { + var r = MigrationPostFeatureUpgradeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MigrationPostFeatureUpgrade - Begin upgrades for system features +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html. +type MigrationPostFeatureUpgrade func(o ...func(*MigrationPostFeatureUpgradeRequest)) (*Response, error) + +// MigrationPostFeatureUpgradeRequest configures the Migration Post Feature Upgrade API request. +type MigrationPostFeatureUpgradeRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MigrationPostFeatureUpgradeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.post_feature_upgrade") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_migration/system_features")) + path.WriteString("http://") + path.WriteString("/_migration/system_features") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "migration.post_feature_upgrade") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "migration.post_feature_upgrade") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f MigrationPostFeatureUpgrade) WithContext(v context.Context) func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MigrationPostFeatureUpgrade) WithPretty() func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MigrationPostFeatureUpgrade) WithHuman() func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MigrationPostFeatureUpgrade) WithErrorTrace() func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MigrationPostFeatureUpgrade) WithFilterPath(v ...string) func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MigrationPostFeatureUpgrade) WithHeader(h map[string]string) func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MigrationPostFeatureUpgrade) WithOpaqueID(s string) func(*MigrationPostFeatureUpgradeRequest) { + return func(r *MigrationPostFeatureUpgradeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go new file mode 100644 index 000000000..82fec8605 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
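The POST counterpart hits the same /_migration/system_features path to kick off the upgrade; a typical flow checks the status endpoint first, triggers the upgrade, then polls status until migration completes. A hedged sketch of the trigger call:

package esexamples

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// startFeatureUpgrade begins system-feature migration; pair it with the
// status sketch above to poll for completion.
func startFeatureUpgrade(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.Migration.PostFeatureUpgrade(
		es.Migration.PostFeatureUpgrade.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("feature upgrade: %s", res.String())
	}
	return nil
}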
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLClearTrainedModelDeploymentCacheFunc(t Transport) MLClearTrainedModelDeploymentCache { + return func(model_id string, o ...func(*MLClearTrainedModelDeploymentCacheRequest)) (*Response, error) { + var r = MLClearTrainedModelDeploymentCacheRequest{ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLClearTrainedModelDeploymentCache - Clear the cached results from a trained model deployment +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html. +type MLClearTrainedModelDeploymentCache func(model_id string, o ...func(*MLClearTrainedModelDeploymentCacheRequest)) (*Response, error) + +// MLClearTrainedModelDeploymentCacheRequest configures the ML Clear Trained Model Deployment Cache API request. +type MLClearTrainedModelDeploymentCacheRequest struct { + ModelID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLClearTrainedModelDeploymentCacheRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.clear_trained_model_deployment_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("deployment") + 1 + len("cache") + 1 + len("_clear")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("_clear") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.clear_trained_model_deployment_cache") + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.clear_trained_model_deployment_cache") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLClearTrainedModelDeploymentCache) WithContext(v context.Context) func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLClearTrainedModelDeploymentCache) WithPretty() func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLClearTrainedModelDeploymentCache) WithHuman() func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLClearTrainedModelDeploymentCache) WithErrorTrace() func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLClearTrainedModelDeploymentCache) WithFilterPath(v ...string) func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLClearTrainedModelDeploymentCache) WithHeader(h map[string]string) func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLClearTrainedModelDeploymentCache) WithOpaqueID(s string) func(*MLClearTrainedModelDeploymentCacheRequest) { + return func(r *MLClearTrainedModelDeploymentCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.close_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.close_job.go new file mode 100644 index 000000000..e61cb6437 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.close_job.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLCloseJobFunc(t Transport) MLCloseJob { + return func(job_id string, o ...func(*MLCloseJobRequest)) (*Response, error) { + var r = MLCloseJobRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLCloseJob - Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html. +type MLCloseJob func(job_id string, o ...func(*MLCloseJobRequest)) (*Response, error) + +// MLCloseJobRequest configures the ML Close Job API request. +type MLCloseJobRequest struct { + Body io.Reader + + JobID string + + AllowNoMatch *bool + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLCloseJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.close_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_close")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_close") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = 
req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.close_job") + if reader := instrument.RecordRequestBody(ctx, "ml.close_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.close_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLCloseJob) WithContext(v context.Context) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.ctx = v + } +} + +// WithBody - The URL params optionally sent in the body. +func (f MLCloseJob) WithBody(v io.Reader) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.Body = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs. (this includes `_all` string or when no jobs have been specified). +func (f MLCloseJob) WithAllowNoMatch(v bool) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.AllowNoMatch = &v + } +} + +// WithForce - true if the job should be forcefully closed. +func (f MLCloseJob) WithForce(v bool) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait until a job has closed. default to 30 minutes. +func (f MLCloseJob) WithTimeout(v time.Duration) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLCloseJob) WithPretty() func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLCloseJob) WithHuman() func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLCloseJob) WithErrorTrace() func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLCloseJob) WithFilterPath(v ...string) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLCloseJob) WithHeader(h map[string]string) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLCloseJob) WithOpaqueID(s string) func(*MLCloseJobRequest) { + return func(r *MLCloseJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar.go new file mode 100644 index 000000000..7e50e6b3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteCalendarFunc(t Transport) MLDeleteCalendar { + return func(calendar_id string, o ...func(*MLDeleteCalendarRequest)) (*Response, error) { + var r = MLDeleteCalendarRequest{CalendarID: calendar_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteCalendar - Deletes a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html. +type MLDeleteCalendar func(calendar_id string, o ...func(*MLDeleteCalendarRequest)) (*Response, error) + +// MLDeleteCalendarRequest configures the ML Delete Calendar API request. +type MLDeleteCalendarRequest struct { + CalendarID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLDeleteCalendarRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteCalendar) WithContext(v context.Context) func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteCalendar) WithPretty() func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteCalendar) WithHuman() func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteCalendar) WithErrorTrace() func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteCalendar) WithFilterPath(v ...string) func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f MLDeleteCalendar) WithHeader(h map[string]string) func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteCalendar) WithOpaqueID(s string) func(*MLDeleteCalendarRequest) { + return func(r *MLDeleteCalendarRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_event.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_event.go new file mode 100644 index 000000000..99bbb7704 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_event.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteCalendarEventFunc(t Transport) MLDeleteCalendarEvent { + return func(calendar_id string, event_id string, o ...func(*MLDeleteCalendarEventRequest)) (*Response, error) { + var r = MLDeleteCalendarEventRequest{CalendarID: calendar_id, EventID: event_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteCalendarEvent - Deletes scheduled events from a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html. +type MLDeleteCalendarEvent func(calendar_id string, event_id string, o ...func(*MLDeleteCalendarEventRequest)) (*Response, error) + +// MLDeleteCalendarEventRequest configures the ML Delete Calendar Event API request. +type MLDeleteCalendarEventRequest struct { + CalendarID string + EventID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLDeleteCalendarEventRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID) + 1 + len("events") + 1 + len(r.EventID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + path.WriteString("/") + path.WriteString("events") + path.WriteString("/") + path.WriteString(r.EventID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "event_id", r.EventID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar_event") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar_event") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteCalendarEvent) WithContext(v context.Context) func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteCalendarEvent) WithPretty() func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteCalendarEvent) WithHuman() func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteCalendarEvent) WithErrorTrace() func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f MLDeleteCalendarEvent) WithFilterPath(v ...string) func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteCalendarEvent) WithHeader(h map[string]string) func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteCalendarEvent) WithOpaqueID(s string) func(*MLDeleteCalendarEventRequest) { + return func(r *MLDeleteCalendarEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_job.go new file mode 100644 index 000000000..6aa8d1f7f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_calendar_job.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteCalendarJobFunc(t Transport) MLDeleteCalendarJob { + return func(calendar_id string, job_id string, o ...func(*MLDeleteCalendarJobRequest)) (*Response, error) { + var r = MLDeleteCalendarJobRequest{CalendarID: calendar_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteCalendarJob - Deletes anomaly detection jobs from a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html. +type MLDeleteCalendarJob func(calendar_id string, job_id string, o ...func(*MLDeleteCalendarJobRequest)) (*Response, error) + +// MLDeleteCalendarJobRequest configures the ML Delete Calendar Job API request. +type MLDeleteCalendarJobRequest struct { + CalendarID string + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLDeleteCalendarJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID) + 1 + len("jobs") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + path.WriteString("/") + path.WriteString("jobs") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteCalendarJob) WithContext(v context.Context) func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteCalendarJob) WithPretty() func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteCalendarJob) WithHuman() func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteCalendarJob) WithErrorTrace() func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
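A similar sketch (again, not vendored content) for the ML Delete Calendar Job API, whose `(calendar_id, job_id, options...)` signature is confirmed by the constructor above. It shows the functional-option pattern these files define: `WithContext` bounds the call, `WithPretty` only affects response formatting. The client `es` and the IDs are assumed placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// Bound the API call with a context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Placeholder IDs; options are methods on the API function value itself.
	res, err := es.ML.DeleteCalendarJob(
		"planned-outages", "weblog-anomalies",
		es.ML.DeleteCalendarJob.WithContext(ctx),
		es.ML.DeleteCalendarJob.WithPretty(),
	)
	if err != nil {
		log.Fatalf("ml.delete_calendar_job: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}
```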
+func (f MLDeleteCalendarJob) WithFilterPath(v ...string) func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteCalendarJob) WithHeader(h map[string]string) func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteCalendarJob) WithOpaqueID(s string) func(*MLDeleteCalendarJobRequest) { + return func(r *MLDeleteCalendarJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_data_frame_analytics.go new file mode 100644 index 000000000..931ecf280 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_data_frame_analytics.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLDeleteDataFrameAnalyticsFunc(t Transport) MLDeleteDataFrameAnalytics { + return func(id string, o ...func(*MLDeleteDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLDeleteDataFrameAnalyticsRequest{ID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteDataFrameAnalytics - Deletes an existing data frame analytics job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html. +type MLDeleteDataFrameAnalytics func(id string, o ...func(*MLDeleteDataFrameAnalyticsRequest)) (*Response, error) + +// MLDeleteDataFrameAnalyticsRequest configures the ML Delete Data Frame Analytics API request. +type MLDeleteDataFrameAnalyticsRequest struct { + ID string + + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLDeleteDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + + params = make(map[string]string) + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_data_frame_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteDataFrameAnalytics) WithContext(v context.Context) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithForce - true if the job should be forcefully deleted. +func (f MLDeleteDataFrameAnalytics) WithForce(v bool) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait until a job is deleted. defaults to 1 minute. +func (f MLDeleteDataFrameAnalytics) WithTimeout(v time.Duration) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteDataFrameAnalytics) WithPretty() func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
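For the ML Delete Data Frame Analytics API, a sketch exercising the two request-specific options defined in this file, `WithForce` and `WithTimeout`, which the `Do` method above encodes as the `force` and `timeout` query parameters. The analytics job ID is a placeholder; the `es` client is assumed as in the earlier sketches.

```go
package main

import (
	"log"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// Placeholder analytics job ID. Force and Timeout become the force and
	// timeout query parameters built in Do above.
	res, err := es.ML.DeleteDataFrameAnalytics(
		"churn-model",
		es.ML.DeleteDataFrameAnalytics.WithForce(true),
		es.ML.DeleteDataFrameAnalytics.WithTimeout(2*time.Minute),
	)
	if err != nil {
		log.Fatalf("ml.delete_data_frame_analytics: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Printf("delete failed: %s", res.String())
	}
}
```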
+func (f MLDeleteDataFrameAnalytics) WithHuman() func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteDataFrameAnalytics) WithErrorTrace() func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteDataFrameAnalytics) WithFilterPath(v ...string) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteDataFrameAnalytics) WithHeader(h map[string]string) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteDataFrameAnalytics) WithOpaqueID(s string) func(*MLDeleteDataFrameAnalyticsRequest) { + return func(r *MLDeleteDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_datafeed.go new file mode 100644 index 000000000..1d33eab05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_datafeed.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLDeleteDatafeedFunc(t Transport) MLDeleteDatafeed { + return func(datafeed_id string, o ...func(*MLDeleteDatafeedRequest)) (*Response, error) { + var r = MLDeleteDatafeedRequest{DatafeedID: datafeed_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteDatafeed - Deletes an existing datafeed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html. +type MLDeleteDatafeed func(datafeed_id string, o ...func(*MLDeleteDatafeedRequest)) (*Response, error) + +// MLDeleteDatafeedRequest configures the ML Delete Datafeed API request. 
+type MLDeleteDatafeedRequest struct { + DatafeedID string + + Force *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + + params = make(map[string]string) + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_datafeed") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteDatafeed) WithContext(v context.Context) func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.ctx = v + } +} + +// WithForce - true if the datafeed should be forcefully deleted. +func (f MLDeleteDatafeed) WithForce(v bool) func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.Force = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteDatafeed) WithPretty() func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteDatafeed) WithHuman() func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
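A short sketch for the ML Delete Datafeed API (editorial, not part of the vendored file): a single required datafeed ID plus the `WithForce` option documented below, which sets the `force=true` query parameter. The datafeed ID is a placeholder and the `es` client is assumed as before.

```go
package main

import (
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// Placeholder datafeed ID; WithForce requests forceful deletion per the
	// option's doc comment in this file.
	res, err := es.ML.DeleteDatafeed(
		"datafeed-weblogs",
		es.ML.DeleteDatafeed.WithForce(true),
	)
	if err != nil {
		log.Fatalf("ml.delete_datafeed: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.Status())
}
```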
+func (f MLDeleteDatafeed) WithErrorTrace() func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteDatafeed) WithFilterPath(v ...string) func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteDatafeed) WithHeader(h map[string]string) func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteDatafeed) WithOpaqueID(s string) func(*MLDeleteDatafeedRequest) { + return func(r *MLDeleteDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_expired_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_expired_data.go new file mode 100644 index 000000000..1873af362 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_expired_data.go @@ -0,0 +1,278 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLDeleteExpiredDataFunc(t Transport) MLDeleteExpiredData { + return func(o ...func(*MLDeleteExpiredDataRequest)) (*Response, error) { + var r = MLDeleteExpiredDataRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteExpiredData - Deletes expired and unused machine learning data. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html. +type MLDeleteExpiredData func(o ...func(*MLDeleteExpiredDataRequest)) (*Response, error) + +// MLDeleteExpiredDataRequest configures the ML Delete Expired Data API request. +type MLDeleteExpiredDataRequest struct { + Body io.Reader + + JobID string + + RequestsPerSecond *int + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLDeleteExpiredDataRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_expired_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("_delete_expired_data") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("_delete_expired_data") + if r.JobID != "" { + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + } + + params = make(map[string]string) + + if r.RequestsPerSecond != nil { + params["requests_per_second"] = strconv.FormatInt(int64(*r.RequestsPerSecond), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_expired_data") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_expired_data", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_expired_data") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteExpiredData) WithContext(v context.Context) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.ctx = v + } +} + +// WithBody - deleting expired data parameters. +func (f MLDeleteExpiredData) WithBody(v io.Reader) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.Body = v + } +} + +// WithJobID - the ID of the job(s) to perform expired data hygiene for. +func (f MLDeleteExpiredData) WithJobID(v string) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.JobID = v + } +} + +// WithRequestsPerSecond - the desired requests per second for the deletion processes.. 
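The ML Delete Expired Data API is unusual in this set: it takes no required arguments (with no options it prunes expired results for all jobs) and optionally accepts a body. A hedged sketch using the `WithJobID` and `WithRequestsPerSecond` options defined in this file; the job ID and rate are placeholders, the `es` client is assumed as above.

```go
package main

import (
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// No required arguments: scoping to one job and throttling the deletion are
	// both optional. The rate is sent as the requests_per_second parameter.
	res, err := es.ML.DeleteExpiredData(
		es.ML.DeleteExpiredData.WithJobID("weblog-anomalies"),
		es.ML.DeleteExpiredData.WithRequestsPerSecond(100),
	)
	if err != nil {
		log.Fatalf("ml.delete_expired_data: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}
```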
+func (f MLDeleteExpiredData) WithRequestsPerSecond(v int) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.RequestsPerSecond = &v + } +} + +// WithTimeout - how long can the underlying delete processes run until they are canceled. +func (f MLDeleteExpiredData) WithTimeout(v time.Duration) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteExpiredData) WithPretty() func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteExpiredData) WithHuman() func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteExpiredData) WithErrorTrace() func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteExpiredData) WithFilterPath(v ...string) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteExpiredData) WithHeader(h map[string]string) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteExpiredData) WithOpaqueID(s string) func(*MLDeleteExpiredDataRequest) { + return func(r *MLDeleteExpiredDataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_filter.go new file mode 100644 index 000000000..b4329fc36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_filter.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteFilterFunc(t Transport) MLDeleteFilter { + return func(filter_id string, o ...func(*MLDeleteFilterRequest)) (*Response, error) { + var r = MLDeleteFilterRequest{FilterID: filter_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteFilter - Deletes a filter. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html. +type MLDeleteFilter func(filter_id string, o ...func(*MLDeleteFilterRequest)) (*Response, error) + +// MLDeleteFilterRequest configures the ML Delete Filter API request. +type MLDeleteFilterRequest struct { + FilterID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteFilterRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("filters") + 1 + len(r.FilterID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + path.WriteString(r.FilterID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "filter_id", r.FilterID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_filter") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_filter") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
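Last in this group, a sketch for the ML Delete Filter API using the `WithOpaqueID` option defined just below, which stamps the `X-Opaque-Id` header on the request so the call can be correlated in cluster logs and task listings. As with the other sketches, this is an editorial example, not vendored content, and the filter ID and opaque ID are placeholders.

```go
package main

import (
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	// Placeholder filter ID; WithOpaqueID sets the X-Opaque-Id header for
	// request correlation on the server side.
	res, err := es.ML.DeleteFilter(
		"safe-domains",
		es.ML.DeleteFilter.WithOpaqueID("ml-cleanup-job"),
	)
	if err != nil {
		log.Fatalf("ml.delete_filter: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("unexpected status: %s", res.Status())
	}
}
```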
+func (f MLDeleteFilter) WithContext(v context.Context) func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteFilter) WithPretty() func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteFilter) WithHuman() func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteFilter) WithErrorTrace() func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteFilter) WithFilterPath(v ...string) func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteFilter) WithHeader(h map[string]string) func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteFilter) WithOpaqueID(s string) func(*MLDeleteFilterRequest) { + return func(r *MLDeleteFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_forecast.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_forecast.go new file mode 100644 index 000000000..8f6c342f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_forecast.go @@ -0,0 +1,269 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLDeleteForecastFunc(t Transport) MLDeleteForecast { + return func(job_id string, o ...func(*MLDeleteForecastRequest)) (*Response, error) { + var r = MLDeleteForecastRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteForecast - Deletes forecasts from a machine learning job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html. 
+type MLDeleteForecast func(job_id string, o ...func(*MLDeleteForecastRequest)) (*Response, error) + +// MLDeleteForecastRequest configures the ML Delete Forecast API request. +type MLDeleteForecastRequest struct { + ForecastID string + JobID string + + AllowNoForecasts *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteForecastRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_forecast") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_forecast") + 1 + len(r.ForecastID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_forecast") + if r.ForecastID != "" { + path.WriteString("/") + path.WriteString(r.ForecastID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "forecast_id", r.ForecastID) + } + } + + params = make(map[string]string) + + if r.AllowNoForecasts != nil { + params["allow_no_forecasts"] = strconv.FormatBool(*r.AllowNoForecasts) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_forecast") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_forecast") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteForecast) WithContext(v context.Context) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.ctx = v + } +} + +// WithForecastID - the ID of the forecast to delete, can be comma delimited list. 
leaving blank implies `_all`. +func (f MLDeleteForecast) WithForecastID(v string) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.ForecastID = v + } +} + +// WithAllowNoForecasts - whether to ignore if `_all` matches no forecasts. +func (f MLDeleteForecast) WithAllowNoForecasts(v bool) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.AllowNoForecasts = &v + } +} + +// WithTimeout - controls the time to wait until the forecast(s) are deleted. default to 30 seconds. +func (f MLDeleteForecast) WithTimeout(v time.Duration) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteForecast) WithPretty() func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteForecast) WithHuman() func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteForecast) WithErrorTrace() func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteForecast) WithFilterPath(v ...string) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteForecast) WithHeader(h map[string]string) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteForecast) WithOpaqueID(s string) func(*MLDeleteForecastRequest) { + return func(r *MLDeleteForecastRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_job.go new file mode 100644 index 000000000..7284cdf47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_job.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLDeleteJobFunc(t Transport) MLDeleteJob { + return func(job_id string, o ...func(*MLDeleteJobRequest)) (*Response, error) { + var r = MLDeleteJobRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteJob - Deletes an existing anomaly detection job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html. +type MLDeleteJob func(job_id string, o ...func(*MLDeleteJobRequest)) (*Response, error) + +// MLDeleteJobRequest configures the ML Delete Job API request. +type MLDeleteJobRequest struct { + JobID string + + DeleteUserAnnotations *bool + Force *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + + params = make(map[string]string) + + if r.DeleteUserAnnotations != nil { + params["delete_user_annotations"] = strconv.FormatBool(*r.DeleteUserAnnotations) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_job") + } + if err != nil { + if instrument, 
ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteJob) WithContext(v context.Context) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.ctx = v + } +} + +// WithDeleteUserAnnotations - should annotations added by the user be deleted. +func (f MLDeleteJob) WithDeleteUserAnnotations(v bool) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.DeleteUserAnnotations = &v + } +} + +// WithForce - true if the job should be forcefully deleted. +func (f MLDeleteJob) WithForce(v bool) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.Force = &v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. +func (f MLDeleteJob) WithWaitForCompletion(v bool) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteJob) WithPretty() func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteJob) WithHuman() func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteJob) WithErrorTrace() func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteJob) WithFilterPath(v ...string) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteJob) WithHeader(h map[string]string) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteJob) WithOpaqueID(s string) func(*MLDeleteJobRequest) { + return func(r *MLDeleteJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_model_snapshot.go new file mode 100644 index 000000000..22389abbb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_model_snapshot.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteModelSnapshotFunc(t Transport) MLDeleteModelSnapshot { + return func(snapshot_id string, job_id string, o ...func(*MLDeleteModelSnapshotRequest)) (*Response, error) { + var r = MLDeleteModelSnapshotRequest{SnapshotID: snapshot_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteModelSnapshot - Deletes an existing model snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html. +type MLDeleteModelSnapshot func(snapshot_id string, job_id string, o ...func(*MLDeleteModelSnapshotRequest)) (*Response, error) + +// MLDeleteModelSnapshotRequest configures the ML Delete Model Snapshot API request. +type MLDeleteModelSnapshotRequest struct { + JobID string + SnapshotID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteModelSnapshotRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"ml.delete_model_snapshot") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_model_snapshot") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteModelSnapshot) WithContext(v context.Context) func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteModelSnapshot) WithPretty() func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteModelSnapshot) WithHuman() func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteModelSnapshot) WithErrorTrace() func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteModelSnapshot) WithFilterPath(v ...string) func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteModelSnapshot) WithHeader(h map[string]string) func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteModelSnapshot) WithOpaqueID(s string) func(*MLDeleteModelSnapshotRequest) { + return func(r *MLDeleteModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model.go new file mode 100644 index 000000000..7895f36b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLDeleteTrainedModelFunc(t Transport) MLDeleteTrainedModel { + return func(model_id string, o ...func(*MLDeleteTrainedModelRequest)) (*Response, error) { + var r = MLDeleteTrainedModelRequest{ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteTrainedModel - Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html. +type MLDeleteTrainedModel func(model_id string, o ...func(*MLDeleteTrainedModelRequest)) (*Response, error) + +// MLDeleteTrainedModelRequest configures the ML Delete Trained Model API request. +type MLDeleteTrainedModelRequest struct { + ModelID string + + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteTrainedModelRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + + params = make(map[string]string) + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_trained_model") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_trained_model") + } + if err != nil { 
+ if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteTrainedModel) WithContext(v context.Context) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.ctx = v + } +} + +// WithForce - true if the model should be forcefully deleted. +func (f MLDeleteTrainedModel) WithForce(v bool) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the amount of time to wait for the model to be deleted.. +func (f MLDeleteTrainedModel) WithTimeout(v time.Duration) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteTrainedModel) WithPretty() func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteTrainedModel) WithHuman() func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteTrainedModel) WithErrorTrace() func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteTrainedModel) WithFilterPath(v ...string) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteTrainedModel) WithHeader(h map[string]string) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteTrainedModel) WithOpaqueID(s string) func(*MLDeleteTrainedModelRequest) { + return func(r *MLDeleteTrainedModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model_alias.go new file mode 100644 index 000000000..e6bc84f28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.delete_trained_model_alias.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
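A minimal usage sketch for the Delete Trained Model API generated above. It assumes the usual go-elasticsearch wiring, where these typed functions are reachable through the client's ML namespace and the client is built with elasticsearch.NewDefaultClient; the model ID is illustrative.

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// NewDefaultClient typically targets http://localhost:9200 or the
	// address in ELASTICSEARCH_URL.
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("error creating client: %s", err)
	}

	// Force the delete so the model is removed even if an alias or
	// ingest pipeline still refers to it.
	res, err := es.ML.DeleteTrainedModel(
		"my-trained-model", // illustrative model ID
		es.ML.DeleteTrainedModel.WithContext(context.Background()),
		es.ML.DeleteTrainedModel.WithForce(true),
	)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Printf("delete failed: %s", res.Status())
	}
}

Each With* helper is simply a closure that mutates the request struct before Do runs, which is why options can be combined freely and in any order.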
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLDeleteTrainedModelAliasFunc(t Transport) MLDeleteTrainedModelAlias { + return func(model_alias string, model_id string, o ...func(*MLDeleteTrainedModelAliasRequest)) (*Response, error) { + var r = MLDeleteTrainedModelAliasRequest{ModelAlias: model_alias, ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLDeleteTrainedModelAlias - Deletes a model alias that refers to the trained model +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html. +type MLDeleteTrainedModelAlias func(model_alias string, model_id string, o ...func(*MLDeleteTrainedModelAliasRequest)) (*Response, error) + +// MLDeleteTrainedModelAliasRequest configures the ML Delete Trained Model Alias API request. +type MLDeleteTrainedModelAliasRequest struct { + ModelAlias string + ModelID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLDeleteTrainedModelAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("model_aliases") + 1 + len(r.ModelAlias)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("model_aliases") + path.WriteString("/") + path.WriteString(r.ModelAlias) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_alias", r.ModelAlias) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok 
:= r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_trained_model_alias") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_trained_model_alias") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLDeleteTrainedModelAlias) WithContext(v context.Context) func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLDeleteTrainedModelAlias) WithPretty() func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLDeleteTrainedModelAlias) WithHuman() func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLDeleteTrainedModelAlias) WithErrorTrace() func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLDeleteTrainedModelAlias) WithFilterPath(v ...string) func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLDeleteTrainedModelAlias) WithHeader(h map[string]string) func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLDeleteTrainedModelAlias) WithOpaqueID(s string) func(*MLDeleteTrainedModelAliasRequest) { + return func(r *MLDeleteTrainedModelAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.estimate_model_memory.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.estimate_model_memory.go new file mode 100644 index 000000000..5c77a487f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.estimate_model_memory.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
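One easy mistake with the alias constructor above: model_alias is the first argument and model_id the second, even though the request path writes the model ID first. A short sketch under the same client assumptions as the previous example; both names are illustrative.

package main

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// deleteAlias detaches the alias "production" from the model
// "my-trained-model". Note the argument order: alias first, model ID
// second, although the URL is
// /_ml/trained_models/{model_id}/model_aliases/{model_alias}.
func deleteAlias(es *elasticsearch.Client) error {
	res, err := es.ML.DeleteTrainedModelAlias(
		"production",       // model_alias
		"my-trained-model", // model_id
		es.ML.DeleteTrainedModelAlias.WithContext(context.Background()),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("delete alias: %s", res.Status())
	}
	return nil
}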
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLEstimateModelMemoryFunc(t Transport) MLEstimateModelMemory { + return func(body io.Reader, o ...func(*MLEstimateModelMemoryRequest)) (*Response, error) { + var r = MLEstimateModelMemoryRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLEstimateModelMemory - Estimates the model memory +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html. +type MLEstimateModelMemory func(body io.Reader, o ...func(*MLEstimateModelMemoryRequest)) (*Response, error) + +// MLEstimateModelMemoryRequest configures the ML Estimate Model Memory API request. +type MLEstimateModelMemoryRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLEstimateModelMemoryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.estimate_model_memory") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ml/anomaly_detectors/_estimate_model_memory")) + path.WriteString("http://") + path.WriteString("/_ml/anomaly_detectors/_estimate_model_memory") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.estimate_model_memory") + if reader := instrument.RecordRequestBody(ctx, "ml.estimate_model_memory", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.estimate_model_memory") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + 
StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLEstimateModelMemory) WithContext(v context.Context) func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLEstimateModelMemory) WithPretty() func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLEstimateModelMemory) WithHuman() func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLEstimateModelMemory) WithErrorTrace() func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLEstimateModelMemory) WithFilterPath(v ...string) func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLEstimateModelMemory) WithHeader(h map[string]string) func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLEstimateModelMemory) WithOpaqueID(s string) func(*MLEstimateModelMemoryRequest) { + return func(r *MLEstimateModelMemoryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.evaluate_data_frame.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.evaluate_data_frame.go new file mode 100644 index 000000000..f20c0eb72 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.evaluate_data_frame.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
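The Estimate Model Memory endpoint above is body-only: the constructor takes a required io.Reader and Do posts it to /_ml/anomaly_detectors/_estimate_model_memory. A sketch with an illustrative anomaly-detection job payload, under the same client assumptions as the earlier examples:

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func estimateModelMemory(es *elasticsearch.Client) {
	// Illustrative job config; the endpoint responds with an estimate
	// such as {"model_memory_estimate": "21mb"}.
	body := strings.NewReader(`{
	  "analysis_config": {
	    "bucket_span": "15m",
	    "detectors": [{"function": "mean", "field_name": "responsetime"}]
	  }
	}`)

	res, err := es.ML.EstimateModelMemory(
		body,
		es.ML.EstimateModelMemory.WithContext(context.Background()),
		es.ML.EstimateModelMemory.WithPretty(),
	)
	if err != nil {
		log.Fatalf("request error: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}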
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLEvaluateDataFrameFunc(t Transport) MLEvaluateDataFrame { + return func(body io.Reader, o ...func(*MLEvaluateDataFrameRequest)) (*Response, error) { + var r = MLEvaluateDataFrameRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLEvaluateDataFrame - Evaluates the data frame analytics for an annotated index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html. +type MLEvaluateDataFrame func(body io.Reader, o ...func(*MLEvaluateDataFrameRequest)) (*Response, error) + +// MLEvaluateDataFrameRequest configures the ML Evaluate Data Frame API request. +type MLEvaluateDataFrameRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLEvaluateDataFrameRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.evaluate_data_frame") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ml/data_frame/_evaluate")) + path.WriteString("http://") + path.WriteString("/_ml/data_frame/_evaluate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.evaluate_data_frame") + if reader := instrument.RecordRequestBody(ctx, "ml.evaluate_data_frame", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.evaluate_data_frame") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request 
context. +func (f MLEvaluateDataFrame) WithContext(v context.Context) func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLEvaluateDataFrame) WithPretty() func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLEvaluateDataFrame) WithHuman() func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLEvaluateDataFrame) WithErrorTrace() func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLEvaluateDataFrame) WithFilterPath(v ...string) func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLEvaluateDataFrame) WithHeader(h map[string]string) func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLEvaluateDataFrame) WithOpaqueID(s string) func(*MLEvaluateDataFrameRequest) { + return func(r *MLEvaluateDataFrameRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.explain_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.explain_data_frame_analytics.go new file mode 100644 index 000000000..33e97e008 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.explain_data_frame_analytics.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
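Evaluate Data Frame follows the same body-only shape. A sketch that scores an outlier-detection result set stored in an annotated index; the index and field names are illustrative:

package main

import (
	"context"
	"fmt"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func evaluateOutliers(es *elasticsearch.Client) error {
	// The index must already contain both the ground truth and the
	// predicted score for each document.
	body := strings.NewReader(`{
	  "index": "my-annotated-index",
	  "evaluation": {
	    "outlier_detection": {
	      "actual_field": "is_outlier",
	      "predicted_probability_field": "ml.outlier_score"
	    }
	  }
	}`)

	res, err := es.ML.EvaluateDataFrame(
		body,
		es.ML.EvaluateDataFrame.WithContext(context.Background()),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("evaluate: %s", res.Status())
	}
	return nil
}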
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLExplainDataFrameAnalyticsFunc(t Transport) MLExplainDataFrameAnalytics { + return func(o ...func(*MLExplainDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLExplainDataFrameAnalyticsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLExplainDataFrameAnalytics - Explains a data frame analytics config. +// +// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html. +type MLExplainDataFrameAnalytics func(o ...func(*MLExplainDataFrameAnalyticsRequest)) (*Response, error) + +// MLExplainDataFrameAnalyticsRequest configures the ML Explain Data Frame Analytics API request. +type MLExplainDataFrameAnalyticsRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLExplainDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.explain_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.DocumentID) + 1 + len("_explain")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + path.WriteString("/") + path.WriteString("_explain") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.explain_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.explain_data_frame_analytics", r.Body); 
reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.explain_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLExplainDataFrameAnalytics) WithContext(v context.Context) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithBody - The data frame analytics config to explain. +func (f MLExplainDataFrameAnalytics) WithBody(v io.Reader) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.Body = v + } +} + +// WithDocumentID - the ID of the data frame analytics to explain. +func (f MLExplainDataFrameAnalytics) WithDocumentID(v string) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLExplainDataFrameAnalytics) WithPretty() func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLExplainDataFrameAnalytics) WithHuman() func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLExplainDataFrameAnalytics) WithErrorTrace() func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLExplainDataFrameAnalytics) WithFilterPath(v ...string) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLExplainDataFrameAnalytics) WithHeader(h map[string]string) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLExplainDataFrameAnalytics) WithOpaqueID(s string) func(*MLExplainDataFrameAnalyticsRequest) { + return func(r *MLExplainDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.flush_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.flush_job.go new file mode 100644 index 000000000..391170a8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.flush_job.go @@ -0,0 +1,306 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
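Unlike its neighbors, Explain Data Frame Analytics makes both inputs optional: pass WithBody for an inline config, or WithDocumentID to explain a stored one (the generated path only includes the ID segment when it is non-empty). A sketch of the stored-config form, with an illustrative config ID:

package main

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func explainStoredConfig(es *elasticsearch.Client) error {
	// No body: the config is looked up by ID server-side.
	res, err := es.ML.ExplainDataFrameAnalytics(
		es.ML.ExplainDataFrameAnalytics.WithDocumentID("my-dfa-config"),
		es.ML.ExplainDataFrameAnalytics.WithContext(context.Background()),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("explain: %s", res.Status())
	}
	return nil
}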
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLFlushJobFunc(t Transport) MLFlushJob { + return func(job_id string, o ...func(*MLFlushJobRequest)) (*Response, error) { + var r = MLFlushJobRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLFlushJob - Forces any buffered data to be processed by the job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html. +type MLFlushJob func(job_id string, o ...func(*MLFlushJobRequest)) (*Response, error) + +// MLFlushJobRequest configures the ML Flush Job API request. +type MLFlushJobRequest struct { + Body io.Reader + + JobID string + + AdvanceTime string + CalcInterim *bool + End string + SkipTime string + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLFlushJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.flush_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_flush")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_flush") + + params = make(map[string]string) + + if r.AdvanceTime != "" { + params["advance_time"] = r.AdvanceTime + } + + if r.CalcInterim != nil { + params["calc_interim"] = strconv.FormatBool(*r.CalcInterim) + } + + if r.End != "" { + params["end"] = r.End + } + + if r.SkipTime != "" { + params["skip_time"] = r.SkipTime + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.flush_job") + if reader := instrument.RecordRequestBody(ctx, "ml.flush_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.flush_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLFlushJob) WithContext(v context.Context) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.ctx = v + } +} + +// WithBody - Flush parameters. +func (f MLFlushJob) WithBody(v io.Reader) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.Body = v + } +} + +// WithAdvanceTime - advances time to the given value generating results and updating the model for the advanced interval. 
+func (f MLFlushJob) WithAdvanceTime(v string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.AdvanceTime = v + } +} + +// WithCalcInterim - calculates interim results for the most recent bucket or all buckets within the latency period. +func (f MLFlushJob) WithCalcInterim(v bool) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.CalcInterim = &v + } +} + +// WithEnd - when used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results. +func (f MLFlushJob) WithEnd(v string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.End = v + } +} + +// WithSkipTime - skips time to the given value without generating results or updating the model for the skipped interval. +func (f MLFlushJob) WithSkipTime(v string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.SkipTime = v + } +} + +// WithStart - when used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results. +func (f MLFlushJob) WithStart(v string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.Start = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLFlushJob) WithPretty() func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLFlushJob) WithHuman() func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLFlushJob) WithErrorTrace() func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLFlushJob) WithFilterPath(v ...string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLFlushJob) WithHeader(h map[string]string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLFlushJob) WithOpaqueID(s string) func(*MLFlushJobRequest) { + return func(r *MLFlushJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.forecast.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.forecast.go new file mode 100644 index 000000000..31bd37ecf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.forecast.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
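A sketch for Flush Job that requests interim results over a bounded window. Query-style parameters can be sent either through the With* options shown here or in the optional body; the job ID and epoch-millisecond timestamps are illustrative.

package main

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func flushWithInterim(es *elasticsearch.Client) error {
	res, err := es.ML.FlushJob(
		"my-anomaly-job",
		es.ML.FlushJob.WithContext(context.Background()),
		es.ML.FlushJob.WithCalcInterim(true),
		// Bucket range for the interim calculation.
		es.ML.FlushJob.WithStart("1672531200000"),
		es.ML.FlushJob.WithEnd("1672617600000"),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("flush: %s", res.Status())
	}
	return nil
}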
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newMLForecastFunc(t Transport) MLForecast { + return func(job_id string, o ...func(*MLForecastRequest)) (*Response, error) { + var r = MLForecastRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLForecast - Predicts the future behavior of a time series by using its historical behavior. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html. +type MLForecast func(job_id string, o ...func(*MLForecastRequest)) (*Response, error) + +// MLForecastRequest configures the ML Forecast API request. +type MLForecastRequest struct { + Body io.Reader + + JobID string + + Duration time.Duration + ExpiresIn time.Duration + MaxModelMemory string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLForecastRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.forecast") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_forecast")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_forecast") + + params = make(map[string]string) + + if r.Duration != 0 { + params["duration"] = formatDuration(r.Duration) + } + + if r.ExpiresIn != 0 { + params["expires_in"] = formatDuration(r.ExpiresIn) + } + + if r.MaxModelMemory != "" { + params["max_model_memory"] = r.MaxModelMemory + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.forecast") + if reader := instrument.RecordRequestBody(ctx, "ml.forecast", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.forecast") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLForecast) WithContext(v context.Context) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.ctx = v + } +} + +// WithBody - Query parameters can be specified in the body. +func (f MLForecast) WithBody(v io.Reader) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.Body = v + } +} + +// WithDuration - the duration of the forecast. +func (f MLForecast) WithDuration(v time.Duration) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.Duration = v + } +} + +// WithExpiresIn - the time interval after which the forecast expires. expired forecasts will be deleted at the first opportunity.. +func (f MLForecast) WithExpiresIn(v time.Duration) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.ExpiresIn = v + } +} + +// WithMaxModelMemory - the max memory able to be used by the forecast. default is 20mb.. +func (f MLForecast) WithMaxModelMemory(v string) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.MaxModelMemory = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLForecast) WithPretty() func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLForecast) WithHuman() func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLForecast) WithErrorTrace() func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLForecast) WithFilterPath(v ...string) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLForecast) WithHeader(h map[string]string) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLForecast) WithOpaqueID(s string) func(*MLForecastRequest) { + return func(r *MLForecastRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_buckets.go new file mode 100644 index 000000000..74c1354de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_buckets.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
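For Forecast, the duration options are plain time.Duration values; the package-level formatDuration helper seen in Do converts them to Elasticsearch time units when building the query string. A sketch with illustrative values:

package main

import (
	"context"
	"fmt"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func forecastJob(es *elasticsearch.Client) error {
	res, err := es.ML.Forecast(
		"my-anomaly-job", // illustrative job ID
		es.ML.Forecast.WithContext(context.Background()),
		es.ML.Forecast.WithDuration(72*time.Hour),  // look 3 days ahead
		es.ML.Forecast.WithExpiresIn(48*time.Hour), // drop results after 2 days
		es.ML.Forecast.WithMaxModelMemory("30mb"),  // default is 20mb
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("forecast: %s", res.Status())
	}
	return nil
}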
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetBucketsFunc(t Transport) MLGetBuckets { + return func(job_id string, o ...func(*MLGetBucketsRequest)) (*Response, error) { + var r = MLGetBucketsRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetBuckets - Retrieves anomaly detection job results for one or more buckets. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html. +type MLGetBuckets func(job_id string, o ...func(*MLGetBucketsRequest)) (*Response, error) + +// MLGetBucketsRequest configures the ML Get Buckets API request. +type MLGetBucketsRequest struct { + Body io.Reader + + JobID string + Timestamp string + + AnomalyScore interface{} + Desc *bool + End string + ExcludeInterim *bool + Expand *bool + From *int + Size *int + Sort string + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetBucketsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_buckets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("results") + 1 + len("buckets") + 1 + len(r.Timestamp)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("buckets") + if r.Timestamp != "" { + path.WriteString("/") + path.WriteString(r.Timestamp) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "timestamp", r.Timestamp) + } + } + + params = make(map[string]string) + + if r.AnomalyScore != nil { + params["anomaly_score"] = fmt.Sprintf("%v", r.AnomalyScore) + } + + if r.Desc != nil { + params["desc"] = strconv.FormatBool(*r.Desc) + } + + if r.End != "" { + params["end"] = r.End + } + + if r.ExcludeInterim != nil { + params["exclude_interim"] = strconv.FormatBool(*r.ExcludeInterim) + } + + if r.Expand != nil { + params["expand"] = strconv.FormatBool(*r.Expand) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_buckets") + if reader := instrument.RecordRequestBody(ctx, "ml.get_buckets", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_buckets") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the 
request context. +func (f MLGetBuckets) WithContext(v context.Context) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.ctx = v + } +} + +// WithBody - Bucket selection details if not provided in URI. +func (f MLGetBuckets) WithBody(v io.Reader) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Body = v + } +} + +// WithTimestamp - the timestamp of the desired single bucket result. +func (f MLGetBuckets) WithTimestamp(v string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Timestamp = v + } +} + +// WithAnomalyScore - filter for the most anomalous buckets. +func (f MLGetBuckets) WithAnomalyScore(v interface{}) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.AnomalyScore = v + } +} + +// WithDesc - set the sort direction. +func (f MLGetBuckets) WithDesc(v bool) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Desc = &v + } +} + +// WithEnd - end time filter for buckets. +func (f MLGetBuckets) WithEnd(v string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.End = v + } +} + +// WithExcludeInterim - exclude interim results. +func (f MLGetBuckets) WithExcludeInterim(v bool) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.ExcludeInterim = &v + } +} + +// WithExpand - include anomaly records. +func (f MLGetBuckets) WithExpand(v bool) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Expand = &v + } +} + +// WithFrom - skips a number of buckets. +func (f MLGetBuckets) WithFrom(v int) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of buckets to get. +func (f MLGetBuckets) WithSize(v int) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Size = &v + } +} + +// WithSort - sort buckets by a particular field. +func (f MLGetBuckets) WithSort(v string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Sort = v + } +} + +// WithStart - start time filter for buckets. +func (f MLGetBuckets) WithStart(v string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Start = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetBuckets) WithPretty() func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetBuckets) WithHuman() func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetBuckets) WithErrorTrace() func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetBuckets) WithFilterPath(v ...string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetBuckets) WithHeader(h map[string]string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f MLGetBuckets) WithOpaqueID(s string) func(*MLGetBucketsRequest) { + return func(r *MLGetBucketsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendar_events.go new file mode 100644 index 000000000..f91a33a9c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendar_events.go @@ -0,0 +1,290 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" +) + +func newMLGetCalendarEventsFunc(t Transport) MLGetCalendarEvents { + return func(calendar_id string, o ...func(*MLGetCalendarEventsRequest)) (*Response, error) { + var r = MLGetCalendarEventsRequest{CalendarID: calendar_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetCalendarEvents - Retrieves information about the scheduled events in calendars. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html. +type MLGetCalendarEvents func(calendar_id string, o ...func(*MLGetCalendarEventsRequest)) (*Response, error) + +// MLGetCalendarEventsRequest configures the ML Get Calendar Events API request. +type MLGetCalendarEventsRequest struct { + CalendarID string + + End interface{} + From *int + JobID string + Size *int + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
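Get Buckets is the most option-heavy endpoint in this batch; the sketch below asks only for finalized, high-scoring buckets, sorted and paged server-side. Note that WithAnomalyScore takes an interface{} and is serialized with %v, so both numbers and strings work.

package main

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func topBuckets(es *elasticsearch.Client) error {
	res, err := es.ML.GetBuckets(
		"my-anomaly-job", // illustrative job ID
		es.ML.GetBuckets.WithContext(context.Background()),
		es.ML.GetBuckets.WithAnomalyScore(75.0),   // only buckets scoring >= 75
		es.ML.GetBuckets.WithExcludeInterim(true), // finalized results only
		es.ML.GetBuckets.WithSort("anomaly_score"),
		es.ML.GetBuckets.WithDesc(true),
		es.ML.GetBuckets.WithFrom(0),
		es.ML.GetBuckets.WithSize(50),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get buckets: %s", res.Status())
	}
	return nil
}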
+func (r MLGetCalendarEventsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_calendar_events") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID) + 1 + len("events")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + path.WriteString("/") + path.WriteString("events") + + params = make(map[string]string) + + if r.End != nil { + params["end"] = fmt.Sprintf("%v", r.End) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.JobID != "" { + params["job_id"] = r.JobID + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_calendar_events") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_calendar_events") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetCalendarEvents) WithContext(v context.Context) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.ctx = v + } +} + +// WithEnd - get events before this time. +func (f MLGetCalendarEvents) WithEnd(v interface{}) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.End = v + } +} + +// WithFrom - skips a number of events. +func (f MLGetCalendarEvents) WithFrom(v int) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.From = &v + } +} + +// WithJobID - get events for the job. when this option is used calendar_id must be '_all'. 
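As the doc comment above notes, filtering events by job requires passing the literal calendar ID "_all". A sketch of that call shape, assuming an already-constructed client and a hypothetical job name (the mlexample package exists only for illustration):

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// eventsForJob lists scheduled events for one job across every calendar;
// per the generated doc comment, calendar_id must be "_all" with WithJobID.
func eventsForJob(ctx context.Context, es *elasticsearch.Client, jobID string) error {
	res, err := es.ML.GetCalendarEvents(
		"_all",
		es.ML.GetCalendarEvents.WithContext(ctx),
		es.ML.GetCalendarEvents.WithJobID(jobID),
		es.ML.GetCalendarEvents.WithSize(50),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get calendar events: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}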
+func (f MLGetCalendarEvents) WithJobID(v string) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.JobID = v + } +} + +// WithSize - specifies a max number of events to get. +func (f MLGetCalendarEvents) WithSize(v int) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.Size = &v + } +} + +// WithStart - get events after this time. +func (f MLGetCalendarEvents) WithStart(v string) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.Start = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetCalendarEvents) WithPretty() func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetCalendarEvents) WithHuman() func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetCalendarEvents) WithErrorTrace() func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetCalendarEvents) WithFilterPath(v ...string) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetCalendarEvents) WithHeader(h map[string]string) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetCalendarEvents) WithOpaqueID(s string) func(*MLGetCalendarEventsRequest) { + return func(r *MLGetCalendarEventsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendars.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendars.go new file mode 100644 index 000000000..cc651c17c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_calendars.go @@ -0,0 +1,277 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetCalendarsFunc(t Transport) MLGetCalendars { + return func(o ...func(*MLGetCalendarsRequest)) (*Response, error) { + var r = MLGetCalendarsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetCalendars - Retrieves configuration information for calendars. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html. +type MLGetCalendars func(o ...func(*MLGetCalendarsRequest)) (*Response, error) + +// MLGetCalendarsRequest configures the ML Get Calendars API request. +type MLGetCalendarsRequest struct { + Body io.Reader + + CalendarID string + + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetCalendarsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_calendars") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + if r.CalendarID != "" { + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + } + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_calendars") + if reader := instrument.RecordRequestBody(ctx, "ml.get_calendars", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_calendars") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetCalendars) WithContext(v context.Context) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.ctx = v + } +} + +// WithBody - The from and size parameters optionally sent in the body. +func (f MLGetCalendars) WithBody(v io.Reader) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.Body = v + } +} + +// WithCalendarID - the ID of the calendar to fetch. +func (f MLGetCalendars) WithCalendarID(v string) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.CalendarID = v + } +} + +// WithFrom - skips a number of calendars. +func (f MLGetCalendars) WithFrom(v int) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of calendars to get. +func (f MLGetCalendars) WithSize(v int) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetCalendars) WithPretty() func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetCalendars) WithHuman() func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetCalendars) WithErrorTrace() func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetCalendars) WithFilterPath(v ...string) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetCalendars) WithHeader(h map[string]string) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetCalendars) WithOpaqueID(s string) func(*MLGetCalendarsRequest) { + return func(r *MLGetCalendarsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_categories.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_categories.go new file mode 100644 index 000000000..86a597f99 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_categories.go @@ -0,0 +1,301 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
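For MLGetCalendars (above), paging can ride the query string (WithFrom/WithSize) or the request body (WithBody), which is why the generated method defaults to POST. A query-string sketch under the same illustrative assumptions as the earlier examples:

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// listCalendars pages through calendar configurations 25 at a time.
func listCalendars(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.GetCalendars(
		es.ML.GetCalendars.WithContext(ctx),
		es.ML.GetCalendars.WithFrom(0),
		es.ML.GetCalendars.WithSize(25),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get calendars: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}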
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetCategoriesFunc(t Transport) MLGetCategories { + return func(job_id string, o ...func(*MLGetCategoriesRequest)) (*Response, error) { + var r = MLGetCategoriesRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetCategories - Retrieves anomaly detection job results for one or more categories. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html. +type MLGetCategories func(job_id string, o ...func(*MLGetCategoriesRequest)) (*Response, error) + +// MLGetCategoriesRequest configures the ML Get Categories API request. +type MLGetCategoriesRequest struct { + Body io.Reader + + CategoryID *int + JobID string + + From *int + PartitionFieldValue string + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetCategoriesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_categories") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("results") + 1 + len("categories")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("categories") + if r.CategoryID != nil { + value := strconv.FormatInt(int64(*r.CategoryID), 10) + path.Grow(1 + len(value)) + path.WriteString("/") + path.WriteString(value) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "category_id", value) + } + } + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.PartitionFieldValue != "" { + params["partition_field_value"] = r.PartitionFieldValue + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = 
strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_categories") + if reader := instrument.RecordRequestBody(ctx, "ml.get_categories", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_categories") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetCategories) WithContext(v context.Context) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.ctx = v + } +} + +// WithBody - Category selection details if not provided in URI. +func (f MLGetCategories) WithBody(v io.Reader) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.Body = v + } +} + +// WithCategoryID - the identifier of the category definition of interest. +func (f MLGetCategories) WithCategoryID(v int) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.CategoryID = &v + } +} + +// WithFrom - skips a number of categories. +func (f MLGetCategories) WithFrom(v int) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.From = &v + } +} + +// WithPartitionFieldValue - specifies the partition to retrieve categories for. this is optional, and should never be used for jobs where per-partition categorization is disabled.. +func (f MLGetCategories) WithPartitionFieldValue(v string) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.PartitionFieldValue = v + } +} + +// WithSize - specifies a max number of categories to get. +func (f MLGetCategories) WithSize(v int) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetCategories) WithPretty() func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetCategories) WithHuman() func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetCategories) WithErrorTrace() func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
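Because CategoryID is a pointer, WithCategoryID can distinguish "unset" from a zero ID, and a set value is appended to the URL path rather than the query string (see the path-building branch in Do above). A sketch fetching one category definition, with hypothetical names:

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// categoryByID fetches a single category definition for a categorization job.
func categoryByID(ctx context.Context, es *elasticsearch.Client, jobID string, id int) error {
	res, err := es.ML.GetCategories(
		jobID,
		es.ML.GetCategories.WithContext(ctx),
		es.ML.GetCategories.WithCategoryID(id), // becomes .../results/categories/<id>
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get categories: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}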
+func (f MLGetCategories) WithFilterPath(v ...string) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetCategories) WithHeader(h map[string]string) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetCategories) WithOpaqueID(s string) func(*MLGetCategoriesRequest) { + return func(r *MLGetCategoriesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics.go new file mode 100644 index 000000000..b1cd2be83 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics.go @@ -0,0 +1,286 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetDataFrameAnalyticsFunc(t Transport) MLGetDataFrameAnalytics { + return func(o ...func(*MLGetDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLGetDataFrameAnalyticsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetDataFrameAnalytics - Retrieves configuration information for data frame analytics jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html. +type MLGetDataFrameAnalytics func(o ...func(*MLGetDataFrameAnalyticsRequest)) (*Response, error) + +// MLGetDataFrameAnalyticsRequest configures the ML Get Data Frame Analytics API request. +type MLGetDataFrameAnalyticsRequest struct { + ID string + + AllowNoMatch *bool + ExcludeGenerated *bool + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + if r.ID != "" { + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.ExcludeGenerated != nil { + params["exclude_generated"] = strconv.FormatBool(*r.ExcludeGenerated) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_data_frame_analytics") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetDataFrameAnalytics) WithContext(v context.Context) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithID - the ID of the data frame analytics to fetch. +func (f MLGetDataFrameAnalytics) WithID(v string) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.ID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no data frame analytics. (this includes `_all` string or when no data frame analytics have been specified). 
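allow_no_match (above) decides whether a wildcard ID that matches nothing yields an error or an empty result set. A sketch with a hypothetical ID pattern:

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// analyticsByPattern lists data frame analytics jobs matching a wildcard,
// tolerating zero matches instead of surfacing a 404.
func analyticsByPattern(ctx context.Context, es *elasticsearch.Client, pattern string) error {
	res, err := es.ML.GetDataFrameAnalytics(
		es.ML.GetDataFrameAnalytics.WithContext(ctx),
		es.ML.GetDataFrameAnalytics.WithID(pattern), // e.g. "loganalytics-*" (hypothetical)
		es.ML.GetDataFrameAnalytics.WithAllowNoMatch(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get data frame analytics: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}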
+func (f MLGetDataFrameAnalytics) WithAllowNoMatch(v bool) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.AllowNoMatch = &v + } +} + +// WithExcludeGenerated - omits fields that are illegal to set on data frame analytics put. +func (f MLGetDataFrameAnalytics) WithExcludeGenerated(v bool) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.ExcludeGenerated = &v + } +} + +// WithFrom - skips a number of analytics. +func (f MLGetDataFrameAnalytics) WithFrom(v int) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of analytics to get. +func (f MLGetDataFrameAnalytics) WithSize(v int) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetDataFrameAnalytics) WithPretty() func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetDataFrameAnalytics) WithHuman() func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetDataFrameAnalytics) WithErrorTrace() func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetDataFrameAnalytics) WithFilterPath(v ...string) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetDataFrameAnalytics) WithHeader(h map[string]string) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetDataFrameAnalytics) WithOpaqueID(s string) func(*MLGetDataFrameAnalyticsRequest) { + return func(r *MLGetDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics_stats.go new file mode 100644 index 000000000..132011f98 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_data_frame_analytics_stats.go @@ -0,0 +1,288 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetDataFrameAnalyticsStatsFunc(t Transport) MLGetDataFrameAnalyticsStats { + return func(o ...func(*MLGetDataFrameAnalyticsStatsRequest)) (*Response, error) { + var r = MLGetDataFrameAnalyticsStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetDataFrameAnalyticsStats - Retrieves usage information for data frame analytics jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html. +type MLGetDataFrameAnalyticsStats func(o ...func(*MLGetDataFrameAnalyticsStatsRequest)) (*Response, error) + +// MLGetDataFrameAnalyticsStatsRequest configures the ML Get Data Frame Analytics Stats API request. +type MLGetDataFrameAnalyticsStatsRequest struct { + ID string + + AllowNoMatch *bool + From *int + Size *int + Verbose *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetDataFrameAnalyticsStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + if r.ID != "" { + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Verbose != nil { + params["verbose"] = strconv.FormatBool(*r.Verbose) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_data_frame_analytics_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_data_frame_analytics_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetDataFrameAnalyticsStats) WithContext(v context.Context) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.ctx = v + } +} + +// WithID - the ID of the data frame analytics stats to fetch. +func (f MLGetDataFrameAnalyticsStats) WithID(v string) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.ID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no data frame analytics. (this includes `_all` string or when no data frame analytics have been specified). 
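The stats variant mirrors the configuration endpoint, adding WithVerbose for per-phase progress detail. A short sketch under the same assumptions as the previous example:

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// analyticsStats retrieves verbose usage stats for all data frame analytics jobs.
func analyticsStats(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.GetDataFrameAnalyticsStats(
		es.ML.GetDataFrameAnalyticsStats.WithContext(ctx),
		es.ML.GetDataFrameAnalyticsStats.WithVerbose(true), // include per-phase progress
		es.ML.GetDataFrameAnalyticsStats.WithSize(100),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get data frame analytics stats: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}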
+func (f MLGetDataFrameAnalyticsStats) WithAllowNoMatch(v bool) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.AllowNoMatch = &v + } +} + +// WithFrom - skips a number of analytics. +func (f MLGetDataFrameAnalyticsStats) WithFrom(v int) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of analytics to get. +func (f MLGetDataFrameAnalyticsStats) WithSize(v int) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.Size = &v + } +} + +// WithVerbose - whether the stats response should be verbose. +func (f MLGetDataFrameAnalyticsStats) WithVerbose(v bool) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.Verbose = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetDataFrameAnalyticsStats) WithPretty() func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetDataFrameAnalyticsStats) WithHuman() func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetDataFrameAnalyticsStats) WithErrorTrace() func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetDataFrameAnalyticsStats) WithFilterPath(v ...string) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetDataFrameAnalyticsStats) WithHeader(h map[string]string) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetDataFrameAnalyticsStats) WithOpaqueID(s string) func(*MLGetDataFrameAnalyticsStatsRequest) { + return func(r *MLGetDataFrameAnalyticsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeed_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeed_stats.go new file mode 100644 index 000000000..6516a1805 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeed_stats.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetDatafeedStatsFunc(t Transport) MLGetDatafeedStats { + return func(o ...func(*MLGetDatafeedStatsRequest)) (*Response, error) { + var r = MLGetDatafeedStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetDatafeedStats - Retrieves usage information for datafeeds. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html. +type MLGetDatafeedStats func(o ...func(*MLGetDatafeedStatsRequest)) (*Response, error) + +// MLGetDatafeedStatsRequest configures the ML Get Datafeed Stats API request. +type MLGetDatafeedStatsRequest struct { + DatafeedID string + + AllowNoMatch *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetDatafeedStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeed_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + if r.DatafeedID != "" { + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) 
+ } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_datafeed_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_datafeed_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetDatafeedStats) WithContext(v context.Context) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.ctx = v + } +} + +// WithDatafeedID - the ID of the datafeeds stats to fetch. +func (f MLGetDatafeedStats) WithDatafeedID(v string) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.DatafeedID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). +func (f MLGetDatafeedStats) WithAllowNoMatch(v bool) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.AllowNoMatch = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetDatafeedStats) WithPretty() func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetDatafeedStats) WithHuman() func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetDatafeedStats) WithErrorTrace() func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetDatafeedStats) WithFilterPath(v ...string) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetDatafeedStats) WithHeader(h map[string]string) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetDatafeedStats) WithOpaqueID(s string) func(*MLGetDatafeedStatsRequest) { + return func(r *MLGetDatafeedStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeeds.go new file mode 100644 index 000000000..c4ad6d6af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_datafeeds.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
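For MLGetDatafeedStats (above), the datafeed ID path segment is only written when DatafeedID is non-empty, so omitting it stats every datafeed. A sketch, again with illustrative names:

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// allDatafeedStats retrieves usage information for every datafeed, treating
// "no datafeeds configured" as an empty result rather than an error.
func allDatafeedStats(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.GetDatafeedStats(
		es.ML.GetDatafeedStats.WithContext(ctx),
		es.ML.GetDatafeedStats.WithAllowNoMatch(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("get datafeed stats: %s", res.String())
	}
	fmt.Println(res.String())
	return nil
}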
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetDatafeedsFunc(t Transport) MLGetDatafeeds { + return func(o ...func(*MLGetDatafeedsRequest)) (*Response, error) { + var r = MLGetDatafeedsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetDatafeeds - Retrieves configuration information for datafeeds. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html. +type MLGetDatafeeds func(o ...func(*MLGetDatafeedsRequest)) (*Response, error) + +// MLGetDatafeedsRequest configures the ML Get Datafeeds API request. +type MLGetDatafeedsRequest struct { + DatafeedID string + + AllowNoMatch *bool + ExcludeGenerated *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetDatafeedsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeeds") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + if r.DatafeedID != "" { + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.ExcludeGenerated != nil { + params["exclude_generated"] = strconv.FormatBool(*r.ExcludeGenerated) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = 
req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_datafeeds") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_datafeeds") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetDatafeeds) WithContext(v context.Context) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.ctx = v + } +} + +// WithDatafeedID - the ID of the datafeeds to fetch. +func (f MLGetDatafeeds) WithDatafeedID(v string) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.DatafeedID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). +func (f MLGetDatafeeds) WithAllowNoMatch(v bool) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.AllowNoMatch = &v + } +} + +// WithExcludeGenerated - omits fields that are illegal to set on datafeed put. +func (f MLGetDatafeeds) WithExcludeGenerated(v bool) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.ExcludeGenerated = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetDatafeeds) WithPretty() func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetDatafeeds) WithHuman() func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetDatafeeds) WithErrorTrace() func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetDatafeeds) WithFilterPath(v ...string) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetDatafeeds) WithHeader(h map[string]string) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetDatafeeds) WithOpaqueID(s string) func(*MLGetDatafeedsRequest) { + return func(r *MLGetDatafeedsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_filters.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_filters.go new file mode 100644 index 000000000..a4f27d50a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_filters.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetFiltersFunc(t Transport) MLGetFilters { + return func(o ...func(*MLGetFiltersRequest)) (*Response, error) { + var r = MLGetFiltersRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetFilters - Retrieves filters. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html. +type MLGetFilters func(o ...func(*MLGetFiltersRequest)) (*Response, error) + +// MLGetFiltersRequest configures the ML Get Filters API request. +type MLGetFiltersRequest struct { + FilterID string + + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetFiltersRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_filters") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("filters") + 1 + len(r.FilterID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + if r.FilterID != "" { + path.WriteString("/") + path.WriteString(r.FilterID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "filter_id", r.FilterID) + } + } + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil 
{ + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_filters") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_filters") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetFilters) WithContext(v context.Context) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.ctx = v + } +} + +// WithFilterID - the ID of the filter to fetch. +func (f MLGetFilters) WithFilterID(v string) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.FilterID = v + } +} + +// WithFrom - skips a number of filters. +func (f MLGetFilters) WithFrom(v int) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of filters to get. +func (f MLGetFilters) WithSize(v int) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetFilters) WithPretty() func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetFilters) WithHuman() func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetFilters) WithErrorTrace() func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetFilters) WithFilterPath(v ...string) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetFilters) WithHeader(h map[string]string) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetFilters) WithOpaqueID(s string) func(*MLGetFiltersRequest) { + return func(r *MLGetFiltersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_influencers.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_influencers.go new file mode 100644 index 000000000..8923d8728 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_influencers.go @@ -0,0 +1,345 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
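The datafeed and filter getters above follow the same shape; exclude_generated is the notable extra, stripping output-only fields so a fetched datafeed config can be resubmitted to the put API. A combined sketch covering both endpoints (names illustrative):

package mlexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// exportDatafeedAndFilters fetches a put-compatible datafeed config plus the
// first page of filters it may reference.
func exportDatafeedAndFilters(ctx context.Context, es *elasticsearch.Client, datafeedID string) error {
	df, err := es.ML.GetDatafeeds(
		es.ML.GetDatafeeds.WithContext(ctx),
		es.ML.GetDatafeeds.WithDatafeedID(datafeedID),
		es.ML.GetDatafeeds.WithExcludeGenerated(true), // safe to replay via the put datafeed API
	)
	if err != nil {
		return err
	}
	defer df.Body.Close()
	fmt.Println(df.String())

	fl, err := es.ML.GetFilters(
		es.ML.GetFilters.WithContext(ctx),
		es.ML.GetFilters.WithFrom(0),
		es.ML.GetFilters.WithSize(20),
	)
	if err != nil {
		return err
	}
	defer fl.Body.Close()
	fmt.Println(fl.String())
	return nil
}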
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetInfluencersFunc(t Transport) MLGetInfluencers { + return func(job_id string, o ...func(*MLGetInfluencersRequest)) (*Response, error) { + var r = MLGetInfluencersRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetInfluencers - Retrieves anomaly detection job results for one or more influencers. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html. +type MLGetInfluencers func(job_id string, o ...func(*MLGetInfluencersRequest)) (*Response, error) + +// MLGetInfluencersRequest configures the ML Get Influencers API request. +type MLGetInfluencersRequest struct { + Body io.Reader + + JobID string + + Desc *bool + End string + ExcludeInterim *bool + From *int + InfluencerScore interface{} + Size *int + Sort string + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetInfluencersRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_influencers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("results") + 1 + len("influencers")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("influencers") + + params = make(map[string]string) + + if r.Desc != nil { + params["desc"] = strconv.FormatBool(*r.Desc) + } + + if r.End != "" { + params["end"] = r.End + } + + if r.ExcludeInterim != nil { + params["exclude_interim"] = strconv.FormatBool(*r.ExcludeInterim) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.InfluencerScore != nil { + params["influencer_score"] = fmt.Sprintf("%v", r.InfluencerScore) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_influencers") + if reader := instrument.RecordRequestBody(ctx, "ml.get_influencers", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_influencers") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetInfluencers) WithContext(v context.Context) func(*MLGetInfluencersRequest) { + return func(r *MLGetInfluencersRequest) { + r.ctx = v + } +} + +// WithBody - Influencer selection criteria. 
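+//
+// A hypothetical sketch of passing a body (the client value `es` and the
+// example JSON are assumptions, not part of the generated file):
+//
+//	res, err := es.ML.GetInfluencers(
+//		"my-job",
+//		es.ML.GetInfluencers.WithBody(strings.NewReader(`{"influencer_score": 80}`)),
+//	)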
+func (f MLGetInfluencers) WithBody(v io.Reader) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Body = v
+	}
+}
+
+// WithDesc - whether the results should be sorted in descending order.
+func (f MLGetInfluencers) WithDesc(v bool) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Desc = &v
+	}
+}
+
+// WithEnd - end timestamp for the requested influencers.
+func (f MLGetInfluencers) WithEnd(v string) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.End = v
+	}
+}
+
+// WithExcludeInterim - exclude interim results.
+func (f MLGetInfluencers) WithExcludeInterim(v bool) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.ExcludeInterim = &v
+	}
+}
+
+// WithFrom - skips a number of influencers.
+func (f MLGetInfluencers) WithFrom(v int) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.From = &v
+	}
+}
+
+// WithInfluencerScore - influencer score threshold for the requested influencers.
+func (f MLGetInfluencers) WithInfluencerScore(v interface{}) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.InfluencerScore = v
+	}
+}
+
+// WithSize - specifies a max number of influencers to get.
+func (f MLGetInfluencers) WithSize(v int) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Size = &v
+	}
+}
+
+// WithSort - sort field for the requested influencers.
+func (f MLGetInfluencers) WithSort(v string) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Sort = v
+	}
+}
+
+// WithStart - start timestamp for the requested influencers.
+func (f MLGetInfluencers) WithStart(v string) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Start = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetInfluencers) WithPretty() func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetInfluencers) WithHuman() func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLGetInfluencers) WithErrorTrace() func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLGetInfluencers) WithFilterPath(v ...string) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLGetInfluencers) WithHeader(h map[string]string) func(*MLGetInfluencersRequest) {
+	return func(r *MLGetInfluencersRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
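+//
+// Elasticsearch echoes the X-Opaque-Id value in places such as slow logs and
+// the task management API, which helps correlate a call with server-side
+// activity; a hypothetical sketch (the client value `es` is an assumption):
+//
+//	res, err := es.ML.GetInfluencers(
+//		"my-job",
+//		es.ML.GetInfluencers.WithOpaqueID("req-1234"),
+//	)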
+func (f MLGetInfluencers) WithOpaqueID(s string) func(*MLGetInfluencersRequest) { + return func(r *MLGetInfluencersRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_job_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_job_stats.go new file mode 100644 index 000000000..f5f463a86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_job_stats.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetJobStatsFunc(t Transport) MLGetJobStats { + return func(o ...func(*MLGetJobStatsRequest)) (*Response, error) { + var r = MLGetJobStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetJobStats - Retrieves usage information for anomaly detection jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html. +type MLGetJobStats func(o ...func(*MLGetJobStatsRequest)) (*Response, error) + +// MLGetJobStatsRequest configures the ML Get Job Stats API request. +type MLGetJobStatsRequest struct { + JobID string + + AllowNoMatch *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
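+//
+// Because every parameter of this API is optional, the job ID is supplied as
+// a functional option rather than a positional argument; a hypothetical
+// sketch (the client value `es` is an assumption):
+//
+//	res, err := es.ML.GetJobStats(
+//		es.ML.GetJobStats.WithJobID("my-job"),
+//		es.ML.GetJobStats.WithAllowNoMatch(true),
+//	)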
+func (r MLGetJobStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_job_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + if r.JobID != "" { + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_job_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_job_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetJobStats) WithContext(v context.Context) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.ctx = v + } +} + +// WithJobID - the ID of the jobs stats to fetch. +func (f MLGetJobStats) WithJobID(v string) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.JobID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs. (this includes `_all` string or when no jobs have been specified). +func (f MLGetJobStats) WithAllowNoMatch(v bool) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.AllowNoMatch = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetJobStats) WithPretty() func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f MLGetJobStats) WithHuman() func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetJobStats) WithErrorTrace() func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetJobStats) WithFilterPath(v ...string) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetJobStats) WithHeader(h map[string]string) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetJobStats) WithOpaqueID(s string) func(*MLGetJobStatsRequest) { + return func(r *MLGetJobStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_jobs.go new file mode 100644 index 000000000..09a4c7c8b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_jobs.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetJobsFunc(t Transport) MLGetJobs { + return func(o ...func(*MLGetJobsRequest)) (*Response, error) { + var r = MLGetJobsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetJobs - Retrieves configuration information for anomaly detection jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html. +type MLGetJobs func(o ...func(*MLGetJobsRequest)) (*Response, error) + +// MLGetJobsRequest configures the ML Get Jobs API request. +type MLGetJobsRequest struct { + JobID string + + AllowNoMatch *bool + ExcludeGenerated *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
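+//
+// A hypothetical sketch of threading cancellation through Do via WithContext
+// (the client value `es` is an assumption):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	res, err := es.ML.GetJobs(
+//		es.ML.GetJobs.WithContext(ctx),
+//		es.ML.GetJobs.WithExcludeGenerated(true),
+//	)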
+func (r MLGetJobsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + if r.JobID != "" { + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.ExcludeGenerated != nil { + params["exclude_generated"] = strconv.FormatBool(*r.ExcludeGenerated) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_jobs") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_jobs") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetJobs) WithContext(v context.Context) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.ctx = v + } +} + +// WithJobID - the ID of the jobs to fetch. +func (f MLGetJobs) WithJobID(v string) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.JobID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs. (this includes `_all` string or when no jobs have been specified). +func (f MLGetJobs) WithAllowNoMatch(v bool) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.AllowNoMatch = &v + } +} + +// WithExcludeGenerated - omits fields that are illegal to set on job put. +func (f MLGetJobs) WithExcludeGenerated(v bool) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.ExcludeGenerated = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetJobs) WithPretty() func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f MLGetJobs) WithHuman() func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetJobs) WithErrorTrace() func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetJobs) WithFilterPath(v ...string) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetJobs) WithHeader(h map[string]string) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetJobs) WithOpaqueID(s string) func(*MLGetJobsRequest) { + return func(r *MLGetJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_memory_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_memory_stats.go new file mode 100644 index 000000000..da30b1471 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_memory_stats.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newMLGetMemoryStatsFunc(t Transport) MLGetMemoryStats { + return func(o ...func(*MLGetMemoryStatsRequest)) (*Response, error) { + var r = MLGetMemoryStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetMemoryStats - Returns information on how ML is using memory. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html. +type MLGetMemoryStats func(o ...func(*MLGetMemoryStatsRequest)) (*Response, error) + +// MLGetMemoryStatsRequest configures the ML Get Memory Stats API request. +type MLGetMemoryStatsRequest struct { + NodeID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
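+//
+// A hypothetical sketch (the client value `es` and the node ID are
+// assumptions); the duration options are rendered into the `master_timeout`
+// and `timeout` query parameters:
+//
+//	res, err := es.ML.GetMemoryStats(
+//		es.ML.GetMemoryStats.WithNodeID("node-1"),
+//		es.ML.GetMemoryStats.WithMasterTimeout(30*time.Second),
+//	)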
+func (r MLGetMemoryStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ml.get_memory_stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_ml") + 1 + len("memory") + 1 + len(r.NodeID) + 1 + len("_stats"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_ml")
+	path.WriteString("/")
+	path.WriteString("memory")
+	if r.NodeID != "" {
+		path.WriteString("/")
+		path.WriteString(r.NodeID)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "node_id", r.NodeID)
+		}
+	}
+	path.WriteString("/")
+	path.WriteString("_stats")
+
+	params = make(map[string]string)
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Timeout != 0 {
+		params["timeout"] = formatDuration(r.Timeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ml.get_memory_stats")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ml.get_memory_stats")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f MLGetMemoryStats) WithContext(v context.Context) func(*MLGetMemoryStatsRequest) {
+	return func(r *MLGetMemoryStatsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithNodeID - specifies the node or nodes to retrieve stats for.
+func (f MLGetMemoryStats) WithNodeID(v string) func(*MLGetMemoryStatsRequest) {
+	return func(r *MLGetMemoryStatsRequest) {
+		r.NodeID = v
+	}
+}
+
+// WithMasterTimeout - explicit operation timeout for connection to master node.
+func (f MLGetMemoryStats) WithMasterTimeout(v time.Duration) func(*MLGetMemoryStatsRequest) {
+	return func(r *MLGetMemoryStatsRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithTimeout - explicit operation timeout.
+func (f MLGetMemoryStats) WithTimeout(v time.Duration) func(*MLGetMemoryStatsRequest) {
+	return func(r *MLGetMemoryStatsRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetMemoryStats) WithPretty() func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetMemoryStats) WithHuman() func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetMemoryStats) WithErrorTrace() func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetMemoryStats) WithFilterPath(v ...string) func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetMemoryStats) WithHeader(h map[string]string) func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetMemoryStats) WithOpaqueID(s string) func(*MLGetMemoryStatsRequest) { + return func(r *MLGetMemoryStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go new file mode 100644 index 000000000..92e65dc6f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetModelSnapshotUpgradeStatsFunc(t Transport) MLGetModelSnapshotUpgradeStats { + return func(snapshot_id string, job_id string, o ...func(*MLGetModelSnapshotUpgradeStatsRequest)) (*Response, error) { + var r = MLGetModelSnapshotUpgradeStatsRequest{SnapshotID: snapshot_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetModelSnapshotUpgradeStats - Gets stats for anomaly detection job model snapshot upgrades that are in progress. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html. +type MLGetModelSnapshotUpgradeStats func(snapshot_id string, job_id string, o ...func(*MLGetModelSnapshotUpgradeStatsRequest)) (*Response, error) + +// MLGetModelSnapshotUpgradeStatsRequest configures the ML Get Model Snapshot Upgrade Stats API request. +type MLGetModelSnapshotUpgradeStatsRequest struct { + JobID string + SnapshotID string + + AllowNoMatch *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetModelSnapshotUpgradeStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_model_snapshot_upgrade_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID) + 1 + len("_upgrade") + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + path.WriteString("/") + path.WriteString("_upgrade") + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_model_snapshot_upgrade_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_model_snapshot_upgrade_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return 
&response, nil
+}
+
+// WithContext sets the request context.
+func (f MLGetModelSnapshotUpgradeStats) WithContext(v context.Context) func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs or no snapshots (this includes the `_all` string).
+func (f MLGetModelSnapshotUpgradeStats) WithAllowNoMatch(v bool) func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.AllowNoMatch = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetModelSnapshotUpgradeStats) WithPretty() func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetModelSnapshotUpgradeStats) WithHuman() func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLGetModelSnapshotUpgradeStats) WithErrorTrace() func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLGetModelSnapshotUpgradeStats) WithFilterPath(v ...string) func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLGetModelSnapshotUpgradeStats) WithHeader(h map[string]string) func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLGetModelSnapshotUpgradeStats) WithOpaqueID(s string) func(*MLGetModelSnapshotUpgradeStatsRequest) {
+	return func(r *MLGetModelSnapshotUpgradeStatsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshots.go
new file mode 100644
index 000000000..435c71c3b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_model_snapshots.go
@@ -0,0 +1,334 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetModelSnapshotsFunc(t Transport) MLGetModelSnapshots { + return func(job_id string, o ...func(*MLGetModelSnapshotsRequest)) (*Response, error) { + var r = MLGetModelSnapshotsRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetModelSnapshots - Retrieves information about model snapshots. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html. +type MLGetModelSnapshots func(job_id string, o ...func(*MLGetModelSnapshotsRequest)) (*Response, error) + +// MLGetModelSnapshotsRequest configures the ML Get Model Snapshots API request. +type MLGetModelSnapshotsRequest struct { + Body io.Reader + + JobID string + SnapshotID string + + Desc *bool + End interface{} + From *int + Size *int + Sort string + Start interface{} + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetModelSnapshotsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_model_snapshots") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + if r.SnapshotID != "" { + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + } + + params = make(map[string]string) + + if r.Desc != nil { + params["desc"] = strconv.FormatBool(*r.Desc) + } + + if r.End != nil { + params["end"] = fmt.Sprintf("%v", r.End) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if r.Start != nil { + params["start"] = fmt.Sprintf("%v", r.Start) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := 
range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ml.get_model_snapshots")
+		if reader := instrument.RecordRequestBody(ctx, "ml.get_model_snapshots", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ml.get_model_snapshots")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f MLGetModelSnapshots) WithContext(v context.Context) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - Model snapshot selection criteria.
+func (f MLGetModelSnapshots) WithBody(v io.Reader) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Body = v
+	}
+}
+
+// WithSnapshotID - the ID of the snapshot to fetch.
+func (f MLGetModelSnapshots) WithSnapshotID(v string) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.SnapshotID = v
+	}
+}
+
+// WithDesc - true if the results should be sorted in descending order.
+func (f MLGetModelSnapshots) WithDesc(v bool) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Desc = &v
+	}
+}
+
+// WithEnd - the filter 'end' query parameter.
+func (f MLGetModelSnapshots) WithEnd(v interface{}) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.End = v
+	}
+}
+
+// WithFrom - skips a number of documents.
+func (f MLGetModelSnapshots) WithFrom(v int) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.From = &v
+	}
+}
+
+// WithSize - specifies a max number of snapshots to get.
+func (f MLGetModelSnapshots) WithSize(v int) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Size = &v
+	}
+}
+
+// WithSort - name of the field to sort on.
+func (f MLGetModelSnapshots) WithSort(v string) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Sort = v
+	}
+}
+
+// WithStart - the filter 'start' query parameter.
+func (f MLGetModelSnapshots) WithStart(v interface{}) func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Start = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetModelSnapshots) WithPretty() func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetModelSnapshots) WithHuman() func(*MLGetModelSnapshotsRequest) {
+	return func(r *MLGetModelSnapshotsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
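+//
+// Asking for an error trace makes Elasticsearch attach a stack trace to error
+// responses, which can help diagnose a rejected snapshot query; a
+// hypothetical sketch (the client value `es` is an assumption):
+//
+//	res, err := es.ML.GetModelSnapshots(
+//		"my-job",
+//		es.ML.GetModelSnapshots.WithErrorTrace(),
+//	)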
+func (f MLGetModelSnapshots) WithErrorTrace() func(*MLGetModelSnapshotsRequest) { + return func(r *MLGetModelSnapshotsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetModelSnapshots) WithFilterPath(v ...string) func(*MLGetModelSnapshotsRequest) { + return func(r *MLGetModelSnapshotsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetModelSnapshots) WithHeader(h map[string]string) func(*MLGetModelSnapshotsRequest) { + return func(r *MLGetModelSnapshotsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetModelSnapshots) WithOpaqueID(s string) func(*MLGetModelSnapshotsRequest) { + return func(r *MLGetModelSnapshotsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_overall_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_overall_buckets.go new file mode 100644 index 000000000..4e70c6537 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_overall_buckets.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetOverallBucketsFunc(t Transport) MLGetOverallBuckets { + return func(job_id string, o ...func(*MLGetOverallBucketsRequest)) (*Response, error) { + var r = MLGetOverallBucketsRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetOverallBuckets - Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html. +type MLGetOverallBuckets func(job_id string, o ...func(*MLGetOverallBucketsRequest)) (*Response, error) + +// MLGetOverallBucketsRequest configures the ML Get Overall Buckets API request. 
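+//
+// A hypothetical sketch of populating the request through the functional
+// options (the client value `es` and the job IDs are assumptions):
+//
+//	res, err := es.ML.GetOverallBuckets(
+//		"job-1,job-2",
+//		es.ML.GetOverallBuckets.WithBucketSpan("1h"),
+//		es.ML.GetOverallBuckets.WithTopN(2),
+//	)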
+type MLGetOverallBucketsRequest struct { + Body io.Reader + + JobID string + + AllowNoMatch *bool + BucketSpan string + End string + ExcludeInterim *bool + OverallScore interface{} + Start string + TopN *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLGetOverallBucketsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_overall_buckets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("results") + 1 + len("overall_buckets")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("overall_buckets") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.BucketSpan != "" { + params["bucket_span"] = r.BucketSpan + } + + if r.End != "" { + params["end"] = r.End + } + + if r.ExcludeInterim != nil { + params["exclude_interim"] = strconv.FormatBool(*r.ExcludeInterim) + } + + if r.OverallScore != nil { + params["overall_score"] = fmt.Sprintf("%v", r.OverallScore) + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.TopN != nil { + params["top_n"] = strconv.FormatInt(int64(*r.TopN), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_overall_buckets") + if reader := instrument.RecordRequestBody(ctx, "ml.get_overall_buckets", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_overall_buckets") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + 
StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f MLGetOverallBuckets) WithContext(v context.Context) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithBody - Overall bucket selection details if not provided in URI.
+func (f MLGetOverallBuckets) WithBody(v io.Reader) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.Body = v
+	}
+}
+
+// WithAllowNoMatch - whether to ignore if a wildcard expression matches no jobs (this includes the `_all` string or when no jobs have been specified).
+func (f MLGetOverallBuckets) WithAllowNoMatch(v bool) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.AllowNoMatch = &v
+	}
+}
+
+// WithBucketSpan - the span of the overall buckets. Defaults to the longest job bucket_span.
+func (f MLGetOverallBuckets) WithBucketSpan(v string) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.BucketSpan = v
+	}
+}
+
+// WithEnd - returns overall buckets with timestamps earlier than this time.
+func (f MLGetOverallBuckets) WithEnd(v string) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.End = v
+	}
+}
+
+// WithExcludeInterim - if true, overall buckets that include interim buckets will be excluded.
+func (f MLGetOverallBuckets) WithExcludeInterim(v bool) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.ExcludeInterim = &v
+	}
+}
+
+// WithOverallScore - returns overall buckets with overall scores higher than this value.
+func (f MLGetOverallBuckets) WithOverallScore(v interface{}) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.OverallScore = v
+	}
+}
+
+// WithStart - returns overall buckets with timestamps after this time.
+func (f MLGetOverallBuckets) WithStart(v string) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.Start = v
+	}
+}
+
+// WithTopN - the number of top job bucket scores to be used in the overall_score calculation.
+func (f MLGetOverallBuckets) WithTopN(v int) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.TopN = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetOverallBuckets) WithPretty() func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetOverallBuckets) WithHuman() func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLGetOverallBuckets) WithErrorTrace() func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLGetOverallBuckets) WithFilterPath(v ...string) func(*MLGetOverallBucketsRequest) {
+	return func(r *MLGetOverallBucketsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLGetOverallBuckets) WithHeader(h map[string]string) func(*MLGetOverallBucketsRequest) { + return func(r *MLGetOverallBucketsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetOverallBuckets) WithOpaqueID(s string) func(*MLGetOverallBucketsRequest) { + return func(r *MLGetOverallBucketsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_records.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_records.go new file mode 100644 index 000000000..63ae6e150 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_records.go @@ -0,0 +1,345 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLGetRecordsFunc(t Transport) MLGetRecords { + return func(job_id string, o ...func(*MLGetRecordsRequest)) (*Response, error) { + var r = MLGetRecordsRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetRecords - Retrieves anomaly records for an anomaly detection job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html. +type MLGetRecords func(job_id string, o ...func(*MLGetRecordsRequest)) (*Response, error) + +// MLGetRecordsRequest configures the ML Get Records API request. +type MLGetRecordsRequest struct { + Body io.Reader + + JobID string + + Desc *bool + End string + ExcludeInterim *bool + From *int + RecordScore interface{} + Size *int + Sort string + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
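+//
+// A hypothetical end-to-end sketch (the client value `es` is an assumption);
+// the Response keeps the raw HTTP body, so the caller must close it:
+//
+//	res, err := es.ML.GetRecords(
+//		"my-job",
+//		es.ML.GetRecords.WithRecordScore(75),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()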
+func (r MLGetRecordsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_records") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("results") + 1 + len("records")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("records") + + params = make(map[string]string) + + if r.Desc != nil { + params["desc"] = strconv.FormatBool(*r.Desc) + } + + if r.End != "" { + params["end"] = r.End + } + + if r.ExcludeInterim != nil { + params["exclude_interim"] = strconv.FormatBool(*r.ExcludeInterim) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.RecordScore != nil { + params["record_score"] = fmt.Sprintf("%v", r.RecordScore) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Sort != "" { + params["sort"] = r.Sort + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_records") + if reader := instrument.RecordRequestBody(ctx, "ml.get_records", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_records") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetRecords) WithContext(v context.Context) func(*MLGetRecordsRequest) { + return func(r *MLGetRecordsRequest) { + r.ctx = v + } +} + +// WithBody - Record selection criteria. 
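+//
+// The same selection criteria accepted as query parameters can be sent as a
+// JSON body instead. A minimal sketch, assuming a v9 client `es`; the job ID
+// and score threshold are hypothetical:
+//
+//	res, err := es.ML.GetRecords(
+//		"my-job",
+//		es.ML.GetRecords.WithContext(context.Background()),
+//		es.ML.GetRecords.WithBody(strings.NewReader(`{"record_score": 80.0, "exclude_interim": true}`)),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.get_records: %s", err)
+//	}
+//	defer res.Body.Close()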
+func (f MLGetRecords) WithBody(v io.Reader) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Body = v
+	}
+}
+
+// WithDesc - set the sort direction.
+func (f MLGetRecords) WithDesc(v bool) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Desc = &v
+	}
+}
+
+// WithEnd - end time filter for records.
+func (f MLGetRecords) WithEnd(v string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.End = v
+	}
+}
+
+// WithExcludeInterim - exclude interim results.
+func (f MLGetRecords) WithExcludeInterim(v bool) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.ExcludeInterim = &v
+	}
+}
+
+// WithFrom - skips a number of records.
+func (f MLGetRecords) WithFrom(v int) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.From = &v
+	}
+}
+
+// WithRecordScore - returns records with anomaly scores greater than or equal to this value.
+func (f MLGetRecords) WithRecordScore(v interface{}) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.RecordScore = v
+	}
+}
+
+// WithSize - specifies a max number of records to get.
+func (f MLGetRecords) WithSize(v int) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Size = &v
+	}
+}
+
+// WithSort - sort records by a particular field.
+func (f MLGetRecords) WithSort(v string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Sort = v
+	}
+}
+
+// WithStart - start time filter for records.
+func (f MLGetRecords) WithStart(v string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Start = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetRecords) WithPretty() func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetRecords) WithHuman() func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLGetRecords) WithErrorTrace() func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLGetRecords) WithFilterPath(v ...string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLGetRecords) WithHeader(h map[string]string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLGetRecords) WithOpaqueID(s string) func(*MLGetRecordsRequest) {
+	return func(r *MLGetRecordsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models.go
new file mode 100644
index 000000000..47a354ed4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models.go
@@ -0,0 +1,320 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetTrainedModelsFunc(t Transport) MLGetTrainedModels { + return func(o ...func(*MLGetTrainedModelsRequest)) (*Response, error) { + var r = MLGetTrainedModelsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetTrainedModels - Retrieves configuration information for a trained inference model. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html. +type MLGetTrainedModels func(o ...func(*MLGetTrainedModelsRequest)) (*Response, error) + +// MLGetTrainedModelsRequest configures the ML Get Trained Models API request. +type MLGetTrainedModelsRequest struct { + ModelID string + + AllowNoMatch *bool + DecompressDefinition *bool + ExcludeGenerated *bool + From *int + Include string + Size *int + Tags []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetTrainedModelsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + if r.ModelID != "" { + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.DecompressDefinition != nil { + params["decompress_definition"] = strconv.FormatBool(*r.DecompressDefinition) + } + + if r.ExcludeGenerated != nil { + params["exclude_generated"] = strconv.FormatBool(*r.ExcludeGenerated) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Include != "" { + params["include"] = r.Include + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if len(r.Tags) > 0 { + params["tags"] = strings.Join(r.Tags, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_trained_models") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_trained_models") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetTrainedModels) WithContext(v context.Context) func(*MLGetTrainedModelsRequest) { + return func(r *MLGetTrainedModelsRequest) { + r.ctx = v + } +} + +// WithModelID - the ID of the trained models to fetch. +func (f MLGetTrainedModels) WithModelID(v string) func(*MLGetTrainedModelsRequest) { + return func(r *MLGetTrainedModelsRequest) { + r.ModelID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no trained models. (this includes `_all` string or when no trained models have been specified). 
+func (f MLGetTrainedModels) WithAllowNoMatch(v bool) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.AllowNoMatch = &v
+	}
+}
+
+// WithDecompressDefinition - should the model definition be decompressed into valid JSON or returned in a custom compressed format. defaults to true.
+func (f MLGetTrainedModels) WithDecompressDefinition(v bool) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.DecompressDefinition = &v
+	}
+}
+
+// WithExcludeGenerated - omits fields that are illegal to set on model put.
+func (f MLGetTrainedModels) WithExcludeGenerated(v bool) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.ExcludeGenerated = &v
+	}
+}
+
+// WithFrom - skips a number of trained models.
+func (f MLGetTrainedModels) WithFrom(v int) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.From = &v
+	}
+}
+
+// WithInclude - a comma-separated list of fields to optionally include. valid options are 'definition' and 'total_feature_importance'. default is none.
+func (f MLGetTrainedModels) WithInclude(v string) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.Include = v
+	}
+}
+
+// WithSize - specifies a max number of trained models to get.
+func (f MLGetTrainedModels) WithSize(v int) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.Size = &v
+	}
+}
+
+// WithTags - a list of tags that the model must have.
+func (f MLGetTrainedModels) WithTags(v ...string) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.Tags = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLGetTrainedModels) WithPretty() func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLGetTrainedModels) WithHuman() func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLGetTrainedModels) WithErrorTrace() func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLGetTrainedModels) WithFilterPath(v ...string) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLGetTrainedModels) WithHeader(h map[string]string) func(*MLGetTrainedModelsRequest) {
+	return func(r *MLGetTrainedModelsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
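+//
+// A minimal listing sketch, assuming a v9 client `es`; the model ID and the
+// choice of included fields are hypothetical:
+//
+//	res, err := es.ML.GetTrainedModels(
+//		es.ML.GetTrainedModels.WithModelID("my-model"),
+//		es.ML.GetTrainedModels.WithInclude("definition"),
+//		es.ML.GetTrainedModels.WithDecompressDefinition(true),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.get_trained_models: %s", err)
+//	}
+//	defer res.Body.Close()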
+func (f MLGetTrainedModels) WithOpaqueID(s string) func(*MLGetTrainedModelsRequest) { + return func(r *MLGetTrainedModelsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models_stats.go new file mode 100644 index 000000000..345150efb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.get_trained_models_stats.go @@ -0,0 +1,274 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLGetTrainedModelsStatsFunc(t Transport) MLGetTrainedModelsStats { + return func(o ...func(*MLGetTrainedModelsStatsRequest)) (*Response, error) { + var r = MLGetTrainedModelsStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLGetTrainedModelsStats - Retrieves usage information for trained inference models. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html. +type MLGetTrainedModelsStats func(o ...func(*MLGetTrainedModelsStatsRequest)) (*Response, error) + +// MLGetTrainedModelsStatsRequest configures the ML Get Trained Models Stats API request. +type MLGetTrainedModelsStatsRequest struct { + ModelID string + + AllowNoMatch *bool + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLGetTrainedModelsStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + if r.ModelID != "" { + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_trained_models_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_trained_models_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLGetTrainedModelsStats) WithContext(v context.Context) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.ctx = v + } +} + +// WithModelID - the ID of the trained models stats to fetch. +func (f MLGetTrainedModelsStats) WithModelID(v string) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.ModelID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no trained models. (this includes `_all` string or when no trained models have been specified). +func (f MLGetTrainedModelsStats) WithAllowNoMatch(v bool) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.AllowNoMatch = &v + } +} + +// WithFrom - skips a number of trained models. 
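+//
+// From and Size page through the stats. A minimal sketch, assuming a v9
+// client `es`; the `_all` expression and page size are illustrative:
+//
+//	res, err := es.ML.GetTrainedModelsStats(
+//		es.ML.GetTrainedModelsStats.WithModelID("_all"),
+//		es.ML.GetTrainedModelsStats.WithFrom(0),
+//		es.ML.GetTrainedModelsStats.WithSize(100),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.get_trained_models_stats: %s", err)
+//	}
+//	defer res.Body.Close()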
+func (f MLGetTrainedModelsStats) WithFrom(v int) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of trained models to get. +func (f MLGetTrainedModelsStats) WithSize(v int) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLGetTrainedModelsStats) WithPretty() func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLGetTrainedModelsStats) WithHuman() func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLGetTrainedModelsStats) WithErrorTrace() func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLGetTrainedModelsStats) WithFilterPath(v ...string) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLGetTrainedModelsStats) WithHeader(h map[string]string) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLGetTrainedModelsStats) WithOpaqueID(s string) func(*MLGetTrainedModelsStatsRequest) { + return func(r *MLGetTrainedModelsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.infer_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.infer_trained_model.go new file mode 100644 index 000000000..1eea98fa5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.infer_trained_model.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newMLInferTrainedModelFunc(t Transport) MLInferTrainedModel { + return func(body io.Reader, model_id string, o ...func(*MLInferTrainedModelRequest)) (*Response, error) { + var r = MLInferTrainedModelRequest{Body: body, ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLInferTrainedModel - Evaluate a trained model. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html. +type MLInferTrainedModel func(body io.Reader, model_id string, o ...func(*MLInferTrainedModelRequest)) (*Response, error) + +// MLInferTrainedModelRequest configures the ML Infer Trained Model API request. +type MLInferTrainedModelRequest struct { + Body io.Reader + + ModelID string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLInferTrainedModelRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.infer_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("_infer")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("_infer") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.infer_trained_model") + if reader := instrument.RecordRequestBody(ctx, "ml.infer_trained_model", r.Body); reader != nil { + req.Body = reader 
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ml.infer_trained_model")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f MLInferTrainedModel) WithContext(v context.Context) func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.ctx = v
+	}
+}
+
+// WithTimeout - controls the amount of time to wait for inference results.
+func (f MLInferTrainedModel) WithTimeout(v time.Duration) func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLInferTrainedModel) WithPretty() func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLInferTrainedModel) WithHuman() func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLInferTrainedModel) WithErrorTrace() func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLInferTrainedModel) WithFilterPath(v ...string) func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLInferTrainedModel) WithHeader(h map[string]string) func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLInferTrainedModel) WithOpaqueID(s string) func(*MLInferTrainedModelRequest) {
+	return func(r *MLInferTrainedModelRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.info.go
new file mode 100644
index 000000000..34dd4eed0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.info.go
@@ -0,0 +1,215 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
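+//
+// (Usage sketch for the MLInferTrainedModel API defined above: the docs body
+// and the model ID are positional, options follow. The model ID and input
+// document are hypothetical, assuming a v9 client `es`.)
+//
+//	docs := strings.NewReader(`{"docs":[{"text_field":"example input"}]}`)
+//	res, err := es.ML.InferTrainedModel(
+//		docs,
+//		"my-model",
+//		es.ML.InferTrainedModel.WithTimeout(30*time.Second),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.infer_trained_model: %s", err)
+//	}
+//	defer res.Body.Close()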
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLInfoFunc(t Transport) MLInfo { + return func(o ...func(*MLInfoRequest)) (*Response, error) { + var r = MLInfoRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLInfo - Returns defaults and limits used by machine learning. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html. +type MLInfo func(o ...func(*MLInfoRequest)) (*Response, error) + +// MLInfoRequest configures the ML Info API request. +type MLInfoRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_ml/info")) + path.WriteString("http://") + path.WriteString("/_ml/info") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLInfo) WithContext(v context.Context) func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLInfo) WithPretty() func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLInfo) WithHuman() func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
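+//
+// MLInfo takes no required arguments. A minimal sketch, assuming a v9
+// client `es`:
+//
+//	res, err := es.ML.Info(
+//		es.ML.Info.WithContext(context.Background()),
+//		es.ML.Info.WithHuman(),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.info: %s", err)
+//	}
+//	defer res.Body.Close()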
+func (f MLInfo) WithErrorTrace() func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLInfo) WithFilterPath(v ...string) func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLInfo) WithHeader(h map[string]string) func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLInfo) WithOpaqueID(s string) func(*MLInfoRequest) { + return func(r *MLInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.open_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.open_job.go new file mode 100644 index 000000000..b26b2c5fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.open_job.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLOpenJobFunc(t Transport) MLOpenJob { + return func(job_id string, o ...func(*MLOpenJobRequest)) (*Response, error) { + var r = MLOpenJobRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLOpenJob - Opens one or more anomaly detection jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html. +type MLOpenJob func(job_id string, o ...func(*MLOpenJobRequest)) (*Response, error) + +// MLOpenJobRequest configures the ML Open Job API request. +type MLOpenJobRequest struct { + Body io.Reader + + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLOpenJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.open_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_open")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_open") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.open_job") + if reader := instrument.RecordRequestBody(ctx, "ml.open_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.open_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLOpenJob) WithContext(v context.Context) func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.ctx = v + } +} + +// WithBody - Query parameters can be specified in the body. +func (f MLOpenJob) WithBody(v io.Reader) func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLOpenJob) WithPretty() func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLOpenJob) WithHuman() func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLOpenJob) WithErrorTrace() func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
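+//
+// A minimal sketch of opening a job, assuming a v9 client `es` and a
+// hypothetical job ID:
+//
+//	res, err := es.ML.OpenJob(
+//		"my-job",
+//		es.ML.OpenJob.WithContext(context.Background()),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.open_job: %s", err)
+//	}
+//	defer res.Body.Close()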
+func (f MLOpenJob) WithFilterPath(v ...string) func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLOpenJob) WithHeader(h map[string]string) func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLOpenJob) WithOpaqueID(s string) func(*MLOpenJobRequest) { + return func(r *MLOpenJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_calendar_events.go new file mode 100644 index 000000000..62ee608a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_calendar_events.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPostCalendarEventsFunc(t Transport) MLPostCalendarEvents { + return func(calendar_id string, body io.Reader, o ...func(*MLPostCalendarEventsRequest)) (*Response, error) { + var r = MLPostCalendarEventsRequest{CalendarID: calendar_id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPostCalendarEvents - Posts scheduled events in a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html. +type MLPostCalendarEvents func(calendar_id string, body io.Reader, o ...func(*MLPostCalendarEventsRequest)) (*Response, error) + +// MLPostCalendarEventsRequest configures the ML Post Calendar Events API request. +type MLPostCalendarEventsRequest struct { + Body io.Reader + + CalendarID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLPostCalendarEventsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.post_calendar_events") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID) + 1 + len("events")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + path.WriteString("/") + path.WriteString("events") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.post_calendar_events") + if reader := instrument.RecordRequestBody(ctx, "ml.post_calendar_events", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.post_calendar_events") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPostCalendarEvents) WithContext(v context.Context) func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPostCalendarEvents) WithPretty() func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPostCalendarEvents) WithHuman() func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPostCalendarEvents) WithErrorTrace() func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
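+//
+// The calendar ID and the events body are positional. A minimal sketch,
+// assuming a v9 client `es`; the calendar ID and epoch-millisecond
+// timestamps are hypothetical:
+//
+//	events := strings.NewReader(`{"events":[{"description":"planned maintenance","start_time":1754006400000,"end_time":1754092800000}]}`)
+//	res, err := es.ML.PostCalendarEvents("my-calendar", events)
+//	if err != nil {
+//		log.Fatalf("ml.post_calendar_events: %s", err)
+//	}
+//	defer res.Body.Close()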
+func (f MLPostCalendarEvents) WithFilterPath(v ...string) func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPostCalendarEvents) WithHeader(h map[string]string) func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPostCalendarEvents) WithOpaqueID(s string) func(*MLPostCalendarEventsRequest) { + return func(r *MLPostCalendarEventsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_data.go new file mode 100644 index 000000000..84ebfbb6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.post_data.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPostDataFunc(t Transport) MLPostData { + return func(job_id string, body io.Reader, o ...func(*MLPostDataRequest)) (*Response, error) { + var r = MLPostDataRequest{JobID: job_id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPostData - Sends data to an anomaly detection job for analysis. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html. +type MLPostData func(job_id string, body io.Reader, o ...func(*MLPostDataRequest)) (*Response, error) + +// MLPostDataRequest configures the ML Post Data API request. +type MLPostDataRequest struct { + Body io.Reader + + JobID string + + ResetEnd string + ResetStart string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLPostDataRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.post_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_data")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_data") + + params = make(map[string]string) + + if r.ResetEnd != "" { + params["reset_end"] = r.ResetEnd + } + + if r.ResetStart != "" { + params["reset_start"] = r.ResetStart + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.post_data") + if reader := instrument.RecordRequestBody(ctx, "ml.post_data", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.post_data") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPostData) WithContext(v context.Context) func(*MLPostDataRequest) { + return func(r *MLPostDataRequest) { + r.ctx = v + } +} + +// WithResetEnd - optional parameter to specify the end of the bucket resetting range. +func (f MLPostData) WithResetEnd(v string) func(*MLPostDataRequest) { + return func(r *MLPostDataRequest) { + r.ResetEnd = v + } +} + +// WithResetStart - optional parameter to specify the start of the bucket resetting range. +func (f MLPostData) WithResetStart(v string) func(*MLPostDataRequest) { + return func(r *MLPostDataRequest) { + r.ResetStart = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPostData) WithPretty() func(*MLPostDataRequest) { + return func(r *MLPostDataRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
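+//
+// Data is sent as newline-delimited JSON. A minimal sketch, assuming a v9
+// client `es`; the job ID and the record are hypothetical:
+//
+//	ndjson := strings.NewReader(`{"time":1754006400,"value":42.0}` + "\n")
+//	res, err := es.ML.PostData(
+//		"my-job",
+//		ndjson,
+//		es.ML.PostData.WithResetStart("2025-08-01T00:00:00Z"),
+//	)
+//	if err != nil {
+//		log.Fatalf("ml.post_data: %s", err)
+//	}
+//	defer res.Body.Close()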
+func (f MLPostData) WithHuman() func(*MLPostDataRequest) {
+	return func(r *MLPostDataRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLPostData) WithErrorTrace() func(*MLPostDataRequest) {
+	return func(r *MLPostDataRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLPostData) WithFilterPath(v ...string) func(*MLPostDataRequest) {
+	return func(r *MLPostDataRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLPostData) WithHeader(h map[string]string) func(*MLPostDataRequest) {
+	return func(r *MLPostDataRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLPostData) WithOpaqueID(s string) func(*MLPostDataRequest) {
+	return func(r *MLPostDataRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_data_frame_analytics.go
new file mode 100644
index 000000000..4d7a6467a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_data_frame_analytics.go
@@ -0,0 +1,255 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func newMLPreviewDataFrameAnalyticsFunc(t Transport) MLPreviewDataFrameAnalytics {
+	return func(o ...func(*MLPreviewDataFrameAnalyticsRequest)) (*Response, error) {
+		var r = MLPreviewDataFrameAnalyticsRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// MLPreviewDataFrameAnalytics - Previews the data that will be analyzed given a data frame analytics config.
+//
+// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html.
+type MLPreviewDataFrameAnalytics func(o ...func(*MLPreviewDataFrameAnalyticsRequest)) (*Response, error)
+
+// MLPreviewDataFrameAnalyticsRequest configures the ML Preview Data Frame Analytics API request.
+type MLPreviewDataFrameAnalyticsRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPreviewDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.preview_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.DocumentID) + 1 + len("_preview")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + } + path.WriteString("/") + path.WriteString("_preview") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.preview_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.preview_data_frame_analytics", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.preview_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPreviewDataFrameAnalytics) WithContext(v context.Context) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithBody - The data frame analytics config to preview. 
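The preview endpoint accepts either a stored config (DocumentID, which Do splices into the path as an optional segment) or an ad-hoc config in the body. A sketch reusing the imports and `es` client from the MLPostData example; the outlier-detection config is illustrative only:

req := esapi.MLPreviewDataFrameAnalyticsRequest{
	// Hypothetical ad-hoc config; omit Body and set DocumentID instead to preview a stored job.
	Body: strings.NewReader(`{
	  "config": {
	    "source":   {"index": "customers"},
	    "analysis": {"outlier_detection": {}}
	  }
	}`),
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.preview_data_frame_analytics: %s", err)
}
defer res.Body.Close()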
+func (f MLPreviewDataFrameAnalytics) WithBody(v io.Reader) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.Body = v + } +} + +// WithDocumentID - the ID of the data frame analytics to preview. +func (f MLPreviewDataFrameAnalytics) WithDocumentID(v string) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.DocumentID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPreviewDataFrameAnalytics) WithPretty() func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPreviewDataFrameAnalytics) WithHuman() func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPreviewDataFrameAnalytics) WithErrorTrace() func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPreviewDataFrameAnalytics) WithFilterPath(v ...string) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPreviewDataFrameAnalytics) WithHeader(h map[string]string) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPreviewDataFrameAnalytics) WithOpaqueID(s string) func(*MLPreviewDataFrameAnalyticsRequest) { + return func(r *MLPreviewDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_datafeed.go new file mode 100644 index 000000000..bf02350d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.preview_datafeed.go @@ -0,0 +1,278 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPreviewDatafeedFunc(t Transport) MLPreviewDatafeed { + return func(o ...func(*MLPreviewDatafeedRequest)) (*Response, error) { + var r = MLPreviewDatafeedRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPreviewDatafeed - Previews a datafeed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html. +type MLPreviewDatafeed func(o ...func(*MLPreviewDatafeedRequest)) (*Response, error) + +// MLPreviewDatafeedRequest configures the ML Preview Datafeed API request. +type MLPreviewDatafeedRequest struct { + Body io.Reader + + DatafeedID string + + End string + Start string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPreviewDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.preview_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID) + 1 + len("_preview")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + if r.DatafeedID != "" { + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + } + path.WriteString("/") + path.WriteString("_preview") + + params = make(map[string]string) + + if r.End != "" { + params["end"] = r.End + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.preview_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.preview_datafeed", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.preview_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPreviewDatafeed) WithContext(v context.Context) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.ctx = v + } +} + +// WithBody - The datafeed config and job config with which to execute the preview. +func (f MLPreviewDatafeed) WithBody(v io.Reader) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.Body = v + } +} + +// WithDatafeedID - the ID of the datafeed to preview. +func (f MLPreviewDatafeed) WithDatafeedID(v string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.DatafeedID = v + } +} + +// WithEnd - the end time when the datafeed preview should stop. +func (f MLPreviewDatafeed) WithEnd(v string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.End = v + } +} + +// WithStart - the start time from where the datafeed preview should begin. +func (f MLPreviewDatafeed) WithStart(v string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.Start = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPreviewDatafeed) WithPretty() func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPreviewDatafeed) WithHuman() func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPreviewDatafeed) WithErrorTrace() func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPreviewDatafeed) WithFilterPath(v ...string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPreviewDatafeed) WithHeader(h map[string]string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPreviewDatafeed) WithOpaqueID(s string) func(*MLPreviewDatafeedRequest) { + return func(r *MLPreviewDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar.go new file mode 100644 index 000000000..9ac0f6f56 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPutCalendarFunc(t Transport) MLPutCalendar { + return func(calendar_id string, o ...func(*MLPutCalendarRequest)) (*Response, error) { + var r = MLPutCalendarRequest{CalendarID: calendar_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutCalendar - Instantiates a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html. +type MLPutCalendar func(calendar_id string, o ...func(*MLPutCalendarRequest)) (*Response, error) + +// MLPutCalendarRequest configures the ML Put Calendar API request. +type MLPutCalendarRequest struct { + Body io.Reader + + CalendarID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
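Stepping back to MLPreviewDatafeed, defined just above: its optional start/end window is copied straight into the query string by Do. A hedged sketch with the same client; the datafeed ID and timestamps are invented:

req := esapi.MLPreviewDatafeedRequest{
	DatafeedID: "datafeed-web-traffic", // hypothetical ID; Body could carry an ad-hoc datafeed config instead
	Start:      "2025-01-01T00:00:00Z",
	End:        "2025-01-02T00:00:00Z",
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.preview_datafeed: %s", err)
}
defer res.Body.Close()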
+func (r MLPutCalendarRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_calendar") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_calendar") + if reader := instrument.RecordRequestBody(ctx, "ml.put_calendar", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_calendar") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutCalendar) WithContext(v context.Context) func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.ctx = v + } +} + +// WithBody - The calendar details. +func (f MLPutCalendar) WithBody(v io.Reader) func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutCalendar) WithPretty() func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutCalendar) WithHuman() func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutCalendar) WithErrorTrace() func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f MLPutCalendar) WithFilterPath(v ...string) func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutCalendar) WithHeader(h map[string]string) func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutCalendar) WithOpaqueID(s string) func(*MLPutCalendarRequest) { + return func(r *MLPutCalendarRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar_job.go new file mode 100644 index 000000000..5f6d72758 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_calendar_job.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newMLPutCalendarJobFunc(t Transport) MLPutCalendarJob { + return func(calendar_id string, job_id string, o ...func(*MLPutCalendarJobRequest)) (*Response, error) { + var r = MLPutCalendarJobRequest{CalendarID: calendar_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutCalendarJob - Adds an anomaly detection job to a calendar. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html. +type MLPutCalendarJob func(calendar_id string, job_id string, o ...func(*MLPutCalendarJobRequest)) (*Response, error) + +// MLPutCalendarJobRequest configures the ML Put Calendar Job API request. +type MLPutCalendarJobRequest struct { + CalendarID string + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
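Creating a calendar with the MLPutCalendar API above needs only an ID plus an optional JSON body — a sketch under the same assumptions as the earlier examples:

req := esapi.MLPutCalendarRequest{
	CalendarID: "planned-outages", // hypothetical calendar name
	Body:       strings.NewReader(`{"description":"maintenance windows"}`),
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_calendar: %s", err)
}
defer res.Body.Close()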
+func (r MLPutCalendarJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("calendars") + 1 + len(r.CalendarID) + 1 + len("jobs") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + path.WriteString(r.CalendarID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendar_id", r.CalendarID) + } + path.WriteString("/") + path.WriteString("jobs") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_calendar_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_calendar_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutCalendarJob) WithContext(v context.Context) func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutCalendarJob) WithPretty() func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutCalendarJob) WithHuman() func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutCalendarJob) WithErrorTrace() func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f MLPutCalendarJob) WithFilterPath(v ...string) func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutCalendarJob) WithHeader(h map[string]string) func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutCalendarJob) WithOpaqueID(s string) func(*MLPutCalendarJobRequest) { + return func(r *MLPutCalendarJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_data_frame_analytics.go new file mode 100644 index 000000000..668e084ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_data_frame_analytics.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPutDataFrameAnalyticsFunc(t Transport) MLPutDataFrameAnalytics { + return func(id string, body io.Reader, o ...func(*MLPutDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLPutDataFrameAnalyticsRequest{ID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutDataFrameAnalytics - Instantiates a data frame analytics job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html. +type MLPutDataFrameAnalytics func(id string, body io.Reader, o ...func(*MLPutDataFrameAnalyticsRequest)) (*Response, error) + +// MLPutDataFrameAnalyticsRequest configures the ML Put Data Frame Analytics API request. +type MLPutDataFrameAnalyticsRequest struct { + ID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
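MLPutCalendarJob is one of the few bodyless endpoints here — its Do above passes nil to newRequest. Linking the hypothetical job to the calendar from the previous sketch:

req := esapi.MLPutCalendarJobRequest{
	CalendarID: "planned-outages",
	JobID:      "web-traffic",
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_calendar_job: %s", err)
}
defer res.Body.Close()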
+func (r MLPutDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.put_data_frame_analytics", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutDataFrameAnalytics) WithContext(v context.Context) func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutDataFrameAnalytics) WithPretty() func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutDataFrameAnalytics) WithHuman() func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutDataFrameAnalytics) WithErrorTrace() func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f MLPutDataFrameAnalytics) WithFilterPath(v ...string) func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutDataFrameAnalytics) WithHeader(h map[string]string) func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutDataFrameAnalytics) WithOpaqueID(s string) func(*MLPutDataFrameAnalyticsRequest) { + return func(r *MLPutDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_datafeed.go new file mode 100644 index 000000000..13b255f60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_datafeed.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLPutDatafeedFunc(t Transport) MLPutDatafeed { + return func(body io.Reader, datafeed_id string, o ...func(*MLPutDatafeedRequest)) (*Response, error) { + var r = MLPutDatafeedRequest{Body: body, DatafeedID: datafeed_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutDatafeed - Instantiates a datafeed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html. +type MLPutDatafeed func(body io.Reader, datafeed_id string, o ...func(*MLPutDatafeedRequest)) (*Response, error) + +// MLPutDatafeedRequest configures the ML Put Datafeed API request. +type MLPutDatafeedRequest struct { + Body io.Reader + + DatafeedID string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreThrottled *bool + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
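Instantiating a data frame analytics job through the MLPutDataFrameAnalytics API above; the source/dest/analysis body follows the documented config shape, abbreviated and hypothetical here:

req := esapi.MLPutDataFrameAnalyticsRequest{
	ID: "customer-outliers", // hypothetical job ID
	Body: strings.NewReader(`{
	  "source":   {"index": "customers"},
	  "dest":     {"index": "customers-outliers"},
	  "analysis": {"outlier_detection": {}}
	}`),
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_data_frame_analytics: %s", err)
}
defer res.Body.Close()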
+func (r MLPutDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.put_datafeed", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutDatafeed) WithContext(v context.Context) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - ignore if the source indices expressions resolves to no concrete indices (default: true). +func (f MLPutDatafeed) WithAllowNoIndices(v bool) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether source index expressions should get expanded to open or closed indices (default: open). 
+func (f MLPutDatafeed) WithExpandWildcards(v string) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreThrottled - ignore indices that are marked as throttled (default: true). +func (f MLPutDatafeed) WithIgnoreThrottled(v bool) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - ignore unavailable indexes (default: false). +func (f MLPutDatafeed) WithIgnoreUnavailable(v bool) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutDatafeed) WithPretty() func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutDatafeed) WithHuman() func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutDatafeed) WithErrorTrace() func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutDatafeed) WithFilterPath(v ...string) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutDatafeed) WithHeader(h map[string]string) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutDatafeed) WithOpaqueID(s string) func(*MLPutDatafeedRequest) { + return func(r *MLPutDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_filter.go new file mode 100644 index 000000000..406e2fbfd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_filter.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
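Note that the put-datafeed request uses *bool fields (AllowNoIndices, IgnoreThrottled, IgnoreUnavailable) so "unset" and "false" stay distinguishable — which is why the With* helpers above take the address of their argument. A sketch, IDs invented:

ignoreUnavailable := true
req := esapi.MLPutDatafeedRequest{
	DatafeedID:        "datafeed-web-traffic",
	Body:              strings.NewReader(`{"job_id":"web-traffic","indices":["web-logs-*"]}`),
	IgnoreUnavailable: &ignoreUnavailable, // only serialized into the query string when non-nil
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_datafeed: %s", err)
}
defer res.Body.Close()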
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPutFilterFunc(t Transport) MLPutFilter { + return func(body io.Reader, filter_id string, o ...func(*MLPutFilterRequest)) (*Response, error) { + var r = MLPutFilterRequest{Body: body, FilterID: filter_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutFilter - Instantiates a filter. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html. +type MLPutFilter func(body io.Reader, filter_id string, o ...func(*MLPutFilterRequest)) (*Response, error) + +// MLPutFilterRequest configures the ML Put Filter API request. +type MLPutFilterRequest struct { + Body io.Reader + + FilterID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPutFilterRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("filters") + 1 + len(r.FilterID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + path.WriteString(r.FilterID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "filter_id", r.FilterID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_filter") + if reader := instrument.RecordRequestBody(ctx, "ml.put_filter", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_filter") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
+ response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutFilter) WithContext(v context.Context) func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutFilter) WithPretty() func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutFilter) WithHuman() func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutFilter) WithErrorTrace() func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutFilter) WithFilterPath(v ...string) func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutFilter) WithHeader(h map[string]string) func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutFilter) WithOpaqueID(s string) func(*MLPutFilterRequest) { + return func(r *MLPutFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_job.go new file mode 100644 index 000000000..88452e0ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_job.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLPutJobFunc(t Transport) MLPutJob { + return func(job_id string, body io.Reader, o ...func(*MLPutJobRequest)) (*Response, error) { + var r = MLPutJobRequest{JobID: job_id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutJob - Instantiates an anomaly detection job. 
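MLPutFilter, whose Do method ends just above, is the simplest of the bunch: a filter ID plus an items list. A sketch with invented values:

req := esapi.MLPutFilterRequest{
	FilterID: "trusted-hosts", // hypothetical filter name
	Body:     strings.NewReader(`{"items":["10.0.0.1","10.0.0.2"]}`),
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_filter: %s", err)
}
defer res.Body.Close()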
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html. +type MLPutJob func(job_id string, body io.Reader, o ...func(*MLPutJobRequest)) (*Response, error) + +// MLPutJobRequest configures the ML Put Job API request. +type MLPutJobRequest struct { + Body io.Reader + + JobID string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreThrottled *bool + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPutJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_job") + if reader := instrument.RecordRequestBody(ctx, "ml.put_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
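A minimal anomaly-detection job config for the MLPutJob endpoint defined above — bucket span and detector are invented for illustration:

req := esapi.MLPutJobRequest{
	JobID: "web-traffic",
	Body: strings.NewReader(`{
	  "analysis_config": {
	    "bucket_span": "15m",
	    "detectors":   [{"function": "mean", "field_name": "bytes"}]
	  },
	  "data_description": {"time_field": "timestamp"}
	}`),
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_job: %s", err)
}
defer res.Body.Close()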
+func (f MLPutJob) WithContext(v context.Context) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.ctx = v
+	}
+}
+
+// WithAllowNoIndices - ignore if the source indices expressions resolve to no concrete indices (default: true); only set if datafeed_config is provided.
+func (f MLPutJob) WithAllowNoIndices(v bool) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.AllowNoIndices = &v
+	}
+}
+
+// WithExpandWildcards - whether source index expressions should get expanded to open or closed indices (default: open); only set if datafeed_config is provided.
+func (f MLPutJob) WithExpandWildcards(v string) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.ExpandWildcards = v
+	}
+}
+
+// WithIgnoreThrottled - ignore indices that are marked as throttled (default: true); only set if datafeed_config is provided.
+func (f MLPutJob) WithIgnoreThrottled(v bool) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.IgnoreThrottled = &v
+	}
+}
+
+// WithIgnoreUnavailable - ignore unavailable indices (default: false); only set if datafeed_config is provided.
+func (f MLPutJob) WithIgnoreUnavailable(v bool) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.IgnoreUnavailable = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLPutJob) WithPretty() func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLPutJob) WithHuman() func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLPutJob) WithErrorTrace() func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLPutJob) WithFilterPath(v ...string) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLPutJob) WithHeader(h map[string]string) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLPutJob) WithOpaqueID(s string) func(*MLPutJobRequest) {
+	return func(r *MLPutJobRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model.go
new file mode 100644
index 000000000..4fd33e6df
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model.go
@@ -0,0 +1,261 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLPutTrainedModelFunc(t Transport) MLPutTrainedModel { + return func(body io.Reader, model_id string, o ...func(*MLPutTrainedModelRequest)) (*Response, error) { + var r = MLPutTrainedModelRequest{Body: body, ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutTrainedModel - Creates an inference trained model. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html. +type MLPutTrainedModel func(body io.Reader, model_id string, o ...func(*MLPutTrainedModelRequest)) (*Response, error) + +// MLPutTrainedModelRequest configures the ML Put Trained Model API request. +type MLPutTrainedModelRequest struct { + Body io.Reader + + ModelID string + + DeferDefinitionDecompression *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
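MLPutTrainedModel exposes defer_definition_decompression and wait_for_completion as pointer booleans, mirroring the params handling in the Do method that follows. A sketch; the model ID and input mapping are assumptions, not part of this diff:

wait := true
req := esapi.MLPutTrainedModelRequest{
	ModelID:           ".elser_model_2", // hypothetical built-in model ID
	Body:              strings.NewReader(`{"input":{"field_names":["text_field"]}}`),
	WaitForCompletion: &wait, // block until child operations (e.g. a model download) finish
}
res, err := req.Do(context.Background(), es)
if err != nil {
	log.Fatalf("ml.put_trained_model: %s", err)
}
defer res.Body.Close()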
+func (r MLPutTrainedModelRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + + params = make(map[string]string) + + if r.DeferDefinitionDecompression != nil { + params["defer_definition_decompression"] = strconv.FormatBool(*r.DeferDefinitionDecompression) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutTrainedModel) WithContext(v context.Context) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.ctx = v + } +} + +// WithDeferDefinitionDecompression - if set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations.. +func (f MLPutTrainedModel) WithDeferDefinitionDecompression(v bool) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.DeferDefinitionDecompression = &v + } +} + +// WithWaitForCompletion - whether to wait for all child operations(e.g. model download) to complete, before returning or not. default to false. 
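The MLPutTrainedModel pieces above follow the standard esapi functional-options pattern: the exported function type is callable directly, and its With* methods build option closures that mutate the request. A minimal usage sketch, assuming a hypothetical v9 client value `es`; the model ID and JSON body are illustrative and not taken from this diff:

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}
	// Illustrative inference model definition; real definitions are larger.
	body := strings.NewReader(`{"input":{"field_names":["f1","f2"]},"inference_config":{"regression":{}}}`)
	res, err := es.ML.PutTrainedModel(
		body,
		"example-regression-model",
		es.ML.PutTrainedModel.WithContext(context.Background()),
		es.ML.PutTrainedModel.WithWaitForCompletion(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.Status())
}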
+func (f MLPutTrainedModel) WithWaitForCompletion(v bool) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutTrainedModel) WithPretty() func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutTrainedModel) WithHuman() func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutTrainedModel) WithErrorTrace() func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutTrainedModel) WithFilterPath(v ...string) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutTrainedModel) WithHeader(h map[string]string) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutTrainedModel) WithOpaqueID(s string) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_alias.go new file mode 100644 index 000000000..6c9e588df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_alias.go @@ -0,0 +1,247 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLPutTrainedModelAliasFunc(t Transport) MLPutTrainedModelAlias { + return func(model_alias string, model_id string, o ...func(*MLPutTrainedModelAliasRequest)) (*Response, error) { + var r = MLPutTrainedModelAliasRequest{ModelAlias: model_alias, ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutTrainedModelAlias - Creates a new model alias (or reassigns an existing one) to refer to the trained model +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html. +type MLPutTrainedModelAlias func(model_alias string, model_id string, o ...func(*MLPutTrainedModelAliasRequest)) (*Response, error) + +// MLPutTrainedModelAliasRequest configures the ML Put Trained Model Alias API request. +type MLPutTrainedModelAliasRequest struct { + ModelAlias string + ModelID string + + Reassign *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPutTrainedModelAliasRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("model_aliases") + 1 + len(r.ModelAlias)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("model_aliases") + path.WriteString("/") + path.WriteString(r.ModelAlias) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_alias", r.ModelAlias) + } + + params = make(map[string]string) + + if r.Reassign != nil { + params["reassign"] = strconv.FormatBool(*r.Reassign) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok 
:= r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_alias") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_alias") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutTrainedModelAlias) WithContext(v context.Context) func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.ctx = v + } +} + +// WithReassign - if the model_alias already exists and points to a separate model_id, this parameter must be true. defaults to false.. +func (f MLPutTrainedModelAlias) WithReassign(v bool) func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.Reassign = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutTrainedModelAlias) WithPretty() func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutTrainedModelAlias) WithHuman() func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutTrainedModelAlias) WithErrorTrace() func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutTrainedModelAlias) WithFilterPath(v ...string) func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutTrainedModelAlias) WithHeader(h map[string]string) func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutTrainedModelAlias) WithOpaqueID(s string) func(*MLPutTrainedModelAliasRequest) { + return func(r *MLPutTrainedModelAliasRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_definition_part.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_definition_part.go new file mode 100644 index 000000000..4c00133b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_definition_part.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
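The MLPutTrainedModelAlias API above takes the alias as its first positional argument and the model ID as its second, and it refuses to move an alias that already points at a different model unless the reassign flag is set. A sketch under the same assumptions as the previous example (hypothetical `es` client, illustrative names):

package mlexamples

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// pointAliasAtModel repoints the "current-model" alias, reassigning it if it
// already targets another model.
func pointAliasAtModel(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.PutTrainedModelAlias(
		"current-model",            // model_alias
		"example-regression-model", // model_id
		es.ML.PutTrainedModelAlias.WithContext(ctx),
		es.ML.PutTrainedModelAlias.WithReassign(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}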
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLPutTrainedModelDefinitionPartFunc(t Transport) MLPutTrainedModelDefinitionPart { + return func(body io.Reader, model_id string, part *int, o ...func(*MLPutTrainedModelDefinitionPartRequest)) (*Response, error) { + var r = MLPutTrainedModelDefinitionPartRequest{Body: body, ModelID: model_id, Part: part} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutTrainedModelDefinitionPart - Creates part of a trained model definition +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html. +type MLPutTrainedModelDefinitionPart func(body io.Reader, model_id string, part *int, o ...func(*MLPutTrainedModelDefinitionPartRequest)) (*Response, error) + +// MLPutTrainedModelDefinitionPartRequest configures the ML Put Trained Model Definition Part API request. +type MLPutTrainedModelDefinitionPartRequest struct { + Body io.Reader + + ModelID string + Part *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLPutTrainedModelDefinitionPartRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_definition_part") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + if r.Part == nil { + return nil, errors.New("part is required and cannot be nil") + } + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("definition") + 1 + len(strconv.Itoa(*r.Part))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("definition") + path.WriteString("/") + path.WriteString(strconv.Itoa(*r.Part)) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "part", strconv.Itoa(*r.Part)) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_definition_part") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model_definition_part", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_definition_part") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutTrainedModelDefinitionPart) WithContext(v context.Context) func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLPutTrainedModelDefinitionPart) WithPretty() func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
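Note that Part is a *int and the Do method above fails fast with "part is required and cannot be nil" rather than building a malformed path. A sketch of uploading a chunked model definition; the body field names (definition, total_definition_length, total_parts) come from the Elasticsearch API, while the client and all other names are illustrative:

package mlexamples

import (
	"context"
	"fmt"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// uploadDefinitionParts PUTs each base64-encoded chunk of a model definition
// under its zero-based part index.
func uploadDefinitionParts(ctx context.Context, es *elasticsearch.Client, modelID string, chunks []string, totalLen int) error {
	for i, chunk := range chunks {
		part := i // the API takes a *int, so take the loop index by address
		body := strings.NewReader(fmt.Sprintf(
			`{"definition":%q,"total_definition_length":%d,"total_parts":%d}`,
			chunk, totalLen, len(chunks),
		))
		res, err := es.ML.PutTrainedModelDefinitionPart(
			body, modelID, &part,
			es.ML.PutTrainedModelDefinitionPart.WithContext(ctx),
		)
		if err != nil {
			return err
		}
		res.Body.Close()
		if res.IsError() {
			return fmt.Errorf("part %d: %s", i, res.Status())
		}
	}
	return nil
}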
+func (f MLPutTrainedModelDefinitionPart) WithHuman() func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutTrainedModelDefinitionPart) WithErrorTrace() func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutTrainedModelDefinitionPart) WithFilterPath(v ...string) func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutTrainedModelDefinitionPart) WithHeader(h map[string]string) func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutTrainedModelDefinitionPart) WithOpaqueID(s string) func(*MLPutTrainedModelDefinitionPartRequest) { + return func(r *MLPutTrainedModelDefinitionPartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_vocabulary.go new file mode 100644 index 000000000..666b935b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.put_trained_model_vocabulary.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLPutTrainedModelVocabularyFunc(t Transport) MLPutTrainedModelVocabulary { + return func(body io.Reader, model_id string, o ...func(*MLPutTrainedModelVocabularyRequest)) (*Response, error) { + var r = MLPutTrainedModelVocabularyRequest{Body: body, ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLPutTrainedModelVocabulary - Creates a trained model vocabulary +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html. 
+type MLPutTrainedModelVocabulary func(body io.Reader, model_id string, o ...func(*MLPutTrainedModelVocabularyRequest)) (*Response, error) + +// MLPutTrainedModelVocabularyRequest configures the ML Put Trained Model Vocabulary API request. +type MLPutTrainedModelVocabularyRequest struct { + Body io.Reader + + ModelID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLPutTrainedModelVocabularyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_vocabulary") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("vocabulary")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("vocabulary") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_vocabulary") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model_vocabulary", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_vocabulary") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLPutTrainedModelVocabulary) WithContext(v context.Context) func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
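MLPutTrainedModelVocabulary, defined above, attaches a tokenizer vocabulary to an NLP model before its definition parts can be used. A sketch with an abbreviated, illustrative vocabulary (real vocabularies run to tens of thousands of tokens):

package mlexamples

import (
	"context"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// putVocabulary stores a tiny stand-in vocabulary for a hypothetical NLP model.
func putVocabulary(ctx context.Context, es *elasticsearch.Client) error {
	body := strings.NewReader(`{"vocabulary":["[PAD]","[UNK]","the","quick","brown","fox"]}`)
	res, err := es.ML.PutTrainedModelVocabulary(
		body,
		"example-nlp-model",
		es.ML.PutTrainedModelVocabulary.WithContext(ctx),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}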
+func (f MLPutTrainedModelVocabulary) WithPretty() func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLPutTrainedModelVocabulary) WithHuman() func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLPutTrainedModelVocabulary) WithErrorTrace() func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLPutTrainedModelVocabulary) WithFilterPath(v ...string) func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLPutTrainedModelVocabulary) WithHeader(h map[string]string) func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLPutTrainedModelVocabulary) WithOpaqueID(s string) func(*MLPutTrainedModelVocabularyRequest) { + return func(r *MLPutTrainedModelVocabularyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.reset_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.reset_job.go new file mode 100644 index 000000000..01fe90435 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.reset_job.go @@ -0,0 +1,253 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newMLResetJobFunc(t Transport) MLResetJob { + return func(job_id string, o ...func(*MLResetJobRequest)) (*Response, error) { + var r = MLResetJobRequest{JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLResetJob - Resets an existing anomaly detection job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html. 
+type MLResetJob func(job_id string, o ...func(*MLResetJobRequest)) (*Response, error) + +// MLResetJobRequest configures the ML Reset Job API request. +type MLResetJobRequest struct { + JobID string + + DeleteUserAnnotations *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLResetJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.reset_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_reset")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_reset") + + params = make(map[string]string) + + if r.DeleteUserAnnotations != nil { + params["delete_user_annotations"] = strconv.FormatBool(*r.DeleteUserAnnotations) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.reset_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.reset_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLResetJob) WithContext(v context.Context) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.ctx = v + } +} + +// WithDeleteUserAnnotations - should annotations added by the user be deleted. +func (f MLResetJob) WithDeleteUserAnnotations(v bool) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.DeleteUserAnnotations = &v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. 
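As the Do method above shows, MLResetJob issues POST /_ml/anomaly_detectors/{job_id}/_reset, and its two query-string parameters map directly onto the With* setters. A sketch, again with a hypothetical client and job ID:

package mlexamples

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// resetJob wipes an anomaly detection job back to a clean state, waiting for
// completion and dropping user annotations along the way.
func resetJob(ctx context.Context, es *elasticsearch.Client, jobID string) error {
	res, err := es.ML.ResetJob(
		jobID,
		es.ML.ResetJob.WithContext(ctx),
		es.ML.ResetJob.WithWaitForCompletion(true),
		es.ML.ResetJob.WithDeleteUserAnnotations(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	log.Println(res.Status())
	return nil
}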
+func (f MLResetJob) WithWaitForCompletion(v bool) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLResetJob) WithPretty() func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLResetJob) WithHuman() func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLResetJob) WithErrorTrace() func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLResetJob) WithFilterPath(v ...string) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLResetJob) WithHeader(h map[string]string) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLResetJob) WithOpaqueID(s string) func(*MLResetJobRequest) { + return func(r *MLResetJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.revert_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.revert_model_snapshot.go new file mode 100644 index 000000000..17654d6fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.revert_model_snapshot.go @@ -0,0 +1,266 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLRevertModelSnapshotFunc(t Transport) MLRevertModelSnapshot { + return func(snapshot_id string, job_id string, o ...func(*MLRevertModelSnapshotRequest)) (*Response, error) { + var r = MLRevertModelSnapshotRequest{SnapshotID: snapshot_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLRevertModelSnapshot - Reverts to a specific snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html. 
+type MLRevertModelSnapshot func(snapshot_id string, job_id string, o ...func(*MLRevertModelSnapshotRequest)) (*Response, error) + +// MLRevertModelSnapshotRequest configures the ML Revert Model Snapshot API request. +type MLRevertModelSnapshotRequest struct { + Body io.Reader + + JobID string + SnapshotID string + + DeleteInterveningResults *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLRevertModelSnapshotRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.revert_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID) + 1 + len("_revert")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + path.WriteString("/") + path.WriteString("_revert") + + params = make(map[string]string) + + if r.DeleteInterveningResults != nil { + params["delete_intervening_results"] = strconv.FormatBool(*r.DeleteInterveningResults) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.revert_model_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.revert_model_snapshot", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.revert_model_snapshot") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + 
return &response, nil +} + +// WithContext sets the request context. +func (f MLRevertModelSnapshot) WithContext(v context.Context) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.ctx = v + } +} + +// WithBody - Reversion options. +func (f MLRevertModelSnapshot) WithBody(v io.Reader) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.Body = v + } +} + +// WithDeleteInterveningResults - should we reset the results back to the time of the snapshot?. +func (f MLRevertModelSnapshot) WithDeleteInterveningResults(v bool) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.DeleteInterveningResults = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLRevertModelSnapshot) WithPretty() func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLRevertModelSnapshot) WithHuman() func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLRevertModelSnapshot) WithErrorTrace() func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLRevertModelSnapshot) WithFilterPath(v ...string) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLRevertModelSnapshot) WithHeader(h map[string]string) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLRevertModelSnapshot) WithOpaqueID(s string) func(*MLRevertModelSnapshotRequest) { + return func(r *MLRevertModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.set_upgrade_mode.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.set_upgrade_mode.go new file mode 100644 index 000000000..cc1b3626a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.set_upgrade_mode.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
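One easy-to-miss detail of the MLRevertModelSnapshot API above: the snapshot ID is the first positional argument and the job ID the second, even though the job comes first in the URL path. A sketch with illustrative IDs and the usual hypothetical client:

package mlexamples

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// revertSnapshot rolls a job back to a snapshot and deletes results recorded
// after the snapshot's timestamp.
func revertSnapshot(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.RevertModelSnapshot(
		"snapshot-1618",       // snapshot_id comes first
		"example-anomaly-job", // job_id second
		es.ML.RevertModelSnapshot.WithContext(ctx),
		es.ML.RevertModelSnapshot.WithDeleteInterveningResults(true),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}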
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLSetUpgradeModeFunc(t Transport) MLSetUpgradeMode { + return func(o ...func(*MLSetUpgradeModeRequest)) (*Response, error) { + var r = MLSetUpgradeModeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLSetUpgradeMode - Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html. +type MLSetUpgradeMode func(o ...func(*MLSetUpgradeModeRequest)) (*Response, error) + +// MLSetUpgradeModeRequest configures the ML Set Upgrade Mode API request. +type MLSetUpgradeModeRequest struct { + Enabled *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLSetUpgradeModeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.set_upgrade_mode") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ml/set_upgrade_mode")) + path.WriteString("http://") + path.WriteString("/_ml/set_upgrade_mode") + + params = make(map[string]string) + + if r.Enabled != nil { + params["enabled"] = strconv.FormatBool(*r.Enabled) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.set_upgrade_mode") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.set_upgrade_mode") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f MLSetUpgradeMode) WithContext(v context.Context) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.ctx = v
+	}
+}
+
+// WithEnabled - whether to enable the upgrade_mode ML setting; defaults to false.
+func (f MLSetUpgradeMode) WithEnabled(v bool) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.Enabled = &v
+	}
+}
+
+// WithTimeout - controls the time to wait before the action times out; defaults to 30 seconds.
+func (f MLSetUpgradeMode) WithTimeout(v time.Duration) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLSetUpgradeMode) WithPretty() func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLSetUpgradeMode) WithHuman() func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLSetUpgradeMode) WithErrorTrace() func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLSetUpgradeMode) WithFilterPath(v ...string) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLSetUpgradeMode) WithHeader(h map[string]string) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLSetUpgradeMode) WithOpaqueID(s string) func(*MLSetUpgradeModeRequest) {
+	return func(r *MLSetUpgradeModeRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_data_frame_analytics.go
new file mode 100644
index 000000000..af981c514
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_data_frame_analytics.go
@@ -0,0 +1,260 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
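MLSetUpgradeMode above toggles a cluster-wide setting, so it takes no positional arguments at all, only options. A sketch that enables upgrade mode with a longer timeout than the 30-second default (client value and duration are illustrative):

package mlexamples

import (
	"context"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// enterUpgradeMode pauses ML activity cluster-wide before an upgrade.
func enterUpgradeMode(ctx context.Context, es *elasticsearch.Client) error {
	res, err := es.ML.SetUpgradeMode(
		es.ML.SetUpgradeMode.WithContext(ctx),
		es.ML.SetUpgradeMode.WithEnabled(true),
		es.ML.SetUpgradeMode.WithTimeout(90*time.Second),
	)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}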
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newMLStartDataFrameAnalyticsFunc(t Transport) MLStartDataFrameAnalytics { + return func(id string, o ...func(*MLStartDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLStartDataFrameAnalyticsRequest{ID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStartDataFrameAnalytics - Starts a data frame analytics job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html. +type MLStartDataFrameAnalytics func(id string, o ...func(*MLStartDataFrameAnalyticsRequest)) (*Response, error) + +// MLStartDataFrameAnalyticsRequest configures the ML Start Data Frame Analytics API request. +type MLStartDataFrameAnalyticsRequest struct { + ID string + + Body io.Reader + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLStartDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID) + 1 + len("_start")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + path.WriteString("/") + path.WriteString("_start") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, 
"ml.start_data_frame_analytics", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStartDataFrameAnalytics) WithContext(v context.Context) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithBody - The start data frame analytics parameters. +func (f MLStartDataFrameAnalytics) WithBody(v io.Reader) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.Body = v + } +} + +// WithTimeout - controls the time to wait until the task has started. defaults to 20 seconds. +func (f MLStartDataFrameAnalytics) WithTimeout(v time.Duration) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStartDataFrameAnalytics) WithPretty() func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStartDataFrameAnalytics) WithHuman() func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStartDataFrameAnalytics) WithErrorTrace() func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStartDataFrameAnalytics) WithFilterPath(v ...string) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStartDataFrameAnalytics) WithHeader(h map[string]string) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLStartDataFrameAnalytics) WithOpaqueID(s string) func(*MLStartDataFrameAnalyticsRequest) { + return func(r *MLStartDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_datafeed.go new file mode 100644 index 000000000..d4ecbfc82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_datafeed.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newMLStartDatafeedFunc(t Transport) MLStartDatafeed { + return func(datafeed_id string, o ...func(*MLStartDatafeedRequest)) (*Response, error) { + var r = MLStartDatafeedRequest{DatafeedID: datafeed_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStartDatafeed - Starts one or more datafeeds. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html. +type MLStartDatafeed func(datafeed_id string, o ...func(*MLStartDatafeedRequest)) (*Response, error) + +// MLStartDatafeedRequest configures the ML Start Datafeed API request. +type MLStartDatafeedRequest struct { + Body io.Reader + + DatafeedID string + + End string + Start string + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
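+//
+// A minimal usage sketch (illustrative only, not part of the generated API:
+// "es" is an assumed, already configured client that embeds *esapi.API, and
+// the datafeed ID is hypothetical):
+//
+//	res, err := es.ML.StartDatafeed(
+//		"datafeed-total-requests",
+//		es.ML.StartDatafeed.WithStart("2025-01-01T00:00:00Z"),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}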
+func (r MLStartDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID) + 1 + len("_start")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + path.WriteString("/") + path.WriteString("_start") + + params = make(map[string]string) + + if r.End != "" { + params["end"] = r.End + } + + if r.Start != "" { + params["start"] = r.Start + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.start_datafeed", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStartDatafeed) WithContext(v context.Context) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.ctx = v + } +} + +// WithBody - The start datafeed parameters. +func (f MLStartDatafeed) WithBody(v io.Reader) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.Body = v + } +} + +// WithEnd - the end time when the datafeed should stop. when not set, the datafeed continues in real time. +func (f MLStartDatafeed) WithEnd(v string) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.End = v + } +} + +// WithStart - the start time from where the datafeed should begin. 
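+// The value is passed through verbatim as the "start" query parameter;
+// millisecond epoch timestamps or ISO 8601 dates are the usual inputs (an
+// assumption based on the Elasticsearch datafeed documentation, not on
+// anything this generated client enforces).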
+func (f MLStartDatafeed) WithStart(v string) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.Start = v + } +} + +// WithTimeout - controls the time to wait until a datafeed has started. default to 20 seconds. +func (f MLStartDatafeed) WithTimeout(v time.Duration) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStartDatafeed) WithPretty() func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStartDatafeed) WithHuman() func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStartDatafeed) WithErrorTrace() func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStartDatafeed) WithFilterPath(v ...string) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStartDatafeed) WithHeader(h map[string]string) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLStartDatafeed) WithOpaqueID(s string) func(*MLStartDatafeedRequest) { + return func(r *MLStartDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_trained_model_deployment.go new file mode 100644 index 000000000..707a9a3b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.start_trained_model_deployment.go @@ -0,0 +1,345 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLStartTrainedModelDeploymentFunc(t Transport) MLStartTrainedModelDeployment { + return func(model_id string, o ...func(*MLStartTrainedModelDeploymentRequest)) (*Response, error) { + var r = MLStartTrainedModelDeploymentRequest{ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStartTrainedModelDeployment - Start a trained model deployment. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html. +type MLStartTrainedModelDeployment func(model_id string, o ...func(*MLStartTrainedModelDeploymentRequest)) (*Response, error) + +// MLStartTrainedModelDeploymentRequest configures the ML Start Trained Model Deployment API request. +type MLStartTrainedModelDeploymentRequest struct { + Body io.Reader + + ModelID string + + CacheSize string + DeploymentID string + NumberOfAllocations *int + Priority string + QueueCapacity *int + ThreadsPerAllocation *int + Timeout time.Duration + WaitFor string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLStartTrainedModelDeploymentRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("deployment") + 1 + len("_start")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_start") + + params = make(map[string]string) + + if r.CacheSize != "" { + params["cache_size"] = r.CacheSize + } + + if r.DeploymentID != "" { + params["deployment_id"] = r.DeploymentID + } + + if r.NumberOfAllocations != nil { + params["number_of_allocations"] = strconv.FormatInt(int64(*r.NumberOfAllocations), 10) + } + + if r.Priority != "" { + params["priority"] = r.Priority + } + + if r.QueueCapacity != nil { + params["queue_capacity"] = strconv.FormatInt(int64(*r.QueueCapacity), 10) + } + + if r.ThreadsPerAllocation != nil { + params["threads_per_allocation"] = strconv.FormatInt(int64(*r.ThreadsPerAllocation), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitFor != "" { + params["wait_for"] = r.WaitFor + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + 
params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.start_trained_model_deployment", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_trained_model_deployment") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStartTrainedModelDeployment) WithContext(v context.Context) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.ctx = v + } +} + +// WithBody - The settings for the trained model deployment. +func (f MLStartTrainedModelDeployment) WithBody(v io.Reader) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Body = v + } +} + +// WithCacheSize - a byte-size value for configuring the inference cache size. for example, 20mb.. +func (f MLStartTrainedModelDeployment) WithCacheSize(v string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.CacheSize = v + } +} + +// WithDeploymentID - the ID of the new deployment. defaults to the model_id if not set.. +func (f MLStartTrainedModelDeployment) WithDeploymentID(v string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.DeploymentID = v + } +} + +// WithNumberOfAllocations - the total number of allocations this model is assigned across machine learning nodes.. +func (f MLStartTrainedModelDeployment) WithNumberOfAllocations(v int) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.NumberOfAllocations = &v + } +} + +// WithPriority - the deployment priority.. +func (f MLStartTrainedModelDeployment) WithPriority(v string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Priority = v + } +} + +// WithQueueCapacity - controls how many inference requests are allowed in the queue at a time.. +func (f MLStartTrainedModelDeployment) WithQueueCapacity(v int) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.QueueCapacity = &v + } +} + +// WithThreadsPerAllocation - the number of threads used by each model allocation during inference.. 
+func (f MLStartTrainedModelDeployment) WithThreadsPerAllocation(v int) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.ThreadsPerAllocation = &v + } +} + +// WithTimeout - controls the amount of time to wait for the model to deploy.. +func (f MLStartTrainedModelDeployment) WithTimeout(v time.Duration) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Timeout = v + } +} + +// WithWaitFor - the allocation status for which to wait. +func (f MLStartTrainedModelDeployment) WithWaitFor(v string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.WaitFor = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStartTrainedModelDeployment) WithPretty() func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStartTrainedModelDeployment) WithHuman() func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStartTrainedModelDeployment) WithErrorTrace() func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStartTrainedModelDeployment) WithFilterPath(v ...string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStartTrainedModelDeployment) WithHeader(h map[string]string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLStartTrainedModelDeployment) WithOpaqueID(s string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_data_frame_analytics.go new file mode 100644 index 000000000..6e70b0f88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_data_frame_analytics.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLStopDataFrameAnalyticsFunc(t Transport) MLStopDataFrameAnalytics { + return func(id string, o ...func(*MLStopDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLStopDataFrameAnalyticsRequest{ID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStopDataFrameAnalytics - Stops one or more data frame analytics jobs. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html. +type MLStopDataFrameAnalytics func(id string, o ...func(*MLStopDataFrameAnalyticsRequest)) (*Response, error) + +// MLStopDataFrameAnalyticsRequest configures the ML Stop Data Frame Analytics API request. +type MLStopDataFrameAnalyticsRequest struct { + ID string + + Body io.Reader + + AllowNoMatch *bool + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLStopDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.ID) + 1 + len("_stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.ID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.ID) + } + path.WriteString("/") + path.WriteString("_stop") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && 
req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_data_frame_analytics", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStopDataFrameAnalytics) WithContext(v context.Context) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithBody - The stop data frame analytics parameters. +func (f MLStopDataFrameAnalytics) WithBody(v io.Reader) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.Body = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no data frame analytics. (this includes `_all` string or when no data frame analytics have been specified). +func (f MLStopDataFrameAnalytics) WithAllowNoMatch(v bool) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.AllowNoMatch = &v + } +} + +// WithForce - true if the data frame analytics should be forcefully stopped. +func (f MLStopDataFrameAnalytics) WithForce(v bool) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait until the task has stopped. defaults to 20 seconds. +func (f MLStopDataFrameAnalytics) WithTimeout(v time.Duration) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStopDataFrameAnalytics) WithPretty() func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStopDataFrameAnalytics) WithHuman() func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStopDataFrameAnalytics) WithErrorTrace() func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStopDataFrameAnalytics) WithFilterPath(v ...string) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStopDataFrameAnalytics) WithHeader(h map[string]string) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
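+// Elasticsearch echoes the opaque ID back in the tasks API and in its logs,
+// which makes it a cheap way to correlate a stop request with server-side
+// activity (general X-Opaque-Id behaviour; the IDs below are hypothetical):
+//
+//	res, err := es.ML.StopDataFrameAnalytics(
+//		"my-analytics-job",
+//		es.ML.StopDataFrameAnalytics.WithOpaqueID("shutdown-2024-42"),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}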
+func (f MLStopDataFrameAnalytics) WithOpaqueID(s string) func(*MLStopDataFrameAnalyticsRequest) { + return func(r *MLStopDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_datafeed.go new file mode 100644 index 000000000..5196b700a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_datafeed.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLStopDatafeedFunc(t Transport) MLStopDatafeed { + return func(datafeed_id string, o ...func(*MLStopDatafeedRequest)) (*Response, error) { + var r = MLStopDatafeedRequest{DatafeedID: datafeed_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStopDatafeed - Stops one or more datafeeds. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html. +type MLStopDatafeed func(datafeed_id string, o ...func(*MLStopDatafeedRequest)) (*Response, error) + +// MLStopDatafeedRequest configures the ML Stop Datafeed API request. +type MLStopDatafeedRequest struct { + Body io.Reader + + DatafeedID string + + AllowNoMatch *bool + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
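+//
+// A minimal usage sketch (illustrative only; the client "es" and the
+// datafeed ID are assumptions):
+//
+//	res, err := es.ML.StopDatafeed(
+//		"datafeed-total-requests",
+//		es.ML.StopDatafeed.WithForce(true),
+//		es.ML.StopDatafeed.WithTimeout(30*time.Second),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}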
+func (r MLStopDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID) + 1 + len("_stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + path.WriteString("/") + path.WriteString("_stop") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_datafeed", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStopDatafeed) WithContext(v context.Context) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.ctx = v + } +} + +// WithBody - The URL params optionally sent in the body. +func (f MLStopDatafeed) WithBody(v io.Reader) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.Body = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no datafeeds. (this includes `_all` string or when no datafeeds have been specified). 
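+// When set to false, a wildcard that matches no datafeeds causes the server
+// to return an error instead of a successful no-op; the default is true
+// (behaviour enforced server-side, inferred from the parameter description
+// above rather than from this client).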
+func (f MLStopDatafeed) WithAllowNoMatch(v bool) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.AllowNoMatch = &v + } +} + +// WithForce - true if the datafeed should be forcefully stopped.. +func (f MLStopDatafeed) WithForce(v bool) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait until a datafeed has stopped. default to 20 seconds. +func (f MLStopDatafeed) WithTimeout(v time.Duration) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStopDatafeed) WithPretty() func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStopDatafeed) WithHuman() func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStopDatafeed) WithErrorTrace() func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStopDatafeed) WithFilterPath(v ...string) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStopDatafeed) WithHeader(h map[string]string) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLStopDatafeed) WithOpaqueID(s string) func(*MLStopDatafeedRequest) { + return func(r *MLStopDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_trained_model_deployment.go new file mode 100644 index 000000000..baa49b0db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.stop_trained_model_deployment.go @@ -0,0 +1,272 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLStopTrainedModelDeploymentFunc(t Transport) MLStopTrainedModelDeployment { + return func(model_id string, o ...func(*MLStopTrainedModelDeploymentRequest)) (*Response, error) { + var r = MLStopTrainedModelDeploymentRequest{ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLStopTrainedModelDeployment - Stop a trained model deployment. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html. +type MLStopTrainedModelDeployment func(model_id string, o ...func(*MLStopTrainedModelDeploymentRequest)) (*Response, error) + +// MLStopTrainedModelDeploymentRequest configures the ML Stop Trained Model Deployment API request. +type MLStopTrainedModelDeploymentRequest struct { + Body io.Reader + + ModelID string + + AllowNoMatch *bool + Force *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLStopTrainedModelDeploymentRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("deployment") + 1 + len("_stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_stop") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != 
nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_trained_model_deployment", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_trained_model_deployment") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLStopTrainedModelDeployment) WithContext(v context.Context) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.ctx = v + } +} + +// WithBody - The stop deployment parameters. +func (f MLStopTrainedModelDeployment) WithBody(v io.Reader) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.Body = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no deployments. (this includes `_all` string or when no deployments have been specified). +func (f MLStopTrainedModelDeployment) WithAllowNoMatch(v bool) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.AllowNoMatch = &v + } +} + +// WithForce - true if the deployment should be forcefully stopped. +func (f MLStopTrainedModelDeployment) WithForce(v bool) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.Force = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLStopTrainedModelDeployment) WithPretty() func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLStopTrainedModelDeployment) WithHuman() func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLStopTrainedModelDeployment) WithErrorTrace() func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLStopTrainedModelDeployment) WithFilterPath(v ...string) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLStopTrainedModelDeployment) WithHeader(h map[string]string) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
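+// A usage sketch for stopping a deployment (illustrative only; "es" and the
+// model ID are assumptions):
+//
+//	res, err := es.ML.StopTrainedModelDeployment(
+//		"my-elser-model",
+//		es.ML.StopTrainedModelDeployment.WithForce(true),
+//	)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}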
+func (f MLStopTrainedModelDeployment) WithOpaqueID(s string) func(*MLStopTrainedModelDeploymentRequest) { + return func(r *MLStopTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_data_frame_analytics.go new file mode 100644 index 000000000..c03806ce9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_data_frame_analytics.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLUpdateDataFrameAnalyticsFunc(t Transport) MLUpdateDataFrameAnalytics { + return func(id string, body io.Reader, o ...func(*MLUpdateDataFrameAnalyticsRequest)) (*Response, error) { + var r = MLUpdateDataFrameAnalyticsRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateDataFrameAnalytics - Updates certain properties of a data frame analytics job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html. +type MLUpdateDataFrameAnalytics func(id string, body io.Reader, o ...func(*MLUpdateDataFrameAnalyticsRequest)) (*Response, error) + +// MLUpdateDataFrameAnalyticsRequest configures the ML Update Data Frame Analytics API request. +type MLUpdateDataFrameAnalyticsRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
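+//
+// A minimal usage sketch (illustrative only; client, job ID, and body are
+// assumptions). Note that, unlike most endpoints in this package, the update
+// body is a required positional argument rather than a WithBody option:
+//
+//	body := strings.NewReader(`{"description": "refreshed weekly"}`)
+//	res, err := es.ML.UpdateDataFrameAnalytics("my-analytics-job", body)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}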
+func (r MLUpdateDataFrameAnalyticsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("data_frame") + 1 + len("analytics") + 1 + len(r.DocumentID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.update_data_frame_analytics", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_data_frame_analytics") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLUpdateDataFrameAnalytics) WithContext(v context.Context) func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateDataFrameAnalytics) WithPretty() func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateDataFrameAnalytics) WithHuman() func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f MLUpdateDataFrameAnalytics) WithErrorTrace() func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateDataFrameAnalytics) WithFilterPath(v ...string) func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateDataFrameAnalytics) WithHeader(h map[string]string) func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateDataFrameAnalytics) WithOpaqueID(s string) func(*MLUpdateDataFrameAnalyticsRequest) { + return func(r *MLUpdateDataFrameAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_datafeed.go new file mode 100644 index 000000000..5c6b7b5d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_datafeed.go @@ -0,0 +1,287 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLUpdateDatafeedFunc(t Transport) MLUpdateDatafeed { + return func(body io.Reader, datafeed_id string, o ...func(*MLUpdateDatafeedRequest)) (*Response, error) { + var r = MLUpdateDatafeedRequest{Body: body, DatafeedID: datafeed_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateDatafeed - Updates certain properties of a datafeed. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html. +type MLUpdateDatafeed func(body io.Reader, datafeed_id string, o ...func(*MLUpdateDatafeedRequest)) (*Response, error) + +// MLUpdateDatafeedRequest configures the ML Update Datafeed API request. 
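+//
+// A minimal usage sketch (illustrative only; client, body, and datafeed ID
+// are assumptions). Note the generated signature takes the body first:
+//
+//	body := strings.NewReader(`{"scroll_size": 1000}`)
+//	res, err := es.ML.UpdateDatafeed(body, "datafeed-total-requests")
+//	if err == nil {
+//		defer res.Body.Close()
+//	}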
+type MLUpdateDatafeedRequest struct { + Body io.Reader + + DatafeedID string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreThrottled *bool + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLUpdateDatafeedRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("datafeeds") + 1 + len(r.DatafeedID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString(r.DatafeedID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeed_id", r.DatafeedID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreThrottled != nil { + params["ignore_throttled"] = strconv.FormatBool(*r.IgnoreThrottled) + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.update_datafeed", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_datafeed") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
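+// Cancellation and deadlines on the context propagate to the underlying HTTP
+// request, so a context.WithTimeout here bounds the whole round trip
+// (standard net/http semantics; the generated Do method simply threads the
+// context through, and the names below are the same assumptions as above):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	res, err := es.ML.UpdateDatafeed(body, "datafeed-total-requests",
+//		es.ML.UpdateDatafeed.WithContext(ctx))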
+func (f MLUpdateDatafeed) WithContext(v context.Context) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.ctx = v + } +} + +// WithAllowNoIndices - ignore if the source indices expressions resolves to no concrete indices (default: true). +func (f MLUpdateDatafeed) WithAllowNoIndices(v bool) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether source index expressions should get expanded to open or closed indices (default: open). +func (f MLUpdateDatafeed) WithExpandWildcards(v string) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreThrottled - ignore indices that are marked as throttled (default: true). +func (f MLUpdateDatafeed) WithIgnoreThrottled(v bool) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.IgnoreThrottled = &v + } +} + +// WithIgnoreUnavailable - ignore unavailable indexes (default: false). +func (f MLUpdateDatafeed) WithIgnoreUnavailable(v bool) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateDatafeed) WithPretty() func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateDatafeed) WithHuman() func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLUpdateDatafeed) WithErrorTrace() func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateDatafeed) WithFilterPath(v ...string) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateDatafeed) WithHeader(h map[string]string) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateDatafeed) WithOpaqueID(s string) func(*MLUpdateDatafeedRequest) { + return func(r *MLUpdateDatafeedRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_filter.go new file mode 100644 index 000000000..9c0974d0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_filter.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLUpdateFilterFunc(t Transport) MLUpdateFilter { + return func(body io.Reader, filter_id string, o ...func(*MLUpdateFilterRequest)) (*Response, error) { + var r = MLUpdateFilterRequest{Body: body, FilterID: filter_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateFilter - Updates the description of a filter, adds items, or removes items. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html. +type MLUpdateFilter func(body io.Reader, filter_id string, o ...func(*MLUpdateFilterRequest)) (*Response, error) + +// MLUpdateFilterRequest configures the ML Update Filter API request. +type MLUpdateFilterRequest struct { + Body io.Reader + + FilterID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLUpdateFilterRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("filters") + 1 + len(r.FilterID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + path.WriteString(r.FilterID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "filter_id", r.FilterID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + 
+ if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_filter") + if reader := instrument.RecordRequestBody(ctx, "ml.update_filter", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_filter") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLUpdateFilter) WithContext(v context.Context) func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateFilter) WithPretty() func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateFilter) WithHuman() func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLUpdateFilter) WithErrorTrace() func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateFilter) WithFilterPath(v ...string) func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateFilter) WithHeader(h map[string]string) func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateFilter) WithOpaqueID(s string) func(*MLUpdateFilterRequest) { + return func(r *MLUpdateFilterRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_job.go new file mode 100644 index 000000000..4aaad15ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_job.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLUpdateJobFunc(t Transport) MLUpdateJob { + return func(job_id string, body io.Reader, o ...func(*MLUpdateJobRequest)) (*Response, error) { + var r = MLUpdateJobRequest{JobID: job_id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateJob - Updates certain properties of an anomaly detection job. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html. +type MLUpdateJob func(job_id string, body io.Reader, o ...func(*MLUpdateJobRequest)) (*Response, error) + +// MLUpdateJobRequest configures the ML Update Job API request. +type MLUpdateJobRequest struct { + Body io.Reader + + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLUpdateJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_job") + if reader := instrument.RecordRequestBody(ctx, "ml.update_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_job") + } + if err != nil { + if instrument, ok 
:= r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLUpdateJob) WithContext(v context.Context) func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateJob) WithPretty() func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateJob) WithHuman() func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLUpdateJob) WithErrorTrace() func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateJob) WithFilterPath(v ...string) func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateJob) WithHeader(h map[string]string) func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateJob) WithOpaqueID(s string) func(*MLUpdateJobRequest) { + return func(r *MLUpdateJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_model_snapshot.go new file mode 100644 index 000000000..6fc4f29c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_model_snapshot.go @@ -0,0 +1,245 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLUpdateModelSnapshotFunc(t Transport) MLUpdateModelSnapshot { + return func(snapshot_id string, job_id string, body io.Reader, o ...func(*MLUpdateModelSnapshotRequest)) (*Response, error) { + var r = MLUpdateModelSnapshotRequest{SnapshotID: snapshot_id, JobID: job_id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateModelSnapshot - Updates certain properties of a snapshot. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html. +type MLUpdateModelSnapshot func(snapshot_id string, job_id string, body io.Reader, o ...func(*MLUpdateModelSnapshotRequest)) (*Response, error) + +// MLUpdateModelSnapshotRequest configures the ML Update Model Snapshot API request. +type MLUpdateModelSnapshotRequest struct { + Body io.Reader + + JobID string + SnapshotID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLUpdateModelSnapshotRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = 
headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_model_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.update_model_snapshot", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_model_snapshot") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLUpdateModelSnapshot) WithContext(v context.Context) func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateModelSnapshot) WithPretty() func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateModelSnapshot) WithHuman() func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLUpdateModelSnapshot) WithErrorTrace() func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateModelSnapshot) WithFilterPath(v ...string) func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateModelSnapshot) WithHeader(h map[string]string) func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateModelSnapshot) WithOpaqueID(s string) func(*MLUpdateModelSnapshotRequest) { + return func(r *MLUpdateModelSnapshotRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_trained_model_deployment.go new file mode 100644 index 000000000..af0ec102b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.update_trained_model_deployment.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newMLUpdateTrainedModelDeploymentFunc(t Transport) MLUpdateTrainedModelDeployment { + return func(model_id string, o ...func(*MLUpdateTrainedModelDeploymentRequest)) (*Response, error) { + var r = MLUpdateTrainedModelDeploymentRequest{ModelID: model_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpdateTrainedModelDeployment - Updates certain properties of trained model deployment. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html. +type MLUpdateTrainedModelDeployment func(model_id string, o ...func(*MLUpdateTrainedModelDeploymentRequest)) (*Response, error) + +// MLUpdateTrainedModelDeploymentRequest configures the ML Update Trained Model Deployment API request. +type MLUpdateTrainedModelDeploymentRequest struct { + Body io.Reader + + ModelID string + + NumberOfAllocations *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
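+//
+// Note that Do is normally reached through the callable wrapper above, which
+// forwards the configured context and transport (r.Do(r.ctx, t)); calling it
+// directly with a custom Transport is possible but unusual.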
+func (r MLUpdateTrainedModelDeploymentRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("trained_models") + 1 + len(r.ModelID) + 1 + len("deployment") + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString(r.ModelID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "model_id", r.ModelID) + } + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.NumberOfAllocations != nil { + params["number_of_allocations"] = strconv.FormatInt(int64(*r.NumberOfAllocations), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.update_trained_model_deployment", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_trained_model_deployment") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLUpdateTrainedModelDeployment) WithContext(v context.Context) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.ctx = v + } +} + +// WithBody - The updated trained model deployment settings. +func (f MLUpdateTrainedModelDeployment) WithBody(v io.Reader) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.Body = v + } +} + +// WithNumberOfAllocations - update the model deployment to this number of allocations.. 
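+//
+// For example (a sketch only; the client value es, the model ID, and the
+// allocation count are hypothetical):
+//
+//	res, err := es.ML.UpdateTrainedModelDeployment("my-model",
+//		es.ML.UpdateTrainedModelDeployment.WithNumberOfAllocations(2))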
+func (f MLUpdateTrainedModelDeployment) WithNumberOfAllocations(v int) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.NumberOfAllocations = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLUpdateTrainedModelDeployment) WithPretty() func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLUpdateTrainedModelDeployment) WithHuman() func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLUpdateTrainedModelDeployment) WithErrorTrace() func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLUpdateTrainedModelDeployment) WithFilterPath(v ...string) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLUpdateTrainedModelDeployment) WithHeader(h map[string]string) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLUpdateTrainedModelDeployment) WithOpaqueID(s string) func(*MLUpdateTrainedModelDeploymentRequest) { + return func(r *MLUpdateTrainedModelDeploymentRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.upgrade_job_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.upgrade_job_snapshot.go new file mode 100644 index 000000000..d7e4b91e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.upgrade_job_snapshot.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newMLUpgradeJobSnapshotFunc(t Transport) MLUpgradeJobSnapshot { + return func(snapshot_id string, job_id string, o ...func(*MLUpgradeJobSnapshotRequest)) (*Response, error) { + var r = MLUpgradeJobSnapshotRequest{SnapshotID: snapshot_id, JobID: job_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLUpgradeJobSnapshot - Upgrades a given job snapshot to the current major version. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html. +type MLUpgradeJobSnapshot func(snapshot_id string, job_id string, o ...func(*MLUpgradeJobSnapshotRequest)) (*Response, error) + +// MLUpgradeJobSnapshotRequest configures the ML Upgrade Job Snapshot API request. +type MLUpgradeJobSnapshotRequest struct { + JobID string + SnapshotID string + + Timeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r MLUpgradeJobSnapshotRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.upgrade_job_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_ml") + 1 + len("anomaly_detectors") + 1 + len(r.JobID) + 1 + len("model_snapshots") + 1 + len(r.SnapshotID) + 1 + len("_upgrade")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "job_id", r.JobID) + } + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + path.WriteString(r.SnapshotID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot_id", r.SnapshotID) + } + path.WriteString("/") + path.WriteString("_upgrade") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + 
for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "ml.upgrade_job_snapshot")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ml.upgrade_job_snapshot")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f MLUpgradeJobSnapshot) WithContext(v context.Context) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.ctx = v
+	}
+}
+
+// WithTimeout - how long the API should wait for the job to be opened and the old snapshot to be loaded.
+func (f MLUpgradeJobSnapshot) WithTimeout(v time.Duration) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.Timeout = v
+	}
+}
+
+// WithWaitForCompletion - whether the request should wait until the task is complete before responding to the caller (default: false).
+func (f MLUpgradeJobSnapshot) WithWaitForCompletion(v bool) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.WaitForCompletion = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f MLUpgradeJobSnapshot) WithPretty() func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f MLUpgradeJobSnapshot) WithHuman() func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f MLUpgradeJobSnapshot) WithErrorTrace() func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f MLUpgradeJobSnapshot) WithFilterPath(v ...string) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f MLUpgradeJobSnapshot) WithHeader(h map[string]string) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f MLUpgradeJobSnapshot) WithOpaqueID(s string) func(*MLUpgradeJobSnapshotRequest) {
+	return func(r *MLUpgradeJobSnapshotRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate.go
new file mode 100644
index 000000000..1b65cbdd7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate.go
@@ -0,0 +1,225 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLValidateFunc(t Transport) MLValidate { + return func(body io.Reader, o ...func(*MLValidateRequest)) (*Response, error) { + var r = MLValidateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLValidate - Validates an anomaly detection job. +// +// See full documentation at https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html. +type MLValidate func(body io.Reader, o ...func(*MLValidateRequest)) (*Response, error) + +// MLValidateRequest configures the ML Validate API request. +type MLValidateRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
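+//
+// The returned Response wraps the raw HTTP status code, body, and headers;
+// only transport-level failures surface as the error value, so HTTP-level
+// errors (4xx/5xx) must be detected by the caller from StatusCode, and the
+// Body must be closed after reading.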
+func (r MLValidateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.validate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ml/anomaly_detectors/_validate")) + path.WriteString("http://") + path.WriteString("/_ml/anomaly_detectors/_validate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.validate") + if reader := instrument.RecordRequestBody(ctx, "ml.validate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.validate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLValidate) WithContext(v context.Context) func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLValidate) WithPretty() func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLValidate) WithHuman() func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLValidate) WithErrorTrace() func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLValidate) WithFilterPath(v ...string) func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MLValidate) WithHeader(h map[string]string) func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
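+//
+// Elasticsearch echoes this identifier in places such as the tasks API and
+// slow logs, so a caller-chosen value (for example a request or trace ID)
+// makes server-side activity attributable to this client call.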
+func (f MLValidate) WithOpaqueID(s string) func(*MLValidateRequest) { + return func(r *MLValidateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate_detector.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate_detector.go new file mode 100644 index 000000000..8d3d403dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ml.validate_detector.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMLValidateDetectorFunc(t Transport) MLValidateDetector { + return func(body io.Reader, o ...func(*MLValidateDetectorRequest)) (*Response, error) { + var r = MLValidateDetectorRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MLValidateDetector - Validates an anomaly detection detector. +// +// See full documentation at https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html. +type MLValidateDetector func(body io.Reader, o ...func(*MLValidateDetectorRequest)) (*Response, error) + +// MLValidateDetectorRequest configures the ML Validate Detector API request. +type MLValidateDetectorRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r MLValidateDetectorRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.validate_detector") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_ml/anomaly_detectors/_validate/detector")) + path.WriteString("http://") + path.WriteString("/_ml/anomaly_detectors/_validate/detector") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ml.validate_detector") + if reader := instrument.RecordRequestBody(ctx, "ml.validate_detector", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.validate_detector") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MLValidateDetector) WithContext(v context.Context) func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MLValidateDetector) WithPretty() func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MLValidateDetector) WithHuman() func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MLValidateDetector) WithErrorTrace() func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MLValidateDetector) WithFilterPath(v ...string) func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
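+//
+// Values are appended with Header.Add, so repeated calls (or repeated keys)
+// accumulate rather than overwrite; contrast WithOpaqueID below, which uses
+// Header.Set and replaces any existing value.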
+func (f MLValidateDetector) WithHeader(h map[string]string) func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MLValidateDetector) WithOpaqueID(s string) func(*MLValidateDetectorRequest) { + return func(r *MLValidateDetectorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.monitoring.bulk.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.monitoring.bulk.go new file mode 100644 index 000000000..bca149242 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.monitoring.bulk.go @@ -0,0 +1,281 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newMonitoringBulkFunc(t Transport) MonitoringBulk { + return func(body io.Reader, o ...func(*MonitoringBulkRequest)) (*Response, error) { + var r = MonitoringBulkRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// MonitoringBulk - Used by the monitoring features to send monitoring data. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html. +type MonitoringBulk func(body io.Reader, o ...func(*MonitoringBulkRequest)) (*Response, error) + +// MonitoringBulkRequest configures the Monitoring Bulk API request. +type MonitoringBulkRequest struct { + Body io.Reader + + DocumentType string + + Interval string + SystemAPIVersion string + SystemID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
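+//
+// As with the core Bulk API, the request body is expected to be
+// newline-delimited JSON (an action line followed by a source line per item,
+// terminated by a trailing newline) rather than a single JSON document.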
+func (r MonitoringBulkRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "monitoring.bulk") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_monitoring") + 1 + len(r.DocumentType) + 1 + len("bulk")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_monitoring") + if r.DocumentType != "" { + path.WriteString("/") + path.WriteString(r.DocumentType) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "type", r.DocumentType) + } + } + path.WriteString("/") + path.WriteString("bulk") + + params = make(map[string]string) + + if r.Interval != "" { + params["interval"] = r.Interval + } + + if r.SystemAPIVersion != "" { + params["system_api_version"] = r.SystemAPIVersion + } + + if r.SystemID != "" { + params["system_id"] = r.SystemID + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "monitoring.bulk") + if reader := instrument.RecordRequestBody(ctx, "monitoring.bulk", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "monitoring.bulk") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f MonitoringBulk) WithContext(v context.Context) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.ctx = v + } +} + +// WithDocumentType - default document type for items which don't provide one. +func (f MonitoringBulk) WithDocumentType(v string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.DocumentType = v + } +} + +// WithInterval - collection interval (e.g., '10s' or '10000ms') of the payload. +func (f MonitoringBulk) WithInterval(v string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.Interval = v + } +} + +// WithSystemAPIVersion - api version of the monitored system. 
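+//
+// A shipper identifying itself would typically combine this with the other
+// options, along the lines of (values are illustrative only, assuming the
+// standard client wiring):
+//
+//	es.Monitoring.Bulk(body,
+//		es.Monitoring.Bulk.WithSystemID("logstash"),
+//		es.Monitoring.Bulk.WithSystemAPIVersion("7"),
+//		es.Monitoring.Bulk.WithInterval("10s"))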
+func (f MonitoringBulk) WithSystemAPIVersion(v string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.SystemAPIVersion = v + } +} + +// WithSystemID - identifier of the monitored system. +func (f MonitoringBulk) WithSystemID(v string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.SystemID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f MonitoringBulk) WithPretty() func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f MonitoringBulk) WithHuman() func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f MonitoringBulk) WithErrorTrace() func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f MonitoringBulk) WithFilterPath(v ...string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f MonitoringBulk) WithHeader(h map[string]string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f MonitoringBulk) WithOpaqueID(s string) func(*MonitoringBulkRequest) { + return func(r *MonitoringBulkRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.open_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.open_point_in_time.go new file mode 100644 index 000000000..83179b9e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.open_point_in_time.go @@ -0,0 +1,331 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strconv" + "strings" +) + +func newOpenPointInTimeFunc(t Transport) OpenPointInTime { + return func(index []string, keep_alive string, o ...func(*OpenPointInTimeRequest)) (*Response, error) { + var r = OpenPointInTimeRequest{Index: index, KeepAlive: keep_alive} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// OpenPointInTime - Open a point in time that can be used in subsequent searches +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time-api.html. +type OpenPointInTime func(index []string, keep_alive string, o ...func(*OpenPointInTimeRequest)) (*Response, error) + +// OpenPointInTimeRequest configures the Open Point In Time API request. +type OpenPointInTimeRequest struct { + Index []string + + Body io.Reader + + AllowPartialSearchResults *bool + ExpandWildcards string + IgnoreUnavailable *bool + KeepAlive string + MaxConcurrentShardRequests *int + Preference string + Routing string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r OpenPointInTimeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "open_point_in_time") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_pit")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_pit") + + params = make(map[string]string) + + if r.AllowPartialSearchResults != nil { + params["allow_partial_search_results"] = strconv.FormatBool(*r.AllowPartialSearchResults) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.KeepAlive != "" { + params["keep_alive"] = r.KeepAlive + } + + if r.MaxConcurrentShardRequests != nil { + params["max_concurrent_shard_requests"] = strconv.FormatInt(int64(*r.MaxConcurrentShardRequests), 10) + } + + if r.Preference != "" { + params["preference"] = r.Preference + } + + if r.Routing != "" { + params["routing"] = r.Routing + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "open_point_in_time") + if reader := instrument.RecordRequestBody(ctx, "open_point_in_time", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "open_point_in_time") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f OpenPointInTime) WithContext(v context.Context) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.ctx = v + } +} + +// WithBody - An index_filter specified with the Query DSL. +func (f OpenPointInTime) WithBody(v io.Reader) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.Body = v + } +} + +// WithAllowPartialSearchResults - specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception. (default: false). +func (f OpenPointInTime) WithAllowPartialSearchResults(v bool) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.AllowPartialSearchResults = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f OpenPointInTime) WithExpandWildcards(v string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f OpenPointInTime) WithIgnoreUnavailable(v bool) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithKeepAlive - specific the time to live for the point in time. +func (f OpenPointInTime) WithKeepAlive(v string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.KeepAlive = v + } +} + +// WithMaxConcurrentShardRequests - the number of concurrent shard requests per node executed concurrently when opening this point-in-time. this value should be used to limit the impact of opening the point-in-time on the cluster. +func (f OpenPointInTime) WithMaxConcurrentShardRequests(v int) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.MaxConcurrentShardRequests = &v + } +} + +// WithPreference - specify the node or shard the operation should be performed on (default: random). +func (f OpenPointInTime) WithPreference(v string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.Preference = v + } +} + +// WithRouting - specific routing value. 
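// ----- [Editor's note] Usage sketch for the Open Point In Time API above; an
// annotation, not part of the generated file. Assumes an initialized
// *elasticsearch.Client "es" (e.g. from elasticsearch.NewDefaultClient()) and
// a hypothetical index name:
//
//	req := esapi.OpenPointInTimeRequest{
//		Index:     []string{"my-index"},
//		KeepAlive: "1m",
//	}
//	res, err := req.Do(context.Background(), es)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close() // the JSON body carries the PIT "id" for later searches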
+func (f OpenPointInTime) WithRouting(v string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.Routing = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f OpenPointInTime) WithPretty() func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f OpenPointInTime) WithHuman() func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f OpenPointInTime) WithErrorTrace() func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f OpenPointInTime) WithFilterPath(v ...string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f OpenPointInTime) WithHeader(h map[string]string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f OpenPointInTime) WithOpaqueID(s string) func(*OpenPointInTimeRequest) { + return func(r *OpenPointInTimeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.profiling.flamegraph.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.profiling.flamegraph.go new file mode 100644 index 000000000..7a350e819 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.profiling.flamegraph.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newProfilingFlamegraphFunc(t Transport) ProfilingFlamegraph { + return func(body io.Reader, o ...func(*ProfilingFlamegraphRequest)) (*Response, error) { + var r = ProfilingFlamegraphRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ProfilingFlamegraph - Extracts a UI-optimized structure to render flamegraphs from Universal Profiling. 
+// +// See full documentation at https://www.elastic.co/guide/en/observability/current/universal-profiling.html. +type ProfilingFlamegraph func(body io.Reader, o ...func(*ProfilingFlamegraphRequest)) (*Response, error) + +// ProfilingFlamegraphRequest configures the Profiling Flamegraph API request. +type ProfilingFlamegraphRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r ProfilingFlamegraphRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.flamegraph") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_profiling/flamegraph")) + path.WriteString("http://") + path.WriteString("/_profiling/flamegraph") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.flamegraph") + if reader := instrument.RecordRequestBody(ctx, "profiling.flamegraph", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.flamegraph") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ProfilingFlamegraph) WithContext(v context.Context) func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ProfilingFlamegraph) WithPretty() func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f ProfilingFlamegraph) WithHuman() func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
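// ----- [Editor's note] Usage sketch, not generated code. The flamegraph
// endpoint expects a JSON body; the query shape and the client variable "es"
// below are illustrative assumptions:
//
//	body := strings.NewReader(`{"query":{"range":{"@timestamp":{"gte":"now-15m"}}}}`)
//	req := esapi.ProfilingFlamegraphRequest{Body: body}
//	res, err := req.Do(context.Background(), es)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()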
+func (f ProfilingFlamegraph) WithErrorTrace() func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ProfilingFlamegraph) WithFilterPath(v ...string) func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ProfilingFlamegraph) WithHeader(h map[string]string) func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ProfilingFlamegraph) WithOpaqueID(s string) func(*ProfilingFlamegraphRequest) { + return func(r *ProfilingFlamegraphRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.delete_job.go new file mode 100644 index 000000000..d1335eb91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.delete_job.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newRollupDeleteJobFunc(t Transport) RollupDeleteJob { + return func(id string, o ...func(*RollupDeleteJobRequest)) (*Response, error) { + var r = RollupDeleteJobRequest{JobID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupDeleteJob - Deletes an existing rollup job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html. +type RollupDeleteJob func(id string, o ...func(*RollupDeleteJobRequest)) (*Response, error) + +// RollupDeleteJobRequest configures the Rollup Delete Job API request. +type RollupDeleteJobRequest struct { + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupDeleteJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.delete_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("job") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.JobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.delete_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.delete_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupDeleteJob) WithContext(v context.Context) func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupDeleteJob) WithPretty() func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupDeleteJob) WithHuman() func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupDeleteJob) WithErrorTrace() func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupDeleteJob) WithFilterPath(v ...string) func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
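// ----- [Editor's note] Usage sketch, not generated code. Assuming the client
// exposes this API as es.Rollup.DeleteJob, a stopped job is deleted by ID
// ("sensor_rollup" is hypothetical):
//
//	res, err := es.Rollup.DeleteJob("sensor_rollup",
//		es.Rollup.DeleteJob.WithContext(context.Background()),
//	)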
+func (f RollupDeleteJob) WithHeader(h map[string]string) func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupDeleteJob) WithOpaqueID(s string) func(*RollupDeleteJobRequest) { + return func(r *RollupDeleteJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_jobs.go new file mode 100644 index 000000000..f55b3a314 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_jobs.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newRollupGetJobsFunc(t Transport) RollupGetJobs { + return func(o ...func(*RollupGetJobsRequest)) (*Response, error) { + var r = RollupGetJobsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupGetJobs - Retrieves the configuration, stats, and status of rollup jobs. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html. +type RollupGetJobs func(o ...func(*RollupGetJobsRequest)) (*Response, error) + +// RollupGetJobsRequest configures the Rollup Get Jobs API request. +type RollupGetJobsRequest struct { + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupGetJobsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.get_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("job") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + if r.JobID != "" { + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.JobID) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.get_jobs") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.get_jobs") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupGetJobs) WithContext(v context.Context) func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.ctx = v + } +} + +// WithJobID - the ID of the job(s) to fetch. accepts glob patterns, or left blank for all jobs. +func (f RollupGetJobs) WithJobID(v string) func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.JobID = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupGetJobs) WithPretty() func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupGetJobs) WithHuman() func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupGetJobs) WithErrorTrace() func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupGetJobs) WithFilterPath(v ...string) func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
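// ----- [Editor's note] Usage sketch, not generated code. With no JobID the
// endpoint returns all jobs; WithJobID narrows the result (the es.Rollup
// namespace and the job ID are assumptions):
//
//	res, err := es.Rollup.GetJobs(es.Rollup.GetJobs.WithJobID("sensor_rollup"))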
+func (f RollupGetJobs) WithHeader(h map[string]string) func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupGetJobs) WithOpaqueID(s string) func(*RollupGetJobsRequest) { + return func(r *RollupGetJobsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_caps.go new file mode 100644 index 000000000..1073bcb59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_caps.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newRollupGetRollupCapsFunc(t Transport) RollupGetRollupCaps { + return func(o ...func(*RollupGetRollupCapsRequest)) (*Response, error) { + var r = RollupGetRollupCapsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupGetRollupCaps - Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html. +type RollupGetRollupCaps func(o ...func(*RollupGetRollupCapsRequest)) (*Response, error) + +// RollupGetRollupCapsRequest configures the Rollup Get Rollup Caps API request. +type RollupGetRollupCapsRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupGetRollupCapsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.get_rollup_caps") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("data") + 1 + len(r.Index)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("data") + if r.Index != "" { + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.Index) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.get_rollup_caps") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.get_rollup_caps") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupGetRollupCaps) WithContext(v context.Context) func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.ctx = v + } +} + +// WithIndex - the ID of the index to check rollup capabilities on, or left blank for all jobs. +func (f RollupGetRollupCaps) WithIndex(v string) func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.Index = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupGetRollupCaps) WithPretty() func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupGetRollupCaps) WithHuman() func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupGetRollupCaps) WithErrorTrace() func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
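// ----- [Editor's note] Usage sketch, not generated code. WithIndex accepts a
// concrete index or a pattern; "sensor-*" and the es.Rollup namespace are
// assumptions:
//
//	res, err := es.Rollup.GetRollupCaps(es.Rollup.GetRollupCaps.WithIndex("sensor-*"))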
+func (f RollupGetRollupCaps) WithFilterPath(v ...string) func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RollupGetRollupCaps) WithHeader(h map[string]string) func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupGetRollupCaps) WithOpaqueID(s string) func(*RollupGetRollupCapsRequest) { + return func(r *RollupGetRollupCapsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_index_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_index_caps.go new file mode 100644 index 000000000..084dffc26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.get_rollup_index_caps.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newRollupGetRollupIndexCapsFunc(t Transport) RollupGetRollupIndexCaps { + return func(index string, o ...func(*RollupGetRollupIndexCapsRequest)) (*Response, error) { + var r = RollupGetRollupIndexCapsRequest{Index: index} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupGetRollupIndexCaps - Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html. +type RollupGetRollupIndexCaps func(index string, o ...func(*RollupGetRollupIndexCapsRequest)) (*Response, error) + +// RollupGetRollupIndexCapsRequest configures the Rollup Get Rollup Index Caps API request. +type RollupGetRollupIndexCapsRequest struct { + Index string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupGetRollupIndexCapsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.get_rollup_index_caps") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_rollup") + 1 + len("data")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.Index) + } + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("data") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.get_rollup_index_caps") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.get_rollup_index_caps") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupGetRollupIndexCaps) WithContext(v context.Context) func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupGetRollupIndexCaps) WithPretty() func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupGetRollupIndexCaps) WithHuman() func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupGetRollupIndexCaps) WithErrorTrace() func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupGetRollupIndexCaps) WithFilterPath(v ...string) func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
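// ----- [Editor's note] Usage sketch, not generated code. Unlike the previous
// endpoint, the index here is a required positional argument naming the
// rollup index itself ("sensor_rollup" is hypothetical):
//
//	res, err := es.Rollup.GetRollupIndexCaps("sensor_rollup")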
+func (f RollupGetRollupIndexCaps) WithHeader(h map[string]string) func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupGetRollupIndexCaps) WithOpaqueID(s string) func(*RollupGetRollupIndexCapsRequest) { + return func(r *RollupGetRollupIndexCapsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.put_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.put_job.go new file mode 100644 index 000000000..32a8419b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.put_job.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newRollupPutJobFunc(t Transport) RollupPutJob { + return func(id string, body io.Reader, o ...func(*RollupPutJobRequest)) (*Response, error) { + var r = RollupPutJobRequest{JobID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupPutJob - Creates a rollup job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html. +type RollupPutJob func(id string, body io.Reader, o ...func(*RollupPutJobRequest)) (*Response, error) + +// RollupPutJobRequest configures the Rollup Put Job API request. +type RollupPutJobRequest struct { + JobID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupPutJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.put_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("job") + 1 + len(r.JobID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.JobID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.put_job") + if reader := instrument.RecordRequestBody(ctx, "rollup.put_job", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.put_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupPutJob) WithContext(v context.Context) func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupPutJob) WithPretty() func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupPutJob) WithHuman() func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupPutJob) WithErrorTrace() func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupPutJob) WithFilterPath(v ...string) func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
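// ----- [Editor's note] Usage sketch, not generated code. The body carries the
// job configuration; the field values below follow the reference-manual
// example and are illustrative, as is the es.Rollup namespace:
//
//	cfg := strings.NewReader(`{
//	  "index_pattern": "sensor-*",
//	  "rollup_index": "sensor_rollup",
//	  "cron": "*/30 * * * * ?",
//	  "page_size": 1000,
//	  "groups": {"date_histogram": {"field": "timestamp", "fixed_interval": "1h"}}
//	}`)
//	res, err := es.Rollup.PutJob("sensor", cfg)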
+func (f RollupPutJob) WithHeader(h map[string]string) func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupPutJob) WithOpaqueID(s string) func(*RollupPutJobRequest) { + return func(r *RollupPutJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.rollup_search.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.rollup_search.go new file mode 100644 index 000000000..94b407dff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.rollup_search.go @@ -0,0 +1,266 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strconv" + "strings" +) + +func newRollupRollupSearchFunc(t Transport) RollupRollupSearch { + return func(index []string, body io.Reader, o ...func(*RollupRollupSearchRequest)) (*Response, error) { + var r = RollupRollupSearchRequest{Index: index, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupRollupSearch - Enables searching rolled-up data using the standard query DSL. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html. +type RollupRollupSearch func(index []string, body io.Reader, o ...func(*RollupRollupSearchRequest)) (*Response, error) + +// RollupRollupSearchRequest configures the Rollup Rollup Search API request. +type RollupRollupSearchRequest struct { + Index []string + + Body io.Reader + + RestTotalHitsAsInt *bool + TypedKeys *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupRollupSearchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.rollup_search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Index) == 0 { + return nil, errors.New("index is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_rollup_search")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + path.WriteString("/") + path.WriteString("_rollup_search") + + params = make(map[string]string) + + if r.RestTotalHitsAsInt != nil { + params["rest_total_hits_as_int"] = strconv.FormatBool(*r.RestTotalHitsAsInt) + } + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.rollup_search") + if reader := instrument.RecordRequestBody(ctx, "rollup.rollup_search", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.rollup_search") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupRollupSearch) WithContext(v context.Context) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.ctx = v + } +} + +// WithRestTotalHitsAsInt - indicates whether hits.total should be rendered as an integer or an object in the rest search response. +func (f RollupRollupSearch) WithRestTotalHitsAsInt(v bool) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.RestTotalHitsAsInt = &v + } +} + +// WithTypedKeys - specify whether aggregation and suggester names should be prefixed by their respective types in the response. 
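// ----- [Editor's note] Usage sketch, not generated code. Rollup search takes
// a regular search body against the rollup index; the index name and the
// aggregation are illustrative:
//
//	query := strings.NewReader(`{"size":0,"aggregations":{"max_temperature":{"max":{"field":"temperature"}}}}`)
//	res, err := es.Rollup.RollupSearch([]string{"sensor_rollup"}, query,
//		es.Rollup.RollupSearch.WithTypedKeys(true),
//	)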
+func (f RollupRollupSearch) WithTypedKeys(v bool) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.TypedKeys = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupRollupSearch) WithPretty() func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupRollupSearch) WithHuman() func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupRollupSearch) WithErrorTrace() func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupRollupSearch) WithFilterPath(v ...string) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RollupRollupSearch) WithHeader(h map[string]string) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupRollupSearch) WithOpaqueID(s string) func(*RollupRollupSearchRequest) { + return func(r *RollupRollupSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.start_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.start_job.go new file mode 100644 index 000000000..a657c45b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.start_job.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newRollupStartJobFunc(t Transport) RollupStartJob { + return func(id string, o ...func(*RollupStartJobRequest)) (*Response, error) { + var r = RollupStartJobRequest{JobID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupStartJob - Starts an existing, stopped rollup job. +// +// This API is experimental. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html. +type RollupStartJob func(id string, o ...func(*RollupStartJobRequest)) (*Response, error) + +// RollupStartJobRequest configures the Rollup Start Job API request. +type RollupStartJobRequest struct { + JobID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r RollupStartJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.start_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("job") + 1 + len(r.JobID) + 1 + len("_start")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.JobID) + } + path.WriteString("/") + path.WriteString("_start") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.start_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.start_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupStartJob) WithContext(v context.Context) func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupStartJob) WithPretty() func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f RollupStartJob) WithHuman() func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
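// ----- [Editor's note] Usage sketch, not generated code (the job ID and the
// es.Rollup namespace are assumptions):
//
//	res, err := es.Rollup.StartJob("sensor")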
+func (f RollupStartJob) WithErrorTrace() func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupStartJob) WithFilterPath(v ...string) func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RollupStartJob) WithHeader(h map[string]string) func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupStartJob) WithOpaqueID(s string) func(*RollupStartJobRequest) { + return func(r *RollupStartJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.stop_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.stop_job.go new file mode 100644 index 000000000..bc107e029 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.rollup.stop_job.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newRollupStopJobFunc(t Transport) RollupStopJob { + return func(id string, o ...func(*RollupStopJobRequest)) (*Response, error) { + var r = RollupStopJobRequest{JobID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// RollupStopJob - Stops an existing, started rollup job. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html. +type RollupStopJob func(id string, o ...func(*RollupStopJobRequest)) (*Response, error) + +// RollupStopJobRequest configures the Rollup Stop Job API request. +type RollupStopJobRequest struct { + JobID string + + Timeout time.Duration + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r RollupStopJobRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.stop_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_rollup") + 1 + len("job") + 1 + len(r.JobID) + 1 + len("_stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + path.WriteString(r.JobID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.JobID) + } + path.WriteString("/") + path.WriteString("_stop") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.stop_job") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.stop_job") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f RollupStopJob) WithContext(v context.Context) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.ctx = v + } +} + +// WithTimeout - block for (at maximum) the specified duration while waiting for the job to stop. defaults to 30s.. +func (f RollupStopJob) WithTimeout(v time.Duration) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.Timeout = v + } +} + +// WithWaitForCompletion - true if the api should block until the job has fully stopped, false if should be executed async. defaults to false.. +func (f RollupStopJob) WithWaitForCompletion(v bool) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f RollupStopJob) WithPretty() func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
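+//
+// Usage sketch (hand-written, not part of the generated file): stopping the
+// same hypothetical job synchronously. WithWaitForCompletion stores the
+// address of its argument because the request field is a *bool, and the
+// timeout is serialized by formatDuration as shown in Do above:
+//
+//	res, err := es.Rollup.StopJob(
+//		"my_rollup_job",
+//		es.Rollup.StopJob.WithWaitForCompletion(true),
+//		es.Rollup.StopJob.WithTimeout(30*time.Second),
+//	)
+//	if err != nil {
+//		log.Fatalf("rollup.stop_job: %s", err)
+//	}
+//	defer res.Body.Close()
+//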
+func (f RollupStopJob) WithHuman() func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f RollupStopJob) WithErrorTrace() func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f RollupStopJob) WithFilterPath(v ...string) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f RollupStopJob) WithHeader(h map[string]string) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f RollupStopJob) WithOpaqueID(s string) func(*RollupStopJobRequest) { + return func(r *RollupStopJobRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.cache_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.cache_stats.go new file mode 100644 index 000000000..d0fa1cb14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.cache_stats.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchableSnapshotsCacheStatsFunc(t Transport) SearchableSnapshotsCacheStats { + return func(o ...func(*SearchableSnapshotsCacheStatsRequest)) (*Response, error) { + var r = SearchableSnapshotsCacheStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchableSnapshotsCacheStats - Retrieve node-level cache statistics about searchable snapshots. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html. +type SearchableSnapshotsCacheStats func(o ...func(*SearchableSnapshotsCacheStatsRequest)) (*Response, error) + +// SearchableSnapshotsCacheStatsRequest configures the Searchable Snapshots Cache Stats API request. 
+type SearchableSnapshotsCacheStatsRequest struct { + NodeID []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchableSnapshotsCacheStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.cache_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_searchable_snapshots") + 1 + len(strings.Join(r.NodeID, ",")) + 1 + len("cache") + 1 + len("stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_searchable_snapshots") + if len(r.NodeID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.NodeID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "node_id", strings.Join(r.NodeID, ",")) + } + } + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.cache_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.cache_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchableSnapshotsCacheStats) WithContext(v context.Context) func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.ctx = v + } +} + +// WithNodeID - a list of node ids or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (f SearchableSnapshotsCacheStats) WithNodeID(v ...string) func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.NodeID = v + } +} + +// WithPretty makes the response body pretty-printed. 
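+//
+// Usage sketch (hand-written, not part of the generated file): because the
+// node ID is optional, it is passed as a With* option rather than as a
+// required argument. Assuming the hypothetical client "es" from the earlier
+// sketches, cache statistics for the connected node only:
+//
+//	res, err := es.SearchableSnapshots.CacheStats(
+//		es.SearchableSnapshots.CacheStats.WithNodeID("_local"),
+//	)
+//	if err != nil {
+//		log.Fatalf("searchable_snapshots.cache_stats: %s", err)
+//	}
+//	defer res.Body.Close()
+//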
+func (f SearchableSnapshotsCacheStats) WithPretty() func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchableSnapshotsCacheStats) WithHuman() func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchableSnapshotsCacheStats) WithErrorTrace() func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchableSnapshotsCacheStats) WithFilterPath(v ...string) func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchableSnapshotsCacheStats) WithHeader(h map[string]string) func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchableSnapshotsCacheStats) WithOpaqueID(s string) func(*SearchableSnapshotsCacheStatsRequest) { + return func(r *SearchableSnapshotsCacheStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.clear_cache.go new file mode 100644 index 000000000..6e59b486c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.clear_cache.go @@ -0,0 +1,276 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSearchableSnapshotsClearCacheFunc(t Transport) SearchableSnapshotsClearCache { + return func(o ...func(*SearchableSnapshotsClearCacheRequest)) (*Response, error) { + var r = SearchableSnapshotsClearCacheRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchableSnapshotsClearCache - Clear the cache of searchable snapshots. 
+// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html. +type SearchableSnapshotsClearCache func(o ...func(*SearchableSnapshotsClearCacheRequest)) (*Response, error) + +// SearchableSnapshotsClearCacheRequest configures the Searchable Snapshots Clear Cache API request. +type SearchableSnapshotsClearCacheRequest struct { + Index []string + + AllowNoIndices *bool + ExpandWildcards string + IgnoreUnavailable *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchableSnapshotsClearCacheRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_searchable_snapshots") + 1 + len("cache") + 1 + len("clear")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("clear") + + params = make(map[string]string) + + if r.AllowNoIndices != nil { + params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) + } + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IgnoreUnavailable != nil { + params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.clear_cache") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.clear_cache") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
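+//
+// Usage sketch (hand-written, not part of the generated file): clearing the
+// cache for a hypothetical wildcard index pattern; the index list, like the
+// node list above, is comma-joined into the URL path by Do:
+//
+//	res, err := es.SearchableSnapshots.ClearCache(
+//		es.SearchableSnapshots.ClearCache.WithIndex("my-index-*"),
+//		es.SearchableSnapshots.ClearCache.WithExpandWildcards("open"),
+//	)
+//	if err != nil {
+//		log.Fatalf("searchable_snapshots.clear_cache: %s", err)
+//	}
+//	defer res.Body.Close()
+//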
+func (f SearchableSnapshotsClearCache) WithContext(v context.Context) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names. +func (f SearchableSnapshotsClearCache) WithIndex(v ...string) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.Index = v + } +} + +// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). +func (f SearchableSnapshotsClearCache) WithAllowNoIndices(v bool) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.AllowNoIndices = &v + } +} + +// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. +func (f SearchableSnapshotsClearCache) WithExpandWildcards(v string) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.ExpandWildcards = v + } +} + +// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). +func (f SearchableSnapshotsClearCache) WithIgnoreUnavailable(v bool) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.IgnoreUnavailable = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchableSnapshotsClearCache) WithPretty() func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchableSnapshotsClearCache) WithHuman() func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchableSnapshotsClearCache) WithErrorTrace() func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchableSnapshotsClearCache) WithFilterPath(v ...string) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchableSnapshotsClearCache) WithHeader(h map[string]string) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchableSnapshotsClearCache) WithOpaqueID(s string) func(*SearchableSnapshotsClearCacheRequest) { + return func(r *SearchableSnapshotsClearCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.mount.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.mount.go new file mode 100644 index 000000000..632266bea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.mount.go @@ -0,0 +1,280 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newSearchableSnapshotsMountFunc(t Transport) SearchableSnapshotsMount { + return func(repository string, snapshot string, body io.Reader, o ...func(*SearchableSnapshotsMountRequest)) (*Response, error) { + var r = SearchableSnapshotsMountRequest{Repository: repository, Snapshot: snapshot, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchableSnapshotsMount - Mount a snapshot as a searchable index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html. +type SearchableSnapshotsMount func(repository string, snapshot string, body io.Reader, o ...func(*SearchableSnapshotsMountRequest)) (*Response, error) + +// SearchableSnapshotsMountRequest configures the Searchable Snapshots Mount API request. +type SearchableSnapshotsMountRequest struct { + Body io.Reader + + Repository string + Snapshot string + + MasterTimeout time.Duration + Storage string + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SearchableSnapshotsMountRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.mount") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_snapshot") + 1 + len(r.Repository) + 1 + len(r.Snapshot) + 1 + len("_mount")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString(r.Repository) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.Repository) + } + path.WriteString("/") + path.WriteString(r.Snapshot) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.Snapshot) + } + path.WriteString("/") + path.WriteString("_mount") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Storage != "" { + params["storage"] = r.Storage + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.mount") + if reader := instrument.RecordRequestBody(ctx, "searchable_snapshots.mount", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.mount") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchableSnapshotsMount) WithContext(v context.Context) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
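+//
+// Usage sketch (hand-written, not part of the generated file): Mount is one
+// of the generated calls that takes a request body; the reader is passed
+// through to newRequest, and the Content-Type header defaults to JSON as in
+// Do above. Repository, snapshot, and index names here are hypothetical:
+//
+//	body := strings.NewReader(`{"index": "my-index"}`)
+//	res, err := es.SearchableSnapshots.Mount(
+//		"my_repository", "my_snapshot", body,
+//		es.SearchableSnapshots.Mount.WithWaitForCompletion(true),
+//	)
+//	if err != nil {
+//		log.Fatalf("searchable_snapshots.mount: %s", err)
+//	}
+//	defer res.Body.Close()
+//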
+func (f SearchableSnapshotsMount) WithMasterTimeout(v time.Duration) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.MasterTimeout = v + } +} + +// WithStorage - selects the kind of local storage used to accelerate searches. experimental, and defaults to `full_copy`. +func (f SearchableSnapshotsMount) WithStorage(v string) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.Storage = v + } +} + +// WithWaitForCompletion - should this request wait until the operation has completed before returning. +func (f SearchableSnapshotsMount) WithWaitForCompletion(v bool) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchableSnapshotsMount) WithPretty() func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchableSnapshotsMount) WithHuman() func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchableSnapshotsMount) WithErrorTrace() func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchableSnapshotsMount) WithFilterPath(v ...string) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchableSnapshotsMount) WithHeader(h map[string]string) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchableSnapshotsMount) WithOpaqueID(s string) func(*SearchableSnapshotsMountRequest) { + return func(r *SearchableSnapshotsMountRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.stats.go new file mode 100644 index 000000000..34a942ce6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.searchable_snapshots.stats.go @@ -0,0 +1,247 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchableSnapshotsStatsFunc(t Transport) SearchableSnapshotsStats { + return func(o ...func(*SearchableSnapshotsStatsRequest)) (*Response, error) { + var r = SearchableSnapshotsStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchableSnapshotsStats - Retrieve shard-level statistics about searchable snapshots. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-apis.html. +type SearchableSnapshotsStats func(o ...func(*SearchableSnapshotsStatsRequest)) (*Response, error) + +// SearchableSnapshotsStatsRequest configures the Searchable Snapshots Stats API request. +type SearchableSnapshotsStatsRequest struct { + Index []string + + Level string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SearchableSnapshotsStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_searchable_snapshots") + 1 + len("stats")) + path.WriteString("http://") + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Index, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", strings.Join(r.Index, ",")) + } + } + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("stats") + + params = make(map[string]string) + + if r.Level != "" { + params["level"] = r.Level + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.stats") + 
} + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchableSnapshotsStats) WithContext(v context.Context) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.ctx = v + } +} + +// WithIndex - a list of index names. +func (f SearchableSnapshotsStats) WithIndex(v ...string) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.Index = v + } +} + +// WithLevel - return stats aggregated at cluster, index or shard level. +func (f SearchableSnapshotsStats) WithLevel(v string) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.Level = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchableSnapshotsStats) WithPretty() func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchableSnapshotsStats) WithHuman() func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchableSnapshotsStats) WithErrorTrace() func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchableSnapshotsStats) WithFilterPath(v ...string) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchableSnapshotsStats) WithHeader(h map[string]string) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchableSnapshotsStats) WithOpaqueID(s string) func(*SearchableSnapshotsStatsRequest) { + return func(r *SearchableSnapshotsStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.activate_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.activate_user_profile.go new file mode 100644 index 000000000..f8c1ebf30 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.activate_user_profile.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityActivateUserProfileFunc(t Transport) SecurityActivateUserProfile { + return func(body io.Reader, o ...func(*SecurityActivateUserProfileRequest)) (*Response, error) { + var r = SecurityActivateUserProfileRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityActivateUserProfile - Creates or updates the user profile on behalf of another user. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html. +type SecurityActivateUserProfile func(body io.Reader, o ...func(*SecurityActivateUserProfileRequest)) (*Response, error) + +// SecurityActivateUserProfileRequest configures the Security Activate User Profile API request. +type SecurityActivateUserProfileRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityActivateUserProfileRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.activate_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/profile/_activate")) + path.WriteString("http://") + path.WriteString("/_security/profile/_activate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.activate_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.activate_user_profile", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.activate_user_profile") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityActivateUserProfile) WithContext(v context.Context) func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityActivateUserProfile) WithPretty() func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityActivateUserProfile) WithHuman() func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityActivateUserProfile) WithErrorTrace() func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityActivateUserProfile) WithFilterPath(v ...string) func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
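+//
+// Usage sketch (hand-written, not part of the generated file): the grant
+// travels in the body; the user name and password shown are placeholders:
+//
+//	body := strings.NewReader(`{"grant_type": "password", "username": "jacknich", "password": "<password>"}`)
+//	res, err := es.Security.ActivateUserProfile(body)
+//	if err != nil {
+//		log.Fatalf("security.activate_user_profile: %s", err)
+//	}
+//	defer res.Body.Close()
+//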
+func (f SecurityActivateUserProfile) WithHeader(h map[string]string) func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityActivateUserProfile) WithOpaqueID(s string) func(*SecurityActivateUserProfileRequest) { + return func(r *SecurityActivateUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.authenticate.go new file mode 100644 index 000000000..72bba6b8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.authenticate.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityAuthenticateFunc(t Transport) SecurityAuthenticate { + return func(o ...func(*SecurityAuthenticateRequest)) (*Response, error) { + var r = SecurityAuthenticateRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityAuthenticate - Enables authentication as a user and retrieve information about the authenticated user. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html. +type SecurityAuthenticate func(o ...func(*SecurityAuthenticateRequest)) (*Response, error) + +// SecurityAuthenticateRequest configures the Security Authenticate API request. +type SecurityAuthenticateRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityAuthenticateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_security/_authenticate")) + path.WriteString("http://") + path.WriteString("/_security/_authenticate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.authenticate") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.authenticate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityAuthenticate) WithContext(v context.Context) func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityAuthenticate) WithPretty() func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityAuthenticate) WithHuman() func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityAuthenticate) WithErrorTrace() func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityAuthenticate) WithFilterPath(v ...string) func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityAuthenticate) WithHeader(h map[string]string) func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
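+//
+// Usage sketch (hand-written, not part of the generated file): Authenticate
+// takes no required arguments, so only options are passed; WithFilterPath
+// trims the response body to the listed properties:
+//
+//	res, err := es.Security.Authenticate(
+//		es.Security.Authenticate.WithFilterPath("username", "roles"),
+//	)
+//	if err != nil {
+//		log.Fatalf("security.authenticate: %s", err)
+//	}
+//	defer res.Body.Close()
+//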
+func (f SecurityAuthenticate) WithOpaqueID(s string) func(*SecurityAuthenticateRequest) { + return func(r *SecurityAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_delete_role.go new file mode 100644 index 000000000..ff15c95e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_delete_role.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityBulkDeleteRoleFunc(t Transport) SecurityBulkDeleteRole { + return func(body io.Reader, o ...func(*SecurityBulkDeleteRoleRequest)) (*Response, error) { + var r = SecurityBulkDeleteRoleRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityBulkDeleteRole - Bulk delete roles in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-delete-role.html. +type SecurityBulkDeleteRole func(body io.Reader, o ...func(*SecurityBulkDeleteRoleRequest)) (*Response, error) + +// SecurityBulkDeleteRoleRequest configures the Security Bulk Delete Role API request. +type SecurityBulkDeleteRoleRequest struct { + Body io.Reader + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityBulkDeleteRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_delete_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_security/role")) + path.WriteString("http://") + path.WriteString("/_security/role") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_delete_role") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_delete_role", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_delete_role") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityBulkDeleteRole) WithContext(v context.Context) func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityBulkDeleteRole) WithRefresh(v string) func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityBulkDeleteRole) WithPretty() func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityBulkDeleteRole) WithHuman() func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
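+//
+// Usage sketch (hand-written, not part of the generated file): the role
+// names to delete travel in the body, and WithRefresh("wait_for") blocks
+// until a refresh makes the deletion visible to search. The role names are
+// hypothetical:
+//
+//	body := strings.NewReader(`{"names": ["my-role-1", "my-role-2"]}`)
+//	res, err := es.Security.BulkDeleteRole(
+//		body,
+//		es.Security.BulkDeleteRole.WithRefresh("wait_for"),
+//	)
+//	if err != nil {
+//		log.Fatalf("security.bulk_delete_role: %s", err)
+//	}
+//	defer res.Body.Close()
+//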
+func (f SecurityBulkDeleteRole) WithErrorTrace() func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityBulkDeleteRole) WithFilterPath(v ...string) func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityBulkDeleteRole) WithHeader(h map[string]string) func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityBulkDeleteRole) WithOpaqueID(s string) func(*SecurityBulkDeleteRoleRequest) { + return func(r *SecurityBulkDeleteRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_put_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_put_role.go new file mode 100644 index 000000000..f20c2d6b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_put_role.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityBulkPutRoleFunc(t Transport) SecurityBulkPutRole { + return func(body io.Reader, o ...func(*SecurityBulkPutRoleRequest)) (*Response, error) { + var r = SecurityBulkPutRoleRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityBulkPutRole - Bulk adds and updates roles in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-put-role.html. +type SecurityBulkPutRole func(body io.Reader, o ...func(*SecurityBulkPutRoleRequest)) (*Response, error) + +// SecurityBulkPutRoleRequest configures the Security Bulk Put Role API request. +type SecurityBulkPutRoleRequest struct { + Body io.Reader + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityBulkPutRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_put_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/role")) + path.WriteString("http://") + path.WriteString("/_security/role") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_put_role") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_put_role", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_put_role") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityBulkPutRole) WithContext(v context.Context) func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityBulkPutRole) WithRefresh(v string) func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityBulkPutRole) WithPretty() func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityBulkPutRole) WithHuman() func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
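[Editor's note: a companion sketch for the bulk role upsert endpoint defined above, reusing the assumed es client from the first sketch; the role definition is illustrative.]

    // POST /_security/role: create or update several roles in one request.
    res, err := es.Security.BulkPutRole(
    	strings.NewReader(`{"roles":{"role-a":{"cluster":["monitor"]}}}`),
    	es.Security.BulkPutRole.WithRefresh("true"),
    )
    if err != nil {
    	log.Fatalf("bulk put roles: %s", err)
    }
    defer res.Body.Close()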
+func (f SecurityBulkPutRole) WithErrorTrace() func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityBulkPutRole) WithFilterPath(v ...string) func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityBulkPutRole) WithHeader(h map[string]string) func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityBulkPutRole) WithOpaqueID(s string) func(*SecurityBulkPutRoleRequest) { + return func(r *SecurityBulkPutRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_update_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_update_api_keys.go new file mode 100644 index 000000000..b9666a728 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.bulk_update_api_keys.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityBulkUpdateAPIKeysFunc(t Transport) SecurityBulkUpdateAPIKeys { + return func(body io.Reader, o ...func(*SecurityBulkUpdateAPIKeysRequest)) (*Response, error) { + var r = SecurityBulkUpdateAPIKeysRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityBulkUpdateAPIKeys - Updates the attributes of multiple existing API keys. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html. +type SecurityBulkUpdateAPIKeys func(body io.Reader, o ...func(*SecurityBulkUpdateAPIKeysRequest)) (*Response, error) + +// SecurityBulkUpdateAPIKeysRequest configures the Security Bulk UpdateAPI Keys API request. +type SecurityBulkUpdateAPIKeysRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityBulkUpdateAPIKeysRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_update_api_keys") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/api_key/_bulk_update")) + path.WriteString("http://") + path.WriteString("/_security/api_key/_bulk_update") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_update_api_keys") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_update_api_keys", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_update_api_keys") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityBulkUpdateAPIKeys) WithContext(v context.Context) func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityBulkUpdateAPIKeys) WithPretty() func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityBulkUpdateAPIKeys) WithHuman() func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityBulkUpdateAPIKeys) WithErrorTrace() func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityBulkUpdateAPIKeys) WithFilterPath(v ...string) func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
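[Editor's note: a usage sketch for the bulk API key update endpoint, same assumed setup; key IDs and metadata are placeholders. This endpoint carries no refresh option, matching the request struct above.]

    // POST /_security/api_key/_bulk_update: attach metadata to several keys at once.
    res, err := es.Security.BulkUpdateAPIKeys(
    	strings.NewReader(`{"ids":["key-id-1","key-id-2"],"metadata":{"env":"staging"}}`),
    )
    if err != nil {
    	log.Fatalf("bulk update api keys: %s", err)
    }
    defer res.Body.Close()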
+func (f SecurityBulkUpdateAPIKeys) WithHeader(h map[string]string) func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityBulkUpdateAPIKeys) WithOpaqueID(s string) func(*SecurityBulkUpdateAPIKeysRequest) { + return func(r *SecurityBulkUpdateAPIKeysRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.change_password.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.change_password.go new file mode 100644 index 000000000..1b71ccf93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.change_password.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityChangePasswordFunc(t Transport) SecurityChangePassword { + return func(body io.Reader, o ...func(*SecurityChangePasswordRequest)) (*Response, error) { + var r = SecurityChangePasswordRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityChangePassword - Changes the passwords of users in the native realm and built-in users. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html. +type SecurityChangePassword func(body io.Reader, o ...func(*SecurityChangePasswordRequest)) (*Response, error) + +// SecurityChangePasswordRequest configures the Security Change Password API request. +type SecurityChangePasswordRequest struct { + Body io.Reader + + Username string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityChangePasswordRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.change_password") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.Username) + 1 + len("_password")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + if r.Username != "" { + path.WriteString("/") + path.WriteString(r.Username) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.Username) + } + } + path.WriteString("/") + path.WriteString("_password") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.change_password") + if reader := instrument.RecordRequestBody(ctx, "security.change_password", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.change_password") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityChangePassword) WithContext(v context.Context) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.ctx = v + } +} + +// WithUsername - the username of the user to change the password for. +func (f SecurityChangePassword) WithUsername(v string) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.Username = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. 
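[Editor's note: the remaining option helpers continue below; meanwhile, a usage sketch under the same assumed setup. The username is a placeholder; judging by the path construction in Do above, omitting WithUsername targets the authenticated user.]

    // PUT /_security/user/{username}/_password
    res, err := es.Security.ChangePassword(
    	strings.NewReader(`{"password":"a-sufficiently-long-password"}`),
    	es.Security.ChangePassword.WithUsername("some-user"),
    )
    if err != nil {
    	log.Fatalf("change password: %s", err)
    }
    defer res.Body.Close()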
+func (f SecurityChangePassword) WithRefresh(v string) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityChangePassword) WithPretty() func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityChangePassword) WithHuman() func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityChangePassword) WithErrorTrace() func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityChangePassword) WithFilterPath(v ...string) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityChangePassword) WithHeader(h map[string]string) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityChangePassword) WithOpaqueID(s string) func(*SecurityChangePasswordRequest) { + return func(r *SecurityChangePasswordRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_api_key_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_api_key_cache.go new file mode 100644 index 000000000..6e5540236 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_api_key_cache.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityClearAPIKeyCacheFunc(t Transport) SecurityClearAPIKeyCache { + return func(ids []string, o ...func(*SecurityClearAPIKeyCacheRequest)) (*Response, error) { + var r = SecurityClearAPIKeyCacheRequest{Ids: ids} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityClearAPIKeyCache - Clear a subset or all entries from the API key cache. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html. +type SecurityClearAPIKeyCache func(ids []string, o ...func(*SecurityClearAPIKeyCacheRequest)) (*Response, error) + +// SecurityClearAPIKeyCacheRequest configures the Security ClearAPI Key Cache API request. +type SecurityClearAPIKeyCacheRequest struct { + Ids []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityClearAPIKeyCacheRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_api_key_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Ids) == 0 { + return nil, errors.New("ids is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("api_key") + 1 + len(strings.Join(r.Ids, ",")) + 1 + len("_clear_cache")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString(strings.Join(r.Ids, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "ids", strings.Join(r.Ids, ",")) + } + path.WriteString("/") + path.WriteString("_clear_cache") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_api_key_cache") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", 
"security.clear_api_key_cache") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityClearAPIKeyCache) WithContext(v context.Context) func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityClearAPIKeyCache) WithPretty() func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityClearAPIKeyCache) WithHuman() func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityClearAPIKeyCache) WithErrorTrace() func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityClearAPIKeyCache) WithFilterPath(v ...string) func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityClearAPIKeyCache) WithHeader(h map[string]string) func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityClearAPIKeyCache) WithOpaqueID(s string) func(*SecurityClearAPIKeyCacheRequest) { + return func(r *SecurityClearAPIKeyCacheRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_privileges.go new file mode 100644 index 000000000..e9e4620af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_privileges.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityClearCachedPrivilegesFunc(t Transport) SecurityClearCachedPrivileges { + return func(application []string, o ...func(*SecurityClearCachedPrivilegesRequest)) (*Response, error) { + var r = SecurityClearCachedPrivilegesRequest{Application: application} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityClearCachedPrivileges - Evicts application privileges from the native application privileges cache. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html. +type SecurityClearCachedPrivileges func(application []string, o ...func(*SecurityClearCachedPrivilegesRequest)) (*Response, error) + +// SecurityClearCachedPrivilegesRequest configures the Security Clear Cached Privileges API request. +type SecurityClearCachedPrivilegesRequest struct { + Application []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityClearCachedPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Application) == 0 { + return nil, errors.New("application is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("privilege") + 1 + len(strings.Join(r.Application, ",")) + 1 + len("_clear_cache")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + path.WriteString(strings.Join(r.Application, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", strings.Join(r.Application, ",")) + } + path.WriteString("/") + path.WriteString("_clear_cache") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, 
"security.clear_cached_privileges") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_privileges") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityClearCachedPrivileges) WithContext(v context.Context) func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityClearCachedPrivileges) WithPretty() func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityClearCachedPrivileges) WithHuman() func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityClearCachedPrivileges) WithErrorTrace() func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityClearCachedPrivileges) WithFilterPath(v ...string) func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityClearCachedPrivileges) WithHeader(h map[string]string) func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityClearCachedPrivileges) WithOpaqueID(s string) func(*SecurityClearCachedPrivilegesRequest) { + return func(r *SecurityClearCachedPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_realms.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_realms.go new file mode 100644 index 000000000..b08ea2131 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_realms.go @@ -0,0 +1,245 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityClearCachedRealmsFunc(t Transport) SecurityClearCachedRealms { + return func(realms []string, o ...func(*SecurityClearCachedRealmsRequest)) (*Response, error) { + var r = SecurityClearCachedRealmsRequest{Realms: realms} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityClearCachedRealms - Evicts users from the user cache. Can completely clear the cache or evict specific users. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html. +type SecurityClearCachedRealms func(realms []string, o ...func(*SecurityClearCachedRealmsRequest)) (*Response, error) + +// SecurityClearCachedRealmsRequest configures the Security Clear Cached Realms API request. +type SecurityClearCachedRealmsRequest struct { + Realms []string + + Usernames []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityClearCachedRealmsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_realms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Realms) == 0 { + return nil, errors.New("realms is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("realm") + 1 + len(strings.Join(r.Realms, ",")) + 1 + len("_clear_cache")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("realm") + path.WriteString("/") + path.WriteString(strings.Join(r.Realms, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "realms", strings.Join(r.Realms, ",")) + } + path.WriteString("/") + path.WriteString("_clear_cache") + + params = make(map[string]string) + + if len(r.Usernames) > 0 { + params["usernames"] = strings.Join(r.Usernames, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if 
instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_realms") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_realms") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityClearCachedRealms) WithContext(v context.Context) func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.ctx = v + } +} + +// WithUsernames - comma-separated list of usernames to clear from the cache. +func (f SecurityClearCachedRealms) WithUsernames(v ...string) func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.Usernames = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityClearCachedRealms) WithPretty() func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityClearCachedRealms) WithHuman() func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityClearCachedRealms) WithErrorTrace() func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityClearCachedRealms) WithFilterPath(v ...string) func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityClearCachedRealms) WithHeader(h map[string]string) func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityClearCachedRealms) WithOpaqueID(s string) func(*SecurityClearCachedRealmsRequest) { + return func(r *SecurityClearCachedRealmsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_roles.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_roles.go new file mode 100644 index 000000000..333644ef2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_roles.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
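[Editor's note: a sketch for realm cache eviction, which additionally supports narrowing to specific users via the variadic WithUsernames option defined above; realm and user names are placeholders.]

    // POST /_security/realm/{realms}/_clear_cache?usernames=alice,bob
    res, err := es.Security.ClearCachedRealms(
    	[]string{"default_native"},
    	es.Security.ClearCachedRealms.WithUsernames("alice", "bob"),
    )
    if err != nil {
    	log.Fatalf("clear cached realms: %s", err)
    }
    defer res.Body.Close()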
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityClearCachedRolesFunc(t Transport) SecurityClearCachedRoles { + return func(name []string, o ...func(*SecurityClearCachedRolesRequest)) (*Response, error) { + var r = SecurityClearCachedRolesRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityClearCachedRoles - Evicts roles from the native role cache. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html. +type SecurityClearCachedRoles func(name []string, o ...func(*SecurityClearCachedRolesRequest)) (*Response, error) + +// SecurityClearCachedRolesRequest configures the Security Clear Cached Roles API request. +type SecurityClearCachedRolesRequest struct { + Name []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityClearCachedRolesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_roles") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("role") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_clear_cache")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_clear_cache") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + 
for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_roles") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_roles") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityClearCachedRoles) WithContext(v context.Context) func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityClearCachedRoles) WithPretty() func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityClearCachedRoles) WithHuman() func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityClearCachedRoles) WithErrorTrace() func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityClearCachedRoles) WithFilterPath(v ...string) func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityClearCachedRoles) WithHeader(h map[string]string) func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityClearCachedRoles) WithOpaqueID(s string) func(*SecurityClearCachedRolesRequest) { + return func(r *SecurityClearCachedRolesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_service_tokens.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_service_tokens.go new file mode 100644 index 000000000..c1f12de94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.clear_cached_service_tokens.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
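[Editor's note: native role cache eviction has the same shape; the role name is a placeholder.]

    // POST /_security/role/{name}/_clear_cache
    res, err := es.Security.ClearCachedRoles([]string{"my-admin-role"})
    if err != nil {
    	log.Fatalf("clear cached roles: %s", err)
    }
    defer res.Body.Close()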
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityClearCachedServiceTokensFunc(t Transport) SecurityClearCachedServiceTokens { + return func(name []string, namespace string, service string, o ...func(*SecurityClearCachedServiceTokensRequest)) (*Response, error) { + var r = SecurityClearCachedServiceTokensRequest{Name: name, Namespace: namespace, Service: service} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityClearCachedServiceTokens - Evicts tokens from the service account token caches. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html. +type SecurityClearCachedServiceTokens func(name []string, namespace string, service string, o ...func(*SecurityClearCachedServiceTokensRequest)) (*Response, error) + +// SecurityClearCachedServiceTokensRequest configures the Security Clear Cached Service Tokens API request. +type SecurityClearCachedServiceTokensRequest struct { + Name []string + Namespace string + Service string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityClearCachedServiceTokensRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_service_tokens") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("service") + 1 + len(r.Namespace) + 1 + len(r.Service) + 1 + len("credential") + 1 + len("token") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_clear_cache")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + path.WriteString(r.Namespace) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.Namespace) + } + path.WriteString("/") + path.WriteString(r.Service) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.Service) + } + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ",")) + } + path.WriteString("/") + path.WriteString("_clear_cache") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_service_tokens") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_service_tokens") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityClearCachedServiceTokens) WithContext(v context.Context) func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SecurityClearCachedServiceTokens) WithPretty() func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityClearCachedServiceTokens) WithHuman() func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityClearCachedServiceTokens) WithErrorTrace() func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityClearCachedServiceTokens) WithFilterPath(v ...string) func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityClearCachedServiceTokens) WithHeader(h map[string]string) func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityClearCachedServiceTokens) WithOpaqueID(s string) func(*SecurityClearCachedServiceTokensRequest) { + return func(r *SecurityClearCachedServiceTokensRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_api_key.go new file mode 100644 index 000000000..4ec392edd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_api_key.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
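[Editor's note: the service token cache endpoint takes three positional arguments in the constructor order shown above: token names, then namespace, then service. The values here are placeholders.]

    // POST /_security/service/{namespace}/{service}/credential/token/{name}/_clear_cache
    res, err := es.Security.ClearCachedServiceTokens(
    	[]string{"token-1"}, "elastic", "fleet-server",
    )
    if err != nil {
    	log.Fatalf("clear cached service tokens: %s", err)
    }
    defer res.Body.Close()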
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityCreateAPIKeyFunc(t Transport) SecurityCreateAPIKey { + return func(body io.Reader, o ...func(*SecurityCreateAPIKeyRequest)) (*Response, error) { + var r = SecurityCreateAPIKeyRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityCreateAPIKey - Creates an API key for access without requiring basic authentication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html. +type SecurityCreateAPIKey func(body io.Reader, o ...func(*SecurityCreateAPIKeyRequest)) (*Response, error) + +// SecurityCreateAPIKeyRequest configures the Security CreateAPI Key API request. +type SecurityCreateAPIKeyRequest struct { + Body io.Reader + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityCreateAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_security/api_key")) + path.WriteString("http://") + path.WriteString("/_security/api_key") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.create_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: 
res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityCreateAPIKey) WithContext(v context.Context) func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityCreateAPIKey) WithRefresh(v string) func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityCreateAPIKey) WithPretty() func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityCreateAPIKey) WithHuman() func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityCreateAPIKey) WithErrorTrace() func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityCreateAPIKey) WithFilterPath(v ...string) func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityCreateAPIKey) WithHeader(h map[string]string) func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityCreateAPIKey) WithOpaqueID(s string) func(*SecurityCreateAPIKeyRequest) { + return func(r *SecurityCreateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_cross_cluster_api_key.go new file mode 100644 index 000000000..e0bf480f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_cross_cluster_api_key.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
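The SecurityCreateAPIKey endpoint above takes no path parameters: the key name, expiration, and role descriptors all travel in the JSON body, and WithRefresh accepts "true", "wait_for", or "false" per its doc comment. A minimal caller sketch, assuming a reachable cluster and credentials supplied via the environment (NewDefaultClient falls back to http://localhost:9200); the key name and role descriptor below are illustrative:

package main

import (
	"context"
	"encoding/json"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %v", err)
	}

	// Name, expiration, and role descriptors are all body fields.
	body := `{"name":"ingest-only","expiration":"7d","role_descriptors":{` +
		`"writer":{"indices":[{"names":["logs-*"],"privileges":["create_doc"]}]}}}`

	res, err := es.Security.CreateAPIKey(
		strings.NewReader(body),
		es.Security.CreateAPIKey.WithContext(context.Background()),
		es.Security.CreateAPIKey.WithRefresh("wait_for"),
	)
	if err != nil {
		log.Fatalf("request: %v", err)
	}
	defer res.Body.Close()
	if res.IsError() {
		log.Fatalf("create_api_key: %s", res.Status())
	}

	// The response carries the generated id and api_key credentials.
	var out map[string]interface{}
	if err := json.NewDecoder(res.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	log.Printf("id=%v api_key=%v", out["id"], out["api_key"])
}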
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityCreateCrossClusterAPIKeyFunc(t Transport) SecurityCreateCrossClusterAPIKey { + return func(body io.Reader, o ...func(*SecurityCreateCrossClusterAPIKeyRequest)) (*Response, error) { + var r = SecurityCreateCrossClusterAPIKeyRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityCreateCrossClusterAPIKey - Creates a cross-cluster API key for API key based remote cluster access. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html. +type SecurityCreateCrossClusterAPIKey func(body io.Reader, o ...func(*SecurityCreateCrossClusterAPIKeyRequest)) (*Response, error) + +// SecurityCreateCrossClusterAPIKeyRequest configures the Security Create Cross ClusterAPI Key API request. +type SecurityCreateCrossClusterAPIKeyRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityCreateCrossClusterAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_cross_cluster_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/cross_cluster/api_key")) + path.WriteString("http://") + path.WriteString("/_security/cross_cluster/api_key") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_cross_cluster_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.create_cross_cluster_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_cross_cluster_api_key") + } + if err != nil { + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityCreateCrossClusterAPIKey) WithContext(v context.Context) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityCreateCrossClusterAPIKey) WithPretty() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityCreateCrossClusterAPIKey) WithHuman() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityCreateCrossClusterAPIKey) WithErrorTrace() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityCreateCrossClusterAPIKey) WithFilterPath(v ...string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityCreateCrossClusterAPIKey) WithHeader(h map[string]string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityCreateCrossClusterAPIKey) WithOpaqueID(s string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_service_token.go new file mode 100644 index 000000000..b40a2fd93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.create_service_token.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
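Unlike the regular API-key endpoint, SecurityCreateCrossClusterAPIKey (completed above) exposes no Refresh option; only the body is caller-supplied. A short sketch, assuming a reachable cluster; the access block shape ("search" and/or "replication" entries naming index patterns) follows the Elasticsearch security docs, and the key name is illustrative:

package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// The access block names which indices the remote cluster may
	// search and/or replicate.
	body := `{"name":"remote-search-key","access":{"search":[{"names":["logs-*"]}]}}`

	res, err := es.Security.CreateCrossClusterAPIKey(
		strings.NewReader(body),
		es.Security.CreateCrossClusterAPIKey.WithContext(context.Background()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}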
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityCreateServiceTokenFunc(t Transport) SecurityCreateServiceToken { + return func(namespace string, service string, o ...func(*SecurityCreateServiceTokenRequest)) (*Response, error) { + var r = SecurityCreateServiceTokenRequest{Namespace: namespace, Service: service} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityCreateServiceToken - Creates a service account token for access without requiring basic authentication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html. +type SecurityCreateServiceToken func(namespace string, service string, o ...func(*SecurityCreateServiceTokenRequest)) (*Response, error) + +// SecurityCreateServiceTokenRequest configures the Security Create Service Token API request. +type SecurityCreateServiceTokenRequest struct { + Name string + Namespace string + Service string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityCreateServiceTokenRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + if r.Name != "" { + method = "POST" + } else { + method = "PUT" + } + + path.Grow(7 + 1 + len("_security") + 1 + len("service") + 1 + len(r.Namespace) + 1 + len(r.Service) + 1 + len("credential") + 1 + len("token") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + path.WriteString(r.Namespace) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.Namespace) + } + path.WriteString("/") + path.WriteString(r.Service) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.Service) + } + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + if r.Name != "" { + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v 
:= range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_service_token") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_service_token") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityCreateServiceToken) WithContext(v context.Context) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.ctx = v + } +} + +// WithName - an identifier for the token name. +func (f SecurityCreateServiceToken) WithName(v string) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.Name = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityCreateServiceToken) WithRefresh(v string) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityCreateServiceToken) WithPretty() func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityCreateServiceToken) WithHuman() func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityCreateServiceToken) WithErrorTrace() func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityCreateServiceToken) WithFilterPath(v ...string) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityCreateServiceToken) WithHeader(h map[string]string) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SecurityCreateServiceToken) WithOpaqueID(s string) func(*SecurityCreateServiceTokenRequest) { + return func(r *SecurityCreateServiceTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delegate_pki.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delegate_pki.go new file mode 100644 index 000000000..080bfdc0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delegate_pki.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityDelegatePkiFunc(t Transport) SecurityDelegatePki { + return func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) { + var r = SecurityDelegatePkiRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDelegatePki - Delegate PKI authentication. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html. +type SecurityDelegatePki func(body io.Reader, o ...func(*SecurityDelegatePkiRequest)) (*Response, error) + +// SecurityDelegatePkiRequest configures the Security Delegate Pki API request. +type SecurityDelegatePkiRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityDelegatePkiRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/delegate_pki")) + path.WriteString("http://") + path.WriteString("/_security/delegate_pki") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDelegatePki) WithContext(v context.Context) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDelegatePki) WithPretty() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDelegatePki) WithHuman() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDelegatePki) WithErrorTrace() func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDelegatePki) WithFilterPath(v ...string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityDelegatePki) WithHeader(h map[string]string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDelegatePki) WithOpaqueID(s string) func(*SecurityDelegatePkiRequest) { + return func(r *SecurityDelegatePkiRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_privileges.go new file mode 100644 index 000000000..34b51523c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_privileges.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDeletePrivilegesFunc(t Transport) SecurityDeletePrivileges { + return func(name string, application string, o ...func(*SecurityDeletePrivilegesRequest)) (*Response, error) { + var r = SecurityDeletePrivilegesRequest{Name: name, Application: application} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDeletePrivileges - Removes application privileges. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html. +type SecurityDeletePrivileges func(name string, application string, o ...func(*SecurityDeletePrivilegesRequest)) (*Response, error) + +// SecurityDeletePrivilegesRequest configures the Security Delete Privileges API request. +type SecurityDeletePrivilegesRequest struct { + Application string + Name string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityDeletePrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_security") + 1 + len("privilege") + 1 + len(r.Application) + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + path.WriteString(r.Application) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", r.Application) + } + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_privileges") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_privileges") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDeletePrivileges) WithContext(v context.Context) func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDeletePrivileges) WithRefresh(v string) func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDeletePrivileges) WithPretty() func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityDeletePrivileges) WithHuman() func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDeletePrivileges) WithErrorTrace() func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDeletePrivileges) WithFilterPath(v ...string) func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDeletePrivileges) WithHeader(h map[string]string) func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDeletePrivileges) WithOpaqueID(s string) func(*SecurityDeletePrivilegesRequest) { + return func(r *SecurityDeletePrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role.go new file mode 100644 index 000000000..c5a9bdeca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDeleteRoleFunc(t Transport) SecurityDeleteRole { + return func(name string, o ...func(*SecurityDeleteRoleRequest)) (*Response, error) { + var r = SecurityDeleteRoleRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDeleteRole - Removes roles in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html. +type SecurityDeleteRole func(name string, o ...func(*SecurityDeleteRoleRequest)) (*Response, error) + +// SecurityDeleteRoleRequest configures the Security Delete Role API request. 
+type SecurityDeleteRoleRequest struct { + Name string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDeleteRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_security") + 1 + len("role") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_role") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_role") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDeleteRole) WithContext(v context.Context) func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDeleteRole) WithRefresh(v string) func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDeleteRole) WithPretty() func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityDeleteRole) WithHuman() func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDeleteRole) WithErrorTrace() func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDeleteRole) WithFilterPath(v ...string) func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDeleteRole) WithHeader(h map[string]string) func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDeleteRole) WithOpaqueID(s string) func(*SecurityDeleteRoleRequest) { + return func(r *SecurityDeleteRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role_mapping.go new file mode 100644 index 000000000..13f000a93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_role_mapping.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDeleteRoleMappingFunc(t Transport) SecurityDeleteRoleMapping { + return func(name string, o ...func(*SecurityDeleteRoleMappingRequest)) (*Response, error) { + var r = SecurityDeleteRoleMappingRequest{Name: name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDeleteRoleMapping - Removes role mappings. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html. +type SecurityDeleteRoleMapping func(name string, o ...func(*SecurityDeleteRoleMappingRequest)) (*Response, error) + +// SecurityDeleteRoleMappingRequest configures the Security Delete Role Mapping API request. 
+type SecurityDeleteRoleMappingRequest struct { + Name string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDeleteRoleMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_security") + 1 + len("role_mapping") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_role_mapping") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_role_mapping") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDeleteRoleMapping) WithContext(v context.Context) func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDeleteRoleMapping) WithRefresh(v string) func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDeleteRoleMapping) WithPretty() func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityDeleteRoleMapping) WithHuman() func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDeleteRoleMapping) WithErrorTrace() func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDeleteRoleMapping) WithFilterPath(v ...string) func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDeleteRoleMapping) WithHeader(h map[string]string) func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDeleteRoleMapping) WithOpaqueID(s string) func(*SecurityDeleteRoleMappingRequest) { + return func(r *SecurityDeleteRoleMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_service_token.go new file mode 100644 index 000000000..b08d37d5f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_service_token.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDeleteServiceTokenFunc(t Transport) SecurityDeleteServiceToken { + return func(name string, namespace string, service string, o ...func(*SecurityDeleteServiceTokenRequest)) (*Response, error) { + var r = SecurityDeleteServiceTokenRequest{Name: name, Namespace: namespace, Service: service} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDeleteServiceToken - Deletes a service account token. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html. 
+type SecurityDeleteServiceToken func(name string, namespace string, service string, o ...func(*SecurityDeleteServiceTokenRequest)) (*Response, error) + +// SecurityDeleteServiceTokenRequest configures the Security Delete Service Token API request. +type SecurityDeleteServiceTokenRequest struct { + Name string + Namespace string + Service string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDeleteServiceTokenRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_security") + 1 + len("service") + 1 + len(r.Namespace) + 1 + len(r.Service) + 1 + len("credential") + 1 + len("token") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + path.WriteString(r.Namespace) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.Namespace) + } + path.WriteString("/") + path.WriteString(r.Service) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.Service) + } + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_service_token") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_service_token") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f SecurityDeleteServiceToken) WithContext(v context.Context) func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDeleteServiceToken) WithRefresh(v string) func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDeleteServiceToken) WithPretty() func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDeleteServiceToken) WithHuman() func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDeleteServiceToken) WithErrorTrace() func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDeleteServiceToken) WithFilterPath(v ...string) func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDeleteServiceToken) WithHeader(h map[string]string) func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDeleteServiceToken) WithOpaqueID(s string) func(*SecurityDeleteServiceTokenRequest) { + return func(r *SecurityDeleteServiceTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_user.go new file mode 100644 index 000000000..3b61f6661 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.delete_user.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
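The two service-token endpoints completed above pair naturally: CreateServiceToken takes namespace and service as path parameters with the token name optional (Elasticsearch generates one when it is omitted), while DeleteServiceToken requires the name first, then namespace and service. A lifecycle sketch, assuming a reachable cluster and the built-in elastic/fleet-server service account; the token name is illustrative:

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Create a named credential for the elastic/fleet-server account.
	res, err := es.Security.CreateServiceToken("elastic", "fleet-server",
		es.Security.CreateServiceToken.WithName("ci-token"),
		es.Security.CreateServiceToken.WithContext(ctx),
	)
	if err != nil {
		log.Fatal(err)
	}
	res.Body.Close()

	// Deletion takes the name first, then namespace and service.
	res, err = es.Security.DeleteServiceToken("ci-token", "elastic", "fleet-server",
		es.Security.DeleteServiceToken.WithContext(ctx),
		es.Security.DeleteServiceToken.WithRefresh("wait_for"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}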
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDeleteUserFunc(t Transport) SecurityDeleteUser { + return func(username string, o ...func(*SecurityDeleteUserRequest)) (*Response, error) { + var r = SecurityDeleteUserRequest{Username: username} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDeleteUser - Deletes users from the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html. +type SecurityDeleteUser func(username string, o ...func(*SecurityDeleteUserRequest)) (*Response, error) + +// SecurityDeleteUserRequest configures the Security Delete User API request. +type SecurityDeleteUserRequest struct { + Username string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDeleteUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.Username)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString(r.Username) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.Username) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_user") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_user") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + 
return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDeleteUser) WithContext(v context.Context) func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDeleteUser) WithRefresh(v string) func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDeleteUser) WithPretty() func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDeleteUser) WithHuman() func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDeleteUser) WithErrorTrace() func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDeleteUser) WithFilterPath(v ...string) func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDeleteUser) WithHeader(h map[string]string) func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDeleteUser) WithOpaqueID(s string) func(*SecurityDeleteUserRequest) { + return func(r *SecurityDeleteUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user.go new file mode 100644 index 000000000..27d912d5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
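// A minimal sketch for SecurityDeleteUser, under the same assumptions as the
// earlier sketch (es.Security namespace, NewDefaultClient constructor);
// "jacknich" is a placeholder username.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	res, err := es.Security.DeleteUser(
		"jacknich", // placeholder username
		es.Security.DeleteUser.WithContext(context.Background()),
		es.Security.DeleteUser.WithRefresh("wait_for"), // make the deletion visible to search before returning
	)
	if err != nil {
		log.Fatalf("request: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("delete_user failed: %s", res.String()) // e.g. 404 if the user does not exist
	}
}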
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDisableUserFunc(t Transport) SecurityDisableUser { + return func(username string, o ...func(*SecurityDisableUserRequest)) (*Response, error) { + var r = SecurityDisableUserRequest{Username: username} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDisableUser - Disables users in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html. +type SecurityDisableUser func(username string, o ...func(*SecurityDisableUserRequest)) (*Response, error) + +// SecurityDisableUserRequest configures the Security Disable User API request. +type SecurityDisableUserRequest struct { + Username string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDisableUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.Username) + 1 + len("_disable")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString(r.Username) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.Username) + } + path.WriteString("/") + path.WriteString("_disable") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.disable_user") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.disable_user") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := 
Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDisableUser) WithContext(v context.Context) func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDisableUser) WithRefresh(v string) func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDisableUser) WithPretty() func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDisableUser) WithHuman() func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDisableUser) WithErrorTrace() func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDisableUser) WithFilterPath(v ...string) func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDisableUser) WithHeader(h map[string]string) func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDisableUser) WithOpaqueID(s string) func(*SecurityDisableUserRequest) { + return func(r *SecurityDisableUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user_profile.go new file mode 100644 index 000000000..c1c23b507 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.disable_user_profile.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
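// A minimal sketch for SecurityDisableUser, same client assumptions as the
// sketches above; the X-Opaque-Id value is a placeholder that Elasticsearch
// echoes into its logs and task listings, which helps correlate this call
// server-side.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	res, err := es.Security.DisableUser(
		"jacknich", // placeholder username
		es.Security.DisableUser.WithContext(context.Background()),
		es.Security.DisableUser.WithOpaqueID("ops-ticket-1234"), // placeholder correlation ID
	)
	if err != nil {
		log.Fatalf("request: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("disable_user failed: %s", res.String())
	}
}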
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityDisableUserProfileFunc(t Transport) SecurityDisableUserProfile { + return func(uid string, o ...func(*SecurityDisableUserProfileRequest)) (*Response, error) { + var r = SecurityDisableUserProfileRequest{UID: uid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityDisableUserProfile - Disables a user profile so it's not visible in user profile searches. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-disable-user-profile.html. +type SecurityDisableUserProfile func(uid string, o ...func(*SecurityDisableUserProfileRequest)) (*Response, error) + +// SecurityDisableUserProfileRequest configures the Security Disable User Profile API request. +type SecurityDisableUserProfileRequest struct { + UID string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityDisableUserProfileRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("profile") + 1 + len(r.UID) + 1 + len("_disable")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString(r.UID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.UID) + } + path.WriteString("/") + path.WriteString("_disable") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.disable_user_profile") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.disable_user_profile") + } + if err != nil { + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityDisableUserProfile) WithContext(v context.Context) func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityDisableUserProfile) WithRefresh(v string) func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityDisableUserProfile) WithPretty() func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityDisableUserProfile) WithHuman() func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityDisableUserProfile) WithErrorTrace() func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityDisableUserProfile) WithFilterPath(v ...string) func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityDisableUserProfile) WithHeader(h map[string]string) func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityDisableUserProfile) WithOpaqueID(s string) func(*SecurityDisableUserProfileRequest) { + return func(r *SecurityDisableUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user.go new file mode 100644 index 000000000..11052e0ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
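// A minimal sketch for SecurityDisableUserProfile, same client assumptions;
// the profile UID is a placeholder (real UIDs come from the user-profile
// activate/suggest APIs).
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	res, err := es.Security.DisableUserProfile(
		"u_example_profile_uid_0", // placeholder profile UID
		es.Security.DisableUserProfile.WithContext(context.Background()),
		es.Security.DisableUserProfile.WithRefresh("wait_for"),
	)
	if err != nil {
		log.Fatalf("request: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("disable_user_profile failed: %s", res.String())
	}
}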
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityEnableUserFunc(t Transport) SecurityEnableUser { + return func(username string, o ...func(*SecurityEnableUserRequest)) (*Response, error) { + var r = SecurityEnableUserRequest{Username: username} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityEnableUser - Enables users in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html. +type SecurityEnableUser func(username string, o ...func(*SecurityEnableUserRequest)) (*Response, error) + +// SecurityEnableUserRequest configures the Security Enable User API request. +type SecurityEnableUserRequest struct { + Username string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityEnableUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.Username) + 1 + len("_enable")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString(r.Username) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.Username) + } + path.WriteString("/") + path.WriteString("_enable") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.enable_user") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enable_user") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityEnableUser) WithContext(v context.Context) func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityEnableUser) WithRefresh(v string) func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityEnableUser) WithPretty() func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityEnableUser) WithHuman() func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityEnableUser) WithErrorTrace() func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityEnableUser) WithFilterPath(v ...string) func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityEnableUser) WithHeader(h map[string]string) func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityEnableUser) WithOpaqueID(s string) func(*SecurityEnableUserRequest) { + return func(r *SecurityEnableUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user_profile.go new file mode 100644 index 000000000..440498788 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enable_user_profile.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
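// A minimal sketch for SecurityEnableUser, the inverse of the disable call
// sketched earlier; same client assumptions and placeholder username.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	res, err := es.Security.EnableUser(
		"jacknich", // placeholder username
		es.Security.EnableUser.WithContext(context.Background()),
		es.Security.EnableUser.WithRefresh("wait_for"),
	)
	if err != nil {
		log.Fatalf("request: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("enable_user failed: %s", res.String())
	}
}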
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityEnableUserProfileFunc(t Transport) SecurityEnableUserProfile { + return func(uid string, o ...func(*SecurityEnableUserProfileRequest)) (*Response, error) { + var r = SecurityEnableUserProfileRequest{UID: uid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityEnableUserProfile - Enables a user profile so it's visible in user profile searches. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-enable-user-profile.html. +type SecurityEnableUserProfile func(uid string, o ...func(*SecurityEnableUserProfileRequest)) (*Response, error) + +// SecurityEnableUserProfileRequest configures the Security Enable User Profile API request. +type SecurityEnableUserProfileRequest struct { + UID string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityEnableUserProfileRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("profile") + 1 + len(r.UID) + 1 + len("_enable")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString(r.UID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.UID) + } + path.WriteString("/") + path.WriteString("_enable") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.enable_user_profile") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enable_user_profile") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityEnableUserProfile) WithContext(v context.Context) func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityEnableUserProfile) WithRefresh(v string) func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityEnableUserProfile) WithPretty() func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityEnableUserProfile) WithHuman() func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityEnableUserProfile) WithErrorTrace() func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityEnableUserProfile) WithFilterPath(v ...string) func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityEnableUserProfile) WithHeader(h map[string]string) func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityEnableUserProfile) WithOpaqueID(s string) func(*SecurityEnableUserProfileRequest) { + return func(r *SecurityEnableUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_kibana.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_kibana.go new file mode 100644 index 000000000..06ed386d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_kibana.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityEnrollKibanaFunc(t Transport) SecurityEnrollKibana { + return func(o ...func(*SecurityEnrollKibanaRequest)) (*Response, error) { + var r = SecurityEnrollKibanaRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityEnrollKibana - Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html. +type SecurityEnrollKibana func(o ...func(*SecurityEnrollKibanaRequest)) (*Response, error) + +// SecurityEnrollKibanaRequest configures the Security Enroll Kibana API request. 
+type SecurityEnrollKibanaRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityEnrollKibanaRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_kibana") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_security/enroll/kibana")) + path.WriteString("http://") + path.WriteString("/_security/enroll/kibana") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.enroll_kibana") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enroll_kibana") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityEnrollKibana) WithContext(v context.Context) func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityEnrollKibana) WithPretty() func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityEnrollKibana) WithHuman() func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityEnrollKibana) WithErrorTrace() func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityEnrollKibana) WithFilterPath(v ...string) func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityEnrollKibana) WithHeader(h map[string]string) func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityEnrollKibana) WithOpaqueID(s string) func(*SecurityEnrollKibanaRequest) { + return func(r *SecurityEnrollKibanaRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_node.go new file mode 100644 index 000000000..e7b9951e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.enroll_node.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityEnrollNodeFunc(t Transport) SecurityEnrollNode { + return func(o ...func(*SecurityEnrollNodeRequest)) (*Response, error) { + var r = SecurityEnrollNodeRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityEnrollNode - Allows a new node to enroll to an existing cluster with security enabled. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-node-enrollment.html. +type SecurityEnrollNode func(o ...func(*SecurityEnrollNodeRequest)) (*Response, error) + +// SecurityEnrollNodeRequest configures the Security Enroll Node API request. +type SecurityEnrollNodeRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityEnrollNodeRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_security/enroll/node")) + path.WriteString("http://") + path.WriteString("/_security/enroll/node") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.enroll_node") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enroll_node") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityEnrollNode) WithContext(v context.Context) func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityEnrollNode) WithPretty() func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityEnrollNode) WithHuman() func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityEnrollNode) WithErrorTrace() func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityEnrollNode) WithFilterPath(v ...string) func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityEnrollNode) WithHeader(h map[string]string) func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SecurityEnrollNode) WithOpaqueID(s string) func(*SecurityEnrollNodeRequest) { + return func(r *SecurityEnrollNodeRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_api_key.go new file mode 100644 index 000000000..261763359 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_api_key.go @@ -0,0 +1,313 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSecurityGetAPIKeyFunc(t Transport) SecurityGetAPIKey { + return func(o ...func(*SecurityGetAPIKeyRequest)) (*Response, error) { + var r = SecurityGetAPIKeyRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityGetAPIKey - Retrieves information for one or more API keys. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html. +type SecurityGetAPIKey func(o ...func(*SecurityGetAPIKeyRequest)) (*Response, error) + +// SecurityGetAPIKeyRequest configures the Security GetAPI Key API request. +type SecurityGetAPIKeyRequest struct { + ActiveOnly *bool + ID string + Name string + Owner *bool + RealmName string + Username string + WithLimitedBy *bool + WithProfileUID *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityGetAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_security/api_key")) + path.WriteString("http://") + path.WriteString("/_security/api_key") + + params = make(map[string]string) + + if r.ActiveOnly != nil { + params["active_only"] = strconv.FormatBool(*r.ActiveOnly) + } + + if r.ID != "" { + params["id"] = r.ID + } + + if r.Name != "" { + params["name"] = r.Name + } + + if r.Owner != nil { + params["owner"] = strconv.FormatBool(*r.Owner) + } + + if r.RealmName != "" { + params["realm_name"] = r.RealmName + } + + if r.Username != "" { + params["username"] = r.Username + } + + if r.WithLimitedBy != nil { + params["with_limited_by"] = strconv.FormatBool(*r.WithLimitedBy) + } + + if r.WithProfileUID != nil { + params["with_profile_uid"] = strconv.FormatBool(*r.WithProfileUID) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_api_key") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityGetAPIKey) WithContext(v context.Context) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.ctx = v + } +} + +// WithActiveOnly - flag to limit response to only active (not invalidated or expired) api keys. +func (f SecurityGetAPIKey) WithActiveOnly(v bool) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.ActiveOnly = &v + } +} + +// WithID - api key ID of the api key to be retrieved. +func (f SecurityGetAPIKey) WithID(v string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.ID = v + } +} + +// WithName - api key name of the api key to be retrieved. +func (f SecurityGetAPIKey) WithName(v string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.Name = v + } +} + +// WithOwner - flag to query api keys owned by the currently authenticated user. 
+func (f SecurityGetAPIKey) WithOwner(v bool) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.Owner = &v + } +} + +// WithRealmName - realm name of the user who created this api key to be retrieved. +func (f SecurityGetAPIKey) WithRealmName(v string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.RealmName = v + } +} + +// WithUsername - user name of the user who created this api key to be retrieved. +func (f SecurityGetAPIKey) WithUsername(v string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.Username = v + } +} + +// WithWithLimitedBy - flag to show the limited-by role descriptors of api keys. +func (f SecurityGetAPIKey) WithWithLimitedBy(v bool) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.WithLimitedBy = &v + } +} + +// WithWithProfileUID - flag to also retrieve the api key's owner profile uid, if it exists. +func (f SecurityGetAPIKey) WithWithProfileUID(v bool) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.WithProfileUID = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityGetAPIKey) WithPretty() func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityGetAPIKey) WithHuman() func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityGetAPIKey) WithErrorTrace() func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGetAPIKey) WithFilterPath(v ...string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityGetAPIKey) WithHeader(h map[string]string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityGetAPIKey) WithOpaqueID(s string) func(*SecurityGetAPIKeyRequest) { + return func(r *SecurityGetAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_builtin_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_builtin_privileges.go new file mode 100644 index 000000000..c0a8d59cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_builtin_privileges.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
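// A minimal sketch for SecurityGetAPIKey, same client assumptions; it lists
// the calling user's still-active keys and decodes the JSON body generically
// rather than assuming a typed response struct.
package main

import (
	"context"
	"encoding/json"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("client: %s", err)
	}

	res, err := es.Security.GetAPIKey(
		es.Security.GetAPIKey.WithContext(context.Background()),
		es.Security.GetAPIKey.WithOwner(true),      // only keys owned by the authenticated user
		es.Security.GetAPIKey.WithActiveOnly(true), // skip invalidated and expired keys
	)
	if err != nil {
		log.Fatalf("request: %s", err)
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Fatalf("get_api_key failed: %s", res.String())
	}

	var body map[string]any
	if err := json.NewDecoder(res.Body).Decode(&body); err != nil {
		log.Fatalf("decode: %s", err)
	}
	log.Printf("api_keys: %v", body["api_keys"])
}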
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityGetBuiltinPrivilegesFunc(t Transport) SecurityGetBuiltinPrivileges { + return func(o ...func(*SecurityGetBuiltinPrivilegesRequest)) (*Response, error) { + var r = SecurityGetBuiltinPrivilegesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityGetBuiltinPrivileges - Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html. +type SecurityGetBuiltinPrivileges func(o ...func(*SecurityGetBuiltinPrivilegesRequest)) (*Response, error) + +// SecurityGetBuiltinPrivilegesRequest configures the Security Get Builtin Privileges API request. +type SecurityGetBuiltinPrivilegesRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r SecurityGetBuiltinPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_builtin_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_security/privilege/_builtin")) + path.WriteString("http://") + path.WriteString("/_security/privilege/_builtin") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_builtin_privileges") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_builtin_privileges") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityGetBuiltinPrivileges) WithContext(v context.Context) func(*SecurityGetBuiltinPrivilegesRequest) { + return func(r *SecurityGetBuiltinPrivilegesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityGetBuiltinPrivileges) WithPretty() func(*SecurityGetBuiltinPrivilegesRequest) { + return func(r *SecurityGetBuiltinPrivilegesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityGetBuiltinPrivileges) WithHuman() func(*SecurityGetBuiltinPrivilegesRequest) { + return func(r *SecurityGetBuiltinPrivilegesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityGetBuiltinPrivileges) WithErrorTrace() func(*SecurityGetBuiltinPrivilegesRequest) { + return func(r *SecurityGetBuiltinPrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGetBuiltinPrivileges) WithFilterPath(v ...string) func(*SecurityGetBuiltinPrivilegesRequest) { + return func(r *SecurityGetBuiltinPrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityGetBuiltinPrivileges) WithHeader(h map[string]string) func(*SecurityGetBuiltinPrivilegesRequest) {
+	return func(r *SecurityGetBuiltinPrivilegesRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetBuiltinPrivileges) WithOpaqueID(s string) func(*SecurityGetBuiltinPrivilegesRequest) {
+	return func(r *SecurityGetBuiltinPrivilegesRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_privileges.go
new file mode 100644
index 000000000..2c3736524
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_privileges.go
@@ -0,0 +1,249 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetPrivilegesFunc(t Transport) SecurityGetPrivileges {
+	return func(o ...func(*SecurityGetPrivilegesRequest)) (*Response, error) {
+		var r = SecurityGetPrivilegesRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetPrivileges - Retrieves application privileges.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html.
+type SecurityGetPrivileges func(o ...func(*SecurityGetPrivilegesRequest)) (*Response, error)
+
+// SecurityGetPrivilegesRequest configures the Security Get Privileges API request.
+type SecurityGetPrivilegesRequest struct {
+	Application string
+	Name        string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_privileges")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("privilege") + 1 + len(r.Application) + 1 + len(r.Name))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("privilege")
+	if r.Application != "" {
+		path.WriteString("/")
+		path.WriteString(r.Application)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "application", r.Application)
+		}
+	}
+	if r.Name != "" {
+		path.WriteString("/")
+		path.WriteString(r.Name)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "name", r.Name)
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_privileges")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_privileges")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetPrivileges) WithContext(v context.Context) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.ctx = v
+	}
+}
+
+// WithApplication - application name.
+func (f SecurityGetPrivileges) WithApplication(v string) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.Application = v
+	}
+}
+
+// WithName - privilege name.
+func (f SecurityGetPrivileges) WithName(v string) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.Name = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetPrivileges) WithPretty() func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetPrivileges) WithHuman() func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetPrivileges) WithErrorTrace() func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetPrivileges) WithFilterPath(v ...string) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetPrivileges) WithHeader(h map[string]string) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetPrivileges) WithOpaqueID(s string) func(*SecurityGetPrivilegesRequest) {
+	return func(r *SecurityGetPrivilegesRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role.go
new file mode 100644
index 000000000..6ee1b8aca
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role.go
@@ -0,0 +1,234 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetRoleFunc(t Transport) SecurityGetRole {
+	return func(o ...func(*SecurityGetRoleRequest)) (*Response, error) {
+		var r = SecurityGetRoleRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetRole - Retrieves roles in the native realm.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html.
+type SecurityGetRole func(o ...func(*SecurityGetRoleRequest)) (*Response, error)
+
+// SecurityGetRoleRequest configures the Security Get Role API request.
+type SecurityGetRoleRequest struct {
+	Name []string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_role")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("role") + 1 + len(strings.Join(r.Name, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("role")
+	if len(r.Name) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Name, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_role")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_role")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetRole) WithContext(v context.Context) func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.ctx = v
+	}
+}
+
+// WithName - a list of role names.
+func (f SecurityGetRole) WithName(v ...string) func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.Name = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetRole) WithPretty() func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetRole) WithHuman() func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetRole) WithErrorTrace() func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetRole) WithFilterPath(v ...string) func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetRole) WithHeader(h map[string]string) func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetRole) WithOpaqueID(s string) func(*SecurityGetRoleRequest) {
+	return func(r *SecurityGetRoleRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role_mapping.go
new file mode 100644
index 000000000..8a01d5342
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_role_mapping.go
@@ -0,0 +1,234 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetRoleMappingFunc(t Transport) SecurityGetRoleMapping {
+	return func(o ...func(*SecurityGetRoleMappingRequest)) (*Response, error) {
+		var r = SecurityGetRoleMappingRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetRoleMapping - Retrieves role mappings.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html.
+type SecurityGetRoleMapping func(o ...func(*SecurityGetRoleMappingRequest)) (*Response, error)
+
+// SecurityGetRoleMappingRequest configures the Security Get Role Mapping API request.
+type SecurityGetRoleMappingRequest struct {
+	Name []string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetRoleMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_role_mapping")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("role_mapping") + 1 + len(strings.Join(r.Name, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("role_mapping")
+	if len(r.Name) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Name, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "name", strings.Join(r.Name, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_role_mapping")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_role_mapping")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetRoleMapping) WithContext(v context.Context) func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.ctx = v
+	}
+}
+
+// WithName - a list of role-mapping names.
+func (f SecurityGetRoleMapping) WithName(v ...string) func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.Name = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetRoleMapping) WithPretty() func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetRoleMapping) WithHuman() func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetRoleMapping) WithErrorTrace() func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetRoleMapping) WithFilterPath(v ...string) func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetRoleMapping) WithHeader(h map[string]string) func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetRoleMapping) WithOpaqueID(s string) func(*SecurityGetRoleMappingRequest) {
+	return func(r *SecurityGetRoleMappingRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_accounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_accounts.go
new file mode 100644
index 000000000..9f71851a5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_accounts.go
@@ -0,0 +1,249 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetServiceAccountsFunc(t Transport) SecurityGetServiceAccounts {
+	return func(o ...func(*SecurityGetServiceAccountsRequest)) (*Response, error) {
+		var r = SecurityGetServiceAccountsRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetServiceAccounts - Retrieves information about service accounts.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html.
+type SecurityGetServiceAccounts func(o ...func(*SecurityGetServiceAccountsRequest)) (*Response, error)
+
+// SecurityGetServiceAccountsRequest configures the Security Get Service Accounts API request.
+type SecurityGetServiceAccountsRequest struct {
+	Namespace string
+	Service   string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetServiceAccountsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_service_accounts")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("service") + 1 + len(r.Namespace) + 1 + len(r.Service))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("service")
+	if r.Namespace != "" {
+		path.WriteString("/")
+		path.WriteString(r.Namespace)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "namespace", r.Namespace)
+		}
+	}
+	if r.Service != "" {
+		path.WriteString("/")
+		path.WriteString(r.Service)
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "service", r.Service)
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_service_accounts")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_service_accounts")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetServiceAccounts) WithContext(v context.Context) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithNamespace - an identifier for the namespace.
+func (f SecurityGetServiceAccounts) WithNamespace(v string) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.Namespace = v
+	}
+}
+
+// WithService - an identifier for the service name.
+func (f SecurityGetServiceAccounts) WithService(v string) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.Service = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetServiceAccounts) WithPretty() func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetServiceAccounts) WithHuman() func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetServiceAccounts) WithErrorTrace() func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetServiceAccounts) WithFilterPath(v ...string) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetServiceAccounts) WithHeader(h map[string]string) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetServiceAccounts) WithOpaqueID(s string) func(*SecurityGetServiceAccountsRequest) {
+	return func(r *SecurityGetServiceAccountsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_credentials.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_credentials.go
new file mode 100644
index 000000000..d70b51fcf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_service_credentials.go
@@ -0,0 +1,233 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetServiceCredentialsFunc(t Transport) SecurityGetServiceCredentials {
+	return func(namespace string, service string, o ...func(*SecurityGetServiceCredentialsRequest)) (*Response, error) {
+		var r = SecurityGetServiceCredentialsRequest{Namespace: namespace, Service: service}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetServiceCredentials - Retrieves information of all service credentials for a service account.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html.
+type SecurityGetServiceCredentials func(namespace string, service string, o ...func(*SecurityGetServiceCredentialsRequest)) (*Response, error)
+
+// SecurityGetServiceCredentialsRequest configures the Security Get Service Credentials API request.
+type SecurityGetServiceCredentialsRequest struct {
+	Namespace string
+	Service   string
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetServiceCredentialsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_service_credentials")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("service") + 1 + len(r.Namespace) + 1 + len(r.Service) + 1 + len("credential"))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("service")
+	path.WriteString("/")
+	path.WriteString(r.Namespace)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "namespace", r.Namespace)
+	}
+	path.WriteString("/")
+	path.WriteString(r.Service)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.RecordPathPart(ctx, "service", r.Service)
+	}
+	path.WriteString("/")
+	path.WriteString("credential")
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_service_credentials")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_service_credentials")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetServiceCredentials) WithContext(v context.Context) func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetServiceCredentials) WithPretty() func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetServiceCredentials) WithHuman() func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetServiceCredentials) WithErrorTrace() func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetServiceCredentials) WithFilterPath(v ...string) func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetServiceCredentials) WithHeader(h map[string]string) func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetServiceCredentials) WithOpaqueID(s string) func(*SecurityGetServiceCredentialsRequest) {
+	return func(r *SecurityGetServiceCredentialsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_settings.go
new file mode 100644
index 000000000..55ea9d06b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_settings.go
@@ -0,0 +1,229 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"time"
+)
+
+func newSecurityGetSettingsFunc(t Transport) SecurityGetSettings {
+	return func(o ...func(*SecurityGetSettingsRequest)) (*Response, error) {
+		var r = SecurityGetSettingsRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetSettings - Retrieve settings for the security system indices
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html.
+type SecurityGetSettings func(o ...func(*SecurityGetSettingsRequest)) (*Response, error)
+
+// SecurityGetSettingsRequest configures the Security Get Settings API request.
+type SecurityGetSettingsRequest struct {
+	MasterTimeout time.Duration
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_settings")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + len("/_security/settings"))
+	path.WriteString("http://")
+	path.WriteString("/_security/settings")
+
+	params = make(map[string]string)
+
+	if r.MasterTimeout != 0 {
+		params["master_timeout"] = formatDuration(r.MasterTimeout)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_settings")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_settings")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetSettings) WithContext(v context.Context) func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.ctx = v
+	}
+}
+
+// WithMasterTimeout - timeout for connection to master.
+func (f SecurityGetSettings) WithMasterTimeout(v time.Duration) func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.MasterTimeout = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetSettings) WithPretty() func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetSettings) WithHuman() func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetSettings) WithErrorTrace() func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetSettings) WithFilterPath(v ...string) func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetSettings) WithHeader(h map[string]string) func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetSettings) WithOpaqueID(s string) func(*SecurityGetSettingsRequest) {
+	return func(r *SecurityGetSettingsRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_token.go
new file mode 100644
index 000000000..008f03e0d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_token.go
@@ -0,0 +1,225 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetTokenFunc(t Transport) SecurityGetToken {
+	return func(body io.Reader, o ...func(*SecurityGetTokenRequest)) (*Response, error) {
+		var r = SecurityGetTokenRequest{Body: body}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetToken - Creates a bearer token for access without requiring basic authentication.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html.
+type SecurityGetToken func(body io.Reader, o ...func(*SecurityGetTokenRequest)) (*Response, error)
+
+// SecurityGetTokenRequest configures the Security Get Token API request.
+type SecurityGetTokenRequest struct {
+	Body io.Reader
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetTokenRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_token")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "POST"
+
+	path.Grow(7 + len("/_security/oauth2/token"))
+	path.WriteString("http://")
+	path.WriteString("/_security/oauth2/token")
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), r.Body)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if r.Body != nil && req.Header.Get(headerContentType) == "" {
+		req.Header[headerContentType] = headerContentTypeJSON
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_token")
+		if reader := instrument.RecordRequestBody(ctx, "security.get_token", r.Body); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_token")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetToken) WithContext(v context.Context) func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetToken) WithPretty() func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetToken) WithHuman() func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetToken) WithErrorTrace() func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetToken) WithFilterPath(v ...string) func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetToken) WithHeader(h map[string]string) func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetToken) WithOpaqueID(s string) func(*SecurityGetTokenRequest) {
+	return func(r *SecurityGetTokenRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user.go
new file mode 100644
index 000000000..24f4cdc66
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user.go
@@ -0,0 +1,248 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func newSecurityGetUserFunc(t Transport) SecurityGetUser {
+	return func(o ...func(*SecurityGetUserRequest)) (*Response, error) {
+		var r = SecurityGetUserRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetUser - Retrieves information about users in the native realm and built-in users.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html.
+type SecurityGetUser func(o ...func(*SecurityGetUserRequest)) (*Response, error)
+
+// SecurityGetUserRequest configures the Security Get User API request.
+type SecurityGetUserRequest struct {
+	Username []string
+
+	WithProfileUID *bool
+
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_user")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(strings.Join(r.Username, ",")))
+	path.WriteString("http://")
+	path.WriteString("/")
+	path.WriteString("_security")
+	path.WriteString("/")
+	path.WriteString("user")
+	if len(r.Username) > 0 {
+		path.WriteString("/")
+		path.WriteString(strings.Join(r.Username, ","))
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "username", strings.Join(r.Username, ","))
+		}
+	}
+
+	params = make(map[string]string)
+
+	if r.WithProfileUID != nil {
+		params["with_profile_uid"] = strconv.FormatBool(*r.WithProfileUID)
+	}
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_user")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_user")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetUser) WithContext(v context.Context) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.ctx = v
+	}
+}
+
+// WithUsername - a list of usernames.
+func (f SecurityGetUser) WithUsername(v ...string) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.Username = v
+	}
+}
+
+// WithWithProfileUID - flag to retrieve profile uid (if exists) associated to the user.
+func (f SecurityGetUser) WithWithProfileUID(v bool) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.WithProfileUID = &v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetUser) WithPretty() func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetUser) WithHuman() func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetUser) WithErrorTrace() func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.ErrorTrace = true
+	}
+}
+
+// WithFilterPath filters the properties of the response body.
+func (f SecurityGetUser) WithFilterPath(v ...string) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		r.FilterPath = v
+	}
+}
+
+// WithHeader adds the headers to the HTTP request.
+func (f SecurityGetUser) WithHeader(h map[string]string) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		for k, v := range h {
+			r.Header.Add(k, v)
+		}
+	}
+}
+
+// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
+func (f SecurityGetUser) WithOpaqueID(s string) func(*SecurityGetUserRequest) {
+	return func(r *SecurityGetUserRequest) {
+		if r.Header == nil {
+			r.Header = make(http.Header)
+		}
+		r.Header.Set("X-Opaque-Id", s)
+	}
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_privileges.go
new file mode 100644
index 000000000..977c4002e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_privileges.go
@@ -0,0 +1,215 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// Code generated from specification version 9.1.0: DO NOT EDIT
+
+package esapi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+func newSecurityGetUserPrivilegesFunc(t Transport) SecurityGetUserPrivileges {
+	return func(o ...func(*SecurityGetUserPrivilegesRequest)) (*Response, error) {
+		var r = SecurityGetUserPrivilegesRequest{}
+		for _, f := range o {
+			f(&r)
+		}
+
+		if transport, ok := t.(Instrumented); ok {
+			r.Instrument = transport.InstrumentationEnabled()
+		}
+
+		return r.Do(r.ctx, t)
+	}
+}
+
+// ----- API Definition -------------------------------------------------------
+
+// SecurityGetUserPrivileges - Retrieves security privileges for the logged in user.
+//
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html.
+type SecurityGetUserPrivileges func(o ...func(*SecurityGetUserPrivilegesRequest)) (*Response, error)
+
+// SecurityGetUserPrivilegesRequest configures the Security Get User Privileges API request.
+type SecurityGetUserPrivilegesRequest struct {
+	Pretty     bool
+	Human      bool
+	ErrorTrace bool
+	FilterPath []string
+
+	Header http.Header
+
+	ctx context.Context
+
+	Instrument Instrumentation
+}
+
+// Do executes the request and returns response or error.
+func (r SecurityGetUserPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) {
+	var (
+		method string
+		path   strings.Builder
+		params map[string]string
+		ctx    context.Context
+	)
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.get_user_privileges")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	method = "GET"
+
+	path.Grow(7 + len("/_security/user/_privileges"))
+	path.WriteString("http://")
+	path.WriteString("/_security/user/_privileges")
+
+	params = make(map[string]string)
+
+	if r.Pretty {
+		params["pretty"] = "true"
+	}
+
+	if r.Human {
+		params["human"] = "true"
+	}
+
+	if r.ErrorTrace {
+		params["error_trace"] = "true"
+	}
+
+	if len(r.FilterPath) > 0 {
+		params["filter_path"] = strings.Join(r.FilterPath, ",")
+	}
+
+	req, err := newRequest(method, path.String(), nil)
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if len(params) > 0 {
+		q := req.URL.Query()
+		for k, v := range params {
+			q.Set(k, v)
+		}
+		req.URL.RawQuery = q.Encode()
+	}
+
+	if len(r.Header) > 0 {
+		if len(req.Header) == 0 {
+			req.Header = r.Header
+		} else {
+			for k, vv := range r.Header {
+				for _, v := range vv {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+	}
+
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.BeforeRequest(req, "security.get_user_privileges")
+	}
+	res, err := transport.Perform(req)
+	if instrument, ok := r.Instrument.(Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "security.get_user_privileges")
+	}
+	if err != nil {
+		if instrument, ok := r.Instrument.(Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	response := Response{
+		StatusCode: res.StatusCode,
+		Body:       res.Body,
+		Header:     res.Header,
+	}
+
+	return &response, nil
+}
+
+// WithContext sets the request context.
+func (f SecurityGetUserPrivileges) WithContext(v context.Context) func(*SecurityGetUserPrivilegesRequest) {
+	return func(r *SecurityGetUserPrivilegesRequest) {
+		r.ctx = v
+	}
+}
+
+// WithPretty makes the response body pretty-printed.
+func (f SecurityGetUserPrivileges) WithPretty() func(*SecurityGetUserPrivilegesRequest) {
+	return func(r *SecurityGetUserPrivilegesRequest) {
+		r.Pretty = true
+	}
+}
+
+// WithHuman makes statistical values human-readable.
+func (f SecurityGetUserPrivileges) WithHuman() func(*SecurityGetUserPrivilegesRequest) {
+	return func(r *SecurityGetUserPrivilegesRequest) {
+		r.Human = true
+	}
+}
+
+// WithErrorTrace includes the stack trace for errors in the response body.
+func (f SecurityGetUserPrivileges) WithErrorTrace() func(*SecurityGetUserPrivilegesRequest) { + return func(r *SecurityGetUserPrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGetUserPrivileges) WithFilterPath(v ...string) func(*SecurityGetUserPrivilegesRequest) { + return func(r *SecurityGetUserPrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityGetUserPrivileges) WithHeader(h map[string]string) func(*SecurityGetUserPrivilegesRequest) { + return func(r *SecurityGetUserPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityGetUserPrivileges) WithOpaqueID(s string) func(*SecurityGetUserPrivilegesRequest) { + return func(r *SecurityGetUserPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_profile.go new file mode 100644 index 000000000..4074a8bc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.get_user_profile.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newSecurityGetUserProfileFunc(t Transport) SecurityGetUserProfile { + return func(uid []string, o ...func(*SecurityGetUserProfileRequest)) (*Response, error) { + var r = SecurityGetUserProfileRequest{UID: uid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityGetUserProfile - Retrieves user profiles for the given unique ID(s). +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html. +type SecurityGetUserProfile func(uid []string, o ...func(*SecurityGetUserProfileRequest)) (*Response, error) + +// SecurityGetUserProfileRequest configures the Security Get User Profile API request. 
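+//
+// The struct form can also be used directly instead of the functional-options
+// wrapper; a hypothetical sketch (es is an *elasticsearch.Client, the UID a
+// placeholder):
+//
+//     req := SecurityGetUserProfileRequest{UID: []string{"<profile-uid>"}}
+//     res, err := req.Do(context.Background(), es)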
+type SecurityGetUserProfileRequest struct { + UID []string + + Data []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityGetUserProfileRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + if len(r.UID) == 0 { + return nil, errors.New("uid is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_security") + 1 + len("profile") + 1 + len(strings.Join(r.UID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString(strings.Join(r.UID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", strings.Join(r.UID, ",")) + } + + params = make(map[string]string) + + if len(r.Data) > 0 { + params["data"] = strings.Join(r.Data, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_user_profile") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_user_profile") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityGetUserProfile) WithContext(v context.Context) func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.ctx = v + } +} + +// WithData - a list of keys for which the corresponding application data are retrieved.. +func (f SecurityGetUserProfile) WithData(v ...string) func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.Data = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityGetUserProfile) WithPretty() func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityGetUserProfile) WithHuman() func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityGetUserProfile) WithErrorTrace() func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGetUserProfile) WithFilterPath(v ...string) func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityGetUserProfile) WithHeader(h map[string]string) func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityGetUserProfile) WithOpaqueID(s string) func(*SecurityGetUserProfileRequest) { + return func(r *SecurityGetUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.grant_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.grant_api_key.go new file mode 100644 index 000000000..ccab01728 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.grant_api_key.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityGrantAPIKeyFunc(t Transport) SecurityGrantAPIKey { + return func(body io.Reader, o ...func(*SecurityGrantAPIKeyRequest)) (*Response, error) { + var r = SecurityGrantAPIKeyRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityGrantAPIKey - Creates an API key on behalf of another user. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html. +type SecurityGrantAPIKey func(body io.Reader, o ...func(*SecurityGrantAPIKeyRequest)) (*Response, error) + +// SecurityGrantAPIKeyRequest configures the Security GrantAPI Key API request. 
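+//
+// The body is caller-supplied JSON; a hedged sketch of a password-based grant
+// (all values are placeholders):
+//
+//     body := strings.NewReader(`{
+//       "grant_type": "password",
+//       "username": "<username>",
+//       "password": "<password>",
+//       "api_key": {"name": "<key-name>"}
+//     }`)
+//     res, err := es.Security.GrantAPIKey(body)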
+type SecurityGrantAPIKeyRequest struct { + Body io.Reader + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityGrantAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.grant_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/api_key/grant")) + path.WriteString("http://") + path.WriteString("/_security/api_key/grant") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.grant_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.grant_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.grant_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityGrantAPIKey) WithContext(v context.Context) func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityGrantAPIKey) WithRefresh(v string) func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityGrantAPIKey) WithPretty() func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityGrantAPIKey) WithHuman() func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityGrantAPIKey) WithErrorTrace() func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGrantAPIKey) WithFilterPath(v ...string) func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityGrantAPIKey) WithHeader(h map[string]string) func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityGrantAPIKey) WithOpaqueID(s string) func(*SecurityGrantAPIKeyRequest) { + return func(r *SecurityGrantAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges.go new file mode 100644 index 000000000..31c79c08c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityHasPrivilegesFunc(t Transport) SecurityHasPrivileges { + return func(body io.Reader, o ...func(*SecurityHasPrivilegesRequest)) (*Response, error) { + var r = SecurityHasPrivilegesRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityHasPrivileges - Determines whether the specified user has a specified list of privileges. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html. +type SecurityHasPrivileges func(body io.Reader, o ...func(*SecurityHasPrivilegesRequest)) (*Response, error) + +// SecurityHasPrivilegesRequest configures the Security Has Privileges API request. 
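+//
+// A hedged sketch checking one cluster and one index privilege (names are
+// placeholders; es is an assumed *elasticsearch.Client):
+//
+//     body := strings.NewReader(`{
+//       "cluster": ["monitor"],
+//       "index": [{"names": ["<index-pattern>"], "privileges": ["read"]}]
+//     }`)
+//     res, err := es.Security.HasPrivileges(body,
+//         es.Security.HasPrivileges.WithUser("<username>"),
+//     )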
+type SecurityHasPrivilegesRequest struct { + Body io.Reader + + User string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityHasPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.has_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.User) + 1 + len("_has_privileges")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + if r.User != "" { + path.WriteString("/") + path.WriteString(r.User) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "user", r.User) + } + } + path.WriteString("/") + path.WriteString("_has_privileges") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.has_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.has_privileges", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.has_privileges") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityHasPrivileges) WithContext(v context.Context) func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.ctx = v + } +} + +// WithUser - username. +func (f SecurityHasPrivileges) WithUser(v string) func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.User = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SecurityHasPrivileges) WithPretty() func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityHasPrivileges) WithHuman() func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityHasPrivileges) WithErrorTrace() func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityHasPrivileges) WithFilterPath(v ...string) func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityHasPrivileges) WithHeader(h map[string]string) func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityHasPrivileges) WithOpaqueID(s string) func(*SecurityHasPrivilegesRequest) { + return func(r *SecurityHasPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges_user_profile.go new file mode 100644 index 000000000..a52ca13ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.has_privileges_user_profile.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityHasPrivilegesUserProfileFunc(t Transport) SecurityHasPrivilegesUserProfile { + return func(body io.Reader, o ...func(*SecurityHasPrivilegesUserProfileRequest)) (*Response, error) { + var r = SecurityHasPrivilegesUserProfileRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityHasPrivilegesUserProfile - Determines whether the users associated with the specified profile IDs have all the requested privileges. 
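+//
+// A hedged sketch (profile UID and privileges are placeholders):
+//
+//     body := strings.NewReader(`{
+//       "uids": ["<profile-uid>"],
+//       "privileges": {"cluster": ["monitor"]}
+//     }`)
+//     res, err := es.Security.HasPrivilegesUserProfile(body)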
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html. +type SecurityHasPrivilegesUserProfile func(body io.Reader, o ...func(*SecurityHasPrivilegesUserProfileRequest)) (*Response, error) + +// SecurityHasPrivilegesUserProfileRequest configures the Security Has Privileges User Profile API request. +type SecurityHasPrivilegesUserProfileRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityHasPrivilegesUserProfileRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.has_privileges_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/profile/_has_privileges")) + path.WriteString("http://") + path.WriteString("/_security/profile/_has_privileges") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.has_privileges_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.has_privileges_user_profile", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.has_privileges_user_profile") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityHasPrivilegesUserProfile) WithContext(v context.Context) func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityHasPrivilegesUserProfile) WithPretty() func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityHasPrivilegesUserProfile) WithHuman() func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityHasPrivilegesUserProfile) WithErrorTrace() func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityHasPrivilegesUserProfile) WithFilterPath(v ...string) func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityHasPrivilegesUserProfile) WithHeader(h map[string]string) func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityHasPrivilegesUserProfile) WithOpaqueID(s string) func(*SecurityHasPrivilegesUserProfileRequest) { + return func(r *SecurityHasPrivilegesUserProfileRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_api_key.go new file mode 100644 index 000000000..d8604b1ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_api_key.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityInvalidateAPIKeyFunc(t Transport) SecurityInvalidateAPIKey { + return func(body io.Reader, o ...func(*SecurityInvalidateAPIKeyRequest)) (*Response, error) { + var r = SecurityInvalidateAPIKeyRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityInvalidateAPIKey - Invalidates one or more API keys. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html. 
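+//
+// A minimal sketch (the ID is a placeholder; the body also accepts "name",
+// "realm_name", and "username" as selectors):
+//
+//     body := strings.NewReader(`{"ids": ["<api-key-id>"]}`)
+//     res, err := es.Security.InvalidateAPIKey(body)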
+type SecurityInvalidateAPIKey func(body io.Reader, o ...func(*SecurityInvalidateAPIKeyRequest)) (*Response, error) + +// SecurityInvalidateAPIKeyRequest configures the Security InvalidateAPI Key API request. +type SecurityInvalidateAPIKeyRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityInvalidateAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.invalidate_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_security/api_key")) + path.WriteString("http://") + path.WriteString("/_security/api_key") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.invalidate_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.invalidate_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.invalidate_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityInvalidateAPIKey) WithContext(v context.Context) func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityInvalidateAPIKey) WithPretty() func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityInvalidateAPIKey) WithHuman() func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SecurityInvalidateAPIKey) WithErrorTrace() func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityInvalidateAPIKey) WithFilterPath(v ...string) func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityInvalidateAPIKey) WithHeader(h map[string]string) func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityInvalidateAPIKey) WithOpaqueID(s string) func(*SecurityInvalidateAPIKeyRequest) { + return func(r *SecurityInvalidateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_token.go new file mode 100644 index 000000000..dfc62203a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.invalidate_token.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityInvalidateTokenFunc(t Transport) SecurityInvalidateToken { + return func(body io.Reader, o ...func(*SecurityInvalidateTokenRequest)) (*Response, error) { + var r = SecurityInvalidateTokenRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityInvalidateToken - Invalidates one or more access tokens or refresh tokens. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html. +type SecurityInvalidateToken func(body io.Reader, o ...func(*SecurityInvalidateTokenRequest)) (*Response, error) + +// SecurityInvalidateTokenRequest configures the Security Invalidate Token API request. 
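+//
+// A hedged sketch invalidating a single access token ("refresh_token" may be
+// sent instead; the value is a placeholder):
+//
+//     body := strings.NewReader(`{"token": "<access-token>"}`)
+//     res, err := es.Security.InvalidateToken(body)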
+type SecurityInvalidateTokenRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityInvalidateTokenRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.invalidate_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + len("/_security/oauth2/token")) + path.WriteString("http://") + path.WriteString("/_security/oauth2/token") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.invalidate_token") + if reader := instrument.RecordRequestBody(ctx, "security.invalidate_token", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.invalidate_token") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityInvalidateToken) WithContext(v context.Context) func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityInvalidateToken) WithPretty() func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityInvalidateToken) WithHuman() func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityInvalidateToken) WithErrorTrace() func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SecurityInvalidateToken) WithFilterPath(v ...string) func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityInvalidateToken) WithHeader(h map[string]string) func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityInvalidateToken) WithOpaqueID(s string) func(*SecurityInvalidateTokenRequest) { + return func(r *SecurityInvalidateTokenRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_authenticate.go new file mode 100644 index 000000000..8031757e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_authenticate.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityOidcAuthenticateFunc(t Transport) SecurityOidcAuthenticate { + return func(body io.Reader, o ...func(*SecurityOidcAuthenticateRequest)) (*Response, error) { + var r = SecurityOidcAuthenticateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityOidcAuthenticate - Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html. +type SecurityOidcAuthenticate func(body io.Reader, o ...func(*SecurityOidcAuthenticateRequest)) (*Response, error) + +// SecurityOidcAuthenticateRequest configures the Security Oidc Authenticate API request. +type SecurityOidcAuthenticateRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
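+//
+// The body carries the OpenID Connect authentication-response parameters; a
+// hedged sketch (values are placeholders):
+//
+//     body := strings.NewReader(`{
+//       "redirect_uri": "<authentication-response-url>",
+//       "state": "<state>",
+//       "nonce": "<nonce>"
+//     }`)
+//     res, err := es.Security.OidcAuthenticate(body)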
+func (r SecurityOidcAuthenticateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/oidc/authenticate")) + path.WriteString("http://") + path.WriteString("/_security/oidc/authenticate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_authenticate") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_authenticate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_authenticate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityOidcAuthenticate) WithContext(v context.Context) func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityOidcAuthenticate) WithPretty() func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityOidcAuthenticate) WithHuman() func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityOidcAuthenticate) WithErrorTrace() func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityOidcAuthenticate) WithFilterPath(v ...string) func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityOidcAuthenticate) WithHeader(h map[string]string) func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityOidcAuthenticate) WithOpaqueID(s string) func(*SecurityOidcAuthenticateRequest) { + return func(r *SecurityOidcAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_logout.go new file mode 100644 index 000000000..7852e82dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_logout.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityOidcLogoutFunc(t Transport) SecurityOidcLogout { + return func(body io.Reader, o ...func(*SecurityOidcLogoutRequest)) (*Response, error) { + var r = SecurityOidcLogoutRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityOidcLogout - Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html. +type SecurityOidcLogout func(body io.Reader, o ...func(*SecurityOidcLogoutRequest)) (*Response, error) + +// SecurityOidcLogoutRequest configures the Security Oidc Logout API request. +type SecurityOidcLogoutRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
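+//
+// A hedged sketch (token values are placeholders; the access token is sent as
+// "token", optionally alongside "refresh_token"):
+//
+//     body := strings.NewReader(`{"token": "<access-token>", "refresh_token": "<refresh-token>"}`)
+//     res, err := es.Security.OidcLogout(body)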
+func (r SecurityOidcLogoutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_logout") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/oidc/logout")) + path.WriteString("http://") + path.WriteString("/_security/oidc/logout") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_logout") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_logout", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_logout") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityOidcLogout) WithContext(v context.Context) func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityOidcLogout) WithPretty() func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityOidcLogout) WithHuman() func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityOidcLogout) WithErrorTrace() func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityOidcLogout) WithFilterPath(v ...string) func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityOidcLogout) WithHeader(h map[string]string) func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityOidcLogout) WithOpaqueID(s string) func(*SecurityOidcLogoutRequest) { + return func(r *SecurityOidcLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_prepare_authentication.go new file mode 100644 index 000000000..bbb434704 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.oidc_prepare_authentication.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityOidcPrepareAuthenticationFunc(t Transport) SecurityOidcPrepareAuthentication { + return func(body io.Reader, o ...func(*SecurityOidcPrepareAuthenticationRequest)) (*Response, error) { + var r = SecurityOidcPrepareAuthenticationRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityOidcPrepareAuthentication - Creates an OAuth 2.0 authentication request as a URL string +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html. +type SecurityOidcPrepareAuthentication func(body io.Reader, o ...func(*SecurityOidcPrepareAuthenticationRequest)) (*Response, error) + +// SecurityOidcPrepareAuthenticationRequest configures the Security Oidc Prepare Authentication API request. +type SecurityOidcPrepareAuthenticationRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
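+//
+// A hedged sketch naming the OIDC realm to prepare against (the realm name is
+// a placeholder):
+//
+//     body := strings.NewReader(`{"realm": "<oidc-realm>"}`)
+//     res, err := es.Security.OidcPrepareAuthentication(body)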
+func (r SecurityOidcPrepareAuthenticationRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_prepare_authentication") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/oidc/prepare")) + path.WriteString("http://") + path.WriteString("/_security/oidc/prepare") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_prepare_authentication") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_prepare_authentication", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_prepare_authentication") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityOidcPrepareAuthentication) WithContext(v context.Context) func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityOidcPrepareAuthentication) WithPretty() func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityOidcPrepareAuthentication) WithHuman() func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityOidcPrepareAuthentication) WithErrorTrace() func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
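+//
+// The values are joined with commas into the filter_path query parameter (see
+// Do above). For example, to keep only the redirect URL in the response body
+// (the "redirect" field name is an assumption about the server response):
+//
+//	es.Security.OidcPrepareAuthentication.WithFilterPath("redirect")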
+func (f SecurityOidcPrepareAuthentication) WithFilterPath(v ...string) func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityOidcPrepareAuthentication) WithHeader(h map[string]string) func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityOidcPrepareAuthentication) WithOpaqueID(s string) func(*SecurityOidcPrepareAuthenticationRequest) { + return func(r *SecurityOidcPrepareAuthenticationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_privileges.go new file mode 100644 index 000000000..6dd0f1692 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_privileges.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityPutPrivilegesFunc(t Transport) SecurityPutPrivileges { + return func(body io.Reader, o ...func(*SecurityPutPrivilegesRequest)) (*Response, error) { + var r = SecurityPutPrivilegesRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityPutPrivileges - Adds or updates application privileges. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html. +type SecurityPutPrivileges func(body io.Reader, o ...func(*SecurityPutPrivilegesRequest)) (*Response, error) + +// SecurityPutPrivilegesRequest configures the Security Put Privileges API request. +type SecurityPutPrivilegesRequest struct { + Body io.Reader + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
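+//
+// A minimal sketch, assuming a client `es` backed by this package; the
+// application name, privilege name and actions are illustrative values:
+//
+//	res, err := es.Security.PutPrivileges(
+//		strings.NewReader(`{"myapp": {"read": {"actions": ["data:read/*"]}}}`),
+//		es.Security.PutPrivileges.WithRefresh("wait_for"),
+//	)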
+func (r SecurityPutPrivilegesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_security/privilege")) + path.WriteString("http://") + path.WriteString("/_security/privilege") + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.put_privileges", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_privileges") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityPutPrivileges) WithContext(v context.Context) func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityPutPrivileges) WithRefresh(v string) func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityPutPrivileges) WithPretty() func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityPutPrivileges) WithHuman() func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
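+//
+// This sets the error_trace=true query parameter (see Do above), which is
+// mainly useful while debugging a failing call; a sketch, where `body` is an
+// assumed io.Reader:
+//
+//	res, err := es.Security.PutPrivileges(body,
+//		es.Security.PutPrivileges.WithErrorTrace(),
+//	)
+//	// A non-2xx res.StatusCode then comes with a stack trace in the body.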
+func (f SecurityPutPrivileges) WithErrorTrace() func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityPutPrivileges) WithFilterPath(v ...string) func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityPutPrivileges) WithHeader(h map[string]string) func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityPutPrivileges) WithOpaqueID(s string) func(*SecurityPutPrivilegesRequest) { + return func(r *SecurityPutPrivilegesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role.go new file mode 100644 index 000000000..c1515dc4c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityPutRoleFunc(t Transport) SecurityPutRole { + return func(name string, body io.Reader, o ...func(*SecurityPutRoleRequest)) (*Response, error) { + var r = SecurityPutRoleRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityPutRole - Adds and updates roles in the native realm. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html. +type SecurityPutRole func(name string, body io.Reader, o ...func(*SecurityPutRoleRequest)) (*Response, error) + +// SecurityPutRoleRequest configures the Security Put Role API request. +type SecurityPutRoleRequest struct { + Body io.Reader + + Name string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
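+//
+// A minimal sketch, assuming a client `es` backed by this package; the role
+// name, cluster and index privileges are illustrative values:
+//
+//	res, err := es.Security.PutRole(
+//		"logs_reader",
+//		strings.NewReader(`{"cluster": ["monitor"], "indices": [{"names": ["logs-*"], "privileges": ["read"]}]}`),
+//	)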
+func (r SecurityPutRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("role") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_role") + if reader := instrument.RecordRequestBody(ctx, "security.put_role", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_role") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityPutRole) WithContext(v context.Context) func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityPutRole) WithRefresh(v string) func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityPutRole) WithPretty() func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityPutRole) WithHuman() func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityPutRole) WithErrorTrace() func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityPutRole) WithFilterPath(v ...string) func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityPutRole) WithHeader(h map[string]string) func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityPutRole) WithOpaqueID(s string) func(*SecurityPutRoleRequest) { + return func(r *SecurityPutRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role_mapping.go new file mode 100644 index 000000000..ee79ec53e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_role_mapping.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityPutRoleMappingFunc(t Transport) SecurityPutRoleMapping { + return func(name string, body io.Reader, o ...func(*SecurityPutRoleMappingRequest)) (*Response, error) { + var r = SecurityPutRoleMappingRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityPutRoleMapping - Creates and updates role mappings. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html. +type SecurityPutRoleMapping func(name string, body io.Reader, o ...func(*SecurityPutRoleMappingRequest)) (*Response, error) + +// SecurityPutRoleMappingRequest configures the Security Put Role Mapping API request. 
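+//
+// A minimal sketch of the corresponding call, assuming a client `es` backed
+// by this package; the mapping name, roles and rule are illustrative values:
+//
+//	res, err := es.Security.PutRoleMapping(
+//		"saml-users",
+//		strings.NewReader(`{"roles": ["user"], "enabled": true, "rules": {"field": {"realm.name": "saml1"}}}`),
+//		es.Security.PutRoleMapping.WithRefresh("true"),
+//	)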
+type SecurityPutRoleMappingRequest struct { + Body io.Reader + + Name string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityPutRoleMappingRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("role_mapping") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + path.WriteString("/") + path.WriteString(r.Name) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.Name) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_role_mapping") + if reader := instrument.RecordRequestBody(ctx, "security.put_role_mapping", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_role_mapping") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityPutRoleMapping) WithContext(v context.Context) func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. 
+func (f SecurityPutRoleMapping) WithRefresh(v string) func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityPutRoleMapping) WithPretty() func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityPutRoleMapping) WithHuman() func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityPutRoleMapping) WithErrorTrace() func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityPutRoleMapping) WithFilterPath(v ...string) func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityPutRoleMapping) WithHeader(h map[string]string) func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityPutRoleMapping) WithOpaqueID(s string) func(*SecurityPutRoleMappingRequest) { + return func(r *SecurityPutRoleMappingRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_user.go new file mode 100644 index 000000000..24e424e9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.put_user.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityPutUserFunc(t Transport) SecurityPutUser { + return func(username string, body io.Reader, o ...func(*SecurityPutUserRequest)) (*Response, error) { + var r = SecurityPutUserRequest{Username: username, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityPutUser - Adds and updates users in the native realm. These users are commonly referred to as native users. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html. +type SecurityPutUser func(username string, body io.Reader, o ...func(*SecurityPutUserRequest)) (*Response, error) + +// SecurityPutUserRequest configures the Security Put User API request. +type SecurityPutUserRequest struct { + Body io.Reader + + Username string + + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityPutUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("user") + 1 + len(r.Username)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString(r.Username) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.Username) + } + + params = make(map[string]string) + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_user") + if reader := instrument.RecordRequestBody(ctx, "security.put_user", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_user") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityPutUser) WithContext(v context.Context) func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.ctx = v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityPutUser) WithRefresh(v string) func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityPutUser) WithPretty() func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityPutUser) WithHuman() func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityPutUser) WithErrorTrace() func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityPutUser) WithFilterPath(v ...string) func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityPutUser) WithHeader(h map[string]string) func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityPutUser) WithOpaqueID(s string) func(*SecurityPutUserRequest) { + return func(r *SecurityPutUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_api_keys.go new file mode 100644 index 000000000..33e4b2c24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_api_keys.go @@ -0,0 +1,270 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSecurityQueryAPIKeysFunc(t Transport) SecurityQueryAPIKeys { + return func(o ...func(*SecurityQueryAPIKeysRequest)) (*Response, error) { + var r = SecurityQueryAPIKeysRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityQueryAPIKeys - Retrieves information for API keys using a subset of query DSL +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html. +type SecurityQueryAPIKeys func(o ...func(*SecurityQueryAPIKeysRequest)) (*Response, error) + +// SecurityQueryAPIKeysRequest configures the Security QueryAPI Keys API request. +type SecurityQueryAPIKeysRequest struct { + Body io.Reader + + TypedKeys *bool + WithLimitedBy *bool + WithProfileUID *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityQueryAPIKeysRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_api_keys") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/_query/api_key")) + path.WriteString("http://") + path.WriteString("/_security/_query/api_key") + + params = make(map[string]string) + + if r.TypedKeys != nil { + params["typed_keys"] = strconv.FormatBool(*r.TypedKeys) + } + + if r.WithLimitedBy != nil { + params["with_limited_by"] = strconv.FormatBool(*r.WithLimitedBy) + } + + if r.WithProfileUID != nil { + params["with_profile_uid"] = strconv.FormatBool(*r.WithProfileUID) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_api_keys") + if reader := instrument.RecordRequestBody(ctx, "security.query_api_keys", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_api_keys") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityQueryAPIKeys) WithContext(v context.Context) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.ctx = v + } +} + +// WithBody - From, size, query, sort and search_after. +func (f SecurityQueryAPIKeys) WithBody(v io.Reader) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.Body = v + } +} + +// WithTypedKeys - flag to prefix aggregation names by their respective types in the response. +func (f SecurityQueryAPIKeys) WithTypedKeys(v bool) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.TypedKeys = &v + } +} + +// WithWithLimitedBy - flag to show the limited-by role descriptors of api keys. +func (f SecurityQueryAPIKeys) WithWithLimitedBy(v bool) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.WithLimitedBy = &v + } +} + +// WithWithProfileUID - flag to also retrieve the api key's owner profile uid, if it exists. +func (f SecurityQueryAPIKeys) WithWithProfileUID(v bool) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.WithProfileUID = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityQueryAPIKeys) WithPretty() func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityQueryAPIKeys) WithHuman() func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityQueryAPIKeys) WithErrorTrace() func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityQueryAPIKeys) WithFilterPath(v ...string) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityQueryAPIKeys) WithHeader(h map[string]string) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityQueryAPIKeys) WithOpaqueID(s string) func(*SecurityQueryAPIKeysRequest) { + return func(r *SecurityQueryAPIKeysRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_role.go new file mode 100644 index 000000000..965195084 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_role.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityQueryRoleFunc(t Transport) SecurityQueryRole { + return func(o ...func(*SecurityQueryRoleRequest)) (*Response, error) { + var r = SecurityQueryRoleRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityQueryRole - Retrieves information for Roles using a subset of query DSL +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html. +type SecurityQueryRole func(o ...func(*SecurityQueryRoleRequest)) (*Response, error) + +// SecurityQueryRoleRequest configures the Security Query Role API request. +type SecurityQueryRoleRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
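+//
+// The endpoint takes no required arguments, so the query is supplied through
+// the WithBody option; a minimal sketch, assuming a client `es` backed by
+// this package (query values illustrative):
+//
+//	res, err := es.Security.QueryRole(
+//		es.Security.QueryRole.WithBody(strings.NewReader(`{"query": {"match_all": {}}, "size": 100}`)),
+//	)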
+func (r SecurityQueryRoleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/_query/role")) + path.WriteString("http://") + path.WriteString("/_security/_query/role") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_role") + if reader := instrument.RecordRequestBody(ctx, "security.query_role", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_role") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityQueryRole) WithContext(v context.Context) func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.ctx = v + } +} + +// WithBody - From, size, query, sort and search_after. +func (f SecurityQueryRole) WithBody(v io.Reader) func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityQueryRole) WithPretty() func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityQueryRole) WithHuman() func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityQueryRole) WithErrorTrace() func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityQueryRole) WithFilterPath(v ...string) func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecurityQueryRole) WithHeader(h map[string]string) func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityQueryRole) WithOpaqueID(s string) func(*SecurityQueryRoleRequest) { + return func(r *SecurityQueryRoleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_user.go new file mode 100644 index 000000000..8f1d60a9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.query_user.go @@ -0,0 +1,246 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSecurityQueryUserFunc(t Transport) SecurityQueryUser { + return func(o ...func(*SecurityQueryUserRequest)) (*Response, error) { + var r = SecurityQueryUserRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityQueryUser - Retrieves information for Users using a subset of query DSL +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-user.html. +type SecurityQueryUser func(o ...func(*SecurityQueryUserRequest)) (*Response, error) + +// SecurityQueryUserRequest configures the Security Query User API request. +type SecurityQueryUserRequest struct { + Body io.Reader + + WithProfileUID *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
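+//
+// A minimal sketch, assuming a client `es` backed by this package; the
+// with_profile_uid parameter is serialized from the WithWithProfileUID option
+// defined below (query values illustrative):
+//
+//	res, err := es.Security.QueryUser(
+//		es.Security.QueryUser.WithBody(strings.NewReader(`{"query": {"prefix": {"username": "dev-"}}}`)),
+//		es.Security.QueryUser.WithWithProfileUID(true),
+//	)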
+func (r SecurityQueryUserRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/_query/user")) + path.WriteString("http://") + path.WriteString("/_security/_query/user") + + params = make(map[string]string) + + if r.WithProfileUID != nil { + params["with_profile_uid"] = strconv.FormatBool(*r.WithProfileUID) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_user") + if reader := instrument.RecordRequestBody(ctx, "security.query_user", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_user") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityQueryUser) WithContext(v context.Context) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.ctx = v + } +} + +// WithBody - From, size, query, sort and search_after. +func (f SecurityQueryUser) WithBody(v io.Reader) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.Body = v + } +} + +// WithWithProfileUID - flag to retrieve profile uid (if exists) associated with the user. +func (f SecurityQueryUser) WithWithProfileUID(v bool) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.WithProfileUID = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityQueryUser) WithPretty() func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityQueryUser) WithHuman() func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SecurityQueryUser) WithErrorTrace() func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityQueryUser) WithFilterPath(v ...string) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityQueryUser) WithHeader(h map[string]string) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityQueryUser) WithOpaqueID(s string) func(*SecurityQueryUserRequest) { + return func(r *SecurityQueryUserRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_authenticate.go new file mode 100644 index 000000000..0effb869e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_authenticate.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySamlAuthenticateFunc(t Transport) SecuritySamlAuthenticate { + return func(body io.Reader, o ...func(*SecuritySamlAuthenticateRequest)) (*Response, error) { + var r = SecuritySamlAuthenticateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlAuthenticate - Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html. +type SecuritySamlAuthenticate func(body io.Reader, o ...func(*SecuritySamlAuthenticateRequest)) (*Response, error) + +// SecuritySamlAuthenticateRequest configures the Security Saml Authenticate API request. +type SecuritySamlAuthenticateRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
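+//
+// A minimal sketch, assuming a client `es` backed by this package; content
+// carries the base64-encoded SAMLResponse and ids the request IDs issued at
+// prepare time (both values illustrative):
+//
+//	res, err := es.Security.SamlAuthenticate(
+//		strings.NewReader(`{"content": "<base64-SAMLResponse>", "ids": ["<request-id>"]}`),
+//	)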
+func (r SecuritySamlAuthenticateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/saml/authenticate")) + path.WriteString("http://") + path.WriteString("/_security/saml/authenticate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_authenticate") + if reader := instrument.RecordRequestBody(ctx, "security.saml_authenticate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_authenticate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlAuthenticate) WithContext(v context.Context) func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlAuthenticate) WithPretty() func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlAuthenticate) WithHuman() func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySamlAuthenticate) WithErrorTrace() func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySamlAuthenticate) WithFilterPath(v ...string) func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecuritySamlAuthenticate) WithHeader(h map[string]string) func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlAuthenticate) WithOpaqueID(s string) func(*SecuritySamlAuthenticateRequest) { + return func(r *SecuritySamlAuthenticateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_complete_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_complete_logout.go new file mode 100644 index 000000000..041b13f72 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_complete_logout.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySamlCompleteLogoutFunc(t Transport) SecuritySamlCompleteLogout { + return func(body io.Reader, o ...func(*SecuritySamlCompleteLogoutRequest)) (*Response, error) { + var r = SecuritySamlCompleteLogoutRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlCompleteLogout - Verifies the logout response sent from the SAML IdP +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html. +type SecuritySamlCompleteLogout func(body io.Reader, o ...func(*SecuritySamlCompleteLogoutRequest)) (*Response, error) + +// SecuritySamlCompleteLogoutRequest configures the Security Saml Complete Logout API request. +type SecuritySamlCompleteLogoutRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
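+//
+// A hypothetical invocation sketch (the es client variable, realm name, and
+// query string are illustrative placeholders):
+//
+//	res, err := es.Security.SamlCompleteLogout(
+//		strings.NewReader(`{"realm":"saml1","ids":["<request id>"],"query_string":"SAMLResponse=..."}`),
+//	)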
+func (r SecuritySamlCompleteLogoutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_complete_logout") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/saml/complete_logout")) + path.WriteString("http://") + path.WriteString("/_security/saml/complete_logout") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_complete_logout") + if reader := instrument.RecordRequestBody(ctx, "security.saml_complete_logout", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_complete_logout") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlCompleteLogout) WithContext(v context.Context) func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlCompleteLogout) WithPretty() func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlCompleteLogout) WithHuman() func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySamlCompleteLogout) WithErrorTrace() func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySamlCompleteLogout) WithFilterPath(v ...string) func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
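+//
+// For example (the header name and value below are illustrative only):
+//
+//	es.Security.SamlCompleteLogout.WithHeader(map[string]string{"X-Request-Trace": "on"})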
+func (f SecuritySamlCompleteLogout) WithHeader(h map[string]string) func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlCompleteLogout) WithOpaqueID(s string) func(*SecuritySamlCompleteLogoutRequest) { + return func(r *SecuritySamlCompleteLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_invalidate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_invalidate.go new file mode 100644 index 000000000..95db4c69f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_invalidate.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySamlInvalidateFunc(t Transport) SecuritySamlInvalidate { + return func(body io.Reader, o ...func(*SecuritySamlInvalidateRequest)) (*Response, error) { + var r = SecuritySamlInvalidateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlInvalidate - Consumes a SAML LogoutRequest +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html. +type SecuritySamlInvalidate func(body io.Reader, o ...func(*SecuritySamlInvalidateRequest)) (*Response, error) + +// SecuritySamlInvalidateRequest configures the Security Saml Invalidate API request. +type SecuritySamlInvalidateRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
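+//
+// A hypothetical invocation sketch (the es client variable and body values
+// are illustrative placeholders):
+//
+//	res, err := es.Security.SamlInvalidate(
+//		strings.NewReader(`{"realm":"saml1","query_string":"SAMLRequest=..."}`),
+//	)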
+func (r SecuritySamlInvalidateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_invalidate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/saml/invalidate")) + path.WriteString("http://") + path.WriteString("/_security/saml/invalidate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_invalidate") + if reader := instrument.RecordRequestBody(ctx, "security.saml_invalidate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_invalidate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlInvalidate) WithContext(v context.Context) func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlInvalidate) WithPretty() func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlInvalidate) WithHuman() func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySamlInvalidate) WithErrorTrace() func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySamlInvalidate) WithFilterPath(v ...string) func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecuritySamlInvalidate) WithHeader(h map[string]string) func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlInvalidate) WithOpaqueID(s string) func(*SecuritySamlInvalidateRequest) { + return func(r *SecuritySamlInvalidateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_logout.go new file mode 100644 index 000000000..1b3f53b4b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_logout.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySamlLogoutFunc(t Transport) SecuritySamlLogout { + return func(body io.Reader, o ...func(*SecuritySamlLogoutRequest)) (*Response, error) { + var r = SecuritySamlLogoutRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlLogout - Invalidates an access token and a refresh token that were generated via the SAML Authenticate API +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html. +type SecuritySamlLogout func(body io.Reader, o ...func(*SecuritySamlLogoutRequest)) (*Response, error) + +// SecuritySamlLogoutRequest configures the Security Saml Logout API request. +type SecuritySamlLogoutRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
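+//
+// A hypothetical invocation sketch (the es client variable and both tokens
+// are placeholders):
+//
+//	res, err := es.Security.SamlLogout(
+//		strings.NewReader(`{"token":"<access token>","refresh_token":"<refresh token>"}`),
+//	)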
+func (r SecuritySamlLogoutRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_logout") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/saml/logout")) + path.WriteString("http://") + path.WriteString("/_security/saml/logout") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_logout") + if reader := instrument.RecordRequestBody(ctx, "security.saml_logout", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_logout") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlLogout) WithContext(v context.Context) func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlLogout) WithPretty() func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlLogout) WithHuman() func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySamlLogout) WithErrorTrace() func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySamlLogout) WithFilterPath(v ...string) func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SecuritySamlLogout) WithHeader(h map[string]string) func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlLogout) WithOpaqueID(s string) func(*SecuritySamlLogoutRequest) { + return func(r *SecuritySamlLogoutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_prepare_authentication.go new file mode 100644 index 000000000..8e2692108 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_prepare_authentication.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySamlPrepareAuthenticationFunc(t Transport) SecuritySamlPrepareAuthentication { + return func(body io.Reader, o ...func(*SecuritySamlPrepareAuthenticationRequest)) (*Response, error) { + var r = SecuritySamlPrepareAuthenticationRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlPrepareAuthentication - Creates a SAML authentication request +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html. +type SecuritySamlPrepareAuthentication func(body io.Reader, o ...func(*SecuritySamlPrepareAuthenticationRequest)) (*Response, error) + +// SecuritySamlPrepareAuthenticationRequest configures the Security Saml Prepare Authentication API request. +type SecuritySamlPrepareAuthenticationRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
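+//
+// A hypothetical invocation sketch (the es client variable and realm name
+// are illustrative):
+//
+//	res, err := es.Security.SamlPrepareAuthentication(
+//		strings.NewReader(`{"realm":"saml1"}`),
+//	)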
+func (r SecuritySamlPrepareAuthenticationRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_prepare_authentication") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/saml/prepare")) + path.WriteString("http://") + path.WriteString("/_security/saml/prepare") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_prepare_authentication") + if reader := instrument.RecordRequestBody(ctx, "security.saml_prepare_authentication", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_prepare_authentication") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlPrepareAuthentication) WithContext(v context.Context) func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlPrepareAuthentication) WithPretty() func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlPrepareAuthentication) WithHuman() func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySamlPrepareAuthentication) WithErrorTrace() func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SecuritySamlPrepareAuthentication) WithFilterPath(v ...string) func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecuritySamlPrepareAuthentication) WithHeader(h map[string]string) func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlPrepareAuthentication) WithOpaqueID(s string) func(*SecuritySamlPrepareAuthenticationRequest) { + return func(r *SecuritySamlPrepareAuthenticationRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_service_provider_metadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_service_provider_metadata.go new file mode 100644 index 000000000..02651c2f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.saml_service_provider_metadata.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecuritySamlServiceProviderMetadataFunc(t Transport) SecuritySamlServiceProviderMetadata { + return func(realm_name string, o ...func(*SecuritySamlServiceProviderMetadataRequest)) (*Response, error) { + var r = SecuritySamlServiceProviderMetadataRequest{RealmName: realm_name} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySamlServiceProviderMetadata - Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html. +type SecuritySamlServiceProviderMetadata func(realm_name string, o ...func(*SecuritySamlServiceProviderMetadataRequest)) (*Response, error) + +// SecuritySamlServiceProviderMetadataRequest configures the Security Saml Service Provider Metadata API request. 
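+//
+// Requests are normally issued through the SecuritySamlServiceProviderMetadata
+// function type, e.g. (the es client variable and realm name are illustrative):
+//
+//	res, err := es.Security.SamlServiceProviderMetadata("saml1")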
+type SecuritySamlServiceProviderMetadataRequest struct { + RealmName string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecuritySamlServiceProviderMetadataRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_service_provider_metadata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_security") + 1 + len("saml") + 1 + len("metadata") + 1 + len(r.RealmName)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("metadata") + path.WriteString("/") + path.WriteString(r.RealmName) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "realm_name", r.RealmName) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_service_provider_metadata") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_service_provider_metadata") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySamlServiceProviderMetadata) WithContext(v context.Context) func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecuritySamlServiceProviderMetadata) WithPretty() func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySamlServiceProviderMetadata) WithHuman() func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SecuritySamlServiceProviderMetadata) WithErrorTrace() func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySamlServiceProviderMetadata) WithFilterPath(v ...string) func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecuritySamlServiceProviderMetadata) WithHeader(h map[string]string) func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySamlServiceProviderMetadata) WithOpaqueID(s string) func(*SecuritySamlServiceProviderMetadataRequest) { + return func(r *SecuritySamlServiceProviderMetadataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.suggest_user_profiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.suggest_user_profiles.go new file mode 100644 index 000000000..b218e109a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.suggest_user_profiles.go @@ -0,0 +1,245 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecuritySuggestUserProfilesFunc(t Transport) SecuritySuggestUserProfiles { + return func(o ...func(*SecuritySuggestUserProfilesRequest)) (*Response, error) { + var r = SecuritySuggestUserProfilesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecuritySuggestUserProfiles - Get suggestions for user profiles that match specified search criteria. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-suggest-user-profile.html. +type SecuritySuggestUserProfiles func(o ...func(*SecuritySuggestUserProfilesRequest)) (*Response, error) + +// SecuritySuggestUserProfilesRequest configures the Security Suggest User Profiles API request. 
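+//
+// Requests are normally issued through the SecuritySuggestUserProfiles
+// function type, e.g. (the es client variable, search term, and data key are
+// illustrative):
+//
+//	res, err := es.Security.SuggestUserProfiles(
+//		es.Security.SuggestUserProfiles.WithBody(strings.NewReader(`{"name":"jac"}`)),
+//		es.Security.SuggestUserProfiles.WithData("app1"),
+//	)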
+type SecuritySuggestUserProfilesRequest struct { + Body io.Reader + + Data []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecuritySuggestUserProfilesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.suggest_user_profiles") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_security/profile/_suggest")) + path.WriteString("http://") + path.WriteString("/_security/profile/_suggest") + + params = make(map[string]string) + + if len(r.Data) > 0 { + params["data"] = strings.Join(r.Data, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.suggest_user_profiles") + if reader := instrument.RecordRequestBody(ctx, "security.suggest_user_profiles", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.suggest_user_profiles") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecuritySuggestUserProfiles) WithContext(v context.Context) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.ctx = v + } +} + +// WithBody - The suggestion definition for user profiles. +func (f SecuritySuggestUserProfiles) WithBody(v io.Reader) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.Body = v + } +} + +// WithData - a list of keys for which the corresponding application data are retrieved.. +func (f SecuritySuggestUserProfiles) WithData(v ...string) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.Data = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SecuritySuggestUserProfiles) WithPretty() func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecuritySuggestUserProfiles) WithHuman() func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecuritySuggestUserProfiles) WithErrorTrace() func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecuritySuggestUserProfiles) WithFilterPath(v ...string) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecuritySuggestUserProfiles) WithHeader(h map[string]string) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecuritySuggestUserProfiles) WithOpaqueID(s string) func(*SecuritySuggestUserProfilesRequest) { + return func(r *SecuritySuggestUserProfilesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_api_key.go new file mode 100644 index 000000000..7cd57113b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_api_key.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityUpdateAPIKeyFunc(t Transport) SecurityUpdateAPIKey { + return func(id string, o ...func(*SecurityUpdateAPIKeyRequest)) (*Response, error) { + var r = SecurityUpdateAPIKeyRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateAPIKey - Updates attributes of an existing API key. 
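+//
+// A hypothetical invocation sketch (the es client variable, key ID, and body
+// are placeholders):
+//
+//	res, err := es.Security.UpdateAPIKey(
+//		"<api key id>",
+//		es.Security.UpdateAPIKey.WithBody(strings.NewReader(`{"metadata":{"env":"dev"}}`)),
+//	)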
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html. +type SecurityUpdateAPIKey func(id string, o ...func(*SecurityUpdateAPIKeyRequest)) (*Response, error) + +// SecurityUpdateAPIKeyRequest configures the Security UpdateAPI Key API request. +type SecurityUpdateAPIKeyRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityUpdateAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("api_key") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.update_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityUpdateAPIKey) WithContext(v context.Context) func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.ctx = v + } +} + +// WithBody - The API key request to update attributes of an API key.. 
+func (f SecurityUpdateAPIKey) WithBody(v io.Reader) func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityUpdateAPIKey) WithPretty() func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityUpdateAPIKey) WithHuman() func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateAPIKey) WithErrorTrace() func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateAPIKey) WithFilterPath(v ...string) func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateAPIKey) WithHeader(h map[string]string) func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityUpdateAPIKey) WithOpaqueID(s string) func(*SecurityUpdateAPIKeyRequest) { + return func(r *SecurityUpdateAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_cross_cluster_api_key.go new file mode 100644 index 000000000..7caeb0e42 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_cross_cluster_api_key.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityUpdateCrossClusterAPIKeyFunc(t Transport) SecurityUpdateCrossClusterAPIKey { + return func(id string, body io.Reader, o ...func(*SecurityUpdateCrossClusterAPIKeyRequest)) (*Response, error) { + var r = SecurityUpdateCrossClusterAPIKeyRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateCrossClusterAPIKey - Updates attributes of an existing cross-cluster API key. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html. +type SecurityUpdateCrossClusterAPIKey func(id string, body io.Reader, o ...func(*SecurityUpdateCrossClusterAPIKeyRequest)) (*Response, error) + +// SecurityUpdateCrossClusterAPIKeyRequest configures the Security Update Cross ClusterAPI Key API request. +type SecurityUpdateCrossClusterAPIKeyRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityUpdateCrossClusterAPIKeyRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_cross_cluster_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("cross_cluster") + 1 + len("api_key") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_cross_cluster_api_key") + 
if reader := instrument.RecordRequestBody(ctx, "security.update_cross_cluster_api_key", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_cross_cluster_api_key") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityUpdateCrossClusterAPIKey) WithContext(v context.Context) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityUpdateCrossClusterAPIKey) WithPretty() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityUpdateCrossClusterAPIKey) WithHuman() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateCrossClusterAPIKey) WithErrorTrace() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateCrossClusterAPIKey) WithFilterPath(v ...string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateCrossClusterAPIKey) WithHeader(h map[string]string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityUpdateCrossClusterAPIKey) WithOpaqueID(s string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_settings.go new file mode 100644 index 000000000..9dbc2fc26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_settings.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newSecurityUpdateSettingsFunc(t Transport) SecurityUpdateSettings { + return func(body io.Reader, o ...func(*SecurityUpdateSettingsRequest)) (*Response, error) { + var r = SecurityUpdateSettingsRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateSettings - Update settings for the security system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html. +type SecurityUpdateSettings func(body io.Reader, o ...func(*SecurityUpdateSettingsRequest)) (*Response, error) + +// SecurityUpdateSettingsRequest configures the Security Update Settings API request. +type SecurityUpdateSettingsRequest struct { + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
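+//
+// A hypothetical invocation sketch (the es client variable and the settings
+// body are illustrative):
+//
+//	res, err := es.Security.UpdateSettings(
+//		strings.NewReader(`{"security":{"index.auto_expand_replicas":"0-all"}}`),
+//		es.Security.UpdateSettings.WithMasterTimeout(30*time.Second),
+//	)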
+func (r SecurityUpdateSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_security/settings")) + path.WriteString("http://") + path.WriteString("/_security/settings") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_settings") + if reader := instrument.RecordRequestBody(ctx, "security.update_settings", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityUpdateSettings) WithContext(v context.Context) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for connection to master. +func (f SecurityUpdateSettings) WithMasterTimeout(v time.Duration) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgements from all nodes. +func (f SecurityUpdateSettings) WithTimeout(v time.Duration) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityUpdateSettings) WithPretty() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityUpdateSettings) WithHuman() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateSettings) WithErrorTrace() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateSettings) WithFilterPath(v ...string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateSettings) WithHeader(h map[string]string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityUpdateSettings) WithOpaqueID(s string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_user_profile_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_user_profile_data.go new file mode 100644 index 000000000..9fa90c210 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.security.update_user_profile_data.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSecurityUpdateUserProfileDataFunc(t Transport) SecurityUpdateUserProfileData { + return func(body io.Reader, uid string, o ...func(*SecurityUpdateUserProfileDataRequest)) (*Response, error) { + var r = SecurityUpdateUserProfileDataRequest{Body: body, UID: uid} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateUserProfileData - Update application specific data for the user profile of the given unique ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html. 
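+//
+// A minimal usage sketch: the UID is interpolated into the request path
+// (/_security/profile/{uid}/_data) and the body carries the
+// application-specific data. Here es (*elasticsearch.Client), ctx
+// (context.Context), the UID, and the payload are assumed placeholders:
+//
+//	res, err := SecurityUpdateUserProfileDataRequest{
+//	    UID:  "u_example_profile_uid",
+//	    Body: strings.NewReader(`{"data": {"app1": {"theme": "dark"}}}`),
+//	}.Do(ctx, es)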
+type SecurityUpdateUserProfileData func(body io.Reader, uid string, o ...func(*SecurityUpdateUserProfileDataRequest)) (*Response, error) + +// SecurityUpdateUserProfileDataRequest configures the Security Update User Profile Data API request. +type SecurityUpdateUserProfileDataRequest struct { + Body io.Reader + + UID string + + IfPrimaryTerm *int + IfSeqNo *int + Refresh string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SecurityUpdateUserProfileDataRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_user_profile_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("profile") + 1 + len(r.UID) + 1 + len("_data")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString(r.UID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.UID) + } + path.WriteString("/") + path.WriteString("_data") + + params = make(map[string]string) + + if r.IfPrimaryTerm != nil { + params["if_primary_term"] = strconv.FormatInt(int64(*r.IfPrimaryTerm), 10) + } + + if r.IfSeqNo != nil { + params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) + } + + if r.Refresh != "" { + params["refresh"] = r.Refresh + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_user_profile_data") + if reader := instrument.RecordRequestBody(ctx, "security.update_user_profile_data", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_user_profile_data") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f SecurityUpdateUserProfileData) WithContext(v context.Context) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.ctx = v + } +} + +// WithIfPrimaryTerm - only perform the update operation if the last operation that has changed the document has the specified primary term. +func (f SecurityUpdateUserProfileData) WithIfPrimaryTerm(v int) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.IfPrimaryTerm = &v + } +} + +// WithIfSeqNo - only perform the update operation if the last operation that has changed the document has the specified sequence number. +func (f SecurityUpdateUserProfileData) WithIfSeqNo(v int) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.IfSeqNo = &v + } +} + +// WithRefresh - if `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.. +func (f SecurityUpdateUserProfileData) WithRefresh(v string) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.Refresh = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityUpdateUserProfileData) WithPretty() func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityUpdateUserProfileData) WithHuman() func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateUserProfileData) WithErrorTrace() func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateUserProfileData) WithFilterPath(v ...string) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateUserProfileData) WithHeader(h map[string]string) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityUpdateUserProfileData) WithOpaqueID(s string) func(*SecurityUpdateUserProfileDataRequest) { + return func(r *SecurityUpdateUserProfileDataRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.delete_lifecycle.go new file mode 100644 index 000000000..d53efec9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.delete_lifecycle.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmDeleteLifecycleFunc(t Transport) SlmDeleteLifecycle { + return func(policy_id string, o ...func(*SlmDeleteLifecycleRequest)) (*Response, error) { + var r = SlmDeleteLifecycleRequest{PolicyID: policy_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmDeleteLifecycle - Deletes an existing snapshot lifecycle policy. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html. +type SlmDeleteLifecycle func(policy_id string, o ...func(*SlmDeleteLifecycleRequest)) (*Response, error) + +// SlmDeleteLifecycleRequest configures the Slm Delete Lifecycle API request. +type SlmDeleteLifecycleRequest struct { + PolicyID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
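+//
+// A minimal usage sketch: the request struct can be executed directly
+// against any Transport, such as an *elasticsearch.Client. es, ctx, and
+// the policy name below are assumed placeholders:
+//
+//	res, err := SlmDeleteLifecycleRequest{
+//	    PolicyID: "daily-snapshots",
+//	}.Do(ctx, es)
+//	if err == nil {
+//	    defer res.Body.Close()
+//	}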
+func (r SlmDeleteLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.delete_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_slm") + 1 + len("policy") + 1 + len(r.PolicyID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.PolicyID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy_id", r.PolicyID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.delete_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.delete_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmDeleteLifecycle) WithContext(v context.Context) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmDeleteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmDeleteLifecycle) WithTimeout(v time.Duration) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmDeleteLifecycle) WithPretty() func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SlmDeleteLifecycle) WithHuman() func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmDeleteLifecycle) WithErrorTrace() func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmDeleteLifecycle) WithFilterPath(v ...string) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmDeleteLifecycle) WithHeader(h map[string]string) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmDeleteLifecycle) WithOpaqueID(s string) func(*SlmDeleteLifecycleRequest) { + return func(r *SlmDeleteLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_lifecycle.go new file mode 100644 index 000000000..ce9968450 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_lifecycle.go @@ -0,0 +1,253 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmExecuteLifecycleFunc(t Transport) SlmExecuteLifecycle { + return func(policy_id string, o ...func(*SlmExecuteLifecycleRequest)) (*Response, error) { + var r = SlmExecuteLifecycleRequest{PolicyID: policy_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmExecuteLifecycle - Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html. +type SlmExecuteLifecycle func(policy_id string, o ...func(*SlmExecuteLifecycleRequest)) (*Response, error) + +// SlmExecuteLifecycleRequest configures the Slm Execute Lifecycle API request. 
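+//
+// PolicyID is taken from the positional argument of the wrapper function
+// and becomes part of the request path (/_slm/policy/{policy_id}/_execute);
+// MasterTimeout and Timeout map to the master_timeout and timeout query
+// parameters. A minimal usage sketch, with es, ctx, and the policy name
+// assumed as placeholders:
+//
+//	res, err := SlmExecuteLifecycleRequest{
+//	    PolicyID:      "daily-snapshots",
+//	    MasterTimeout: 30 * time.Second,
+//	}.Do(ctx, es)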
+type SlmExecuteLifecycleRequest struct { + PolicyID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmExecuteLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.execute_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_slm") + 1 + len("policy") + 1 + len(r.PolicyID) + 1 + len("_execute")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.PolicyID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy_id", r.PolicyID) + } + path.WriteString("/") + path.WriteString("_execute") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.execute_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.execute_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmExecuteLifecycle) WithContext(v context.Context) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteLifecycle) WithMasterTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmExecuteLifecycle) WithTimeout(v time.Duration) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SlmExecuteLifecycle) WithPretty() func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmExecuteLifecycle) WithHuman() func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmExecuteLifecycle) WithErrorTrace() func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmExecuteLifecycle) WithFilterPath(v ...string) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmExecuteLifecycle) WithHeader(h map[string]string) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmExecuteLifecycle) WithOpaqueID(s string) func(*SlmExecuteLifecycleRequest) { + return func(r *SlmExecuteLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_retention.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_retention.go new file mode 100644 index 000000000..753d04bbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.execute_retention.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmExecuteRetentionFunc(t Transport) SlmExecuteRetention { + return func(o ...func(*SlmExecuteRetentionRequest)) (*Response, error) { + var r = SlmExecuteRetentionRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmExecuteRetention - Deletes any snapshots that are expired according to the policy's retention rules. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html. 
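+//
+// A minimal usage sketch: the API takes no path parameters, so the
+// zero-value request suffices (es is an assumed *elasticsearch.Client,
+// ctx an assumed context.Context):
+//
+//	res, err := SlmExecuteRetentionRequest{}.Do(ctx, es)
+//	if err == nil {
+//	    defer res.Body.Close()
+//	}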
+type SlmExecuteRetention func(o ...func(*SlmExecuteRetentionRequest)) (*Response, error) + +// SlmExecuteRetentionRequest configures the Slm Execute Retention API request. +type SlmExecuteRetentionRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmExecuteRetentionRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.execute_retention") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_slm/_execute_retention")) + path.WriteString("http://") + path.WriteString("/_slm/_execute_retention") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.execute_retention") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.execute_retention") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmExecuteRetention) WithContext(v context.Context) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmExecuteRetention) WithMasterTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmExecuteRetention) WithTimeout(v time.Duration) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SlmExecuteRetention) WithPretty() func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmExecuteRetention) WithHuman() func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmExecuteRetention) WithErrorTrace() func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmExecuteRetention) WithFilterPath(v ...string) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmExecuteRetention) WithHeader(h map[string]string) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmExecuteRetention) WithOpaqueID(s string) func(*SlmExecuteRetentionRequest) { + return func(r *SlmExecuteRetentionRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_lifecycle.go new file mode 100644 index 000000000..1b814af6b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_lifecycle.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmGetLifecycleFunc(t Transport) SlmGetLifecycle { + return func(o ...func(*SlmGetLifecycleRequest)) (*Response, error) { + var r = SlmGetLifecycleRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmGetLifecycle - Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html. 
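+//
+// A minimal usage sketch: PolicyID is optional; when empty, all policies
+// are returned, otherwise the comma-joined IDs are appended to the path.
+// es, ctx, and the policy names below are assumed placeholders:
+//
+//	res, err := SlmGetLifecycleRequest{
+//	    PolicyID: []string{"daily-snapshots", "weekly-snapshots"},
+//	}.Do(ctx, es)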
+type SlmGetLifecycle func(o ...func(*SlmGetLifecycleRequest)) (*Response, error) + +// SlmGetLifecycleRequest configures the Slm Get Lifecycle API request. +type SlmGetLifecycleRequest struct { + PolicyID []string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmGetLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_slm") + 1 + len("policy") + 1 + len(strings.Join(r.PolicyID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + if len(r.PolicyID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.PolicyID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy_id", strings.Join(r.PolicyID, ",")) + } + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_lifecycle") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmGetLifecycle) WithContext(v context.Context) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.ctx = v + } +} + +// WithPolicyID - comma-separated list of snapshot lifecycle policies to retrieve. +func (f SlmGetLifecycle) WithPolicyID(v ...string) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.PolicyID = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. 
+func (f SlmGetLifecycle) WithMasterTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmGetLifecycle) WithTimeout(v time.Duration) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmGetLifecycle) WithPretty() func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmGetLifecycle) WithHuman() func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmGetLifecycle) WithErrorTrace() func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmGetLifecycle) WithFilterPath(v ...string) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmGetLifecycle) WithHeader(h map[string]string) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmGetLifecycle) WithOpaqueID(s string) func(*SlmGetLifecycleRequest) { + return func(r *SlmGetLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_stats.go new file mode 100644 index 000000000..1dcd07fc3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_stats.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmGetStatsFunc(t Transport) SlmGetStats { + return func(o ...func(*SlmGetStatsRequest)) (*Response, error) { + var r = SlmGetStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmGetStats - Returns global and policy-level statistics about actions taken by snapshot lifecycle management. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html. +type SlmGetStats func(o ...func(*SlmGetStatsRequest)) (*Response, error) + +// SlmGetStatsRequest configures the Slm Get Stats API request. +type SlmGetStatsRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmGetStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_slm/stats")) + path.WriteString("http://") + path.WriteString("/_slm/stats") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
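+// The stored context is the one Do ultimately receives: the wrapper built
+// by newSlmGetStatsFunc calls r.Do(r.ctx, t). A minimal sketch of the
+// equivalent direct call, with es (*elasticsearch.Client) and ctx assumed:
+//
+//	res, err := SlmGetStatsRequest{}.Do(ctx, es)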
+func (f SlmGetStats) WithContext(v context.Context) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStats) WithMasterTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmGetStats) WithTimeout(v time.Duration) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmGetStats) WithPretty() func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmGetStats) WithHuman() func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmGetStats) WithErrorTrace() func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmGetStats) WithFilterPath(v ...string) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmGetStats) WithHeader(h map[string]string) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmGetStats) WithOpaqueID(s string) func(*SlmGetStatsRequest) { + return func(r *SlmGetStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_status.go new file mode 100644 index 000000000..6a025ab67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.get_status.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmGetStatusFunc(t Transport) SlmGetStatus { + return func(o ...func(*SlmGetStatusRequest)) (*Response, error) { + var r = SlmGetStatusRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmGetStatus - Retrieves the status of snapshot lifecycle management (SLM). +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html. +type SlmGetStatus func(o ...func(*SlmGetStatusRequest)) (*Response, error) + +// SlmGetStatusRequest configures the Slm Get Status API request. +type SlmGetStatusRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmGetStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_slm/status")) + path.WriteString("http://") + path.WriteString("/_slm/status") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
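+// As with the other SLM helpers, the wrapper from newSlmGetStatusFunc
+// forwards the stored context to Do. A minimal sketch, with es
+// (*elasticsearch.Client) and ctx (context.Context) assumed:
+//
+//	res, err := SlmGetStatusRequest{}.Do(ctx, es)
+//	if err == nil {
+//	    defer res.Body.Close()
+//	}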
+func (f SlmGetStatus) WithContext(v context.Context) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmGetStatus) WithMasterTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmGetStatus) WithTimeout(v time.Duration) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmGetStatus) WithPretty() func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmGetStatus) WithHuman() func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmGetStatus) WithErrorTrace() func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmGetStatus) WithFilterPath(v ...string) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmGetStatus) WithHeader(h map[string]string) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmGetStatus) WithOpaqueID(s string) func(*SlmGetStatusRequest) { + return func(r *SlmGetStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.put_lifecycle.go new file mode 100644 index 000000000..afcb90fa3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.put_lifecycle.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newSlmPutLifecycleFunc(t Transport) SlmPutLifecycle { + return func(policy_id string, o ...func(*SlmPutLifecycleRequest)) (*Response, error) { + var r = SlmPutLifecycleRequest{PolicyID: policy_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmPutLifecycle - Creates or updates a snapshot lifecycle policy. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html. +type SlmPutLifecycle func(policy_id string, o ...func(*SlmPutLifecycleRequest)) (*Response, error) + +// SlmPutLifecycleRequest configures the Slm Put Lifecycle API request. +type SlmPutLifecycleRequest struct { + Body io.Reader + + PolicyID string + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmPutLifecycleRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.put_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_slm") + 1 + len("policy") + 1 + len(r.PolicyID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + path.WriteString(r.PolicyID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy_id", r.PolicyID) + } + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.put_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "slm.put_lifecycle", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.put_lifecycle") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmPutLifecycle) WithContext(v context.Context) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.ctx = v + } +} + +// WithBody - The snapshot lifecycle policy definition to register. +func (f SlmPutLifecycle) WithBody(v io.Reader) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Body = v + } +} + +// WithMasterTimeout - explicit operation timeout for connection to master node. +func (f SlmPutLifecycle) WithMasterTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit operation timeout. +func (f SlmPutLifecycle) WithTimeout(v time.Duration) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmPutLifecycle) WithPretty() func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmPutLifecycle) WithHuman() func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmPutLifecycle) WithErrorTrace() func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmPutLifecycle) WithFilterPath(v ...string) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmPutLifecycle) WithHeader(h map[string]string) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmPutLifecycle) WithOpaqueID(s string) func(*SlmPutLifecycleRequest) { + return func(r *SlmPutLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.start.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.start.go new file mode 100644 index 000000000..faf89c2ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.start.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmStartFunc(t Transport) SlmStart { + return func(o ...func(*SlmStartRequest)) (*Response, error) { + var r = SlmStartRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmStart - Turns on snapshot lifecycle management (SLM). +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html. +type SlmStart func(o ...func(*SlmStartRequest)) (*Response, error) + +// SlmStartRequest configures the Slm Start API request. +type SlmStartRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmStartRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_slm/start")) + path.WriteString("http://") + path.WriteString("/_slm/start") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.start") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.start") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + 
StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmStart) WithContext(v context.Context) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. +func (f SlmStart) WithMasterTimeout(v time.Duration) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. +func (f SlmStart) WithTimeout(v time.Duration) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmStart) WithPretty() func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmStart) WithHuman() func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmStart) WithErrorTrace() func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmStart) WithFilterPath(v ...string) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmStart) WithHeader(h map[string]string) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmStart) WithOpaqueID(s string) func(*SlmStartRequest) { + return func(r *SlmStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.stop.go new file mode 100644 index 000000000..4944b44cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.slm.stop.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
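//
// (Editor's illustration, not part of the generated file.) This file defines
// SlmStop, the counterpart of SlmStart above; a minimal sketch of toggling the
// SLM service around maintenance work, assuming slmStart and slmStop are the
// corresponding values wired on the client and ctx is an existing
// context.Context:
//
//	if res, err := slmStop(slmStop.WithContext(ctx)); err == nil {
//		res.Body.Close()
//	}
//	// ... perform maintenance while SLM is stopped ...
//	if res, err := slmStart(slmStart.WithContext(ctx)); err == nil {
//		res.Body.Close()
//	}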
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSlmStopFunc(t Transport) SlmStop { + return func(o ...func(*SlmStopRequest)) (*Response, error) { + var r = SlmStopRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SlmStop - Turns off snapshot lifecycle management (SLM). +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html. +type SlmStop func(o ...func(*SlmStopRequest)) (*Response, error) + +// SlmStopRequest configures the Slm Stop API request. +type SlmStopRequest struct { + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SlmStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_slm/stop")) + path.WriteString("http://") + path.WriteString("/_slm/stop") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "slm.stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.stop") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SlmStop) WithContext(v context.Context) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - timeout for processing on master node. 
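//
// For example (editor's sketch; slmStop stands for the client's esapi.SlmStop
// value, and the duration is illustrative):
//
//	res, err := slmStop(
//		slmStop.WithMasterTimeout(30*time.Second),
//		slmStop.WithContext(context.Background()),
//	)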
+func (f SlmStop) WithMasterTimeout(v time.Duration) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - timeout for acknowledgement of update from all nodes in cluster. +func (f SlmStop) WithTimeout(v time.Duration) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SlmStop) WithPretty() func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SlmStop) WithHuman() func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SlmStop) WithErrorTrace() func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SlmStop) WithFilterPath(v ...string) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SlmStop) WithHeader(h map[string]string) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SlmStop) WithOpaqueID(s string) func(*SlmStopRequest) { + return func(r *SlmStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.clear_cursor.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.clear_cursor.go new file mode 100644 index 000000000..2b9773ad7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.clear_cursor.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSQLClearCursorFunc(t Transport) SQLClearCursor { + return func(body io.Reader, o ...func(*SQLClearCursorRequest)) (*Response, error) { + var r = SQLClearCursorRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLClearCursor - Clears the SQL cursor +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html. 
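//
// A minimal usage sketch (editor's illustration; sqlClearCursor stands for the
// client's esapi.SQLClearCursor value, and the cursor string is a hypothetical
// value returned by a previous SQL query):
//
//	res, err := sqlClearCursor(
//		strings.NewReader(`{"cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAE="}`),
//		sqlClearCursor.WithContext(context.Background()),
//	)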
+type SQLClearCursor func(body io.Reader, o ...func(*SQLClearCursorRequest)) (*Response, error) + +// SQLClearCursorRequest configures the SQL Clear Cursor API request. +type SQLClearCursorRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SQLClearCursorRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.clear_cursor") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_sql/close")) + path.WriteString("http://") + path.WriteString("/_sql/close") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.clear_cursor") + if reader := instrument.RecordRequestBody(ctx, "sql.clear_cursor", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.clear_cursor") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLClearCursor) WithContext(v context.Context) func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLClearCursor) WithPretty() func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLClearCursor) WithHuman() func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SQLClearCursor) WithErrorTrace() func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f SQLClearCursor) WithFilterPath(v ...string) func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SQLClearCursor) WithHeader(h map[string]string) func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SQLClearCursor) WithOpaqueID(s string) func(*SQLClearCursorRequest) { + return func(r *SQLClearCursorRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.delete_async.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.delete_async.go new file mode 100644 index 000000000..c91b9c4b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.delete_async.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSQLDeleteAsyncFunc(t Transport) SQLDeleteAsync { + return func(id string, o ...func(*SQLDeleteAsyncRequest)) (*Response, error) { + var r = SQLDeleteAsyncRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLDeleteAsync - Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html. +type SQLDeleteAsync func(id string, o ...func(*SQLDeleteAsyncRequest)) (*Response, error) + +// SQLDeleteAsyncRequest configures the SQL Delete Async API request. +type SQLDeleteAsyncRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
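//
// A minimal sketch of invoking this API (editor's illustration; sqlDeleteAsync
// stands for the client's esapi.SQLDeleteAsync value, and the search ID is
// hypothetical):
//
//	res, err := sqlDeleteAsync(
//		"FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=",
//		sqlDeleteAsync.WithContext(context.Background()),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()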
+func (r SQLDeleteAsyncRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.delete_async") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_sql") + 1 + len("async") + 1 + len("delete") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString("delete") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.delete_async") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.delete_async") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLDeleteAsync) WithContext(v context.Context) func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLDeleteAsync) WithPretty() func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLDeleteAsync) WithHuman() func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SQLDeleteAsync) WithErrorTrace() func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SQLDeleteAsync) WithFilterPath(v ...string) func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SQLDeleteAsync) WithHeader(h map[string]string) func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SQLDeleteAsync) WithOpaqueID(s string) func(*SQLDeleteAsyncRequest) { + return func(r *SQLDeleteAsyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async.go new file mode 100644 index 000000000..50e9d66da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async.go @@ -0,0 +1,275 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newSQLGetAsyncFunc(t Transport) SQLGetAsync { + return func(id string, o ...func(*SQLGetAsyncRequest)) (*Response, error) { + var r = SQLGetAsyncRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLGetAsync - Returns the current status and available results for an async SQL search or stored synchronous SQL search +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html. +type SQLGetAsync func(id string, o ...func(*SQLGetAsyncRequest)) (*Response, error) + +// SQLGetAsyncRequest configures the SQL Get Async API request. +type SQLGetAsyncRequest struct { + DocumentID string + + Delimiter string + Format string + KeepAlive time.Duration + WaitForCompletionTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
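//
// A minimal sketch (editor's illustration; sqlGetAsync stands for the client's
// esapi.SQLGetAsync value, the search ID is hypothetical, and the durations
// are illustrative):
//
//	res, err := sqlGetAsync(
//		"FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=",
//		sqlGetAsync.WithWaitForCompletionTimeout(2*time.Second),
//		sqlGetAsync.WithKeepAlive(5*time.Minute),
//		sqlGetAsync.WithContext(context.Background()),
//	)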
+func (r SQLGetAsyncRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.get_async") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_sql") + 1 + len("async") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.KeepAlive != 0 { + params["keep_alive"] = formatDuration(r.KeepAlive) + } + + if r.WaitForCompletionTimeout != 0 { + params["wait_for_completion_timeout"] = formatDuration(r.WaitForCompletionTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.get_async") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.get_async") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLGetAsync) WithContext(v context.Context) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.ctx = v + } +} + +// WithDelimiter - separator for csv results. +func (f SQLGetAsync) WithDelimiter(v string) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.Delimiter = v + } +} + +// WithFormat - short version of the accept header, e.g. json, yaml. +func (f SQLGetAsync) WithFormat(v string) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.Format = v + } +} + +// WithKeepAlive - retention period for the search and its results. +func (f SQLGetAsync) WithKeepAlive(v time.Duration) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.KeepAlive = v + } +} + +// WithWaitForCompletionTimeout - duration to wait for complete results. 
+func (f SQLGetAsync) WithWaitForCompletionTimeout(v time.Duration) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.WaitForCompletionTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLGetAsync) WithPretty() func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLGetAsync) WithHuman() func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SQLGetAsync) WithErrorTrace() func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SQLGetAsync) WithFilterPath(v ...string) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SQLGetAsync) WithHeader(h map[string]string) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SQLGetAsync) WithOpaqueID(s string) func(*SQLGetAsyncRequest) { + return func(r *SQLGetAsyncRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async_status.go new file mode 100644 index 000000000..f5ff09437 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.get_async_status.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSQLGetAsyncStatusFunc(t Transport) SQLGetAsyncStatus { + return func(id string, o ...func(*SQLGetAsyncStatusRequest)) (*Response, error) { + var r = SQLGetAsyncStatusRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLGetAsyncStatus - Returns the current status of an async SQL search or a stored synchronous SQL search +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html. 
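//
// A minimal usage sketch (editor's illustration; sqlGetAsyncStatus stands for
// the client's esapi.SQLGetAsyncStatus value, and the search ID is
// hypothetical):
//
//	res, err := sqlGetAsyncStatus(
//		"FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=",
//		sqlGetAsyncStatus.WithContext(context.Background()),
//	)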
+type SQLGetAsyncStatus func(id string, o ...func(*SQLGetAsyncStatusRequest)) (*Response, error) + +// SQLGetAsyncStatusRequest configures the SQL Get Async Status API request. +type SQLGetAsyncStatusRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r SQLGetAsyncStatusRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.get_async_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_sql") + 1 + len("async") + 1 + len("status") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + path.WriteString("status") + path.WriteString("/") + path.WriteString(r.DocumentID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.DocumentID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.get_async_status") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.get_async_status") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLGetAsyncStatus) WithContext(v context.Context) func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLGetAsyncStatus) WithPretty() func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLGetAsyncStatus) WithHuman() func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SQLGetAsyncStatus) WithErrorTrace() func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SQLGetAsyncStatus) WithFilterPath(v ...string) func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SQLGetAsyncStatus) WithHeader(h map[string]string) func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SQLGetAsyncStatus) WithOpaqueID(s string) func(*SQLGetAsyncStatusRequest) { + return func(r *SQLGetAsyncStatusRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.query.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.query.go new file mode 100644 index 000000000..e9e5a1adb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.query.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSQLQueryFunc(t Transport) SQLQuery { + return func(body io.Reader, o ...func(*SQLQueryRequest)) (*Response, error) { + var r = SQLQueryRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLQuery - Executes a SQL request +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html. +type SQLQuery func(body io.Reader, o ...func(*SQLQueryRequest)) (*Response, error) + +// SQLQueryRequest configures the SQL Query API request. +type SQLQueryRequest struct { + Body io.Reader + + Format string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
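//
// A minimal sketch of running a query (editor's illustration; sqlQuery stands
// for the client's esapi.SQLQuery value, and the index name is hypothetical):
//
//	res, err := sqlQuery(
//		strings.NewReader(`{"query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5"}`),
//		sqlQuery.WithFormat("json"),
//		sqlQuery.WithContext(context.Background()),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()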
+func (r SQLQueryRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_sql")) + path.WriteString("http://") + path.WriteString("/_sql") + + params = make(map[string]string) + + if r.Format != "" { + params["format"] = r.Format + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.query") + if reader := instrument.RecordRequestBody(ctx, "sql.query", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.query") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLQuery) WithContext(v context.Context) func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.ctx = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. +func (f SQLQuery) WithFormat(v string) func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.Format = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLQuery) WithPretty() func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLQuery) WithHuman() func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SQLQuery) WithErrorTrace() func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SQLQuery) WithFilterPath(v ...string) func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SQLQuery) WithHeader(h map[string]string) func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SQLQuery) WithOpaqueID(s string) func(*SQLQueryRequest) { + return func(r *SQLQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.translate.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.translate.go new file mode 100644 index 000000000..6f99ed5d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.sql.translate.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSQLTranslateFunc(t Transport) SQLTranslate { + return func(body io.Reader, o ...func(*SQLTranslateRequest)) (*Response, error) { + var r = SQLTranslateRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SQLTranslate - Translates SQL into Elasticsearch queries +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html. +type SQLTranslate func(body io.Reader, o ...func(*SQLTranslateRequest)) (*Response, error) + +// SQLTranslateRequest configures the SQL Translate API request. +type SQLTranslateRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
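//
// A minimal sketch (editor's illustration; sqlTranslate stands for the
// client's esapi.SQLTranslate value, and the query body is hypothetical):
//
//	res, err := sqlTranslate(
//		strings.NewReader(`{"query": "SELECT * FROM library ORDER BY page_count DESC", "fetch_size": 10}`),
//		sqlTranslate.WithContext(context.Background()),
//	)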
+func (r SQLTranslateRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.translate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_sql/translate")) + path.WriteString("http://") + path.WriteString("/_sql/translate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "sql.translate") + if reader := instrument.RecordRequestBody(ctx, "sql.translate", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.translate") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SQLTranslate) WithContext(v context.Context) func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SQLTranslate) WithPretty() func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SQLTranslate) WithHuman() func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SQLTranslate) WithErrorTrace() func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SQLTranslate) WithFilterPath(v ...string) func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SQLTranslate) WithHeader(h map[string]string) func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SQLTranslate) WithOpaqueID(s string) func(*SQLTranslateRequest) { + return func(r *SQLTranslateRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ssl.certificates.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ssl.certificates.go new file mode 100644 index 000000000..171d12835 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.ssl.certificates.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSSLCertificatesFunc(t Transport) SSLCertificates { + return func(o ...func(*SSLCertificatesRequest)) (*Response, error) { + var r = SSLCertificatesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SSLCertificates - Retrieves information about the X.509 certificates used to encrypt communications in the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html. +type SSLCertificates func(o ...func(*SSLCertificatesRequest)) (*Response, error) + +// SSLCertificatesRequest configures the SSL Certificates API request. +type SSLCertificatesRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
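//
// A minimal sketch (editor's illustration; sslCertificates stands for the
// client's esapi.SSLCertificates value; the API takes no required arguments):
//
//	res, err := sslCertificates(
//		sslCertificates.WithContext(context.Background()),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()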
+func (r SSLCertificatesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ssl.certificates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_ssl/certificates")) + path.WriteString("http://") + path.WriteString("/_ssl/certificates") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "ssl.certificates") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ssl.certificates") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SSLCertificates) WithContext(v context.Context) func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SSLCertificates) WithPretty() func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SSLCertificates) WithHuman() func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SSLCertificates) WithErrorTrace() func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SSLCertificates) WithFilterPath(v ...string) func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SSLCertificates) WithHeader(h map[string]string) func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SSLCertificates) WithOpaqueID(s string) func(*SSLCertificatesRequest) { + return func(r *SSLCertificatesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_field_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_field_structure.go new file mode 100644 index 000000000..36074d647 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_field_structure.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTextStructureFindFieldStructureFunc(t Transport) TextStructureFindFieldStructure { + return func(index string, field string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) { + var r = TextStructureFindFieldStructureRequest{Index: index, Field: field} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TextStructureFindFieldStructure - Finds the structure of a text field in an index. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/find-field-structure.html. +type TextStructureFindFieldStructure func(index string, field string, o ...func(*TextStructureFindFieldStructureRequest)) (*Response, error) + +// TextStructureFindFieldStructureRequest configures the Text Structure Find Field Structure API request. +type TextStructureFindFieldStructureRequest struct { + ColumnNames []string + Delimiter string + DocumentsToSample *int + EcsCompatibility string + Explain *bool + Field string + Format string + GrokPattern string + Index string + Quote string + ShouldTrimFields *bool + Timeout time.Duration + TimestampField string + TimestampFormat string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
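The SSLCertificates endpoint above completes the shape every generated file in this drop follows: the API is a func type, and each With* helper returns a closure that mutates the request struct before Do builds and performs the HTTP call. The literal "http://" written into the path builder (and the 7 accounted for in path.Grow) appears to be a scheme placeholder that the transport layer overwrites with the selected node's address; it comes from the upstream generator and is not a bug. A minimal usage sketch, assuming the client is created with elasticsearch.NewDefaultClient and the endpoint is exposed as es.SSL.Certificates (a field name this diff does not show):

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	// Each With* helper returns a closure; the generated wrapper applies
	// them to the request struct before Do builds the HTTP call.
	res, err := es.SSL.Certificates(
		es.SSL.Certificates.WithContext(context.Background()),
		es.SSL.Certificates.WithPretty(),
	)
	if err != nil {
		log.Fatalf("GET /_ssl/certificates: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}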
+func (r TextStructureFindFieldStructureRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_field_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_text_structure/find_field_structure")) + path.WriteString("http://") + path.WriteString("/_text_structure/find_field_structure") + + params = make(map[string]string) + + if len(r.ColumnNames) > 0 { + params["column_names"] = strings.Join(r.ColumnNames, ",") + } + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.DocumentsToSample != nil { + params["documents_to_sample"] = strconv.FormatInt(int64(*r.DocumentsToSample), 10) + } + + if r.EcsCompatibility != "" { + params["ecs_compatibility"] = r.EcsCompatibility + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.Field != "" { + params["field"] = r.Field + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.GrokPattern != "" { + params["grok_pattern"] = r.GrokPattern + } + + if r.Index != "" { + params["index"] = r.Index + } + + if r.Quote != "" { + params["quote"] = r.Quote + } + + if r.ShouldTrimFields != nil { + params["should_trim_fields"] = strconv.FormatBool(*r.ShouldTrimFields) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.TimestampField != "" { + params["timestamp_field"] = r.TimestampField + } + + if r.TimestampFormat != "" { + params["timestamp_format"] = r.TimestampFormat + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.find_field_structure") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.find_field_structure") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TextStructureFindFieldStructure) WithContext(v context.Context) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.ctx = v + } +} + +// WithColumnNames - optional parameter containing a comma separated list of the column names for a delimited file. 
+func (f TextStructureFindFieldStructure) WithColumnNames(v ...string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.ColumnNames = v + } +} + +// WithDelimiter - optional parameter to specify the delimiter character for a delimited file - must be a single character. +func (f TextStructureFindFieldStructure) WithDelimiter(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Delimiter = v + } +} + +// WithDocumentsToSample - how many documents should be included in the analysis. +func (f TextStructureFindFieldStructure) WithDocumentsToSample(v int) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.DocumentsToSample = &v + } +} + +// WithEcsCompatibility - optional parameter to specify the compatibility mode with ecs grok patterns - may be either 'v1' or 'disabled'. +func (f TextStructureFindFieldStructure) WithEcsCompatibility(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.EcsCompatibility = v + } +} + +// WithExplain - whether to include a commentary on how the structure was derived. +func (f TextStructureFindFieldStructure) WithExplain(v bool) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Explain = &v + } +} + +// WithField - the field that should be analyzed. +func (f TextStructureFindFieldStructure) WithField(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Field = v + } +} + +// WithFormat - optional parameter to specify the high level file format. +func (f TextStructureFindFieldStructure) WithFormat(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Format = v + } +} + +// WithGrokPattern - optional parameter to specify the grok pattern that should be used to extract fields from messages in a semi-structured text file. +func (f TextStructureFindFieldStructure) WithGrokPattern(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.GrokPattern = v + } +} + +// WithIndex - the index containing the analyzed field. +func (f TextStructureFindFieldStructure) WithIndex(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Index = v + } +} + +// WithQuote - optional parameter to specify the quote character for a delimited file - must be a single character. +func (f TextStructureFindFieldStructure) WithQuote(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Quote = v + } +} + +// WithShouldTrimFields - optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them. +func (f TextStructureFindFieldStructure) WithShouldTrimFields(v bool) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.ShouldTrimFields = &v + } +} + +// WithTimeout - timeout after which the analysis will be aborted. 
+func (f TextStructureFindFieldStructure) WithTimeout(v time.Duration) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Timeout = v + } +} + +// WithTimestampField - optional parameter to specify the timestamp field in the file. +func (f TextStructureFindFieldStructure) WithTimestampField(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.TimestampField = v + } +} + +// WithTimestampFormat - optional parameter to specify the timestamp format in the file - may be either a joda or java time format. +func (f TextStructureFindFieldStructure) WithTimestampFormat(v string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.TimestampFormat = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TextStructureFindFieldStructure) WithPretty() func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TextStructureFindFieldStructure) WithHuman() func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TextStructureFindFieldStructure) WithErrorTrace() func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TextStructureFindFieldStructure) WithFilterPath(v ...string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TextStructureFindFieldStructure) WithHeader(h map[string]string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TextStructureFindFieldStructure) WithOpaqueID(s string) func(*TextStructureFindFieldStructureRequest) { + return func(r *TextStructureFindFieldStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_message_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_message_structure.go new file mode 100644 index 000000000..ed598d6e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_message_structure.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
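TextStructureFindFieldStructure, now complete above, differs from SSLCertificates in taking two required positional arguments (index and field) ahead of the options; Do forwards both as query parameters on GET /_text_structure/find_field_structure. A sketch under the same assumptions, with es.TextStructure.FindFieldStructure as an assumed field name:

package main

import (
	"context"
	"log"
	"time"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	// Required arguments come first; everything else is an option closure.
	res, err := es.TextStructure.FindFieldStructure("my-logs", "message",
		es.TextStructure.FindFieldStructure.WithContext(context.Background()),
		es.TextStructure.FindFieldStructure.WithDocumentsToSample(500), // stored as *int via &v
		es.TextStructure.FindFieldStructure.WithEcsCompatibility("v1"),
		es.TextStructure.FindFieldStructure.WithTimeout(30*time.Second), // serialized by formatDuration
		es.TextStructure.FindFieldStructure.WithExplain(true),
	)
	if err != nil {
		log.Fatalf("find_field_structure: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}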
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newTextStructureFindMessageStructureFunc(t Transport) TextStructureFindMessageStructure { + return func(body io.Reader, o ...func(*TextStructureFindMessageStructureRequest)) (*Response, error) { + var r = TextStructureFindMessageStructureRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TextStructureFindMessageStructure - Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/find-message-structure.html. +type TextStructureFindMessageStructure func(body io.Reader, o ...func(*TextStructureFindMessageStructureRequest)) (*Response, error) + +// TextStructureFindMessageStructureRequest configures the Text Structure Find Message Structure API request. +type TextStructureFindMessageStructureRequest struct { + Body io.Reader + + ColumnNames []string + Delimiter string + EcsCompatibility string + Explain *bool + Format string + GrokPattern string + Quote string + ShouldTrimFields *bool + Timeout time.Duration + TimestampField string + TimestampFormat string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r TextStructureFindMessageStructureRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_message_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_text_structure/find_message_structure")) + path.WriteString("http://") + path.WriteString("/_text_structure/find_message_structure") + + params = make(map[string]string) + + if len(r.ColumnNames) > 0 { + params["column_names"] = strings.Join(r.ColumnNames, ",") + } + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.EcsCompatibility != "" { + params["ecs_compatibility"] = r.EcsCompatibility + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.GrokPattern != "" { + params["grok_pattern"] = r.GrokPattern + } + + if r.Quote != "" { + params["quote"] = r.Quote + } + + if r.ShouldTrimFields != nil { + params["should_trim_fields"] = strconv.FormatBool(*r.ShouldTrimFields) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.TimestampField != "" { + params["timestamp_field"] = r.TimestampField + } + + if r.TimestampFormat != "" { + params["timestamp_format"] = r.TimestampFormat + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.find_message_structure") + if reader := instrument.RecordRequestBody(ctx, "text_structure.find_message_structure", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.find_message_structure") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
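The Do method above also exercises every instrumentation hook the generator emits: Start and Close bracket the endpoint, BeforeRequest and AfterRequest bracket the transport call, and the Record* methods capture errors, path parts, and request bodies. The interface itself is defined elsewhere in esapi; the sketch below is inferred purely from these call sites (the esapi.Instrumentation name is an assumption) and returns nil from RecordRequestBody so req.Body is left untouched, as the caller allows:

package main

import (
	"context"
	"io"
	"log"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// logInstr satisfies the hooks the generated Do methods call.
type logInstr struct{}

func (logInstr) Start(ctx context.Context, name string) context.Context {
	log.Printf("-> %s", name)
	return ctx
}
func (logInstr) Close(ctx context.Context)                  {}
func (logInstr) RecordError(ctx context.Context, err error) { log.Printf("!! %v", err) }
func (logInstr) RecordPathPart(ctx context.Context, part, value string) {
	log.Printf("   %s=%s", part, value)
}
func (logInstr) RecordRequestBody(ctx context.Context, endpoint string, body io.Reader) io.ReadCloser {
	return nil // nil leaves req.Body untouched, per the generated caller
}
func (logInstr) BeforeRequest(req *http.Request, endpoint string)        {}
func (logInstr) AfterRequest(req *http.Request, system, endpoint string) {}

// Compile-time check against the exported interface (assumed name; its
// definition is not part of this diff).
var _ esapi.Instrumentation = logInstr{}

func main() {}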
+func (f TextStructureFindMessageStructure) WithContext(v context.Context) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.ctx = v + } +} + +// WithColumnNames - optional parameter containing a comma separated list of the column names for a delimited file. +func (f TextStructureFindMessageStructure) WithColumnNames(v ...string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.ColumnNames = v + } +} + +// WithDelimiter - optional parameter to specify the delimiter character for a delimited file - must be a single character. +func (f TextStructureFindMessageStructure) WithDelimiter(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Delimiter = v + } +} + +// WithEcsCompatibility - optional parameter to specify the compatibility mode with ecs grok patterns - may be either 'v1' or 'disabled'. +func (f TextStructureFindMessageStructure) WithEcsCompatibility(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.EcsCompatibility = v + } +} + +// WithExplain - whether to include a commentary on how the structure was derived. +func (f TextStructureFindMessageStructure) WithExplain(v bool) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Explain = &v + } +} + +// WithFormat - optional parameter to specify the high level file format. +func (f TextStructureFindMessageStructure) WithFormat(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Format = v + } +} + +// WithGrokPattern - optional parameter to specify the grok pattern that should be used to extract fields from messages in a semi-structured text file. +func (f TextStructureFindMessageStructure) WithGrokPattern(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.GrokPattern = v + } +} + +// WithQuote - optional parameter to specify the quote character for a delimited file - must be a single character. +func (f TextStructureFindMessageStructure) WithQuote(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Quote = v + } +} + +// WithShouldTrimFields - optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them. +func (f TextStructureFindMessageStructure) WithShouldTrimFields(v bool) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.ShouldTrimFields = &v + } +} + +// WithTimeout - timeout after which the analysis will be aborted. +func (f TextStructureFindMessageStructure) WithTimeout(v time.Duration) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Timeout = v + } +} + +// WithTimestampField - optional parameter to specify the timestamp field in the file. +func (f TextStructureFindMessageStructure) WithTimestampField(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.TimestampField = v + } +} + +// WithTimestampFormat - optional parameter to specify the timestamp format in the file - may be either a joda or java time format. 
+func (f TextStructureFindMessageStructure) WithTimestampFormat(v string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.TimestampFormat = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TextStructureFindMessageStructure) WithPretty() func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TextStructureFindMessageStructure) WithHuman() func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TextStructureFindMessageStructure) WithErrorTrace() func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TextStructureFindMessageStructure) WithFilterPath(v ...string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TextStructureFindMessageStructure) WithHeader(h map[string]string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TextStructureFindMessageStructure) WithOpaqueID(s string) func(*TextStructureFindMessageStructureRequest) { + return func(r *TextStructureFindMessageStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_structure.go new file mode 100644 index 000000000..833e1b594 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.find_structure.go @@ -0,0 +1,408 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
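TextStructureFindMessageStructure, complete above, requires a request body, and Do defaults the Content-Type header to JSON when none is set. Per the upstream documentation linked in the API definition, the body is a JSON object carrying a messages array; that schema is not visible in this diff, so treat it as an assumption here. A sketch, again with es.TextStructure as an assumed namespace:

package main

import (
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	// One log line per entry; the service infers a shared structure.
	body := strings.NewReader(`{
	  "messages": [
	    "[2025-01-01T00:00:00] INFO  starting worker 1",
	    "[2025-01-01T00:00:07] WARN  worker 1 is slow"
	  ]
	}`)

	res, err := es.TextStructure.FindMessageStructure(body,
		es.TextStructure.FindMessageStructure.WithExplain(true),
	)
	if err != nil {
		log.Fatalf("find_message_structure: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}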
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newTextStructureFindStructureFunc(t Transport) TextStructureFindStructure { + return func(body io.Reader, o ...func(*TextStructureFindStructureRequest)) (*Response, error) { + var r = TextStructureFindStructureRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TextStructureFindStructure - Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html. +type TextStructureFindStructure func(body io.Reader, o ...func(*TextStructureFindStructureRequest)) (*Response, error) + +// TextStructureFindStructureRequest configures the Text Structure Find Structure API request. +type TextStructureFindStructureRequest struct { + Body io.Reader + + Charset string + ColumnNames []string + Delimiter string + EcsCompatibility string + Explain *bool + Format string + GrokPattern string + HasHeaderRow *bool + LineMergeSizeLimit *int + LinesToSample *int + Quote string + ShouldTrimFields *bool + Timeout time.Duration + TimestampField string + TimestampFormat string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TextStructureFindStructureRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_text_structure/find_structure")) + path.WriteString("http://") + path.WriteString("/_text_structure/find_structure") + + params = make(map[string]string) + + if r.Charset != "" { + params["charset"] = r.Charset + } + + if len(r.ColumnNames) > 0 { + params["column_names"] = strings.Join(r.ColumnNames, ",") + } + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.EcsCompatibility != "" { + params["ecs_compatibility"] = r.EcsCompatibility + } + + if r.Explain != nil { + params["explain"] = strconv.FormatBool(*r.Explain) + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.GrokPattern != "" { + params["grok_pattern"] = r.GrokPattern + } + + if r.HasHeaderRow != nil { + params["has_header_row"] = strconv.FormatBool(*r.HasHeaderRow) + } + + if r.LineMergeSizeLimit != nil { + params["line_merge_size_limit"] = strconv.FormatInt(int64(*r.LineMergeSizeLimit), 10) + } + + if r.LinesToSample != nil { + params["lines_to_sample"] = strconv.FormatInt(int64(*r.LinesToSample), 10) + } + + if r.Quote != "" { + params["quote"] = r.Quote + } + + if r.ShouldTrimFields != nil { + params["should_trim_fields"] = strconv.FormatBool(*r.ShouldTrimFields) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.TimestampField != "" { + params["timestamp_field"] = 
r.TimestampField + } + + if r.TimestampFormat != "" { + params["timestamp_format"] = r.TimestampFormat + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.find_structure") + if reader := instrument.RecordRequestBody(ctx, "text_structure.find_structure", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.find_structure") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TextStructureFindStructure) WithContext(v context.Context) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.ctx = v + } +} + +// WithCharset - optional parameter to specify the character set of the file. +func (f TextStructureFindStructure) WithCharset(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Charset = v + } +} + +// WithColumnNames - optional parameter containing a comma separated list of the column names for a delimited file. +func (f TextStructureFindStructure) WithColumnNames(v ...string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.ColumnNames = v + } +} + +// WithDelimiter - optional parameter to specify the delimiter character for a delimited file - must be a single character. +func (f TextStructureFindStructure) WithDelimiter(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Delimiter = v + } +} + +// WithEcsCompatibility - optional parameter to specify the compatibility mode with ecs grok patterns - may be either 'v1' or 'disabled'. +func (f TextStructureFindStructure) WithEcsCompatibility(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.EcsCompatibility = v + } +} + +// WithExplain - whether to include a commentary on how the structure was derived. +func (f TextStructureFindStructure) WithExplain(v bool) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Explain = &v + } +} + +// WithFormat - optional parameter to specify the high level file format. 
+func (f TextStructureFindStructure) WithFormat(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Format = v + } +} + +// WithGrokPattern - optional parameter to specify the grok pattern that should be used to extract fields from messages in a semi-structured text file. +func (f TextStructureFindStructure) WithGrokPattern(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.GrokPattern = v + } +} + +// WithHasHeaderRow - optional parameter to specify whether a delimited file includes the column names in its first row. +func (f TextStructureFindStructure) WithHasHeaderRow(v bool) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.HasHeaderRow = &v + } +} + +// WithLineMergeSizeLimit - maximum number of characters permitted in a single message when lines are merged to create messages.. +func (f TextStructureFindStructure) WithLineMergeSizeLimit(v int) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.LineMergeSizeLimit = &v + } +} + +// WithLinesToSample - how many lines of the file should be included in the analysis. +func (f TextStructureFindStructure) WithLinesToSample(v int) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.LinesToSample = &v + } +} + +// WithQuote - optional parameter to specify the quote character for a delimited file - must be a single character. +func (f TextStructureFindStructure) WithQuote(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Quote = v + } +} + +// WithShouldTrimFields - optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them. +func (f TextStructureFindStructure) WithShouldTrimFields(v bool) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.ShouldTrimFields = &v + } +} + +// WithTimeout - timeout after which the analysis will be aborted. +func (f TextStructureFindStructure) WithTimeout(v time.Duration) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Timeout = v + } +} + +// WithTimestampField - optional parameter to specify the timestamp field in the file. +func (f TextStructureFindStructure) WithTimestampField(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.TimestampField = v + } +} + +// WithTimestampFormat - optional parameter to specify the timestamp format in the file - may be either a joda or java time format. +func (f TextStructureFindStructure) WithTimestampFormat(v string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.TimestampFormat = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TextStructureFindStructure) WithPretty() func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TextStructureFindStructure) WithHuman() func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f TextStructureFindStructure) WithErrorTrace() func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TextStructureFindStructure) WithFilterPath(v ...string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TextStructureFindStructure) WithHeader(h map[string]string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TextStructureFindStructure) WithOpaqueID(s string) func(*TextStructureFindStructureRequest) { + return func(r *TextStructureFindStructureRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.test_grok_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.test_grok_pattern.go new file mode 100644 index 000000000..ca8823566 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.text_structure.test_grok_pattern.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newTextStructureTestGrokPatternFunc(t Transport) TextStructureTestGrokPattern { + return func(body io.Reader, o ...func(*TextStructureTestGrokPatternRequest)) (*Response, error) { + var r = TextStructureTestGrokPatternRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TextStructureTestGrokPattern - Tests a Grok pattern on some text. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/test-grok-pattern.html. +type TextStructureTestGrokPattern func(body io.Reader, o ...func(*TextStructureTestGrokPatternRequest)) (*Response, error) + +// TextStructureTestGrokPatternRequest configures the Text Structure Test Grok Pattern API request. 
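TextStructureFindStructure, completed above, takes the raw sample itself as the body rather than a JSON wrapper: the first lines of the file to analyze are posted as-is to POST /_text_structure/find_structure. A sketch with an NDJSON sample, under the same client and namespace assumptions:

package main

import (
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	// The body is the sample text itself, one record per line.
	sample := strings.NewReader(
		`{"ts":"2025-01-01T00:00:00Z","level":"INFO","msg":"starting"}
{"ts":"2025-01-01T00:00:07Z","level":"WARN","msg":"slow"}
`)

	res, err := es.TextStructure.FindStructure(sample,
		es.TextStructure.FindStructure.WithLinesToSample(1000),
		es.TextStructure.FindStructure.WithTimestampField("ts"),
		es.TextStructure.FindStructure.WithExplain(true),
	)
	if err != nil {
		log.Fatalf("find_structure: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}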
+type TextStructureTestGrokPatternRequest struct { + Body io.Reader + + EcsCompatibility string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TextStructureTestGrokPatternRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.test_grok_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_text_structure/test_grok_pattern")) + path.WriteString("http://") + path.WriteString("/_text_structure/test_grok_pattern") + + params = make(map[string]string) + + if r.EcsCompatibility != "" { + params["ecs_compatibility"] = r.EcsCompatibility + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.test_grok_pattern") + if reader := instrument.RecordRequestBody(ctx, "text_structure.test_grok_pattern", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.test_grok_pattern") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TextStructureTestGrokPattern) WithContext(v context.Context) func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.ctx = v + } +} + +// WithEcsCompatibility - optional parameter to specify the compatibility mode with ecs grok patterns - may be either 'v1' or 'disabled'. +func (f TextStructureTestGrokPattern) WithEcsCompatibility(v string) func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.EcsCompatibility = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f TextStructureTestGrokPattern) WithPretty() func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TextStructureTestGrokPattern) WithHuman() func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TextStructureTestGrokPattern) WithErrorTrace() func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TextStructureTestGrokPattern) WithFilterPath(v ...string) func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TextStructureTestGrokPattern) WithHeader(h map[string]string) func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TextStructureTestGrokPattern) WithOpaqueID(s string) func(*TextStructureTestGrokPatternRequest) { + return func(r *TextStructureTestGrokPatternRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.delete_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.delete_transform.go new file mode 100644 index 000000000..40abd0d88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.delete_transform.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformDeleteTransformFunc(t Transport) TransformDeleteTransform { + return func(transform_id string, o ...func(*TransformDeleteTransformRequest)) (*Response, error) { + var r = TransformDeleteTransformRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformDeleteTransform - Deletes an existing transform. 
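TextStructureTestGrokPattern, complete above, posts a grok pattern together with sample text and returns what each pattern field matched. Per the upstream documentation linked in the API definition (the schema is not shown in this diff), the body carries grok_pattern and a text array; a sketch:

package main

import (
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	// The pattern plus sample lines to match it against.
	body := strings.NewReader(`{
	  "grok_pattern": "%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}",
	  "text": ["2025-01-01T00:00:00 INFO starting worker 1"]
	}`)

	res, err := es.TextStructure.TestGrokPattern(body,
		es.TextStructure.TestGrokPattern.WithEcsCompatibility("v1"),
	)
	if err != nil {
		log.Fatalf("test_grok_pattern: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}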
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html. +type TransformDeleteTransform func(transform_id string, o ...func(*TransformDeleteTransformRequest)) (*Response, error) + +// TransformDeleteTransformRequest configures the Transform Delete Transform API request. +type TransformDeleteTransformRequest struct { + TransformID string + + DeleteDestIndex *bool + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformDeleteTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.delete_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + + params = make(map[string]string) + + if r.DeleteDestIndex != nil { + params["delete_dest_index"] = strconv.FormatBool(*r.DeleteDestIndex) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.delete_transform") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.delete_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformDeleteTransform) WithContext(v context.Context) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.ctx = v + } +} + +// WithDeleteDestIndex - when `true`, the destination index is deleted together with the transform. 
the default value is `false`, meaning that the destination index will not be deleted.. +func (f TransformDeleteTransform) WithDeleteDestIndex(v bool) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.DeleteDestIndex = &v + } +} + +// WithForce - when `true`, the transform is deleted regardless of its current state. the default value is `false`, meaning that the transform must be `stopped` before it can be deleted.. +func (f TransformDeleteTransform) WithForce(v bool) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait for the transform deletion. +func (f TransformDeleteTransform) WithTimeout(v time.Duration) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformDeleteTransform) WithPretty() func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformDeleteTransform) WithHuman() func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformDeleteTransform) WithErrorTrace() func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformDeleteTransform) WithFilterPath(v ...string) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformDeleteTransform) WithHeader(h map[string]string) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformDeleteTransform) WithOpaqueID(s string) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_node_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_node_stats.go new file mode 100644 index 000000000..35a26d7b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_node_stats.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newTransformGetNodeStatsFunc(t Transport) TransformGetNodeStats { + return func(o ...func(*TransformGetNodeStatsRequest)) (*Response, error) { + var r = TransformGetNodeStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformGetNodeStats - Retrieves transform usage information for transform nodes. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html. +type TransformGetNodeStats func(o ...func(*TransformGetNodeStatsRequest)) (*Response, error) + +// TransformGetNodeStatsRequest configures the Transform Get Node Stats API request. +type TransformGetNodeStatsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformGetNodeStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_node_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_transform/_node_stats")) + path.WriteString("http://") + path.WriteString("/_transform/_node_stats") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_node_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_node_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f TransformGetNodeStats) WithContext(v context.Context) func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformGetNodeStats) WithPretty() func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformGetNodeStats) WithHuman() func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformGetNodeStats) WithErrorTrace() func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformGetNodeStats) WithFilterPath(v ...string) func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformGetNodeStats) WithHeader(h map[string]string) func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformGetNodeStats) WithOpaqueID(s string) func(*TransformGetNodeStatsRequest) { + return func(r *TransformGetNodeStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform.go new file mode 100644 index 000000000..1ea0008e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
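TransformGetNodeStats, complete above, is the simplest shape in this drop: no positional arguments and no endpoint-specific options, just GET /_transform/_node_stats plus the common flags. A sketch, assuming the endpoint is exposed as es.Transform.GetNodeStats (the field name is not shown in this diff):

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("creating client: %s", err)
	}

	res, err := es.Transform.GetNodeStats(
		es.Transform.GetNodeStats.WithContext(context.Background()),
		es.Transform.GetNodeStats.WithHuman(), // human-readable sizes and durations
	)
	if err != nil {
		log.Fatalf("get transform node stats: %s", err)
	}
	defer res.Body.Close()
	log.Println(res.String())
}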
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newTransformGetTransformFunc(t Transport) TransformGetTransform { + return func(o ...func(*TransformGetTransformRequest)) (*Response, error) { + var r = TransformGetTransformRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformGetTransform - Retrieves configuration information for transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html. +type TransformGetTransform func(o ...func(*TransformGetTransformRequest)) (*Response, error) + +// TransformGetTransformRequest configures the Transform Get Transform API request. +type TransformGetTransformRequest struct { + TransformID string + + AllowNoMatch *bool + ExcludeGenerated *bool + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformGetTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + if r.TransformID != "" { + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + } + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.ExcludeGenerated != nil { + params["exclude_generated"] = strconv.FormatBool(*r.ExcludeGenerated) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_transform") + } + res, err := transport.Perform(req) + if instrument, ok := 
r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformGetTransform) WithContext(v context.Context) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.ctx = v + } +} + +// WithTransformID - the ID or comma delimited list of ID expressions of the transforms to get, '_all' or '*' implies get all transforms. +func (f TransformGetTransform) WithTransformID(v string) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.TransformID = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no transforms. (this includes `_all` string or when no transforms have been specified). +func (f TransformGetTransform) WithAllowNoMatch(v bool) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.AllowNoMatch = &v + } +} + +// WithExcludeGenerated - omits fields that are illegal to set on transform put. +func (f TransformGetTransform) WithExcludeGenerated(v bool) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.ExcludeGenerated = &v + } +} + +// WithFrom - skips a number of transform configs, defaults to 0. +func (f TransformGetTransform) WithFrom(v int) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of transforms to get, defaults to 100. +func (f TransformGetTransform) WithSize(v int) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformGetTransform) WithPretty() func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformGetTransform) WithHuman() func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformGetTransform) WithErrorTrace() func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformGetTransform) WithFilterPath(v ...string) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformGetTransform) WithHeader(h map[string]string) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f TransformGetTransform) WithOpaqueID(s string) func(*TransformGetTransformRequest) { + return func(r *TransformGetTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform_stats.go new file mode 100644 index 000000000..90ae32311 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.get_transform_stats.go @@ -0,0 +1,276 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformGetTransformStatsFunc(t Transport) TransformGetTransformStats { + return func(transform_id string, o ...func(*TransformGetTransformStatsRequest)) (*Response, error) { + var r = TransformGetTransformStatsRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformGetTransformStats - Retrieves usage information for transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html. +type TransformGetTransformStats func(transform_id string, o ...func(*TransformGetTransformStatsRequest)) (*Response, error) + +// TransformGetTransformStatsRequest configures the Transform Get Transform Stats API request. +type TransformGetTransformStatsRequest struct { + TransformID string + + AllowNoMatch *bool + From *int + Size *int + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r TransformGetTransformStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_stats")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_stats") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_transform_stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_transform_stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformGetTransformStats) WithContext(v context.Context) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.ctx = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no transforms. (this includes `_all` string or when no transforms have been specified). +func (f TransformGetTransformStats) WithAllowNoMatch(v bool) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.AllowNoMatch = &v + } +} + +// WithFrom - skips a number of transform stats, defaults to 0. +func (f TransformGetTransformStats) WithFrom(v int) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of transform stats to get, defaults to 100. 
+func (f TransformGetTransformStats) WithSize(v int) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.Size = &v + } +} + +// WithTimeout - controls the time to wait for the stats. +func (f TransformGetTransformStats) WithTimeout(v time.Duration) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformGetTransformStats) WithPretty() func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformGetTransformStats) WithHuman() func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformGetTransformStats) WithErrorTrace() func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformGetTransformStats) WithFilterPath(v ...string) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformGetTransformStats) WithHeader(h map[string]string) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformGetTransformStats) WithOpaqueID(s string) func(*TransformGetTransformStatsRequest) { + return func(r *TransformGetTransformStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.preview_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.preview_transform.go new file mode 100644 index 000000000..8e042d6ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.preview_transform.go @@ -0,0 +1,265 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newTransformPreviewTransformFunc(t Transport) TransformPreviewTransform { + return func(o ...func(*TransformPreviewTransformRequest)) (*Response, error) { + var r = TransformPreviewTransformRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformPreviewTransform - Previews a transform. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html. +type TransformPreviewTransform func(o ...func(*TransformPreviewTransformRequest)) (*Response, error) + +// TransformPreviewTransformRequest configures the Transform Preview Transform API request. +type TransformPreviewTransformRequest struct { + Body io.Reader + + TransformID string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformPreviewTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.preview_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_preview")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + if r.TransformID != "" { + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + } + path.WriteString("/") + path.WriteString("_preview") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.preview_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.preview_transform", r.Body); reader != nil { + req.Body = reader + } + } + res, err := 
transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.preview_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformPreviewTransform) WithContext(v context.Context) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.ctx = v + } +} + +// WithBody - The definition for the transform to preview. +func (f TransformPreviewTransform) WithBody(v io.Reader) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.Body = v + } +} + +// WithTransformID - the ID of the transform to preview.. +func (f TransformPreviewTransform) WithTransformID(v string) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.TransformID = v + } +} + +// WithTimeout - controls the time to wait for the preview. +func (f TransformPreviewTransform) WithTimeout(v time.Duration) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformPreviewTransform) WithPretty() func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformPreviewTransform) WithHuman() func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformPreviewTransform) WithErrorTrace() func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformPreviewTransform) WithFilterPath(v ...string) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformPreviewTransform) WithHeader(h map[string]string) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformPreviewTransform) WithOpaqueID(s string) func(*TransformPreviewTransformRequest) { + return func(r *TransformPreviewTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.put_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.put_transform.go new file mode 100644 index 000000000..5ed94c072 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.put_transform.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformPutTransformFunc(t Transport) TransformPutTransform { + return func(body io.Reader, transform_id string, o ...func(*TransformPutTransformRequest)) (*Response, error) { + var r = TransformPutTransformRequest{Body: body, TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformPutTransform - Instantiates a transform. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html. +type TransformPutTransform func(body io.Reader, transform_id string, o ...func(*TransformPutTransformRequest)) (*Response, error) + +// TransformPutTransformRequest configures the Transform Put Transform API request. +type TransformPutTransformRequest struct { + Body io.Reader + + TransformID string + + DeferValidation *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
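+//
+// Illustrative call sketch: `es` is assumed to be a configured
+// go-elasticsearch/v9 client, and the JSON body is a minimal example of a
+// create-transform config using a `latest` function (field names are sample
+// values, not prescribed by this file):
+//
+//	cfg := `{
+//	  "source": {"index": "src"},
+//	  "dest":   {"index": "dst"},
+//	  "latest": {"unique_key": ["user.id"], "sort": "@timestamp"}
+//	}`
+//	res, err := es.TransformPutTransform(strings.NewReader(cfg), "my-transform",
+//	    es.TransformPutTransform.WithDeferValidation(true),
+//	)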
+func (r TransformPutTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.put_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + + params = make(map[string]string) + + if r.DeferValidation != nil { + params["defer_validation"] = strconv.FormatBool(*r.DeferValidation) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.put_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.put_transform", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.put_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformPutTransform) WithContext(v context.Context) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.ctx = v + } +} + +// WithDeferValidation - if validations should be deferred until transform starts, defaults to false.. +func (f TransformPutTransform) WithDeferValidation(v bool) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.DeferValidation = &v + } +} + +// WithTimeout - controls the time to wait for the transform to start. +func (f TransformPutTransform) WithTimeout(v time.Duration) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f TransformPutTransform) WithPretty() func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformPutTransform) WithHuman() func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformPutTransform) WithErrorTrace() func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformPutTransform) WithFilterPath(v ...string) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformPutTransform) WithHeader(h map[string]string) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformPutTransform) WithOpaqueID(s string) func(*TransformPutTransformRequest) { + return func(r *TransformPutTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.reset_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.reset_transform.go new file mode 100644 index 000000000..abf2f27af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.reset_transform.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformResetTransformFunc(t Transport) TransformResetTransform { + return func(transform_id string, o ...func(*TransformResetTransformRequest)) (*Response, error) { + var r = TransformResetTransformRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformResetTransform - Resets an existing transform. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html. 
+type TransformResetTransform func(transform_id string, o ...func(*TransformResetTransformRequest)) (*Response, error) + +// TransformResetTransformRequest configures the Transform Reset Transform API request. +type TransformResetTransformRequest struct { + TransformID string + + Force *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformResetTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.reset_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_reset")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_reset") + + params = make(map[string]string) + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.reset_transform") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.reset_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformResetTransform) WithContext(v context.Context) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.ctx = v + } +} + +// WithForce - when `true`, the transform is reset regardless of its current state. the default value is `false`, meaning that the transform must be `stopped` before it can be reset.. 
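+//
+// Illustrative call sketch (`es` is assumed to be a configured
+// go-elasticsearch/v9 client): forcing the reset skips the usual requirement
+// that the transform be stopped first:
+//
+//	res, err := es.TransformResetTransform("my-transform",
+//	    es.TransformResetTransform.WithForce(true),
+//	)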
+func (f TransformResetTransform) WithForce(v bool) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait for the transform to reset. +func (f TransformResetTransform) WithTimeout(v time.Duration) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformResetTransform) WithPretty() func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformResetTransform) WithHuman() func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformResetTransform) WithErrorTrace() func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformResetTransform) WithFilterPath(v ...string) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformResetTransform) WithHeader(h map[string]string) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformResetTransform) WithOpaqueID(s string) func(*TransformResetTransformRequest) { + return func(r *TransformResetTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.schedule_now_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.schedule_now_transform.go new file mode 100644 index 000000000..52a384fc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.schedule_now_transform.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newTransformScheduleNowTransformFunc(t Transport) TransformScheduleNowTransform { + return func(transform_id string, o ...func(*TransformScheduleNowTransformRequest)) (*Response, error) { + var r = TransformScheduleNowTransformRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformScheduleNowTransform - Schedules now a transform. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html. +type TransformScheduleNowTransform func(transform_id string, o ...func(*TransformScheduleNowTransformRequest)) (*Response, error) + +// TransformScheduleNowTransformRequest configures the Transform Schedule Now Transform API request. +type TransformScheduleNowTransformRequest struct { + TransformID string + + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformScheduleNowTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.schedule_now_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_schedule_now")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_schedule_now") + + params = make(map[string]string) + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.schedule_now_transform") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.schedule_now_transform") + } + if err != nil { + 
if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformScheduleNowTransform) WithContext(v context.Context) func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.ctx = v + } +} + +// WithTimeout - controls the time to wait for the scheduling to take place. +func (f TransformScheduleNowTransform) WithTimeout(v time.Duration) func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformScheduleNowTransform) WithPretty() func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformScheduleNowTransform) WithHuman() func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformScheduleNowTransform) WithErrorTrace() func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformScheduleNowTransform) WithFilterPath(v ...string) func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformScheduleNowTransform) WithHeader(h map[string]string) func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformScheduleNowTransform) WithOpaqueID(s string) func(*TransformScheduleNowTransformRequest) { + return func(r *TransformScheduleNowTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.start_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.start_transform.go new file mode 100644 index 000000000..deb22a631 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.start_transform.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newTransformStartTransformFunc(t Transport) TransformStartTransform { + return func(transform_id string, o ...func(*TransformStartTransformRequest)) (*Response, error) { + var r = TransformStartTransformRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformStartTransform - Starts one or more transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html. +type TransformStartTransform func(transform_id string, o ...func(*TransformStartTransformRequest)) (*Response, error) + +// TransformStartTransformRequest configures the Transform Start Transform API request. +type TransformStartTransformRequest struct { + TransformID string + + From string + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformStartTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.start_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_start")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_start") + + params = make(map[string]string) + + if r.From != "" { + params["from"] = r.From + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.start_transform") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, 
"elasticsearch", "transform.start_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformStartTransform) WithContext(v context.Context) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.ctx = v + } +} + +// WithFrom - restricts the set of transformed entities to those changed after this time. +func (f TransformStartTransform) WithFrom(v string) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.From = v + } +} + +// WithTimeout - controls the time to wait for the transform to start. +func (f TransformStartTransform) WithTimeout(v time.Duration) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformStartTransform) WithPretty() func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformStartTransform) WithHuman() func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformStartTransform) WithErrorTrace() func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformStartTransform) WithFilterPath(v ...string) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformStartTransform) WithHeader(h map[string]string) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformStartTransform) WithOpaqueID(s string) func(*TransformStartTransformRequest) { + return func(r *TransformStartTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.stop_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.stop_transform.go new file mode 100644 index 000000000..053c256e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.stop_transform.go @@ -0,0 +1,288 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformStopTransformFunc(t Transport) TransformStopTransform { + return func(transform_id string, o ...func(*TransformStopTransformRequest)) (*Response, error) { + var r = TransformStopTransformRequest{TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformStopTransform - Stops one or more transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html. +type TransformStopTransform func(transform_id string, o ...func(*TransformStopTransformRequest)) (*Response, error) + +// TransformStopTransformRequest configures the Transform Stop Transform API request. +type TransformStopTransformRequest struct { + TransformID string + + AllowNoMatch *bool + Force *bool + Timeout time.Duration + WaitForCheckpoint *bool + WaitForCompletion *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
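+//
+// Illustrative call sketch (`es` is assumed to be a configured
+// go-elasticsearch/v9 client): stop a transform, first letting it finish the
+// in-flight checkpoint and blocking until it has fully stopped:
+//
+//	res, err := es.TransformStopTransform("my-transform",
+//	    es.TransformStopTransform.WithWaitForCheckpoint(true),
+//	    es.TransformStopTransform.WithWaitForCompletion(true),
+//	    es.TransformStopTransform.WithTimeout(30*time.Second),
+//	)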
+func (r TransformStopTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.stop_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_stop")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_stop") + + params = make(map[string]string) + + if r.AllowNoMatch != nil { + params["allow_no_match"] = strconv.FormatBool(*r.AllowNoMatch) + } + + if r.Force != nil { + params["force"] = strconv.FormatBool(*r.Force) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.WaitForCheckpoint != nil { + params["wait_for_checkpoint"] = strconv.FormatBool(*r.WaitForCheckpoint) + } + + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.stop_transform") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.stop_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformStopTransform) WithContext(v context.Context) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.ctx = v + } +} + +// WithAllowNoMatch - whether to ignore if a wildcard expression matches no transforms. (this includes `_all` string or when no transforms have been specified). +func (f TransformStopTransform) WithAllowNoMatch(v bool) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.AllowNoMatch = &v + } +} + +// WithForce - whether to force stop a failed transform or not. default to false. 
+func (f TransformStopTransform) WithForce(v bool) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.Force = &v + } +} + +// WithTimeout - controls the time to wait until the transform has stopped. default to 30 seconds. +func (f TransformStopTransform) WithTimeout(v time.Duration) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.Timeout = v + } +} + +// WithWaitForCheckpoint - whether to wait for the transform to reach a checkpoint before stopping. default to false. +func (f TransformStopTransform) WithWaitForCheckpoint(v bool) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.WaitForCheckpoint = &v + } +} + +// WithWaitForCompletion - whether to wait for the transform to fully stop before returning or not. default to false. +func (f TransformStopTransform) WithWaitForCompletion(v bool) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.WaitForCompletion = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f TransformStopTransform) WithPretty() func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformStopTransform) WithHuman() func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformStopTransform) WithErrorTrace() func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformStopTransform) WithFilterPath(v ...string) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformStopTransform) WithHeader(h map[string]string) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformStopTransform) WithOpaqueID(s string) func(*TransformStopTransformRequest) { + return func(r *TransformStopTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.update_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.update_transform.go new file mode 100644 index 000000000..56759aa49 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.update_transform.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformUpdateTransformFunc(t Transport) TransformUpdateTransform { + return func(body io.Reader, transform_id string, o ...func(*TransformUpdateTransformRequest)) (*Response, error) { + var r = TransformUpdateTransformRequest{Body: body, TransformID: transform_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformUpdateTransform - Updates certain properties of a transform. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html. +type TransformUpdateTransform func(body io.Reader, transform_id string, o ...func(*TransformUpdateTransformRequest)) (*Response, error) + +// TransformUpdateTransformRequest configures the Transform Update Transform API request. +type TransformUpdateTransformRequest struct { + Body io.Reader + + TransformID string + + DeferValidation *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r TransformUpdateTransformRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.update_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + 1 + len("_transform") + 1 + len(r.TransformID) + 1 + len("_update")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString(r.TransformID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "transform_id", r.TransformID) + } + path.WriteString("/") + path.WriteString("_update") + + params = make(map[string]string) + + if r.DeferValidation != nil { + params["defer_validation"] = strconv.FormatBool(*r.DeferValidation) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.update_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.update_transform", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.update_transform") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformUpdateTransform) WithContext(v context.Context) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.ctx = v + } +} + +// WithDeferValidation - if validations should be deferred until transform starts, defaults to false.. +func (f TransformUpdateTransform) WithDeferValidation(v bool) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.DeferValidation = &v + } +} + +// WithTimeout - controls the time to wait for the update. +func (f TransformUpdateTransform) WithTimeout(v time.Duration) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
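A matching hand-written sketch for the update endpoint: TransformID, Body, and DeferValidation are the struct fields declared above, while the JSON payload and helper name are illustrative.

package example

import (
	"context"
	"fmt"
	"strings"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// UpdateTransformDescription patches a single mutable property of a transform.
func UpdateTransformDescription(ctx context.Context, t esapi.Transport, id string) error {
	deferValidation := false
	res, err := esapi.TransformUpdateTransformRequest{
		TransformID:     id,
		Body:            strings.NewReader(`{"description":"nightly rollup (updated)"}`),
		DeferValidation: &deferValidation,
	}.Do(ctx, t)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("update transform %q: %s", id, res.String())
	}
	return nil
}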
+func (f TransformUpdateTransform) WithPretty() func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformUpdateTransform) WithHuman() func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformUpdateTransform) WithErrorTrace() func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformUpdateTransform) WithFilterPath(v ...string) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformUpdateTransform) WithHeader(h map[string]string) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformUpdateTransform) WithOpaqueID(s string) func(*TransformUpdateTransformRequest) { + return func(r *TransformUpdateTransformRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.upgrade_transforms.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.upgrade_transforms.go new file mode 100644 index 000000000..9671188de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.transform.upgrade_transforms.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newTransformUpgradeTransformsFunc(t Transport) TransformUpgradeTransforms { + return func(o ...func(*TransformUpgradeTransformsRequest)) (*Response, error) { + var r = TransformUpgradeTransformsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// TransformUpgradeTransforms - Upgrades all transforms. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html. 
+type TransformUpgradeTransforms func(o ...func(*TransformUpgradeTransformsRequest)) (*Response, error) + +// TransformUpgradeTransformsRequest configures the Transform Upgrade Transforms API request. +type TransformUpgradeTransformsRequest struct { + DryRun *bool + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r TransformUpgradeTransformsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.upgrade_transforms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_transform/_upgrade")) + path.WriteString("http://") + path.WriteString("/_transform/_upgrade") + + params = make(map[string]string) + + if r.DryRun != nil { + params["dry_run"] = strconv.FormatBool(*r.DryRun) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "transform.upgrade_transforms") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.upgrade_transforms") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f TransformUpgradeTransforms) WithContext(v context.Context) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.ctx = v + } +} + +// WithDryRun - whether to only check for updates but don't execute. +func (f TransformUpgradeTransforms) WithDryRun(v bool) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.DryRun = &v + } +} + +// WithTimeout - controls the time to wait for the upgrade. +func (f TransformUpgradeTransforms) WithTimeout(v time.Duration) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. 
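Upgrading rewrites the stored configuration of every transform in the cluster, so probing with DryRun first is the cautious path; a sketch with an invented helper name:

package example

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// PreviewTransformUpgrade asks what an upgrade would change without applying it.
func PreviewTransformUpgrade(ctx context.Context, t esapi.Transport) (string, error) {
	dry := true
	res, err := esapi.TransformUpgradeTransformsRequest{DryRun: &dry}.Do(ctx, t)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil // status line plus raw JSON body, for inspection
}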
+func (f TransformUpgradeTransforms) WithPretty() func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f TransformUpgradeTransforms) WithHuman() func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f TransformUpgradeTransforms) WithErrorTrace() func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f TransformUpgradeTransforms) WithFilterPath(v ...string) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f TransformUpgradeTransforms) WithHeader(h map[string]string) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f TransformUpgradeTransforms) WithOpaqueID(s string) func(*TransformUpgradeTransformsRequest) { + return func(r *TransformUpgradeTransformsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.ack_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.ack_watch.go new file mode 100644 index 000000000..870658173 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.ack_watch.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherAckWatchFunc(t Transport) WatcherAckWatch { + return func(watch_id string, o ...func(*WatcherAckWatchRequest)) (*Response, error) { + var r = WatcherAckWatchRequest{WatchID: watch_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherAckWatch - Acknowledges a watch, manually throttling the execution of the watch's actions. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html. 
+type WatcherAckWatch func(watch_id string, o ...func(*WatcherAckWatchRequest)) (*Response, error) + +// WatcherAckWatchRequest configures the Watcher Ack Watch API request. +type WatcherAckWatchRequest struct { + ActionID []string + WatchID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherAckWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.ack_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID) + 1 + len("_ack") + 1 + len(strings.Join(r.ActionID, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "watch_id", r.WatchID) + } + path.WriteString("/") + path.WriteString("_ack") + if len(r.ActionID) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.ActionID, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "action_id", strings.Join(r.ActionID, ",")) + } + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.ack_watch") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.ack_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherAckWatch) WithContext(v context.Context) func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.ctx = v + } +} + +// WithActionID - a list of the action ids to be acked. +func (f WatcherAckWatch) WithActionID(v ...string) func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.ActionID = v + } +} + +// WithPretty makes the response body pretty-printed. 
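The variadic ActionID field corresponds to the optional path segment assembled above: leave it empty to acknowledge the whole watch, or list specific action IDs. A hand-written sketch:

package example

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// AckWatchActions acknowledges the named actions of a watch, or the whole
// watch when no action IDs are given.
func AckWatchActions(ctx context.Context, t esapi.Transport, watchID string, actionIDs ...string) error {
	res, err := esapi.WatcherAckWatchRequest{
		WatchID:  watchID,
		ActionID: actionIDs,
	}.Do(ctx, t)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("ack watch %q: %s", watchID, res.String())
	}
	return nil
}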
+func (f WatcherAckWatch) WithPretty() func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherAckWatch) WithHuman() func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherAckWatch) WithErrorTrace() func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherAckWatch) WithFilterPath(v ...string) func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherAckWatch) WithHeader(h map[string]string) func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherAckWatch) WithOpaqueID(s string) func(*WatcherAckWatchRequest) { + return func(r *WatcherAckWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.activate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.activate_watch.go new file mode 100644 index 000000000..cfd579c4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.activate_watch.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherActivateWatchFunc(t Transport) WatcherActivateWatch { + return func(watch_id string, o ...func(*WatcherActivateWatchRequest)) (*Response, error) { + var r = WatcherActivateWatchRequest{WatchID: watch_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherActivateWatch - Activates a currently inactive watch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html. +type WatcherActivateWatch func(watch_id string, o ...func(*WatcherActivateWatchRequest)) (*Response, error) + +// WatcherActivateWatchRequest configures the Watcher Activate Watch API request. 
+type WatcherActivateWatchRequest struct { + WatchID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherActivateWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.activate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID) + 1 + len("_activate")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "watch_id", r.WatchID) + } + path.WriteString("/") + path.WriteString("_activate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.activate_watch") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.activate_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherActivateWatch) WithContext(v context.Context) func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherActivateWatch) WithPretty() func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherActivateWatch) WithHuman() func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherActivateWatch) WithErrorTrace() func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
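Activation is a body-less PUT; the sketch below also shows the Header field doing the same job as the WithOpaqueID option defined later in this file (the helper name and opaque ID value are illustrative):

package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// ActivateWatch re-enables an inactive watch, tagging the call for tracing.
func ActivateWatch(ctx context.Context, t esapi.Transport, id string) error {
	res, err := esapi.WatcherActivateWatchRequest{
		WatchID: id,
		Header:  http.Header{"X-Opaque-Id": []string{"watch-activator"}},
	}.Do(ctx, t)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("activate watch %q: %s", id, res.String())
	}
	return nil
}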
+func (f WatcherActivateWatch) WithFilterPath(v ...string) func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherActivateWatch) WithHeader(h map[string]string) func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherActivateWatch) WithOpaqueID(s string) func(*WatcherActivateWatchRequest) { + return func(r *WatcherActivateWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.deactivate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.deactivate_watch.go new file mode 100644 index 000000000..0d5815f0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.deactivate_watch.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherDeactivateWatchFunc(t Transport) WatcherDeactivateWatch { + return func(watch_id string, o ...func(*WatcherDeactivateWatchRequest)) (*Response, error) { + var r = WatcherDeactivateWatchRequest{WatchID: watch_id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherDeactivateWatch - Deactivates a currently active watch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html. +type WatcherDeactivateWatch func(watch_id string, o ...func(*WatcherDeactivateWatchRequest)) (*Response, error) + +// WatcherDeactivateWatchRequest configures the Watcher Deactivate Watch API request. +type WatcherDeactivateWatchRequest struct { + WatchID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r WatcherDeactivateWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.deactivate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID) + 1 + len("_deactivate")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "watch_id", r.WatchID) + } + path.WriteString("/") + path.WriteString("_deactivate") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.deactivate_watch") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.deactivate_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherDeactivateWatch) WithContext(v context.Context) func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherDeactivateWatch) WithPretty() func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherDeactivateWatch) WithHuman() func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherDeactivateWatch) WithErrorTrace() func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherDeactivateWatch) WithFilterPath(v ...string) func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
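Activate and deactivate are mirror-image endpoints, so a small hand-written toggle can wrap both request types (names illustrative):

package example

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// SetWatchActive flips a watch between its active and inactive states.
func SetWatchActive(ctx context.Context, t esapi.Transport, id string, active bool) error {
	var (
		res *esapi.Response
		err error
	)
	if active {
		res, err = esapi.WatcherActivateWatchRequest{WatchID: id}.Do(ctx, t)
	} else {
		res, err = esapi.WatcherDeactivateWatchRequest{WatchID: id}.Do(ctx, t)
	}
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.IsError() {
		return fmt.Errorf("set watch %q active=%v: %s", id, active, res.String())
	}
	return nil
}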
+func (f WatcherDeactivateWatch) WithHeader(h map[string]string) func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherDeactivateWatch) WithOpaqueID(s string) func(*WatcherDeactivateWatchRequest) { + return func(r *WatcherDeactivateWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.delete_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.delete_watch.go new file mode 100644 index 000000000..0db20b9c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.delete_watch.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherDeleteWatchFunc(t Transport) WatcherDeleteWatch { + return func(id string, o ...func(*WatcherDeleteWatchRequest)) (*Response, error) { + var r = WatcherDeleteWatchRequest{WatchID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherDeleteWatch - Removes a watch from Watcher. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html. +type WatcherDeleteWatch func(id string, o ...func(*WatcherDeleteWatchRequest)) (*Response, error) + +// WatcherDeleteWatchRequest configures the Watcher Delete Watch API request. +type WatcherDeleteWatchRequest struct { + WatchID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r WatcherDeleteWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.delete_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "DELETE" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.WatchID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.delete_watch") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.delete_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherDeleteWatch) WithContext(v context.Context) func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherDeleteWatch) WithPretty() func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherDeleteWatch) WithHuman() func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherDeleteWatch) WithErrorTrace() func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherDeleteWatch) WithFilterPath(v ...string) func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
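A deletion sketch; treating 404 as already-deleted is an assumption about the caller's idempotency needs, not something the API mandates:

package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// DeleteWatchIdempotent removes a watch, succeeding silently if it is gone.
func DeleteWatchIdempotent(ctx context.Context, t esapi.Transport, id string) error {
	res, err := esapi.WatcherDeleteWatchRequest{WatchID: id}.Do(ctx, t)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return nil // already absent
	}
	if res.IsError() {
		return fmt.Errorf("delete watch %q: %s", id, res.String())
	}
	return nil
}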
+func (f WatcherDeleteWatch) WithHeader(h map[string]string) func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherDeleteWatch) WithOpaqueID(s string) func(*WatcherDeleteWatchRequest) { + return func(r *WatcherDeleteWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.execute_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.execute_watch.go new file mode 100644 index 000000000..a2eae8108 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.execute_watch.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newWatcherExecuteWatchFunc(t Transport) WatcherExecuteWatch { + return func(o ...func(*WatcherExecuteWatchRequest)) (*Response, error) { + var r = WatcherExecuteWatchRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherExecuteWatch - Forces the execution of a stored watch. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html. +type WatcherExecuteWatch func(o ...func(*WatcherExecuteWatchRequest)) (*Response, error) + +// WatcherExecuteWatchRequest configures the Watcher Execute Watch API request. +type WatcherExecuteWatchRequest struct { + WatchID string + + Body io.Reader + + Debug *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r WatcherExecuteWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.execute_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID) + 1 + len("_execute")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + if r.WatchID != "" { + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.WatchID) + } + } + path.WriteString("/") + path.WriteString("_execute") + + params = make(map[string]string) + + if r.Debug != nil { + params["debug"] = strconv.FormatBool(*r.Debug) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.execute_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.execute_watch", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.execute_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherExecuteWatch) WithContext(v context.Context) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.ctx = v + } +} + +// WithBody - Execution control. +func (f WatcherExecuteWatch) WithBody(v io.Reader) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.Body = v + } +} + +// WithWatchID - watch ID. +func (f WatcherExecuteWatch) WithWatchID(v string) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.WatchID = v + } +} + +// WithDebug - indicates whether the watch should execute in debug mode. +func (f WatcherExecuteWatch) WithDebug(v bool) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.Debug = &v + } +} + +// WithPretty makes the response body pretty-printed. 
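Note that the path logic above appends the ID segment only when WatchID is non-empty, so this one request type covers both stored-watch and inline (body-only) execution. A stored-watch sketch in debug mode, helper name invented:

package example

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// DebugExecuteWatch force-runs a stored watch with debug output enabled.
func DebugExecuteWatch(ctx context.Context, t esapi.Transport, id string) (string, error) {
	debug := true
	res, err := esapi.WatcherExecuteWatchRequest{
		WatchID: id,
		Debug:   &debug,
	}.Do(ctx, t)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil // raw execution record, for inspection
}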
+func (f WatcherExecuteWatch) WithPretty() func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherExecuteWatch) WithHuman() func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherExecuteWatch) WithErrorTrace() func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherExecuteWatch) WithFilterPath(v ...string) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherExecuteWatch) WithHeader(h map[string]string) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherExecuteWatch) WithOpaqueID(s string) func(*WatcherExecuteWatchRequest) { + return func(r *WatcherExecuteWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_settings.go new file mode 100644 index 000000000..686eae58c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_settings.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newWatcherGetSettingsFunc(t Transport) WatcherGetSettings { + return func(o ...func(*WatcherGetSettingsRequest)) (*Response, error) { + var r = WatcherGetSettingsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherGetSettings - Retrieve settings for the watcher system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html. 
+type WatcherGetSettings func(o ...func(*WatcherGetSettingsRequest)) (*Response, error) + +// WatcherGetSettingsRequest configures the Watcher Get Settings API request. +type WatcherGetSettingsRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherGetSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_watcher/settings")) + path.WriteString("http://") + path.WriteString("/_watcher/settings") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.get_settings") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.get_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherGetSettings) WithContext(v context.Context) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f WatcherGetSettings) WithMasterTimeout(v time.Duration) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherGetSettings) WithPretty() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherGetSettings) WithHuman() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
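MasterTimeout is a time.Duration that formatDuration serializes into the master_timeout query parameter; a minimal sketch (helper name invented):

package example

import (
	"context"
	"time"

	"github.com/elastic/go-elasticsearch/v9/esapi"
)

// WatcherIndexSettings reads the settings of the watcher system index.
func WatcherIndexSettings(ctx context.Context, t esapi.Transport) (string, error) {
	res, err := esapi.WatcherGetSettingsRequest{
		MasterTimeout: 30 * time.Second,
	}.Do(ctx, t)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	return res.String(), nil
}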
+func (f WatcherGetSettings) WithErrorTrace() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherGetSettings) WithFilterPath(v ...string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherGetSettings) WithHeader(h map[string]string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherGetSettings) WithOpaqueID(s string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_watch.go new file mode 100644 index 000000000..e59150398 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.get_watch.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherGetWatchFunc(t Transport) WatcherGetWatch { + return func(id string, o ...func(*WatcherGetWatchRequest)) (*Response, error) { + var r = WatcherGetWatchRequest{WatchID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherGetWatch - Retrieves a watch by its ID. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html. +type WatcherGetWatch func(id string, o ...func(*WatcherGetWatchRequest)) (*Response, error) + +// WatcherGetWatchRequest configures the Watcher Get Watch API request. +type WatcherGetWatchRequest struct { + WatchID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
+func (r WatcherGetWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.WatchID) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.get_watch") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.get_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherGetWatch) WithContext(v context.Context) func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherGetWatch) WithPretty() func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherGetWatch) WithHuman() func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherGetWatch) WithErrorTrace() func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherGetWatch) WithFilterPath(v ...string) func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f WatcherGetWatch) WithHeader(h map[string]string) func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherGetWatch) WithOpaqueID(s string) func(*WatcherGetWatchRequest) { + return func(r *WatcherGetWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.put_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.put_watch.go new file mode 100644 index 000000000..7ba7666ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.put_watch.go @@ -0,0 +1,292 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newWatcherPutWatchFunc(t Transport) WatcherPutWatch { + return func(id string, o ...func(*WatcherPutWatchRequest)) (*Response, error) { + var r = WatcherPutWatchRequest{WatchID: id} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherPutWatch - Creates a new watch, or updates an existing one. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html. +type WatcherPutWatch func(id string, o ...func(*WatcherPutWatchRequest)) (*Response, error) + +// WatcherPutWatchRequest configures the Watcher Put Watch API request. +type WatcherPutWatchRequest struct { + WatchID string + + Body io.Reader + + Active *bool + IfPrimaryTerm *int + IfSeqNo *int + Version *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
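+//
+// A minimal usage sketch via the function type (editorial illustration; the
+// watch ID is an assumption and the body is a trimmed-down watch, not a
+// complete definition):
+//
+//	res, err := es.Watcher.PutWatch("error-alert",
+//		es.Watcher.PutWatch.WithBody(strings.NewReader(`{"trigger":{"schedule":{"interval":"10m"}}}`)),
+//		es.Watcher.PutWatch.WithActive(true),
+//	)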
+func (r WatcherPutWatchRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.put_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("watch") + 1 + len(r.WatchID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString(r.WatchID) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.WatchID) + } + + params = make(map[string]string) + + if r.Active != nil { + params["active"] = strconv.FormatBool(*r.Active) + } + + if r.IfPrimaryTerm != nil { + params["if_primary_term"] = strconv.FormatInt(int64(*r.IfPrimaryTerm), 10) + } + + if r.IfSeqNo != nil { + params["if_seq_no"] = strconv.FormatInt(int64(*r.IfSeqNo), 10) + } + + if r.Version != nil { + params["version"] = strconv.FormatInt(int64(*r.Version), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.put_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.put_watch", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.put_watch") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherPutWatch) WithContext(v context.Context) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.ctx = v + } +} + +// WithBody - The watch. +func (f WatcherPutWatch) WithBody(v io.Reader) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.Body = v + } +} + +// WithActive - specify whether the watch is in/active by default. +func (f WatcherPutWatch) WithActive(v bool) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.Active = &v + } +} + +// WithIfPrimaryTerm - only update the watch if the last operation that has changed the watch has the specified primary term. 
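+//
+// Together with WithIfSeqNo, this enables optimistic concurrency control:
+// the update is rejected if another operation has changed the watch since
+// its sequence number and primary term were read.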
+func (f WatcherPutWatch) WithIfPrimaryTerm(v int) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.IfPrimaryTerm = &v + } +} + +// WithIfSeqNo - only update the watch if the last operation that has changed the watch has the specified sequence number. +func (f WatcherPutWatch) WithIfSeqNo(v int) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.IfSeqNo = &v + } +} + +// WithVersion - explicit version number for concurrency control. +func (f WatcherPutWatch) WithVersion(v int) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.Version = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherPutWatch) WithPretty() func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherPutWatch) WithHuman() func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherPutWatch) WithErrorTrace() func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherPutWatch) WithFilterPath(v ...string) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherPutWatch) WithHeader(h map[string]string) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherPutWatch) WithOpaqueID(s string) func(*WatcherPutWatchRequest) { + return func(r *WatcherPutWatchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.query_watches.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.query_watches.go new file mode 100644 index 000000000..dd6f59910 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.query_watches.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newWatcherQueryWatchesFunc(t Transport) WatcherQueryWatches { + return func(o ...func(*WatcherQueryWatchesRequest)) (*Response, error) { + var r = WatcherQueryWatchesRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherQueryWatches - Retrieves stored watches. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html. +type WatcherQueryWatches func(o ...func(*WatcherQueryWatchesRequest)) (*Response, error) + +// WatcherQueryWatchesRequest configures the Watcher Query Watches API request. +type WatcherQueryWatchesRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherQueryWatchesRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.query_watches") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_watcher/_query/watches")) + path.WriteString("http://") + path.WriteString("/_watcher/_query/watches") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.query_watches") + if reader := instrument.RecordRequestBody(ctx, "watcher.query_watches", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.query_watches") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
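+//
+// The context is attached to the underlying HTTP request, so cancellation
+// and deadlines (e.g. via context.WithTimeout) propagate to the transport.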
+func (f WatcherQueryWatches) WithContext(v context.Context) func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.ctx = v + } +} + +// WithBody - From, size, query, sort and search_after. +func (f WatcherQueryWatches) WithBody(v io.Reader) func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherQueryWatches) WithPretty() func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherQueryWatches) WithHuman() func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherQueryWatches) WithErrorTrace() func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherQueryWatches) WithFilterPath(v ...string) func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherQueryWatches) WithHeader(h map[string]string) func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherQueryWatches) WithOpaqueID(s string) func(*WatcherQueryWatchesRequest) { + return func(r *WatcherQueryWatchesRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.start.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.start.go new file mode 100644 index 000000000..b152c7ad9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.start.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newWatcherStartFunc(t Transport) WatcherStart { + return func(o ...func(*WatcherStartRequest)) (*Response, error) { + var r = WatcherStartRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherStart - Starts Watcher if it is not already running. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html. +type WatcherStart func(o ...func(*WatcherStartRequest)) (*Response, error) + +// WatcherStartRequest configures the Watcher Start API request. +type WatcherStartRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherStartRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_watcher/_start")) + path.WriteString("http://") + path.WriteString("/_watcher/_start") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.start") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.start") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherStart) WithContext(v context.Context) func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. 
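+//
+// Durations are serialized by formatDuration; for example (illustrative):
+//
+//	res, err := es.Watcher.Start(es.Watcher.Start.WithMasterTimeout(30 * time.Second)) // master_timeout=30000ms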
+func (f WatcherStart) WithMasterTimeout(v time.Duration) func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherStart) WithPretty() func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherStart) WithHuman() func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherStart) WithErrorTrace() func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherStart) WithFilterPath(v ...string) func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherStart) WithHeader(h map[string]string) func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherStart) WithOpaqueID(s string) func(*WatcherStartRequest) { + return func(r *WatcherStartRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stats.go new file mode 100644 index 000000000..b8ebffbb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stats.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newWatcherStatsFunc(t Transport) WatcherStats { + return func(o ...func(*WatcherStatsRequest)) (*Response, error) { + var r = WatcherStatsRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherStats - Retrieves the current Watcher metrics. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html. 
+type WatcherStats func(o ...func(*WatcherStatsRequest)) (*Response, error) + +// WatcherStatsRequest configures the Watcher Stats API request. +type WatcherStatsRequest struct { + Metric []string + + EmitStacktraces *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherStatsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + 1 + len("_watcher") + 1 + len("stats") + 1 + len(strings.Join(r.Metric, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("stats") + if len(r.Metric) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Metric, ",")) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", strings.Join(r.Metric, ",")) + } + } + + params = make(map[string]string) + + if r.EmitStacktraces != nil { + params["emit_stacktraces"] = strconv.FormatBool(*r.EmitStacktraces) + } + + if len(r.Metric) > 0 { + params["metric"] = strings.Join(r.Metric, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.stats") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.stats") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherStats) WithContext(v context.Context) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.ctx = v + } +} + +// WithMetric - controls what additional stat metrics should be include in the response. +func (f WatcherStats) WithMetric(v ...string) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.Metric = v + } +} + +// WithEmitStacktraces - emits stack traces of currently running watches. 
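+//
+// This is primarily a debugging aid; it is typically combined with
+// WithMetric("current_watches") to inspect long-running watch executions.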
+func (f WatcherStats) WithEmitStacktraces(v bool) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.EmitStacktraces = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherStats) WithPretty() func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherStats) WithHuman() func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherStats) WithErrorTrace() func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherStats) WithFilterPath(v ...string) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherStats) WithHeader(h map[string]string) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherStats) WithOpaqueID(s string) func(*WatcherStatsRequest) { + return func(r *WatcherStatsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stop.go new file mode 100644 index 000000000..3d8546d4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.stop.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newWatcherStopFunc(t Transport) WatcherStop { + return func(o ...func(*WatcherStopRequest)) (*Response, error) { + var r = WatcherStopRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherStop - Stops Watcher if it is running. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html. +type WatcherStop func(o ...func(*WatcherStopRequest)) (*Response, error) + +// WatcherStopRequest configures the Watcher Stop API request. 
+type WatcherStopRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r WatcherStopRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "POST" + + path.Grow(7 + len("/_watcher/_stop")) + path.WriteString("http://") + path.WriteString("/_watcher/_stop") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.stop") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.stop") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherStop) WithContext(v context.Context) func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f WatcherStop) WithMasterTimeout(v time.Duration) func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherStop) WithPretty() func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherStop) WithHuman() func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherStop) WithErrorTrace() func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherStop) WithFilterPath(v ...string) func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f WatcherStop) WithHeader(h map[string]string) func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherStop) WithOpaqueID(s string) func(*WatcherStopRequest) { + return func(r *WatcherStopRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.update_settings.go new file mode 100644 index 000000000..c70647141 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.watcher.update_settings.go @@ -0,0 +1,251 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" + "time" +) + +func newWatcherUpdateSettingsFunc(t Transport) WatcherUpdateSettings { + return func(body io.Reader, o ...func(*WatcherUpdateSettingsRequest)) (*Response, error) { + var r = WatcherUpdateSettingsRequest{Body: body} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherUpdateSettings - Update settings for the watcher system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html. +type WatcherUpdateSettings func(body io.Reader, o ...func(*WatcherUpdateSettingsRequest)) (*Response, error) + +// WatcherUpdateSettingsRequest configures the Watcher Update Settings API request. +type WatcherUpdateSettingsRequest struct { + Body io.Reader + + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
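+//
+// A minimal usage sketch (editorial illustration; Elasticsearch accepts only
+// a small set of index settings here, such as index.auto_expand_replicas and
+// index.number_of_replicas):
+//
+//	res, err := es.Watcher.UpdateSettings(
+//		strings.NewReader(`{"index.auto_expand_replicas": "0-4"}`),
+//	)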
+func (r WatcherUpdateSettingsRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.update_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "PUT" + + path.Grow(7 + len("/_watcher/settings")) + path.WriteString("http://") + path.WriteString("/_watcher/settings") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.update_settings") + if reader := instrument.RecordRequestBody(ctx, "watcher.update_settings", r.Body); reader != nil { + req.Body = reader + } + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.update_settings") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherUpdateSettings) WithContext(v context.Context) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f WatcherUpdateSettings) WithMasterTimeout(v time.Duration) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - specify timeout for waiting for acknowledgement from all nodes. +func (f WatcherUpdateSettings) WithTimeout(v time.Duration) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherUpdateSettings) WithPretty() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f WatcherUpdateSettings) WithHuman() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherUpdateSettings) WithErrorTrace() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherUpdateSettings) WithFilterPath(v ...string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherUpdateSettings) WithHeader(h map[string]string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherUpdateSettings) WithOpaqueID(s string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.info.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.info.go new file mode 100644 index 000000000..d88b3d572 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.info.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newXPackInfoFunc(t Transport) XPackInfo { + return func(o ...func(*XPackInfoRequest)) (*Response, error) { + var r = XPackInfoRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// XPackInfo - Retrieves information about the installed X-Pack features. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html. +type XPackInfo func(o ...func(*XPackInfoRequest)) (*Response, error) + +// XPackInfoRequest configures the X Pack Info API request. 
+type XPackInfoRequest struct { + AcceptEnterprise *bool + Categories []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. +func (r XPackInfoRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_xpack")) + path.WriteString("http://") + path.WriteString("/_xpack") + + params = make(map[string]string) + + if r.AcceptEnterprise != nil { + params["accept_enterprise"] = strconv.FormatBool(*r.AcceptEnterprise) + } + + if len(r.Categories) > 0 { + params["categories"] = strings.Join(r.Categories, ",") + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "xpack.info") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "xpack.info") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f XPackInfo) WithContext(v context.Context) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.ctx = v + } +} + +// WithAcceptEnterprise - if this param is used it must be set to true. +func (f XPackInfo) WithAcceptEnterprise(v bool) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.AcceptEnterprise = &v + } +} + +// WithCategories - comma-separated list of info categories. can be any of: build, license, features. +func (f XPackInfo) WithCategories(v ...string) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.Categories = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f XPackInfo) WithPretty() func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f XPackInfo) WithHuman() func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f XPackInfo) WithErrorTrace() func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f XPackInfo) WithFilterPath(v ...string) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f XPackInfo) WithHeader(h map[string]string) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f XPackInfo) WithOpaqueID(s string) func(*XPackInfoRequest) { + return func(r *XPackInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.usage.go new file mode 100644 index 000000000..431f34d89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/api.xpack.xpack.usage.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 9.1.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" + "time" +) + +func newXPackUsageFunc(t Transport) XPackUsage { + return func(o ...func(*XPackUsageRequest)) (*Response, error) { + var r = XPackUsageRequest{} + for _, f := range o { + f(&r) + } + + if transport, ok := t.(Instrumented); ok { + r.Instrument = transport.InstrumentationEnabled() + } + + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// XPackUsage - Retrieves usage information about the installed X-Pack features. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html. +type XPackUsage func(o ...func(*XPackUsageRequest)) (*Response, error) + +// XPackUsageRequest configures the X Pack Usage API request. +type XPackUsageRequest struct { + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context + + Instrument Instrumentation +} + +// Do executes the request and returns response or error. 
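+//
+// A minimal usage sketch (editorial illustration; "es" is an assumed client):
+//
+//	res, err := es.XPack.Usage(es.XPack.Usage.WithHuman())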
+func (r XPackUsageRequest) Do(providedCtx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ctx context.Context + ) + + if instrument, ok := r.Instrument.(Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + method = "GET" + + path.Grow(7 + len("/_xpack/usage")) + path.WriteString("http://") + path.WriteString("/_xpack/usage") + + params = make(map[string]string) + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.BeforeRequest(req, "xpack.usage") + } + res, err := transport.Perform(req) + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "xpack.usage") + } + if err != nil { + if instrument, ok := r.Instrument.(Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f XPackUsage) WithContext(v context.Context) func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.ctx = v + } +} + +// WithMasterTimeout - specify timeout for watch write operation. +func (f XPackUsage) WithMasterTimeout(v time.Duration) func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f XPackUsage) WithPretty() func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f XPackUsage) WithHuman() func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f XPackUsage) WithErrorTrace() func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f XPackUsage) WithFilterPath(v ...string) func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f XPackUsage) WithHeader(h map[string]string) func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f XPackUsage) WithOpaqueID(s string) func(*XPackUsageRequest) { + return func(r *XPackUsageRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/doc.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/doc.go new file mode 100644 index 000000000..404a1650d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/doc.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +/* +Package esapi provides the Go API for Elasticsearch. + +It is automatically included in the client provided by the +github.com/elastic/go-elasticsearch package: + + es, _ := elasticsearch.NewDefaultClient() + res, _ := es.Info() + log.Println(res) + +For each Elasticsearch API, such as "Index", the package exports two corresponding types: +a function and a struct. + +The function type allows you to call the Elasticsearch API as a method on the client, +passing the parameters as arguments: + + res, err := es.Index( + "test", // Index name + strings.NewReader(`{"title" : "Test"}`), // Document body + es.Index.WithDocumentID("1"), // Document ID + es.Index.WithRefresh("true"), // Refresh + ) + if err != nil { + log.Fatalf("ERROR: %s", err) + } + defer res.Body.Close() + + log.Println(res) + + // => [201 Created] {"_index":"test","_type":"_doc","_id":"1" ... + +The struct type allows for a more hands-on approach, where you create a new struct, with the +request configuration as fields, and call the Do() method +with a context and the client as arguments: + + req := esapi.IndexRequest{ + Index: "test", // Index name + Body: strings.NewReader(`{"title" : "Test"}`), // Document body + DocumentID: "1", // Document ID + Refresh: "true", // Refresh + } + + res, err := req.Do(context.Background(), es) + if err != nil { + log.Fatalf("Error getting response: %s", err) + } + defer res.Body.Close() + + log.Println(res) + + // => [200 OK] {"_index":"test","_type":"_doc","_id":"1","_version":2 ... + +The function type is a wrapper around the struct, and allows +to configure and perform the request in a more expressive way. +It has a minor overhead compared to using a struct directly; +refer to the esapi_benchmark_test.go suite for concrete numbers. + +See the documentation for each API function or struct at +https://pkg.go.dev/github.com/elastic/go-elasticsearch, +or locally by: + + go doc github.com/elastic/go-elasticsearch/v9/esapi Index + go doc github.com/elastic/go-elasticsearch/v9/esapi IndexRequest + +# Response + +The esapi.Response type is a lightweight wrapper around http.Response. + +The res.Body field is an io.ReadCloser, leaving the JSON parsing to the +calling code, in the interest of performance and flexibility +(eg. 
to allow using a custom JSON parser). + +It is imperative to close the response body for a non-nil response. + +The Response type implements a couple of convenience methods for accessing +the status, checking an error status code or printing +the response body for debugging purposes. + +# Additional Information + +See the Elasticsearch documentation at +https://www.elastic.co/guide/en/elasticsearch/reference/master/api-conventions.html for detailed information +about the API endpoints and parameters. + +The Go API is generated from the Elasticsearch JSON specification at +https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api +by the internal package available at +https://github.com/elastic/go-elasticsearch/tree/master/internal/cmd/generate/commands/gensource. + +The API is tested by integration tests common to all Elasticsearch official clients, generated from the +source at https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test. +The generator is provided by the internal package available at internal/cmd/generate/commands/gentests. +*/ +package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.go new file mode 100644 index 000000000..3c5d189a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package esapi + +import ( + "net/http" + "strconv" + "time" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/internal/version" +) + +// Version returns the package version as a string. +const Version = version.Client + +// Transport defines the interface for an API client. +type Transport interface { + Perform(*http.Request) (*http.Response, error) +} + +// Instrumented allows to retrieve the current transport Instrumentation +type Instrumented elastictransport.Instrumented + +// Instrumentation defines the interface for the instrumentation API. +type Instrumentation elastictransport.Instrumentation + +// BoolPtr returns a pointer to v. +// +// It is used as a convenience function for converting a bool value +// into a pointer when passing the value to a function or struct field +// which expects a pointer. +func BoolPtr(v bool) *bool { return &v } + +// IntPtr returns a pointer to v. +// +// It is used as a convenience function for converting an int value +// into a pointer when passing the value to a function or struct field +// which expects a pointer. +func IntPtr(v int) *int { return &v } + +// formatDuration converts duration to a string in the format +// accepted by Elasticsearch. 
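+// For example, with the conversion below, 500*time.Microsecond is rendered
+// as "500000nanos" and 90*time.Second as "90000ms" (illustrative values,
+// derived from the function body rather than the generated sources).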
+func formatDuration(d time.Duration) string { + if d < time.Millisecond { + return strconv.FormatInt(int64(d), 10) + "nanos" + } + return strconv.FormatInt(int64(d)/int64(time.Millisecond), 10) + "ms" +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.request.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.request.go new file mode 100644 index 000000000..234983615 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.request.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package esapi + +import ( + "context" + "io" + "net/http" +) + +const ( + headerContentType = "Content-Type" +) + +var ( + headerContentTypeJSON = []string{"application/json"} +) + +// Request defines the API request. +// +type Request interface { + Do(ctx context.Context, transport Transport) (*Response, error) +} + +// newRequest creates an HTTP request. +// +func newRequest(method, path string, body io.Reader) (*http.Request, error) { + return http.NewRequest(method, path, body) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.response.go b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.response.go new file mode 100644 index 000000000..a657b5ee9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/esapi/esapi.response.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package esapi + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" +) + +// Response represents the API response. +// +type Response struct { + StatusCode int + Header http.Header + Body io.ReadCloser +} + +// String returns the response as a string. +// +// The intended usage is for testing or debugging only. 
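+// A sketch of the expected output shape, assuming a 200 response whose body
+// is {"acknowledged":true}:
+//
+//	fmt.Println(res.String())
+//	// => [200 OK] {"acknowledged":true}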
+// +func (r *Response) String() string { + var ( + out = new(bytes.Buffer) + b1 = bytes.NewBuffer([]byte{}) + b2 = bytes.NewBuffer([]byte{}) + tr io.Reader + ) + + if r != nil && r.Body != nil { + tr = io.TeeReader(r.Body, b1) + defer r.Body.Close() + + if _, err := io.Copy(b2, tr); err != nil { + out.WriteString(fmt.Sprintf("<error reading response body: %s>", err)) + return out.String() + } + defer func() { r.Body = ioutil.NopCloser(b1) }() + } + + if r != nil { + out.WriteString(fmt.Sprintf("[%d %s]", r.StatusCode, http.StatusText(r.StatusCode))) + if r.StatusCode > 0 { + out.WriteRune(' ') + } + } else { + out.WriteString("[0 <nil>]") + } + + if r != nil && r.Body != nil { + out.ReadFrom(b2) // errcheck exclude (*bytes.Buffer).ReadFrom + } + + return out.String() +} + +// Status returns the response status as a string. +// +func (r *Response) Status() string { + var b strings.Builder + if r != nil { + b.WriteString(strconv.Itoa(r.StatusCode)) + b.WriteString(" ") + b.WriteString(http.StatusText(r.StatusCode)) + } + return b.String() +} + +// IsError returns true when the response status indicates failure. +// +func (r *Response) IsError() bool { + return r.StatusCode > 299 +} + +// Warnings returns the deprecation warnings from response headers. +// +func (r *Response) Warnings() []string { + return r.Header["Warning"] +} + +// HasWarnings returns true when the response headers contain deprecation warnings. +// +func (r *Response) HasWarnings() bool { + return len(r.Warnings()) > 0 +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/internal/version/version.go b/vendor/github.com/elastic/go-elasticsearch/v9/internal/version/version.go new file mode 100644 index 000000000..877e34554 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/internal/version/version.go @@ -0,0 +1,21 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package version + +// Client returns the client version as a string. +const Client = "9.1.0" diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/api._.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/api._.go new file mode 100644 index 000000000..dcddb70e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/api._.go @@ -0,0 +1,14140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package typedapi + +import ( + "github.com/elastic/elastic-transport-go/v8/elastictransport" + async_search_delete "github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete" + async_search_get "github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get" + async_search_status "github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status" + async_search_submit "github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit" + autoscaling_delete_autoscaling_policy "github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy" + autoscaling_get_autoscaling_capacity "github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity" + autoscaling_get_autoscaling_policy "github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy" + autoscaling_put_autoscaling_policy "github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy" + capabilities "github.com/elastic/go-elasticsearch/v9/typedapi/capabilities" + cat_aliases "github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases" + cat_allocation "github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation" + cat_component_templates "github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates" + cat_count "github.com/elastic/go-elasticsearch/v9/typedapi/cat/count" + cat_fielddata "github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata" + cat_health "github.com/elastic/go-elasticsearch/v9/typedapi/cat/health" + cat_help "github.com/elastic/go-elasticsearch/v9/typedapi/cat/help" + cat_indices "github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices" + cat_master "github.com/elastic/go-elasticsearch/v9/typedapi/cat/master" + cat_ml_datafeeds "github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds" + cat_ml_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics" + cat_ml_jobs "github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs" + cat_ml_trained_models "github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels" + cat_nodeattrs "github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs" + cat_nodes "github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes" + cat_pending_tasks "github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks" + cat_plugins "github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins" + cat_recovery "github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery" + cat_repositories "github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories" + cat_segments "github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments" + cat_shards "github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards" + cat_snapshots "github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots" + cat_tasks "github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks" + cat_templates "github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates" + 
cat_thread_pool "github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool" + cat_transforms "github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms" + ccr_delete_auto_follow_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern" + ccr_follow "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow" + ccr_follow_info "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo" + ccr_follow_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats" + ccr_forget_follower "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower" + ccr_get_auto_follow_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern" + ccr_pause_auto_follow_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern" + ccr_pause_follow "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow" + ccr_put_auto_follow_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern" + ccr_resume_auto_follow_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern" + ccr_resume_follow "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow" + ccr_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats" + ccr_unfollow "github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow" + cluster_allocation_explain "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain" + cluster_delete_component_template "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate" + cluster_delete_voting_config_exclusions "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletevotingconfigexclusions" + cluster_exists_component_template "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/existscomponenttemplate" + cluster_get_component_template "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate" + cluster_get_settings "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings" + cluster_health "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health" + cluster_info "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info" + cluster_pending_tasks "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks" + cluster_post_voting_config_exclusions "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/postvotingconfigexclusions" + cluster_put_component_template "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate" + cluster_put_settings "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings" + cluster_remote_info "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo" + cluster_reroute "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute" + cluster_state "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state" + cluster_stats "github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats" + connector_check_in "github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin" + connector_delete "github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete" + connector_get "github.com/elastic/go-elasticsearch/v9/typedapi/connector/get" + connector_last_sync "github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync" + connector_list "github.com/elastic/go-elasticsearch/v9/typedapi/connector/list" + connector_post "github.com/elastic/go-elasticsearch/v9/typedapi/connector/post" + connector_put "github.com/elastic/go-elasticsearch/v9/typedapi/connector/put" + 
connector_secret_post "github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost" + connector_sync_job_cancel "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel" + connector_sync_job_check_in "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin" + connector_sync_job_claim "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim" + connector_sync_job_delete "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete" + connector_sync_job_error "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror" + connector_sync_job_get "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget" + connector_sync_job_list "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist" + connector_sync_job_post "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost" + connector_sync_job_update_stats "github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats" + connector_update_active_filtering "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering" + connector_update_api_key_id "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid" + connector_update_configuration "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration" + connector_update_error "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror" + connector_update_features "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures" + connector_update_filtering "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering" + connector_update_filtering_validation "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation" + connector_update_index_name "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname" + connector_update_name "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename" + connector_update_native "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative" + connector_update_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline" + connector_update_scheduling "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling" + connector_update_service_type "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype" + connector_update_status "github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus" + core_bulk "github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk" + core_clear_scroll "github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll" + core_close_point_in_time "github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime" + core_count "github.com/elastic/go-elasticsearch/v9/typedapi/core/count" + core_create "github.com/elastic/go-elasticsearch/v9/typedapi/core/create" + core_delete "github.com/elastic/go-elasticsearch/v9/typedapi/core/delete" + core_delete_by_query "github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery" + core_delete_by_query_rethrottle "github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle" + core_delete_script "github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript" + core_exists "github.com/elastic/go-elasticsearch/v9/typedapi/core/exists" + core_exists_source "github.com/elastic/go-elasticsearch/v9/typedapi/core/existssource" + core_explain "github.com/elastic/go-elasticsearch/v9/typedapi/core/explain" + core_field_caps 
"github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps" + core_get "github.com/elastic/go-elasticsearch/v9/typedapi/core/get" + core_get_script "github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript" + core_get_script_context "github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext" + core_get_script_languages "github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages" + core_get_source "github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource" + core_health_report "github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport" + core_index "github.com/elastic/go-elasticsearch/v9/typedapi/core/index" + core_info "github.com/elastic/go-elasticsearch/v9/typedapi/core/info" + core_mget "github.com/elastic/go-elasticsearch/v9/typedapi/core/mget" + core_msearch "github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch" + core_msearch_template "github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate" + core_mtermvectors "github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors" + core_open_point_in_time "github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime" + core_ping "github.com/elastic/go-elasticsearch/v9/typedapi/core/ping" + core_put_script "github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript" + core_rank_eval "github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval" + core_reindex "github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex" + core_reindex_rethrottle "github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle" + core_render_search_template "github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate" + core_scripts_painless_execute "github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute" + core_scroll "github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll" + core_search "github.com/elastic/go-elasticsearch/v9/typedapi/core/search" + core_search_mvt "github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt" + core_search_shards "github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards" + core_search_template "github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate" + core_terms_enum "github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum" + core_termvectors "github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors" + core_update "github.com/elastic/go-elasticsearch/v9/typedapi/core/update" + core_update_by_query "github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery" + core_update_by_query_rethrottle "github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle" + dangling_indices_delete_dangling_index "github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex" + dangling_indices_import_dangling_index "github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex" + dangling_indices_list_dangling_indices "github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices" + enrich_delete_policy "github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy" + enrich_execute_policy "github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy" + enrich_get_policy "github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy" + enrich_put_policy "github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy" + enrich_stats "github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats" + eql_delete 
"github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete" + eql_get "github.com/elastic/go-elasticsearch/v9/typedapi/eql/get" + eql_get_status "github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus" + eql_search "github.com/elastic/go-elasticsearch/v9/typedapi/eql/search" + esql_async_query "github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery" + esql_async_query_delete "github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete" + esql_async_query_get "github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget" + esql_async_query_stop "github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop" + esql_get_query "github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery" + esql_list_queries "github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries" + esql_query "github.com/elastic/go-elasticsearch/v9/typedapi/esql/query" + features_get_features "github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures" + features_reset_features "github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures" + fleet_global_checkpoints "github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints" + fleet_msearch "github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch" + fleet_post_secret "github.com/elastic/go-elasticsearch/v9/typedapi/fleet/postsecret" + fleet_search "github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search" + graph_explore "github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore" + ilm_delete_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle" + ilm_explain_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle" + ilm_get_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle" + ilm_get_status "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus" + ilm_migrate_to_data_tiers "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers" + ilm_move_to_step "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep" + ilm_put_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle" + ilm_remove_policy "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy" + ilm_retry "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry" + ilm_start "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start" + ilm_stop "github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop" + indices_add_block "github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock" + indices_analyze "github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze" + indices_cancel_migrate_reindex "github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex" + indices_clear_cache "github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache" + indices_clone "github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone" + indices_close "github.com/elastic/go-elasticsearch/v9/typedapi/indices/close" + indices_create "github.com/elastic/go-elasticsearch/v9/typedapi/indices/create" + indices_create_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream" + indices_create_from "github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom" + indices_data_streams_stats "github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats" + indices_delete "github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete" + indices_delete_alias "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias" + 
indices_delete_data_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle" + indices_delete_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream" + indices_delete_data_stream_options "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions" + indices_delete_index_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate" + indices_delete_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate" + indices_disk_usage "github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage" + indices_downsample "github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample" + indices_exists "github.com/elastic/go-elasticsearch/v9/typedapi/indices/exists" + indices_exists_alias "github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsalias" + indices_exists_index_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsindextemplate" + indices_exists_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/existstemplate" + indices_explain_data_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle" + indices_field_usage_stats "github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats" + indices_flush "github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush" + indices_forcemerge "github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge" + indices_get "github.com/elastic/go-elasticsearch/v9/typedapi/indices/get" + indices_get_alias "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias" + indices_get_data_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle" + indices_get_data_lifecycle_stats "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats" + indices_get_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream" + indices_get_data_stream_options "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions" + indices_get_data_stream_settings "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings" + indices_get_field_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping" + indices_get_index_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate" + indices_get_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping" + indices_get_migrate_reindex_status "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus" + indices_get_settings "github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings" + indices_get_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate" + indices_migrate_reindex "github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex" + indices_migrate_to_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream" + indices_modify_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream" + indices_open "github.com/elastic/go-elasticsearch/v9/typedapi/indices/open" + indices_promote_data_stream "github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream" + indices_put_alias "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias" + indices_put_data_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle" + indices_put_data_stream_options 
"github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions" + indices_put_data_stream_settings "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings" + indices_put_index_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate" + indices_put_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping" + indices_put_settings "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings" + indices_put_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate" + indices_recovery "github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery" + indices_refresh "github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh" + indices_reload_search_analyzers "github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers" + indices_remove_block "github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock" + indices_resolve_cluster "github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster" + indices_resolve_index "github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex" + indices_rollover "github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover" + indices_segments "github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments" + indices_shard_stores "github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores" + indices_shrink "github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink" + indices_simulate_index_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate" + indices_simulate_template "github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate" + indices_split "github.com/elastic/go-elasticsearch/v9/typedapi/indices/split" + indices_stats "github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats" + indices_update_aliases "github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases" + indices_validate_query "github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery" + inference_chat_completion_unified "github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified" + inference_completion "github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion" + inference_delete "github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete" + inference_get "github.com/elastic/go-elasticsearch/v9/typedapi/inference/get" + inference_inference "github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference" + inference_put "github.com/elastic/go-elasticsearch/v9/typedapi/inference/put" + inference_put_alibabacloud "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud" + inference_put_amazonbedrock "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock" + inference_put_amazonsagemaker "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker" + inference_put_anthropic "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic" + inference_put_azureaistudio "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio" + inference_put_azureopenai "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai" + inference_put_cohere "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere" + inference_put_custom "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom" + inference_put_deepseek "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek" + 
inference_put_elasticsearch "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch" + inference_put_elser "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser" + inference_put_googleaistudio "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio" + inference_put_googlevertexai "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai" + inference_put_hugging_face "github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface" + inference_put_jinaai "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai" + inference_put_mistral "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral" + inference_put_openai "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai" + inference_put_voyageai "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai" + inference_put_watsonx "github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx" + inference_rerank "github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank" + inference_sparse_embedding "github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding" + inference_stream_completion "github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion" + inference_text_embedding "github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding" + inference_update "github.com/elastic/go-elasticsearch/v9/typedapi/inference/update" + ingest_delete_geoip_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase" + ingest_delete_ip_location_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase" + ingest_delete_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline" + ingest_geo_ip_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats" + ingest_get_geoip_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase" + ingest_get_ip_location_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase" + ingest_get_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline" + ingest_processor_grok "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok" + ingest_put_geoip_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase" + ingest_put_ip_location_database "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase" + ingest_put_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline" + ingest_simulate "github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate" + license_delete "github.com/elastic/go-elasticsearch/v9/typedapi/license/delete" + license_get "github.com/elastic/go-elasticsearch/v9/typedapi/license/get" + license_get_basic_status "github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus" + license_get_trial_status "github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus" + license_post "github.com/elastic/go-elasticsearch/v9/typedapi/license/post" + license_post_start_basic "github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic" + license_post_start_trial "github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial" + logstash_delete_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/logstash/deletepipeline" + logstash_get_pipeline "github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline" + logstash_put_pipeline 
"github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline" + migration_deprecations "github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations" + migration_get_feature_upgrade_status "github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus" + migration_post_feature_upgrade "github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade" + ml_clear_trained_model_deployment_cache "github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache" + ml_close_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob" + ml_delete_calendar "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar" + ml_delete_calendar_event "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent" + ml_delete_calendar_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob" + ml_delete_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed" + ml_delete_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics" + ml_delete_expired_data "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata" + ml_delete_filter "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter" + ml_delete_forecast "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast" + ml_delete_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob" + ml_delete_model_snapshot "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot" + ml_delete_trained_model "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel" + ml_delete_trained_model_alias "github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias" + ml_estimate_model_memory "github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory" + ml_evaluate_data_frame "github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe" + ml_explain_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics" + ml_flush_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob" + ml_forecast "github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast" + ml_get_buckets "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets" + ml_get_calendar_events "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents" + ml_get_calendars "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars" + ml_get_categories "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories" + ml_get_datafeeds "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds" + ml_get_datafeed_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats" + ml_get_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics" + ml_get_data_frame_analytics_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats" + ml_get_filters "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters" + ml_get_influencers "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers" + ml_get_jobs "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs" + ml_get_job_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats" + ml_get_memory_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats" + ml_get_model_snapshots "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots" + ml_get_model_snapshot_upgrade_stats 
"github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats" + ml_get_overall_buckets "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets" + ml_get_records "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords" + ml_get_trained_models "github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels" + ml_get_trained_models_stats "github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats" + ml_infer_trained_model "github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel" + ml_info "github.com/elastic/go-elasticsearch/v9/typedapi/ml/info" + ml_open_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob" + ml_post_calendar_events "github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents" + ml_post_data "github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata" + ml_preview_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed" + ml_preview_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics" + ml_put_calendar "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar" + ml_put_calendar_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob" + ml_put_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed" + ml_put_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics" + ml_put_filter "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter" + ml_put_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob" + ml_put_trained_model "github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel" + ml_put_trained_model_alias "github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias" + ml_put_trained_model_definition_part "github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart" + ml_put_trained_model_vocabulary "github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary" + ml_reset_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob" + ml_revert_model_snapshot "github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot" + ml_set_upgrade_mode "github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode" + ml_start_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed" + ml_start_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics" + ml_start_trained_model_deployment "github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment" + ml_stop_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed" + ml_stop_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics" + ml_stop_trained_model_deployment "github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment" + ml_update_datafeed "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed" + ml_update_data_frame_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics" + ml_update_filter "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter" + ml_update_job "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob" + ml_update_model_snapshot "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot" + ml_update_trained_model_deployment "github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment" + ml_upgrade_job_snapshot 
"github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot" + ml_validate "github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate" + ml_validate_detector "github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector" + monitoring_bulk "github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk" + nodes_clear_repositories_metering_archive "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive" + nodes_get_repositories_metering_info "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo" + nodes_hot_threads "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads" + nodes_info "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info" + nodes_reload_secure_settings "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings" + nodes_stats "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats" + nodes_usage "github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage" + profiling_flamegraph "github.com/elastic/go-elasticsearch/v9/typedapi/profiling/flamegraph" + profiling_stacktraces "github.com/elastic/go-elasticsearch/v9/typedapi/profiling/stacktraces" + profiling_status "github.com/elastic/go-elasticsearch/v9/typedapi/profiling/status" + profiling_topn_functions "github.com/elastic/go-elasticsearch/v9/typedapi/profiling/topnfunctions" + query_rules_delete_rule "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule" + query_rules_delete_ruleset "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset" + query_rules_get_rule "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule" + query_rules_get_ruleset "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset" + query_rules_list_rulesets "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets" + query_rules_put_rule "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule" + query_rules_put_ruleset "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset" + query_rules_test "github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test" + rollup_delete_job "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob" + rollup_get_jobs "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs" + rollup_get_rollup_caps "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps" + rollup_get_rollup_index_caps "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps" + rollup_put_job "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob" + rollup_rollup_search "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch" + rollup_start_job "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob" + rollup_stop_job "github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob" + searchable_snapshots_cache_stats "github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats" + searchable_snapshots_clear_cache "github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache" + searchable_snapshots_mount "github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount" + searchable_snapshots_stats "github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats" + search_application_delete "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete" + search_application_delete_behavioral_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics" + 
search_application_get "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get" + search_application_get_behavioral_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics" + search_application_list "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list" + search_application_post_behavioral_analytics_event "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent" + search_application_put "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put" + search_application_put_behavioral_analytics "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics" + search_application_render_query "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery" + search_application_search "github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search" + security_activate_user_profile "github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile" + security_authenticate "github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate" + security_bulk_delete_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole" + security_bulk_put_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole" + security_bulk_update_api_keys "github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys" + security_change_password "github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword" + security_clear_api_key_cache "github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache" + security_clear_cached_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges" + security_clear_cached_realms "github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms" + security_clear_cached_roles "github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles" + security_clear_cached_service_tokens "github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens" + security_create_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey" + security_create_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey" + security_create_service_token "github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken" + security_delegate_pki "github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki" + security_delete_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges" + security_delete_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole" + security_delete_role_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping" + security_delete_service_token "github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken" + security_delete_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser" + security_disable_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser" + security_disable_user_profile "github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile" + security_enable_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser" + security_enable_user_profile "github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile" + security_enroll_kibana "github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana" + 
security_enroll_node "github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode" + security_get_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey" + security_get_builtin_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges" + security_get_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges" + security_get_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole" + security_get_role_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping" + security_get_service_accounts "github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts" + security_get_service_credentials "github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials" + security_get_settings "github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings" + security_get_token "github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken" + security_get_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser" + security_get_user_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges" + security_get_user_profile "github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile" + security_grant_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey" + security_has_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges" + security_has_privileges_user_profile "github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile" + security_invalidate_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey" + security_invalidate_token "github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken" + security_oidc_authenticate "github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate" + security_oidc_logout "github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout" + security_oidc_prepare_authentication "github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication" + security_put_privileges "github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges" + security_put_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole" + security_put_role_mapping "github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping" + security_put_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser" + security_query_api_keys "github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys" + security_query_role "github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole" + security_query_user "github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser" + security_saml_authenticate "github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate" + security_saml_complete_logout "github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout" + security_saml_invalidate "github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate" + security_saml_logout "github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout" + security_saml_prepare_authentication "github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication" + security_saml_service_provider_metadata "github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata" + security_suggest_user_profiles 
"github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles" + security_update_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey" + security_update_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey" + security_update_settings "github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings" + security_update_user_profile_data "github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata" + shutdown_delete_node "github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode" + shutdown_get_node "github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode" + shutdown_put_node "github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode" + simulate_ingest "github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest" + slm_delete_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle" + slm_execute_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle" + slm_execute_retention "github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention" + slm_get_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle" + slm_get_stats "github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats" + slm_get_status "github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus" + slm_put_lifecycle "github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle" + slm_start "github.com/elastic/go-elasticsearch/v9/typedapi/slm/start" + slm_stop "github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop" + snapshot_cleanup_repository "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository" + snapshot_clone "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone" + snapshot_create "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create" + snapshot_create_repository "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository" + snapshot_delete "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete" + snapshot_delete_repository "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository" + snapshot_get "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get" + snapshot_get_repository "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository" + snapshot_repository_analyze "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze" + snapshot_repository_verify_integrity "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity" + snapshot_restore "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore" + snapshot_status "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status" + snapshot_verify_repository "github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository" + sql_clear_cursor "github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor" + sql_delete_async "github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync" + sql_get_async "github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync" + sql_get_async_status "github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus" + sql_query "github.com/elastic/go-elasticsearch/v9/typedapi/sql/query" + sql_translate "github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate" + ssl_certificates "github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates" + streams_logs_disable "github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsdisable" + 
+	streams_logs_enable "github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsenable"
+	streams_status "github.com/elastic/go-elasticsearch/v9/typedapi/streams/status"
+	synonyms_delete_synonym "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym"
+	synonyms_delete_synonym_rule "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule"
+	synonyms_get_synonym "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym"
+	synonyms_get_synonym_rule "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule"
+	synonyms_get_synonyms_sets "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets"
+	synonyms_put_synonym "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym"
+	synonyms_put_synonym_rule "github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule"
+	tasks_cancel "github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel"
+	tasks_get "github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get"
+	tasks_list "github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list"
+	text_structure_find_field_structure "github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure"
+	text_structure_find_message_structure "github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure"
+	text_structure_find_structure "github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure"
+	text_structure_test_grok_pattern "github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern"
+	transform_delete_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform"
+	transform_get_node_stats "github.com/elastic/go-elasticsearch/v9/typedapi/transform/getnodestats"
+	transform_get_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform"
+	transform_get_transform_stats "github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats"
+	transform_preview_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform"
+	transform_put_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform"
+	transform_reset_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform"
+	transform_schedule_now_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform"
+	transform_start_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform"
+	transform_stop_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform"
+	transform_update_transform "github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform"
+	transform_upgrade_transforms "github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms"
+	watcher_ack_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch"
+	watcher_activate_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch"
+	watcher_deactivate_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch"
+	watcher_delete_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch"
+	watcher_execute_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch"
+	watcher_get_settings "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings"
+	watcher_get_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch"
+	watcher_put_watch "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch"
"github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch" + watcher_query_watches "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches" + watcher_start "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start" + watcher_stats "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats" + watcher_stop "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop" + watcher_update_settings "github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings" + xpack_info "github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info" + xpack_usage "github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage" +) + +type AsyncSearch struct { + Delete async_search_delete.NewDelete + Get async_search_get.NewGet + Status async_search_status.NewStatus + Submit async_search_submit.NewSubmit +} + +type Autoscaling struct { + DeleteAutoscalingPolicy autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicy + GetAutoscalingCapacity autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacity + GetAutoscalingPolicy autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicy + PutAutoscalingPolicy autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicy +} + +type Capabilities struct { + Capabilities capabilities.NewCapabilities +} + +type Cat struct { + Aliases cat_aliases.NewAliases + Allocation cat_allocation.NewAllocation + ComponentTemplates cat_component_templates.NewComponentTemplates + Count cat_count.NewCount + Fielddata cat_fielddata.NewFielddata + Health cat_health.NewHealth + Help cat_help.NewHelp + Indices cat_indices.NewIndices + Master cat_master.NewMaster + MlDataFrameAnalytics cat_ml_data_frame_analytics.NewMlDataFrameAnalytics + MlDatafeeds cat_ml_datafeeds.NewMlDatafeeds + MlJobs cat_ml_jobs.NewMlJobs + MlTrainedModels cat_ml_trained_models.NewMlTrainedModels + Nodeattrs cat_nodeattrs.NewNodeattrs + Nodes cat_nodes.NewNodes + PendingTasks cat_pending_tasks.NewPendingTasks + Plugins cat_plugins.NewPlugins + Recovery cat_recovery.NewRecovery + Repositories cat_repositories.NewRepositories + Segments cat_segments.NewSegments + Shards cat_shards.NewShards + Snapshots cat_snapshots.NewSnapshots + Tasks cat_tasks.NewTasks + Templates cat_templates.NewTemplates + ThreadPool cat_thread_pool.NewThreadPool + Transforms cat_transforms.NewTransforms +} + +type Ccr struct { + DeleteAutoFollowPattern ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPattern + Follow ccr_follow.NewFollow + FollowInfo ccr_follow_info.NewFollowInfo + FollowStats ccr_follow_stats.NewFollowStats + ForgetFollower ccr_forget_follower.NewForgetFollower + GetAutoFollowPattern ccr_get_auto_follow_pattern.NewGetAutoFollowPattern + PauseAutoFollowPattern ccr_pause_auto_follow_pattern.NewPauseAutoFollowPattern + PauseFollow ccr_pause_follow.NewPauseFollow + PutAutoFollowPattern ccr_put_auto_follow_pattern.NewPutAutoFollowPattern + ResumeAutoFollowPattern ccr_resume_auto_follow_pattern.NewResumeAutoFollowPattern + ResumeFollow ccr_resume_follow.NewResumeFollow + Stats ccr_stats.NewStats + Unfollow ccr_unfollow.NewUnfollow +} + +type Cluster struct { + AllocationExplain cluster_allocation_explain.NewAllocationExplain + DeleteComponentTemplate cluster_delete_component_template.NewDeleteComponentTemplate + DeleteVotingConfigExclusions cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusions + ExistsComponentTemplate cluster_exists_component_template.NewExistsComponentTemplate + GetComponentTemplate cluster_get_component_template.NewGetComponentTemplate + GetSettings 
+	Health cluster_health.NewHealth
+	Info cluster_info.NewInfo
+	PendingTasks cluster_pending_tasks.NewPendingTasks
+	PostVotingConfigExclusions cluster_post_voting_config_exclusions.NewPostVotingConfigExclusions
+	PutComponentTemplate cluster_put_component_template.NewPutComponentTemplate
+	PutSettings cluster_put_settings.NewPutSettings
+	RemoteInfo cluster_remote_info.NewRemoteInfo
+	Reroute cluster_reroute.NewReroute
+	State cluster_state.NewState
+	Stats cluster_stats.NewStats
+}
+
+type Connector struct {
+	CheckIn connector_check_in.NewCheckIn
+	Delete connector_delete.NewDelete
+	Get connector_get.NewGet
+	LastSync connector_last_sync.NewLastSync
+	List connector_list.NewList
+	Post connector_post.NewPost
+	Put connector_put.NewPut
+	SecretPost connector_secret_post.NewSecretPost
+	SyncJobCancel connector_sync_job_cancel.NewSyncJobCancel
+	SyncJobCheckIn connector_sync_job_check_in.NewSyncJobCheckIn
+	SyncJobClaim connector_sync_job_claim.NewSyncJobClaim
+	SyncJobDelete connector_sync_job_delete.NewSyncJobDelete
+	SyncJobError connector_sync_job_error.NewSyncJobError
+	SyncJobGet connector_sync_job_get.NewSyncJobGet
+	SyncJobList connector_sync_job_list.NewSyncJobList
+	SyncJobPost connector_sync_job_post.NewSyncJobPost
+	SyncJobUpdateStats connector_sync_job_update_stats.NewSyncJobUpdateStats
+	UpdateActiveFiltering connector_update_active_filtering.NewUpdateActiveFiltering
+	UpdateApiKeyId connector_update_api_key_id.NewUpdateApiKeyId
+	UpdateConfiguration connector_update_configuration.NewUpdateConfiguration
+	UpdateError connector_update_error.NewUpdateError
+	UpdateFeatures connector_update_features.NewUpdateFeatures
+	UpdateFiltering connector_update_filtering.NewUpdateFiltering
+	UpdateFilteringValidation connector_update_filtering_validation.NewUpdateFilteringValidation
+	UpdateIndexName connector_update_index_name.NewUpdateIndexName
+	UpdateName connector_update_name.NewUpdateName
+	UpdateNative connector_update_native.NewUpdateNative
+	UpdatePipeline connector_update_pipeline.NewUpdatePipeline
+	UpdateScheduling connector_update_scheduling.NewUpdateScheduling
+	UpdateServiceType connector_update_service_type.NewUpdateServiceType
+	UpdateStatus connector_update_status.NewUpdateStatus
+}
+
+type Core struct {
+	Bulk core_bulk.NewBulk
+	ClearScroll core_clear_scroll.NewClearScroll
+	ClosePointInTime core_close_point_in_time.NewClosePointInTime
+	Count core_count.NewCount
+	Create core_create.NewCreate
+	Delete core_delete.NewDelete
+	DeleteByQuery core_delete_by_query.NewDeleteByQuery
+	DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle
+	DeleteScript core_delete_script.NewDeleteScript
+	Exists core_exists.NewExists
+	ExistsSource core_exists_source.NewExistsSource
+	Explain core_explain.NewExplain
+	FieldCaps core_field_caps.NewFieldCaps
+	Get core_get.NewGet
+	GetScript core_get_script.NewGetScript
+	GetScriptContext core_get_script_context.NewGetScriptContext
+	GetScriptLanguages core_get_script_languages.NewGetScriptLanguages
+	GetSource core_get_source.NewGetSource
+	HealthReport core_health_report.NewHealthReport
+	Index core_index.NewIndex
+	Info core_info.NewInfo
+	Mget core_mget.NewMget
+	Msearch core_msearch.NewMsearch
+	MsearchTemplate core_msearch_template.NewMsearchTemplate
+	Mtermvectors core_mtermvectors.NewMtermvectors
+	OpenPointInTime core_open_point_in_time.NewOpenPointInTime
+	Ping core_ping.NewPing
+	PutScript core_put_script.NewPutScript
+	RankEval core_rank_eval.NewRankEval
+	Reindex core_reindex.NewReindex
+	ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle
+	RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate
+	ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute
+	Scroll core_scroll.NewScroll
+	Search core_search.NewSearch
+	SearchMvt core_search_mvt.NewSearchMvt
+	SearchShards core_search_shards.NewSearchShards
+	SearchTemplate core_search_template.NewSearchTemplate
+	TermsEnum core_terms_enum.NewTermsEnum
+	Termvectors core_termvectors.NewTermvectors
+	Update core_update.NewUpdate
+	UpdateByQuery core_update_by_query.NewUpdateByQuery
+	UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle
+}
+
+type DanglingIndices struct {
+	DeleteDanglingIndex dangling_indices_delete_dangling_index.NewDeleteDanglingIndex
+	ImportDanglingIndex dangling_indices_import_dangling_index.NewImportDanglingIndex
+	ListDanglingIndices dangling_indices_list_dangling_indices.NewListDanglingIndices
+}
+
+type Enrich struct {
+	DeletePolicy enrich_delete_policy.NewDeletePolicy
+	ExecutePolicy enrich_execute_policy.NewExecutePolicy
+	GetPolicy enrich_get_policy.NewGetPolicy
+	PutPolicy enrich_put_policy.NewPutPolicy
+	Stats enrich_stats.NewStats
+}
+
+type Eql struct {
+	Delete eql_delete.NewDelete
+	Get eql_get.NewGet
+	GetStatus eql_get_status.NewGetStatus
+	Search eql_search.NewSearch
+}
+
+type Esql struct {
+	AsyncQuery esql_async_query.NewAsyncQuery
+	AsyncQueryDelete esql_async_query_delete.NewAsyncQueryDelete
+	AsyncQueryGet esql_async_query_get.NewAsyncQueryGet
+	AsyncQueryStop esql_async_query_stop.NewAsyncQueryStop
+	GetQuery esql_get_query.NewGetQuery
+	ListQueries esql_list_queries.NewListQueries
+	Query esql_query.NewQuery
+}
+
+type Features struct {
+	GetFeatures features_get_features.NewGetFeatures
+	ResetFeatures features_reset_features.NewResetFeatures
+}
+
+type Fleet struct {
+	GlobalCheckpoints fleet_global_checkpoints.NewGlobalCheckpoints
+	Msearch fleet_msearch.NewMsearch
+	PostSecret fleet_post_secret.NewPostSecret
+	Search fleet_search.NewSearch
+}
+
+type Graph struct {
+	Explore graph_explore.NewExplore
+}
+
+type Ilm struct {
+	DeleteLifecycle ilm_delete_lifecycle.NewDeleteLifecycle
+	ExplainLifecycle ilm_explain_lifecycle.NewExplainLifecycle
+	GetLifecycle ilm_get_lifecycle.NewGetLifecycle
+	GetStatus ilm_get_status.NewGetStatus
+	MigrateToDataTiers ilm_migrate_to_data_tiers.NewMigrateToDataTiers
+	MoveToStep ilm_move_to_step.NewMoveToStep
+	PutLifecycle ilm_put_lifecycle.NewPutLifecycle
+	RemovePolicy ilm_remove_policy.NewRemovePolicy
+	Retry ilm_retry.NewRetry
+	Start ilm_start.NewStart
+	Stop ilm_stop.NewStop
+}
+
+type Indices struct {
+	AddBlock indices_add_block.NewAddBlock
+	Analyze indices_analyze.NewAnalyze
+	CancelMigrateReindex indices_cancel_migrate_reindex.NewCancelMigrateReindex
+	ClearCache indices_clear_cache.NewClearCache
+	Clone indices_clone.NewClone
+	Close indices_close.NewClose
+	Create indices_create.NewCreate
+	CreateDataStream indices_create_data_stream.NewCreateDataStream
+	CreateFrom indices_create_from.NewCreateFrom
+	DataStreamsStats indices_data_streams_stats.NewDataStreamsStats
+	Delete indices_delete.NewDelete
+	DeleteAlias indices_delete_alias.NewDeleteAlias
+	DeleteDataLifecycle indices_delete_data_lifecycle.NewDeleteDataLifecycle
+	DeleteDataStream indices_delete_data_stream.NewDeleteDataStream
+	DeleteDataStreamOptions indices_delete_data_stream_options.NewDeleteDataStreamOptions
+	DeleteIndexTemplate indices_delete_index_template.NewDeleteIndexTemplate
+	DeleteTemplate indices_delete_template.NewDeleteTemplate
+	DiskUsage indices_disk_usage.NewDiskUsage
+	Downsample indices_downsample.NewDownsample
+	Exists indices_exists.NewExists
+	ExistsAlias indices_exists_alias.NewExistsAlias
+	ExistsIndexTemplate indices_exists_index_template.NewExistsIndexTemplate
+	ExistsTemplate indices_exists_template.NewExistsTemplate
+	ExplainDataLifecycle indices_explain_data_lifecycle.NewExplainDataLifecycle
+	FieldUsageStats indices_field_usage_stats.NewFieldUsageStats
+	Flush indices_flush.NewFlush
+	Forcemerge indices_forcemerge.NewForcemerge
+	Get indices_get.NewGet
+	GetAlias indices_get_alias.NewGetAlias
+	GetDataLifecycle indices_get_data_lifecycle.NewGetDataLifecycle
+	GetDataLifecycleStats indices_get_data_lifecycle_stats.NewGetDataLifecycleStats
+	GetDataStream indices_get_data_stream.NewGetDataStream
+	GetDataStreamOptions indices_get_data_stream_options.NewGetDataStreamOptions
+	GetDataStreamSettings indices_get_data_stream_settings.NewGetDataStreamSettings
+	GetFieldMapping indices_get_field_mapping.NewGetFieldMapping
+	GetIndexTemplate indices_get_index_template.NewGetIndexTemplate
+	GetMapping indices_get_mapping.NewGetMapping
+	GetMigrateReindexStatus indices_get_migrate_reindex_status.NewGetMigrateReindexStatus
+	GetSettings indices_get_settings.NewGetSettings
+	GetTemplate indices_get_template.NewGetTemplate
+	MigrateReindex indices_migrate_reindex.NewMigrateReindex
+	MigrateToDataStream indices_migrate_to_data_stream.NewMigrateToDataStream
+	ModifyDataStream indices_modify_data_stream.NewModifyDataStream
+	Open indices_open.NewOpen
+	PromoteDataStream indices_promote_data_stream.NewPromoteDataStream
+	PutAlias indices_put_alias.NewPutAlias
+	PutDataLifecycle indices_put_data_lifecycle.NewPutDataLifecycle
+	PutDataStreamOptions indices_put_data_stream_options.NewPutDataStreamOptions
+	PutDataStreamSettings indices_put_data_stream_settings.NewPutDataStreamSettings
+	PutIndexTemplate indices_put_index_template.NewPutIndexTemplate
+	PutMapping indices_put_mapping.NewPutMapping
+	PutSettings indices_put_settings.NewPutSettings
+	PutTemplate indices_put_template.NewPutTemplate
+	Recovery indices_recovery.NewRecovery
+	Refresh indices_refresh.NewRefresh
+	ReloadSearchAnalyzers indices_reload_search_analyzers.NewReloadSearchAnalyzers
+	RemoveBlock indices_remove_block.NewRemoveBlock
+	ResolveCluster indices_resolve_cluster.NewResolveCluster
+	ResolveIndex indices_resolve_index.NewResolveIndex
+	Rollover indices_rollover.NewRollover
+	Segments indices_segments.NewSegments
+	ShardStores indices_shard_stores.NewShardStores
+	Shrink indices_shrink.NewShrink
+	SimulateIndexTemplate indices_simulate_index_template.NewSimulateIndexTemplate
+	SimulateTemplate indices_simulate_template.NewSimulateTemplate
+	Split indices_split.NewSplit
+	Stats indices_stats.NewStats
+	UpdateAliases indices_update_aliases.NewUpdateAliases
+	ValidateQuery indices_validate_query.NewValidateQuery
+}
+
+type Inference struct {
+	ChatCompletionUnified inference_chat_completion_unified.NewChatCompletionUnified
+	Completion inference_completion.NewCompletion
+	Delete inference_delete.NewDelete
+	Get inference_get.NewGet
+	Inference inference_inference.NewInference
+	Put inference_put.NewPut
+	PutAlibabacloud inference_put_alibabacloud.NewPutAlibabacloud
+	PutAmazonbedrock inference_put_amazonbedrock.NewPutAmazonbedrock
+	PutAmazonsagemaker inference_put_amazonsagemaker.NewPutAmazonsagemaker
+	PutAnthropic inference_put_anthropic.NewPutAnthropic
+	PutAzureaistudio inference_put_azureaistudio.NewPutAzureaistudio
+	PutAzureopenai inference_put_azureopenai.NewPutAzureopenai
+	PutCohere inference_put_cohere.NewPutCohere
+	PutCustom inference_put_custom.NewPutCustom
+	PutDeepseek inference_put_deepseek.NewPutDeepseek
+	PutElasticsearch inference_put_elasticsearch.NewPutElasticsearch
+	PutElser inference_put_elser.NewPutElser
+	PutGoogleaistudio inference_put_googleaistudio.NewPutGoogleaistudio
+	PutGooglevertexai inference_put_googlevertexai.NewPutGooglevertexai
+	PutHuggingFace inference_put_hugging_face.NewPutHuggingFace
+	PutJinaai inference_put_jinaai.NewPutJinaai
+	PutMistral inference_put_mistral.NewPutMistral
+	PutOpenai inference_put_openai.NewPutOpenai
+	PutVoyageai inference_put_voyageai.NewPutVoyageai
+	PutWatsonx inference_put_watsonx.NewPutWatsonx
+	Rerank inference_rerank.NewRerank
+	SparseEmbedding inference_sparse_embedding.NewSparseEmbedding
+	StreamCompletion inference_stream_completion.NewStreamCompletion
+	TextEmbedding inference_text_embedding.NewTextEmbedding
+	Update inference_update.NewUpdate
+}
+
+type Ingest struct {
+	DeleteGeoipDatabase ingest_delete_geoip_database.NewDeleteGeoipDatabase
+	DeleteIpLocationDatabase ingest_delete_ip_location_database.NewDeleteIpLocationDatabase
+	DeletePipeline ingest_delete_pipeline.NewDeletePipeline
+	GeoIpStats ingest_geo_ip_stats.NewGeoIpStats
+	GetGeoipDatabase ingest_get_geoip_database.NewGetGeoipDatabase
+	GetIpLocationDatabase ingest_get_ip_location_database.NewGetIpLocationDatabase
+	GetPipeline ingest_get_pipeline.NewGetPipeline
+	ProcessorGrok ingest_processor_grok.NewProcessorGrok
+	PutGeoipDatabase ingest_put_geoip_database.NewPutGeoipDatabase
+	PutIpLocationDatabase ingest_put_ip_location_database.NewPutIpLocationDatabase
+	PutPipeline ingest_put_pipeline.NewPutPipeline
+	Simulate ingest_simulate.NewSimulate
+}
+
+type License struct {
+	Delete license_delete.NewDelete
+	Get license_get.NewGet
+	GetBasicStatus license_get_basic_status.NewGetBasicStatus
+	GetTrialStatus license_get_trial_status.NewGetTrialStatus
+	Post license_post.NewPost
+	PostStartBasic license_post_start_basic.NewPostStartBasic
+	PostStartTrial license_post_start_trial.NewPostStartTrial
+}
+
+type Logstash struct {
+	DeletePipeline logstash_delete_pipeline.NewDeletePipeline
+	GetPipeline logstash_get_pipeline.NewGetPipeline
+	PutPipeline logstash_put_pipeline.NewPutPipeline
+}
+
+type Migration struct {
+	Deprecations migration_deprecations.NewDeprecations
+	GetFeatureUpgradeStatus migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatus
+	PostFeatureUpgrade migration_post_feature_upgrade.NewPostFeatureUpgrade
+}
+
+type Ml struct {
+	ClearTrainedModelDeploymentCache ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCache
+	CloseJob ml_close_job.NewCloseJob
+	DeleteCalendar ml_delete_calendar.NewDeleteCalendar
+	DeleteCalendarEvent ml_delete_calendar_event.NewDeleteCalendarEvent
+	DeleteCalendarJob ml_delete_calendar_job.NewDeleteCalendarJob
+	DeleteDataFrameAnalytics ml_delete_data_frame_analytics.NewDeleteDataFrameAnalytics
+	DeleteDatafeed ml_delete_datafeed.NewDeleteDatafeed
+	DeleteExpiredData ml_delete_expired_data.NewDeleteExpiredData
+	DeleteFilter ml_delete_filter.NewDeleteFilter
+	DeleteForecast ml_delete_forecast.NewDeleteForecast
+	DeleteJob ml_delete_job.NewDeleteJob
+	DeleteModelSnapshot ml_delete_model_snapshot.NewDeleteModelSnapshot
+	DeleteTrainedModel ml_delete_trained_model.NewDeleteTrainedModel
+	DeleteTrainedModelAlias ml_delete_trained_model_alias.NewDeleteTrainedModelAlias
+	EstimateModelMemory ml_estimate_model_memory.NewEstimateModelMemory
+	EvaluateDataFrame ml_evaluate_data_frame.NewEvaluateDataFrame
+	ExplainDataFrameAnalytics ml_explain_data_frame_analytics.NewExplainDataFrameAnalytics
+	FlushJob ml_flush_job.NewFlushJob
+	Forecast ml_forecast.NewForecast
+	GetBuckets ml_get_buckets.NewGetBuckets
+	GetCalendarEvents ml_get_calendar_events.NewGetCalendarEvents
+	GetCalendars ml_get_calendars.NewGetCalendars
+	GetCategories ml_get_categories.NewGetCategories
+	GetDataFrameAnalytics ml_get_data_frame_analytics.NewGetDataFrameAnalytics
+	GetDataFrameAnalyticsStats ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStats
+	GetDatafeedStats ml_get_datafeed_stats.NewGetDatafeedStats
+	GetDatafeeds ml_get_datafeeds.NewGetDatafeeds
+	GetFilters ml_get_filters.NewGetFilters
+	GetInfluencers ml_get_influencers.NewGetInfluencers
+	GetJobStats ml_get_job_stats.NewGetJobStats
+	GetJobs ml_get_jobs.NewGetJobs
+	GetMemoryStats ml_get_memory_stats.NewGetMemoryStats
+	GetModelSnapshotUpgradeStats ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStats
+	GetModelSnapshots ml_get_model_snapshots.NewGetModelSnapshots
+	GetOverallBuckets ml_get_overall_buckets.NewGetOverallBuckets
+	GetRecords ml_get_records.NewGetRecords
+	GetTrainedModels ml_get_trained_models.NewGetTrainedModels
+	GetTrainedModelsStats ml_get_trained_models_stats.NewGetTrainedModelsStats
+	InferTrainedModel ml_infer_trained_model.NewInferTrainedModel
+	Info ml_info.NewInfo
+	OpenJob ml_open_job.NewOpenJob
+	PostCalendarEvents ml_post_calendar_events.NewPostCalendarEvents
+	PostData ml_post_data.NewPostData
+	PreviewDataFrameAnalytics ml_preview_data_frame_analytics.NewPreviewDataFrameAnalytics
+	PreviewDatafeed ml_preview_datafeed.NewPreviewDatafeed
+	PutCalendar ml_put_calendar.NewPutCalendar
+	PutCalendarJob ml_put_calendar_job.NewPutCalendarJob
+	PutDataFrameAnalytics ml_put_data_frame_analytics.NewPutDataFrameAnalytics
+	PutDatafeed ml_put_datafeed.NewPutDatafeed
+	PutFilter ml_put_filter.NewPutFilter
+	PutJob ml_put_job.NewPutJob
+	PutTrainedModel ml_put_trained_model.NewPutTrainedModel
+	PutTrainedModelAlias ml_put_trained_model_alias.NewPutTrainedModelAlias
+	PutTrainedModelDefinitionPart ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPart
+	PutTrainedModelVocabulary ml_put_trained_model_vocabulary.NewPutTrainedModelVocabulary
+	ResetJob ml_reset_job.NewResetJob
+	RevertModelSnapshot ml_revert_model_snapshot.NewRevertModelSnapshot
+	SetUpgradeMode ml_set_upgrade_mode.NewSetUpgradeMode
+	StartDataFrameAnalytics ml_start_data_frame_analytics.NewStartDataFrameAnalytics
+	StartDatafeed ml_start_datafeed.NewStartDatafeed
+	StartTrainedModelDeployment ml_start_trained_model_deployment.NewStartTrainedModelDeployment
+	StopDataFrameAnalytics ml_stop_data_frame_analytics.NewStopDataFrameAnalytics
+	StopDatafeed ml_stop_datafeed.NewStopDatafeed
+	StopTrainedModelDeployment ml_stop_trained_model_deployment.NewStopTrainedModelDeployment
+	UpdateDataFrameAnalytics ml_update_data_frame_analytics.NewUpdateDataFrameAnalytics
+	UpdateDatafeed ml_update_datafeed.NewUpdateDatafeed
+	UpdateFilter ml_update_filter.NewUpdateFilter
+	UpdateJob ml_update_job.NewUpdateJob
+	UpdateModelSnapshot ml_update_model_snapshot.NewUpdateModelSnapshot
+	UpdateTrainedModelDeployment ml_update_trained_model_deployment.NewUpdateTrainedModelDeployment
+	UpgradeJobSnapshot ml_upgrade_job_snapshot.NewUpgradeJobSnapshot
+	Validate ml_validate.NewValidate
+	ValidateDetector ml_validate_detector.NewValidateDetector
+}
+
+type Monitoring struct {
+	Bulk monitoring_bulk.NewBulk
+}
+
+type Nodes struct {
+	ClearRepositoriesMeteringArchive nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchive
+	GetRepositoriesMeteringInfo nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfo
+	HotThreads nodes_hot_threads.NewHotThreads
+	Info nodes_info.NewInfo
+	ReloadSecureSettings nodes_reload_secure_settings.NewReloadSecureSettings
+	Stats nodes_stats.NewStats
+	Usage nodes_usage.NewUsage
+}
+
+type Profiling struct {
+	Flamegraph profiling_flamegraph.NewFlamegraph
+	Stacktraces profiling_stacktraces.NewStacktraces
+	Status profiling_status.NewStatus
+	TopnFunctions profiling_topn_functions.NewTopnFunctions
+}
+
+type QueryRules struct {
+	DeleteRule query_rules_delete_rule.NewDeleteRule
+	DeleteRuleset query_rules_delete_ruleset.NewDeleteRuleset
+	GetRule query_rules_get_rule.NewGetRule
+	GetRuleset query_rules_get_ruleset.NewGetRuleset
+	ListRulesets query_rules_list_rulesets.NewListRulesets
+	PutRule query_rules_put_rule.NewPutRule
+	PutRuleset query_rules_put_ruleset.NewPutRuleset
+	Test query_rules_test.NewTest
+}
+
+type Rollup struct {
+	DeleteJob rollup_delete_job.NewDeleteJob
+	GetJobs rollup_get_jobs.NewGetJobs
+	GetRollupCaps rollup_get_rollup_caps.NewGetRollupCaps
+	GetRollupIndexCaps rollup_get_rollup_index_caps.NewGetRollupIndexCaps
+	PutJob rollup_put_job.NewPutJob
+	RollupSearch rollup_rollup_search.NewRollupSearch
+	StartJob rollup_start_job.NewStartJob
+	StopJob rollup_stop_job.NewStopJob
+}
+
+type SearchApplication struct {
+	Delete search_application_delete.NewDelete
+	DeleteBehavioralAnalytics search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalytics
+	Get search_application_get.NewGet
+	GetBehavioralAnalytics search_application_get_behavioral_analytics.NewGetBehavioralAnalytics
+	List search_application_list.NewList
+	PostBehavioralAnalyticsEvent search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEvent
+	Put search_application_put.NewPut
+	PutBehavioralAnalytics search_application_put_behavioral_analytics.NewPutBehavioralAnalytics
+	RenderQuery search_application_render_query.NewRenderQuery
+	Search search_application_search.NewSearch
+}
+
+type SearchableSnapshots struct {
+	CacheStats searchable_snapshots_cache_stats.NewCacheStats
+	ClearCache searchable_snapshots_clear_cache.NewClearCache
+	Mount searchable_snapshots_mount.NewMount
+	Stats searchable_snapshots_stats.NewStats
+}
+
+type Security struct {
+	ActivateUserProfile security_activate_user_profile.NewActivateUserProfile
+	Authenticate security_authenticate.NewAuthenticate
+	BulkDeleteRole security_bulk_delete_role.NewBulkDeleteRole
+	BulkPutRole security_bulk_put_role.NewBulkPutRole
+	BulkUpdateApiKeys security_bulk_update_api_keys.NewBulkUpdateApiKeys
+	ChangePassword security_change_password.NewChangePassword
+	ClearApiKeyCache security_clear_api_key_cache.NewClearApiKeyCache
+	ClearCachedPrivileges security_clear_cached_privileges.NewClearCachedPrivileges
+	ClearCachedRealms security_clear_cached_realms.NewClearCachedRealms
+	ClearCachedRoles security_clear_cached_roles.NewClearCachedRoles
+	ClearCachedServiceTokens security_clear_cached_service_tokens.NewClearCachedServiceTokens
+	CreateApiKey security_create_api_key.NewCreateApiKey
+	CreateCrossClusterApiKey security_create_cross_cluster_api_key.NewCreateCrossClusterApiKey
+	CreateServiceToken security_create_service_token.NewCreateServiceToken
+	DelegatePki security_delegate_pki.NewDelegatePki
+	DeletePrivileges security_delete_privileges.NewDeletePrivileges
+	DeleteRole security_delete_role.NewDeleteRole
+	DeleteRoleMapping security_delete_role_mapping.NewDeleteRoleMapping
+	DeleteServiceToken security_delete_service_token.NewDeleteServiceToken
+	DeleteUser security_delete_user.NewDeleteUser
+	DisableUser security_disable_user.NewDisableUser
+	DisableUserProfile security_disable_user_profile.NewDisableUserProfile
+	EnableUser security_enable_user.NewEnableUser
+	EnableUserProfile security_enable_user_profile.NewEnableUserProfile
+	EnrollKibana security_enroll_kibana.NewEnrollKibana
+	EnrollNode security_enroll_node.NewEnrollNode
+	GetApiKey security_get_api_key.NewGetApiKey
+	GetBuiltinPrivileges security_get_builtin_privileges.NewGetBuiltinPrivileges
+	GetPrivileges security_get_privileges.NewGetPrivileges
+	GetRole security_get_role.NewGetRole
+	GetRoleMapping security_get_role_mapping.NewGetRoleMapping
+	GetServiceAccounts security_get_service_accounts.NewGetServiceAccounts
+	GetServiceCredentials security_get_service_credentials.NewGetServiceCredentials
+	GetSettings security_get_settings.NewGetSettings
+	GetToken security_get_token.NewGetToken
+	GetUser security_get_user.NewGetUser
+	GetUserPrivileges security_get_user_privileges.NewGetUserPrivileges
+	GetUserProfile security_get_user_profile.NewGetUserProfile
+	GrantApiKey security_grant_api_key.NewGrantApiKey
+	HasPrivileges security_has_privileges.NewHasPrivileges
+	HasPrivilegesUserProfile security_has_privileges_user_profile.NewHasPrivilegesUserProfile
+	InvalidateApiKey security_invalidate_api_key.NewInvalidateApiKey
+	InvalidateToken security_invalidate_token.NewInvalidateToken
+	OidcAuthenticate security_oidc_authenticate.NewOidcAuthenticate
+	OidcLogout security_oidc_logout.NewOidcLogout
+	OidcPrepareAuthentication security_oidc_prepare_authentication.NewOidcPrepareAuthentication
+	PutPrivileges security_put_privileges.NewPutPrivileges
+	PutRole security_put_role.NewPutRole
+	PutRoleMapping security_put_role_mapping.NewPutRoleMapping
+	PutUser security_put_user.NewPutUser
+	QueryApiKeys security_query_api_keys.NewQueryApiKeys
+	QueryRole security_query_role.NewQueryRole
+	QueryUser security_query_user.NewQueryUser
+	SamlAuthenticate security_saml_authenticate.NewSamlAuthenticate
+	SamlCompleteLogout security_saml_complete_logout.NewSamlCompleteLogout
+	SamlInvalidate security_saml_invalidate.NewSamlInvalidate
+	SamlLogout security_saml_logout.NewSamlLogout
+	SamlPrepareAuthentication security_saml_prepare_authentication.NewSamlPrepareAuthentication
+	SamlServiceProviderMetadata security_saml_service_provider_metadata.NewSamlServiceProviderMetadata
+	SuggestUserProfiles security_suggest_user_profiles.NewSuggestUserProfiles
+	UpdateApiKey security_update_api_key.NewUpdateApiKey
+	UpdateCrossClusterApiKey security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKey
+	UpdateSettings security_update_settings.NewUpdateSettings
+	UpdateUserProfileData security_update_user_profile_data.NewUpdateUserProfileData
+}
+
+type Shutdown struct {
+	DeleteNode shutdown_delete_node.NewDeleteNode
+	GetNode shutdown_get_node.NewGetNode
+	PutNode shutdown_put_node.NewPutNode
+}
+
+type Simulate struct {
+	Ingest simulate_ingest.NewIngest
+}
+
+type Slm struct {
+	DeleteLifecycle slm_delete_lifecycle.NewDeleteLifecycle
+	ExecuteLifecycle slm_execute_lifecycle.NewExecuteLifecycle
+	ExecuteRetention slm_execute_retention.NewExecuteRetention
+	GetLifecycle slm_get_lifecycle.NewGetLifecycle
+	GetStats slm_get_stats.NewGetStats
+	GetStatus slm_get_status.NewGetStatus
+	PutLifecycle slm_put_lifecycle.NewPutLifecycle
+	Start slm_start.NewStart
+	Stop slm_stop.NewStop
+}
+
+type Snapshot struct {
+	CleanupRepository snapshot_cleanup_repository.NewCleanupRepository
+	Clone snapshot_clone.NewClone
+	Create snapshot_create.NewCreate
+	CreateRepository snapshot_create_repository.NewCreateRepository
+	Delete snapshot_delete.NewDelete
+	DeleteRepository snapshot_delete_repository.NewDeleteRepository
+	Get snapshot_get.NewGet
+	GetRepository snapshot_get_repository.NewGetRepository
+	RepositoryAnalyze snapshot_repository_analyze.NewRepositoryAnalyze
+	RepositoryVerifyIntegrity snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrity
+	Restore snapshot_restore.NewRestore
+	Status snapshot_status.NewStatus
+	VerifyRepository snapshot_verify_repository.NewVerifyRepository
+}
+
+type Sql struct {
+	ClearCursor sql_clear_cursor.NewClearCursor
+	DeleteAsync sql_delete_async.NewDeleteAsync
+	GetAsync sql_get_async.NewGetAsync
+	GetAsyncStatus sql_get_async_status.NewGetAsyncStatus
+	Query sql_query.NewQuery
+	Translate sql_translate.NewTranslate
+}
+
+type Ssl struct {
+	Certificates ssl_certificates.NewCertificates
+}
+
+type Streams struct {
+	LogsDisable streams_logs_disable.NewLogsDisable
+	LogsEnable streams_logs_enable.NewLogsEnable
+	Status streams_status.NewStatus
+}
+
+type Synonyms struct {
+	DeleteSynonym synonyms_delete_synonym.NewDeleteSynonym
+	DeleteSynonymRule synonyms_delete_synonym_rule.NewDeleteSynonymRule
+	GetSynonym synonyms_get_synonym.NewGetSynonym
+	GetSynonymRule synonyms_get_synonym_rule.NewGetSynonymRule
+	GetSynonymsSets synonyms_get_synonyms_sets.NewGetSynonymsSets
+	PutSynonym synonyms_put_synonym.NewPutSynonym
+	PutSynonymRule synonyms_put_synonym_rule.NewPutSynonymRule
+}
+
+type Tasks struct {
+	Cancel tasks_cancel.NewCancel
+	Get tasks_get.NewGet
+	List tasks_list.NewList
+}
+
+type TextStructure struct {
+	FindFieldStructure text_structure_find_field_structure.NewFindFieldStructure
+	FindMessageStructure text_structure_find_message_structure.NewFindMessageStructure
+	FindStructure text_structure_find_structure.NewFindStructure
+	TestGrokPattern text_structure_test_grok_pattern.NewTestGrokPattern
+}
+
+type Transform struct {
+	DeleteTransform transform_delete_transform.NewDeleteTransform
+	GetNodeStats transform_get_node_stats.NewGetNodeStats
+	GetTransform transform_get_transform.NewGetTransform
+	GetTransformStats transform_get_transform_stats.NewGetTransformStats
+	PreviewTransform transform_preview_transform.NewPreviewTransform
+	PutTransform transform_put_transform.NewPutTransform
+	ResetTransform transform_reset_transform.NewResetTransform
+	ScheduleNowTransform transform_schedule_now_transform.NewScheduleNowTransform
+	StartTransform transform_start_transform.NewStartTransform
+	StopTransform transform_stop_transform.NewStopTransform
+	UpdateTransform transform_update_transform.NewUpdateTransform
+	UpgradeTransforms transform_upgrade_transforms.NewUpgradeTransforms
+}
+
+type Watcher struct {
+	AckWatch watcher_ack_watch.NewAckWatch
+	ActivateWatch watcher_activate_watch.NewActivateWatch
+	DeactivateWatch watcher_deactivate_watch.NewDeactivateWatch
+	DeleteWatch watcher_delete_watch.NewDeleteWatch
+	ExecuteWatch watcher_execute_watch.NewExecuteWatch
+	GetSettings watcher_get_settings.NewGetSettings
+	GetWatch watcher_get_watch.NewGetWatch
+	PutWatch watcher_put_watch.NewPutWatch
+	QueryWatches watcher_query_watches.NewQueryWatches
+	Start watcher_start.NewStart
+	Stats watcher_stats.NewStats
+	Stop watcher_stop.NewStop
+	UpdateSettings watcher_update_settings.NewUpdateSettings
+}
+
+type Xpack struct {
+	Info xpack_info.NewInfo
+	Usage xpack_usage.NewUsage
+}
+
+type API struct {
+	AsyncSearch AsyncSearch
+	Autoscaling Autoscaling
+	Capabilities Capabilities
+	Cat Cat
+	Ccr Ccr
+	Cluster Cluster
+	Connector Connector
+	Core Core
+	DanglingIndices DanglingIndices
+	Enrich Enrich
+	Eql Eql
+	Esql Esql
+	Features Features
+	Fleet Fleet
+	Graph Graph
+	Ilm Ilm
+	Indices Indices
+	Inference Inference
+	Ingest Ingest
+	License License
+	Logstash Logstash
+	Migration Migration
+	Ml Ml
+	Monitoring Monitoring
+	Nodes Nodes
+	Profiling Profiling
+	QueryRules QueryRules
+	Rollup Rollup
+	SearchApplication SearchApplication
+	SearchableSnapshots SearchableSnapshots
+	Security Security
+	Shutdown Shutdown
+	Simulate Simulate
+	Slm Slm
+	Snapshot Snapshot
+	Sql Sql
+	Ssl Ssl
+	Streams Streams
+	Synonyms Synonyms
+	Tasks Tasks
+	TextStructure TextStructure
+	Transform Transform
+	Watcher Watcher
+	Xpack Xpack
+
+	Bulk core_bulk.NewBulk
+	ClearScroll core_clear_scroll.NewClearScroll
+	ClosePointInTime core_close_point_in_time.NewClosePointInTime
+	Count core_count.NewCount
+	Create core_create.NewCreate
+	Delete core_delete.NewDelete
+	DeleteByQuery core_delete_by_query.NewDeleteByQuery
+	DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle
+	DeleteScript core_delete_script.NewDeleteScript
+	Exists core_exists.NewExists
+	ExistsSource core_exists_source.NewExistsSource
+	Explain core_explain.NewExplain
+	FieldCaps core_field_caps.NewFieldCaps
+	Get core_get.NewGet
+	GetScript core_get_script.NewGetScript
+	GetScriptContext core_get_script_context.NewGetScriptContext
+	GetScriptLanguages core_get_script_languages.NewGetScriptLanguages
+	GetSource core_get_source.NewGetSource
+	HealthReport core_health_report.NewHealthReport
+	Index core_index.NewIndex
+	Info core_info.NewInfo
+	Mget core_mget.NewMget
+	Msearch core_msearch.NewMsearch
+	MsearchTemplate core_msearch_template.NewMsearchTemplate
+	Mtermvectors core_mtermvectors.NewMtermvectors
+	OpenPointInTime core_open_point_in_time.NewOpenPointInTime
+	Ping core_ping.NewPing
+	PutScript core_put_script.NewPutScript
+	RankEval core_rank_eval.NewRankEval
+	Reindex core_reindex.NewReindex
+	ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle
+	RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate
+	ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute
+	Scroll core_scroll.NewScroll
+	Search core_search.NewSearch
+	SearchMvt core_search_mvt.NewSearchMvt
+	SearchShards core_search_shards.NewSearchShards
+	SearchTemplate core_search_template.NewSearchTemplate
+	TermsEnum core_terms_enum.NewTermsEnum
+	Termvectors core_termvectors.NewTermvectors
+	Update core_update.NewUpdate
+	UpdateByQuery core_update_by_query.NewUpdateByQuery
+	UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle
+}
+
+func New(tp elastictransport.Interface) *API {
+	return &API{
+		AsyncSearch: AsyncSearch{
+			Delete: async_search_delete.NewDeleteFunc(tp),
+			Get: async_search_get.NewGetFunc(tp),
+			Status: async_search_status.NewStatusFunc(tp),
+			Submit: async_search_submit.NewSubmitFunc(tp),
+		},
+
+		Autoscaling: Autoscaling{
+			DeleteAutoscalingPolicy: autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicyFunc(tp),
+			GetAutoscalingCapacity: autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacityFunc(tp),
+			GetAutoscalingPolicy: autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicyFunc(tp),
+			PutAutoscalingPolicy: autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicyFunc(tp),
+		},
+
+		Capabilities: Capabilities{
+			Capabilities: capabilities.NewCapabilitiesFunc(tp),
+		},
+
+		Cat: Cat{
+			Aliases: cat_aliases.NewAliasesFunc(tp),
+			Allocation: cat_allocation.NewAllocationFunc(tp),
+			ComponentTemplates: cat_component_templates.NewComponentTemplatesFunc(tp),
+			Count: cat_count.NewCountFunc(tp),
+			Fielddata: cat_fielddata.NewFielddataFunc(tp),
+			Health: cat_health.NewHealthFunc(tp),
+			Help: cat_help.NewHelpFunc(tp),
+			Indices: cat_indices.NewIndicesFunc(tp),
+			Master: cat_master.NewMasterFunc(tp),
+			MlDataFrameAnalytics: cat_ml_data_frame_analytics.NewMlDataFrameAnalyticsFunc(tp),
+			MlDatafeeds: cat_ml_datafeeds.NewMlDatafeedsFunc(tp),
+			MlJobs: cat_ml_jobs.NewMlJobsFunc(tp),
+			MlTrainedModels: cat_ml_trained_models.NewMlTrainedModelsFunc(tp),
+			Nodeattrs: cat_nodeattrs.NewNodeattrsFunc(tp),
+			Nodes: cat_nodes.NewNodesFunc(tp),
+			PendingTasks: cat_pending_tasks.NewPendingTasksFunc(tp),
+			Plugins: cat_plugins.NewPluginsFunc(tp),
+			Recovery: cat_recovery.NewRecoveryFunc(tp),
+			Repositories: cat_repositories.NewRepositoriesFunc(tp),
+			Segments: cat_segments.NewSegmentsFunc(tp),
+			Shards: cat_shards.NewShardsFunc(tp),
+			Snapshots: cat_snapshots.NewSnapshotsFunc(tp),
+			Tasks: cat_tasks.NewTasksFunc(tp),
+			Templates: cat_templates.NewTemplatesFunc(tp),
+			ThreadPool: cat_thread_pool.NewThreadPoolFunc(tp),
+			Transforms: cat_transforms.NewTransformsFunc(tp),
+		},
+
+		Ccr: Ccr{
+			DeleteAutoFollowPattern: ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPatternFunc(tp),
+			Follow: ccr_follow.NewFollowFunc(tp),
+			FollowInfo: ccr_follow_info.NewFollowInfoFunc(tp),
+			FollowStats: ccr_follow_stats.NewFollowStatsFunc(tp),
+			ForgetFollower: ccr_forget_follower.NewForgetFollowerFunc(tp),
+			GetAutoFollowPattern: ccr_get_auto_follow_pattern.NewGetAutoFollowPatternFunc(tp),
+			PauseAutoFollowPattern: ccr_pause_auto_follow_pattern.NewPauseAutoFollowPatternFunc(tp),
+			PauseFollow: ccr_pause_follow.NewPauseFollowFunc(tp),
+			PutAutoFollowPattern: ccr_put_auto_follow_pattern.NewPutAutoFollowPatternFunc(tp),
+			ResumeAutoFollowPattern: ccr_resume_auto_follow_pattern.NewResumeAutoFollowPatternFunc(tp),
+			ResumeFollow: ccr_resume_follow.NewResumeFollowFunc(tp),
+			Stats: ccr_stats.NewStatsFunc(tp),
+			Unfollow: ccr_unfollow.NewUnfollowFunc(tp),
+		},
+
+		Cluster: Cluster{
+			AllocationExplain: cluster_allocation_explain.NewAllocationExplainFunc(tp),
+			DeleteComponentTemplate: cluster_delete_component_template.NewDeleteComponentTemplateFunc(tp),
+			DeleteVotingConfigExclusions: cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusionsFunc(tp),
+			ExistsComponentTemplate: cluster_exists_component_template.NewExistsComponentTemplateFunc(tp),
+			GetComponentTemplate: cluster_get_component_template.NewGetComponentTemplateFunc(tp),
+			GetSettings: cluster_get_settings.NewGetSettingsFunc(tp),
+			Health: cluster_health.NewHealthFunc(tp),
+			Info: cluster_info.NewInfoFunc(tp),
+			PendingTasks: cluster_pending_tasks.NewPendingTasksFunc(tp),
+			PostVotingConfigExclusions: cluster_post_voting_config_exclusions.NewPostVotingConfigExclusionsFunc(tp),
+			PutComponentTemplate: cluster_put_component_template.NewPutComponentTemplateFunc(tp),
+			PutSettings: cluster_put_settings.NewPutSettingsFunc(tp),
+			RemoteInfo: cluster_remote_info.NewRemoteInfoFunc(tp),
+			Reroute: cluster_reroute.NewRerouteFunc(tp),
+			State: cluster_state.NewStateFunc(tp),
+			Stats: cluster_stats.NewStatsFunc(tp),
+		},
+
+		Connector: Connector{
+			CheckIn: connector_check_in.NewCheckInFunc(tp),
+			Delete: connector_delete.NewDeleteFunc(tp),
+			Get: connector_get.NewGetFunc(tp),
+			LastSync: connector_last_sync.NewLastSyncFunc(tp),
+			List: connector_list.NewListFunc(tp),
+			Post: connector_post.NewPostFunc(tp),
+			Put: connector_put.NewPutFunc(tp),
+			SecretPost: connector_secret_post.NewSecretPostFunc(tp),
+			SyncJobCancel: connector_sync_job_cancel.NewSyncJobCancelFunc(tp),
+			SyncJobCheckIn: connector_sync_job_check_in.NewSyncJobCheckInFunc(tp),
+			SyncJobClaim: connector_sync_job_claim.NewSyncJobClaimFunc(tp),
+			SyncJobDelete: connector_sync_job_delete.NewSyncJobDeleteFunc(tp),
+			SyncJobError: connector_sync_job_error.NewSyncJobErrorFunc(tp),
+			SyncJobGet: connector_sync_job_get.NewSyncJobGetFunc(tp),
+			SyncJobList: connector_sync_job_list.NewSyncJobListFunc(tp),
+			SyncJobPost: connector_sync_job_post.NewSyncJobPostFunc(tp),
+			SyncJobUpdateStats: connector_sync_job_update_stats.NewSyncJobUpdateStatsFunc(tp),
+			UpdateActiveFiltering: connector_update_active_filtering.NewUpdateActiveFilteringFunc(tp),
+			UpdateApiKeyId: connector_update_api_key_id.NewUpdateApiKeyIdFunc(tp),
+			UpdateConfiguration: connector_update_configuration.NewUpdateConfigurationFunc(tp),
+			UpdateError: connector_update_error.NewUpdateErrorFunc(tp),
+			UpdateFeatures: connector_update_features.NewUpdateFeaturesFunc(tp),
+			UpdateFiltering: connector_update_filtering.NewUpdateFilteringFunc(tp),
+			UpdateFilteringValidation: connector_update_filtering_validation.NewUpdateFilteringValidationFunc(tp),
+			UpdateIndexName: connector_update_index_name.NewUpdateIndexNameFunc(tp),
+			UpdateName: connector_update_name.NewUpdateNameFunc(tp),
+			UpdateNative: connector_update_native.NewUpdateNativeFunc(tp),
+			UpdatePipeline: connector_update_pipeline.NewUpdatePipelineFunc(tp),
+			UpdateScheduling: connector_update_scheduling.NewUpdateSchedulingFunc(tp),
+			UpdateServiceType: connector_update_service_type.NewUpdateServiceTypeFunc(tp),
+			UpdateStatus: connector_update_status.NewUpdateStatusFunc(tp),
+		},
+
+		Core: Core{
+			Bulk: core_bulk.NewBulkFunc(tp),
+			ClearScroll: core_clear_scroll.NewClearScrollFunc(tp),
+			ClosePointInTime: core_close_point_in_time.NewClosePointInTimeFunc(tp),
+			Count: core_count.NewCountFunc(tp),
+			Create: core_create.NewCreateFunc(tp),
+			Delete: core_delete.NewDeleteFunc(tp),
+			DeleteByQuery: core_delete_by_query.NewDeleteByQueryFunc(tp),
+			DeleteByQueryRethrottle: core_delete_by_query_rethrottle.NewDeleteByQueryRethrottleFunc(tp),
+			DeleteScript: core_delete_script.NewDeleteScriptFunc(tp),
+			Exists: core_exists.NewExistsFunc(tp),
+			ExistsSource: core_exists_source.NewExistsSourceFunc(tp),
+			Explain: core_explain.NewExplainFunc(tp),
+			FieldCaps: core_field_caps.NewFieldCapsFunc(tp),
+			Get: core_get.NewGetFunc(tp),
+			GetScript: core_get_script.NewGetScriptFunc(tp),
+			GetScriptContext: core_get_script_context.NewGetScriptContextFunc(tp),
+			GetScriptLanguages: core_get_script_languages.NewGetScriptLanguagesFunc(tp),
+			GetSource: core_get_source.NewGetSourceFunc(tp),
+			HealthReport: core_health_report.NewHealthReportFunc(tp),
+			Index: core_index.NewIndexFunc(tp),
+			Info: core_info.NewInfoFunc(tp),
+			Mget: core_mget.NewMgetFunc(tp),
+			Msearch: core_msearch.NewMsearchFunc(tp),
+			MsearchTemplate: core_msearch_template.NewMsearchTemplateFunc(tp),
+			Mtermvectors: core_mtermvectors.NewMtermvectorsFunc(tp),
+			OpenPointInTime: core_open_point_in_time.NewOpenPointInTimeFunc(tp),
+			Ping: core_ping.NewPingFunc(tp),
+			PutScript: core_put_script.NewPutScriptFunc(tp),
+			RankEval: core_rank_eval.NewRankEvalFunc(tp),
+			Reindex: core_reindex.NewReindexFunc(tp),
+			ReindexRethrottle: core_reindex_rethrottle.NewReindexRethrottleFunc(tp),
+			RenderSearchTemplate: core_render_search_template.NewRenderSearchTemplateFunc(tp),
+			ScriptsPainlessExecute: core_scripts_painless_execute.NewScriptsPainlessExecuteFunc(tp),
+			Scroll: core_scroll.NewScrollFunc(tp),
+			Search: core_search.NewSearchFunc(tp),
+			SearchMvt: core_search_mvt.NewSearchMvtFunc(tp),
+			SearchShards: core_search_shards.NewSearchShardsFunc(tp),
+			SearchTemplate: core_search_template.NewSearchTemplateFunc(tp),
+			TermsEnum: core_terms_enum.NewTermsEnumFunc(tp),
+			Termvectors: core_termvectors.NewTermvectorsFunc(tp),
+			Update: core_update.NewUpdateFunc(tp),
+			UpdateByQuery: core_update_by_query.NewUpdateByQueryFunc(tp),
+			UpdateByQueryRethrottle: core_update_by_query_rethrottle.NewUpdateByQueryRethrottleFunc(tp),
+		},
+
+		DanglingIndices: DanglingIndices{
+			DeleteDanglingIndex: dangling_indices_delete_dangling_index.NewDeleteDanglingIndexFunc(tp),
+			ImportDanglingIndex: dangling_indices_import_dangling_index.NewImportDanglingIndexFunc(tp),
+			ListDanglingIndices: dangling_indices_list_dangling_indices.NewListDanglingIndicesFunc(tp),
+		},
+
+		Enrich: Enrich{
+			DeletePolicy: enrich_delete_policy.NewDeletePolicyFunc(tp),
+			ExecutePolicy: enrich_execute_policy.NewExecutePolicyFunc(tp),
+			GetPolicy: enrich_get_policy.NewGetPolicyFunc(tp),
+			PutPolicy: enrich_put_policy.NewPutPolicyFunc(tp),
+			Stats: enrich_stats.NewStatsFunc(tp),
+		},
+
+		Eql: Eql{
+			Delete: eql_delete.NewDeleteFunc(tp),
+			Get: eql_get.NewGetFunc(tp),
+			GetStatus: eql_get_status.NewGetStatusFunc(tp),
+			Search: eql_search.NewSearchFunc(tp),
+		},
+
+		Esql: Esql{
+			AsyncQuery: esql_async_query.NewAsyncQueryFunc(tp),
+			AsyncQueryDelete: esql_async_query_delete.NewAsyncQueryDeleteFunc(tp),
+			AsyncQueryGet: esql_async_query_get.NewAsyncQueryGetFunc(tp),
+			AsyncQueryStop: esql_async_query_stop.NewAsyncQueryStopFunc(tp),
+			GetQuery: esql_get_query.NewGetQueryFunc(tp),
+			ListQueries: esql_list_queries.NewListQueriesFunc(tp),
+			Query: esql_query.NewQueryFunc(tp),
+		},
+
+		Features: Features{
+			GetFeatures: features_get_features.NewGetFeaturesFunc(tp),
+			ResetFeatures: features_reset_features.NewResetFeaturesFunc(tp),
+		},
+
+		Fleet: Fleet{
+			GlobalCheckpoints: fleet_global_checkpoints.NewGlobalCheckpointsFunc(tp),
+			Msearch: fleet_msearch.NewMsearchFunc(tp),
+			PostSecret: fleet_post_secret.NewPostSecretFunc(tp),
+			Search: fleet_search.NewSearchFunc(tp),
+		},
+
+		Graph: Graph{
+			Explore: graph_explore.NewExploreFunc(tp),
+		},
+
+		Ilm: Ilm{
+			DeleteLifecycle: ilm_delete_lifecycle.NewDeleteLifecycleFunc(tp),
+			ExplainLifecycle: ilm_explain_lifecycle.NewExplainLifecycleFunc(tp),
+			GetLifecycle: ilm_get_lifecycle.NewGetLifecycleFunc(tp),
+			GetStatus: ilm_get_status.NewGetStatusFunc(tp),
+			MigrateToDataTiers: ilm_migrate_to_data_tiers.NewMigrateToDataTiersFunc(tp),
+			MoveToStep: ilm_move_to_step.NewMoveToStepFunc(tp),
+			PutLifecycle: ilm_put_lifecycle.NewPutLifecycleFunc(tp),
+			RemovePolicy: ilm_remove_policy.NewRemovePolicyFunc(tp),
+			Retry: ilm_retry.NewRetryFunc(tp),
+			Start: ilm_start.NewStartFunc(tp),
+			Stop: ilm_stop.NewStopFunc(tp),
+		},
+
+		Indices: Indices{
+			AddBlock: indices_add_block.NewAddBlockFunc(tp),
+			Analyze: indices_analyze.NewAnalyzeFunc(tp),
+			CancelMigrateReindex: indices_cancel_migrate_reindex.NewCancelMigrateReindexFunc(tp),
+			ClearCache: indices_clear_cache.NewClearCacheFunc(tp),
+			Clone: indices_clone.NewCloneFunc(tp),
+			Close: indices_close.NewCloseFunc(tp),
+			Create: indices_create.NewCreateFunc(tp),
+			CreateDataStream: indices_create_data_stream.NewCreateDataStreamFunc(tp),
+			CreateFrom: indices_create_from.NewCreateFromFunc(tp),
+			DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp),
+			Delete: indices_delete.NewDeleteFunc(tp),
+			DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp),
+			DeleteDataLifecycle: indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp),
+			DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp),
+			DeleteDataStreamOptions: indices_delete_data_stream_options.NewDeleteDataStreamOptionsFunc(tp),
+			DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp),
+			DeleteTemplate: indices_delete_template.NewDeleteTemplateFunc(tp),
+			DiskUsage: indices_disk_usage.NewDiskUsageFunc(tp),
+			Downsample: indices_downsample.NewDownsampleFunc(tp),
+			Exists: indices_exists.NewExistsFunc(tp),
+			ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp),
+			ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp),
+			ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp),
+			ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp),
+			FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp),
+			Flush: indices_flush.NewFlushFunc(tp),
+			Forcemerge: indices_forcemerge.NewForcemergeFunc(tp),
+			Get: indices_get.NewGetFunc(tp),
+			GetAlias: indices_get_alias.NewGetAliasFunc(tp),
+			GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp),
+			GetDataLifecycleStats: indices_get_data_lifecycle_stats.NewGetDataLifecycleStatsFunc(tp),
+			GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp),
+			GetDataStreamOptions: indices_get_data_stream_options.NewGetDataStreamOptionsFunc(tp),
+			GetDataStreamSettings: indices_get_data_stream_settings.NewGetDataStreamSettingsFunc(tp),
+			GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp),
+			GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp),
+			GetMapping: indices_get_mapping.NewGetMappingFunc(tp),
+			GetMigrateReindexStatus: indices_get_migrate_reindex_status.NewGetMigrateReindexStatusFunc(tp),
+			GetSettings: indices_get_settings.NewGetSettingsFunc(tp),
+			GetTemplate: indices_get_template.NewGetTemplateFunc(tp),
+			MigrateReindex: indices_migrate_reindex.NewMigrateReindexFunc(tp),
+			MigrateToDataStream: indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(tp),
+			ModifyDataStream: indices_modify_data_stream.NewModifyDataStreamFunc(tp),
+			Open: indices_open.NewOpenFunc(tp),
+			PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp),
+			PutAlias: indices_put_alias.NewPutAliasFunc(tp),
+			PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp),
+			PutDataStreamOptions: indices_put_data_stream_options.NewPutDataStreamOptionsFunc(tp),
+			PutDataStreamSettings: indices_put_data_stream_settings.NewPutDataStreamSettingsFunc(tp),
+			PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp),
+			PutMapping: indices_put_mapping.NewPutMappingFunc(tp),
+			PutSettings: indices_put_settings.NewPutSettingsFunc(tp),
+			PutTemplate: indices_put_template.NewPutTemplateFunc(tp),
+			Recovery: indices_recovery.NewRecoveryFunc(tp),
+			Refresh: indices_refresh.NewRefreshFunc(tp),
+			ReloadSearchAnalyzers: indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(tp),
+			RemoveBlock: indices_remove_block.NewRemoveBlockFunc(tp),
+			ResolveCluster: indices_resolve_cluster.NewResolveClusterFunc(tp),
+			ResolveIndex: indices_resolve_index.NewResolveIndexFunc(tp),
+			Rollover: indices_rollover.NewRolloverFunc(tp),
+			Segments: indices_segments.NewSegmentsFunc(tp),
+			ShardStores: indices_shard_stores.NewShardStoresFunc(tp),
+			Shrink: indices_shrink.NewShrinkFunc(tp),
+			SimulateIndexTemplate: indices_simulate_index_template.NewSimulateIndexTemplateFunc(tp),
+			SimulateTemplate: indices_simulate_template.NewSimulateTemplateFunc(tp),
+			Split: indices_split.NewSplitFunc(tp),
+			Stats: indices_stats.NewStatsFunc(tp),
+			UpdateAliases: indices_update_aliases.NewUpdateAliasesFunc(tp),
+			ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp),
+		},
+
+		Inference: Inference{
+			ChatCompletionUnified: inference_chat_completion_unified.NewChatCompletionUnifiedFunc(tp),
+			Completion: inference_completion.NewCompletionFunc(tp),
+			Delete: inference_delete.NewDeleteFunc(tp),
+			Get: inference_get.NewGetFunc(tp),
+			Inference: inference_inference.NewInferenceFunc(tp),
+			Put: inference_put.NewPutFunc(tp),
+			PutAlibabacloud: inference_put_alibabacloud.NewPutAlibabacloudFunc(tp),
+			PutAmazonbedrock: inference_put_amazonbedrock.NewPutAmazonbedrockFunc(tp),
+			PutAmazonsagemaker: inference_put_amazonsagemaker.NewPutAmazonsagemakerFunc(tp),
+			PutAnthropic: inference_put_anthropic.NewPutAnthropicFunc(tp),
+			PutAzureaistudio: inference_put_azureaistudio.NewPutAzureaistudioFunc(tp),
+			PutAzureopenai: inference_put_azureopenai.NewPutAzureopenaiFunc(tp),
+			PutCohere: inference_put_cohere.NewPutCohereFunc(tp),
+			PutCustom: inference_put_custom.NewPutCustomFunc(tp),
+			PutDeepseek: inference_put_deepseek.NewPutDeepseekFunc(tp),
+			PutElasticsearch: inference_put_elasticsearch.NewPutElasticsearchFunc(tp),
+			PutElser: inference_put_elser.NewPutElserFunc(tp),
+			PutGoogleaistudio: inference_put_googleaistudio.NewPutGoogleaistudioFunc(tp),
+			PutGooglevertexai: inference_put_googlevertexai.NewPutGooglevertexaiFunc(tp),
+			PutHuggingFace: inference_put_hugging_face.NewPutHuggingFaceFunc(tp),
+			PutJinaai: inference_put_jinaai.NewPutJinaaiFunc(tp),
+			PutMistral: inference_put_mistral.NewPutMistralFunc(tp),
+			PutOpenai: inference_put_openai.NewPutOpenaiFunc(tp),
+			PutVoyageai: inference_put_voyageai.NewPutVoyageaiFunc(tp),
+			PutWatsonx: inference_put_watsonx.NewPutWatsonxFunc(tp),
+			Rerank: inference_rerank.NewRerankFunc(tp),
+			SparseEmbedding: inference_sparse_embedding.NewSparseEmbeddingFunc(tp),
+			StreamCompletion: inference_stream_completion.NewStreamCompletionFunc(tp),
+			TextEmbedding: inference_text_embedding.NewTextEmbeddingFunc(tp),
+			Update: inference_update.NewUpdateFunc(tp),
+		},
+
+		Ingest: Ingest{
+			DeleteGeoipDatabase: ingest_delete_geoip_database.NewDeleteGeoipDatabaseFunc(tp),
+			DeleteIpLocationDatabase: ingest_delete_ip_location_database.NewDeleteIpLocationDatabaseFunc(tp),
+			DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp),
+			GeoIpStats: ingest_geo_ip_stats.NewGeoIpStatsFunc(tp),
+			GetGeoipDatabase: ingest_get_geoip_database.NewGetGeoipDatabaseFunc(tp),
+			GetIpLocationDatabase: ingest_get_ip_location_database.NewGetIpLocationDatabaseFunc(tp),
+			GetPipeline: ingest_get_pipeline.NewGetPipelineFunc(tp),
+			ProcessorGrok: ingest_processor_grok.NewProcessorGrokFunc(tp),
+			PutGeoipDatabase: ingest_put_geoip_database.NewPutGeoipDatabaseFunc(tp),
+			PutIpLocationDatabase: ingest_put_ip_location_database.NewPutIpLocationDatabaseFunc(tp),
+			PutPipeline: ingest_put_pipeline.NewPutPipelineFunc(tp),
+			Simulate: ingest_simulate.NewSimulateFunc(tp),
+		},
+
+		License: License{
+			Delete: license_delete.NewDeleteFunc(tp),
+			Get: license_get.NewGetFunc(tp),
+			GetBasicStatus: license_get_basic_status.NewGetBasicStatusFunc(tp),
+			GetTrialStatus: license_get_trial_status.NewGetTrialStatusFunc(tp),
+			Post: license_post.NewPostFunc(tp),
+			PostStartBasic: license_post_start_basic.NewPostStartBasicFunc(tp),
+			PostStartTrial: license_post_start_trial.NewPostStartTrialFunc(tp),
+		},
+
+		Logstash: Logstash{
+			DeletePipeline: logstash_delete_pipeline.NewDeletePipelineFunc(tp),
+			GetPipeline: logstash_get_pipeline.NewGetPipelineFunc(tp),
+			PutPipeline: logstash_put_pipeline.NewPutPipelineFunc(tp),
+		},
+
+		Migration: Migration{
+			Deprecations: migration_deprecations.NewDeprecationsFunc(tp),
+			GetFeatureUpgradeStatus: migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatusFunc(tp),
+			PostFeatureUpgrade: migration_post_feature_upgrade.NewPostFeatureUpgradeFunc(tp),
+		},
+
+		Ml: Ml{
+			ClearTrainedModelDeploymentCache: ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCacheFunc(tp),
+			CloseJob: ml_close_job.NewCloseJobFunc(tp),
+			DeleteCalendar: ml_delete_calendar.NewDeleteCalendarFunc(tp),
+			DeleteCalendarEvent: ml_delete_calendar_event.NewDeleteCalendarEventFunc(tp),
+			DeleteCalendarJob: ml_delete_calendar_job.NewDeleteCalendarJobFunc(tp),
+			DeleteDataFrameAnalytics: ml_delete_data_frame_analytics.NewDeleteDataFrameAnalyticsFunc(tp),
+			DeleteDatafeed: ml_delete_datafeed.NewDeleteDatafeedFunc(tp),
+			DeleteExpiredData: ml_delete_expired_data.NewDeleteExpiredDataFunc(tp),
+			DeleteFilter: ml_delete_filter.NewDeleteFilterFunc(tp),
+			DeleteForecast: ml_delete_forecast.NewDeleteForecastFunc(tp),
+			DeleteJob: ml_delete_job.NewDeleteJobFunc(tp),
+			DeleteModelSnapshot: ml_delete_model_snapshot.NewDeleteModelSnapshotFunc(tp),
+			DeleteTrainedModel: ml_delete_trained_model.NewDeleteTrainedModelFunc(tp),
+			DeleteTrainedModelAlias: ml_delete_trained_model_alias.NewDeleteTrainedModelAliasFunc(tp),
+			EstimateModelMemory: ml_estimate_model_memory.NewEstimateModelMemoryFunc(tp),
+			EvaluateDataFrame: ml_evaluate_data_frame.NewEvaluateDataFrameFunc(tp),
+			ExplainDataFrameAnalytics: ml_explain_data_frame_analytics.NewExplainDataFrameAnalyticsFunc(tp),
+			FlushJob: ml_flush_job.NewFlushJobFunc(tp),
+			Forecast: ml_forecast.NewForecastFunc(tp),
+			GetBuckets: ml_get_buckets.NewGetBucketsFunc(tp),
+			GetCalendarEvents: ml_get_calendar_events.NewGetCalendarEventsFunc(tp),
+			GetCalendars: ml_get_calendars.NewGetCalendarsFunc(tp),
+			GetCategories: ml_get_categories.NewGetCategoriesFunc(tp),
+			GetDataFrameAnalytics: ml_get_data_frame_analytics.NewGetDataFrameAnalyticsFunc(tp),
+			GetDataFrameAnalyticsStats: ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStatsFunc(tp),
+			GetDatafeedStats: ml_get_datafeed_stats.NewGetDatafeedStatsFunc(tp),
+			GetDatafeeds: ml_get_datafeeds.NewGetDatafeedsFunc(tp),
+			GetFilters: ml_get_filters.NewGetFiltersFunc(tp),
+			GetInfluencers: ml_get_influencers.NewGetInfluencersFunc(tp),
+			GetJobStats: ml_get_job_stats.NewGetJobStatsFunc(tp),
+			GetJobs: ml_get_jobs.NewGetJobsFunc(tp),
+			GetMemoryStats: ml_get_memory_stats.NewGetMemoryStatsFunc(tp),
+			GetModelSnapshotUpgradeStats: ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStatsFunc(tp),
+			GetModelSnapshots: ml_get_model_snapshots.NewGetModelSnapshotsFunc(tp),
+			GetOverallBuckets: ml_get_overall_buckets.NewGetOverallBucketsFunc(tp),
ml_get_overall_buckets.NewGetOverallBucketsFunc(tp), + GetRecords: ml_get_records.NewGetRecordsFunc(tp), + GetTrainedModels: ml_get_trained_models.NewGetTrainedModelsFunc(tp), + GetTrainedModelsStats: ml_get_trained_models_stats.NewGetTrainedModelsStatsFunc(tp), + InferTrainedModel: ml_infer_trained_model.NewInferTrainedModelFunc(tp), + Info: ml_info.NewInfoFunc(tp), + OpenJob: ml_open_job.NewOpenJobFunc(tp), + PostCalendarEvents: ml_post_calendar_events.NewPostCalendarEventsFunc(tp), + PostData: ml_post_data.NewPostDataFunc(tp), + PreviewDataFrameAnalytics: ml_preview_data_frame_analytics.NewPreviewDataFrameAnalyticsFunc(tp), + PreviewDatafeed: ml_preview_datafeed.NewPreviewDatafeedFunc(tp), + PutCalendar: ml_put_calendar.NewPutCalendarFunc(tp), + PutCalendarJob: ml_put_calendar_job.NewPutCalendarJobFunc(tp), + PutDataFrameAnalytics: ml_put_data_frame_analytics.NewPutDataFrameAnalyticsFunc(tp), + PutDatafeed: ml_put_datafeed.NewPutDatafeedFunc(tp), + PutFilter: ml_put_filter.NewPutFilterFunc(tp), + PutJob: ml_put_job.NewPutJobFunc(tp), + PutTrainedModel: ml_put_trained_model.NewPutTrainedModelFunc(tp), + PutTrainedModelAlias: ml_put_trained_model_alias.NewPutTrainedModelAliasFunc(tp), + PutTrainedModelDefinitionPart: ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPartFunc(tp), + PutTrainedModelVocabulary: ml_put_trained_model_vocabulary.NewPutTrainedModelVocabularyFunc(tp), + ResetJob: ml_reset_job.NewResetJobFunc(tp), + RevertModelSnapshot: ml_revert_model_snapshot.NewRevertModelSnapshotFunc(tp), + SetUpgradeMode: ml_set_upgrade_mode.NewSetUpgradeModeFunc(tp), + StartDataFrameAnalytics: ml_start_data_frame_analytics.NewStartDataFrameAnalyticsFunc(tp), + StartDatafeed: ml_start_datafeed.NewStartDatafeedFunc(tp), + StartTrainedModelDeployment: ml_start_trained_model_deployment.NewStartTrainedModelDeploymentFunc(tp), + StopDataFrameAnalytics: ml_stop_data_frame_analytics.NewStopDataFrameAnalyticsFunc(tp), + StopDatafeed: ml_stop_datafeed.NewStopDatafeedFunc(tp), + StopTrainedModelDeployment: ml_stop_trained_model_deployment.NewStopTrainedModelDeploymentFunc(tp), + UpdateDataFrameAnalytics: ml_update_data_frame_analytics.NewUpdateDataFrameAnalyticsFunc(tp), + UpdateDatafeed: ml_update_datafeed.NewUpdateDatafeedFunc(tp), + UpdateFilter: ml_update_filter.NewUpdateFilterFunc(tp), + UpdateJob: ml_update_job.NewUpdateJobFunc(tp), + UpdateModelSnapshot: ml_update_model_snapshot.NewUpdateModelSnapshotFunc(tp), + UpdateTrainedModelDeployment: ml_update_trained_model_deployment.NewUpdateTrainedModelDeploymentFunc(tp), + UpgradeJobSnapshot: ml_upgrade_job_snapshot.NewUpgradeJobSnapshotFunc(tp), + Validate: ml_validate.NewValidateFunc(tp), + ValidateDetector: ml_validate_detector.NewValidateDetectorFunc(tp), + }, + + Monitoring: Monitoring{ + Bulk: monitoring_bulk.NewBulkFunc(tp), + }, + + Nodes: Nodes{ + ClearRepositoriesMeteringArchive: nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchiveFunc(tp), + GetRepositoriesMeteringInfo: nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfoFunc(tp), + HotThreads: nodes_hot_threads.NewHotThreadsFunc(tp), + Info: nodes_info.NewInfoFunc(tp), + ReloadSecureSettings: nodes_reload_secure_settings.NewReloadSecureSettingsFunc(tp), + Stats: nodes_stats.NewStatsFunc(tp), + Usage: nodes_usage.NewUsageFunc(tp), + }, + + Profiling: Profiling{ + Flamegraph: profiling_flamegraph.NewFlamegraphFunc(tp), + Stacktraces: profiling_stacktraces.NewStacktracesFunc(tp), + Status: profiling_status.NewStatusFunc(tp), + 
TopnFunctions: profiling_topn_functions.NewTopnFunctionsFunc(tp), + }, + + QueryRules: QueryRules{ + DeleteRule: query_rules_delete_rule.NewDeleteRuleFunc(tp), + DeleteRuleset: query_rules_delete_ruleset.NewDeleteRulesetFunc(tp), + GetRule: query_rules_get_rule.NewGetRuleFunc(tp), + GetRuleset: query_rules_get_ruleset.NewGetRulesetFunc(tp), + ListRulesets: query_rules_list_rulesets.NewListRulesetsFunc(tp), + PutRule: query_rules_put_rule.NewPutRuleFunc(tp), + PutRuleset: query_rules_put_ruleset.NewPutRulesetFunc(tp), + Test: query_rules_test.NewTestFunc(tp), + }, + + Rollup: Rollup{ + DeleteJob: rollup_delete_job.NewDeleteJobFunc(tp), + GetJobs: rollup_get_jobs.NewGetJobsFunc(tp), + GetRollupCaps: rollup_get_rollup_caps.NewGetRollupCapsFunc(tp), + GetRollupIndexCaps: rollup_get_rollup_index_caps.NewGetRollupIndexCapsFunc(tp), + PutJob: rollup_put_job.NewPutJobFunc(tp), + RollupSearch: rollup_rollup_search.NewRollupSearchFunc(tp), + StartJob: rollup_start_job.NewStartJobFunc(tp), + StopJob: rollup_stop_job.NewStopJobFunc(tp), + }, + + SearchApplication: SearchApplication{ + Delete: search_application_delete.NewDeleteFunc(tp), + DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), + Get: search_application_get.NewGetFunc(tp), + GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), + List: search_application_list.NewListFunc(tp), + PostBehavioralAnalyticsEvent: search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEventFunc(tp), + Put: search_application_put.NewPutFunc(tp), + PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), + RenderQuery: search_application_render_query.NewRenderQueryFunc(tp), + Search: search_application_search.NewSearchFunc(tp), + }, + + SearchableSnapshots: SearchableSnapshots{ + CacheStats: searchable_snapshots_cache_stats.NewCacheStatsFunc(tp), + ClearCache: searchable_snapshots_clear_cache.NewClearCacheFunc(tp), + Mount: searchable_snapshots_mount.NewMountFunc(tp), + Stats: searchable_snapshots_stats.NewStatsFunc(tp), + }, + + Security: Security{ + ActivateUserProfile: security_activate_user_profile.NewActivateUserProfileFunc(tp), + Authenticate: security_authenticate.NewAuthenticateFunc(tp), + BulkDeleteRole: security_bulk_delete_role.NewBulkDeleteRoleFunc(tp), + BulkPutRole: security_bulk_put_role.NewBulkPutRoleFunc(tp), + BulkUpdateApiKeys: security_bulk_update_api_keys.NewBulkUpdateApiKeysFunc(tp), + ChangePassword: security_change_password.NewChangePasswordFunc(tp), + ClearApiKeyCache: security_clear_api_key_cache.NewClearApiKeyCacheFunc(tp), + ClearCachedPrivileges: security_clear_cached_privileges.NewClearCachedPrivilegesFunc(tp), + ClearCachedRealms: security_clear_cached_realms.NewClearCachedRealmsFunc(tp), + ClearCachedRoles: security_clear_cached_roles.NewClearCachedRolesFunc(tp), + ClearCachedServiceTokens: security_clear_cached_service_tokens.NewClearCachedServiceTokensFunc(tp), + CreateApiKey: security_create_api_key.NewCreateApiKeyFunc(tp), + CreateCrossClusterApiKey: security_create_cross_cluster_api_key.NewCreateCrossClusterApiKeyFunc(tp), + CreateServiceToken: security_create_service_token.NewCreateServiceTokenFunc(tp), + DelegatePki: security_delegate_pki.NewDelegatePkiFunc(tp), + DeletePrivileges: security_delete_privileges.NewDeletePrivilegesFunc(tp), + DeleteRole: security_delete_role.NewDeleteRoleFunc(tp), + DeleteRoleMapping: 
security_delete_role_mapping.NewDeleteRoleMappingFunc(tp), + DeleteServiceToken: security_delete_service_token.NewDeleteServiceTokenFunc(tp), + DeleteUser: security_delete_user.NewDeleteUserFunc(tp), + DisableUser: security_disable_user.NewDisableUserFunc(tp), + DisableUserProfile: security_disable_user_profile.NewDisableUserProfileFunc(tp), + EnableUser: security_enable_user.NewEnableUserFunc(tp), + EnableUserProfile: security_enable_user_profile.NewEnableUserProfileFunc(tp), + EnrollKibana: security_enroll_kibana.NewEnrollKibanaFunc(tp), + EnrollNode: security_enroll_node.NewEnrollNodeFunc(tp), + GetApiKey: security_get_api_key.NewGetApiKeyFunc(tp), + GetBuiltinPrivileges: security_get_builtin_privileges.NewGetBuiltinPrivilegesFunc(tp), + GetPrivileges: security_get_privileges.NewGetPrivilegesFunc(tp), + GetRole: security_get_role.NewGetRoleFunc(tp), + GetRoleMapping: security_get_role_mapping.NewGetRoleMappingFunc(tp), + GetServiceAccounts: security_get_service_accounts.NewGetServiceAccountsFunc(tp), + GetServiceCredentials: security_get_service_credentials.NewGetServiceCredentialsFunc(tp), + GetSettings: security_get_settings.NewGetSettingsFunc(tp), + GetToken: security_get_token.NewGetTokenFunc(tp), + GetUser: security_get_user.NewGetUserFunc(tp), + GetUserPrivileges: security_get_user_privileges.NewGetUserPrivilegesFunc(tp), + GetUserProfile: security_get_user_profile.NewGetUserProfileFunc(tp), + GrantApiKey: security_grant_api_key.NewGrantApiKeyFunc(tp), + HasPrivileges: security_has_privileges.NewHasPrivilegesFunc(tp), + HasPrivilegesUserProfile: security_has_privileges_user_profile.NewHasPrivilegesUserProfileFunc(tp), + InvalidateApiKey: security_invalidate_api_key.NewInvalidateApiKeyFunc(tp), + InvalidateToken: security_invalidate_token.NewInvalidateTokenFunc(tp), + OidcAuthenticate: security_oidc_authenticate.NewOidcAuthenticateFunc(tp), + OidcLogout: security_oidc_logout.NewOidcLogoutFunc(tp), + OidcPrepareAuthentication: security_oidc_prepare_authentication.NewOidcPrepareAuthenticationFunc(tp), + PutPrivileges: security_put_privileges.NewPutPrivilegesFunc(tp), + PutRole: security_put_role.NewPutRoleFunc(tp), + PutRoleMapping: security_put_role_mapping.NewPutRoleMappingFunc(tp), + PutUser: security_put_user.NewPutUserFunc(tp), + QueryApiKeys: security_query_api_keys.NewQueryApiKeysFunc(tp), + QueryRole: security_query_role.NewQueryRoleFunc(tp), + QueryUser: security_query_user.NewQueryUserFunc(tp), + SamlAuthenticate: security_saml_authenticate.NewSamlAuthenticateFunc(tp), + SamlCompleteLogout: security_saml_complete_logout.NewSamlCompleteLogoutFunc(tp), + SamlInvalidate: security_saml_invalidate.NewSamlInvalidateFunc(tp), + SamlLogout: security_saml_logout.NewSamlLogoutFunc(tp), + SamlPrepareAuthentication: security_saml_prepare_authentication.NewSamlPrepareAuthenticationFunc(tp), + SamlServiceProviderMetadata: security_saml_service_provider_metadata.NewSamlServiceProviderMetadataFunc(tp), + SuggestUserProfiles: security_suggest_user_profiles.NewSuggestUserProfilesFunc(tp), + UpdateApiKey: security_update_api_key.NewUpdateApiKeyFunc(tp), + UpdateCrossClusterApiKey: security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKeyFunc(tp), + UpdateSettings: security_update_settings.NewUpdateSettingsFunc(tp), + UpdateUserProfileData: security_update_user_profile_data.NewUpdateUserProfileDataFunc(tp), + }, + + Shutdown: Shutdown{ + DeleteNode: shutdown_delete_node.NewDeleteNodeFunc(tp), + GetNode: shutdown_get_node.NewGetNodeFunc(tp), + PutNode: 
shutdown_put_node.NewPutNodeFunc(tp), + }, + + Simulate: Simulate{ + Ingest: simulate_ingest.NewIngestFunc(tp), + }, + + Slm: Slm{ + DeleteLifecycle: slm_delete_lifecycle.NewDeleteLifecycleFunc(tp), + ExecuteLifecycle: slm_execute_lifecycle.NewExecuteLifecycleFunc(tp), + ExecuteRetention: slm_execute_retention.NewExecuteRetentionFunc(tp), + GetLifecycle: slm_get_lifecycle.NewGetLifecycleFunc(tp), + GetStats: slm_get_stats.NewGetStatsFunc(tp), + GetStatus: slm_get_status.NewGetStatusFunc(tp), + PutLifecycle: slm_put_lifecycle.NewPutLifecycleFunc(tp), + Start: slm_start.NewStartFunc(tp), + Stop: slm_stop.NewStopFunc(tp), + }, + + Snapshot: Snapshot{ + CleanupRepository: snapshot_cleanup_repository.NewCleanupRepositoryFunc(tp), + Clone: snapshot_clone.NewCloneFunc(tp), + Create: snapshot_create.NewCreateFunc(tp), + CreateRepository: snapshot_create_repository.NewCreateRepositoryFunc(tp), + Delete: snapshot_delete.NewDeleteFunc(tp), + DeleteRepository: snapshot_delete_repository.NewDeleteRepositoryFunc(tp), + Get: snapshot_get.NewGetFunc(tp), + GetRepository: snapshot_get_repository.NewGetRepositoryFunc(tp), + RepositoryAnalyze: snapshot_repository_analyze.NewRepositoryAnalyzeFunc(tp), + RepositoryVerifyIntegrity: snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrityFunc(tp), + Restore: snapshot_restore.NewRestoreFunc(tp), + Status: snapshot_status.NewStatusFunc(tp), + VerifyRepository: snapshot_verify_repository.NewVerifyRepositoryFunc(tp), + }, + + Sql: Sql{ + ClearCursor: sql_clear_cursor.NewClearCursorFunc(tp), + DeleteAsync: sql_delete_async.NewDeleteAsyncFunc(tp), + GetAsync: sql_get_async.NewGetAsyncFunc(tp), + GetAsyncStatus: sql_get_async_status.NewGetAsyncStatusFunc(tp), + Query: sql_query.NewQueryFunc(tp), + Translate: sql_translate.NewTranslateFunc(tp), + }, + + Ssl: Ssl{ + Certificates: ssl_certificates.NewCertificatesFunc(tp), + }, + + Streams: Streams{ + LogsDisable: streams_logs_disable.NewLogsDisableFunc(tp), + LogsEnable: streams_logs_enable.NewLogsEnableFunc(tp), + Status: streams_status.NewStatusFunc(tp), + }, + + Synonyms: Synonyms{ + DeleteSynonym: synonyms_delete_synonym.NewDeleteSynonymFunc(tp), + DeleteSynonymRule: synonyms_delete_synonym_rule.NewDeleteSynonymRuleFunc(tp), + GetSynonym: synonyms_get_synonym.NewGetSynonymFunc(tp), + GetSynonymRule: synonyms_get_synonym_rule.NewGetSynonymRuleFunc(tp), + GetSynonymsSets: synonyms_get_synonyms_sets.NewGetSynonymsSetsFunc(tp), + PutSynonym: synonyms_put_synonym.NewPutSynonymFunc(tp), + PutSynonymRule: synonyms_put_synonym_rule.NewPutSynonymRuleFunc(tp), + }, + + Tasks: Tasks{ + Cancel: tasks_cancel.NewCancelFunc(tp), + Get: tasks_get.NewGetFunc(tp), + List: tasks_list.NewListFunc(tp), + }, + + TextStructure: TextStructure{ + FindFieldStructure: text_structure_find_field_structure.NewFindFieldStructureFunc(tp), + FindMessageStructure: text_structure_find_message_structure.NewFindMessageStructureFunc(tp), + FindStructure: text_structure_find_structure.NewFindStructureFunc(tp), + TestGrokPattern: text_structure_test_grok_pattern.NewTestGrokPatternFunc(tp), + }, + + Transform: Transform{ + DeleteTransform: transform_delete_transform.NewDeleteTransformFunc(tp), + GetNodeStats: transform_get_node_stats.NewGetNodeStatsFunc(tp), + GetTransform: transform_get_transform.NewGetTransformFunc(tp), + GetTransformStats: transform_get_transform_stats.NewGetTransformStatsFunc(tp), + PreviewTransform: transform_preview_transform.NewPreviewTransformFunc(tp), + PutTransform: transform_put_transform.NewPutTransformFunc(tp), + 
ResetTransform: transform_reset_transform.NewResetTransformFunc(tp), + ScheduleNowTransform: transform_schedule_now_transform.NewScheduleNowTransformFunc(tp), + StartTransform: transform_start_transform.NewStartTransformFunc(tp), + StopTransform: transform_stop_transform.NewStopTransformFunc(tp), + UpdateTransform: transform_update_transform.NewUpdateTransformFunc(tp), + UpgradeTransforms: transform_upgrade_transforms.NewUpgradeTransformsFunc(tp), + }, + + Watcher: Watcher{ + AckWatch: watcher_ack_watch.NewAckWatchFunc(tp), + ActivateWatch: watcher_activate_watch.NewActivateWatchFunc(tp), + DeactivateWatch: watcher_deactivate_watch.NewDeactivateWatchFunc(tp), + DeleteWatch: watcher_delete_watch.NewDeleteWatchFunc(tp), + ExecuteWatch: watcher_execute_watch.NewExecuteWatchFunc(tp), + GetSettings: watcher_get_settings.NewGetSettingsFunc(tp), + GetWatch: watcher_get_watch.NewGetWatchFunc(tp), + PutWatch: watcher_put_watch.NewPutWatchFunc(tp), + QueryWatches: watcher_query_watches.NewQueryWatchesFunc(tp), + Start: watcher_start.NewStartFunc(tp), + Stats: watcher_stats.NewStatsFunc(tp), + Stop: watcher_stop.NewStopFunc(tp), + UpdateSettings: watcher_update_settings.NewUpdateSettingsFunc(tp), + }, + + Xpack: Xpack{ + Info: xpack_info.NewInfoFunc(tp), + Usage: xpack_usage.NewUsageFunc(tp), + }, + + Bulk: core_bulk.NewBulkFunc(tp), + ClearScroll: core_clear_scroll.NewClearScrollFunc(tp), + ClosePointInTime: core_close_point_in_time.NewClosePointInTimeFunc(tp), + Count: core_count.NewCountFunc(tp), + Create: core_create.NewCreateFunc(tp), + Delete: core_delete.NewDeleteFunc(tp), + DeleteByQuery: core_delete_by_query.NewDeleteByQueryFunc(tp), + DeleteByQueryRethrottle: core_delete_by_query_rethrottle.NewDeleteByQueryRethrottleFunc(tp), + DeleteScript: core_delete_script.NewDeleteScriptFunc(tp), + Exists: core_exists.NewExistsFunc(tp), + ExistsSource: core_exists_source.NewExistsSourceFunc(tp), + Explain: core_explain.NewExplainFunc(tp), + FieldCaps: core_field_caps.NewFieldCapsFunc(tp), + Get: core_get.NewGetFunc(tp), + GetScript: core_get_script.NewGetScriptFunc(tp), + GetScriptContext: core_get_script_context.NewGetScriptContextFunc(tp), + GetScriptLanguages: core_get_script_languages.NewGetScriptLanguagesFunc(tp), + GetSource: core_get_source.NewGetSourceFunc(tp), + HealthReport: core_health_report.NewHealthReportFunc(tp), + Index: core_index.NewIndexFunc(tp), + Info: core_info.NewInfoFunc(tp), + Mget: core_mget.NewMgetFunc(tp), + Msearch: core_msearch.NewMsearchFunc(tp), + MsearchTemplate: core_msearch_template.NewMsearchTemplateFunc(tp), + Mtermvectors: core_mtermvectors.NewMtermvectorsFunc(tp), + OpenPointInTime: core_open_point_in_time.NewOpenPointInTimeFunc(tp), + Ping: core_ping.NewPingFunc(tp), + PutScript: core_put_script.NewPutScriptFunc(tp), + RankEval: core_rank_eval.NewRankEvalFunc(tp), + Reindex: core_reindex.NewReindexFunc(tp), + ReindexRethrottle: core_reindex_rethrottle.NewReindexRethrottleFunc(tp), + RenderSearchTemplate: core_render_search_template.NewRenderSearchTemplateFunc(tp), + ScriptsPainlessExecute: core_scripts_painless_execute.NewScriptsPainlessExecuteFunc(tp), + Scroll: core_scroll.NewScrollFunc(tp), + Search: core_search.NewSearchFunc(tp), + SearchMvt: core_search_mvt.NewSearchMvtFunc(tp), + SearchShards: core_search_shards.NewSearchShardsFunc(tp), + SearchTemplate: core_search_template.NewSearchTemplateFunc(tp), + TermsEnum: core_terms_enum.NewTermsEnumFunc(tp), + Termvectors: core_termvectors.NewTermvectorsFunc(tp), + Update: core_update.NewUpdateFunc(tp), + 
UpdateByQuery: core_update_by_query.NewUpdateByQueryFunc(tp), + UpdateByQueryRethrottle: core_update_by_query_rethrottle.NewUpdateByQueryRethrottleFunc(tp), + } +} + +type MethodAsyncSearch struct { + tp elastictransport.Interface +} + +type MethodAutoscaling struct { + tp elastictransport.Interface +} + +type MethodCapabilities struct { + tp elastictransport.Interface +} + +type MethodCat struct { + tp elastictransport.Interface +} + +type MethodCcr struct { + tp elastictransport.Interface +} + +type MethodCluster struct { + tp elastictransport.Interface +} + +type MethodConnector struct { + tp elastictransport.Interface +} + +type MethodCore struct { + tp elastictransport.Interface +} + +type MethodDanglingIndices struct { + tp elastictransport.Interface +} + +type MethodEnrich struct { + tp elastictransport.Interface +} + +type MethodEql struct { + tp elastictransport.Interface +} + +type MethodEsql struct { + tp elastictransport.Interface +} + +type MethodFeatures struct { + tp elastictransport.Interface +} + +type MethodFleet struct { + tp elastictransport.Interface +} + +type MethodGraph struct { + tp elastictransport.Interface +} + +type MethodIlm struct { + tp elastictransport.Interface +} + +type MethodIndices struct { + tp elastictransport.Interface +} + +type MethodInference struct { + tp elastictransport.Interface +} + +type MethodIngest struct { + tp elastictransport.Interface +} + +type MethodLicense struct { + tp elastictransport.Interface +} + +type MethodLogstash struct { + tp elastictransport.Interface +} + +type MethodMigration struct { + tp elastictransport.Interface +} + +type MethodMl struct { + tp elastictransport.Interface +} + +type MethodMonitoring struct { + tp elastictransport.Interface +} + +type MethodNodes struct { + tp elastictransport.Interface +} + +type MethodProfiling struct { + tp elastictransport.Interface +} + +type MethodQueryRules struct { + tp elastictransport.Interface +} + +type MethodRollup struct { + tp elastictransport.Interface +} + +type MethodSearchApplication struct { + tp elastictransport.Interface +} + +type MethodSearchableSnapshots struct { + tp elastictransport.Interface +} + +type MethodSecurity struct { + tp elastictransport.Interface +} + +type MethodShutdown struct { + tp elastictransport.Interface +} + +type MethodSimulate struct { + tp elastictransport.Interface +} + +type MethodSlm struct { + tp elastictransport.Interface +} + +type MethodSnapshot struct { + tp elastictransport.Interface +} + +type MethodSql struct { + tp elastictransport.Interface +} + +type MethodSsl struct { + tp elastictransport.Interface +} + +type MethodStreams struct { + tp elastictransport.Interface +} + +type MethodSynonyms struct { + tp elastictransport.Interface +} + +type MethodTasks struct { + tp elastictransport.Interface +} + +type MethodTextStructure struct { + tp elastictransport.Interface +} + +type MethodTransform struct { + tp elastictransport.Interface +} + +type MethodWatcher struct { + tp elastictransport.Interface +} + +type MethodXpack struct { + tp elastictransport.Interface +} + +type MethodAPI struct { + tp elastictransport.Interface + AsyncSearch MethodAsyncSearch + Autoscaling MethodAutoscaling + Capabilities MethodCapabilities + Cat MethodCat + Ccr MethodCcr + Cluster MethodCluster + Connector MethodConnector + Core MethodCore + DanglingIndices MethodDanglingIndices + Enrich MethodEnrich + Eql MethodEql + Esql MethodEsql + Features MethodFeatures + Fleet MethodFleet + Graph MethodGraph + Ilm MethodIlm + Indices MethodIndices + 
Inference MethodInference
+	Ingest MethodIngest
+	License MethodLicense
+	Logstash MethodLogstash
+	Migration MethodMigration
+	Ml MethodMl
+	Monitoring MethodMonitoring
+	Nodes MethodNodes
+	Profiling MethodProfiling
+	QueryRules MethodQueryRules
+	Rollup MethodRollup
+	SearchApplication MethodSearchApplication
+	SearchableSnapshots MethodSearchableSnapshots
+	Security MethodSecurity
+	Shutdown MethodShutdown
+	Simulate MethodSimulate
+	Slm MethodSlm
+	Snapshot MethodSnapshot
+	Sql MethodSql
+	Ssl MethodSsl
+	Streams MethodStreams
+	Synonyms MethodSynonyms
+	Tasks MethodTasks
+	TextStructure MethodTextStructure
+	Transform MethodTransform
+	Watcher MethodWatcher
+	Xpack MethodXpack
+}
+
+// Bulk index or delete documents.
+// Perform multiple `index`, `create`, `delete`, and `update` actions in a
+// single request.
+// This reduces overhead and can greatly increase indexing speed.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To use the `create` action, you must have the `create_doc`, `create`,
+// `index`, or `write` index privilege. Data streams support only the `create`
+// action.
+// * To use the `index` action, you must have the `create`, `index`, or `write`
+// index privilege.
+// * To use the `delete` action, you must have the `delete` or `write` index
+// privilege.
+// * To use the `update` action, you must have the `index` or `write` index
+// privilege.
+// * To automatically create a data stream or index with a bulk API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+// * To make the result of a bulk operation visible to search using the
+// `refresh` parameter, you must have the `maintenance` or `manage` index
+// privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// The actions are specified in the request body using a newline delimited JSON
+// (NDJSON) structure:
+//
+// ```
+// action_and_meta_data\n
+// optional_source\n
+// action_and_meta_data\n
+// optional_source\n
+// ....
+// action_and_meta_data\n
+// optional_source\n
+// ```
+//
+// The `index` and `create` actions expect a source on the next line and have
+// the same semantics as the `op_type` parameter in the standard index API.
+// A `create` action fails if a document with the same ID already exists in the
+// target.
+// An `index` action adds or replaces a document as necessary.
+//
+// NOTE: Data streams support only the `create` action.
+// To update or delete a document in a data stream, you must target the backing
+// index containing the document.
+//
+// An `update` action expects that the partial doc, upsert, and script and its
+// options are specified on the next line.
+//
+// A `delete` action does not expect a source on the next line and has the same
+// semantics as the standard delete API.
+//
+// NOTE: The final line of data must end with a newline character (`\n`).
+// Each newline character may be preceded by a carriage return (`\r`).
+// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header
+// of `application/json` or `application/x-ndjson`.
+// Because this format uses literal newline characters (`\n`) as delimiters,
+// make sure that the JSON actions and sources are not pretty printed.
+//
+// If you provide a target in the request path, it is used for any actions that
+// don't explicitly specify an `_index` argument.
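+//
+// As a hedged illustration, such a body can be assembled in Go with only the
+// standard library (the `test` index and the documents are placeholders, and
+// `bytes`, `encoding/json`, and `strconv` are assumed to be imported):
+//
+// ```
+// var buf bytes.Buffer
+// for i, doc := range []map[string]any{{"field1": "value1"}, {"field1": "value2"}} {
+// 	meta, _ := json.Marshal(map[string]any{"index": map[string]any{"_index": "test", "_id": strconv.Itoa(i + 1)}})
+// 	src, _ := json.Marshal(doc)
+// 	buf.Write(meta)
+// 	buf.WriteByte('\n') // the action line is followed by the source line
+// 	buf.Write(src)
+// 	buf.WriteByte('\n') // the final line must also end with a newline
+// }
+// ```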
+//
+// A note on the format: the idea here is to make processing as fast as
+// possible.
+// As some of the actions are redirected to other shards on other nodes, only
+// `action_meta_data` is parsed on the receiving node side.
+//
+// Client libraries using this protocol should strive to do something
+// similar on the client side, and reduce buffering as much as possible.
+//
+// There is no "correct" number of actions to perform in a single bulk request.
+// Experiment with different settings to find the optimal size for your
+// particular workload.
+// Note that Elasticsearch limits the maximum size of an HTTP request to 100mb
+// by default so clients must ensure that no request exceeds this size.
+// It is not possible to index a single document that exceeds the size limit, so
+// you must pre-process any such documents into smaller pieces before sending
+// them to Elasticsearch.
+// For instance, split documents into pages or chapters before indexing them, or
+// store raw binary data in a system outside Elasticsearch and replace the raw
+// data with a link to the external system in the documents that you send to
+// Elasticsearch.
+//
+// **Client support for bulk requests**
+//
+// Some of the officially supported clients provide helpers to assist with bulk
+// requests and reindexing:
+//
+// * Go: Check out `esutil.BulkIndexer`
+// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+// `Search::Elasticsearch::Client::5_0::Scroll`
+// * Python: Check out `elasticsearch.helpers.*`
+// * JavaScript: Check out `client.helpers.*`
+// * .NET: Check out `BulkAllObservable`
+// * PHP: Check out bulk indexing.
+//
+// **Submitting bulk requests with cURL**
+//
+// If you're providing text file input to `curl`, you must use the
+// `--data-binary` flag instead of plain `-d`.
+// The latter doesn't preserve newlines. For example:
+//
+// ```
+// $ cat requests
+// { "index" : { "_index" : "test", "_id" : "1" } }
+// { "field1" : "value1" }
+// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+// --data-binary "@requests"; echo
+// {"took":7, "errors": false,
+// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+// ```
+//
+// **Optimistic concurrency control**
+//
+// Each `index` and `delete` action within a bulk API call may include the
+// `if_seq_no` and `if_primary_term` parameters in their respective action and
+// meta data lines.
+// The `if_seq_no` and `if_primary_term` parameters control how operations are
+// run, based on the last modification to existing documents. See Optimistic
+// concurrency control for more details.
+//
+// **Versioning**
+//
+// Each bulk item can include the version value using the `version` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_version` mapping.
+// It also supports the `version_type`.
+//
+// **Routing**
+//
+// Each bulk item can include the routing value using the `routing` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_routing` mapping.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Wait for active shards**
+//
+// When making bulk calls, you can set the `wait_for_active_shards` parameter to
+// require a minimum number of shard copies to be active before starting to
+// process the bulk request.
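+//
+// As a hedged sketch of driving this endpoint through the typed surface wired
+// up in this file, reusing the `buf` body assembled above (it assumes `api` is
+// a `*MethodAPI` backed by a working transport, and that the generated builder
+// exposes `Index`, `Raw`, and `Do` as other typedapi endpoints do):
+//
+// ```
+// res, err := api.Bulk().Index("test").Raw(&buf).Do(context.Background())
+// if err != nil {
+// 	log.Fatal(err)
+// }
+// fmt.Println("errors:", res.Errors) // per-item results are in res.Items
+// ```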
+//
+// **Refresh**
+//
+// Control when the changes made by this request are visible to search.
+//
+// NOTE: Only the shards that receive the bulk request will be affected by
+// refresh.
+// Imagine a `_bulk?refresh=wait_for` request with three documents in it that
+// happen to be routed to different shards in an index with five shards.
+// The request will only wait for those three shards to refresh.
+// The other two shards that make up the index do not participate in the `_bulk`
+// request at all.
+//
+// You might want to disable the refresh interval temporarily to improve
+// indexing throughput for large bulk requests.
+// Refer to the linked documentation for step-by-step instructions using the
+// index settings API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk
+func (p *MethodAPI) Bulk() *core_bulk.Bulk {
+	_bulk := core_bulk.NewBulkFunc(p.tp)
+	return _bulk()
+}
+
+// Clear a scrolling search.
+// Clear the search context and results for a scrolling search.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll
+func (p *MethodAPI) ClearScroll() *core_clear_scroll.ClearScroll {
+	_clearscroll := core_clear_scroll.NewClearScrollFunc(p.tp)
+	return _clearscroll()
+}
+
+// Close a point in time.
+// A point in time must be opened explicitly before being used in search
+// requests.
+// The `keep_alive` parameter tells Elasticsearch how long it should persist.
+// A point in time is automatically closed when the `keep_alive` period has
+// elapsed.
+// However, keeping points in time has a cost; close them as soon as they are no
+// longer required for search requests.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-close-point-in-time
+func (p *MethodAPI) ClosePointInTime() *core_close_point_in_time.ClosePointInTime {
+	_closepointintime := core_close_point_in_time.NewClosePointInTimeFunc(p.tp)
+	return _closepointintime()
+}
+
+// Count search results.
+// Get the number of documents matching a query.
+//
+// The query can be provided either by using a simple query string as a
+// parameter, or by defining Query DSL within the request body.
+// The query is optional. When no query is provided, the API uses `match_all` to
+// count all the documents.
+//
+// The count API supports multi-target syntax. You can run a single count API
+// search across multiple data streams and indices.
+//
+// The operation is broadcast across all shards.
+// For each shard ID group, a replica is chosen and the search is run against
+// it.
+// This means that replicas increase the scalability of the count.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count
+func (p *MethodAPI) Count() *core_count.Count {
+	_count := core_count.NewCountFunc(p.tp)
+	return _count()
+}
+
+// Create a new document in the index.
+//
+// You can index a new JSON document with the `/<target>/_doc/` or
+// `/<target>/_create/<_id>` APIs.
+// Using `_create` guarantees that the document is indexed only if it does not
+// already exist.
+// It returns a 409 response when a document with the same ID already exists in
+// the index.
+// To update an existing document, you must use the `/<target>/_doc/` API.
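+//
+// A hedged sketch with the typed API defined in this file (the index, ID, and
+// document are placeholders; it assumes the generated builder exposes `Raw`
+// and `Do` like the other typedapi endpoints):
+//
+// ```
+// res, err := api.Create("my-index-000001", "1").
+// 	Raw(strings.NewReader(`{"user": {"id": "elkbee"}}`)).
+// 	Do(context.Background())
+// if err != nil {
+// 	log.Fatal(err) // a 409 means a document with that ID already exists
+// }
+// fmt.Println(res)
+// ```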
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add a document using the `PUT /<target>/_create/<_id>` or `POST
+// /<target>/_create/<_id>` request formats, you must have the `create_doc`,
+// `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+//
+// If the target doesn't exist and doesn't match a data stream template, the
+// operation automatically creates the index and applies any matching index
+// templates.
+//
+// NOTE: Elasticsearch includes several built-in index templates. To avoid
+// naming collisions with these templates, refer to index pattern documentation.
+//
+// If no mapping exists, the index operation creates a dynamic mapping.
+// By default, new fields and objects are automatically added to the mapping if
+// needed.
+//
+// Automatic index creation is controlled by the `action.auto_create_index`
+// setting.
+// If it is `true`, any index can be created automatically.
+// You can modify this setting to explicitly allow or block automatic creation
+// of indices that match specified patterns or set it to `false` to turn off
+// automatic index creation entirely.
+// Specify a comma-separated list of patterns you want to allow or prefix each
+// pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+// When a list is specified, the default behaviour is to disallow.
+//
+// NOTE: The `action.auto_create_index` setting affects the automatic creation
+// of indices only.
+// It does not affect the creation of data streams.
+//
+// **Routing**
+//
+// By default, shard placement — or routing — is controlled by using a hash of
+// the document's ID value.
+// For more explicit control, the value fed into the hash function used by the
+// router can be directly specified on a per-operation basis using the `routing`
+// parameter.
+//
+// When setting up explicit mapping, you can also use the `_routing` field to
+// direct the index operation to extract the routing value from the document
+// itself.
+// This does come at the (very minimal) cost of an additional document parsing
+// pass.
+// If the `_routing` mapping is defined and set to be required, the index
+// operation will fail if no routing value is provided or extracted.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Distributed**
+//
+// The index operation is directed to the primary shard based on its route and
+// performed on the actual node containing this shard.
+// After the primary shard completes the operation, if needed, the update is
+// distributed to applicable replicas.
+//
+// **Active shards**
+//
+// To improve the resiliency of writes to the system, indexing operations can be
+// configured to wait for a certain number of active shard copies before
+// proceeding with the operation.
+// If the requisite number of active shard copies are not available, then the
+// write operation must wait and retry, until either the requisite shard copies
+// have started or a timeout occurs.
+// By default, write operations only wait for the primary shards to be active
+// before proceeding (that is to say `wait_for_active_shards` is `1`).
+// This default can be overridden in the index settings dynamically by setting
+// `index.write.wait_for_active_shards`.
+// To alter this behavior per operation, use the `wait_for_active_shards`
+// request parameter.
+//
+// Valid values are `all` or any positive integer up to the total number of
+// configured copies per shard in the index (which is `number_of_replicas`+1).
+// Specifying a negative value or a number greater than the number of shard
+// copies will throw an error.
+//
+// For example, suppose you have a cluster of three nodes, A, B, and C and you
+// create an index with the number of replicas set to 3 (resulting in 4
+// shard copies, one more copy than there are nodes).
+// If you attempt an indexing operation, by default the operation will only
+// ensure the primary copy of each shard is available before proceeding.
+// This means that even if B and C went down and A hosted the primary shard
+// copies, the indexing operation would still proceed with only one copy of the
+// data.
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes
+// are up), the indexing operation will require 3 active shard copies before
+// proceeding.
+// This requirement should be met because there are 3 active nodes in the
+// cluster, each one holding a copy of the shard.
+// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is
+// the same in this situation), the indexing operation will not proceed as you
+// do not have all 4 copies of each shard active in the index.
+// The operation will time out unless a new node is brought up in the cluster to
+// host the fourth copy of the shard.
+//
+// It is important to note that this setting greatly reduces the chances of the
+// write operation not writing to the requisite number of shard copies, but it
+// does not completely eliminate the possibility, because this check occurs
+// before the write operation starts.
+// After the write operation is underway, it is still possible for replication
+// to fail on any number of shard copies but still succeed on the primary.
+// The `_shards` section of the API response reveals the number of shard copies
+// on which replication succeeded and failed.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create
+func (p *MethodAPI) Create(index, id string) *core_create.Create {
+	_create := core_create.NewCreateFunc(p.tp)
+	return _create(index, id)
+}
+
+// Delete a document.
+//
+// Remove a JSON document from the specified index.
+//
+// NOTE: You cannot send deletion requests directly to a data stream.
+// To delete a document in a data stream, you must target the backing index
+// containing the document.
+//
+// **Optimistic concurrency control**
+//
+// Delete operations can be made conditional and only be performed if the last
+// modification to the document was assigned the sequence number and primary
+// term specified by the `if_seq_no` and `if_primary_term` parameters.
+// If a mismatch is detected, the operation will result in a
+// `VersionConflictException` and a status code of `409`.
+//
+// **Versioning**
+//
+// Each document indexed is versioned.
+// When deleting a document, the version can be specified to make sure the
+// relevant document you are trying to delete is actually being deleted and it
+// has not changed in the meantime.
+// Every write operation run on a document, deletes included, causes its version
+// to be incremented.
+// The version number of a deleted document remains available for a short time
+// after deletion to allow for control of concurrent operations.
+// The length of time for which a deleted document's version remains available
+// is determined by the `index.gc_deletes` index setting.
+//
+// **Routing**
+//
+// If routing is used during indexing, the routing value also needs to be
+// specified to delete a document.
+//
+// If the `_routing` mapping is set to `required` and no routing value is
+// specified, the delete API throws a `RoutingMissingException` and rejects the
+// request.
+//
+// For example:
+//
+// ```
+// DELETE /my-index-000001/_doc/1?routing=shard-1
+// ```
+//
+// This request deletes the document with ID 1, but it is routed based on the
+// user.
+// The document is not deleted if the correct routing is not specified.
+//
+// **Distributed**
+//
+// The delete operation gets hashed into a specific shard ID.
+// It then gets redirected into the primary shard within that ID group and
+// replicated (if needed) to shard replicas within that ID group.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete
+func (p *MethodAPI) Delete(index, id string) *core_delete.Delete {
+	_delete := core_delete.NewDeleteFunc(p.tp)
+	return _delete(index, id)
+}
+
+// Delete documents.
+//
+// Deletes documents that match the specified query.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or alias:
+//
+// * `read`
+// * `delete` or `write`
+//
+// You can specify the query criteria in the request URI or the request body
+// using the same syntax as the search API.
+// When you submit a delete by query request, Elasticsearch gets a snapshot of
+// the data stream or index when it begins processing the request and deletes
+// matching documents using internal versioning.
+// If a document changes between the time that the snapshot is taken and the
+// delete operation is processed, it results in a version conflict and the
+// delete operation fails.
+//
+// NOTE: Documents with a version equal to 0 cannot be deleted using delete by
+// query because internal versioning does not support 0 as a valid version
+// number.
+//
+// While processing a delete by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents to delete.
+// A bulk delete request is performed for each batch of matching documents.
+// If a search or bulk request is rejected, the requests are retried up to 10
+// times, with exponential back off.
+// If the maximum retry limit is reached, processing halts and all failed
+// requests are returned in the response.
+// Any delete requests that completed successfully still stick; they are not
+// rolled back.
+//
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts the operation could attempt
+// to delete more documents from the source than `max_docs` until it has
+// successfully deleted `max_docs` documents, or it has gone through every
+// document in the source query.
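+//
+// A hedged sketch with the typed API from this file (placeholder index and
+// query; it assumes the generated builder exposes `Raw` and `Do` like the
+// other typedapi endpoints):
+//
+// ```
+// res, err := api.DeleteByQuery("my-index-000001").
+// 	Raw(strings.NewReader(`{"query": {"match": {"user.id": "elkbee"}}}`)).
+// 	Do(context.Background())
+// if err != nil {
+// 	log.Fatal(err)
+// }
+// fmt.Println(res) // the response reports counts such as deleted documents and version conflicts
+// ```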
+//
+// **Throttling delete requests**
+//
+// To control the rate at which delete by query issues batches of delete
+// operations, you can set `requests_per_second` to any positive decimal number.
+// This pads each batch with a wait time to throttle the rate.
+// Set `requests_per_second` to `-1` to disable throttling.
+//
+// Throttling uses a wait time between batches so that the internal scroll
+// requests can be given a timeout that takes the request padding into account.
+// The padding time is the difference between the batch size divided by the
+// `requests_per_second` and the time spent writing.
+// By default the batch size is `1000`, so if `requests_per_second` is set to
+// `500`:
+//
+// ```
+// target_time = 1000 / 500 per second = 2 seconds
+// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+// ```
+//
+// Since the batch is issued as a single `_bulk` request, large batch sizes
+// cause Elasticsearch to create many requests and wait before starting the next
+// set.
+// This is "bursty" instead of "smooth".
+//
+// **Slicing**
+//
+// Delete by query supports sliced scroll to parallelize the delete process.
+// This can improve efficiency and provide a convenient way to break the request
+// down into smaller parts.
+//
+// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to
+// use.
+// This setting will use one slice per shard, up to a certain limit.
+// If there are multiple source data streams or indices, it will choose the
+// number of slices based on the index or backing index with the smallest number
+// of shards.
+// Adding slices to the delete by query operation creates sub-requests which
+// means it has some quirks:
+//
+// * You can see these requests in the tasks APIs. These sub-requests are
+// "child" tasks of the task for the request with slices.
+// * Fetching the status of the task for the request with slices only contains
+// the status of completed slices.
+// * These sub-requests are individually addressable for things like
+// cancellation and rethrottling.
+// * Rethrottling the request with `slices` will rethrottle the unfinished
+// sub-request proportionally.
+// * Canceling the request with `slices` will cancel each sub-request.
+// * Due to the nature of `slices` each sub-request won't get a perfectly even
+// portion of the documents. All documents will be addressed, but some slices
+// may be larger than others. Expect larger slices to have a more even
+// distribution.
+// * Parameters like `requests_per_second` and `max_docs` on a request with
+// `slices` are distributed proportionally to each sub-request. Combine that
+// with the earlier point about distribution being uneven and you should
+// conclude that using `max_docs` with `slices` might not result in exactly
+// `max_docs` documents being deleted.
+// * Each sub-request gets a slightly different snapshot of the source data
+// stream or index though these are all taken at approximately the same time.
+//
+// If you're slicing manually or otherwise tuning automatic slicing, keep in
+// mind that:
+//
+// * Query performance is most efficient when the number of slices is equal to
+// the number of shards in the index or backing index. If that number is large
+// (for example, 500), choose a lower number as too many `slices` hurts
+// performance. Setting `slices` higher than the number of shards generally does
+// not improve efficiency and adds overhead.
+// * Delete performance scales linearly across available resources with the
+// number of slices.
+//
+// Whether query or delete performance dominates the runtime depends on the
+// documents being reindexed and cluster resources.
+//
+// **Cancel a delete by query operation**
+//
+// Any delete by query can be canceled using the task cancel API. For example:
+//
+// ```
+// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+// ```
+//
+// The task ID can be found by using the get tasks API.
+//
+// Cancellation should happen quickly but might take a few seconds.
+// The get task status API will continue to list the delete by query task until
+// this task checks that it has been cancelled and terminates itself.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query
+func (p *MethodAPI) DeleteByQuery(index string) *core_delete_by_query.DeleteByQuery {
+	_deletebyquery := core_delete_by_query.NewDeleteByQueryFunc(p.tp)
+	return _deletebyquery(index)
+}
+
+// Throttle a delete by query operation.
+//
+// Change the number of requests per second for a particular delete by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle
+func (p *MethodAPI) DeleteByQueryRethrottle(taskid string) *core_delete_by_query_rethrottle.DeleteByQueryRethrottle {
+	_deletebyqueryrethrottle := core_delete_by_query_rethrottle.NewDeleteByQueryRethrottleFunc(p.tp)
+	return _deletebyqueryrethrottle(taskid)
+}
+
+// Delete a script or search template.
+// Deletes a stored script or search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script
+func (p *MethodAPI) DeleteScript(id string) *core_delete_script.DeleteScript {
+	_deletescript := core_delete_script.NewDeleteScriptFunc(p.tp)
+	return _deletescript(id)
+}
+
+// Check a document.
+//
+// Verify that a document exists.
+// For example, check to see if a document with the `_id` 0 exists:
+//
+// ```
+// HEAD my-index-000001/_doc/0
+// ```
+//
+// If the document exists, the API returns a status code of `200 - OK`.
+// If the document doesn’t exist, the API returns `404 - Not Found`.
+//
+// **Versioning support**
+//
+// You can use the `version` parameter to check the document only if its current
+// version is equal to the specified one.
+//
+// Internally, Elasticsearch has marked the old document as deleted and added an
+// entirely new document.
+// The old version of the document doesn't disappear immediately, although you
+// won't be able to access it.
+// Elasticsearch cleans up deleted documents in the background as you continue
+// to index more data.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodAPI) Exists(index, id string) *core_exists.Exists {
+	_exists := core_exists.NewExistsFunc(p.tp)
+	return _exists(index, id)
+}
+
+// Check for a document source.
+//
+// Check whether a document source exists in an index.
+// For example:
+//
+// ```
+// HEAD my-index-000001/_source/1
+// ```
+//
+// A document's source is not available if it is disabled in the mapping.
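+//
+// A hedged sketch with the typed API from this file (placeholder names; it
+// assumes the generated HEAD-style builder exposes `IsSuccess`, which performs
+// the request and reports whether the response status was in the 2xx range):
+//
+// ```
+// ok, err := api.ExistsSource("my-index-000001", "1").IsSuccess(context.Background())
+// if err == nil && ok {
+// 	// the document exists and its _source is available
+// }
+// ```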
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodAPI) ExistsSource(index, id string) *core_exists_source.ExistsSource {
+	_existssource := core_exists_source.NewExistsSourceFunc(p.tp)
+	return _existssource(index, id)
+}
+
+// Explain a document match result.
+// Get information about why a specific document matches, or doesn't match, a
+// query.
+// It computes a score explanation for a query and a specific document.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain
+func (p *MethodAPI) Explain(index, id string) *core_explain.Explain {
+	_explain := core_explain.NewExplainFunc(p.tp)
+	return _explain(index, id)
+}
+
+// Get the field capabilities.
+//
+// Get information about the capabilities of fields among multiple indices.
+//
+// For data streams, the API returns field capabilities among the stream’s
+// backing indices.
+// It returns runtime fields like any other field.
+// For example, a runtime field with a type of keyword is returned the same as
+// any other field that belongs to the `keyword` family.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps
+func (p *MethodAPI) FieldCaps() *core_field_caps.FieldCaps {
+	_fieldcaps := core_field_caps.NewFieldCapsFunc(p.tp)
+	return _fieldcaps()
+}
+
+// Get a document by its ID.
+//
+// Get a document and its source or stored fields from an index.
+//
+// By default, this API is realtime and is not affected by the refresh rate of
+// the index (when data will become visible for search).
+// In the case where stored fields are requested with the `stored_fields`
+// parameter and the document has been updated but is not yet refreshed, the API
+// will have to parse and analyze the source to extract the stored fields.
+// To turn off realtime behavior, set the `realtime` parameter to false.
+//
+// **Source filtering**
+//
+// By default, the API returns the contents of the `_source` field unless you
+// have used the `stored_fields` parameter or the `_source` field is turned off.
+// You can turn off `_source` retrieval by using the `_source` parameter:
+//
+// ```
+// GET my-index-000001/_doc/0?_source=false
+// ```
+//
+// If you only need one or two fields from the `_source`, use the
+// `_source_includes` or `_source_excludes` parameters to include or filter out
+// particular fields.
+// This can be helpful with large documents where partial retrieval can save on
+// network overhead.
+// Both parameters take a comma-separated list of fields or wildcard
+// expressions.
+// For example:
+//
+// ```
+// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+// ```
+//
+// If you only want to specify includes, you can use a shorter notation:
+//
+// ```
+// GET my-index-000001/_doc/0?_source=*.id
+// ```
+//
+// **Routing**
+//
+// If routing is used during indexing, the routing value also needs to be
+// specified to retrieve a document.
+// For example:
+//
+// ```
+// GET my-index-000001/_doc/2?routing=user1
+// ```
+//
+// This request gets the document with ID 2, but it is routed based on the user.
+// The document is not fetched if the correct routing is not specified.
+//
+// **Distributed**
+//
+// The GET operation is hashed into a specific shard ID.
+// It is then redirected to one of the replicas within that shard ID and returns
+// the result.
+// The replicas are the primary shard and its replicas within that shard ID
+// group.
+// This means that the more replicas you have, the better your GET scaling will
+// be.
+//
+// **Versioning support**
+//
+// You can use the `version` parameter to retrieve the document only if its
+// current version is equal to the specified one.
+//
+// Internally, Elasticsearch has marked the old document as deleted and added an
+// entirely new document.
+// The old version of the document doesn't disappear immediately, although you
+// won't be able to access it.
+// Elasticsearch cleans up deleted documents in the background as you continue
+// to index more data.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodAPI) Get(index, id string) *core_get.Get {
+	_get := core_get.NewGetFunc(p.tp)
+	return _get(index, id)
+}
+
+// Get a script or search template.
+// Retrieves a stored script or search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script
+func (p *MethodAPI) GetScript(id string) *core_get_script.GetScript {
+	_getscript := core_get_script.NewGetScriptFunc(p.tp)
+	return _getscript(id)
+}
+
+// Get script contexts.
+//
+// Get a list of supported script contexts and their methods.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context
+func (p *MethodAPI) GetScriptContext() *core_get_script_context.GetScriptContext {
+	_getscriptcontext := core_get_script_context.NewGetScriptContextFunc(p.tp)
+	return _getscriptcontext()
+}
+
+// Get script languages.
+//
+// Get a list of available script types, languages, and contexts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages
+func (p *MethodAPI) GetScriptLanguages() *core_get_script_languages.GetScriptLanguages {
+	_getscriptlanguages := core_get_script_languages.NewGetScriptLanguagesFunc(p.tp)
+	return _getscriptlanguages()
+}
+
+// Get a document's source.
+//
+// Get the source of a document.
+// For example:
+//
+// ```
+// GET my-index-000001/_source/1
+// ```
+//
+// You can use the source filtering parameters to control which parts of the
+// `_source` are returned:
+//
+// ```
+// GET
+// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+// ```
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodAPI) GetSource(index, id string) *core_get_source.GetSource {
+	_getsource := core_get_source.NewGetSourceFunc(p.tp)
+	return _getsource(index, id)
+}
+
+// Get the cluster health.
+// Get a report with the health status of an Elasticsearch cluster.
+// The report contains a list of indicators that compose Elasticsearch
+// functionality.
+//
+// Each indicator has a health status of: green, unknown, yellow or red.
+// The indicator will provide an explanation and metadata describing the reason
+// for its current health status.
+//
+// The cluster’s status is controlled by the worst indicator status.
+//
+// In the event that an indicator’s status is non-green, a list of impacts may
+// be present in the indicator result which detail the functionalities that are
+// negatively affected by the health issue.
+// Each impact carries with it a severity level, an area of the system that is
+// affected, and a simple description of the impact on the system.
+//
+// Some health indicators can determine the root cause of a health problem and
+// prescribe a set of steps that can be performed in order to improve the health
+// of the system.
+// The root cause and remediation steps are encapsulated in a diagnosis.
+// A diagnosis contains a cause detailing a root cause analysis, an action
+// containing a brief description of the steps to take to fix the problem, the
+// list of affected resources (if applicable), and a detailed step-by-step
+// troubleshooting guide to fix the diagnosed problem.
+//
+// NOTE: The health indicators perform root cause analysis of non-green health
+// statuses. This can be computationally expensive when called frequently.
+// When setting up automated polling of the API for health status, set verbose
+// to false to disable the more expensive analysis logic.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report
+func (p *MethodAPI) HealthReport() *core_health_report.HealthReport {
+	_healthreport := core_health_report.NewHealthReportFunc(p.tp)
+	return _healthreport()
+}
+
+// Create or update a document in an index.
+//
+// Add a JSON document to the specified data stream or index and make it
+// searchable.
+// If the target is an index and the document already exists, the request
+// updates the document and increments its version.
+//
+// NOTE: You cannot use this API to send update requests for existing documents
+// in a data stream.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+// format, you must have the `create`, `index`, or `write` index privilege.
+// * To add a document using the `POST /<target>/_doc/` request format, you must
+// have the `create_doc`, `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// NOTE: Replica shards might not all be started when an indexing operation
+// returns successfully.
+// By default, only the primary is required. Set `wait_for_active_shards` to
+// change this default behavior.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+//
+// If the target doesn't exist and doesn't match a data stream template, the
+// operation automatically creates the index and applies any matching index
+// templates.
+//
+// NOTE: Elasticsearch includes several built-in index templates. To avoid
+// naming collisions with these templates, refer to index pattern documentation.
+//
+// If no mapping exists, the index operation creates a dynamic mapping.
+// By default, new fields and objects are automatically added to the mapping if
+// needed.
+//
+// Automatic index creation is controlled by the `action.auto_create_index`
+// setting.
+// If it is `true`, any index can be created automatically.
+// You can modify this setting to explicitly allow or block automatic creation
+// of indices that match specified patterns or set it to `false` to turn off
+// automatic index creation entirely.
+// Specify a comma-separated list of patterns you want to allow or prefix each
+// pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+// When a list is specified, the default behaviour is to disallow.
+//
+// NOTE: The `action.auto_create_index` setting affects the automatic creation
+// of indices only.
+// It does not affect the creation of data streams.
+//
+// **Optimistic concurrency control**
+//
+// Index operations can be made conditional and only be performed if the last
+// modification to the document was assigned the sequence number and primary
+// term specified by the `if_seq_no` and `if_primary_term` parameters.
+// If a mismatch is detected, the operation will result in a
+// `VersionConflictException` and a status code of `409`.
+//
+// **Routing**
+//
+// By default, shard placement — or routing — is controlled by using a hash of
+// the document's ID value.
+// For more explicit control, the value fed into the hash function used by the
+// router can be directly specified on a per-operation basis using the `routing`
+// parameter.
+//
+// When setting up explicit mapping, you can also use the `_routing` field to
+// direct the index operation to extract the routing value from the document
+// itself.
+// This does come at the (very minimal) cost of an additional document parsing
+// pass.
+// If the `_routing` mapping is defined and set to be required, the index
+// operation will fail if no routing value is provided or extracted.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Distributed**
+//
+// The index operation is directed to the primary shard based on its route and
+// performed on the actual node containing this shard.
+// After the primary shard completes the operation, if needed, the update is
+// distributed to applicable replicas.
+//
+// **Active shards**
+//
+// To improve the resiliency of writes to the system, indexing operations can be
+// configured to wait for a certain number of active shard copies before
+// proceeding with the operation.
+// If the requisite number of active shard copies are not available, then the
+// write operation must wait and retry, until either the requisite shard copies
+// have started or a timeout occurs.
+// By default, write operations only wait for the primary shards to be active
+// before proceeding (that is to say `wait_for_active_shards` is `1`).
+// This default can be overridden in the index settings dynamically by setting
+// `index.write.wait_for_active_shards`.
+// To alter this behavior per operation, use the `wait_for_active_shards`
+// request parameter.
+//
+// Valid values are `all` or any positive integer up to the total number of
+// configured copies per shard in the index (which is `number_of_replicas`+1).
+// Specifying a negative value or a number greater than the number of shard
+// copies will throw an error.
+//
+// For example, suppose you have a cluster of three nodes, A, B, and C, and you
+// create an index called `index` with the number of replicas set to 3
+// (resulting in 4 shard copies, one more copy than there are nodes).
+// If you attempt an indexing operation, by default the operation will only
+// ensure the primary copy of each shard is available before proceeding.
+// This means that even if B and C went down and A hosted the primary shard
+// copies, the indexing operation would still proceed with only one copy of the
+// data.
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes
+// are up), the indexing operation will require 3 active shard copies before
+// proceeding.
+// This requirement should be met because there are 3 active nodes in the
+// cluster, each one holding a copy of the shard.
+// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is
+// the same in this situation), the indexing operation will not proceed as you
+// do not have all 4 copies of each shard active in the index.
+// The operation will time out unless a new node is brought up in the cluster
+// to host the fourth copy of the shard.
+//
+// It is important to note that this setting greatly reduces the chances of the
+// write operation not writing to the requisite number of shard copies, but it
+// does not completely eliminate the possibility, because this check occurs
+// before the write operation starts.
+// After the write operation is underway, it is still possible for replication
+// to fail on any number of shard copies but still succeed on the primary.
+// The `_shards` section of the API response reveals the number of shard copies
+// on which replication succeeded and failed.
+//
+// **No operation (noop) updates**
+//
+// When updating a document by using this API, a new version of the document is
+// always created even if the document hasn't changed.
+// If this isn't acceptable, use the `_update` API with `detect_noop` set to
+// `true`.
+// The `detect_noop` option isn't available on this API because it doesn’t fetch
+// the old source and isn't able to compare it against the new source.
+//
+// There isn't a definitive rule for when noop updates aren't acceptable.
+// It's a combination of lots of factors like how frequently your data source
+// sends updates that are actually noops and how many queries per second
+// Elasticsearch runs on the shard receiving the updates.
+//
+// **Versioning**
+//
+// Each indexed document is given a version number.
+// By default, internal versioning is used that starts at 1 and increments with
+// each update, deletes included.
+// Optionally, the version number can be set to an external value (for example,
+// if maintained in a database).
+// To enable this functionality, `version_type` should be set to `external`.
+// The value provided must be a numeric, long value greater than or equal to 0,
+// and less than around `9.2e+18`.
+//
+// NOTE: Versioning is completely real time, and is not affected by the near
+// real time aspects of search operations.
+// If no version is provided, the operation runs without any version checks.
+//
+// When using the external version type, the system checks to see if the version
+// number passed to the index request is greater than the version of the
+// currently stored document.
+// If true, the document will be indexed and the new version number used.
+// If the value provided is less than or equal to the stored document's version
+// number, a version conflict will occur and the index operation will fail. For
+// example:
+//
+// ```
+// PUT my-index-000001/_doc/1?version=2&version_type=external
+//
+// {
+//   "user": {
+//     "id": "elkbee"
+//   }
+// }
+// ```
+//
+// In this example, the operation will succeed since the supplied version of 2
+// is higher than the current document version of 1.
+// If the document was already updated and its version was set to 2 or higher,
+// the indexing command will fail and result in a conflict (409 HTTP status
+// code).
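+//
+// A minimal Go sketch of the call above (illustrative only: `es` stands for
+// an initialized typed client, the document literal is made up, and the
+// `version`/`version_type` query parameters map onto generated builder
+// setters whose exact names are assumptions):
+//
+// ```
+// // Index document 1 of my-index-000001 and execute the request.
+// res, err := es.Index("my-index-000001").
+//	Id("1").
+//	Document(map[string]any{"user": map[string]any{"id": "elkbee"}}).
+//	Do(context.Background())
+// ```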
+//
+// A nice side effect is that there is no need to maintain strict ordering of
+// async indexing operations run as a result of changes to a source database, as
+// long as version numbers from the source database are used.
+// Even the simple case of updating the Elasticsearch index using data from a
+// database is simplified if external versioning is used, as only the latest
+// version will be used if the index operations arrive out of order.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create
+func (p *MethodAPI) Index(index string) *core_index.Index {
+	_index := core_index.NewIndexFunc(p.tp)
+	return _index(index)
+}
+
+// Get cluster info.
+// Get basic build, version, and cluster information.
+// NOTE: In Serverless, this API is retained for backward compatibility only.
+// Some response fields, such as the version number, should be ignored.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info
+func (p *MethodAPI) Info() *core_info.Info {
+	_info := core_info.NewInfoFunc(p.tp)
+	return _info()
+}
+
+// Get multiple documents.
+//
+// Get multiple JSON documents by ID from one or more indices.
+// If you specify an index in the request URI, you only need to specify the
+// document IDs in the request body.
+// To ensure fast responses, this multi get (mget) API responds with partial
+// results if one or more shards fail.
+//
+// **Filter source fields**
+//
+// By default, the `_source` field is returned for every document (if stored).
+// Use the `_source` and `_source_include` or `_source_exclude` attributes to
+// filter what fields are returned for a particular document.
+// You can include the `_source`, `_source_includes`, and `_source_excludes`
+// query parameters in the request URI to specify the defaults to use when there
+// are no per-document instructions.
+//
+// **Get stored fields**
+//
+// Use the `stored_fields` attribute to specify the set of stored fields you
+// want to retrieve.
+// Any requested fields that are not stored are ignored.
+// You can include the `stored_fields` query parameter in the request URI to
+// specify the defaults to use when there are no per-document instructions.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget
+func (p *MethodAPI) Mget() *core_mget.Mget {
+	_mget := core_mget.NewMgetFunc(p.tp)
+	return _mget()
+}
+
+// Run multiple searches.
+//
+// The format of the request is similar to the bulk API format and makes use of
+// the newline delimited JSON (NDJSON) format.
+// The structure is as follows:
+//
+// ```
+// header\n
+// body\n
+// header\n
+// body\n
+// ```
+//
+// This structure is specifically optimized to reduce parsing if a specific
+// search ends up redirected to another node.
+//
+// IMPORTANT: The final line of data must end with a newline character `\n`.
+// Each newline character may be preceded by a carriage return `\r`.
+// When sending requests to this endpoint, the `Content-Type` header should be
+// set to `application/x-ndjson`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch
+func (p *MethodAPI) Msearch() *core_msearch.Msearch {
+	_msearch := core_msearch.NewMsearchFunc(p.tp)
+	return _msearch()
+}
+
+// Run multiple templated searches.
+//
+// Run multiple templated searches with a single request.
+// If you are providing a text file or text input to `curl`, use the
+// `--data-binary` flag instead of `-d` to preserve newlines.
+// For example:
+//
+// ```
+// $ cat requests
+// { "index": "my-index" }
+// { "id": "my-search-template", "params": { "query_string": "hello world",
+// "from": 0, "size": 10 }}
+// { "index": "my-other-index" }
+// { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+//
+// $ curl -H "Content-Type: application/x-ndjson" -XGET
+// localhost:9200/_msearch/template --data-binary "@requests"; echo
+// ```
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template
+func (p *MethodAPI) MsearchTemplate() *core_msearch_template.MsearchTemplate {
+	_msearchtemplate := core_msearch_template.NewMsearchTemplateFunc(p.tp)
+	return _msearchtemplate()
+}
+
+// Get multiple term vectors.
+//
+// Get multiple term vectors with a single request.
+// You can specify existing documents by index and ID or provide artificial
+// documents in the body of the request.
+// You can specify the index in the request body or request URI.
+// The response contains a `docs` array with all the fetched termvectors.
+// Each element has the structure provided by the termvectors API.
+//
+// **Artificial documents**
+//
+// You can also use `mtermvectors` to generate term vectors for artificial
+// documents provided in the body of the request.
+// The mapping used is determined by the specified `_index`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors
+func (p *MethodAPI) Mtermvectors() *core_mtermvectors.Mtermvectors {
+	_mtermvectors := core_mtermvectors.NewMtermvectorsFunc(p.tp)
+	return _mtermvectors()
+}
+
+// Open a point in time.
+//
+// A search request by default runs against the most recent visible data of the
+// target indices, which is called a point in time. An Elasticsearch PIT (point
+// in time) is a lightweight view into the state of the data as it existed when
+// initiated. In some cases, it’s preferred to perform multiple search requests
+// using the same point in time. For example, if refreshes happen between
+// `search_after` requests, then the results of those requests might not be
+// consistent as changes happening between searches are only visible to the
+// more recent point in time.
+//
+// A point in time must be opened explicitly before being used in search
+// requests.
+//
+// A subsequent search request with the `pit` parameter must not specify
+// `index`, `routing`, or `preference` values as these parameters are copied
+// from the point in time.
+//
+// Just like regular searches, you can use `from` and `size` to page through
+// point in time search results, up to the first 10,000 hits.
+// If you want to retrieve more hits, use PIT with `search_after`.
+//
+// IMPORTANT: The open point in time request and each subsequent search request
+// can return different identifiers; always use the most recently received ID
+// for the next search request.
+//
+// When a PIT that contains shard failures is used in a search request, the
+// missing shards are always reported in the search response as a
+// `NoShardAvailableActionException` exception.
+// To get rid of these exceptions, a new PIT needs to be created so that shards
+// missing from the previous PIT can be handled, assuming they become available
+// in the meantime.
+//
+// **Keeping point in time alive**
+//
+// The `keep_alive` parameter, which is passed to an open point in time request
+// and search request, extends the time to live of the corresponding point in
+// time.
+// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. +// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time +func (p *MethodAPI) OpenPointInTime(index string) *core_open_point_in_time.OpenPointInTime { + _openpointintime := core_open_point_in_time.NewOpenPointInTimeFunc(p.tp) + return _openpointintime(index) +} + +// Ping the cluster. +// Get information about whether the cluster is running. +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster +func (p *MethodAPI) Ping() *core_ping.Ping { + _ping := core_ping.NewPingFunc(p.tp) + return _ping() +} + +// Create or update a script or search template. +// Creates or updates a stored script or search template. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script +func (p *MethodAPI) PutScript(id string) *core_put_script.PutScript { + _putscript := core_put_script.NewPutScriptFunc(p.tp) + return _putscript(id) +} + +// Evaluate ranked search results. +// +// Evaluate the quality of ranked search results over a set of typical search +// queries. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval +func (p *MethodAPI) RankEval() *core_rank_eval.RankEval { + _rankeval := core_rank_eval.NewRankEvalFunc(p.tp) + return _rankeval() +} + +// Reindex documents. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. +// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. 
+// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// Refer to the linked documentation for examples of how to reindex documents. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex +func (p *MethodAPI) Reindex() *core_reindex.Reindex { + _reindex := core_reindex.NewReindexFunc(p.tp) + return _reindex() +} + +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex +func (p *MethodAPI) ReindexRethrottle(taskid string) *core_reindex_rethrottle.ReindexRethrottle { + _reindexrethrottle := core_reindex_rethrottle.NewReindexRethrottleFunc(p.tp) + return _reindexrethrottle(taskid) +} + +// Render a search template. +// +// Render a search template as a search request body. 
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template
+func (p *MethodAPI) RenderSearchTemplate() *core_render_search_template.RenderSearchTemplate {
+	_rendersearchtemplate := core_render_search_template.NewRenderSearchTemplateFunc(p.tp)
+	return _rendersearchtemplate()
+}
+
+// Run a script.
+//
+// Runs a script and returns a result.
+// Use this API to build and test scripts, such as when defining a script for a
+// runtime field.
+// This API requires very few dependencies and is especially useful if you don't
+// have permissions to write documents on a cluster.
+//
+// The API uses several _contexts_, which control how scripts are run, what
+// variables are available at runtime, and what the return type is.
+//
+// Each context requires a script, but additional parameters depend on the
+// context you're using for that script.
+// https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples
+func (p *MethodAPI) ScriptsPainlessExecute() *core_scripts_painless_execute.ScriptsPainlessExecute {
+	_scriptspainlessexecute := core_scripts_painless_execute.NewScriptsPainlessExecuteFunc(p.tp)
+	return _scriptspainlessexecute()
+}
+
+// Run a scrolling search.
+//
+// IMPORTANT: The scroll API is no longer recommended for deep pagination. If
+// you need to preserve the index state while paging through more than 10,000
+// hits, use the `search_after` parameter with a point in time (PIT).
+//
+// The scroll API gets large sets of results from a single scrolling search
+// request.
+// To get the necessary scroll ID, submit a search API request that includes an
+// argument for the `scroll` query parameter.
+// The `scroll` parameter indicates how long Elasticsearch should retain the
+// search context for the request.
+// The search response returns a scroll ID in the `_scroll_id` response body
+// parameter.
+// You can then use the scroll ID with the scroll API to retrieve the next batch
+// of results for the request.
+// If the Elasticsearch security features are enabled, access to the results
+// of a specific scroll ID is restricted to the user or API key that submitted
+// the search.
+//
+// You can also use the scroll API to specify a new scroll parameter that
+// extends or shortens the retention period for the search context.
+//
+// IMPORTANT: Results from a scrolling search reflect the state of the index at
+// the time of the initial search request. Subsequent indexing or document
+// changes only affect later search and scroll requests.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll
+func (p *MethodAPI) Scroll() *core_scroll.Scroll {
+	_scroll := core_scroll.NewScrollFunc(p.tp)
+	return _scroll()
+}
+
+// Run a search.
+//
+// Get search hits that match the query defined in the request.
+// You can provide search queries using the `q` query string parameter or the
+// request body.
+// If both are specified, only the query parameter is used.
+//
+// If the Elasticsearch security features are enabled, you must have the `read`
+// index privilege for the target data stream, index, or alias. For
+// cross-cluster search, refer to the documentation about configuring CCS
+// privileges.
+// To search a point in time (PIT) for an alias, you must have the `read` index
+// privilege for the alias's data streams or indices.
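+//
+// A minimal Go sketch of a match_all search (illustrative only; `es` stands
+// for an initialized typed client, and the request and query structs mirror
+// this module's generated `search` and `types` packages):
+//
+// ```
+// // Run a match_all query against a single index and execute the request.
+// res, err := es.Search().
+//	Index("my-index-000001").
+//	Request(&search.Request{
+//		Query: &types.Query{MatchAll: &types.MatchAllQuery{}},
+//	}).
+//	Do(context.Background())
+// ```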
+//
+// **Search slicing**
+//
+// When paging through a large number of documents, it can be helpful to split
+// the search into multiple slices to consume them independently with the
+// `slice` and `pit` properties.
+// By default, the splitting is done first on the shards, then locally on each
+// shard.
+// The local splitting partitions the shard into contiguous ranges based on
+// Lucene document IDs.
+//
+// For instance, if the number of shards is equal to 2 and you request 4 slices,
+// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are
+// assigned to the second shard.
+//
+// IMPORTANT: The same point-in-time ID should be used for all slices.
+// If different PIT IDs are used, slices can overlap and miss documents.
+// This situation can occur because the splitting criterion is based on Lucene
+// document IDs, which are not stable across changes to the index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search
+func (p *MethodAPI) Search() *core_search.Search {
+	_search := core_search.NewSearchFunc(p.tp)
+	return _search()
+}
+
+// Search a vector tile.
+//
+// Search a vector tile for geospatial values.
+// Before using this API, you should be familiar with the Mapbox vector tile
+// specification.
+// The API returns results as a binary Mapbox vector tile.
+//
+// Internally, Elasticsearch translates a vector tile search API request into a
+// search containing:
+//
+// * A `geo_bounding_box` query on the `<field>`. The query uses the
+// `<zoom>/<x>/<y>` tile as a bounding box.
+// * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The
+// `grid_agg` parameter determines the aggregation type. The aggregation uses
+// the `<zoom>/<x>/<y>` tile as a bounding box.
+// * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only
+// includes this aggregation if the `exact_bounds` parameter is `true`.
+// * If the optional parameter `with_labels` is `true`, the internal search will
+// include a dynamic runtime field that calls the `getLabelPosition` function of
+// the geometry doc value. This enables the generation of new point features
+// containing suggested geometry labels, so that, for example, multi-polygons
+// will have only one label.
+//
+// The API returns results as a binary Mapbox vector tile.
+// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the
+// tile contains three layers:
+//
+// * A `hits` layer containing a feature for each `<field>` value matching the
+// `geo_bounding_box` query.
+// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or
+// `geohex_grid`. The layer only contains features for cells with matching data.
+// * A meta layer containing:
+//   - A feature containing a bounding box. By default, this is the bounding
+//     box of the tile.
+//   - Value ranges for any sub-aggregations on the `geotile_grid` or
+//     `geohex_grid`.
+//   - Metadata for the search.
+//
+// The API only returns features that can display at its zoom level.
+// For example, if a polygon feature has no area at its zoom level, the API
+// omits it.
+// The API returns errors as UTF-8 encoded JSON.
+//
+// IMPORTANT: You can specify several options for this API as either a query
+// parameter or request body parameter.
+// If you specify both parameters, the query parameter takes precedence.
+//
+// **Grid precision for geotile**
+//
+// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles
+// for lower zoom levels.
+// `grid_precision` represents the additional zoom levels available through
+// these cells. The final precision is computed as follows:
+// `<zoom> + grid_precision`.
+// For example, if `<zoom>` is 7 and `grid_precision` is 8, then the
+// `geotile_grid` aggregation will use a precision of 15.
+// The maximum final precision is 29.
+// The `grid_precision` also determines the number of cells for the grid as
+// follows: `(2^grid_precision) x (2^grid_precision)`.
+// For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+// The `aggs` layer only contains features for cells with matching data.
+//
+// **Grid precision for geohex**
+//
+// For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and
+// `grid_precision` to calculate a final precision as follows:
+// `<zoom> + grid_precision`.
+//
+// This precision determines the H3 resolution of the hexagonal cells produced
+// by the `geohex` aggregation.
+// The following table maps the H3 resolution for each precision.
+// For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+// At a precision of 6, hexagonal cells have an H3 resolution of 2.
+// If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+// At a precision of 7, hexagonal cells have an H3 resolution of 3.
+//
+// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+// | --------- | ---------------- | ------------- | ----------------| ----- |
+// | 1 | 4 | 0 | 122 | 30.5 |
+// | 2 | 16 | 0 | 122 | 7.625 |
+// | 3 | 64 | 1 | 842 | 13.15625 |
+// | 4 | 256 | 1 | 842 | 3.2890625 |
+// | 5 | 1024 | 2 | 5882 | 5.744140625 |
+// | 6 | 4096 | 2 | 5882 | 1.436035156 |
+// | 7 | 16384 | 3 | 41162 | 2.512329102 |
+// | 8 | 65536 | 3 | 41162 | 0.6280822754 |
+// | 9 | 262144 | 4 | 288122 | 1.099098206 |
+// | 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+//
+// Hexagonal cells don't align perfectly on a vector tile.
+// Some cells may intersect more than one vector tile.
+// To compute the H3 resolution for each precision, Elasticsearch compares the
+// average density of hexagonal bins at each resolution with the average density
+// of tile bins at each zoom level.
+// Elasticsearch uses the H3 resolution that is closest to the corresponding
+// geotile density.
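+//
+// A minimal Go sketch of a tile request using this method's signature
+// (illustrative only; `es` stands for an initialized typed client, and the
+// index and field names are made up):
+//
+// ```
+// // Fetch the tile at zoom 13, x 4207, y 2692 for the `location` field.
+// res, err := es.SearchMvt("museums", "location", "13", "4207", "2692").
+//	Do(context.Background())
+// ```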
+//
+// Learn how to use the vector tile search API with practical examples in the
+// [Vector tile search
+// examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search)
+// guide.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt
+func (p *MethodAPI) SearchMvt(index, field, zoom, x, y string) *core_search_mvt.SearchMvt {
+	_searchmvt := core_search_mvt.NewSearchMvtFunc(p.tp)
+	return _searchmvt(index, field, zoom, x, y)
+}
+
+// Get the search shards.
+//
+// Get the indices and shards that a search request would be run against.
+// This information can be useful for working out issues or planning
+// optimizations with routing and shard preferences.
+// When filtered aliases are used, the filter is returned as part of the
+// `indices` section.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// `view_index_metadata` or `manage` index privilege for the target data stream,
+// index, or alias.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards
+func (p *MethodAPI) SearchShards() *core_search_shards.SearchShards {
+	_searchshards := core_search_shards.NewSearchShardsFunc(p.tp)
+	return _searchshards()
+}
+
+// Run a search with a search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template
+func (p *MethodAPI) SearchTemplate() *core_search_template.SearchTemplate {
+	_searchtemplate := core_search_template.NewSearchTemplateFunc(p.tp)
+	return _searchtemplate()
+}
+
+// Get terms in an index.
+//
+// Discover terms that match a partial string in an index.
+// This API is designed for low-latency look-ups used in auto-complete
+// scenarios.
+//
+// > info
+// > The terms enum API may return terms from deleted documents. Deleted
+// documents are initially only marked as deleted. It is not until their
+// segments are merged that documents are actually deleted. Until that happens,
+// the terms enum API will return terms from these documents.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum
+func (p *MethodAPI) TermsEnum(index string) *core_terms_enum.TermsEnum {
+	_termsenum := core_terms_enum.NewTermsEnumFunc(p.tp)
+	return _termsenum(index)
+}
+
+// Get term vector information.
+//
+// Get information and statistics about terms in the fields of a particular
+// document.
+//
+// You can retrieve term vectors for documents stored in the index or for
+// artificial documents passed in the body of the request.
+// You can specify the fields you are interested in through the `fields`
+// parameter or by adding the fields to the request body.
+// For example:
+//
+// ```
+// GET /my-index-000001/_termvectors/1?fields=message
+// ```
+//
+// Fields can be specified using wildcards, similar to the multi match query.
+//
+// Term vectors are real-time by default, not near real-time.
+// This can be changed by setting the `realtime` parameter to `false`.
+//
+// You can request three types of values: _term information_, _term statistics_,
+// and _field statistics_.
+// By default, all term information and field statistics are returned for all
+// fields but term statistics are excluded.
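+//
+// A minimal Go sketch of the request above (illustrative only; `es` stands
+// for an initialized typed client, and the `Id` and `Fields` setters are
+// assumptions about the generated builder):
+//
+// ```
+// // Fetch term vectors for the `message` field of document 1.
+// res, err := es.Termvectors("my-index-000001").
+//	Id("1").
+//	Fields("message").
+//	Do(context.Background())
+// ```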
+//
+// **Term information**
+//
+// * term frequency in the field (always returned)
+// * term positions (`positions: true`)
+// * start and end offsets (`offsets: true`)
+// * term payloads (`payloads: true`), as base64 encoded bytes
+//
+// If the requested information wasn't stored in the index, it will be computed
+// on the fly if possible.
+// Additionally, term vectors can be computed for documents that don't exist in
+// the index but are instead provided by the user.
+//
+// > warn
+// > Start and end offsets assume UTF-16 encoding is being used. If you want to
+// use these offsets in order to get the original text that produced this token,
+// you should make sure that the string you are taking a sub-string of is also
+// encoded using UTF-16.
+//
+// **Behaviour**
+//
+// The term and field statistics are not accurate.
+// Deleted documents are not taken into account.
+// The information is only retrieved for the shard the requested document
+// resides in.
+// The term and field statistics are therefore only useful as relative measures
+// whereas the absolute numbers have no meaning in this context.
+// By default, when requesting term vectors of artificial documents, a shard to
+// get the statistics from is randomly selected.
+// Use `routing` only to hit a particular shard.
+// Refer to the linked documentation for detailed examples of how to use this
+// API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors
+func (p *MethodAPI) Termvectors(index string) *core_termvectors.Termvectors {
+	_termvectors := core_termvectors.NewTermvectorsFunc(p.tp)
+	return _termvectors(index)
+}
+
+// Update a document.
+//
+// Update a document by running a script or passing a partial document.
+//
+// If the Elasticsearch security features are enabled, you must have the `index`
+// or `write` index privilege for the target index or index alias.
+//
+// The script can update, delete, or skip modifying the document.
+// The API also supports passing a partial document, which is merged into the
+// existing document.
+// To fully replace an existing document, use the index API.
+// This operation:
+//
+// * Gets the document (collocated with the shard) from the index.
+// * Runs the specified script.
+// * Indexes the result.
+//
+// The document must still be reindexed, but using this API removes some network
+// roundtrips and reduces chances of version conflicts between the GET and the
+// index operation.
+//
+// The `_source` field must be enabled to use this API.
+// In addition to `_source`, you can access the following variables through the
+// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the
+// current timestamp).
+// For usage examples such as partial updates, upserts, and scripted updates,
+// see the External documentation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update
+func (p *MethodAPI) Update(index, id string) *core_update.Update {
+	_update := core_update.NewUpdateFunc(p.tp)
+	return _update(index, id)
+}
+
+// Update documents.
+// Updates documents that match the specified query.
+// If no query is specified, performs an update on every document in the data
+// stream or index without modifying the source, which is useful for picking up
+// mapping changes.
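+//
+// A minimal Go sketch of that query-less form (illustrative only; `es`
+// stands for an initialized typed client):
+//
+// ```
+// // Update every document in place, for example to pick up mapping changes.
+// res, err := es.UpdateByQuery("my-index-000001").Do(context.Background())
+// ```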
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or alias:
+//
+// * `read`
+// * `index` or `write`
+//
+// You can specify the query criteria in the request URI or the request body
+// using the same syntax as the search API.
+//
+// When you submit an update by query request, Elasticsearch gets a snapshot of
+// the data stream or index when it begins processing the request and updates
+// matching documents using internal versioning.
+// When the versions match, the document is updated and the version number is
+// incremented.
+// If a document changes between the time that the snapshot is taken and the
+// update operation is processed, it results in a version conflict and the
+// operation fails.
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts, the operation could attempt
+// to update more documents from the source than `max_docs` until it has
+// successfully updated `max_docs` documents or it has gone through every
+// document in the source query.
+//
+// NOTE: Documents with a version equal to 0 cannot be updated using update by
+// query because internal versioning does not support 0 as a valid version
+// number.
+//
+// While processing an update by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents.
+// A bulk update request is performed for each batch of matching documents.
+// Any query or update failures cause the update by query request to fail and
+// the failures are shown in the response.
+// Any update requests that completed successfully still stick; they are not
+// rolled back.
+//
+// **Refreshing shards**
+//
+// Specifying the `refresh` parameter refreshes all shards once the request
+// completes.
+// This is different to the update API's `refresh` parameter, which causes only
+// the shard that received the request to be refreshed. Unlike the update API,
+// it does not support `wait_for`.
+//
+// **Running update by query asynchronously**
+//
+// If the request contains `wait_for_completion=false`, Elasticsearch
+// performs some preflight checks, launches the request, and returns a
+// [task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+// you can use to cancel or get the status of the task.
+// Elasticsearch creates a record of this task as a document at
+// `.tasks/task/${taskId}`.
+//
+// **Waiting for active shards**
+//
+// `wait_for_active_shards` controls how many copies of a shard must be active
+// before proceeding with the request. See
+// [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)
+// for details. `timeout` controls how long each write request waits for
+// unavailable shards to become available. Both work exactly the way they work
+// in the
+// [Bulk
+// API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk).
+// Update by query uses scrolled searches, so you can also specify the `scroll`
+// parameter to control how long it keeps the search context alive, for example
+// `?scroll=10m`. The default is 5 minutes.
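+//
+// A rough Go sketch of the asynchronous form described above (illustrative
+// only; `es` stands for an initialized typed client, and `WaitForCompletion`
+// is an assumption about the generated query-parameter setter):
+//
+// ```
+// // Launch the update as a task instead of waiting for it to finish.
+// res, err := es.UpdateByQuery("my-index-000001").
+//	WaitForCompletion(false).
+//	Do(context.Background())
+// // The task ID in the response can then be used with the tasks API to
+// // monitor or cancel the operation.
+// ```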
+// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. 
+// * Update performance scales linearly across available resources with the
+// number of slices.
+//
+// Whether query or update performance dominates the runtime depends on the
+// documents being reindexed and cluster resources.
+// Refer to the linked documentation for examples of how to update documents
+// using the `_update_by_query` API:
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query
+func (p *MethodAPI) UpdateByQuery(index string) *core_update_by_query.UpdateByQuery {
+	_updatebyquery := core_update_by_query.NewUpdateByQueryFunc(p.tp)
+	return _updatebyquery(index)
+}
+
+// Throttle an update by query operation.
+//
+// Change the number of requests per second for a particular update by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle
+func (p *MethodAPI) UpdateByQueryRethrottle(taskid string) *core_update_by_query_rethrottle.UpdateByQueryRethrottle {
+	_updatebyqueryrethrottle := core_update_by_query_rethrottle.NewUpdateByQueryRethrottleFunc(p.tp)
+	return _updatebyqueryrethrottle(taskid)
+}
+
+// Delete an async search.
+//
+// If the asynchronous search is still running, it is cancelled.
+// Otherwise, the saved search results are deleted.
+// If the Elasticsearch security features are enabled, the deletion of a
+// specific async search is restricted to: the authenticated user that submitted
+// the original search request; users that have the `cancel_task` cluster
+// privilege.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func (p *MethodAsyncSearch) Delete(id string) *async_search_delete.Delete {
+	_delete := async_search_delete.NewDeleteFunc(p.tp)
+	return _delete(id)
+}
+
+// Get async search results.
+//
+// Retrieve the results of a previously submitted asynchronous search request.
+// If the Elasticsearch security features are enabled, access to the results of
+// a specific async search is restricted to the user or API key that submitted
+// it.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func (p *MethodAsyncSearch) Get(id string) *async_search_get.Get {
+	_get := async_search_get.NewGetFunc(p.tp)
+	return _get(id)
+}
+
+// Get the async search status.
+//
+// Get the status of a previously submitted async search request given its
+// identifier, without retrieving search results.
+// If the Elasticsearch security features are enabled, access to the status
+// of a specific async search is restricted to:
+//
+// * The user or API key that submitted the original async search request.
+// * Users that have the `monitor` cluster privilege or greater privileges.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func (p *MethodAsyncSearch) Status(id string) *async_search_status.Status {
+	_status := async_search_status.NewStatusFunc(p.tp)
+	return _status(id)
+}
+
+// Run an async search.
+//
+// When the primary sort of the results is an indexed field, shards get sorted
+// based on minimum and maximum value that they hold for that field. Partial
+// results become available following the sort criteria that was requested.
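+//
+// A rough submit-then-poll sketch (illustrative only; `es` stands for an
+// initialized typed client, and how the async-search method group hangs off
+// the client is an assumption):
+//
+// ```
+// // Submit the search; the response carries an ID that can later be passed
+// // to Status or Get to check progress or fetch results.
+// res, err := es.AsyncSearch.Submit().
+//	Index("my-index-000001").
+//	Do(context.Background())
+// ```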
+// +// Warning: Asynchronous search does not support scroll or search requests that +// include only the suggest section. +// +// By default, Elasticsearch does not allow you to store an async search +// response larger than 10Mb and an attempt to do this results in an error. +// The maximum allowed size for a stored async search response can be set by +// changing the `search.max_async_search_response_size` cluster level setting. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit +func (p *MethodAsyncSearch) Submit() *async_search_submit.Submit { + _submit := async_search_submit.NewSubmitFunc(p.tp) + return _submit() +} + +// Delete an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy +func (p *MethodAutoscaling) DeleteAutoscalingPolicy(name string) *autoscaling_delete_autoscaling_policy.DeleteAutoscalingPolicy { + _deleteautoscalingpolicy := autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicyFunc(p.tp) + return _deleteautoscalingpolicy(name) +} + +// Get the autoscaling capacity. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// This API gets the current autoscaling capacity based on the configured +// autoscaling policy. +// It will return information to size the cluster appropriately to the current +// workload. +// +// The `required_capacity` is calculated as the maximum of the +// `required_capacity` result of all individual deciders that are enabled for +// the policy. +// +// The operator should verify that the `current_nodes` match the operator’s +// knowledge of the cluster to avoid making autoscaling decisions based on stale +// or incomplete information. +// +// The response contains decider-specific information you can use to diagnose +// how and why autoscaling determined a certain capacity was required. +// This information is provided for diagnosis only. +// Do not use this information to make autoscaling decisions. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity +func (p *MethodAutoscaling) GetAutoscalingCapacity() *autoscaling_get_autoscaling_capacity.GetAutoscalingCapacity { + _getautoscalingcapacity := autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacityFunc(p.tp) + return _getautoscalingcapacity() +} + +// Get an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity +func (p *MethodAutoscaling) GetAutoscalingPolicy(name string) *autoscaling_get_autoscaling_policy.GetAutoscalingPolicy { + _getautoscalingpolicy := autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicyFunc(p.tp) + return _getautoscalingpolicy(name) +} + +// Create or update an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. 
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy
+func (p *MethodAutoscaling) PutAutoscalingPolicy(name string) *autoscaling_put_autoscaling_policy.PutAutoscalingPolicy {
+	_putautoscalingpolicy := autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicyFunc(p.tp)
+	return _putautoscalingpolicy(name)
+}
+
+// Checks if the specified combination of method, API, parameters, and arbitrary
+// capabilities is supported.
+// https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities
+func (p *MethodCapabilities) Capabilities() *capabilities.Capabilities {
+	_capabilities := capabilities.NewCapabilitiesFunc(p.tp)
+	return _capabilities()
+}
+
+// Get aliases.
+//
+// Get the cluster's index aliases, including filter and routing information.
+// This API does not return data stream aliases.
+//
+// IMPORTANT: CAT APIs are only intended for human consumption using the command
+// line or the Kibana console. They are not intended for use by applications.
+// For application consumption, use the aliases API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases
+func (p *MethodCat) Aliases() *cat_aliases.Aliases {
+	_aliases := cat_aliases.NewAliasesFunc(p.tp)
+	return _aliases()
+}
+
+// Get shard allocation information.
+//
+// Get a snapshot of the number of shards allocated to each data node and their
+// disk space.
+//
+// IMPORTANT: CAT APIs are only intended for human consumption using the command
+// line or Kibana console. They are not intended for use by applications.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation
+func (p *MethodCat) Allocation() *cat_allocation.Allocation {
+	_allocation := cat_allocation.NewAllocationFunc(p.tp)
+	return _allocation()
+}
+
+// Get component templates.
+//
+// Get information about component templates in a cluster.
+// Component templates are building blocks for constructing index templates that
+// specify index mappings, settings, and aliases.
+//
+// IMPORTANT: CAT APIs are only intended for human consumption using the command
+// line or Kibana console.
+// They are not intended for use by applications. For application consumption,
+// use the get component template API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates
+func (p *MethodCat) ComponentTemplates() *cat_component_templates.ComponentTemplates {
+	_componenttemplates := cat_component_templates.NewComponentTemplatesFunc(p.tp)
+	return _componenttemplates()
+}
+
+// Get a document count.
+//
+// Get quick access to a document count for a data stream, an index, or an
+// entire cluster.
+// The document count only includes live documents, not deleted documents that
+// have not yet been removed by the merge process.
+//
+// IMPORTANT: CAT APIs are only intended for human consumption using the command
+// line or Kibana console.
+// They are not intended for use by applications. For application consumption,
+// use the count API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count
+func (p *MethodCat) Count() *cat_count.Count {
+	_count := cat_count.NewCountFunc(p.tp)
+	return _count()
+}
+
+// Get field data cache information.
+//
+// Get the amount of heap memory currently used by the field data cache on every
+// data node in the cluster.
+// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the nodes stats API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata +func (p *MethodCat) Fielddata() *cat_fielddata.Fielddata { + _fielddata := cat_fielddata.NewFielddataFunc(p.tp) + return _fielddata() +} + +// Get the cluster health status. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the cluster health API. +// This API is often used to check malfunctioning clusters. +// To help you track cluster health alongside log files and alerting systems, +// the API returns timestamps in two formats: +// `HH:MM:SS`, which is human-readable but includes no date information; +// `Unix epoch time`, which is machine-sortable and includes date information. +// The latter format is useful for cluster recoveries that take multiple days. +// You can use the cat health API to verify cluster health across multiple +// nodes. +// You also can use the API to track the recovery of a large cluster over a +// longer period of time. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health +func (p *MethodCat) Health() *cat_health.Health { + _health := cat_health.NewHealthFunc(p.tp) + return _health() +} + +// Get CAT help. +// +// Get help for the CAT APIs. +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat +func (p *MethodCat) Help() *cat_help.Help { + _help := cat_help.NewHelpFunc(p.tp) + return _help() +} + +// Get index information. +// +// Get high-level information about indices in a cluster, including backing +// indices for data streams. +// +// Use this request to get the following information for each index in a +// cluster: +// - shard count +// - document count +// - deleted document count +// - primary store size +// - total store size of all shards, including shard replicas +// +// These metrics are retrieved directly from Lucene, which Elasticsearch uses +// internally to power indexing and search. As a result, all document counts +// include hidden nested documents. +// To get an accurate count of Elasticsearch documents, use the cat count or +// count APIs. +// +// CAT APIs are only intended for human consumption using the command line or +// Kibana console. +// They are not intended for use by applications. For application consumption, +// use an index endpoint. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices +func (p *MethodCat) Indices() *cat_indices.Indices { + _indices := cat_indices.NewIndicesFunc(p.tp) + return _indices() +} + +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. +// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master +func (p *MethodCat) Master() *cat_master.Master { + _master := cat_master.NewMasterFunc(p.tp) + return _master() +} + +// Get data frame analytics jobs. +// +// Get configuration and usage information about data frame analytics jobs. 
+// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get data frame analytics jobs statistics +// API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics +func (p *MethodCat) MlDataFrameAnalytics() *cat_ml_data_frame_analytics.MlDataFrameAnalytics { + _mldataframeanalytics := cat_ml_data_frame_analytics.NewMlDataFrameAnalyticsFunc(p.tp) + return _mldataframeanalytics() +} + +// Get datafeeds. +// +// Get configuration and usage information about datafeeds. +// This API returns a maximum of 10,000 datafeeds. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, `monitor`, `manage_ml`, or `manage` +// cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get datafeed statistics API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds +func (p *MethodCat) MlDatafeeds() *cat_ml_datafeeds.MlDatafeeds { + _mldatafeeds := cat_ml_datafeeds.NewMlDatafeedsFunc(p.tp) + return _mldatafeeds() +} + +// Get anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. +// This API returns a maximum of 10,000 jobs. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, +// `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get anomaly detection job statistics API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs +func (p *MethodCat) MlJobs() *cat_ml_jobs.MlJobs { + _mljobs := cat_ml_jobs.NewMlJobsFunc(p.tp) + return _mljobs() +} + +// Get trained models. +// +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get trained models statistics API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models +func (p *MethodCat) MlTrainedModels() *cat_ml_trained_models.MlTrainedModels { + _mltrainedmodels := cat_ml_trained_models.NewMlTrainedModelsFunc(p.tp) + return _mltrainedmodels() +} + +// Get node attribute information. +// +// Get information about custom node attributes. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs +func (p *MethodCat) Nodeattrs() *cat_nodeattrs.Nodeattrs { + _nodeattrs := cat_nodeattrs.NewNodeattrsFunc(p.tp) + return _nodeattrs() +} + +// Get node information. +// +// Get information about the nodes in a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. 
For +// application consumption, use the nodes info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes +func (p *MethodCat) Nodes() *cat_nodes.Nodes { + _nodes := cat_nodes.NewNodesFunc(p.tp) + return _nodes() +} + +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the pending cluster tasks API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks +func (p *MethodCat) PendingTasks() *cat_pending_tasks.PendingTasks { + _pendingtasks := cat_pending_tasks.NewPendingTasksFunc(p.tp) + return _pendingtasks() +} + +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins +func (p *MethodCat) Plugins() *cat_plugins.Plugins { + _plugins := cat_plugins.NewPluginsFunc(p.tp) + return _plugins() +} + +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or syncing a replica shard from a primary +// shard. When a shard recovery completes, the recovered shard is available for +// search and indexing. +// For data streams, the API returns information about the stream’s backing +// indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index recovery API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery +func (p *MethodCat) Recovery() *cat_recovery.Recovery { + _recovery := cat_recovery.NewRecoveryFunc(p.tp) + return _recovery() +} + +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get snapshot repository API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories +func (p *MethodCat) Repositories() *cat_repositories.Repositories { + _repositories := cat_repositories.NewRepositoriesFunc(p.tp) + return _repositories() +} + +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index segments API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments +func (p *MethodCat) Segments() *cat_segments.Segments { + _segments := cat_segments.NewSegmentsFunc(p.tp) + return _segments() +} + +// Get shard information. +// +// Get information about the shards in a cluster. 
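+// As an illustrative sketch (the *MethodCat receiver `cat` and the
+// context.Context `ctx` are assumed):
+//
+//	shards, err := cat.Shards().Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = shards
+//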
+// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards +func (p *MethodCat) Shards() *cat_shards.Shards { + _shards := cat_shards.NewShardsFunc(p.tp) + return _shards() +} + +// Get snapshot information. +// +// Get information about the snapshots stored in one or more repositories. +// A snapshot is a backup of an index or running Elasticsearch cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get snapshot API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots +func (p *MethodCat) Snapshots() *cat_snapshots.Snapshots { + _snapshots := cat_snapshots.NewSnapshotsFunc(p.tp) + return _snapshots() +} + +// Get task information. +// +// Get information about tasks currently running in the cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the task management API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks +func (p *MethodCat) Tasks() *cat_tasks.Tasks { + _tasks := cat_tasks.NewTasksFunc(p.tp) + return _tasks() +} + +// Get index template information. +// +// Get information about the index templates in a cluster. +// You can use index templates to apply index settings and field mappings to new +// indices at creation. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get index template API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates +func (p *MethodCat) Templates() *cat_templates.Templates { + _templates := cat_templates.NewTemplatesFunc(p.tp) + return _templates() +} + +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. +// Returned information includes all built-in thread pools and custom thread +// pools. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool +func (p *MethodCat) ThreadPool() *cat_thread_pool.ThreadPool { + _threadpool := cat_thread_pool.NewThreadPoolFunc(p.tp) + return _threadpool() +} + +// Get transform information. +// +// Get configuration and usage information about transforms. +// +// CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get transform statistics API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms +func (p *MethodCat) Transforms() *cat_transforms.Transforms { + _transforms := cat_transforms.NewTransformsFunc(p.tp) + return _transforms() +} + +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. 
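+//
+// An illustrative sketch (the *MethodCcr receiver `ccr`, the context `ctx`,
+// and the pattern name are assumed):
+//
+//	res, err := ccr.DeleteAutoFollowPattern("my-pattern").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res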
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern +func (p *MethodCcr) DeleteAutoFollowPattern(name string) *ccr_delete_auto_follow_pattern.DeleteAutoFollowPattern { + _deleteautofollowpattern := ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPatternFunc(p.tp) + return _deleteautofollowpattern(name) +} + +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow +func (p *MethodCcr) Follow(index string) *ccr_follow.Follow { + _follow := ccr_follow.NewFollowFunc(p.tp) + return _follow(index) +} + +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info +func (p *MethodCcr) FollowInfo(index string) *ccr_follow_info.FollowInfo { + _followinfo := ccr_follow_info.NewFollowInfoFunc(p.tp) + return _followinfo(index) +} + +// Get follower stats. +// +// Get cross-cluster replication follower stats. +// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats +func (p *MethodCcr) FollowStats(index string) *ccr_follow_stats.FollowStats { + _followstats := ccr_follow_stats.NewFollowStatsFunc(p.tp) + return _followstats(index) +} + +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. +// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. +// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. 
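+//
+// An illustrative sketch (the *MethodCcr receiver `ccr`, the context `ctx`,
+// and the leader index name are assumed; the request body naming the
+// follower is omitted here):
+//
+//	res, err := ccr.ForgetFollower("leader-index").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res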
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower +func (p *MethodCcr) ForgetFollower(index string) *ccr_forget_follower.ForgetFollower { + _forgetfollower := ccr_forget_follower.NewForgetFollowerFunc(p.tp) + return _forgetfollower(index) +} + +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 +func (p *MethodCcr) GetAutoFollowPattern() *ccr_get_auto_follow_pattern.GetAutoFollowPattern { + _getautofollowpattern := ccr_get_auto_follow_pattern.NewGetAutoFollowPatternFunc(p.tp) + return _getautofollowpattern() +} + +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. +// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern +func (p *MethodCcr) PauseAutoFollowPattern(name string) *ccr_pause_auto_follow_pattern.PauseAutoFollowPattern { + _pauseautofollowpattern := ccr_pause_auto_follow_pattern.NewPauseAutoFollowPatternFunc(p.tp) + return _pauseautofollowpattern(name) +} + +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow +func (p *MethodCcr) PauseFollow(index string) *ccr_pause_follow.PauseFollow { + _pausefollow := ccr_pause_follow.NewPauseFollowFunc(p.tp) + return _pausefollow(index) +} + +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern +func (p *MethodCcr) PutAutoFollowPattern(name string) *ccr_put_auto_follow_pattern.PutAutoFollowPattern { + _putautofollowpattern := ccr_put_auto_follow_pattern.NewPutAutoFollowPatternFunc(p.tp) + return _putautofollowpattern(name) +} + +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. 
+// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern +func (p *MethodCcr) ResumeAutoFollowPattern(name string) *ccr_resume_auto_follow_pattern.ResumeAutoFollowPattern { + _resumeautofollowpattern := ccr_resume_auto_follow_pattern.NewResumeAutoFollowPatternFunc(p.tp) + return _resumeautofollowpattern(name) +} + +// Resume a follower. +// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow +func (p *MethodCcr) ResumeFollow(index string) *ccr_resume_follow.ResumeFollow { + _resumefollow := ccr_resume_follow.NewResumeFollowFunc(p.tp) + return _resumefollow(index) +} + +// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats +func (p *MethodCcr) Stats() *ccr_stats.Stats { + _stats := ccr_stats.NewStatsFunc(p.tp) + return _stats() +} + +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow +func (p *MethodCcr) Unfollow(index string) *ccr_unfollow.Unfollow { + _unfollow := ccr_unfollow.NewUnfollowFunc(p.tp) + return _unfollow(index) +} + +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. +// Refer to the linked documentation for examples of how to troubleshoot +// allocation issues using this API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain +func (p *MethodCluster) AllocationExplain() *cluster_allocation_explain.AllocationExplain { + _allocationexplain := cluster_allocation_explain.NewAllocationExplainFunc(p.tp) + return _allocationexplain() +} + +// Delete component templates. 
+// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func (p *MethodCluster) DeleteComponentTemplate(name string) *cluster_delete_component_template.DeleteComponentTemplate { + _deletecomponenttemplate := cluster_delete_component_template.NewDeleteComponentTemplateFunc(p.tp) + return _deletecomponenttemplate(name) +} + +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions +func (p *MethodCluster) DeleteVotingConfigExclusions() *cluster_delete_voting_config_exclusions.DeleteVotingConfigExclusions { + _deletevotingconfigexclusions := cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusionsFunc(p.tp) + return _deletevotingconfigexclusions() +} + +// Check component templates. +// Returns information about whether a particular component template exists. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func (p *MethodCluster) ExistsComponentTemplate(name string) *cluster_exists_component_template.ExistsComponentTemplate { + _existscomponenttemplate := cluster_exists_component_template.NewExistsComponentTemplateFunc(p.tp) + return _existscomponenttemplate(name) +} + +// Get component templates. +// Get information about component templates. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func (p *MethodCluster) GetComponentTemplate() *cluster_get_component_template.GetComponentTemplate { + _getcomponenttemplate := cluster_get_component_template.NewGetComponentTemplateFunc(p.tp) + return _getcomponenttemplate() +} + +// Get cluster-wide settings. +// +// By default, it returns only settings that have been explicitly defined. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings +func (p *MethodCluster) GetSettings() *cluster_get_settings.GetSettings { + _getsettings := cluster_get_settings.NewGetSettingsFunc(p.tp) + return _getsettings() +} + +// Get the cluster health status. +// +// You can also use the API to get the health status of only specified data +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health +func (p *MethodCluster) Health() *cluster_health.Health { + _health := cluster_health.NewHealthFunc(p.tp) + return _health() +} + +// Get cluster info. +// Returns basic information about the cluster. 
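+//
+// An illustrative sketch (the *MethodCluster receiver `cluster` and the
+// context `ctx` are assumed; `_all` selects every available target):
+//
+//	info, err := cluster.Info("_all").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = info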
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info
+func (p *MethodCluster) Info(target string) *cluster_info.Info {
+	_info := cluster_info.NewInfoFunc(p.tp)
+	return _info(target)
+}
+
+// Get the pending cluster tasks.
+// Get information about cluster-level changes (such as create index, update
+// mapping, allocate or fail shard) that have not yet taken effect.
+//
+// NOTE: This API returns a list of any pending updates to the cluster state.
+// These are distinct from the tasks reported by the task management API, which
+// include periodic tasks and tasks initiated by the user, such as node stats,
+// search queries, or create index requests.
+// However, if a user-initiated task such as a create index command causes a
+// cluster state update, the activity of this task might be reported by both the
+// task management API and the pending cluster tasks API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks
+func (p *MethodCluster) PendingTasks() *cluster_pending_tasks.PendingTasks {
+	_pendingtasks := cluster_pending_tasks.NewPendingTasksFunc(p.tp)
+	return _pendingtasks()
+}
+
+// Update voting configuration exclusions.
+// Update the cluster voting config exclusions by node IDs or node names.
+// By default, if there are more than three master-eligible nodes in the cluster
+// and you remove fewer than half of the master-eligible nodes in the cluster at
+// once, the voting configuration automatically shrinks.
+// If you want to shrink the voting configuration to contain fewer than three
+// nodes or to remove half or more of the master-eligible nodes in the cluster
+// at once, use this API to remove departing nodes from the voting configuration
+// manually.
+// The API adds an entry for each specified node to the cluster’s voting
+// configuration exclusions list.
+// It then waits until the cluster has reconfigured its voting configuration to
+// exclude the specified nodes.
+//
+// Clusters should have no voting configuration exclusions in normal operation.
+// Once the excluded nodes have stopped, clear the voting configuration
+// exclusions with `DELETE /_cluster/voting_config_exclusions`.
+// This API waits for the nodes to be fully removed from the cluster before it
+// returns.
+// If your cluster has voting configuration exclusions for nodes that you no
+// longer intend to remove, use `DELETE
+// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the
+// voting configuration exclusions without waiting for the nodes to leave the
+// cluster.
+//
+// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status
+// code of 200 OK guarantees that the node has been removed from the voting
+// configuration and will not be reinstated until the voting configuration
+// exclusions are cleared by calling `DELETE
+// /_cluster/voting_config_exclusions`.
+// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a
+// response with an HTTP status code other than 200 OK, then the node may not
+// have been removed from the voting configuration.
+// In that case, you may safely retry the call.
+//
+// NOTE: Voting exclusions are required only when you remove at least half of
+// the master-eligible nodes from a cluster in a short time period.
+// They are not required when removing master-ineligible nodes or when removing
+// fewer than half of the master-eligible nodes.
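+//
+// An illustrative sketch (the *MethodCluster receiver `cluster` and the
+// context `ctx` are assumed; the node names or IDs to exclude are passed as
+// query parameters on the returned builder, not shown here):
+//
+//	res, err := cluster.PostVotingConfigExclusions().Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res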
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions
+func (p *MethodCluster) PostVotingConfigExclusions() *cluster_post_voting_config_exclusions.PostVotingConfigExclusions {
+	_postvotingconfigexclusions := cluster_post_voting_config_exclusions.NewPostVotingConfigExclusionsFunc(p.tp)
+	return _postvotingconfigexclusions()
+}
+
+// Create or update a component template.
+// Component templates are building blocks for constructing index templates that
+// specify index mappings, settings, and aliases.
+//
+// An index template can be composed of multiple component templates.
+// To use a component template, specify it in an index template’s `composed_of`
+// list.
+// Component templates are only applied to new data streams and indices as part
+// of a matching index template.
+//
+// Settings and mappings specified directly in the index template or the create
+// index request override any settings or mappings specified in a component
+// template.
+//
+// Component templates are only used during index creation.
+// For data streams, this includes data stream creation and the creation of a
+// stream’s backing indices.
+// Changes to component templates do not affect existing indices, including a
+// stream’s backing indices.
+//
+// You can use C-style `/* *\/` block comments in component templates.
+// You can include comments anywhere in the request body except before the
+// opening curly bracket.
+//
+// **Applying component templates**
+//
+// You cannot directly apply a component template to a data stream or index.
+// To be applied, a component template must be included in an index template's
+// `composed_of` list.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template
+func (p *MethodCluster) PutComponentTemplate(name string) *cluster_put_component_template.PutComponentTemplate {
+	_putcomponenttemplate := cluster_put_component_template.NewPutComponentTemplateFunc(p.tp)
+	return _putcomponenttemplate(name)
+}
+
+// Update the cluster settings.
+//
+// Configure and update dynamic settings on a running cluster.
+// You can also configure dynamic settings locally on an unstarted or shut down
+// node in `elasticsearch.yml`.
+//
+// Updates made with this API can be persistent, which apply across cluster
+// restarts, or transient, which reset after a cluster restart.
+// You can also reset transient or persistent settings by assigning them a null
+// value.
+//
+// If you configure the same setting using multiple methods, Elasticsearch
+// applies the settings in the following order of precedence: 1) Transient
+// setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default
+// setting value.
+// For example, you can apply a transient setting to override a persistent
+// setting or `elasticsearch.yml` setting.
+// However, a change to an `elasticsearch.yml` setting will not override a
+// defined transient or persistent setting.
+//
+// TIP: In Elastic Cloud, use the user settings feature to configure all cluster
+// settings. This method automatically rejects unsafe settings that could break
+// your cluster.
+// If you run Elasticsearch on your own hardware, use this API to configure
+// dynamic cluster settings.
+// Only use `elasticsearch.yml` for static cluster settings and node settings.
+// The API doesn’t require a restart and ensures a setting’s value is the same
+// on all nodes.
+//
+// WARNING: Transient cluster settings are no longer recommended. Use persistent
+// cluster settings instead.
+// If a cluster becomes unstable, transient settings can clear unexpectedly,
+// resulting in a potentially undesired cluster configuration.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings
+func (p *MethodCluster) PutSettings() *cluster_put_settings.PutSettings {
+	_putsettings := cluster_put_settings.NewPutSettingsFunc(p.tp)
+	return _putsettings()
+}
+
+// Get remote cluster information.
+//
+// Get information about configured remote clusters.
+// The API returns connection and endpoint information keyed by the configured
+// remote cluster alias.
+//
+// > info
+// > This API returns information that reflects current state on the local
+// cluster.
+// > The `connected` field does not necessarily reflect whether a remote cluster
+// is down or unavailable, only whether there is currently an open connection to
+// it.
+// > Elasticsearch does not spontaneously try to reconnect to a disconnected
+// remote cluster.
+// > To trigger a reconnection, attempt a cross-cluster search, ES|QL
+// cross-cluster search, or try the [resolve cluster
+// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster).
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info
+func (p *MethodCluster) RemoteInfo() *cluster_remote_info.RemoteInfo {
+	_remoteinfo := cluster_remote_info.NewRemoteInfoFunc(p.tp)
+	return _remoteinfo()
+}
+
+// Reroute the cluster.
+// Manually change the allocation of individual shards in the cluster.
+// For example, a shard can be moved from one node to another explicitly, an
+// allocation can be canceled, and an unassigned shard can be explicitly
+// allocated to a specific node.
+//
+// It is important to note that after processing any reroute commands
+// Elasticsearch will perform rebalancing as normal (respecting the values of
+// settings such as `cluster.routing.rebalance.enable`) in order to remain in a
+// balanced state.
+// For example, if the requested allocation includes moving a shard from node1
+// to node2, then this may cause a shard to be moved from node2 back to node1 to
+// even things out.
+//
+// The cluster can be set to disable allocations using the
+// `cluster.routing.allocation.enable` setting.
+// If allocations are disabled, then the only allocations that will be performed
+// are explicit ones given using the reroute command, and consequent allocations
+// due to rebalancing.
+//
+// The cluster will attempt to allocate a shard a maximum of
+// `index.allocation.max_retries` times in a row (defaults to `5`), before
+// giving up and leaving the shard unallocated.
+// This scenario can be caused by structural problems such as having an analyzer
+// that refers to a stopwords file that doesn’t exist on all nodes.
+//
+// Once the problem has been corrected, allocation can be manually retried by
+// calling the reroute API with the `?retry_failed` URI query parameter, which
+// will attempt a single retry round for these shards.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute
+func (p *MethodCluster) Reroute() *cluster_reroute.Reroute {
+	_reroute := cluster_reroute.NewRerouteFunc(p.tp)
+	return _reroute()
+}
+
+// Get the cluster state.
+// Get comprehensive information about the state of the cluster.
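+//
+// As an illustrative sketch (the *MethodCluster receiver `cluster` and the
+// context `ctx` are assumed):
+//
+//	state, err := cluster.State().Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = state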
+// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. +// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. +// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state +func (p *MethodCluster) State() *cluster_state.State { + _state := cluster_state.NewStateFunc(p.tp) + return _state() +} + +// Get cluster statistics. +// Get basic index metrics (shard numbers, store size, memory usage) and +// information about the current nodes that form the cluster (number, roles, os, +// jvm versions, memory usage, cpu and installed plugins). +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats +func (p *MethodCluster) Stats() *cluster_stats.Stats { + _stats := cluster_stats.NewStatsFunc(p.tp) + return _stats() +} + +// Check in a connector. +// +// Update the `last_seen` field in the connector and set it to the current +// timestamp. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in +func (p *MethodConnector) CheckIn(connectorid string) *connector_check_in.CheckIn { + _checkin := connector_check_in.NewCheckInFunc(p.tp) + return _checkin(connectorid) +} + +// Delete a connector. +// +// Removes a connector and associated sync jobs. +// This is a destructive action that is not recoverable. +// NOTE: This action doesn’t delete any API keys, ingest pipelines, or data +// indices associated with the connector. +// These need to be removed manually. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete +func (p *MethodConnector) Delete(connectorid string) *connector_delete.Delete { + _delete := connector_delete.NewDeleteFunc(p.tp) + return _delete(connectorid) +} + +// Get a connector. +// +// Get the details about a connector. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get +func (p *MethodConnector) Get(connectorid string) *connector_get.Get { + _get := connector_get.NewGetFunc(p.tp) + return _get(connectorid) +} + +// Update the connector last sync stats. 
+// +// Update the fields related to the last sync of a connector. +// This action is used for analytics and monitoring. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync +func (p *MethodConnector) LastSync(connectorid string) *connector_last_sync.LastSync { + _lastsync := connector_last_sync.NewLastSyncFunc(p.tp) + return _lastsync(connectorid) +} + +// Get all connectors. +// +// Get information about all connectors. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list +func (p *MethodConnector) List() *connector_list.List { + _list := connector_list.NewListFunc(p.tp) + return _list() +} + +// Create a connector. +// +// Connectors are Elasticsearch integrations that bring content from third-party +// data sources, which can be deployed on Elastic Cloud or hosted on your own +// infrastructure. +// Elastic managed connectors (Native connectors) are a managed service on +// Elastic Cloud. +// Self-managed connectors (Connector clients) are self-managed on your +// infrastructure. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put +func (p *MethodConnector) Post() *connector_post.Post { + _post := connector_post.NewPostFunc(p.tp) + return _post() +} + +// Create or update a connector. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put +func (p *MethodConnector) Put() *connector_put.Put { + _put := connector_put.NewPutFunc(p.tp) + return _put() +} + +// Creates a secret for a Connector. +func (p *MethodConnector) SecretPost() *connector_secret_post.SecretPost { + _secretpost := connector_secret_post.NewSecretPostFunc(p.tp) + return _secretpost() +} + +// Cancel a connector sync job. +// +// Cancel a connector sync job, which sets the status to cancelling and updates +// `cancellation_requested_at` to the current time. +// The connector service is then responsible for setting the status of connector +// sync jobs to cancelled. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel +func (p *MethodConnector) SyncJobCancel(connectorsyncjobid string) *connector_sync_job_cancel.SyncJobCancel { + _syncjobcancel := connector_sync_job_cancel.NewSyncJobCancelFunc(p.tp) + return _syncjobcancel(connectorsyncjobid) +} + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in +func (p *MethodConnector) SyncJobCheckIn(connectorsyncjobid string) *connector_sync_job_check_in.SyncJobCheckIn { + _syncjobcheckin := connector_sync_job_check_in.NewSyncJobCheckInFunc(p.tp) + return _syncjobcheckin(connectorsyncjobid) +} + +// Claim a connector sync job. +// This action updates the job status to `in_progress` and sets the `last_seen` +// and `started_at` timestamps to the current time. +// Additionally, it can set the `sync_cursor` property for the sync job. +// +// This API is not intended for direct connector management by users. +// It supports the implementation of services that utilize the connector +// protocol to communicate with Elasticsearch. 
+// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim +func (p *MethodConnector) SyncJobClaim(connectorsyncjobid string) *connector_sync_job_claim.SyncJobClaim { + _syncjobclaim := connector_sync_job_claim.NewSyncJobClaimFunc(p.tp) + return _syncjobclaim(connectorsyncjobid) +} + +// Delete a connector sync job. +// +// Remove a connector sync job and its associated data. +// This is a destructive action that is not recoverable. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete +func (p *MethodConnector) SyncJobDelete(connectorsyncjobid string) *connector_sync_job_delete.SyncJobDelete { + _syncjobdelete := connector_sync_job_delete.NewSyncJobDeleteFunc(p.tp) + return _syncjobdelete(connectorsyncjobid) +} + +// Set a connector sync job error. +// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error +func (p *MethodConnector) SyncJobError(connectorsyncjobid string) *connector_sync_job_error.SyncJobError { + _syncjoberror := connector_sync_job_error.NewSyncJobErrorFunc(p.tp) + return _syncjoberror(connectorsyncjobid) +} + +// Get a connector sync job. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get +func (p *MethodConnector) SyncJobGet(connectorsyncjobid string) *connector_sync_job_get.SyncJobGet { + _syncjobget := connector_sync_job_get.NewSyncJobGetFunc(p.tp) + return _syncjobget(connectorsyncjobid) +} + +// Get all connector sync jobs. +// +// Get information about all stored connector sync jobs listed by their creation +// date in ascending order. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list +func (p *MethodConnector) SyncJobList() *connector_sync_job_list.SyncJobList { + _syncjoblist := connector_sync_job_list.NewSyncJobListFunc(p.tp) + return _syncjoblist() +} + +// Create a connector sync job. +// +// Create a connector sync job document in the internal index and initialize its +// counters and timestamps with default values. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post +func (p *MethodConnector) SyncJobPost() *connector_sync_job_post.SyncJobPost { + _syncjobpost := connector_sync_job_post.NewSyncJobPostFunc(p.tp) + return _syncjobpost() +} + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. +// You can also update `last_seen`. +// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. 
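+//
+// An illustrative sketch (the *MethodConnector receiver `conn`, the context
+// `ctx`, and the sync job ID are assumed; the stats themselves travel in the
+// request body, omitted here):
+//
+//	res, err := conn.SyncJobUpdateStats("sync-job-id").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res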
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats +func (p *MethodConnector) SyncJobUpdateStats(connectorsyncjobid string) *connector_sync_job_update_stats.SyncJobUpdateStats { + _syncjobupdatestats := connector_sync_job_update_stats.NewSyncJobUpdateStatsFunc(p.tp) + return _syncjobupdatestats(connectorsyncjobid) +} + +// Activate the connector draft filter. +// +// Activates the valid draft filtering for a connector. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering +func (p *MethodConnector) UpdateActiveFiltering(connectorid string) *connector_update_active_filtering.UpdateActiveFiltering { + _updateactivefiltering := connector_update_active_filtering.NewUpdateActiveFilteringFunc(p.tp) + return _updateactivefiltering(connectorid) +} + +// Update the connector API key ID. +// +// Update the `api_key_id` and `api_key_secret_id` fields of a connector. +// You can specify the ID of the API key used for authorization and the ID of +// the connector secret where the API key is stored. +// The connector secret ID is required only for Elastic managed (native) +// connectors. +// Self-managed connectors (connector clients) do not use this field. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id +func (p *MethodConnector) UpdateApiKeyId(connectorid string) *connector_update_api_key_id.UpdateApiKeyId { + _updateapikeyid := connector_update_api_key_id.NewUpdateApiKeyIdFunc(p.tp) + return _updateapikeyid(connectorid) +} + +// Update the connector configuration. +// +// Update the configuration field in the connector document. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration +func (p *MethodConnector) UpdateConfiguration(connectorid string) *connector_update_configuration.UpdateConfiguration { + _updateconfiguration := connector_update_configuration.NewUpdateConfigurationFunc(p.tp) + return _updateconfiguration(connectorid) +} + +// Update the connector error field. +// +// Set the error field for the connector. +// If the error provided in the request body is non-null, the connector’s status +// is updated to error. +// Otherwise, if the error is reset to null, the connector status is updated to +// connected. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error +func (p *MethodConnector) UpdateError(connectorid string) *connector_update_error.UpdateError { + _updateerror := connector_update_error.NewUpdateErrorFunc(p.tp) + return _updateerror(connectorid) +} + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. 
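+//
+// An illustrative sketch (the *MethodConnector receiver `conn`, the context
+// `ctx`, and the connector ID are assumed; the features payload travels in
+// the request body, omitted here):
+//
+//	res, err := conn.UpdateFeatures("connector-id").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res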
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features
+func (p *MethodConnector) UpdateFeatures(connectorid string) *connector_update_features.UpdateFeatures {
+	_updatefeatures := connector_update_features.NewUpdateFeaturesFunc(p.tp)
+	return _updatefeatures(connectorid)
+}
+
+// Update the connector filtering.
+//
+// Update the draft filtering configuration of a connector and mark the draft
+// validation state as edited.
+// The filtering draft is activated once validated by the running Elastic
+// connector service.
+// The filtering property is used to configure sync rules (both basic and
+// advanced) for a connector.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering
+func (p *MethodConnector) UpdateFiltering(connectorid string) *connector_update_filtering.UpdateFiltering {
+	_updatefiltering := connector_update_filtering.NewUpdateFilteringFunc(p.tp)
+	return _updatefiltering(connectorid)
+}
+
+// Update the connector draft filtering validation.
+//
+// Update the draft filtering validation info for a connector.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation
+func (p *MethodConnector) UpdateFilteringValidation(connectorid string) *connector_update_filtering_validation.UpdateFilteringValidation {
+	_updatefilteringvalidation := connector_update_filtering_validation.NewUpdateFilteringValidationFunc(p.tp)
+	return _updatefilteringvalidation(connectorid)
+}
+
+// Update the connector index name.
+//
+// Update the `index_name` field of a connector, specifying the index where the
+// data ingested by the connector is stored.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name
+func (p *MethodConnector) UpdateIndexName(connectorid string) *connector_update_index_name.UpdateIndexName {
+	_updateindexname := connector_update_index_name.NewUpdateIndexNameFunc(p.tp)
+	return _updateindexname(connectorid)
+}
+
+// Update the connector name and description.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name
+func (p *MethodConnector) UpdateName(connectorid string) *connector_update_name.UpdateName {
+	_updatename := connector_update_name.NewUpdateNameFunc(p.tp)
+	return _updatename(connectorid)
+}
+
+// Update the connector is_native flag.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native
+func (p *MethodConnector) UpdateNative(connectorid string) *connector_update_native.UpdateNative {
+	_updatenative := connector_update_native.NewUpdateNativeFunc(p.tp)
+	return _updatenative(connectorid)
+}
+
+// Update the connector pipeline.
+//
+// When you create a new connector, the configuration of an ingest pipeline is
+// populated with default settings.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline
+func (p *MethodConnector) UpdatePipeline(connectorid string) *connector_update_pipeline.UpdatePipeline {
+	_updatepipeline := connector_update_pipeline.NewUpdatePipelineFunc(p.tp)
+	return _updatepipeline(connectorid)
+}
+
+// Update the connector scheduling.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling
+func (p *MethodConnector) UpdateScheduling(connectorid string) *connector_update_scheduling.UpdateScheduling {
+	_updatescheduling := connector_update_scheduling.NewUpdateSchedulingFunc(p.tp)
+	return _updatescheduling(connectorid)
+}
+
+// Update the connector service type.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type
+func (p *MethodConnector) UpdateServiceType(connectorid string) *connector_update_service_type.UpdateServiceType {
+	_updateservicetype := connector_update_service_type.NewUpdateServiceTypeFunc(p.tp)
+	return _updateservicetype(connectorid)
+}
+
+// Update the connector status.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status
+func (p *MethodConnector) UpdateStatus(connectorid string) *connector_update_status.UpdateStatus {
+	_updatestatus := connector_update_status.NewUpdateStatusFunc(p.tp)
+	return _updatestatus(connectorid)
+}
+
+// Bulk index or delete documents.
+// Perform multiple `index`, `create`, `delete`, and `update` actions in a
+// single request.
+// This reduces overhead and can greatly increase indexing speed.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To use the `create` action, you must have the `create_doc`, `create`,
+// `index`, or `write` index privilege. Data streams support only the `create`
+// action.
+// * To use the `index` action, you must have the `create`, `index`, or `write`
+// index privilege.
+// * To use the `delete` action, you must have the `delete` or `write` index
+// privilege.
+// * To use the `update` action, you must have the `index` or `write` index
+// privilege.
+// * To automatically create a data stream or index with a bulk API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+// * To make the result of a bulk operation visible to search using the
+// `refresh` parameter, you must have the `maintenance` or `manage` index
+// privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// The actions are specified in the request body using a newline delimited JSON
+// (NDJSON) structure:
+//
+// ```
+// action_and_meta_data\n
+// optional_source\n
+// action_and_meta_data\n
+// optional_source\n
+// ....
+// action_and_meta_data\n
+// optional_source\n
+// ```
+//
+// The `index` and `create` actions expect a source on the next line and have
+// the same semantics as the `op_type` parameter in the standard index API.
+// A `create` action fails if a document with the same ID already exists in the
+// target.
+// An `index` action adds or replaces a document as necessary.
+//
+// NOTE: Data streams support only the `create` action.
+// To update or delete a document in a data stream, you must target the backing
+// index containing the document.
+//
+// An `update` action expects that the partial doc, upsert, and script and its
+// options are specified on the next line.
+//
+// A `delete` action does not expect a source on the next line and has the same
+// semantics as the standard delete API.
+//
+// NOTE: The final line of data must end with a newline character (`\n`).
+// Each newline character may be preceded by a carriage return (`\r`).
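+//
+// As an illustrative sketch (the *MethodCore receiver `core`, the context
+// `ctx`, the `strings` import, and the builder's `Raw` reader setter are
+// assumed), raw NDJSON can be handed to the endpoint directly:
+//
+//	payload := `{ "index" : { "_index" : "test", "_id" : "1" } }` + "\n" +
+//		`{ "field1" : "value1" }` + "\n"
+//	res, err := core.Bulk().Raw(strings.NewReader(payload)).Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res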
+// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header
+// of `application/json` or `application/x-ndjson`.
+// Because this format uses literal newline characters (`\n`) as delimiters,
+// make sure that the JSON actions and sources are not pretty printed.
+//
+// If you provide a target in the request path, it is used for any actions that
+// don't explicitly specify an `_index` argument.
+//
+// A note on the format: the idea here is to make processing as fast as
+// possible.
+// As some of the actions are redirected to other shards on other nodes, only
+// `action_meta_data` is parsed on the receiving node side.
+//
+// Client libraries using this protocol should strive to do something similar
+// on the client side and reduce buffering as much as possible.
+//
+// There is no "correct" number of actions to perform in a single bulk request.
+// Experiment with different settings to find the optimal size for your
+// particular workload.
+// Note that Elasticsearch limits the maximum size of an HTTP request to 100mb
+// by default, so clients must ensure that no request exceeds this size.
+// It is not possible to index a single document that exceeds the size limit, so
+// you must pre-process any such documents into smaller pieces before sending
+// them to Elasticsearch.
+// For instance, split documents into pages or chapters before indexing them, or
+// store raw binary data in a system outside Elasticsearch and replace the raw
+// data with a link to the external system in the documents that you send to
+// Elasticsearch.
+//
+// **Client support for bulk requests**
+//
+// Some of the officially supported clients provide helpers to assist with bulk
+// requests and reindexing:
+//
+// * Go: Check out `esutil.BulkIndexer`
+// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and
+// `Search::Elasticsearch::Client::5_0::Scroll`
+// * Python: Check out `elasticsearch.helpers.*`
+// * JavaScript: Check out `client.helpers.*`
+// * .NET: Check out `BulkAllObservable`
+// * PHP: Check out bulk indexing.
+//
+// **Submitting bulk requests with cURL**
+//
+// If you're providing text file input to `curl`, you must use the
+// `--data-binary` flag instead of plain `-d`.
+// The latter doesn't preserve newlines. For example:
+//
+// ```
+// $ cat requests
+// { "index" : { "_index" : "test", "_id" : "1" } }
+// { "field1" : "value1" }
+// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk
+// --data-binary "@requests"; echo
+// {"took":7, "errors": false,
+// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+// ```
+//
+// **Optimistic concurrency control**
+//
+// Each `index` and `delete` action within a bulk API call may include the
+// `if_seq_no` and `if_primary_term` parameters in their respective action and
+// meta data lines.
+// The `if_seq_no` and `if_primary_term` parameters control how operations are
+// run, based on the last modification to existing documents. See Optimistic
+// concurrency control for more details.
+//
+// **Versioning**
+//
+// Each bulk item can include the version value using the `version` field.
+// It automatically follows the behavior of the index or delete operation based
+// on the `_version` mapping.
+// It also supports the `version_type`.
+//
+// **Routing**
+//
+// Each bulk item can include the routing value using the `routing` field.
+// It automatically follows the behavior of the index or delete operation based +// on the `_routing` mapping. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. +// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. +// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. +// +// You might want to disable the refresh interval temporarily to improve +// indexing throughput for large bulk requests. +// Refer to the linked documentation for step-by-step instructions using the +// index settings API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk +func (p *MethodCore) Bulk() *core_bulk.Bulk { + _bulk := core_bulk.NewBulkFunc(p.tp) + return _bulk() +} + +// Clear a scrolling search. +// Clear the search context and results for a scrolling search. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll +func (p *MethodCore) ClearScroll() *core_clear_scroll.ClearScroll { + _clearscroll := core_clear_scroll.NewClearScrollFunc(p.tp) + return _clearscroll() +} + +// Close a point in time. +// A point in time must be opened explicitly before being used in search +// requests. +// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// A point in time is automatically closed when the `keep_alive` period has +// elapsed. +// However, keeping points in time has a cost; close them as soon as they are no +// longer required for search requests. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time +func (p *MethodCore) ClosePointInTime() *core_close_point_in_time.ClosePointInTime { + _closepointintime := core_close_point_in_time.NewClosePointInTimeFunc(p.tp) + return _closepointintime() +} + +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count +func (p *MethodCore) Count() *core_count.Count { + _count := core_count.NewCountFunc(p.tp) + return _count() +} + +// Create a new document in the index. 
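+//
+// A minimal, hypothetical Go sketch (client value `es` assumed; `Raw` and
+// `Do` follow the typed-API builder pattern; a strings import and a context
+// `ctx` are assumed):
+//
+// ```
+// res, err := es.Core.Create("my-index-000001", "1").
+//     Raw(strings.NewReader(`{ "user": { "id": "elkbee" } }`)).
+//     Do(ctx)
+// ```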
+//
+// You can index a new JSON document with the `/<target>/_doc/` or
+// `/<target>/_create/<_id>` APIs.
+// Using `_create` guarantees that the document is indexed only if it does not
+// already exist.
+// It returns a 409 response when a document with the same ID already exists in
+// the index.
+// To update an existing document, you must use the `/<target>/_doc/` API.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add a document using the `PUT /<target>/_create/<_id>` or `POST
+// /<target>/_create/<_id>` request formats, you must have the `create_doc`,
+// `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+//
+// If the target doesn't exist and doesn't match a data stream template, the
+// operation automatically creates the index and applies any matching index
+// templates.
+//
+// NOTE: Elasticsearch includes several built-in index templates. To avoid
+// naming collisions with these templates, refer to index pattern documentation.
+//
+// If no mapping exists, the index operation creates a dynamic mapping.
+// By default, new fields and objects are automatically added to the mapping if
+// needed.
+//
+// Automatic index creation is controlled by the `action.auto_create_index`
+// setting.
+// If it is `true`, any index can be created automatically.
+// You can modify this setting to explicitly allow or block automatic creation
+// of indices that match specified patterns or set it to `false` to turn off
+// automatic index creation entirely.
+// Specify a comma-separated list of patterns you want to allow or prefix each
+// pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+// When a list is specified, the default behaviour is to disallow.
+//
+// NOTE: The `action.auto_create_index` setting affects the automatic creation
+// of indices only.
+// It does not affect the creation of data streams.
+//
+// **Routing**
+//
+// By default, shard placement — or routing — is controlled by using a hash of
+// the document's ID value.
+// For more explicit control, the value fed into the hash function used by the
+// router can be directly specified on a per-operation basis using the `routing`
+// parameter.
+//
+// When setting up explicit mapping, you can also use the `_routing` field to
+// direct the index operation to extract the routing value from the document
+// itself.
+// This does come at the (very minimal) cost of an additional document parsing
+// pass.
+// If the `_routing` mapping is defined and set to be required, the index
+// operation will fail if no routing value is provided or extracted.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Distributed**
+//
+// The index operation is directed to the primary shard based on its route and
+// performed on the actual node containing this shard.
+// After the primary shard completes the operation, if needed, the update is
+// distributed to applicable replicas.
+//
+// **Active shards**
+//
+// To improve the resiliency of writes to the system, indexing operations can be
+// configured to wait for a certain number of active shard copies before
+// proceeding with the operation.
+// If the requisite number of active shard copies are not available, then the
+// write operation must wait and retry, until either the requisite shard copies
+// have started or a timeout occurs.
+// By default, write operations only wait for the primary shards to be active
+// before proceeding (that is to say `wait_for_active_shards` is `1`).
+// This default can be overridden in the index settings dynamically by setting
+// `index.write.wait_for_active_shards`.
+// To alter this behavior per operation, use the `wait_for_active_shards`
+// request parameter.
+//
+// Valid values are `all` or any positive integer up to the total number of
+// configured copies per shard in the index (which is `number_of_replicas`+1).
+// Specifying a negative value or a number greater than the number of shard
+// copies will throw an error.
+//
+// For example, suppose you have a cluster of three nodes, A, B, and C and you
+// create an index with the number of replicas set to 3 (resulting in 4
+// shard copies, one more copy than there are nodes).
+// If you attempt an indexing operation, by default the operation will only
+// ensure the primary copy of each shard is available before proceeding.
+// This means that even if B and C went down and A hosted the primary shard
+// copies, the indexing operation would still proceed with only one copy of the
+// data.
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes
+// are up), the indexing operation will require 3 active shard copies before
+// proceeding.
+// This requirement should be met because there are 3 active nodes in the
+// cluster, each one holding a copy of the shard.
+// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is
+// the same in this situation), the indexing operation will not proceed as you
+// do not have all 4 copies of each shard active in the index.
+// The operation will time out unless a new node is brought up in the cluster to
+// host the fourth copy of the shard.
+//
+// It is important to note that this setting greatly reduces the chances of the
+// write operation not writing to the requisite number of shard copies, but it
+// does not completely eliminate the possibility, because this check occurs
+// before the write operation starts.
+// After the write operation is underway, it is still possible for replication
+// to fail on any number of shard copies but still succeed on the primary.
+// The `_shards` section of the API response reveals the number of shard copies
+// on which replication succeeded and failed.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create
+func (p *MethodCore) Create(index, id string) *core_create.Create {
+	_create := core_create.NewCreateFunc(p.tp)
+	return _create(index, id)
+}
+
+// Delete a document.
+//
+// Remove a JSON document from the specified index.
+//
+// NOTE: You cannot send deletion requests directly to a data stream.
+// To delete a document in a data stream, you must target the backing index
+// containing the document.
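+//
+// A minimal sketch, assuming the builder exposes the usual typed-API option
+// methods (hypothetical `es` client and `ctx`):
+//
+// ```
+// // Delete document 1, supplying the routing value used at index time
+// // (see the routing notes below).
+// res, err := es.Core.Delete("my-index-000001", "1").Routing("shard-1").Do(ctx)
+// ```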
+// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. +// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete +func (p *MethodCore) Delete(index, id string) *core_delete.Delete { + _delete := core_delete.NewDeleteFunc(p.tp) + return _delete(index, id) +} + +// Delete documents. +// +// Deletes documents that match the specified query. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `delete` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// When you submit a delete by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and deletes +// matching documents using internal versioning. +// If a document changes between the time that the snapshot is taken and the +// delete operation is processed, it results in a version conflict and the +// delete operation fails. +// +// NOTE: Documents with a version equal to 0 cannot be deleted using delete by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing a delete by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents to delete. +// A bulk delete request is performed for each batch of matching documents. +// If a search or bulk request is rejected, the requests are retried up to 10 +// times, with exponential back off. +// If the maximum retry limit is reached, processing halts and all failed +// requests are returned in the response. 
+// Any delete requests that completed successfully still stick; they are not
+// rolled back.
+//
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts the operation could attempt
+// to delete more documents from the source than `max_docs` until it has
+// successfully deleted `max_docs` documents, or it has gone through every
+// document in the source query.
+//
+// **Throttling delete requests**
+//
+// To control the rate at which delete by query issues batches of delete
+// operations, you can set `requests_per_second` to any positive decimal number.
+// This pads each batch with a wait time to throttle the rate.
+// Set `requests_per_second` to `-1` to disable throttling.
+//
+// Throttling uses a wait time between batches so that the internal scroll
+// requests can be given a timeout that takes the request padding into account.
+// The padding time is the difference between the batch size divided by the
+// `requests_per_second` and the time spent writing.
+// By default the batch size is `1000`, so if `requests_per_second` is set to
+// `500`:
+//
+// ```
+// target_time = 1000 / 500 per second = 2 seconds
+// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+// ```
+//
+// Since the batch is issued as a single `_bulk` request, large batch sizes
+// cause Elasticsearch to create many requests and wait before starting the next
+// set.
+// This is "bursty" instead of "smooth".
+//
+// **Slicing**
+//
+// Delete by query supports sliced scroll to parallelize the delete process.
+// This can improve efficiency and provide a convenient way to break the request
+// down into smaller parts.
+//
+// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to
+// use.
+// This setting will use one slice per shard, up to a certain limit.
+// If there are multiple source data streams or indices, it will choose the
+// number of slices based on the index or backing index with the smallest number
+// of shards.
+// Adding slices to the delete by query operation creates sub-requests which
+// means it has some quirks:
+//
+// * You can see these requests in the tasks APIs. These sub-requests are
+// "child" tasks of the task for the request with slices.
+// * Fetching the status of the task for the request with slices only contains
+// the status of completed slices.
+// * These sub-requests are individually addressable for things like
+// cancellation and rethrottling.
+// * Rethrottling the request with `slices` will rethrottle the unfinished
+// sub-request proportionally.
+// * Canceling the request with `slices` will cancel each sub-request.
+// * Due to the nature of `slices` each sub-request won't get a perfectly even
+// portion of the documents. All documents will be addressed, but some slices
+// may be larger than others. Expect larger slices to have a more even
+// distribution.
+// * Parameters like `requests_per_second` and `max_docs` on a request with
+// `slices` are distributed proportionally to each sub-request. Combine that
+// with the earlier point about distribution being uneven and you should
+// conclude that using `max_docs` with `slices` might not result in exactly
+// `max_docs` documents being deleted.
+// * Each sub-request gets a slightly different snapshot of the source data
+// stream or index though these are all taken at approximately the same time.
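+//
+// A hedged Go sketch (hypothetical `es` client; the query is passed as a raw
+// JSON body via the assumed `Raw` setter rather than a typed request struct):
+//
+// ```
+// res, err := es.Core.DeleteByQuery("my-index-000001").
+//     Raw(strings.NewReader(`{ "query": { "match": { "user.id": "elkbee" } } }`)).
+//     Do(ctx)
+// ```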
+//
+// If you're slicing manually or otherwise tuning automatic slicing, keep in
+// mind that:
+//
+// * Query performance is most efficient when the number of slices is equal to
+// the number of shards in the index or backing index. If that number is large
+// (for example, 500), choose a lower number as too many `slices` hurts
+// performance. Setting `slices` higher than the number of shards generally does
+// not improve efficiency and adds overhead.
+// * Delete performance scales linearly across available resources with the
+// number of slices.
+//
+// Whether query or delete performance dominates the runtime depends on the
+// documents being reindexed and cluster resources.
+//
+// **Cancel a delete by query operation**
+//
+// Any delete by query can be canceled using the task cancel API. For example:
+//
+// ```
+// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+// ```
+//
+// The task ID can be found by using the get tasks API.
+//
+// Cancellation should happen quickly but might take a few seconds.
+// The get task status API will continue to list the delete by query task until
+// this task checks that it has been cancelled and terminates itself.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query
+func (p *MethodCore) DeleteByQuery(index string) *core_delete_by_query.DeleteByQuery {
+	_deletebyquery := core_delete_by_query.NewDeleteByQueryFunc(p.tp)
+	return _deletebyquery(index)
+}
+
+// Throttle a delete by query operation.
+//
+// Change the number of requests per second for a particular delete by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle
+func (p *MethodCore) DeleteByQueryRethrottle(taskid string) *core_delete_by_query_rethrottle.DeleteByQueryRethrottle {
+	_deletebyqueryrethrottle := core_delete_by_query_rethrottle.NewDeleteByQueryRethrottleFunc(p.tp)
+	return _deletebyqueryrethrottle(taskid)
+}
+
+// Delete a script or search template.
+// Deletes a stored script or search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script
+func (p *MethodCore) DeleteScript(id string) *core_delete_script.DeleteScript {
+	_deletescript := core_delete_script.NewDeleteScriptFunc(p.tp)
+	return _deletescript(id)
+}
+
+// Check a document.
+//
+// Verify that a document exists.
+// For example, check to see if a document with the `_id` 0 exists:
+//
+// ```
+// HEAD my-index-000001/_doc/0
+// ```
+//
+// If the document exists, the API returns a status code of `200 - OK`.
+// If the document doesn’t exist, the API returns `404 - Not Found`.
+//
+// **Versioning support**
+//
+// You can use the `version` parameter to check the document only if its current
+// version is equal to the specified one.
+//
+// Internally, Elasticsearch has marked the old document as deleted and added an
+// entirely new document.
+// The old version of the document doesn't disappear immediately, although you
+// won't be able to access it.
+// Elasticsearch cleans up deleted documents in the background as you continue
+// to index more data.
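+//
+// A sketch under the assumption that this HEAD-style builder exposes the
+// typed-API `IsSuccess` helper (hypothetical `es` client and `ctx`):
+//
+// ```
+// found, err := es.Core.Exists("my-index-000001", "0").IsSuccess(ctx)
+// // found is true on 200 - OK, false on 404 - Not Found
+// ```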
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodCore) Exists(index, id string) *core_exists.Exists {
+	_exists := core_exists.NewExistsFunc(p.tp)
+	return _exists(index, id)
+}
+
+// Check for a document source.
+//
+// Check whether a document source exists in an index.
+// For example:
+//
+// ```
+// HEAD my-index-000001/_source/1
+// ```
+//
+// A document's source is not available if it is disabled in the mapping.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get
+func (p *MethodCore) ExistsSource(index, id string) *core_exists_source.ExistsSource {
+	_existssource := core_exists_source.NewExistsSourceFunc(p.tp)
+	return _existssource(index, id)
+}
+
+// Explain a document match result.
+// Get information about why a specific document matches, or doesn't match, a
+// query.
+// It computes a score explanation for a query and a specific document.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain
+func (p *MethodCore) Explain(index, id string) *core_explain.Explain {
+	_explain := core_explain.NewExplainFunc(p.tp)
+	return _explain(index, id)
+}
+
+// Get the field capabilities.
+//
+// Get information about the capabilities of fields among multiple indices.
+//
+// For data streams, the API returns field capabilities among the stream’s
+// backing indices.
+// It returns runtime fields like any other field.
+// For example, a runtime field with a type of keyword is returned the same as
+// any other field that belongs to the `keyword` family.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps
+func (p *MethodCore) FieldCaps() *core_field_caps.FieldCaps {
+	_fieldcaps := core_field_caps.NewFieldCapsFunc(p.tp)
+	return _fieldcaps()
+}
+
+// Get a document by its ID.
+//
+// Get a document and its source or stored fields from an index.
+//
+// By default, this API is realtime and is not affected by the refresh rate of
+// the index (when data will become visible for search).
+// In the case where stored fields are requested with the `stored_fields`
+// parameter and the document has been updated but is not yet refreshed, the API
+// will have to parse and analyze the source to extract the stored fields.
+// To turn off realtime behavior, set the `realtime` parameter to false.
+//
+// **Source filtering**
+//
+// By default, the API returns the contents of the `_source` field unless you
+// have used the `stored_fields` parameter or the `_source` field is turned off.
+// You can turn off `_source` retrieval by using the `_source` parameter:
+//
+// ```
+// GET my-index-000001/_doc/0?_source=false
+// ```
+//
+// If you only need one or two fields from the `_source`, use the
+// `_source_includes` or `_source_excludes` parameters to include or filter out
+// particular fields.
+// This can be helpful with large documents where partial retrieval can save on
+// network overhead.
+// Both parameters take a comma-separated list of fields or wildcard
+// expressions.
+// For example:
+//
+// ```
+// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+// ```
+//
+// If you only want to specify includes, you can use a shorter notation:
+//
+// ```
+// GET my-index-000001/_doc/0?_source=*.id
+// ```
+//
+// **Routing**
+//
+// If routing is used during indexing, the routing value also needs to be
+// specified to retrieve a document.
+// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func (p *MethodCore) Get(index, id string) *core_get.Get { + _get := core_get.NewGetFunc(p.tp) + return _get(index, id) +} + +// Get a script or search template. +// Retrieves a stored script or search template. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script +func (p *MethodCore) GetScript(id string) *core_get_script.GetScript { + _getscript := core_get_script.NewGetScriptFunc(p.tp) + return _getscript(id) +} + +// Get script contexts. +// +// Get a list of supported script contexts and their methods. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context +func (p *MethodCore) GetScriptContext() *core_get_script_context.GetScriptContext { + _getscriptcontext := core_get_script_context.NewGetScriptContextFunc(p.tp) + return _getscriptcontext() +} + +// Get script languages. +// +// Get a list of available script types, languages, and contexts. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages +func (p *MethodCore) GetScriptLanguages() *core_get_script_languages.GetScriptLanguages { + _getscriptlanguages := core_get_script_languages.NewGetScriptLanguagesFunc(p.tp) + return _getscriptlanguages() +} + +// Get a document's source. +// +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func (p *MethodCore) GetSource(index, id string) *core_get_source.GetSource { + _getsource := core_get_source.NewGetSourceFunc(p.tp) + return _getsource(index, id) +} + +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. 
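+//
+// A hypothetical polling sketch (`es` client assumed; `Verbose` is assumed to
+// map to the `verbose` query parameter discussed below):
+//
+// ```
+// report, err := es.Core.HealthReport().Verbose(false).Do(ctx)
+// ```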
+//
+// In the event that an indicator’s status is non-green, a list of impacts may
+// be present in the indicator result which detail the functionalities that are
+// negatively affected by the health issue.
+// Each impact carries with it a severity level, an area of the system that is
+// affected, and a simple description of the impact on the system.
+//
+// Some health indicators can determine the root cause of a health problem and
+// prescribe a set of steps that can be performed in order to improve the health
+// of the system.
+// The root cause and remediation steps are encapsulated in a diagnosis.
+// A diagnosis contains a cause detailing a root cause analysis, an action
+// containing a brief description of the steps to take to fix the problem, the
+// list of affected resources (if applicable), and a detailed step-by-step
+// troubleshooting guide to fix the diagnosed problem.
+//
+// NOTE: The health indicators perform root cause analysis of non-green health
+// statuses. This can be computationally expensive when called frequently.
+// When setting up automated polling of the API for health status, set verbose
+// to false to disable the more expensive analysis logic.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report
+func (p *MethodCore) HealthReport() *core_health_report.HealthReport {
+	_healthreport := core_health_report.NewHealthReportFunc(p.tp)
+	return _healthreport()
+}
+
+// Create or update a document in an index.
+//
+// Add a JSON document to the specified data stream or index and make it
+// searchable.
+// If the target is an index and the document already exists, the request
+// updates the document and increments its version.
+//
+// NOTE: You cannot use this API to send update requests for existing documents
+// in a data stream.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or index alias:
+//
+// * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request
+// format, you must have the `create`, `index`, or `write` index privilege.
+// * To add a document using the `POST /<target>/_doc/` request format, you must
+// have the `create_doc`, `create`, `index`, or `write` index privilege.
+// * To automatically create a data stream or index with this API request, you
+// must have the `auto_configure`, `create_index`, or `manage` index privilege.
+//
+// Automatic data stream creation requires a matching index template with data
+// stream enabled.
+//
+// NOTE: Replica shards might not all be started when an indexing operation
+// returns successfully.
+// By default, only the primary is required. Set `wait_for_active_shards` to
+// change this default behavior.
+//
+// **Automatically create data streams and indices**
+//
+// If the request's target doesn't exist and matches an index template with a
+// `data_stream` definition, the index operation automatically creates the data
+// stream.
+//
+// If the target doesn't exist and doesn't match a data stream template, the
+// operation automatically creates the index and applies any matching index
+// templates.
+//
+// NOTE: Elasticsearch includes several built-in index templates. To avoid
+// naming collisions with these templates, refer to index pattern documentation.
+//
+// If no mapping exists, the index operation creates a dynamic mapping.
+// By default, new fields and objects are automatically added to the mapping if
+// needed.
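+//
+// A rough usage sketch (hypothetical `es` client; the document is sent as a
+// raw JSON body, and Elasticsearch assigns the document ID since none is set):
+//
+// ```
+// res, err := es.Core.Index("my-index-000001").
+//     Raw(strings.NewReader(`{ "user": { "id": "elkbee" } }`)).
+//     Do(ctx)
+// ```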
+//
+// Automatic index creation is controlled by the `action.auto_create_index`
+// setting.
+// If it is `true`, any index can be created automatically.
+// You can modify this setting to explicitly allow or block automatic creation
+// of indices that match specified patterns or set it to `false` to turn off
+// automatic index creation entirely.
+// Specify a comma-separated list of patterns you want to allow or prefix each
+// pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+// When a list is specified, the default behaviour is to disallow.
+//
+// NOTE: The `action.auto_create_index` setting affects the automatic creation
+// of indices only.
+// It does not affect the creation of data streams.
+//
+// **Optimistic concurrency control**
+//
+// Index operations can be made conditional and only be performed if the last
+// modification to the document was assigned the sequence number and primary
+// term specified by the `if_seq_no` and `if_primary_term` parameters.
+// If a mismatch is detected, the operation will result in a
+// `VersionConflictException` and a status code of `409`.
+//
+// **Routing**
+//
+// By default, shard placement — or routing — is controlled by using a hash of
+// the document's ID value.
+// For more explicit control, the value fed into the hash function used by the
+// router can be directly specified on a per-operation basis using the `routing`
+// parameter.
+//
+// When setting up explicit mapping, you can also use the `_routing` field to
+// direct the index operation to extract the routing value from the document
+// itself.
+// This does come at the (very minimal) cost of an additional document parsing
+// pass.
+// If the `_routing` mapping is defined and set to be required, the index
+// operation will fail if no routing value is provided or extracted.
+//
+// NOTE: Data streams do not support custom routing unless they were created
+// with the `allow_custom_routing` setting enabled in the template.
+//
+// **Distributed**
+//
+// The index operation is directed to the primary shard based on its route and
+// performed on the actual node containing this shard.
+// After the primary shard completes the operation, if needed, the update is
+// distributed to applicable replicas.
+//
+// **Active shards**
+//
+// To improve the resiliency of writes to the system, indexing operations can be
+// configured to wait for a certain number of active shard copies before
+// proceeding with the operation.
+// If the requisite number of active shard copies are not available, then the
+// write operation must wait and retry, until either the requisite shard copies
+// have started or a timeout occurs.
+// By default, write operations only wait for the primary shards to be active
+// before proceeding (that is to say `wait_for_active_shards` is `1`).
+// This default can be overridden in the index settings dynamically by setting
+// `index.write.wait_for_active_shards`.
+// To alter this behavior per operation, use the `wait_for_active_shards`
+// request parameter.
+//
+// Valid values are `all` or any positive integer up to the total number of
+// configured copies per shard in the index (which is `number_of_replicas`+1).
+// Specifying a negative value or a number greater than the number of shard
+// copies will throw an error.
+//
+// For example, suppose you have a cluster of three nodes, A, B, and C and you
+// create an index with the number of replicas set to 3 (resulting in 4
+// shard copies, one more copy than there are nodes).
+// If you attempt an indexing operation, by default the operation will only
+// ensure the primary copy of each shard is available before proceeding.
+// This means that even if B and C went down and A hosted the primary shard
+// copies, the indexing operation would still proceed with only one copy of the
+// data.
+// If `wait_for_active_shards` is set on the request to `3` (and all three nodes
+// are up), the indexing operation will require 3 active shard copies before
+// proceeding.
+// This requirement should be met because there are 3 active nodes in the
+// cluster, each one holding a copy of the shard.
+// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is
+// the same in this situation), the indexing operation will not proceed as you
+// do not have all 4 copies of each shard active in the index.
+// The operation will time out unless a new node is brought up in the cluster to
+// host the fourth copy of the shard.
+//
+// It is important to note that this setting greatly reduces the chances of the
+// write operation not writing to the requisite number of shard copies, but it
+// does not completely eliminate the possibility, because this check occurs
+// before the write operation starts.
+// After the write operation is underway, it is still possible for replication
+// to fail on any number of shard copies but still succeed on the primary.
+// The `_shards` section of the API response reveals the number of shard copies
+// on which replication succeeded and failed.
+//
+// **No operation (noop) updates**
+//
+// When updating a document by using this API, a new version of the document is
+// always created even if the document hasn't changed.
+// If this isn't acceptable, use the `_update` API with `detect_noop` set to
+// `true`.
+// The `detect_noop` option isn't available on this API because it doesn’t fetch
+// the old source and isn't able to compare it against the new source.
+//
+// There isn't a definitive rule for when noop updates aren't acceptable.
+// It's a combination of lots of factors like how frequently your data source
+// sends updates that are actually noops and how many queries per second
+// Elasticsearch runs on the shard receiving the updates.
+//
+// **Versioning**
+//
+// Each indexed document is given a version number.
+// By default, internal versioning is used that starts at 1 and increments with
+// each update, deletes included.
+// Optionally, the version number can be set to an external value (for example,
+// if maintained in a database).
+// To enable this functionality, `version_type` should be set to `external`.
+// The value provided must be a numeric, long value greater than or equal to 0,
+// and less than around `9.2e+18`.
+//
+// NOTE: Versioning is completely real time, and is not affected by the near
+// real time aspects of search operations.
+// If no version is provided, the operation runs without any version checks.
+//
+// When using the external version type, the system checks to see if the version
+// number passed to the index request is greater than the version of the
+// currently stored document.
+// If true, the document will be indexed and the new version number used.
+// If the value provided is less than or equal to the stored document's version
+// number, a version conflict will occur and the index operation will fail. For
+// example:
+//
+// ```
+// PUT my-index-000001/_doc/1?version=2&version_type=external
+//
+// {
+// "user": {
+// "id": "elkbee"
+// }
+// }
+// ```
+//
+// In this example, the operation will succeed since the supplied version of 2
+// is higher than the current document version of 1.
+// If the document was already updated and its version was set to 2 or higher,
+// the indexing command will fail and result in a conflict (409 HTTP status
+// code).
+//
+// A nice side effect is that there is no need to maintain strict ordering of
+// async indexing operations run as a result of changes to a source database, as
+// long as version numbers from the source database are used.
+// Even the simple case of updating the Elasticsearch index using data from a
+// database is simplified if external versioning is used, as only the latest
+// version will be used if the index operations arrive out of order.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create
+func (p *MethodCore) Index(index string) *core_index.Index {
+	_index := core_index.NewIndexFunc(p.tp)
+	return _index(index)
+}
+
+// Get cluster info.
+// Get basic build, version, and cluster information.
+// NOTE: In Serverless, this API is retained for backward compatibility only.
+// Some response fields, such as the version number, should be ignored.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info
+func (p *MethodCore) Info() *core_info.Info {
+	_info := core_info.NewInfoFunc(p.tp)
+	return _info()
+}
+
+// Get multiple documents.
+//
+// Get multiple JSON documents by ID from one or more indices.
+// If you specify an index in the request URI, you only need to specify the
+// document IDs in the request body.
+// To ensure fast responses, this multi get (mget) API responds with partial
+// results if one or more shards fail.
+//
+// **Filter source fields**
+//
+// By default, the `_source` field is returned for every document (if stored).
+// Use the `_source` and `_source_includes` or `_source_excludes` attributes to
+// filter what fields are returned for a particular document.
+// You can include the `_source`, `_source_includes`, and `_source_excludes`
+// query parameters in the request URI to specify the defaults to use when there
+// are no per-document instructions.
+//
+// **Get stored fields**
+//
+// Use the `stored_fields` attribute to specify the set of stored fields you
+// want to retrieve.
+// Any requested fields that are not stored are ignored.
+// You can include the `stored_fields` query parameter in the request URI to
+// specify the defaults to use when there are no per-document instructions.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget
+func (p *MethodCore) Mget() *core_mget.Mget {
+	_mget := core_mget.NewMgetFunc(p.tp)
+	return _mget()
+}
+
+// Run multiple searches.
+//
+// The format of the request is similar to the bulk API format and makes use of
+// the newline delimited JSON (NDJSON) format.
+// The structure is as follows:
+//
+// ```
+// header\n
+// body\n
+// header\n
+// body\n
+// ```
+//
+// This structure is specifically optimized to reduce parsing if a specific
+// search ends up redirected to another node.
+//
+// IMPORTANT: The final line of data must end with a newline character `\n`.
+// Each newline character may be preceded by a carriage return `\r`.
+// When sending requests to this endpoint the `Content-Type` header should be +// set to `application/x-ndjson`. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch +func (p *MethodCore) Msearch() *core_msearch.Msearch { + _msearch := core_msearch.NewMsearchFunc(p.tp) + return _msearch() +} + +// Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. +// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template +func (p *MethodCore) MsearchTemplate() *core_msearch_template.MsearchTemplate { + _msearchtemplate := core_msearch_template.NewMsearchTemplateFunc(p.tp) + return _msearchtemplate() +} + +// Get multiple term vectors. +// +// Get multiple term vectors with a single request. +// You can specify existing documents by index and ID or provide artificial +// documents in the body of the request. +// You can specify the index in the request body or request URI. +// The response contains a `docs` array with all the fetched termvectors. +// Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors +func (p *MethodCore) Mtermvectors() *core_mtermvectors.Mtermvectors { + _mtermvectors := core_mtermvectors.NewMtermvectorsFunc(p.tp) + return _mtermvectors() +} + +// Open a point in time. +// +// A search request by default runs against the most recent visible data of the +// target indices, +// which is called point in time. Elasticsearch pit (point in time) is a +// lightweight view into the +// state of the data as it existed when initiated. In some cases, it’s preferred +// to perform multiple +// search requests using the same point in time. For example, if refreshes +// happen between +// `search_after` requests, then the results of those requests might not be +// consistent as changes happening +// between searches are only visible to the more recent point in time. +// +// A point in time must be opened explicitly before being used in search +// requests. +// +// A subsequent search request with the `pit` parameter must not specify +// `index`, `routing`, or `preference` values as these parameters are copied +// from the point in time. +// +// Just like regular searches, you can use `from` and `size` to page through +// point in time search results, up to the first 10,000 hits. +// If you want to retrieve more hits, use PIT with `search_after`. +// +// IMPORTANT: The open point in time request and each subsequent search request +// can return different identifiers; always use the most recently received ID +// for the next search request. 
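+//
+// A hedged Go sketch (hypothetical `es` client; `KeepAlive` is assumed to map
+// to the `keep_alive` query parameter):
+//
+// ```
+// pit, err := es.Core.OpenPointInTime("my-index-000001").KeepAlive("1m").Do(ctx)
+// // pass the returned PIT ID in subsequent searches, then close it when done
+// ```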
+//
+// When a PIT that contains shard failures is used in a search request, the
+// missing shards are always reported in the search response as a
+// `NoShardAvailableActionException` exception.
+// To get rid of these exceptions, a new PIT needs to be created so that shards
+// missing from the previous PIT can be handled, assuming they become available
+// in the meantime.
+//
+// **Keeping point in time alive**
+//
+// The `keep_alive` parameter, which is passed to an open point in time request
+// and search request, extends the time to live of the corresponding point in
+// time.
+// The value does not need to be long enough to process all data — it just needs
+// to be long enough for the next request.
+//
+// Normally, the background merge process optimizes the index by merging
+// together smaller segments to create new, bigger segments.
+// Once the smaller segments are no longer needed they are deleted.
+// However, open point-in-times prevent the old segments from being deleted
+// since they are still in use.
+//
+// TIP: Keeping older segments alive means that more disk space and file handles
+// are needed.
+// Ensure that you have configured your nodes to have ample free file handles.
+//
+// Additionally, if a segment contains deleted or updated documents then the
+// point in time must keep track of whether each document in the segment was
+// live at the time of the initial search request.
+// Ensure that your nodes have sufficient heap space if you have many open
+// point-in-times on an index that is subject to ongoing deletes or updates.
+// Note that a point-in-time doesn't prevent its associated indices from being
+// deleted.
+// You can check how many point-in-times (that is, search contexts) are open
+// with the nodes stats API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time
+func (p *MethodCore) OpenPointInTime(index string) *core_open_point_in_time.OpenPointInTime {
+	_openpointintime := core_open_point_in_time.NewOpenPointInTimeFunc(p.tp)
+	return _openpointintime(index)
+}
+
+// Ping the cluster.
+// Get information about whether the cluster is running.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster
+func (p *MethodCore) Ping() *core_ping.Ping {
+	_ping := core_ping.NewPingFunc(p.tp)
+	return _ping()
+}
+
+// Create or update a script or search template.
+// Creates or updates a stored script or search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script
+func (p *MethodCore) PutScript(id string) *core_put_script.PutScript {
+	_putscript := core_put_script.NewPutScriptFunc(p.tp)
+	return _putscript(id)
+}
+
+// Evaluate ranked search results.
+//
+// Evaluate the quality of ranked search results over a set of typical search
+// queries.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval
+func (p *MethodCore) RankEval() *core_rank_eval.RankEval {
+	_rankeval := core_rank_eval.NewRankEvalFunc(p.tp)
+	return _rankeval()
+}
+
+// Reindex documents.
+//
+// Copy documents from a source to a destination.
+// You can copy all documents to the destination index or reindex a subset of
+// the documents.
+// The source can be any existing index, alias, or data stream.
+// The destination must differ from the source.
+// For example, you cannot reindex a data stream into itself.
+//
+// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the
+// source.
+// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// Refer to the linked documentation for examples of how to reindex documents. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex +func (p *MethodCore) Reindex() *core_reindex.Reindex { + _reindex := core_reindex.NewReindexFunc(p.tp) + return _reindex() +} + +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. 
+// Rethrottling that slows down the query will take effect after completing the
+// current batch.
+// This behavior prevents scroll timeouts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex
+func (p *MethodCore) ReindexRethrottle(taskid string) *core_reindex_rethrottle.ReindexRethrottle {
+	_reindexrethrottle := core_reindex_rethrottle.NewReindexRethrottleFunc(p.tp)
+	return _reindexrethrottle(taskid)
+}
+
+// Render a search template.
+//
+// Render a search template as a search request body.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template
+func (p *MethodCore) RenderSearchTemplate() *core_render_search_template.RenderSearchTemplate {
+	_rendersearchtemplate := core_render_search_template.NewRenderSearchTemplateFunc(p.tp)
+	return _rendersearchtemplate()
+}
+
+// Run a script.
+//
+// Runs a script and returns a result.
+// Use this API to build and test scripts, such as when defining a script for a
+// runtime field.
+// This API requires very few dependencies and is especially useful if you don't
+// have permissions to write documents on a cluster.
+//
+// The API uses several _contexts_, which control how scripts are run, what
+// variables are available at runtime, and what the return type is.
+//
+// Each context requires a script, but additional parameters depend on the
+// context you're using for that script.
+// https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples
+func (p *MethodCore) ScriptsPainlessExecute() *core_scripts_painless_execute.ScriptsPainlessExecute {
+	_scriptspainlessexecute := core_scripts_painless_execute.NewScriptsPainlessExecuteFunc(p.tp)
+	return _scriptspainlessexecute()
+}
+
+// Run a scrolling search.
+//
+// IMPORTANT: The scroll API is no longer recommended for deep pagination. If
+// you need to preserve the index state while paging through more than 10,000
+// hits, use the `search_after` parameter with a point in time (PIT).
+//
+// The scroll API gets large sets of results from a single scrolling search
+// request.
+// To get the necessary scroll ID, submit a search API request that includes an
+// argument for the `scroll` query parameter.
+// The `scroll` parameter indicates how long Elasticsearch should retain the
+// search context for the request.
+// The search response returns a scroll ID in the `_scroll_id` response body
+// parameter.
+// You can then use the scroll ID with the scroll API to retrieve the next batch
+// of results for the request.
+// If the Elasticsearch security features are enabled, the access to the results
+// of a specific scroll ID is restricted to the user or API key that submitted
+// the search.
+//
+// You can also use the scroll API to specify a new scroll parameter that
+// extends or shortens the retention period for the search context.
+//
+// IMPORTANT: Results from a scrolling search reflect the state of the index at
+// the time of the initial search request. Subsequent indexing or document
+// changes only affect later search and scroll requests.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll
+func (p *MethodCore) Scroll() *core_scroll.Scroll {
+	_scroll := core_scroll.NewScrollFunc(p.tp)
+	return _scroll()
+}
+
+// Run a search.
+//
+// Get search hits that match the query defined in the request.
+// You can provide search queries using the `q` query string parameter or the
+// request body.
+// If both are specified, only the query parameter is used.
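+//
+// A minimal sketch, assuming the usual typed-API builder shape (hypothetical
+// `es` client; the query is supplied as a raw JSON body):
+//
+// ```
+// res, err := es.Core.Search().
+//     Index("my-index-000001").
+//     Raw(strings.NewReader(`{ "query": { "match_all": {} } }`)).
+//     Do(ctx)
+// ```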
+//
+// If the Elasticsearch security features are enabled, you must have the read
+// index privilege for the target data stream, index, or alias. For
+// cross-cluster search, refer to the documentation about configuring CCS
+// privileges.
+// To search a point in time (PIT) for an alias, you must have the `read` index
+// privilege for the alias's data streams or indices.
+//
+// **Search slicing**
+//
+// When paging through a large number of documents, it can be helpful to split
+// the search into multiple slices to consume them independently with the
+// `slice` and `pit` properties.
+// By default the splitting is done first on the shards, then locally on each
+// shard.
+// The local splitting partitions the shard into contiguous ranges based on
+// Lucene document IDs.
+//
+// For instance if the number of shards is equal to 2 and you request 4 slices,
+// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are
+// assigned to the second shard.
+//
+// IMPORTANT: The same point-in-time ID should be used for all slices.
+// If different PIT IDs are used, slices can overlap and miss documents.
+// This situation can occur because the splitting criterion is based on Lucene
+// document IDs, which are not stable across changes to the index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search
+func (p *MethodCore) Search() *core_search.Search {
+	_search := core_search.NewSearchFunc(p.tp)
+	return _search()
+}
+
+// Search a vector tile.
+//
+// Search a vector tile for geospatial values.
+// Before using this API, you should be familiar with the Mapbox vector tile
+// specification.
+// The API returns results as a binary mapbox vector tile.
+//
+// Internally, Elasticsearch translates a vector tile search API request into a
+// search containing:
+//
+// * A `geo_bounding_box` query on the `<field>`. The query uses the
+// `<zoom>/<x>/<y>` tile as a bounding box.
+// * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The
+// `grid_agg` parameter determines the aggregation type. The aggregation uses
+// the `<zoom>/<x>/<y>` tile as a bounding box.
+// * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only
+// includes this aggregation if the `exact_bounds` parameter is `true`.
+// * If the optional parameter `with_labels` is `true`, the internal search will
+// include a dynamic runtime field that calls the `getLabelPosition` function of
+// the geometry doc value. This enables the generation of new point features
+// containing suggested geometry labels, so that, for example, multi-polygons
+// will have only one label.
+//
+// The API returns results as a binary Mapbox vector tile.
+// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the
+// tile contains three layers:
+//
+// * A `hits` layer containing a feature for each `<field>` value matching the
+// `geo_bounding_box` query.
+// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or
+// `geohex_grid`. The layer only contains features for cells with matching data.
+// * A `meta` layer containing:
+//   - A feature containing a bounding box. By default, this is the bounding box
+//     of the tile.
+//   - Value ranges for any sub-aggregations on the `geotile_grid` or
+//     `geohex_grid`.
+//   - Metadata for the search.
+//
+// The API only returns features that can display at its zoom level.
+// For example, if a polygon feature has no area at its zoom level, the API
+// omits it.
+// The API returns errors as UTF-8 encoded JSON.
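+//
+// A hypothetical sketch (`es` client assumed; note that zoom, x, and y are
+// plain strings in this generated signature, and the response is assumed to
+// be the raw binary tile):
+//
+// ```
+// tile, err := es.Core.SearchMvt("museums", "location", "13", "4207", "2692").Do(ctx)
+// ```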
+//
+// IMPORTANT: You can specify several options for this API as either a query
+// parameter or request body parameter.
+// If you specify both parameters, the query parameter takes precedence.
+//
+// **Grid precision for geotile**
+//
+// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles
+// for lower zoom levels.
+// `grid_precision` represents the additional zoom levels available through
+// these cells. The final precision is computed as follows: `<zoom> +
+// grid_precision`.
+// For example, if `<zoom>` is 7 and `grid_precision` is 8, then the
+// `geotile_grid` aggregation will use a precision of 15.
+// The maximum final precision is 29.
+// The `grid_precision` also determines the number of cells for the grid as
+// follows: `(2^grid_precision) x (2^grid_precision)`.
+// For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+// The `aggs` layer only contains features for cells with matching data.
+//
+// **Grid precision for geohex**
+//
+// For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and
+// `grid_precision` to calculate a final precision as follows: `<zoom> +
+// grid_precision`.
+//
+// This precision determines the H3 resolution of the hexagonal cells produced
+// by the `geohex` aggregation.
+// The following table maps the H3 resolution for each precision.
+// For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+// At a precision of 6, hexagonal cells have an H3 resolution of 2.
+// If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+// At a precision of 7, hexagonal cells have an H3 resolution of 3.
+//
+// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+// | --------- | ---------------- | ------------- | ---------------- | ----- |
+// | 1 | 4 | 0 | 122 | 30.5 |
+// | 2 | 16 | 0 | 122 | 7.625 |
+// | 3 | 64 | 1 | 842 | 13.15625 |
+// | 4 | 256 | 1 | 842 | 3.2890625 |
+// | 5 | 1024 | 2 | 5882 | 5.744140625 |
+// | 6 | 4096 | 2 | 5882 | 1.436035156 |
+// | 7 | 16384 | 3 | 41162 | 2.512329102 |
+// | 8 | 65536 | 3 | 41162 | 0.6280822754 |
+// | 9 | 262144 | 4 | 288122 | 1.099098206 |
+// | 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+//
+// Hexagonal cells don't align perfectly on a vector tile.
+// Some cells may intersect more than one vector tile.
+// To compute the H3 resolution for each precision, Elasticsearch compares the
+// average density of hexagonal bins at each resolution with the average density
+// of tile bins at each zoom level.
+// Elasticsearch uses the H3 resolution that is closest to the corresponding
+// geotile density.
+//
+// Learn how to use the vector tile search API with practical examples in the
+// [Vector tile search
+// examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search)
+// guide.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt
+func (p *MethodCore) SearchMvt(index, field, zoom, x, y string) *core_search_mvt.SearchMvt {
+	_searchmvt := core_search_mvt.NewSearchMvtFunc(p.tp)
+	return _searchmvt(index, field, zoom, x, y)
+}
+
+// Get the search shards.
+//
+// Get the indices and shards that a search request would be run against.
+// This information can be useful for working out issues or planning
+// optimizations with routing and shard preferences.
+// When filtered aliases are used, the filter is returned as part of the
+// `indices` section.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// `view_index_metadata` or `manage` index privilege for the target data stream,
+// index, or alias.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards
+func (p *MethodCore) SearchShards() *core_search_shards.SearchShards {
+	_searchshards := core_search_shards.NewSearchShardsFunc(p.tp)
+	return _searchshards()
+}
+
+// Run a search with a search template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template
+func (p *MethodCore) SearchTemplate() *core_search_template.SearchTemplate {
+	_searchtemplate := core_search_template.NewSearchTemplateFunc(p.tp)
+	return _searchtemplate()
+}
+
+// Get terms in an index.
+//
+// Discover terms that match a partial string in an index.
+// This API is designed for low-latency look-ups used in auto-complete
+// scenarios.
+//
+// > info
+// > The terms enum API may return terms from deleted documents. Deleted
+// documents are initially only marked as deleted. It is not until their
+// segments are merged that documents are actually deleted. Until that happens,
+// the terms enum API will return terms from these documents.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum
+func (p *MethodCore) TermsEnum(index string) *core_terms_enum.TermsEnum {
+	_termsenum := core_terms_enum.NewTermsEnumFunc(p.tp)
+	return _termsenum(index)
+}
+
+// Get term vector information.
+//
+// Get information and statistics about terms in the fields of a particular
+// document.
+//
+// You can retrieve term vectors for documents stored in the index or for
+// artificial documents passed in the body of the request.
+// You can specify the fields you are interested in through the `fields`
+// parameter or by adding the fields to the request body.
+// For example:
+//
+// ```
+// GET /my-index-000001/_termvectors/1?fields=message
+// ```
+//
+// Fields can be specified using wildcards, similar to the multi match query.
+//
+// Term vectors are real-time by default, not near real-time.
+// This can be changed by setting the `realtime` parameter to `false`.
+//
+// You can request three types of values: _term information_, _term statistics_,
+// and _field statistics_.
+// By default, all term information and field statistics are returned for all
+// fields but term statistics are excluded.
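+//
+// A hypothetical typed-API equivalent of the REST example above (it assumes
+// an initialized *MethodCore value `core`, a context.Context `ctx`, and `Id`
+// and `Fields` builder setters for the corresponding parameters):
+//
+//	tv, err := core.Termvectors("my-index-000001").
+//		Id("1").
+//		Fields("message").
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = tv // per-field term, offset, and payload information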
+//
+// **Term information**
+//
+// * term frequency in the field (always returned)
+// * term positions (`positions: true`)
+// * start and end offsets (`offsets: true`)
+// * term payloads (`payloads: true`), as base64 encoded bytes
+//
+// If the requested information wasn't stored in the index, it will be computed
+// on the fly if possible.
+// Additionally, term vectors can be computed for artificial documents, that
+// is, for documents provided by the user rather than stored in the index.
+//
+// > warn
+// > Start and end offsets assume UTF-16 encoding is being used. If you want to
+// use these offsets in order to get the original text that produced this token,
+// you should make sure that the string you are taking a sub-string of is also
+// encoded using UTF-16.
+//
+// **Behaviour**
+//
+// The term and field statistics are not accurate.
+// Deleted documents are not taken into account.
+// The information is only retrieved for the shard the requested document
+// resides in.
+// The term and field statistics are therefore only useful as relative measures
+// whereas the absolute numbers have no meaning in this context.
+// By default, when requesting term vectors of artificial documents, a shard to
+// get the statistics from is randomly selected.
+// Use `routing` only to hit a particular shard.
+// Refer to the linked documentation for detailed examples of how to use this
+// API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors
+func (p *MethodCore) Termvectors(index string) *core_termvectors.Termvectors {
+	_termvectors := core_termvectors.NewTermvectorsFunc(p.tp)
+	return _termvectors(index)
+}
+
+// Update a document.
+//
+// Update a document by running a script or passing a partial document.
+//
+// If the Elasticsearch security features are enabled, you must have the `index`
+// or `write` index privilege for the target index or index alias.
+//
+// The script can update, delete, or skip modifying the document.
+// The API also supports passing a partial document, which is merged into the
+// existing document.
+// To fully replace an existing document, use the index API.
+// This operation:
+//
+// * Gets the document (collocated with the shard) from the index.
+// * Runs the specified script.
+// * Indexes the result.
+//
+// The document must still be reindexed, but using this API removes some network
+// roundtrips and reduces the chance of version conflicts between the GET and
+// the index operation.
+//
+// The `_source` field must be enabled to use this API.
+// In addition to `_source`, you can access the following variables through the
+// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the
+// current timestamp).
+// For usage examples such as partial updates, upserts, and scripted updates,
+// see the External documentation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update
+func (p *MethodCore) Update(index, id string) *core_update.Update {
+	_update := core_update.NewUpdateFunc(p.tp)
+	return _update(index, id)
+}
+
+// Update documents.
+// Updates documents that match the specified query.
+// If no query is specified, performs an update on every document in the data
+// stream or index without modifying the source, which is useful for picking up
+// mapping changes.
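+//
+// For example, a mapping-change pickup can be sketched as follows
+// (hypothetical; it assumes an initialized *MethodCore value `core` and a
+// context.Context `ctx`; with no query in the body, every document is updated
+// in place):
+//
+//	res, err := core.UpdateByQuery("my-index").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res // reports updated, noops, version conflicts, and so on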
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or alias:
+//
+// * `read`
+// * `index` or `write`
+//
+// You can specify the query criteria in the request URI or the request body
+// using the same syntax as the search API.
+//
+// When you submit an update by query request, Elasticsearch gets a snapshot of
+// the data stream or index when it begins processing the request and updates
+// matching documents using internal versioning.
+// When the versions match, the document is updated and the version number is
+// incremented.
+// If a document changes between the time that the snapshot is taken and the
+// update operation is processed, it results in a version conflict and the
+// operation fails.
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts, the operation could attempt
+// to update more documents from the source than `max_docs` until it has
+// successfully updated `max_docs` documents or it has gone through every
+// document in the source query.
+//
+// NOTE: Documents with a version equal to 0 cannot be updated using update by
+// query because internal versioning does not support 0 as a valid version
+// number.
+//
+// While processing an update by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents.
+// A bulk update request is performed for each batch of matching documents.
+// Any query or update failures cause the update by query request to fail and
+// the failures are shown in the response.
+// Any update requests that completed successfully still stick; they are not
+// rolled back.
+//
+// **Refreshing shards**
+//
+// Specifying the `refresh` parameter refreshes all shards once the request
+// completes.
+// This is different from the update API's `refresh` parameter, which causes
+// only the shard that received the request to be refreshed.
+// Unlike the update API, it does not support `wait_for`.
+//
+// **Running update by query asynchronously**
+//
+// If the request contains `wait_for_completion=false`, Elasticsearch
+// performs some preflight checks, launches the request, and returns a
+// [task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+// you can use to cancel or get the status of the task.
+// Elasticsearch creates a record of this task as a document at
+// `.tasks/task/${taskId}`.
+//
+// **Waiting for active shards**
+//
+// `wait_for_active_shards` controls how many copies of a shard must be active
+// before proceeding with the request. See
+// [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)
+// for details. `timeout` controls how long each write request waits for
+// unavailable shards to become available.
+// Both work exactly the way they work in the
+// [Bulk
+// API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk).
+// Update by query uses scrolled searches, so you can also specify the `scroll`
+// parameter to control how long it keeps the search context alive, for
+// example `?scroll=10m`. The default is 5 minutes.
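+//
+// A hypothetical async invocation sketch (it assumes `core` and `ctx` as
+// above, plus a `WaitForCompletion` builder setter for the query parameter of
+// the same name):
+//
+//	res, err := core.UpdateByQuery("my-index").
+//		WaitForCompletion(false).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res // carries the task ID to use with the tasks API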
+// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. 
+// * Update performance scales linearly across available resources with the
+// number of slices.
+//
+// Whether query or update performance dominates the runtime depends on the
+// documents being reindexed and cluster resources.
+// Refer to the linked documentation for examples of how to update documents
+// using the `_update_by_query` API:
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query
+func (p *MethodCore) UpdateByQuery(index string) *core_update_by_query.UpdateByQuery {
+	_updatebyquery := core_update_by_query.NewUpdateByQueryFunc(p.tp)
+	return _updatebyquery(index)
+}
+
+// Throttle an update by query operation.
+//
+// Change the number of requests per second for a particular update by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle
+func (p *MethodCore) UpdateByQueryRethrottle(taskid string) *core_update_by_query_rethrottle.UpdateByQueryRethrottle {
+	_updatebyqueryrethrottle := core_update_by_query_rethrottle.NewUpdateByQueryRethrottleFunc(p.tp)
+	return _updatebyqueryrethrottle(taskid)
+}
+
+// Delete a dangling index.
+// If Elasticsearch encounters index data that is absent from the current
+// cluster state, those indices are considered to be dangling.
+// For example, this can happen if you delete more than
+// `cluster.indices.tombstones.size` indices while an Elasticsearch node is
+// offline.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index
+func (p *MethodDanglingIndices) DeleteDanglingIndex(indexuuid string) *dangling_indices_delete_dangling_index.DeleteDanglingIndex {
+	_deletedanglingindex := dangling_indices_delete_dangling_index.NewDeleteDanglingIndexFunc(p.tp)
+	return _deletedanglingindex(indexuuid)
+}
+
+// Import a dangling index.
+//
+// If Elasticsearch encounters index data that is absent from the current
+// cluster state, those indices are considered to be dangling.
+// For example, this can happen if you delete more than
+// `cluster.indices.tombstones.size` indices while an Elasticsearch node is
+// offline.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index
+func (p *MethodDanglingIndices) ImportDanglingIndex(indexuuid string) *dangling_indices_import_dangling_index.ImportDanglingIndex {
+	_importdanglingindex := dangling_indices_import_dangling_index.NewImportDanglingIndexFunc(p.tp)
+	return _importdanglingindex(indexuuid)
+}
+
+// Get the dangling indices.
+//
+// If Elasticsearch encounters index data that is absent from the current
+// cluster state, those indices are considered to be dangling.
+// For example, this can happen if you delete more than
+// `cluster.indices.tombstones.size` indices while an Elasticsearch node is
+// offline.
+//
+// Use this API to list dangling indices, which you can then import or delete.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices
+func (p *MethodDanglingIndices) ListDanglingIndices() *dangling_indices_list_dangling_indices.ListDanglingIndices {
+	_listdanglingindices := dangling_indices_list_dangling_indices.NewListDanglingIndicesFunc(p.tp)
+	return _listdanglingindices()
+}
+
+// Delete an enrich policy.
+// Deletes an existing enrich policy and its enrich index. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy +func (p *MethodEnrich) DeletePolicy(name string) *enrich_delete_policy.DeletePolicy { + _deletepolicy := enrich_delete_policy.NewDeletePolicyFunc(p.tp) + return _deletepolicy(name) +} + +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy +func (p *MethodEnrich) ExecutePolicy(name string) *enrich_execute_policy.ExecutePolicy { + _executepolicy := enrich_execute_policy.NewExecutePolicyFunc(p.tp) + return _executepolicy(name) +} + +// Get an enrich policy. +// Returns information about an enrich policy. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy +func (p *MethodEnrich) GetPolicy() *enrich_get_policy.GetPolicy { + _getpolicy := enrich_get_policy.NewGetPolicyFunc(p.tp) + return _getpolicy() +} + +// Create an enrich policy. +// Creates an enrich policy. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy +func (p *MethodEnrich) PutPolicy(name string) *enrich_put_policy.PutPolicy { + _putpolicy := enrich_put_policy.NewPutPolicyFunc(p.tp) + return _putpolicy(name) +} + +// Get enrich stats. +// Returns enrich coordinator statistics and information about enrich policies +// that are currently executing. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats +func (p *MethodEnrich) Stats() *enrich_stats.Stats { + _stats := enrich_stats.NewStatsFunc(p.tp) + return _stats() +} + +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. +// The API also deletes results for the search. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete +func (p *MethodEql) Delete(id string) *eql_delete.Delete { + _delete := eql_delete.NewDeleteFunc(p.tp) + return _delete(id) +} + +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a +// stored synchronous EQL search. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get +func (p *MethodEql) Get(id string) *eql_get.Get { + _get := eql_get.NewGetFunc(p.tp) + return _get(id) +} + +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status +func (p *MethodEql) GetStatus(id string) *eql_get_status.GetStatus { + _getstatus := eql_get_status.NewGetStatusFunc(p.tp) + return _getstatus(id) +} + +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. +// EQL assumes each document in a data stream or index corresponds to an event. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search +func (p *MethodEql) Search(index string) *eql_search.Search { + _search := eql_search.NewSearchFunc(p.tp) + return _search(index) +} + +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. 
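+//
+// A hypothetical usage sketch (it assumes an initialized *MethodEsql value
+// `esql`, a context.Context `ctx`, and the typed API's `Raw` and `Do` builder
+// methods):
+//
+//	res, err := esql.AsyncQuery().
+//		Raw(strings.NewReader(`{"query": "FROM my-index | LIMIT 10"}`)).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	// poll esql.AsyncQueryGet(id) with the returned ID until results are ready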
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query
+func (p *MethodEsql) AsyncQuery() *esql_async_query.AsyncQuery {
+	_asyncquery := esql_async_query.NewAsyncQueryFunc(p.tp)
+	return _asyncquery()
+}
+
+// Delete an async ES|QL query.
+// If the query is still running, it is cancelled.
+// Otherwise, the stored results are deleted.
+//
+// If the Elasticsearch security features are enabled, only the following users
+// can use this API to delete a query:
+//
+// * The authenticated user that submitted the original query request
+// * Users with the `cancel_task` cluster privilege
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete
+func (p *MethodEsql) AsyncQueryDelete(id string) *esql_async_query_delete.AsyncQueryDelete {
+	_asyncquerydelete := esql_async_query_delete.NewAsyncQueryDeleteFunc(p.tp)
+	return _asyncquerydelete(id)
+}
+
+// Get async ES|QL query results.
+// Get the current status and available results or stored results for an ES|QL
+// asynchronous query.
+// If the Elasticsearch security features are enabled, only the user who first
+// submitted the ES|QL query can retrieve the results using this API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get
+func (p *MethodEsql) AsyncQueryGet(id string) *esql_async_query_get.AsyncQueryGet {
+	_asyncqueryget := esql_async_query_get.NewAsyncQueryGetFunc(p.tp)
+	return _asyncqueryget(id)
+}
+
+// Stop an async ES|QL query.
+//
+// This API interrupts the query execution and returns the results so far.
+// If the Elasticsearch security features are enabled, only the user who first
+// submitted the ES|QL query can stop it.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop
+func (p *MethodEsql) AsyncQueryStop(id string) *esql_async_query_stop.AsyncQueryStop {
+	_asyncquerystop := esql_async_query_stop.NewAsyncQueryStopFunc(p.tp)
+	return _asyncquerystop(id)
+}
+
+// Get information about a specific running ES|QL query.
+// Returns an object with extended information about a running ES|QL query.
+func (p *MethodEsql) GetQuery(id string) *esql_get_query.GetQuery {
+	_getquery := esql_get_query.NewGetQueryFunc(p.tp)
+	return _getquery(id)
+}
+
+// Get information about running ES|QL queries.
+// Returns an object containing IDs and other information about the running
+// ES|QL queries.
+func (p *MethodEsql) ListQueries() *esql_list_queries.ListQueries {
+	_listqueries := esql_list_queries.NewListQueriesFunc(p.tp)
+	return _listqueries()
+}
+
+// Run an ES|QL query.
+// Get search results for an ES|QL (Elasticsearch query language) query.
+// https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest
+func (p *MethodEsql) Query() *esql_query.Query {
+	_query := esql_query.NewQueryFunc(p.tp)
+	return _query()
+}
+
+// Get the features.
+// Get a list of features that can be included in snapshots using the
+// `feature_states` field when creating a snapshot.
+// You can use this API to determine which feature states to include when taking
+// a snapshot.
+// By default, all feature states are included in a snapshot if that snapshot
+// includes the global state, or none if it does not.
+//
+// A feature state includes one or more system indices necessary for a given
+// feature to function.
+// In order to ensure data integrity, all system indices that comprise a feature
+// state are snapshotted and restored together.
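+//
+// A hypothetical sketch of listing the available feature states (it assumes
+// an initialized *MethodFeatures value `features`, a context.Context `ctx`,
+// and a response type exposing a `Features` slice with `Name` fields):
+//
+//	res, err := features.GetFeatures().Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, f := range res.Features {
+//		fmt.Println(f.Name) // candidate values for `feature_states`
+//	}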
+// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features +func (p *MethodFeatures) GetFeatures() *features_get_features.GetFeatures { + _getfeatures := features_get_features.NewGetFeaturesFunc(p.tp) + return _getfeatures() +} + +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. +// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. +// +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features +func (p *MethodFeatures) ResetFeatures() *features_reset_features.ResetFeatures { + _resetfeatures := features_reset_features.NewResetFeaturesFunc(p.tp) + return _resetfeatures() +} + +// Get global checkpoints. +// +// Get the current global checkpoints for an index. +// This API is designed for internal use by the Fleet server project. +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet +func (p *MethodFleet) GlobalCheckpoints(index string) *fleet_global_checkpoints.GlobalCheckpoints { + _globalcheckpoints := fleet_global_checkpoints.NewGlobalCheckpointsFunc(p.tp) + return _globalcheckpoints(index) +} + +// Run multiple Fleet searches. +// Run several Fleet searches with a single API request. +// The API follows the same structure as the multi search API. +// However, similar to the Fleet search API, it supports the +// `wait_for_checkpoints` parameter. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch +func (p *MethodFleet) Msearch() *fleet_msearch.Msearch { + _msearch := fleet_msearch.NewMsearchFunc(p.tp) + return _msearch() +} + +// Creates a secret stored by Fleet. +func (p *MethodFleet) PostSecret() *fleet_post_secret.PostSecret { + _postsecret := fleet_post_secret.NewPostSecretFunc(p.tp) + return _postsecret() +} + +// Run a Fleet search. +// The purpose of the Fleet search API is to provide an API where the search +// will be run only +// after the provided checkpoint has been processed and is visible for searches +// inside of Elasticsearch. 
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search
+func (p *MethodFleet) Search(index string) *fleet_search.Search {
+	_search := fleet_search.NewSearchFunc(p.tp)
+	return _search(index)
+}
+
+// Explore graph analytics.
+// Extract and summarize information about the documents and terms in an
+// Elasticsearch data stream or index.
+// The easiest way to understand the behavior of this API is to use the Graph UI
+// to explore connections.
+// An initial request to the `_explore` API contains a seed query that
+// identifies the documents of interest and specifies the fields that define the
+// vertices and connections you want to include in the graph.
+// Subsequent requests enable you to spider out from one or more vertices of
+// interest.
+// You can exclude vertices that have already been returned.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph
+func (p *MethodGraph) Explore(index string) *graph_explore.Explore {
+	_explore := graph_explore.NewExploreFunc(p.tp)
+	return _explore(index)
+}
+
+// Delete a lifecycle policy.
+// You cannot delete policies that are currently in use. If the policy is being
+// used to manage any indices, the request fails and returns an error.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle
+func (p *MethodIlm) DeleteLifecycle(policy string) *ilm_delete_lifecycle.DeleteLifecycle {
+	_deletelifecycle := ilm_delete_lifecycle.NewDeleteLifecycleFunc(p.tp)
+	return _deletelifecycle(policy)
+}
+
+// Explain the lifecycle state.
+// Get the current lifecycle status for one or more indices.
+// For data streams, the API retrieves the current lifecycle status for the
+// stream's backing indices.
+//
+// The response indicates when the index entered each lifecycle state, provides
+// the definition of the running phase, and includes information about any
+// failures.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle
+func (p *MethodIlm) ExplainLifecycle(index string) *ilm_explain_lifecycle.ExplainLifecycle {
+	_explainlifecycle := ilm_explain_lifecycle.NewExplainLifecycleFunc(p.tp)
+	return _explainlifecycle(index)
+}
+
+// Get lifecycle policies.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle
+func (p *MethodIlm) GetLifecycle() *ilm_get_lifecycle.GetLifecycle {
+	_getlifecycle := ilm_get_lifecycle.NewGetLifecycleFunc(p.tp)
+	return _getlifecycle()
+}
+
+// Get the ILM status.
+//
+// Get the current index lifecycle management status.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status
+func (p *MethodIlm) GetStatus() *ilm_get_status.GetStatus {
+	_getstatus := ilm_get_status.NewGetStatusFunc(p.tp)
+	return _getstatus()
+}
+
+// Migrate to data tiers routing.
+// Switch the indices, ILM policies, and legacy, composable, and component
+// templates from using custom node attributes and attribute-based allocation
+// filters to using data tiers.
+// Optionally, delete one legacy index template.
+// Using node roles enables ILM to automatically move the indices between data
+// tiers.
+//
+// Migrating away from custom node attribute routing can be performed manually.
+// This API provides an automated way of performing three out of the four manual
+// steps listed in the migration guide:
+//
+// 1. Stop setting the custom hot attribute on new indices.
+// 1. Remove custom allocation settings from existing ILM policies.
+// 1. Replace custom allocation settings from existing indices with the
+// corresponding tier preference.
+//
+// ILM must be stopped before performing the migration.
+// Use the stop ILM and get ILM status APIs to wait until the reported operation
+// mode is `STOPPED`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers
+func (p *MethodIlm) MigrateToDataTiers() *ilm_migrate_to_data_tiers.MigrateToDataTiers {
+	_migratetodatatiers := ilm_migrate_to_data_tiers.NewMigrateToDataTiersFunc(p.tp)
+	return _migratetodatatiers()
+}
+
+// Move to a lifecycle step.
+// Manually move an index into a specific step in the lifecycle policy and run
+// that step.
+//
+// WARNING: This operation can result in the loss of data. Manually moving an
+// index into a specific step runs that step even if it has already been
+// performed. This is a potentially destructive action and this should be
+// considered an expert level API.
+//
+// You must specify both the current step and the step to be executed in the
+// body of the request.
+// The request will fail if the current step does not match the step currently
+// running for the index.
+// This is to prevent the index from being moved from an unexpected step into
+// the next step.
+//
+// When specifying the target (`next_step`) to which the index will be moved,
+// either the name or both the action and name fields are optional.
+// If only the phase is specified, the index will move to the first step of the
+// first action in the target phase.
+// If the phase and action are specified, the index will move to the first step
+// of the specified action in the specified phase.
+// Only actions specified in the ILM policy are considered valid.
+// An index cannot move to a step that is not part of its policy.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step
+func (p *MethodIlm) MoveToStep(index string) *ilm_move_to_step.MoveToStep {
+	_movetostep := ilm_move_to_step.NewMoveToStepFunc(p.tp)
+	return _movetostep(index)
+}
+
+// Create or update a lifecycle policy.
+// If the specified policy exists, it is replaced and the policy version is
+// incremented.
+//
+// NOTE: Only the latest version of the policy is stored; you cannot revert to
+// previous versions.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle
+func (p *MethodIlm) PutLifecycle(policy string) *ilm_put_lifecycle.PutLifecycle {
+	_putlifecycle := ilm_put_lifecycle.NewPutLifecycleFunc(p.tp)
+	return _putlifecycle(policy)
+}
+
+// Remove policies from an index.
+// Remove the assigned lifecycle policies from an index or a data stream's
+// backing indices.
+// It also stops managing the indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy
+func (p *MethodIlm) RemovePolicy(index string) *ilm_remove_policy.RemovePolicy {
+	_removepolicy := ilm_remove_policy.NewRemovePolicyFunc(p.tp)
+	return _removepolicy(index)
+}
+
+// Retry a policy.
+// Retry running the lifecycle policy for an index that is in the ERROR step.
+// The API sets the policy back to the step where the error occurred and runs
+// the step.
+// Use the explain lifecycle state API to determine whether an index is in the
+// ERROR step.
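+//
+// A hypothetical usage sketch (it assumes an initialized *MethodIlm value
+// `ilm` and a context.Context `ctx`):
+//
+//	res, err := ilm.Retry("my-index-000001").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res // acknowledgement that the retry was scheduled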
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry
+func (p *MethodIlm) Retry(index string) *ilm_retry.Retry {
+	_retry := ilm_retry.NewRetryFunc(p.tp)
+	return _retry(index)
+}
+
+// Start the ILM plugin.
+// Start the index lifecycle management plugin if it is currently stopped.
+// ILM is started automatically when the cluster is formed.
+// Restarting ILM is necessary only when it has been stopped using the stop ILM
+// API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start
+func (p *MethodIlm) Start() *ilm_start.Start {
+	_start := ilm_start.NewStartFunc(p.tp)
+	return _start()
+}
+
+// Stop the ILM plugin.
+// Halt all lifecycle management operations and stop the index lifecycle
+// management plugin.
+// This is useful when you are performing maintenance on the cluster and need to
+// prevent ILM from performing any actions on your indices.
+//
+// The API returns as soon as the stop request has been acknowledged, but the
+// plugin might continue to run until in-progress operations complete and the
+// plugin can be safely stopped.
+// Use the get ILM status API to check whether ILM is running.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop
+func (p *MethodIlm) Stop() *ilm_stop.Stop {
+	_stop := ilm_stop.NewStopFunc(p.tp)
+	return _stop()
+}
+
+// Add an index block.
+//
+// Add an index block to an index.
+// Index blocks limit the operations allowed on an index by blocking specific
+// operation types.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block
+func (p *MethodIndices) AddBlock(index, block string) *indices_add_block.AddBlock {
+	_addblock := indices_add_block.NewAddBlockFunc(p.tp)
+	return _addblock(index, block)
+}
+
+// Get tokens from text analysis.
+// The analyze API performs analysis on a text string and returns the resulting
+// tokens.
+//
+// Generating an excessive amount of tokens may cause a node to run out of
+// memory.
+// The `index.analyze.max_token_count` setting enables you to limit the number
+// of tokens that can be produced.
+// If more tokens than this limit are generated, an error occurs.
+// The `_analyze` endpoint without a specified index will always use `10000` as
+// its limit.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze
+func (p *MethodIndices) Analyze() *indices_analyze.Analyze {
+	_analyze := indices_analyze.NewAnalyzeFunc(p.tp)
+	return _analyze()
+}
+
+// Cancel a migration reindex operation.
+//
+// Cancel a migration reindex attempt for a data stream or index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex
+func (p *MethodIndices) CancelMigrateReindex(index string) *indices_cancel_migrate_reindex.CancelMigrateReindex {
+	_cancelmigratereindex := indices_cancel_migrate_reindex.NewCancelMigrateReindexFunc(p.tp)
+	return _cancelmigratereindex(index)
+}
+
+// Clear the cache.
+// Clear the cache of one or more indices.
+// For data streams, the API clears the caches of the stream's backing indices.
+//
+// By default, the clear cache API clears all caches.
+// To clear only specific caches, use the `fielddata`, `query`, or `request`
+// parameters.
+// To clear the cache only of specific fields, use the `fields` parameter.
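+//
+// A hypothetical sketch that clears only the field data cache of one index
+// (it assumes an initialized *MethodIndices value `idx`, a context.Context
+// `ctx`, and `Index` and `Fielddata` builder setters for the corresponding
+// parameters):
+//
+//	res, err := idx.ClearCache().
+//		Index("my-index").
+//		Fielddata(true).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res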
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache
+func (p *MethodIndices) ClearCache() *indices_clear_cache.ClearCache {
+	_clearcache := indices_clear_cache.NewClearCacheFunc(p.tp)
+	return _clearcache()
+}
+
+// Clone an index.
+// Clone an existing index into a new index.
+// Each original primary shard is cloned into a new primary shard in the new
+// index.
+//
+// IMPORTANT: Elasticsearch does not apply index templates to the resulting
+// index.
+// The API also does not copy index metadata from the original index.
+// Index metadata includes aliases, index lifecycle management phase
+// definitions, and cross-cluster replication (CCR) follower information.
+// For example, if you clone a CCR follower index, the resulting clone will not
+// be a follower index.
+//
+// The clone API copies most index settings from the source index to the
+// resulting index, with the exception of `index.number_of_replicas` and
+// `index.auto_expand_replicas`.
+// To set the number of replicas in the resulting index, configure these
+// settings in the clone request.
+//
+// Cloning works as follows:
+//
+// * First, it creates a new target index with the same definition as the source
+// index.
+// * Then it hard-links segments from the source index into the target index. If
+// the file system does not support hard-linking, all segments are copied into
+// the new index, which is a much more time-consuming process.
+// * Finally, it recovers the target index as though it were a closed index
+// which had just been re-opened.
+//
+// IMPORTANT: Indices can only be cloned if they meet the following
+// requirements:
+//
+// * The index must be marked as read-only and have a cluster health status of
+// green.
+// * The target index must not exist.
+// * The source index must have the same number of primary shards as the target
+// index.
+// * The node handling the clone process must have sufficient free disk space to
+// accommodate a second copy of the existing index.
+//
+// The current write index on a data stream cannot be cloned.
+// In order to clone the current write index, the data stream must first be
+// rolled over so that a new write index is created and then the previous write
+// index can be cloned.
+//
+// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of
+// the source index will be used for the target index.
+//
+// **Monitor the cloning process**
+//
+// The cloning process can be monitored with the cat recovery API, or the
+// cluster health API can be used to wait until all primary shards have been
+// allocated by setting the `wait_for_status` parameter to `yellow`.
+//
+// The `_clone` API returns as soon as the target index has been added to the
+// cluster state, before any shards have been allocated.
+// At this point, all shards are in the unassigned state.
+// If, for any reason, the target index can't be allocated, its primary shard
+// will remain unassigned until it can be allocated on that node.
+//
+// Once the primary shard is allocated, it moves to the initializing state, and
+// the clone process begins.
+// When the clone operation completes, the shard will become active.
+// At that point, Elasticsearch will try to allocate any replicas and may decide
+// to relocate the primary shard to another node.
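+//
+// A hypothetical usage sketch (it assumes an initialized *MethodIndices value
+// `idx`, a context.Context `ctx`, and that `my-index` was already made
+// read-only as required above):
+//
+//	res, err := idx.Clone("my-index", "my-cloned-index").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res // acknowledged / shards_acknowledged flags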
+//
+// **Wait for active shards**
+//
+// Because the clone operation creates a new index to clone the shards to, the
+// wait for active shards setting on index creation applies to the clone index
+// action as well.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone
+func (p *MethodIndices) Clone(index, target string) *indices_clone.Clone {
+	_clone := indices_clone.NewCloneFunc(p.tp)
+	return _clone(index, target)
+}
+
+// Close an index.
+// A closed index is blocked for read or write operations and does not allow all
+// operations that opened indices allow.
+// It is not possible to index documents or to search for documents in a closed
+// index.
+// Closed indices do not have to maintain internal data structures for indexing
+// or searching documents, which results in a smaller overhead on the cluster.
+//
+// When opening or closing an index, the master node is responsible for
+// restarting the index shards to reflect the new state of the index.
+// The shards will then go through the normal recovery process.
+// The data of opened and closed indices is automatically replicated by the
+// cluster to ensure that enough shard copies are safely kept around at all
+// times.
+//
+// You can open and close multiple indices.
+// An error is thrown if the request explicitly refers to a missing index.
+// This behaviour can be turned off using the `ignore_unavailable=true`
+// parameter.
+//
+// By default, you must explicitly name the indices you are opening or closing.
+// To open or close indices with `_all`, `*`, or other wildcard expressions,
+// change the `action.destructive_requires_name` setting to `false`. This
+// setting can also be changed with the cluster update settings API.
+//
+// Closed indices consume a significant amount of disk space, which can cause
+// problems in managed environments.
+// Closing indices can be turned off with the cluster settings API by setting
+// `cluster.indices.close.enable` to `false`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close
+func (p *MethodIndices) Close(index string) *indices_close.Close {
+	_close := indices_close.NewCloseFunc(p.tp)
+	return _close(index)
+}
+
+// Create an index.
+// You can use the create index API to add a new index to an Elasticsearch
+// cluster.
+// When creating an index, you can specify the following:
+//
+// * Settings for the index.
+// * Mappings for fields in the index.
+// * Index aliases.
+//
+// **Wait for active shards**
+//
+// By default, index creation will only return a response to the client when the
+// primary copies of each shard have been started, or the request times out.
+// The index creation response will indicate what happened.
+// For example, `acknowledged` indicates whether the index was successfully
+// created in the cluster, while `shards_acknowledged` indicates whether the
+// requisite number of shard copies were started for each shard in the index
+// before timing out.
+// Note that it is still possible for either `acknowledged` or
+// `shards_acknowledged` to be `false`, but for the index creation to be
+// successful.
+// These values simply indicate whether the operation completed before the
+// timeout.
+// If `acknowledged` is false, the request timed out before the cluster state
+// was updated with the newly created index, but it probably will be created
+// sometime soon.
+// If `shards_acknowledged` is false, then the request timed out before the
+// requisite number of shards were started (by default just the primaries), even
+// if the cluster state was successfully updated to reflect the newly created
+// index (that is to say, `acknowledged` is `true`).
+//
+// You can change the default of only waiting for the primary shards to start
+// through the index setting `index.write.wait_for_active_shards`.
+// Note that changing this setting will also affect the `wait_for_active_shards`
+// value on all subsequent write operations.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create
+func (p *MethodIndices) Create(index string) *indices_create.Create {
+	_create := indices_create.NewCreateFunc(p.tp)
+	return _create(index)
+}
+
+// Create a data stream.
+//
+// You must have a matching index template with data stream enabled.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream
+func (p *MethodIndices) CreateDataStream(name string) *indices_create_data_stream.CreateDataStream {
+	_createdatastream := indices_create_data_stream.NewCreateDataStreamFunc(p.tp)
+	return _createdatastream(name)
+}
+
+// Create an index from a source index.
+//
+// Copy the mappings and settings from the source index to a destination index
+// while allowing request settings and mappings to override the source values.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from
+func (p *MethodIndices) CreateFrom(source, dest string) *indices_create_from.CreateFrom {
+	_createfrom := indices_create_from.NewCreateFromFunc(p.tp)
+	return _createfrom(source, dest)
+}
+
+// Get data stream stats.
+//
+// Get statistics for one or more data streams.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1
+func (p *MethodIndices) DataStreamsStats() *indices_data_streams_stats.DataStreamsStats {
+	_datastreamsstats := indices_data_streams_stats.NewDataStreamsStatsFunc(p.tp)
+	return _datastreamsstats()
+}
+
+// Delete indices.
+// Deleting an index deletes its documents, shards, and metadata.
+// It does not delete related Kibana components, such as data views,
+// visualizations, or dashboards.
+//
+// You cannot delete the current write index of a data stream.
+// To delete the index, you must roll over the data stream so a new write index
+// is created.
+// You can then use the delete index API to delete the previous write index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete
+func (p *MethodIndices) Delete(index string) *indices_delete.Delete {
+	_delete := indices_delete.NewDeleteFunc(p.tp)
+	return _delete(index)
+}
+
+// Delete an alias.
+// Removes a data stream or index from an alias.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias
+func (p *MethodIndices) DeleteAlias(index, name string) *indices_delete_alias.DeleteAlias {
+	_deletealias := indices_delete_alias.NewDeleteAliasFunc(p.tp)
+	return _deletealias(index, name)
+}
+
+// Delete data stream lifecycles.
+// Removes the data stream lifecycle from a data stream, so that it is no
+// longer managed by the data stream lifecycle.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle
+func (p *MethodIndices) DeleteDataLifecycle(name string) *indices_delete_data_lifecycle.DeleteDataLifecycle {
+	_deletedatalifecycle := indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(p.tp)
+	return _deletedatalifecycle(name)
+}
+
+// Delete data streams.
+// Deletes one or more data streams and their backing indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream
+func (p *MethodIndices) DeleteDataStream(name string) *indices_delete_data_stream.DeleteDataStream {
+	_deletedatastream := indices_delete_data_stream.NewDeleteDataStreamFunc(p.tp)
+	return _deletedatastream(name)
+}
+
+// Delete data stream options.
+// Removes the data stream options from a data stream.
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html
+func (p *MethodIndices) DeleteDataStreamOptions(name string) *indices_delete_data_stream_options.DeleteDataStreamOptions {
+	_deletedatastreamoptions := indices_delete_data_stream_options.NewDeleteDataStreamOptionsFunc(p.tp)
+	return _deletedatastreamoptions(name)
+}
+
+// Delete an index template.
+// The provided `<index-template>` may contain multiple template names separated
+// by a comma. If multiple template names are specified, then there is no
+// wildcard support and the provided names should match completely with
+// existing templates.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template
+func (p *MethodIndices) DeleteIndexTemplate(name string) *indices_delete_index_template.DeleteIndexTemplate {
+	_deleteindextemplate := indices_delete_index_template.NewDeleteIndexTemplateFunc(p.tp)
+	return _deleteindextemplate(name)
+}
+
+// Delete a legacy index template.
+// IMPORTANT: This documentation is about legacy index templates, which are
+// deprecated and will be replaced by the composable templates introduced in
+// Elasticsearch 7.8.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template
+func (p *MethodIndices) DeleteTemplate(name string) *indices_delete_template.DeleteTemplate {
+	_deletetemplate := indices_delete_template.NewDeleteTemplateFunc(p.tp)
+	return _deletetemplate(name)
+}
+
+// Analyze the index disk usage.
+// Analyze the disk usage of each field of an index or data stream.
+// This API might not support indices created in previous Elasticsearch
+// versions.
+// The result of a small index can be inaccurate as some parts of an index might
+// not be analyzed by the API.
+//
+// NOTE: The total size of fields of the analyzed shards of the index in the
+// response is usually smaller than the index `store_size` value because some
+// small metadata files are ignored and some parts of data files might not be
+// scanned by the API.
+// Since stored fields are stored together in a compressed format, the sizes of
+// stored fields are also estimates and can be inaccurate.
+// The stored size of the `_id` field is likely underestimated while the
+// `_source` field is overestimated.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage
+func (p *MethodIndices) DiskUsage(index string) *indices_disk_usage.DiskUsage {
+	_diskusage := indices_disk_usage.NewDiskUsageFunc(p.tp)
+	return _diskusage(index)
+}
+
+// Downsample an index.
+// Aggregate a time series (TSDS) index and store pre-computed statistical +// summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric +// field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. +// The source index must be read only (`index.blocks.write: true`). +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample +func (p *MethodIndices) Downsample(index, targetindex string) *indices_downsample.Downsample { + _downsample := indices_downsample.NewDownsampleFunc(p.tp) + return _downsample(index, targetindex) +} + +// Check indices. +// Check if one or more indices, index aliases, or data streams exist. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists +func (p *MethodIndices) Exists(index string) *indices_exists.Exists { + _exists := indices_exists.NewExistsFunc(p.tp) + return _exists(index) +} + +// Check aliases. +// +// Check if one or more data stream or index aliases exist. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias +func (p *MethodIndices) ExistsAlias(name string) *indices_exists_alias.ExistsAlias { + _existsalias := indices_exists_alias.NewExistsAliasFunc(p.tp) + return _existsalias(name) +} + +// Check index templates. +// +// Check whether index templates exist. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template +func (p *MethodIndices) ExistsIndexTemplate(name string) *indices_exists_index_template.ExistsIndexTemplate { + _existsindextemplate := indices_exists_index_template.NewExistsIndexTemplateFunc(p.tp) + return _existsindextemplate(name) +} + +// Check existence of index templates. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template +func (p *MethodIndices) ExistsTemplate(name string) *indices_exists_template.ExistsTemplate { + _existstemplate := indices_exists_template.NewExistsTemplateFunc(p.tp) + return _existstemplate(name) +} + +// Get the status for a data stream lifecycle. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle +func (p *MethodIndices) ExplainDataLifecycle(index string) *indices_explain_data_lifecycle.ExplainDataLifecycle { + _explaindatalifecycle := indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(p.tp) + return _explaindatalifecycle(index) +} + +// Get field usage stats. +// Get field usage information for each shard and field of an index. 
+// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats +func (p *MethodIndices) FieldUsageStats(index string) *indices_field_usage_stats.FieldUsageStats { + _fieldusagestats := indices_field_usage_stats.NewFieldUsageStatsFunc(p.tp) + return _fieldusagestats(index) +} + +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush +func (p *MethodIndices) Flush() *indices_flush.Flush { + _flush := indices_flush.NewFlushFunc(p.tp) + return _flush() +} + +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). +// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. 
+// If you regularly force merge an index receiving writes, this can also make
+// snapshots more expensive, since the new documents can't be backed up
+// incrementally.
+//
+// **Blocks during a force merge**
+//
+// Calls to this API block until the merge is complete (unless the request
+// contains `wait_for_completion=false`).
+// If the client connection is lost before completion then the force merge
+// process will continue in the background.
+// Any new requests to force merge the same indices will also block until the
+// ongoing force merge is complete.
+//
+// **Running force merge asynchronously**
+//
+// If the request contains `wait_for_completion=false`, Elasticsearch performs
+// some preflight checks, launches the request, and returns a task you can use
+// to get the status of the task.
+// However, you cannot cancel this task as the force merge task is not
+// cancelable.
+// Elasticsearch creates a record of this task as a document at
+// `_tasks/<task_id>`.
+// When you are done with a task, you should delete the task document so
+// Elasticsearch can reclaim the space.
+//
+// **Force merging multiple indices**
+//
+// You can force merge multiple indices with a single request by targeting:
+//
+// * One or more data streams that contain multiple backing indices
+// * Multiple indices
+// * One or more aliases
+// * All data streams and indices in a cluster
+//
+// Each targeted shard is force-merged separately using the `force_merge`
+// threadpool.
+// By default each node only has a single `force_merge` thread which means that
+// the shards on that node are force-merged one at a time.
+// If you expand the `force_merge` threadpool on a node then it will force merge
+// its shards in parallel.
+//
+// Force merge temporarily increases the storage used by the shard being
+// merged, as it may require free space of up to triple the shard's size if the
+// `max_num_segments` parameter is set to `1`, to rewrite all segments into a
+// new one.
+//
+// **Data streams and time-based indices**
+//
+// Force-merging is useful for managing a data stream's older backing indices
+// and other time-based indices, particularly after a rollover.
+// In these cases, each index only receives indexing traffic for a certain
+// period of time.
+// Once an index receives no more writes, its shards can be force-merged to a
+// single segment.
+// This can be a good idea because single-segment shards can sometimes use
+// simpler and more efficient data structures to perform searches.
+// For example:
+//
+// ```
+// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+// ```
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge
+func (p *MethodIndices) Forcemerge() *indices_forcemerge.Forcemerge {
+	_forcemerge := indices_forcemerge.NewForcemergeFunc(p.tp)
+	return _forcemerge()
+}
+
+// Get index information.
+// Get information about one or more indices. For data streams, the API returns
+// information about the stream’s backing indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get
+func (p *MethodIndices) Get(index string) *indices_get.Get {
+	_get := indices_get.NewGetFunc(p.tp)
+	return _get(index)
+}
+
+// Get aliases.
+// Retrieves information for one or more data stream or index aliases.
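+//
+// For illustration, a hedged usage sketch (not generated code; it assumes an
+// `idx *MethodIndices` and a `ctx context.Context` are already in scope, and
+// that the returned upstream builder is executed with `Do`):
+//
+// ```
+// res, err := idx.GetAlias().Do(ctx)
+// if err != nil {
+// 	// handle transport or API error
+// }
+// _ = res // typed response describing the matching aliases
+// ```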
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias +func (p *MethodIndices) GetAlias() *indices_get_alias.GetAlias { + _getalias := indices_get_alias.NewGetAliasFunc(p.tp) + return _getalias() +} + +// Get data stream lifecycles. +// +// Get the data stream lifecycle configuration of one or more data streams. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle +func (p *MethodIndices) GetDataLifecycle(name string) *indices_get_data_lifecycle.GetDataLifecycle { + _getdatalifecycle := indices_get_data_lifecycle.NewGetDataLifecycleFunc(p.tp) + return _getdatalifecycle(name) +} + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats +func (p *MethodIndices) GetDataLifecycleStats() *indices_get_data_lifecycle_stats.GetDataLifecycleStats { + _getdatalifecyclestats := indices_get_data_lifecycle_stats.NewGetDataLifecycleStatsFunc(p.tp) + return _getdatalifecyclestats() +} + +// Get data streams. +// +// Get information about one or more data streams. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream +func (p *MethodIndices) GetDataStream() *indices_get_data_stream.GetDataStream { + _getdatastream := indices_get_data_stream.NewGetDataStreamFunc(p.tp) + return _getdatastream() +} + +// Get data stream options. +// +// Get the data stream options configuration of one or more data streams. +// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +func (p *MethodIndices) GetDataStreamOptions(name string) *indices_get_data_stream_options.GetDataStreamOptions { + _getdatastreamoptions := indices_get_data_stream_options.NewGetDataStreamOptionsFunc(p.tp) + return _getdatastreamoptions(name) +} + +// Get data stream settings. +// +// Get setting information for one or more data streams. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings +func (p *MethodIndices) GetDataStreamSettings(name string) *indices_get_data_stream_settings.GetDataStreamSettings { + _getdatastreamsettings := indices_get_data_stream_settings.NewGetDataStreamSettingsFunc(p.tp) + return _getdatastreamsettings(name) +} + +// Get mapping definitions. +// Retrieves mapping definitions for one or more fields. +// For data streams, the API retrieves field mappings for the stream’s backing +// indices. +// +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping +func (p *MethodIndices) GetFieldMapping(fields string) *indices_get_field_mapping.GetFieldMapping { + _getfieldmapping := indices_get_field_mapping.NewGetFieldMappingFunc(p.tp) + return _getfieldmapping(fields) +} + +// Get index templates. +// Get information about one or more index templates. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template +func (p *MethodIndices) GetIndexTemplate() *indices_get_index_template.GetIndexTemplate { + _getindextemplate := indices_get_index_template.NewGetIndexTemplateFunc(p.tp) + return _getindextemplate() +} + +// Get mapping definitions. +// For data streams, the API retrieves mappings for the stream’s backing +// indices. 
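+//
+// A hedged sketch (hypothetical `idx *MethodIndices` and `ctx context.Context`):
+//
+// ```
+// res, err := idx.GetMapping().Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res // mapping definitions keyed by index name
+// ```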
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping +func (p *MethodIndices) GetMapping() *indices_get_mapping.GetMapping { + _getmapping := indices_get_mapping.NewGetMappingFunc(p.tp) + return _getmapping() +} + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration +func (p *MethodIndices) GetMigrateReindexStatus(index string) *indices_get_migrate_reindex_status.GetMigrateReindexStatus { + _getmigratereindexstatus := indices_get_migrate_reindex_status.NewGetMigrateReindexStatusFunc(p.tp) + return _getmigratereindexstatus(index) +} + +// Get index settings. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings +func (p *MethodIndices) GetSettings() *indices_get_settings.GetSettings { + _getsettings := indices_get_settings.NewGetSettingsFunc(p.tp) + return _getsettings() +} + +// Get legacy index templates. +// Get information about one or more index templates. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template +func (p *MethodIndices) GetTemplate() *indices_get_template.GetTemplate { + _gettemplate := indices_get_template.NewGetTemplateFunc(p.tp) + return _gettemplate() +} + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex +func (p *MethodIndices) MigrateReindex() *indices_migrate_reindex.MigrateReindex { + _migratereindex := indices_migrate_reindex.NewMigrateReindexFunc(p.tp) + return _migratereindex() +} + +// Convert an index alias to a data stream. +// Converts an index alias to a data stream. +// You must have a matching index template that is data stream enabled. +// The alias must meet the following criteria: +// The alias must have a write index; +// All indices for the alias must have a `@timestamp` field mapping of a `date` +// or `date_nanos` field type; +// The alias must not have any filters; +// The alias must not use custom routing. +// If successful, the request removes the alias and creates a data stream with +// the same name. +// The indices for the alias become hidden backing indices for the stream. +// The write index for the alias becomes the write index for the stream. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream +func (p *MethodIndices) MigrateToDataStream(name string) *indices_migrate_to_data_stream.MigrateToDataStream { + _migratetodatastream := indices_migrate_to_data_stream.NewMigrateToDataStreamFunc(p.tp) + return _migratetodatastream(name) +} + +// Update data streams. +// Performs one or more data stream modification actions in a single atomic +// operation. 
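+//
+// A hedged sketch (hypothetical `idx *MethodIndices` and `ctx context.Context`;
+// the add/remove actions are supplied on the returned builder's request body
+// before execution):
+//
+// ```
+// res, err := idx.ModifyDataStream().Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```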
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream +func (p *MethodIndices) ModifyDataStream() *indices_modify_data_stream.ModifyDataStream { + _modifydatastream := indices_modify_data_stream.NewModifyDataStreamFunc(p.tp) + return _modifydatastream() +} + +// Open a closed index. +// For data streams, the API opens any closed backing indices. +// +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open +func (p *MethodIndices) Open(index string) *indices_open.Open { + _open := indices_open.NewOpenFunc(p.tp) + return _open(index) +} + +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. 
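+//
+// A hedged sketch (hypothetical `idx *MethodIndices`, `ctx context.Context`,
+// and data stream name):
+//
+// ```
+// res, err := idx.PromoteDataStream("my-data-stream").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```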
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream
+func (p *MethodIndices) PromoteDataStream(name string) *indices_promote_data_stream.PromoteDataStream {
+	_promotedatastream := indices_promote_data_stream.NewPromoteDataStreamFunc(p.tp)
+	return _promotedatastream(name)
+}
+
+// Create or update an alias.
+// Adds a data stream or index to an alias.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias
+func (p *MethodIndices) PutAlias(index, name string) *indices_put_alias.PutAlias {
+	_putalias := indices_put_alias.NewPutAliasFunc(p.tp)
+	return _putalias(index, name)
+}
+
+// Update data stream lifecycles.
+// Update the data stream lifecycle of the specified data streams.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle
+func (p *MethodIndices) PutDataLifecycle(name string) *indices_put_data_lifecycle.PutDataLifecycle {
+	_putdatalifecycle := indices_put_data_lifecycle.NewPutDataLifecycleFunc(p.tp)
+	return _putdatalifecycle(name)
+}
+
+// Update data stream options.
+// Update the data stream options of the specified data streams.
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html
+func (p *MethodIndices) PutDataStreamOptions(name string) *indices_put_data_stream_options.PutDataStreamOptions {
+	_putdatastreamoptions := indices_put_data_stream_options.NewPutDataStreamOptionsFunc(p.tp)
+	return _putdatastreamoptions(name)
+}
+
+// Update data stream settings.
+//
+// This API can be used to override settings on specific data streams. These
+// overrides will take precedence over what is specified in the template that
+// the data stream matches. To prevent your data stream from getting into an
+// invalid state, only certain settings are allowed. If possible, the setting
+// change is applied to all backing indices. Otherwise, it will be applied when
+// the data stream is next rolled over.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings
+func (p *MethodIndices) PutDataStreamSettings(name string) *indices_put_data_stream_settings.PutDataStreamSettings {
+	_putdatastreamsettings := indices_put_data_stream_settings.NewPutDataStreamSettingsFunc(p.tp)
+	return _putdatastreamsettings(name)
+}
+
+// Create or update an index template.
+// Index templates define settings, mappings, and aliases that can be applied
+// automatically to new indices.
+//
+// Elasticsearch applies templates to new indices based on a wildcard pattern
+// that matches the index name.
+// Index templates are applied during data stream or index creation.
+// For data streams, these settings and mappings are applied when the stream's
+// backing indices are created.
+// Settings and mappings specified in a create index API request override any
+// settings or mappings specified in an index template.
+// Changes to index templates do not affect existing indices, including the
+// existing backing indices of a data stream.
+//
+// You can use C-style `/* *\/` block comments in index templates.
+// You can include comments anywhere in the request body, except before the
+// opening curly bracket.
+//
+// **Multiple matching templates**
+//
+// If multiple index templates match the name of a new index or data stream, the
+// template with the highest priority is used.
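+//
+// A hedged sketch (hypothetical `idx *MethodIndices` and `ctx context.Context`;
+// the index patterns, priority, and template body are set on the returned
+// builder before execution):
+//
+// ```
+// res, err := idx.PutIndexTemplate("my-template").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```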
+// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. +// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template +func (p *MethodIndices) PutIndexTemplate(name string) *indices_put_index_template.PutIndexTemplate { + _putindextemplate := indices_put_index_template.NewPutIndexTemplateFunc(p.tp) + return _putindextemplate(name) +} + +// Update field mappings. +// Add new fields to an existing data stream or index. +// You can use the update mapping API to: +// +// - Add a new field to an existing index +// - Update mappings for multiple indices in a single request +// - Add new properties to an object field +// - Enable multi-fields for an existing field +// - Update supported mapping parameters +// - Change a field's mapping using reindexing +// - Rename a field using a field alias +// +// Learn how to use the update mapping API with practical examples in the +// [Update mapping API +// examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) +// guide. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping +func (p *MethodIndices) PutMapping(index string) *indices_put_mapping.PutMapping { + _putmapping := indices_put_mapping.NewPutMappingFunc(p.tp) + return _putmapping(index) +} + +// Update index settings. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index settings documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. +// +// For performance optimization during bulk indexing, you can disable the +// refresh interval. +// Refer to [disable refresh +// interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) +// for an example. +// There are multiple valid ways to represent index settings in the request +// body. 
You can specify only the setting, for example:
+//
+// ```
+//
+// {
+//   "number_of_replicas": 1
+// }
+//
+// ```
+//
+// Or you can use an `index` setting object:
+// ```
+//
+// {
+//   "index": {
+//     "number_of_replicas": 1
+//   }
+// }
+//
+// ```
+//
+// Or you can use dot notation:
+// ```
+//
+// {
+//   "index.number_of_replicas": 1
+// }
+//
+// ```
+//
+// Or you can embed any of the aforementioned options in a `settings` object.
+// For example:
+//
+// ```
+//
+// {
+//   "settings": {
+//     "index": {
+//       "number_of_replicas": 1
+//     }
+//   }
+// }
+//
+// ```
+//
+// NOTE: You can only define new analyzers on closed indices.
+// To add an analyzer, you must close the index, define the analyzer, and reopen
+// the index.
+// You cannot close the write index of a data stream.
+// To update the analyzer for a data stream's write index and future backing
+// indices, update the analyzer in the index template used by the stream.
+// Then roll over the data stream to apply the new analyzer to the stream's
+// write index and future backing indices.
+// This affects searches and any new data added to the stream after the
+// rollover.
+// However, it does not affect the data stream's backing indices or their
+// existing data.
+// To change the analyzer for existing backing indices, you must create a new
+// data stream and reindex your data into it.
+// Refer to [updating analyzers on existing
+// indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices)
+// for step-by-step examples.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings
+func (p *MethodIndices) PutSettings() *indices_put_settings.PutSettings {
+	_putsettings := indices_put_settings.NewPutSettingsFunc(p.tp)
+	return _putsettings()
+}
+
+// Create or update a legacy index template.
+// Index templates define settings, mappings, and aliases that can be applied
+// automatically to new indices.
+// Elasticsearch applies templates to new indices based on an index pattern that
+// matches the index name.
+//
+// IMPORTANT: This documentation is about legacy index templates, which are
+// deprecated and will be replaced by the composable templates introduced in
+// Elasticsearch 7.8.
+//
+// Composable templates always take precedence over legacy templates.
+// If no composable template matches a new index, matching legacy templates are
+// applied according to their order.
+//
+// Index templates are only applied during index creation.
+// Changes to index templates do not affect existing indices.
+// Settings and mappings specified in create index API requests override any
+// settings or mappings specified in an index template.
+//
+// You can use C-style `/* *\/` block comments in index templates.
+// You can include comments anywhere in the request body, except before the
+// opening curly bracket.
+//
+// **Indices matching multiple templates**
+//
+// Multiple index templates can potentially match an index; in this case, both
+// the settings and mappings are merged into the final configuration of the
+// index.
+// The order of the merging can be controlled using the `order` parameter, with
+// lower order being applied first, and higher orders overriding them.
+// NOTE: Multiple matching templates with the same order value will result in a
+// non-deterministic merging order.
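+//
+// A hedged sketch (hypothetical `idx *MethodIndices` and `ctx context.Context`;
+// the legacy template body is set on the returned builder before execution):
+//
+// ```
+// res, err := idx.PutTemplate("legacy-template-1").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```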
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template +func (p *MethodIndices) PutTemplate(name string) *indices_put_template.PutTemplate { + _puttemplate := indices_put_template.NewPutTemplateFunc(p.tp) + return _puttemplate(name) +} + +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more +// indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. +// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. +// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery +func (p *MethodIndices) Recovery() *indices_recovery.Recovery { + _recovery := indices_recovery.NewRecoveryFunc(p.tp) + return _recovery() +} + +// Refresh an index. +// A refresh makes recent operations performed on one or more indices available +// for search. +// For data streams, the API runs the refresh operation on the stream’s backing +// indices. +// +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. +// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. 
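+//
+// A hedged sketch (hypothetical `idx *MethodIndices` and `ctx context.Context`):
+//
+// ```
+// res, err := idx.Refresh().Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res // per-shard refresh summary
+// ```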
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh +func (p *MethodIndices) Refresh() *indices_refresh.Refresh { + _refresh := indices_refresh.NewRefreshFunc(p.tp) + return _refresh() +} + +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. +// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers +func (p *MethodIndices) ReloadSearchAnalyzers(index string) *indices_reload_search_analyzers.ReloadSearchAnalyzers { + _reloadsearchanalyzers := indices_reload_search_analyzers.NewReloadSearchAnalyzersFunc(p.tp) + return _reloadsearchanalyzers(index) +} + +// Remove an index block. +// +// Remove an index block from an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block +func (p *MethodIndices) RemoveBlock(index, block string) *indices_remove_block.RemoveBlock { + _removeblock := indices_remove_block.NewRemoveBlockFunc(p.tp) + return _removeblock(index, block) +} + +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. +// +// This endpoint is useful before doing a cross-cluster search in order to +// determine which remote clusters should be included in a search. +// +// You use the same index expression with this endpoint as you would for +// cross-cluster search. +// Index and cluster exclusions are also supported with this endpoint. +// +// For each cluster in the index expression, information is returned about: +// +// * Whether the querying ("local") cluster is currently connected to each +// remote cluster specified in the index expression. Note that this endpoint +// actively attempts to contact the remote clusters, unlike the `remote/info` +// endpoint. +// * Whether each remote cluster is configured with `skip_unavailable` as `true` +// or `false`. +// * Whether there are any indices, aliases, or data streams on that cluster +// that match the index expression. 
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
+//
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when querying remote clusters older than that, the local cluster will send
+// the index expression `dummy*` to those remote clusters. Thus, if any errors
+// occur, you may see a reference to that index expression even though you
+// didn't request it. If it causes a problem, you can instead include an index
+// expression like `*:*` to bypass the issue.
+//
+// ## Advantages of using this endpoint before a cross-cluster search
+//
+// You may want to exclude a cluster or index from a search when:
+//
+// * A remote cluster is not currently connected and is configured with
+// `skip_unavailable=false`. Running a cross-cluster search under those
+// conditions will cause the entire search to fail.
+// * A cluster has no matching indices, aliases or data streams for the index
+// expression (or your user does not have permissions to search them). For
+// example, suppose your index expression is `logs*,remote1:logs*` and the
+// remote1 cluster has no indices, aliases or data streams that match `logs*`.
+// In that case, that cluster will return no results if you include it in a
+// cross-cluster search.
+// * The index expression (combined with any query parameters you specify) will
+// likely cause an exception to be thrown when you do the search. In these
+// cases, the "error" field in the `_resolve/cluster` response will be present.
+// (This is also where security/permission errors will be shown.)
+// * A remote cluster is an older version that does not support the feature you
+// want to use in your search.
+//
+// ## Test availability of remote clusters
+//
+// The `remote/info` endpoint is commonly used to test whether the "local"
+// cluster (the cluster being queried) is connected to its remote clusters, but
+// it does not necessarily reflect whether the remote cluster is available or
+// not.
+// The remote cluster may be available, while the local cluster is not currently
+// connected to it.
+//
+// You can use the `_resolve/cluster` API to attempt to reconnect to remote
+// clusters.
+// For example, use `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+// The `connected` field in the response will indicate whether it was
+// successful.
+// If a connection was (re-)established, this will also cause the `remote/info`
+// endpoint to now indicate a connected status.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster
+func (p *MethodIndices) ResolveCluster() *indices_resolve_cluster.ResolveCluster {
+	_resolvecluster := indices_resolve_cluster.NewResolveClusterFunc(p.tp)
+	return _resolvecluster()
+}
+
+// Resolve indices.
+// Resolve the names and/or index patterns for indices, aliases, and data
+// streams.
+// Multiple patterns and remote clusters are supported.
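+//
+// A hedged sketch (hypothetical `idx *MethodIndices`, `ctx context.Context`,
+// and expression):
+//
+// ```
+// res, err := idx.ResolveIndex("my-index-*").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res // matching indices, aliases, and data streams
+// ```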
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index
+func (p *MethodIndices) ResolveIndex(name string) *indices_resolve_index.ResolveIndex {
+	_resolveindex := indices_resolve_index.NewResolveIndexFunc(p.tp)
+	return _resolveindex(name)
+}
+
+// Roll over to a new index.
+// TIP: It is recommended to use the index lifecycle rollover action to automate
+// rollovers.
+//
+// The rollover API creates a new index for a data stream or index alias.
+// The API behavior depends on the rollover target.
+//
+// **Roll over a data stream**
+//
+// If you roll over a data stream, the API creates a new write index for the
+// stream.
+// The stream's previous write index becomes a regular backing index.
+// A rollover also increments the data stream's generation.
+//
+// **Roll over an index alias with a write index**
+//
+// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
+// write index to manage time series data.
+// Data streams replace this functionality, require less maintenance, and
+// automatically integrate with data tiers.
+//
+// If an index alias points to multiple indices, one of the indices must be a
+// write index.
+// The rollover API creates a new write index for the alias with
+// `is_write_index` set to `true`.
+// The API also sets `is_write_index` to `false` for the previous write index.
+//
+// **Roll over an index alias with one index**
+//
+// If you roll over an index alias that points to only one index, the API
+// creates a new index for the alias and removes the original index from the
+// alias.
+//
+// NOTE: A rollover creates a new index and is subject to the
+// `wait_for_active_shards` setting.
+//
+// **Increment index names for an alias**
+//
+// When you roll over an index alias, you can specify a name for the new index.
+// If you don't specify a name and the current index ends with `-` and a number,
+// such as `my-index-000001` or `my-index-3`, the new index name increments that
+// number.
+// For example, if you roll over an alias with a current index of
+// `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+// This number is always six characters and zero-padded, regardless of the
+// previous index's name.
+//
+// If you use an index alias for time series data, you can use date math in the
+// index name to track the rollover date.
+// For example, you can create an alias that points to an index named
+// `<my-index-{now/d}-000001>`.
+// If you create the index on May 6, 2099, the index's name is
+// `my-index-2099.05.06-000001`.
+// If you roll over the alias on May 7, 2099, the new index's name is
+// `my-index-2099.05.07-000002`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover
+func (p *MethodIndices) Rollover(alias string) *indices_rollover.Rollover {
+	_rollover := indices_rollover.NewRolloverFunc(p.tp)
+	return _rollover(alias)
+}
+
+// Get index segments.
+// Get low-level information about the Lucene segments in index shards.
+// For data streams, the API returns information about the stream's backing
+// indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments
+func (p *MethodIndices) Segments() *indices_segments.Segments {
+	_segments := indices_segments.NewSegmentsFunc(p.tp)
+	return _segments()
+}
+
+// Get index shard stores.
+// Get store information about replica shards in one or more indices.
+// For data streams, the API retrieves store information for the stream's
+// backing indices.
+//
+// The index shard stores API returns the following information:
+//
+// * The node on which each replica shard exists.
+// * The allocation ID for each replica shard.
+// * A unique ID for each replica shard.
+// * Any errors encountered while opening the shard index or from an earlier
+// failure.
+//
+// By default, the API returns store information only for primary shards that
+// are unassigned or have one or more unassigned replica shards.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores
+func (p *MethodIndices) ShardStores() *indices_shard_stores.ShardStores {
+	_shardstores := indices_shard_stores.NewShardStoresFunc(p.tp)
+	return _shardstores()
+}
+
+// Shrink an index.
+// Shrink an index into a new index with fewer primary shards.
+//
+// Before you can shrink an index:
+//
+// * The index must be read-only.
+// * A copy of every shard in the index must reside on the same node.
+// * The index must have a green health status.
+//
+// To make shard allocation easier, we recommend you also remove the index's
+// replica shards.
+// You can later re-add replica shards as part of the shrink operation.
+//
+// The requested number of primary shards in the target index must be a factor
+// of the number of shards in the source index.
+// For example, an index with 8 primary shards can be shrunk into 4, 2, or 1
+// primary shards, and an index with 15 primary shards can be shrunk into 5, 3,
+// or 1.
+// If the number of shards in the index is a prime number, it can only be shrunk
+// into a single primary shard.
+// Before shrinking, a (primary or replica) copy of every shard in the index
+// must be present on the same node.
+//
+// The current write index on a data stream cannot be shrunk. In order to shrink
+// the current write index, the data stream must first be rolled over so that a
+// new write index is created and then the previous write index can be shrunk.
+//
+// A shrink operation:
+//
+// * Creates a new target index with the same definition as the source index,
+// but with a smaller number of primary shards.
+// * Hard-links segments from the source index into the target index. If the
+// file system does not support hard-linking, then all segments are copied into
+// the new index, which is a much more time consuming process. Also, if using
+// multiple data paths, shards on different data paths require a full copy of
+// segment files if they are not on the same disk, since hard links do not work
+// across disks.
+// * Recovers the target index as though it were a closed index which had just
+// been re-opened. Recovers shards to the
+// `index.routing.allocation.initial_recovery._id` index setting.
+//
+// IMPORTANT: Indices can only be shrunk if they satisfy the following
+// requirements:
+//
+// * The target index must not exist.
+// * The source index must have more primary shards than the target index.
+// * The number of primary shards in the target index must be a factor of the
+// number of primary shards in the source index.
+// * The index must not contain more than 2,147,483,519 documents in total
+// across all shards that will be shrunk into a single shard on the target index
+// as this is the maximum number of docs that can fit into a single shard.
+// * The node handling the shrink process must have sufficient free disk space
+// to accommodate a second copy of the existing index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink
+func (p *MethodIndices) Shrink(index, target string) *indices_shrink.Shrink {
+	_shrink := indices_shrink.NewShrinkFunc(p.tp)
+	return _shrink(index, target)
+}
+
+// Simulate an index.
+// Get the index configuration that would be applied to the specified index from
+// an existing index template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template
+func (p *MethodIndices) SimulateIndexTemplate(name string) *indices_simulate_index_template.SimulateIndexTemplate {
+	_simulateindextemplate := indices_simulate_index_template.NewSimulateIndexTemplateFunc(p.tp)
+	return _simulateindextemplate(name)
+}
+
+// Simulate an index template.
+// Get the index configuration that would be applied by a particular index
+// template.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template
+func (p *MethodIndices) SimulateTemplate() *indices_simulate_template.SimulateTemplate {
+	_simulatetemplate := indices_simulate_template.NewSimulateTemplateFunc(p.tp)
+	return _simulatetemplate()
+}
+
+// Split an index.
+// Split an index into a new index with more primary shards.
+//
+// Before you can split an index:
+//
+// * The index must be read-only.
+// * The cluster health status must be green.
+//
+// You can make an index read-only with the following request, using the add
+// index block API:
+//
+// ```
+// PUT /my_source_index/_block/write
+// ```
+//
+// The current write index on a data stream cannot be split.
+// In order to split the current write index, the data stream must first be
+// rolled over so that a new write index is created and then the previous write
+// index can be split.
+//
+// The number of times the index can be split (and the number of shards that
+// each original shard can be split into) is determined by the
+// `index.number_of_routing_shards` setting.
+// The number of routing shards specifies the hashing space that is used
+// internally to distribute documents across shards with consistent hashing.
+// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x
+// 2 x 3) could be split by a factor of 2 or 3.
+//
+// A split operation:
+//
+// * Creates a new target index with the same definition as the source index,
+// but with a larger number of primary shards.
+// * Hard-links segments from the source index into the target index. If the
+// file system doesn't support hard-linking, all segments are copied into the
+// new index, which is a much more time consuming process.
+// * Hashes all documents again, after low level files are created, to delete
+// documents that belong to a different shard.
+// * Recovers the target index as though it were a closed index which had just
+// been re-opened.
+//
+// IMPORTANT: Indices can only be split if they satisfy the following
+// requirements:
+//
+// * The target index must not exist.
+// * The source index must have fewer primary shards than the target index.
+// * The number of primary shards in the target index must be a multiple of the
+// number of primary shards in the source index.
+// * The node handling the split process must have sufficient free disk space to
+// accommodate a second copy of the existing index.
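+//
+// A hedged sketch (hypothetical `idx *MethodIndices`, `ctx context.Context`,
+// and index names; the target shard count is set on the returned builder):
+//
+// ```
+// res, err := idx.Split("my_source_index", "my_target_index").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```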
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split +func (p *MethodIndices) Split(index, target string) *indices_split.Split { + _split := indices_split.NewSplitFunc(p.tp) + return _split(index, target) +} + +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing +// indices. +// +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats +func (p *MethodIndices) Stats() *indices_stats.Stats { + _stats := indices_stats.NewStatsFunc(p.tp) + return _stats() +} + +// Create or update an alias. +// Adds a data stream or index to an alias. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases +func (p *MethodIndices) UpdateAliases() *indices_update_aliases.UpdateAliases { + _updatealiases := indices_update_aliases.NewUpdateAliasesFunc(p.tp) + return _updatealiases() +} + +// Validate a query. +// Validates a query without running it. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query +func (p *MethodIndices) ValidateQuery() *indices_validate_query.ValidateQuery { + _validatequery := indices_validate_query.NewValidateQueryFunc(p.tp) + return _validatequery() +} + +// Perform chat completion inference +// +// The chat completion inference API enables real-time responses for chat +// completion tasks by delivering answers incrementally, reducing response times +// during computation. +// It only works with the `chat_completion` task type for `openai` and `elastic` +// inference services. +// +// NOTE: The `chat_completion` task type is only available within the _stream +// API and only supports streaming. +// The Chat completion inference API and the Stream inference API differ in +// their response structure and capabilities. +// The Chat completion inference API provides more comprehensive customization +// options through more fields and function calling support. +// If you use the `openai`, `hugging_face` or the `elastic` service, use the +// Chat completion inference API. 
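+//
+// A hedged sketch (hypothetical `inf *MethodInference`, `ctx context.Context`,
+// and endpoint ID; the chat request body is set on the returned builder before
+// execution):
+//
+// ```
+// res, err := inf.ChatCompletionUnified("my-chat-endpoint").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```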
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference +func (p *MethodInference) ChatCompletionUnified(inferenceid string) *inference_chat_completion_unified.ChatCompletionUnified { + _chatcompletionunified := inference_chat_completion_unified.NewChatCompletionUnifiedFunc(p.tp) + return _chatcompletionunified(inferenceid) +} + +// Perform completion inference on the service +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func (p *MethodInference) Completion(inferenceid string) *inference_completion.Completion { + _completion := inference_completion.NewCompletionFunc(p.tp) + return _completion(inferenceid) +} + +// Delete an inference endpoint +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete +func (p *MethodInference) Delete(inferenceid string) *inference_delete.Delete { + _delete := inference_delete.NewDeleteFunc(p.tp) + return _delete(inferenceid) +} + +// Get an inference endpoint +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get +func (p *MethodInference) Get() *inference_get.Get { + _get := inference_get.NewGetFunc(p.tp) + return _get() +} + +// Perform inference on the service. +// +// This API enables you to use machine learning models to perform specific tasks +// on data that you provide as an input. +// It returns a response with the results of the tasks. +// The inference endpoint you use can perform one specific task that has been +// defined when the endpoint was created with the create inference API. +// +// For details about using this API with a service, such as Amazon Bedrock, +// Anthropic, or HuggingFace, refer to the service-specific documentation. +// +// > info +// > The inference APIs enable you to use certain services, such as built-in +// machine learning models (ELSER, E5), models uploaded through Eland, Cohere, +// OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or +// Hugging Face. For built-in models and models uploaded through Eland, the +// inference APIs offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func (p *MethodInference) Inference(inferenceid string) *inference_inference.Inference { + _inference := inference_inference.NewInferenceFunc(p.tp) + return _inference(inferenceid) +} + +// Create an inference endpoint. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, +// Anthropic, Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// +// The following integrations are available through the inference API. 
You can +// find the available task types next to the integration name: +// * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, +// `text_embedding`) +// * Amazon Bedrock (`completion`, `text_embedding`) +// * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, +// `sparse_embedding`, `text_embedding`) +// * Anthropic (`completion`) +// * Azure AI Studio (`completion`, `text_embedding`) +// * Azure OpenAI (`completion`, `text_embedding`) +// * Cohere (`completion`, `rerank`, `text_embedding`) +// * DeepSeek (`completion`, `chat_completion`) +// * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this +// service is for built-in models and models uploaded through Eland) +// * ELSER (`sparse_embedding`) +// * Google AI Studio (`completion`, `text_embedding`) +// * Google Vertex AI (`rerank`, `text_embedding`) +// * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) +// * Mistral (`chat_completion`, `completion`, `text_embedding`) +// * OpenAI (`chat_completion`, `completion`, `text_embedding`) +// * VoyageAI (`text_embedding`, `rerank`) +// * Watsonx inference integration (`text_embedding`) +// * JinaAI (`text_embedding`, `rerank`) +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put +func (p *MethodInference) Put(inferenceid string) *inference_put.Put { + _put := inference_put.NewPutFunc(p.tp) + return _put(inferenceid) +} + +// Create an AlibabaCloud AI Search inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `alibabacloud-ai-search` service. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud +func (p *MethodInference) PutAlibabacloud(tasktype, alibabacloudinferenceid string) *inference_put_alibabacloud.PutAlibabacloud { + _putalibabacloud := inference_put_alibabacloud.NewPutAlibabacloudFunc(p.tp) + return _putalibabacloud(tasktype, alibabacloudinferenceid) +} + +// Create an Amazon Bedrock inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazonbedrock` service. +// +// >info +// > You need to provide the access and secret keys only once, during the +// inference model creation. The get inference API does not retrieve your access +// or secret keys. After creating the inference model, you cannot change the +// associated key pairs. If you want to use a different access and secret key +// pair, delete the inference model and recreate it with the same name and the +// updated keys. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock +func (p *MethodInference) PutAmazonbedrock(tasktype, amazonbedrockinferenceid string) *inference_put_amazonbedrock.PutAmazonbedrock { + _putamazonbedrock := inference_put_amazonbedrock.NewPutAmazonbedrockFunc(p.tp) + return _putamazonbedrock(tasktype, amazonbedrockinferenceid) +} + +// Create an Amazon SageMaker inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazon_sagemaker` service. 
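+//
+// A hedged sketch (hypothetical `inf *MethodInference`, `ctx context.Context`,
+// task type, and endpoint ID; the service settings are set on the returned
+// builder before execution):
+//
+// ```
+// res, err := inf.PutAmazonsagemaker("text_embedding", "my-sagemaker-endpoint").Do(ctx)
+// if err != nil {
+// 	// handle error
+// }
+// _ = res
+// ```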
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker
+func (p *MethodInference) PutAmazonsagemaker(tasktype, amazonsagemakerinferenceid string) *inference_put_amazonsagemaker.PutAmazonsagemaker {
+ _putamazonsagemaker := inference_put_amazonsagemaker.NewPutAmazonsagemakerFunc(p.tp)
+ return _putamazonsagemaker(tasktype, amazonsagemakerinferenceid)
+}
+
+// Create an Anthropic inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `anthropic` service.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic
+func (p *MethodInference) PutAnthropic(tasktype, anthropicinferenceid string) *inference_put_anthropic.PutAnthropic {
+ _putanthropic := inference_put_anthropic.NewPutAnthropicFunc(p.tp)
+ return _putanthropic(tasktype, anthropicinferenceid)
+}
+
+// Create an Azure AI Studio inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `azureaistudio` service.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio
+func (p *MethodInference) PutAzureaistudio(tasktype, azureaistudioinferenceid string) *inference_put_azureaistudio.PutAzureaistudio {
+ _putazureaistudio := inference_put_azureaistudio.NewPutAzureaistudioFunc(p.tp)
+ return _putazureaistudio(tasktype, azureaistudioinferenceid)
+}
+
+// Create an Azure OpenAI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `azureopenai` service.
+//
+// The list of chat completion models that you can choose from in your Azure
+// OpenAI deployment includes:
+//
+// * [GPT-4 and GPT-4 Turbo
+// models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)
+// *
+// [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)
+//
+// The list of embeddings models that you can choose from in your deployment can
+// be found in the [Azure models
+// documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai
+func (p *MethodInference) PutAzureopenai(tasktype, azureopenaiinferenceid string) *inference_put_azureopenai.PutAzureopenai {
+ _putazureopenai := inference_put_azureopenai.NewPutAzureopenaiFunc(p.tp)
+ return _putazureopenai(tasktype, azureopenaiinferenceid)
+}
+
+// Create a Cohere inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `cohere`
+// service.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere
+func (p *MethodInference) PutCohere(tasktype, cohereinferenceid string) *inference_put_cohere.PutCohere {
+ _putcohere := inference_put_cohere.NewPutCohereFunc(p.tp)
+ return _putcohere(tasktype, cohereinferenceid)
+}
+
+// Create a custom inference endpoint.
+//
+// The custom service gives more control over how to interact with external
+// inference services that aren't explicitly supported through dedicated
+// integrations.
+// The custom service gives you the ability to define the headers, url, query
+// parameters, request body, and secrets.
+// The custom service supports the template replacement functionality, which
+// enables you to define a template that can be replaced with the value
+// associated with that key.
+// Templates are portions of a string that start with `${` and end with `}`.
+// The parameters `secret_parameters` and `task_settings` are checked for keys
+// for template replacement. Template replacement is supported in the `request`,
+// `headers`, `url`, and `query_parameters`.
+// If the definition (key) is not found for a template, an error message is
+// returned.
+// In case of an endpoint definition like the following:
+// ```
+// PUT _inference/text_embedding/test-text-embedding
+//
+// {
+//   "service": "custom",
+//   "service_settings": {
+//     "secret_parameters": {
+//       "api_key": ""
+//     },
+//     "url": "...endpoints.huggingface.cloud/v1/embeddings",
+//     "headers": {
+//       "Authorization": "Bearer ${api_key}",
+//       "Content-Type": "application/json"
+//     },
+//     "request": "{\"input\": ${input}}",
+//     "response": {
+//       "json_parser": {
+//         "text_embeddings": "$.data[*].embedding[*]"
+//       }
+//     }
+//   }
+// }
+//
+// ```
+// To replace `${api_key}`, the `secret_parameters` and `task_settings` are
+// checked for a key named `api_key`.
+//
+// > info
+// > Templates should not be surrounded by quotes.
+//
+// Pre-defined templates:
+// * `${input}` refers to the array of input strings that comes from the `input`
+// field of the subsequent inference requests.
+// * `${input_type}` refers to the input type translation values.
+// * `${query}` refers to the query field used specifically for reranking tasks.
+// * `${top_n}` refers to the `top_n` field available when performing rerank
+// requests.
+// * `${return_documents}` refers to the `return_documents` field available when
+// performing rerank requests.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom
+func (p *MethodInference) PutCustom(tasktype, custominferenceid string) *inference_put_custom.PutCustom {
+ _putcustom := inference_put_custom.NewPutCustomFunc(p.tp)
+ return _putcustom(tasktype, custominferenceid)
+}
+
+// Create a DeepSeek inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `deepseek`
+// service.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek
+func (p *MethodInference) PutDeepseek(tasktype, deepseekinferenceid string) *inference_put_deepseek.PutDeepseek {
+ _putdeepseek := inference_put_deepseek.NewPutDeepseekFunc(p.tp)
+ return _putdeepseek(tasktype, deepseekinferenceid)
+}
+
+// Create an Elasticsearch inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `elasticsearch` service.
+//
+// > info
+// > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference
+// endpoints; you only need to create the endpoints using the API if you want to
+// customize the settings.
+//
+// If you use the ELSER or the E5 model through the `elasticsearch` service, the
+// API request will automatically download and deploy the model if it isn't
+// downloaded yet.
+//
+// > info
+// > You might see a 502 bad gateway error in the response when using the Kibana
+// Console. This error usually just reflects a timeout, while the model
+// downloads in the background. You can check the download progress in the
+// Machine Learning UI. If using the Python client, you can set the timeout
+// parameter to a higher value.
+//
+// After creating the endpoint, wait for the model deployment to complete before
+// using it.
+// To verify the deployment status, use the get trained model statistics API.
+// Look for `"state": "fully_allocated"` in the response and ensure that the
+// `"allocation_count"` matches the `"target_allocation_count"`.
+// Avoid creating multiple endpoints for the same model unless required, as each
+// endpoint consumes significant resources.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch
+func (p *MethodInference) PutElasticsearch(tasktype, elasticsearchinferenceid string) *inference_put_elasticsearch.PutElasticsearch {
+ _putelasticsearch := inference_put_elasticsearch.NewPutElasticsearchFunc(p.tp)
+ return _putelasticsearch(tasktype, elasticsearchinferenceid)
+}
+
+// Create an ELSER inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `elser`
+// service.
+// You can also deploy ELSER by using the Elasticsearch inference integration.
+//
+// > info
+// > Your Elasticsearch deployment contains a preconfigured ELSER inference
+// endpoint; you only need to create the endpoint using the API if you want to
+// customize the settings.
+//
+// The API request will automatically download and deploy the ELSER model if it
+// isn't already downloaded.
+//
+// > info
+// > You might see a 502 bad gateway error in the response when using the Kibana
+// Console. This error usually just reflects a timeout, while the model
+// downloads in the background. You can check the download progress in the
+// Machine Learning UI. If using the Python client, you can set the timeout
+// parameter to a higher value.
+//
+// After creating the endpoint, wait for the model deployment to complete before
+// using it.
+// To verify the deployment status, use the get trained model statistics API.
+// Look for `"state": "fully_allocated"` in the response and ensure that the
+// `"allocation_count"` matches the `"target_allocation_count"`.
+// Avoid creating multiple endpoints for the same model unless required, as each
+// endpoint consumes significant resources.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser
+func (p *MethodInference) PutElser(tasktype, elserinferenceid string) *inference_put_elser.PutElser {
+ _putelser := inference_put_elser.NewPutElserFunc(p.tp)
+ return _putelser(tasktype, elserinferenceid)
+}
+
+// Create a Google AI Studio inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `googleaistudio` service.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio
+func (p *MethodInference) PutGoogleaistudio(tasktype, googleaistudioinferenceid string) *inference_put_googleaistudio.PutGoogleaistudio {
+ _putgoogleaistudio := inference_put_googleaistudio.NewPutGoogleaistudioFunc(p.tp)
+ return _putgoogleaistudio(tasktype, googleaistudioinferenceid)
+}
+
+// Create a Google Vertex AI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `googlevertexai` service.
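+//
+// A minimal call sketch (placeholder IDs; the abbreviated body stands in for a
+// real `service_settings` object):
+//
+//	res, err := inf.PutGooglevertexai("rerank", "my-vertex-endpoint").
+//		Raw(strings.NewReader(`{"service": "googlevertexai", "service_settings": {}}`)).
+//		Do(context.Background())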
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai
+func (p *MethodInference) PutGooglevertexai(tasktype, googlevertexaiinferenceid string) *inference_put_googlevertexai.PutGooglevertexai {
+ _putgooglevertexai := inference_put_googlevertexai.NewPutGooglevertexaiFunc(p.tp)
+ return _putgooglevertexai(tasktype, googlevertexaiinferenceid)
+}
+
+// Create a Hugging Face inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `hugging_face` service.
+// Supported tasks include: `text_embedding`, `completion`, and
+// `chat_completion`.
+//
+// To configure the endpoint, first visit the Hugging Face Inference Endpoints
+// page and create a new endpoint.
+// Select a model that supports the task you intend to use.
+//
+// For Elastic's `text_embedding` task:
+// The selected model must support the `Sentence Embeddings` task. On the new
+// endpoint creation page, select the `Sentence Embeddings` task under the
+// `Advanced Configuration` section.
+// After the endpoint has initialized, copy the generated endpoint URL.
+// Recommended models for the `text_embedding` task:
+//
+// * `all-MiniLM-L6-v2`
+// * `all-MiniLM-L12-v2`
+// * `all-mpnet-base-v2`
+// * `e5-base-v2`
+// * `e5-small-v2`
+// * `multilingual-e5-base`
+// * `multilingual-e5-small`
+//
+// For Elastic's `chat_completion` and `completion` tasks:
+// The selected model must support the `Text Generation` task and expose the
+// OpenAI API. HuggingFace supports both serverless and dedicated endpoints for
+// `Text Generation`. When creating a dedicated endpoint, select the `Text
+// Generation` task.
+// After the endpoint is initialized (for dedicated) or ready (for serverless),
+// ensure it supports the OpenAI API and includes the `/v1/chat/completions`
+// path in its URL. Then, copy the full endpoint URL for use.
+// Recommended models for the `chat_completion` and `completion` tasks:
+//
+// * `Mistral-7B-Instruct-v0.2`
+// * `QwQ-32B`
+// * `Phi-3-mini-128k-instruct`
+//
+// For Elastic's `rerank` task:
+// The selected model must support the `sentence-ranking` task and expose the
+// OpenAI API.
+// HuggingFace supports only dedicated (not serverless) endpoints for `Rerank`
+// so far.
+// After the endpoint is initialized, copy the full endpoint URL for use.
+// Tested models for the `rerank` task:
+//
+// * `bge-reranker-base`
+// * `jina-reranker-v1-turbo-en-GGUF`
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face
+func (p *MethodInference) PutHuggingFace(tasktype, huggingfaceinferenceid string) *inference_put_hugging_face.PutHuggingFace {
+ _puthuggingface := inference_put_hugging_face.NewPutHuggingFaceFunc(p.tp)
+ return _puthuggingface(tasktype, huggingfaceinferenceid)
+}
+
+// Create a JinaAI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `jinaai`
+// service.
+//
+// To review the available `rerank` and `text_embedding` models, refer to the
+// JinaAI model documentation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai
+func (p *MethodInference) PutJinaai(tasktype, jinaaiinferenceid string) *inference_put_jinaai.PutJinaai {
+ _putjinaai := inference_put_jinaai.NewPutJinaaiFunc(p.tp)
+ return _putjinaai(tasktype, jinaaiinferenceid)
+}
+
+// Create a Mistral inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `mistral`
+// service.
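+//
+// A sketch of creating a Mistral endpoint and then running a completion
+// against it; all names are placeholders and the `service_settings` body is
+// abbreviated:
+//
+//	ctx := context.Background()
+//	if _, err := inf.PutMistral("completion", "my-mistral-endpoint").
+//		Raw(strings.NewReader(`{"service": "mistral", "service_settings": {}}`)).
+//		Do(ctx); err != nil {
+//		// handle the error
+//	}
+//	res, err := inf.Completion("my-mistral-endpoint").
+//		Raw(strings.NewReader(`{"input": "Say hello"}`)).
+//		Do(ctx)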
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral +func (p *MethodInference) PutMistral(tasktype, mistralinferenceid string) *inference_put_mistral.PutMistral { + _putmistral := inference_put_mistral.NewPutMistralFunc(p.tp) + return _putmistral(tasktype, mistralinferenceid) +} + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service or `openai` compatible APIs. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai +func (p *MethodInference) PutOpenai(tasktype, openaiinferenceid string) *inference_put_openai.PutOpenai { + _putopenai := inference_put_openai.NewPutOpenaiFunc(p.tp) + return _putopenai(tasktype, openaiinferenceid) +} + +// Create a VoyageAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `voyageai` +// service. +// +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai +func (p *MethodInference) PutVoyageai(tasktype, voyageaiinferenceid string) *inference_put_voyageai.PutVoyageai { + _putvoyageai := inference_put_voyageai.NewPutVoyageaiFunc(p.tp) + return _putvoyageai(tasktype, voyageaiinferenceid) +} + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx +func (p *MethodInference) PutWatsonx(tasktype, watsonxinferenceid string) *inference_put_watsonx.PutWatsonx { + _putwatsonx := inference_put_watsonx.NewPutWatsonxFunc(p.tp) + return _putwatsonx(tasktype, watsonxinferenceid) +} + +// Perform reranking inference on the service +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func (p *MethodInference) Rerank(inferenceid string) *inference_rerank.Rerank { + _rerank := inference_rerank.NewRerankFunc(p.tp) + return _rerank(inferenceid) +} + +// Perform sparse embedding inference on the service +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func (p *MethodInference) SparseEmbedding(inferenceid string) *inference_sparse_embedding.SparseEmbedding { + _sparseembedding := inference_sparse_embedding.NewSparseEmbeddingFunc(p.tp) + return _sparseembedding(inferenceid) +} + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. 
However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference +func (p *MethodInference) StreamCompletion(inferenceid string) *inference_stream_completion.StreamCompletion { + _streamcompletion := inference_stream_completion.NewStreamCompletionFunc(p.tp) + return _streamcompletion(inferenceid) +} + +// Perform text embedding inference on the service +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func (p *MethodInference) TextEmbedding(inferenceid string) *inference_text_embedding.TextEmbedding { + _textembedding := inference_text_embedding.NewTextEmbeddingFunc(p.tp) + return _textembedding(inferenceid) +} + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update +func (p *MethodInference) Update(inferenceid string) *inference_update.Update { + _update := inference_update.NewUpdateFunc(p.tp) + return _update(inferenceid) +} + +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database +func (p *MethodIngest) DeleteGeoipDatabase(id string) *ingest_delete_geoip_database.DeleteGeoipDatabase { + _deletegeoipdatabase := ingest_delete_geoip_database.NewDeleteGeoipDatabaseFunc(p.tp) + return _deletegeoipdatabase(id) +} + +// Delete IP geolocation database configurations. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database +func (p *MethodIngest) DeleteIpLocationDatabase(id string) *ingest_delete_ip_location_database.DeleteIpLocationDatabase { + _deleteiplocationdatabase := ingest_delete_ip_location_database.NewDeleteIpLocationDatabaseFunc(p.tp) + return _deleteiplocationdatabase(id) +} + +// Delete pipelines. +// Delete one or more ingest pipelines. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline +func (p *MethodIngest) DeletePipeline(id string) *ingest_delete_pipeline.DeletePipeline { + _deletepipeline := ingest_delete_pipeline.NewDeletePipelineFunc(p.tp) + return _deletepipeline(id) +} + +// Get GeoIP statistics. +// Get download statistics for GeoIP2 databases that are used with the GeoIP +// processor. 
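+//
+// A minimal call sketch, assuming `ing` is a *MethodIngest from the same
+// client wrapper:
+//
+//	stats, err := ing.GeoIpStats().Do(context.Background())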
+// https://www.elastic.co/docs/reference/enrich-processor/geoip-processor +func (p *MethodIngest) GeoIpStats() *ingest_geo_ip_stats.GeoIpStats { + _geoipstats := ingest_geo_ip_stats.NewGeoIpStatsFunc(p.tp) + return _geoipstats() +} + +// Get GeoIP database configurations. +// +// Get information about one or more IP geolocation database configurations. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database +func (p *MethodIngest) GetGeoipDatabase() *ingest_get_geoip_database.GetGeoipDatabase { + _getgeoipdatabase := ingest_get_geoip_database.NewGetGeoipDatabaseFunc(p.tp) + return _getgeoipdatabase() +} + +// Get IP geolocation database configurations. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database +func (p *MethodIngest) GetIpLocationDatabase() *ingest_get_ip_location_database.GetIpLocationDatabase { + _getiplocationdatabase := ingest_get_ip_location_database.NewGetIpLocationDatabaseFunc(p.tp) + return _getiplocationdatabase() +} + +// Get pipelines. +// +// Get information about one or more ingest pipelines. +// This API returns a local reference of the pipeline. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline +func (p *MethodIngest) GetPipeline() *ingest_get_pipeline.GetPipeline { + _getpipeline := ingest_get_pipeline.NewGetPipelineFunc(p.tp) + return _getpipeline() +} + +// Run a grok processor. +// Extract structured fields out of a single text field within a document. +// You must choose which field to extract matched fields from, as well as the +// grok pattern you expect will match. +// A grok pattern is like a regular expression that supports aliased expressions +// that can be reused. +// https://www.elastic.co/docs/reference/enrich-processor/grok-processor +func (p *MethodIngest) ProcessorGrok() *ingest_processor_grok.ProcessorGrok { + _processorgrok := ingest_processor_grok.NewProcessorGrokFunc(p.tp) + return _processorgrok() +} + +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database +func (p *MethodIngest) PutGeoipDatabase(id string) *ingest_put_geoip_database.PutGeoipDatabase { + _putgeoipdatabase := ingest_put_geoip_database.NewPutGeoipDatabaseFunc(p.tp) + return _putgeoipdatabase(id) +} + +// Create or update an IP geolocation database configuration. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database +func (p *MethodIngest) PutIpLocationDatabase(id string) *ingest_put_ip_location_database.PutIpLocationDatabase { + _putiplocationdatabase := ingest_put_ip_location_database.NewPutIpLocationDatabaseFunc(p.tp) + return _putiplocationdatabase(id) +} + +// Create or update a pipeline. +// Changes made using this API take effect immediately. +// https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines +func (p *MethodIngest) PutPipeline(id string) *ingest_put_pipeline.PutPipeline { + _putpipeline := ingest_put_pipeline.NewPutPipelineFunc(p.tp) + return _putpipeline(id) +} + +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. 
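+//
+// A minimal call sketch, assuming `ing` is a *MethodIngest; the body follows
+// the documented simulate shape (an inline pipeline plus sample documents):
+//
+//	res, err := ing.Simulate().
+//		Raw(strings.NewReader(`{
+//		  "pipeline": {"processors": [{"set": {"field": "env", "value": "prod"}}]},
+//		  "docs": [{"_source": {"message": "hello"}}]
+//		}`)).
+//		Do(context.Background())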
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate +func (p *MethodIngest) Simulate() *ingest_simulate.Simulate { + _simulate := ingest_simulate.NewSimulateFunc(p.tp) + return _simulate() +} + +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete +func (p *MethodLicense) Delete() *license_delete.Delete { + _delete := license_delete.NewDeleteFunc(p.tp) + return _delete() +} + +// Get license information. +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get +func (p *MethodLicense) Get() *license_get.Get { + _get := license_get.NewGetFunc(p.tp) + return _get() +} + +// Get the basic license status. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status +func (p *MethodLicense) GetBasicStatus() *license_get_basic_status.GetBasicStatus { + _getbasicstatus := license_get_basic_status.NewGetBasicStatusFunc(p.tp) + return _getbasicstatus() +} + +// Get the trial status. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status +func (p *MethodLicense) GetTrialStatus() *license_get_trial_status.GetTrialStatus { + _gettrialstatus := license_get_trial_status.NewGetTrialStatusFunc(p.tp) + return _gettrialstatus() +} + +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post +func (p *MethodLicense) Post() *license_post.Post { + _post := license_post.NewPostFunc(p.tp) + return _post() +} + +// Start a basic license. +// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. 
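+//
+// A minimal call sketch, assuming `lic` is a *MethodLicense; the `Acknowledge`
+// helper is assumed to be the generated setter for the `acknowledge` query
+// parameter described above:
+//
+//	res, err := lic.PostStartBasic().Acknowledge(true).Do(context.Background())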
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic
+func (p *MethodLicense) PostStartBasic() *license_post_start_basic.PostStartBasic {
+ _poststartbasic := license_post_start_basic.NewPostStartBasicFunc(p.tp)
+ return _poststartbasic()
+}
+
+// Start a trial.
+// Start a 30-day trial, which gives access to all subscription features.
+//
+// NOTE: You are allowed to start a trial only if your cluster has not already
+// activated a trial for the current major product version.
+// For example, if you have already activated a trial for v8.0, you cannot start
+// a new trial until v9.0. You can, however, request an extended trial at
+// https://www.elastic.co/trialextension.
+//
+// To check the status of your trial, use the get trial status API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial
+func (p *MethodLicense) PostStartTrial() *license_post_start_trial.PostStartTrial {
+ _poststarttrial := license_post_start_trial.NewPostStartTrialFunc(p.tp)
+ return _poststarttrial()
+}
+
+// Delete a Logstash pipeline.
+// Delete a pipeline that is used for Logstash Central Management.
+// If the request succeeds, you receive an empty response with an appropriate
+// status code.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline
+func (p *MethodLogstash) DeletePipeline(id string) *logstash_delete_pipeline.DeletePipeline {
+ _deletepipeline := logstash_delete_pipeline.NewDeletePipelineFunc(p.tp)
+ return _deletepipeline(id)
+}
+
+// Get Logstash pipelines.
+// Get pipelines that are used for Logstash Central Management.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline
+func (p *MethodLogstash) GetPipeline() *logstash_get_pipeline.GetPipeline {
+ _getpipeline := logstash_get_pipeline.NewGetPipelineFunc(p.tp)
+ return _getpipeline()
+}
+
+// Create or update a Logstash pipeline.
+//
+// Create a pipeline that is used for Logstash Central Management.
+// If the specified pipeline exists, it is replaced.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline
+func (p *MethodLogstash) PutPipeline(id string) *logstash_put_pipeline.PutPipeline {
+ _putpipeline := logstash_put_pipeline.NewPutPipelineFunc(p.tp)
+ return _putpipeline(id)
+}
+
+// Get deprecation information.
+// Get information about different cluster, node, and index level settings that
+// use deprecated features that will be removed or changed in the next major
+// version.
+//
+// TIP: This API is designed for indirect use by the Upgrade Assistant.
+// We strongly recommend you use the Upgrade Assistant.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations
+func (p *MethodMigration) Deprecations() *migration_deprecations.Deprecations {
+ _deprecations := migration_deprecations.NewDeprecationsFunc(p.tp)
+ return _deprecations()
+}
+
+// Get feature migration information.
+// Version upgrades sometimes require changes to how features store
+// configuration information and data in system indices.
+// Check which features need to be migrated and the status of any migrations
+// that are in progress.
+//
+// TIP: This API is designed for indirect use by the Upgrade Assistant.
+// We strongly recommend you use the Upgrade Assistant.
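+//
+// A minimal call sketch, assuming `mig` is a *MethodMigration:
+//
+//	status, err := mig.GetFeatureUpgradeStatus().Do(context.Background())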
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status
+func (p *MethodMigration) GetFeatureUpgradeStatus() *migration_get_feature_upgrade_status.GetFeatureUpgradeStatus {
+ _getfeatureupgradestatus := migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatusFunc(p.tp)
+ return _getfeatureupgradestatus()
+}
+
+// Start the feature migration.
+// Version upgrades sometimes require changes to how features store
+// configuration information and data in system indices.
+// This API starts the automatic migration process.
+//
+// Some functionality might be temporarily unavailable during the migration
+// process.
+//
+// TIP: The API is designed for indirect use by the Upgrade Assistant. We
+// strongly recommend you use the Upgrade Assistant.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status
+func (p *MethodMigration) PostFeatureUpgrade() *migration_post_feature_upgrade.PostFeatureUpgrade {
+ _postfeatureupgrade := migration_post_feature_upgrade.NewPostFeatureUpgradeFunc(p.tp)
+ return _postfeatureupgrade()
+}
+
+// Clear trained model deployment cache.
+//
+// The cache will be cleared on all nodes where the trained model is assigned.
+// A trained model deployment may have an inference cache enabled.
+// As requests are handled by each allocated node, their responses may be cached
+// on that individual node.
+// Calling this API clears the caches without restarting the deployment.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache
+func (p *MethodMl) ClearTrainedModelDeploymentCache(modelid string) *ml_clear_trained_model_deployment_cache.ClearTrainedModelDeploymentCache {
+ _cleartrainedmodeldeploymentcache := ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCacheFunc(p.tp)
+ return _cleartrainedmodeldeploymentcache(modelid)
+}
+
+// Close anomaly detection jobs.
+//
+// A job can be opened and closed multiple times throughout its lifecycle. A
+// closed job cannot receive data or perform analysis operations, but you can
+// still explore and navigate results.
+// When you close a job, it runs housekeeping tasks such as pruning the model
+// history, flushing buffers, calculating final results and persisting the model
+// snapshots. Depending upon the size of the job, it could take several minutes
+// to close and the equivalent time to re-open. After it is closed, the job has
+// a minimal overhead on the cluster except for maintaining its metadata.
+// Therefore, it is a best practice to close jobs that are no longer required to
+// process data.
+// If you close an anomaly detection job whose datafeed is running, the request
+// first tries to stop the datafeed. This behavior is equivalent to calling the
+// stop datafeed API with the same timeout and force parameters as the close job
+// request.
+// When a datafeed that has a specified end date stops, it automatically closes
+// its associated job.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job
+func (p *MethodMl) CloseJob(jobid string) *ml_close_job.CloseJob {
+ _closejob := ml_close_job.NewCloseJobFunc(p.tp)
+ return _closejob(jobid)
+}
+
+// Delete a calendar.
+//
+// Remove all scheduled events from a calendar, then delete it.
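+//
+// A minimal call sketch, assuming `ml` is a *MethodMl and the calendar ID is a
+// placeholder:
+//
+//	res, err := ml.DeleteCalendar("my-calendar").Do(context.Background())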
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar
+func (p *MethodMl) DeleteCalendar(calendarid string) *ml_delete_calendar.DeleteCalendar {
+ _deletecalendar := ml_delete_calendar.NewDeleteCalendarFunc(p.tp)
+ return _deletecalendar(calendarid)
+}
+
+// Delete events from a calendar.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event
+func (p *MethodMl) DeleteCalendarEvent(calendarid, eventid string) *ml_delete_calendar_event.DeleteCalendarEvent {
+ _deletecalendarevent := ml_delete_calendar_event.NewDeleteCalendarEventFunc(p.tp)
+ return _deletecalendarevent(calendarid, eventid)
+}
+
+// Delete anomaly jobs from a calendar.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job
+func (p *MethodMl) DeleteCalendarJob(calendarid, jobid string) *ml_delete_calendar_job.DeleteCalendarJob {
+ _deletecalendarjob := ml_delete_calendar_job.NewDeleteCalendarJobFunc(p.tp)
+ return _deletecalendarjob(calendarid, jobid)
+}
+
+// Delete a data frame analytics job.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics
+func (p *MethodMl) DeleteDataFrameAnalytics(id string) *ml_delete_data_frame_analytics.DeleteDataFrameAnalytics {
+ _deletedataframeanalytics := ml_delete_data_frame_analytics.NewDeleteDataFrameAnalyticsFunc(p.tp)
+ return _deletedataframeanalytics(id)
+}
+
+// Delete a datafeed.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed
+func (p *MethodMl) DeleteDatafeed(datafeedid string) *ml_delete_datafeed.DeleteDatafeed {
+ _deletedatafeed := ml_delete_datafeed.NewDeleteDatafeedFunc(p.tp)
+ return _deletedatafeed(datafeedid)
+}
+
+// Delete expired ML data.
+//
+// Delete all job results, model snapshots and forecast data that have exceeded
+// their retention period. Machine learning state documents that are not
+// associated with any job are also deleted.
+// You can limit the request to a single anomaly detection job or a set of jobs
+// by using a job identifier, a group name, a comma-separated list of jobs, or a
+// wildcard expression. You can delete expired data for all anomaly detection
+// jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting
+// the `<job_id>`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data
+func (p *MethodMl) DeleteExpiredData() *ml_delete_expired_data.DeleteExpiredData {
+ _deleteexpireddata := ml_delete_expired_data.NewDeleteExpiredDataFunc(p.tp)
+ return _deleteexpireddata()
+}
+
+// Delete a filter.
+//
+// If an anomaly detection job references the filter, you cannot delete the
+// filter. You must update or delete the job before you can delete the filter.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter
+func (p *MethodMl) DeleteFilter(filterid string) *ml_delete_filter.DeleteFilter {
+ _deletefilter := ml_delete_filter.NewDeleteFilterFunc(p.tp)
+ return _deletefilter(filterid)
+}
+
+// Delete forecasts from a job.
+//
+// By default, forecasts are retained for 14 days. You can specify a
+// different retention period with the `expires_in` parameter in the forecast
+// jobs API. The delete forecast API enables you to delete one or more
+// forecasts before they expire.
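+//
+// A minimal call sketch (`ml` is a placeholder *MethodMl value, as is the job
+// ID):
+//
+//	res, err := ml.DeleteForecast("my-job").Do(context.Background())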
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast +func (p *MethodMl) DeleteForecast(jobid string) *ml_delete_forecast.DeleteForecast { + _deleteforecast := ml_delete_forecast.NewDeleteForecastFunc(p.tp) + return _deleteforecast(jobid) +} + +// Delete an anomaly detection job. +// +// All job configuration, model state and results are deleted. +// It is not currently possible to delete multiple jobs using wildcards or a +// comma separated list. If you delete a job that has a datafeed, the request +// first tries to delete the datafeed. This behavior is equivalent to calling +// the delete datafeed API with the same timeout and force parameters as the +// delete job request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job +func (p *MethodMl) DeleteJob(jobid string) *ml_delete_job.DeleteJob { + _deletejob := ml_delete_job.NewDeleteJobFunc(p.tp) + return _deletejob(jobid) +} + +// Delete a model snapshot. +// +// You cannot delete the active model snapshot. To delete that snapshot, first +// revert to a different one. To identify the active model snapshot, refer to +// the `model_snapshot_id` in the results from the get jobs API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot +func (p *MethodMl) DeleteModelSnapshot(jobid, snapshotid string) *ml_delete_model_snapshot.DeleteModelSnapshot { + _deletemodelsnapshot := ml_delete_model_snapshot.NewDeleteModelSnapshotFunc(p.tp) + return _deletemodelsnapshot(jobid, snapshotid) +} + +// Delete an unreferenced trained model. +// +// The request deletes a trained inference model that is not referenced by an +// ingest pipeline. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model +func (p *MethodMl) DeleteTrainedModel(modelid string) *ml_delete_trained_model.DeleteTrainedModel { + _deletetrainedmodel := ml_delete_trained_model.NewDeleteTrainedModelFunc(p.tp) + return _deletetrainedmodel(modelid) +} + +// Delete a trained model alias. +// +// This API deletes an existing model alias that refers to a trained model. If +// the model alias is missing or refers to a model other than the one identified +// by the `model_id`, this API returns an error. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias +func (p *MethodMl) DeleteTrainedModelAlias(modelid, modelalias string) *ml_delete_trained_model_alias.DeleteTrainedModelAlias { + _deletetrainedmodelalias := ml_delete_trained_model_alias.NewDeleteTrainedModelAliasFunc(p.tp) + return _deletetrainedmodelalias(modelid, modelalias) +} + +// Estimate job model memory usage. +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality +// estimates for the fields it references. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory +func (p *MethodMl) EstimateModelMemory() *ml_estimate_model_memory.EstimateModelMemory { + _estimatemodelmemory := ml_estimate_model_memory.NewEstimateModelMemoryFunc(p.tp) + return _estimatemodelmemory() +} + +// Evaluate data frame analytics. +// +// The API packages together commonly used evaluation metrics for various types +// of machine learning features. This has been designed for use on indexes +// created by data frame analytics. 
Evaluation requires both a ground truth
+// field and an analytics result field to be present.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame
+func (p *MethodMl) EvaluateDataFrame() *ml_evaluate_data_frame.EvaluateDataFrame {
+ _evaluatedataframe := ml_evaluate_data_frame.NewEvaluateDataFrameFunc(p.tp)
+ return _evaluatedataframe()
+}
+
+// Explain data frame analytics config.
+//
+// This API provides explanations for a data frame analytics config that either
+// already exists or has not been created yet. The following explanations are
+// provided:
+// * which fields are included or not in the analysis and why,
+// * how much memory is estimated to be required. The estimate can be used when
+// deciding the appropriate value for the `model_memory_limit` setting later on.
+// If you have object fields or fields that are excluded via source filtering,
+// they are not included in the explanation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics
+func (p *MethodMl) ExplainDataFrameAnalytics() *ml_explain_data_frame_analytics.ExplainDataFrameAnalytics {
+ _explaindataframeanalytics := ml_explain_data_frame_analytics.NewExplainDataFrameAnalyticsFunc(p.tp)
+ return _explaindataframeanalytics()
+}
+
+// Force buffered data to be processed.
+// The flush jobs API is only applicable when sending data for analysis using
+// the post data API. Depending on the content of the buffer, it might
+// additionally calculate new results. Both flush and close operations are
+// similar; however, the flush is more efficient if you are expecting to send
+// more data for analysis. When flushing, the job remains open and is available
+// to continue analyzing data. A close operation additionally prunes and
+// persists the model state to disk and the job must be opened again before
+// analyzing further data.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job
+func (p *MethodMl) FlushJob(jobid string) *ml_flush_job.FlushJob {
+ _flushjob := ml_flush_job.NewFlushJobFunc(p.tp)
+ return _flushjob(jobid)
+}
+
+// Predict future behavior of a time series.
+//
+// Forecasts predict future behavior based on historical data. They are not
+// supported for jobs that perform population analysis; an error occurs if you
+// try to create a forecast for a job that has an `over_field_name` in its
+// configuration.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast
+func (p *MethodMl) Forecast(jobid string) *ml_forecast.Forecast {
+ _forecast := ml_forecast.NewForecastFunc(p.tp)
+ return _forecast(jobid)
+}
+
+// Get anomaly detection job results for buckets.
+// The API presents a chronological view of the records, grouped by bucket.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets
+func (p *MethodMl) GetBuckets(jobid string) *ml_get_buckets.GetBuckets {
+ _getbuckets := ml_get_buckets.NewGetBucketsFunc(p.tp)
+ return _getbuckets(jobid)
+}
+
+// Get info about events in calendars.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events
+func (p *MethodMl) GetCalendarEvents(calendarid string) *ml_get_calendar_events.GetCalendarEvents {
+ _getcalendarevents := ml_get_calendar_events.NewGetCalendarEventsFunc(p.tp)
+ return _getcalendarevents(calendarid)
+}
+
+// Get calendar configuration info.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars
+func (p *MethodMl) GetCalendars() *ml_get_calendars.GetCalendars {
+ _getcalendars := ml_get_calendars.NewGetCalendarsFunc(p.tp)
+ return _getcalendars()
+}
+
+// Get anomaly detection job results for categories.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories
+func (p *MethodMl) GetCategories(jobid string) *ml_get_categories.GetCategories {
+ _getcategories := ml_get_categories.NewGetCategoriesFunc(p.tp)
+ return _getcategories(jobid)
+}
+
+// Get data frame analytics job configuration info.
+// You can get information for multiple data frame analytics jobs in a single
+// API request by using a comma-separated list of data frame analytics jobs or a
+// wildcard expression.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics
+func (p *MethodMl) GetDataFrameAnalytics() *ml_get_data_frame_analytics.GetDataFrameAnalytics {
+ _getdataframeanalytics := ml_get_data_frame_analytics.NewGetDataFrameAnalyticsFunc(p.tp)
+ return _getdataframeanalytics()
+}
+
+// Get data frame analytics job stats.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats
+func (p *MethodMl) GetDataFrameAnalyticsStats() *ml_get_data_frame_analytics_stats.GetDataFrameAnalyticsStats {
+ _getdataframeanalyticsstats := ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStatsFunc(p.tp)
+ return _getdataframeanalyticsstats()
+}
+
+// Get datafeed stats.
+// You can get statistics for multiple datafeeds in a single API request by
+// using a comma-separated list of datafeeds or a wildcard expression. You can
+// get statistics for all datafeeds by using `_all`, by specifying `*` as the
+// `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the
+// only information you receive is the `datafeed_id` and the `state`.
+// This API returns a maximum of 10,000 datafeeds.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats
+func (p *MethodMl) GetDatafeedStats() *ml_get_datafeed_stats.GetDatafeedStats {
+ _getdatafeedstats := ml_get_datafeed_stats.NewGetDatafeedStatsFunc(p.tp)
+ return _getdatafeedstats()
+}
+
+// Get datafeeds configuration info.
+// You can get information for multiple datafeeds in a single API request by
+// using a comma-separated list of datafeeds or a wildcard expression. You can
+// get information for all datafeeds by using `_all`, by specifying `*` as the
+// `<feed_id>`, or by omitting the `<feed_id>`.
+// This API returns a maximum of 10,000 datafeeds.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds
+func (p *MethodMl) GetDatafeeds() *ml_get_datafeeds.GetDatafeeds {
+ _getdatafeeds := ml_get_datafeeds.NewGetDatafeedsFunc(p.tp)
+ return _getdatafeeds()
+}
+
+// Get filters.
+// You can get a single filter or all filters.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters
+func (p *MethodMl) GetFilters() *ml_get_filters.GetFilters {
+ _getfilters := ml_get_filters.NewGetFiltersFunc(p.tp)
+ return _getfilters()
+}
+
+// Get anomaly detection job results for influencers.
+// Influencers are the entities that have contributed to, or are to blame for,
+// the anomalies. Influencer results are available only if an
+// `influencer_field_name` is specified in the job configuration.
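+//
+// A minimal call sketch (placeholder job ID; `ml` is a *MethodMl):
+//
+//	res, err := ml.GetInfluencers("my-job").Do(context.Background())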
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers
+func (p *MethodMl) GetInfluencers(jobid string) *ml_get_influencers.GetInfluencers {
+ _getinfluencers := ml_get_influencers.NewGetInfluencersFunc(p.tp)
+ return _getinfluencers(jobid)
+}
+
+// Get anomaly detection job stats.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats
+func (p *MethodMl) GetJobStats() *ml_get_job_stats.GetJobStats {
+ _getjobstats := ml_get_job_stats.NewGetJobStatsFunc(p.tp)
+ return _getjobstats()
+}
+
+// Get anomaly detection jobs configuration info.
+// You can get information for multiple anomaly detection jobs in a single API
+// request by using a group name, a comma-separated list of jobs, or a wildcard
+// expression. You can get information for all anomaly detection jobs by using
+// `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs
+func (p *MethodMl) GetJobs() *ml_get_jobs.GetJobs {
+ _getjobs := ml_get_jobs.NewGetJobsFunc(p.tp)
+ return _getjobs()
+}
+
+// Get machine learning memory usage info.
+// Get information about how machine learning jobs and trained models are using
+// memory on each node, both within the JVM heap and natively, outside of the
+// JVM.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats
+func (p *MethodMl) GetMemoryStats() *ml_get_memory_stats.GetMemoryStats {
+ _getmemorystats := ml_get_memory_stats.NewGetMemoryStatsFunc(p.tp)
+ return _getmemorystats()
+}
+
+// Get anomaly detection job model snapshot upgrade usage info.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats
+func (p *MethodMl) GetModelSnapshotUpgradeStats(jobid, snapshotid string) *ml_get_model_snapshot_upgrade_stats.GetModelSnapshotUpgradeStats {
+ _getmodelsnapshotupgradestats := ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStatsFunc(p.tp)
+ return _getmodelsnapshotupgradestats(jobid, snapshotid)
+}
+
+// Get model snapshots info.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots
+func (p *MethodMl) GetModelSnapshots(jobid string) *ml_get_model_snapshots.GetModelSnapshots {
+ _getmodelsnapshots := ml_get_model_snapshots.NewGetModelSnapshotsFunc(p.tp)
+ return _getmodelsnapshots(jobid)
+}
+
+// Get overall bucket results.
+//
+// Retrieves overall bucket results that summarize the bucket results of
+// multiple anomaly detection jobs.
+//
+// The `overall_score` is calculated by combining the scores of all the
+// buckets within the overall bucket span. First, the maximum
+// `anomaly_score` per anomaly detection job in the overall bucket is
+// calculated. Then the `top_n` of those scores are averaged to result in
+// the `overall_score`. This means that you can fine-tune the
+// `overall_score` so that it is more or less sensitive to the number of
+// jobs that detect an anomaly at the same time. For example, if you set
+// `top_n` to `1`, the `overall_score` is the maximum bucket score in the
+// overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+// the `overall_score` is high only when all jobs detect anomalies in that
+// overall bucket.
If you set the `bucket_span` parameter (to a value +// greater than its default), the `overall_score` is the maximum +// `overall_score` of the overall buckets that have a span equal to the +// jobs' largest bucket span. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets +func (p *MethodMl) GetOverallBuckets(jobid string) *ml_get_overall_buckets.GetOverallBuckets { + _getoverallbuckets := ml_get_overall_buckets.NewGetOverallBucketsFunc(p.tp) + return _getoverallbuckets(jobid) +} + +// Get anomaly records for an anomaly detection job. +// Records contain the detailed analytical results. They describe the anomalous +// activity that has been identified in the input data based on the detector +// configuration. +// There can be many anomaly records depending on the characteristics and size +// of the input data. In practice, there are often too many to be able to +// manually process them. The machine learning features therefore perform a +// sophisticated aggregation of the anomaly records into buckets. +// The number of record results depends on the number of anomalies found in each +// bucket, which relates to the number of time series being modeled and the +// number of detectors. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records +func (p *MethodMl) GetRecords(jobid string) *ml_get_records.GetRecords { + _getrecords := ml_get_records.NewGetRecordsFunc(p.tp) + return _getrecords(jobid) +} + +// Get trained model configuration info. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models +func (p *MethodMl) GetTrainedModels() *ml_get_trained_models.GetTrainedModels { + _gettrainedmodels := ml_get_trained_models.NewGetTrainedModelsFunc(p.tp) + return _gettrainedmodels() +} + +// Get trained models usage info. +// You can get usage information for multiple trained +// models in a single API request by using a comma-separated list of model IDs +// or a wildcard expression. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats +func (p *MethodMl) GetTrainedModelsStats() *ml_get_trained_models_stats.GetTrainedModelsStats { + _gettrainedmodelsstats := ml_get_trained_models_stats.NewGetTrainedModelsStatsFunc(p.tp) + return _gettrainedmodelsstats() +} + +// Evaluate a trained model. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model +func (p *MethodMl) InferTrainedModel(modelid string) *ml_infer_trained_model.InferTrainedModel { + _infertrainedmodel := ml_infer_trained_model.NewInferTrainedModelFunc(p.tp) + return _infertrainedmodel(modelid) +} + +// Get machine learning information. +// Get defaults and limits used by machine learning. +// This endpoint is designed to be used by a user interface that needs to fully +// understand machine learning configurations where some options are not +// specified, meaning that the defaults should be used. This endpoint may be +// used to find out what those defaults are. It also provides information about +// the maximum size of machine learning jobs that could run in the current +// cluster configuration. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info +func (p *MethodMl) Info() *ml_info.Info { + _info := ml_info.NewInfoFunc(p.tp) + return _info() +} + +// Open anomaly detection jobs. +// +// An anomaly detection job must be opened to be ready to receive and analyze +// data. 
It can be opened and closed multiple times throughout its lifecycle. +// When you open a new job, it starts with an empty model. +// When you open an existing job, the most recent model state is automatically +// loaded. The job is ready to resume its analysis from where it left off, once +// new data is received. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job +func (p *MethodMl) OpenJob(jobid string) *ml_open_job.OpenJob { + _openjob := ml_open_job.NewOpenJobFunc(p.tp) + return _openjob(jobid) +} + +// Add scheduled events to the calendar. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events +func (p *MethodMl) PostCalendarEvents(calendarid string) *ml_post_calendar_events.PostCalendarEvents { + _postcalendarevents := ml_post_calendar_events.NewPostCalendarEventsFunc(p.tp) + return _postcalendarevents(calendarid) +} + +// Send data to an anomaly detection job for analysis. +// +// IMPORTANT: For each job, data can be accepted from only a single connection +// at a time. +// It is not currently possible to post data to multiple jobs using wildcards or +// a comma-separated list. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data +func (p *MethodMl) PostData(jobid string) *ml_post_data.PostData { + _postdata := ml_post_data.NewPostDataFunc(p.tp) + return _postdata(jobid) +} + +// Preview features used by data frame analytics. +// Preview the extracted features used by a data frame analytics config. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics +func (p *MethodMl) PreviewDataFrameAnalytics() *ml_preview_data_frame_analytics.PreviewDataFrameAnalytics { + _previewdataframeanalytics := ml_preview_data_frame_analytics.NewPreviewDataFrameAnalyticsFunc(p.tp) + return _previewdataframeanalytics() +} + +// Preview a datafeed. +// This API returns the first "page" of search results from a datafeed. +// You can preview an existing datafeed or provide configuration details for a +// datafeed +// and anomaly detection job in the API. The preview shows the structure of the +// data +// that will be passed to the anomaly detection engine. +// IMPORTANT: When Elasticsearch security features are enabled, the preview uses +// the credentials of the user that +// called the API. However, when the datafeed starts it uses the roles of the +// last user that created or updated the +// datafeed. To get a preview that accurately reflects the behavior of the +// datafeed, use the appropriate credentials. +// You can also use secondary authorization headers to supply the credentials. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed +func (p *MethodMl) PreviewDatafeed() *ml_preview_datafeed.PreviewDatafeed { + _previewdatafeed := ml_preview_datafeed.NewPreviewDatafeedFunc(p.tp) + return _previewdatafeed() +} + +// Create a calendar. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar +func (p *MethodMl) PutCalendar(calendarid string) *ml_put_calendar.PutCalendar { + _putcalendar := ml_put_calendar.NewPutCalendarFunc(p.tp) + return _putcalendar(calendarid) +} + +// Add anomaly detection job to calendar. 
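+//
+// A minimal call sketch; both IDs are placeholders:
+//
+//	res, err := ml.PutCalendarJob("my-calendar", "my-job").Do(context.Background())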
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job
+func (p *MethodMl) PutCalendarJob(calendarid, jobid string) *ml_put_calendar_job.PutCalendarJob {
+	_putcalendarjob := ml_put_calendar_job.NewPutCalendarJobFunc(p.tp)
+	return _putcalendarjob(calendarid, jobid)
+}
+
+// Create a data frame analytics job.
+// This API creates a data frame analytics job that performs an analysis on the
+// source indices and stores the outcome in a destination index.
+// By default, the query used in the source configuration is `{"match_all":
+// {}}`.
+//
+// If the destination index does not exist, it is created automatically when you
+// start the job.
+//
+// If you supply only a subset of the regression or classification parameters,
+// hyperparameter optimization occurs. It determines a value for each of the
+// undefined parameters.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics
+func (p *MethodMl) PutDataFrameAnalytics(id string) *ml_put_data_frame_analytics.PutDataFrameAnalytics {
+	_putdataframeanalytics := ml_put_data_frame_analytics.NewPutDataFrameAnalyticsFunc(p.tp)
+	return _putdataframeanalytics(id)
+}
+
+// Create a datafeed.
+// Datafeeds retrieve data from Elasticsearch for analysis by an anomaly
+// detection job.
+// You can associate only one datafeed with each anomaly detection job.
+// The datafeed contains a query that runs at a defined interval (`frequency`).
+// If you are concerned about delayed data, you can add a delay (`query_delay`)
+// at each interval.
+// By default, the datafeed uses the following query: `{"match_all": {"boost":
+// 1}}`.
+//
+// When Elasticsearch security features are enabled, your datafeed remembers
+// which roles the user who created it had
+// at the time of creation and runs the query using those same roles. If you
+// provide secondary authorization headers,
+// those credentials are used instead.
+// You must use Kibana, this API, or the create anomaly detection jobs API to
+// create a datafeed. Do not add a datafeed
+// directly to the `.ml-config` index. Do not give users `write` privileges on
+// the `.ml-config` index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed
+func (p *MethodMl) PutDatafeed(datafeedid string) *ml_put_datafeed.PutDatafeed {
+	_putdatafeed := ml_put_datafeed.NewPutDatafeedFunc(p.tp)
+	return _putdatafeed(datafeedid)
+}
+
+// Create a filter.
+// A filter contains a list of strings. It can be used by one or more anomaly
+// detection jobs.
+// Specifically, filters are referenced in the `custom_rules` property of
+// detector configuration objects.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter
+func (p *MethodMl) PutFilter(filterid string) *ml_put_filter.PutFilter {
+	_putfilter := ml_put_filter.NewPutFilterFunc(p.tp)
+	return _putfilter(filterid)
+}
+
+// Create an anomaly detection job.
+//
+// If you include a `datafeed_config`, you must have read index privileges on
+// the source index.
+// If you include a `datafeed_config` but do not provide a query, the datafeed
+// uses `{"match_all": {"boost": 1}}`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job
+func (p *MethodMl) PutJob(jobid string) *ml_put_job.PutJob {
+	_putjob := ml_put_job.NewPutJobFunc(p.tp)
+	return _putjob(jobid)
+}
+
+// Create a trained model.
+// Enables you to supply a trained model that is not created by data frame
+// analytics.
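PutJob and PutDatafeed above pair naturally: one job, at most one datafeed. A hedged sketch; the helper name and JSON bodies are illustrative (field names follow the public ML API), and `Raw(...)`/`Do(ctx)` are assumed builder conventions:

```go
import (
	"context"
	"strings"
)

// createJobWithDatafeed creates a job, then attaches a datafeed that polls
// server-metrics with a 60s query_delay to tolerate late-arriving data.
func createJobWithDatafeed(ctx context.Context, ml *MethodMl, jobID, feedID string) error {
	job := `{
		"analysis_config": {
			"bucket_span": "15m",
			"detectors": [{"function": "mean", "field_name": "responsetime"}]
		},
		"data_description": {"time_field": "@timestamp"}
	}`
	if _, err := ml.PutJob(jobID).Raw(strings.NewReader(job)).Do(ctx); err != nil {
		return err
	}
	feed := `{"indices": ["server-metrics"], "job_id": "` + jobID + `", "query_delay": "60s"}`
	_, err := ml.PutDatafeed(feedID).Raw(strings.NewReader(feed)).Do(ctx)
	return err
}
```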
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model
+func (p *MethodMl) PutTrainedModel(modelid string) *ml_put_trained_model.PutTrainedModel {
+	_puttrainedmodel := ml_put_trained_model.NewPutTrainedModelFunc(p.tp)
+	return _puttrainedmodel(modelid)
+}
+
+// Create or update a trained model alias.
+// A trained model alias is a logical name used to reference a single trained
+// model.
+// You can use aliases instead of trained model identifiers to make it easier to
+// reference your models. For example, you can use aliases in inference
+// aggregations and processors.
+// An alias must be unique and refer to only a single trained model. However,
+// you can have multiple aliases for each trained model.
+// If you use this API to update an alias such that it references a different
+// trained model ID and the model uses a different type of data frame analytics,
+// an error occurs. For example, this situation occurs if you have a trained
+// model for regression analysis and a trained model for classification
+// analysis; you cannot reassign an alias from one type of trained model to
+// another.
+// If you use this API to update an alias and there are very few input fields in
+// common between the old and new trained models for the model alias, the API
+// returns a warning.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias
+func (p *MethodMl) PutTrainedModelAlias(modelid, modelalias string) *ml_put_trained_model_alias.PutTrainedModelAlias {
+	_puttrainedmodelalias := ml_put_trained_model_alias.NewPutTrainedModelAliasFunc(p.tp)
+	return _puttrainedmodelalias(modelid, modelalias)
+}
+
+// Create part of a trained model definition.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part
+func (p *MethodMl) PutTrainedModelDefinitionPart(modelid, part string) *ml_put_trained_model_definition_part.PutTrainedModelDefinitionPart {
+	_puttrainedmodeldefinitionpart := ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPartFunc(p.tp)
+	return _puttrainedmodeldefinitionpart(modelid, part)
+}
+
+// Create a trained model vocabulary.
+// This API is supported only for natural language processing (NLP) models.
+// The vocabulary is stored in the index as described in
+// `inference_config.*.vocabulary` of the trained model definition.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary
+func (p *MethodMl) PutTrainedModelVocabulary(modelid string) *ml_put_trained_model_vocabulary.PutTrainedModelVocabulary {
+	_puttrainedmodelvocabulary := ml_put_trained_model_vocabulary.NewPutTrainedModelVocabularyFunc(p.tp)
+	return _puttrainedmodelvocabulary(modelid)
+}
+
+// Reset an anomaly detection job.
+// All model state and results are deleted. The job is ready to start over as if
+// it had just been created.
+// It is not currently possible to reset multiple jobs using wildcards or a
+// comma-separated list.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job
+func (p *MethodMl) ResetJob(jobid string) *ml_reset_job.ResetJob {
+	_resetjob := ml_reset_job.NewResetJobFunc(p.tp)
+	return _resetjob(jobid)
+}
+
+// Revert to a snapshot.
+// The machine learning features react quickly to anomalous input, learning new
+// behaviors in data.
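Because an alias may point at only one model while a model may carry several aliases, repointing the alias is the whole cutover. A sketch (helper name illustrative, `Do(ctx)` assumed):

```go
import "context"

// promoteModel repoints an existing alias at a newly uploaded model.
// Callers keep using the alias name; mismatched analysis types are
// rejected server-side, per the comment above.
func promoteModel(ctx context.Context, ml *MethodMl, newModelID, alias string) error {
	_, err := ml.PutTrainedModelAlias(newModelID, alias).Do(ctx)
	return err
}
```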
Highly anomalous input increases the variance in the +// models whilst the system learns whether this is a new step-change in behavior +// or a one-off event. In the case where this anomalous input is known to be a +// one-off, then it might be appropriate to reset the model state to a time +// before this event. For example, you might consider reverting to a saved +// snapshot after Black Friday or a critical system failure. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot +func (p *MethodMl) RevertModelSnapshot(jobid, snapshotid string) *ml_revert_model_snapshot.RevertModelSnapshot { + _revertmodelsnapshot := ml_revert_model_snapshot.NewRevertModelSnapshotFunc(p.tp) + return _revertmodelsnapshot(jobid, snapshotid) +} + +// Set upgrade_mode for ML indices. +// Sets a cluster wide upgrade_mode setting that prepares machine learning +// indices for an upgrade. +// When upgrading your cluster, in some circumstances you must restart your +// nodes and reindex your machine learning indices. In those circumstances, +// there must be no machine learning jobs running. You can close the machine +// learning jobs, do the upgrade, then open all the jobs again. Alternatively, +// you can use this API to temporarily halt tasks associated with the jobs and +// datafeeds and prevent new jobs from opening. You can also use this API +// during upgrades that do not require you to reindex your machine learning +// indices, though stopping jobs is not a requirement in that case. +// You can see the current value for the upgrade_mode setting by using the get +// machine learning info API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode +func (p *MethodMl) SetUpgradeMode() *ml_set_upgrade_mode.SetUpgradeMode { + _setupgrademode := ml_set_upgrade_mode.NewSetUpgradeModeFunc(p.tp) + return _setupgrademode() +} + +// Start a data frame analytics job. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. +// If the destination index does not exist, it is created automatically the +// first time you start the data frame analytics job. The +// `index.number_of_shards` and `index.number_of_replicas` settings for the +// destination index are copied from the source index. If there are multiple +// source indices, the destination index copies the highest setting values. The +// mappings for the destination index are also copied from the source indices. +// If there are any mapping conflicts, the job fails to start. +// If the destination index exists, it is used as is. You can therefore set up +// the destination index in advance with custom settings and mappings. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics +func (p *MethodMl) StartDataFrameAnalytics(id string) *ml_start_data_frame_analytics.StartDataFrameAnalytics { + _startdataframeanalytics := ml_start_data_frame_analytics.NewStartDataFrameAnalyticsFunc(p.tp) + return _startdataframeanalytics(id) +} + +// Start datafeeds. +// +// A datafeed must be started in order to retrieve data from Elasticsearch. A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +// +// Before you can start a datafeed, the anomaly detection job must be open. +// Otherwise, an error occurs. +// +// If you restart a stopped datafeed, it continues processing input data from +// the next millisecond after it was stopped. 
+// If new data was indexed for that exact millisecond between stopping and +// starting, it will be ignored. +// +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the last user to create or +// update it had at the time of creation or update and runs the query using +// those same roles. If you provided secondary +// authorization headers when you created or updated the datafeed, those +// credentials are used instead. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed +func (p *MethodMl) StartDatafeed(datafeedid string) *ml_start_datafeed.StartDatafeed { + _startdatafeed := ml_start_datafeed.NewStartDatafeedFunc(p.tp) + return _startdatafeed(datafeedid) +} + +// Start a trained model deployment. +// It allocates the model to every machine learning node. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment +func (p *MethodMl) StartTrainedModelDeployment(modelid string) *ml_start_trained_model_deployment.StartTrainedModelDeployment { + _starttrainedmodeldeployment := ml_start_trained_model_deployment.NewStartTrainedModelDeploymentFunc(p.tp) + return _starttrainedmodeldeployment(modelid) +} + +// Stop data frame analytics jobs. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics +func (p *MethodMl) StopDataFrameAnalytics(id string) *ml_stop_data_frame_analytics.StopDataFrameAnalytics { + _stopdataframeanalytics := ml_stop_data_frame_analytics.NewStopDataFrameAnalyticsFunc(p.tp) + return _stopdataframeanalytics(id) +} + +// Stop datafeeds. +// A datafeed that is stopped ceases to retrieve data from Elasticsearch. A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed +func (p *MethodMl) StopDatafeed(datafeedid string) *ml_stop_datafeed.StopDatafeed { + _stopdatafeed := ml_stop_datafeed.NewStopDatafeedFunc(p.tp) + return _stopdatafeed(datafeedid) +} + +// Stop a trained model deployment. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment +func (p *MethodMl) StopTrainedModelDeployment(modelid string) *ml_stop_trained_model_deployment.StopTrainedModelDeployment { + _stoptrainedmodeldeployment := ml_stop_trained_model_deployment.NewStopTrainedModelDeploymentFunc(p.tp) + return _stoptrainedmodeldeployment(modelid) +} + +// Update a data frame analytics job. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics +func (p *MethodMl) UpdateDataFrameAnalytics(id string) *ml_update_data_frame_analytics.UpdateDataFrameAnalytics { + _updatedataframeanalytics := ml_update_data_frame_analytics.NewUpdateDataFrameAnalyticsFunc(p.tp) + return _updatedataframeanalytics(id) +} + +// Update a datafeed. +// You must stop and start the datafeed for the changes to be applied. +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the user who updated it had at +// the time of the update and runs the query using those same roles. If you +// provide secondary authorization headers, +// those credentials are used instead. 
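StartTrainedModelDeployment, InferTrainedModel, and StopTrainedModelDeployment form the deployment lifecycle. A sketch with the same caveats (`Raw(...)`/`Do(ctx)` assumed, inference body illustrative):

```go
import (
	"context"
	"strconv"
	"strings"
)

// inferOnce deploys a model, runs a single inference call, and tears the
// deployment down again (cleanup is best-effort).
func inferOnce(ctx context.Context, ml *MethodMl, modelID, text string) error {
	if _, err := ml.StartTrainedModelDeployment(modelID).Do(ctx); err != nil {
		return err
	}
	defer ml.StopTrainedModelDeployment(modelID).Do(ctx)
	body := `{"docs": [{"text_field": ` + strconv.Quote(text) + `}]}`
	_, err := ml.InferTrainedModel(modelID).Raw(strings.NewReader(body)).Do(ctx)
	return err
}
```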
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed +func (p *MethodMl) UpdateDatafeed(datafeedid string) *ml_update_datafeed.UpdateDatafeed { + _updatedatafeed := ml_update_datafeed.NewUpdateDatafeedFunc(p.tp) + return _updatedatafeed(datafeedid) +} + +// Update a filter. +// Updates the description of a filter, adds items, or removes items from the +// list. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter +func (p *MethodMl) UpdateFilter(filterid string) *ml_update_filter.UpdateFilter { + _updatefilter := ml_update_filter.NewUpdateFilterFunc(p.tp) + return _updatefilter(filterid) +} + +// Update an anomaly detection job. +// Updates certain properties of an anomaly detection job. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job +func (p *MethodMl) UpdateJob(jobid string) *ml_update_job.UpdateJob { + _updatejob := ml_update_job.NewUpdateJobFunc(p.tp) + return _updatejob(jobid) +} + +// Update a snapshot. +// Updates certain properties of a snapshot. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot +func (p *MethodMl) UpdateModelSnapshot(jobid, snapshotid string) *ml_update_model_snapshot.UpdateModelSnapshot { + _updatemodelsnapshot := ml_update_model_snapshot.NewUpdateModelSnapshotFunc(p.tp) + return _updatemodelsnapshot(jobid, snapshotid) +} + +// Update a trained model deployment. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment +func (p *MethodMl) UpdateTrainedModelDeployment(modelid string) *ml_update_trained_model_deployment.UpdateTrainedModelDeployment { + _updatetrainedmodeldeployment := ml_update_trained_model_deployment.NewUpdateTrainedModelDeploymentFunc(p.tp) + return _updatetrainedmodeldeployment(modelid) +} + +// Upgrade a snapshot. +// Upgrade an anomaly detection model snapshot to the latest major version. +// Over time, older snapshot formats are deprecated and removed. Anomaly +// detection jobs support only snapshots that are from the current or previous +// major version. +// This API provides a means to upgrade a snapshot to the current major version. +// This aids in preparing the cluster for an upgrade to the next major version. +// Only one snapshot per anomaly detection job can be upgraded at a time and the +// upgraded snapshot cannot be the current snapshot of the anomaly detection +// job. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot +func (p *MethodMl) UpgradeJobSnapshot(jobid, snapshotid string) *ml_upgrade_job_snapshot.UpgradeJobSnapshot { + _upgradejobsnapshot := ml_upgrade_job_snapshot.NewUpgradeJobSnapshotFunc(p.tp) + return _upgradejobsnapshot(jobid, snapshotid) +} + +// Validate an anomaly detection job. +// https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html +func (p *MethodMl) Validate() *ml_validate.Validate { + _validate := ml_validate.NewValidateFunc(p.tp) + return _validate() +} + +// Validate an anomaly detection job. +// https://www.elastic.co/docs/api/doc/elasticsearch +func (p *MethodMl) ValidateDetector() *ml_validate_detector.ValidateDetector { + _validatedetector := ml_validate_detector.NewValidateDetectorFunc(p.tp) + return _validatedetector() +} + +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. 
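UpgradeJobSnapshot is the pre-upgrade maintenance step described above: one snapshot per job at a time, never the job's current snapshot. A minimal sketch (`Do(ctx)` assumed):

```go
import "context"

// upgradeSnapshot upgrades one model snapshot ahead of a cluster upgrade.
func upgradeSnapshot(ctx context.Context, ml *MethodMl, jobID, snapshotID string) error {
	_, err := ml.UpgradeJobSnapshot(jobID, snapshotID).Do(ctx)
	return err
}
```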
+// https://www.elastic.co/docs/api/doc/elasticsearch +func (p *MethodMonitoring) Bulk() *monitoring_bulk.Bulk { + _bulk := monitoring_bulk.NewBulkFunc(p.tp) + return _bulk() +} + +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive +func (p *MethodNodes) ClearRepositoriesMeteringArchive(nodeid, maxarchiveversion string) *nodes_clear_repositories_metering_archive.ClearRepositoriesMeteringArchive { + _clearrepositoriesmeteringarchive := nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchiveFunc(p.tp) + return _clearrepositoriesmeteringarchive(nodeid, maxarchiveversion) +} + +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info +func (p *MethodNodes) GetRepositoriesMeteringInfo(nodeid string) *nodes_get_repositories_metering_info.GetRepositoriesMeteringInfo { + _getrepositoriesmeteringinfo := nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfoFunc(p.tp) + return _getrepositoriesmeteringinfo(nodeid) +} + +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads +func (p *MethodNodes) HotThreads() *nodes_hot_threads.HotThreads { + _hotthreads := nodes_hot_threads.NewHotThreadsFunc(p.tp) + return _hotthreads() +} + +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info +func (p *MethodNodes) Info() *nodes_info.Info { + _info := nodes_info.NewInfoFunc(p.tp) + return _info() +} + +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. +// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. 
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings +func (p *MethodNodes) ReloadSecureSettings() *nodes_reload_secure_settings.ReloadSecureSettings { + _reloadsecuresettings := nodes_reload_secure_settings.NewReloadSecureSettingsFunc(p.tp) + return _reloadsecuresettings() +} + +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats +func (p *MethodNodes) Stats() *nodes_stats.Stats { + _stats := nodes_stats.NewStatsFunc(p.tp) + return _stats() +} + +// Get feature usage information. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage +func (p *MethodNodes) Usage() *nodes_usage.Usage { + _usage := nodes_usage.NewUsageFunc(p.tp) + return _usage() +} + +// Extracts a UI-optimized structure to render flamegraphs from Universal +// Profiling. +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func (p *MethodProfiling) Flamegraph() *profiling_flamegraph.Flamegraph { + _flamegraph := profiling_flamegraph.NewFlamegraphFunc(p.tp) + return _flamegraph() +} + +// Extracts raw stacktrace information from Universal Profiling. +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func (p *MethodProfiling) Stacktraces() *profiling_stacktraces.Stacktraces { + _stacktraces := profiling_stacktraces.NewStacktracesFunc(p.tp) + return _stacktraces() +} + +// Returns basic information about the status of Universal Profiling. +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func (p *MethodProfiling) Status() *profiling_status.Status { + _status := profiling_status.NewStatusFunc(p.tp) + return _status() +} + +// Extracts a list of topN functions from Universal Profiling. +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func (p *MethodProfiling) TopnFunctions() *profiling_topn_functions.TopnFunctions { + _topnfunctions := profiling_topn_functions.NewTopnFunctionsFunc(p.tp) + return _topnfunctions() +} + +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule +func (p *MethodQueryRules) DeleteRule(rulesetid, ruleid string) *query_rules_delete_rule.DeleteRule { + _deleterule := query_rules_delete_rule.NewDeleteRuleFunc(p.tp) + return _deleterule(rulesetid, ruleid) +} + +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset +func (p *MethodQueryRules) DeleteRuleset(rulesetid string) *query_rules_delete_ruleset.DeleteRuleset { + _deleteruleset := query_rules_delete_ruleset.NewDeleteRulesetFunc(p.tp) + return _deleteruleset(rulesetid) +} + +// Get a query rule. +// Get details about a query rule within a query ruleset. 
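The node endpoints above compose into a quick health pass. A sketch (`Do(ctx)` assumed; note that hot threads responds with plain text, so real code would read the raw body rather than a parsed struct):

```go
import (
	"context"
	"fmt"
)

// nodeHealth grabs hot threads, then node stats, for all nodes.
func nodeHealth(ctx context.Context, nodes *MethodNodes) error {
	if _, err := nodes.HotThreads().Do(ctx); err != nil {
		return fmt.Errorf("hot threads: %w", err)
	}
	if _, err := nodes.Stats().Do(ctx); err != nil {
		return fmt.Errorf("node stats: %w", err)
	}
	return nil
}
```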
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule
+func (p *MethodQueryRules) GetRule(rulesetid, ruleid string) *query_rules_get_rule.GetRule {
+	_getrule := query_rules_get_rule.NewGetRuleFunc(p.tp)
+	return _getrule(rulesetid, ruleid)
+}
+
+// Get a query ruleset.
+// Get details about a query ruleset.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset
+func (p *MethodQueryRules) GetRuleset(rulesetid string) *query_rules_get_ruleset.GetRuleset {
+	_getruleset := query_rules_get_ruleset.NewGetRulesetFunc(p.tp)
+	return _getruleset(rulesetid)
+}
+
+// Get all query rulesets.
+// Get summarized information about the query rulesets.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets
+func (p *MethodQueryRules) ListRulesets() *query_rules_list_rulesets.ListRulesets {
+	_listrulesets := query_rules_list_rulesets.NewListRulesetsFunc(p.tp)
+	return _listrulesets()
+}
+
+// Create or update a query rule.
+// Create or update a query rule within a query ruleset.
+//
+// IMPORTANT: Due to limitations within pinned queries, you can only pin
+// documents using `ids` or `docs`, but cannot use both in a single rule.
+// It is advised to use one or the other in query rulesets, to avoid errors.
+// Additionally, pinned queries have a maximum limit of 100 pinned hits.
+// If multiple matching rules pin more than 100 documents, only the first 100
+// documents are pinned in the order they are specified in the ruleset.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule
+func (p *MethodQueryRules) PutRule(rulesetid, ruleid string) *query_rules_put_rule.PutRule {
+	_putrule := query_rules_put_rule.NewPutRuleFunc(p.tp)
+	return _putrule(rulesetid, ruleid)
+}
+
+// Create or update a query ruleset.
+// There is a limit of 100 rules per ruleset.
+// This limit can be increased by using the
+// `xpack.applications.rules.max_rules_per_ruleset` cluster setting.
+//
+// IMPORTANT: Due to limitations within pinned queries, you can only select
+// documents using `ids` or `docs`, but cannot use both in a single rule.
+// It is advised to use one or the other in query rulesets, to avoid errors.
+// Additionally, pinned queries have a maximum limit of 100 pinned hits.
+// If multiple matching rules pin more than 100 documents, only the first 100
+// documents are pinned in the order they are specified in the ruleset.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset
+func (p *MethodQueryRules) PutRuleset(rulesetid string) *query_rules_put_ruleset.PutRuleset {
+	_putruleset := query_rules_put_ruleset.NewPutRulesetFunc(p.tp)
+	return _putruleset(rulesetid)
+}
+
+// Test a query ruleset.
+// Evaluate match criteria against a query ruleset to identify the rules that
+// would match that criteria.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test
+func (p *MethodQueryRules) Test(rulesetid string) *query_rules_test.Test {
+	_test := query_rules_test.NewTestFunc(p.tp)
+	return _test(rulesetid)
+}
+
+// Delete a rollup job.
+//
+// A job must be stopped before it can be deleted.
+// If you attempt to delete a started job, an error occurs.
+// Similarly, if you attempt to delete a nonexistent job, an exception occurs.
+//
+// IMPORTANT: When you delete a job, you remove only the process that is
+// actively monitoring and rolling up data.
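A ruleset that respects the pinned-query constraints above (`ids` only, well under the 100-hit cap), followed by a test call. The JSON bodies and the `Raw(...)`/`Do(ctx)` calls are illustrative:

```go
import (
	"context"
	"strings"
)

// pinDoc creates a one-rule ruleset that pins doc-1 for the query "help",
// then verifies the rule matches via the test endpoint.
func pinDoc(ctx context.Context, qr *MethodQueryRules, rulesetID string) error {
	ruleset := `{
		"rules": [{
			"rule_id": "promote-help-doc",
			"type": "pinned",
			"criteria": [{"type": "exact", "metadata": "user_query", "values": ["help"]}],
			"actions": {"ids": ["doc-1"]}
		}]
	}`
	if _, err := qr.PutRuleset(rulesetID).Raw(strings.NewReader(ruleset)).Do(ctx); err != nil {
		return err
	}
	criteria := `{"match_criteria": {"user_query": "help"}}`
	_, err := qr.Test(rulesetID).Raw(strings.NewReader(criteria)).Do(ctx)
	return err
}
```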
+// The API does not delete any previously rolled up data. +// This is by design; a user may wish to roll up a static data set. +// Because the data set is static, after it has been fully rolled up there is no +// need to keep the indexing rollup job around (as there will be no new data). +// Thus the job can be deleted, leaving behind the rolled up data for analysis. +// If you wish to also remove the rollup data and the rollup index contains the +// data for only a single job, you can delete the whole rollup index. +// If the rollup index stores data from several jobs, you must issue a +// delete-by-query that targets the rollup job's identifier in the rollup index. +// For example: +// +// ``` +// POST my_rollup_index/_delete_by_query +// +// { +// "query": { +// "term": { +// "_rollup.id": "the_rollup_job_id" +// } +// } +// } +// +// ``` +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job +func (p *MethodRollup) DeleteJob(id string) *rollup_delete_job.DeleteJob { + _deletejob := rollup_delete_job.NewDeleteJobFunc(p.tp) + return _deletejob(id) +} + +// Get rollup job information. +// Get the configuration, stats, and status of rollup jobs. +// +// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +// If a job was created, ran for a while, then was deleted, the API does not +// return any details about it. +// For details about a historical rollup job, the rollup capabilities API may be +// more useful. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs +func (p *MethodRollup) GetJobs() *rollup_get_jobs.GetJobs { + _getjobs := rollup_get_jobs.NewGetJobsFunc(p.tp) + return _getjobs() +} + +// Get the rollup job capabilities. +// Get the capabilities of any rollup jobs that have been configured for a +// specific index or index pattern. +// +// This API is useful because a rollup job is often configured to rollup only a +// subset of fields from the source index. +// Furthermore, only certain aggregations can be configured for various fields, +// leading to a limited subset of functionality depending on that configuration. +// This API enables you to inspect an index and determine: +// +// 1. Does this index have associated rollup data somewhere in the cluster? +// 2. If yes to the first question, what fields were rolled up, what +// aggregations can be performed, and where does the data live? +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps +func (p *MethodRollup) GetRollupCaps() *rollup_get_rollup_caps.GetRollupCaps { + _getrollupcaps := rollup_get_rollup_caps.NewGetRollupCapsFunc(p.tp) + return _getrollupcaps() +} + +// Get the rollup index capabilities. +// Get the rollup capabilities of all jobs inside of a rollup index. +// A single rollup index may store the data for multiple rollup jobs and may +// have a variety of capabilities depending on those jobs. This API enables you +// to determine: +// +// * What jobs are stored in an index (or indices specified via a pattern)? +// * What target indices were rolled up, what fields were used in those rollups, +// and what aggregations can be performed on each job? 
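Putting the warning above into sequence: stop first, then delete; the rolled-up data stays behind unless you follow up with the delete-by-query shown in the comment (or drop the rollup index). A sketch with `Do(ctx)` assumed:

```go
import "context"

// removeRollupJob stops a rollup job and deletes it, leaving the rolled-up
// data in place for later analysis.
func removeRollupJob(ctx context.Context, rollup *MethodRollup, jobID string) error {
	if _, err := rollup.StopJob(jobID).Do(ctx); err != nil {
		return err
	}
	_, err := rollup.DeleteJob(jobID).Do(ctx)
	return err
}
```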
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps +func (p *MethodRollup) GetRollupIndexCaps(index string) *rollup_get_rollup_index_caps.GetRollupIndexCaps { + _getrollupindexcaps := rollup_get_rollup_index_caps.NewGetRollupIndexCapsFunc(p.tp) + return _getrollupindexcaps(index) +} + +// Create a rollup job. +// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job +func (p *MethodRollup) PutJob(id string) *rollup_put_job.PutJob { + _putjob := rollup_put_job.NewPutJobFunc(p.tp) + return _putjob(id) +} + +// Search rolled-up data. +// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// For more detailed examples of using the rollup search API, including querying +// rolled-up data only or combining rolled-up and live data, refer to the +// External documentation. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search +func (p *MethodRollup) RollupSearch(index string) *rollup_rollup_search.RollupSearch { + _rollupsearch := rollup_rollup_search.NewRollupSearchFunc(p.tp) + return _rollupsearch(index) +} + +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job +func (p *MethodRollup) StartJob(id string) *rollup_start_job.StartJob { + _startjob := rollup_start_job.NewStartJobFunc(p.tp) + return _startjob(id) +} + +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. 
For example:
+//
+// ```
+// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
+// ```
+// The parameter blocks the API call from returning until either the job has
+// moved to STOPPED or the specified time has elapsed.
+// If the specified time elapses without the job moving to STOPPED, a timeout
+// exception occurs.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job
+func (p *MethodRollup) StopJob(id string) *rollup_stop_job.StopJob {
+	_stopjob := rollup_stop_job.NewStopJobFunc(p.tp)
+	return _stopjob(id)
+}
+
+// Delete a search application.
+//
+// Remove a search application and its associated alias. Indices attached to the
+// search application are not removed.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete
+func (p *MethodSearchApplication) Delete(name string) *search_application_delete.Delete {
+	_delete := search_application_delete.NewDeleteFunc(p.tp)
+	return _delete(name)
+}
+
+// Delete a behavioral analytics collection.
+// The associated data stream is also deleted.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics
+func (p *MethodSearchApplication) DeleteBehavioralAnalytics(name string) *search_application_delete_behavioral_analytics.DeleteBehavioralAnalytics {
+	_deletebehavioralanalytics := search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(p.tp)
+	return _deletebehavioralanalytics(name)
+}
+
+// Get search application details.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get
+func (p *MethodSearchApplication) Get(name string) *search_application_get.Get {
+	_get := search_application_get.NewGetFunc(p.tp)
+	return _get(name)
+}
+
+// Get behavioral analytics collections.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics
+func (p *MethodSearchApplication) GetBehavioralAnalytics() *search_application_get_behavioral_analytics.GetBehavioralAnalytics {
+	_getbehavioralanalytics := search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(p.tp)
+	return _getbehavioralanalytics()
+}
+
+// Get search applications.
+// Get information about search applications.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list
+func (p *MethodSearchApplication) List() *search_application_list.List {
+	_list := search_application_list.NewListFunc(p.tp)
+	return _list()
+}
+
+// Create a behavioral analytics collection event.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event
+func (p *MethodSearchApplication) PostBehavioralAnalyticsEvent(collectionname, eventtype string) *search_application_post_behavioral_analytics_event.PostBehavioralAnalyticsEvent {
+	_postbehavioralanalyticsevent := search_application_post_behavioral_analytics_event.NewPostBehavioralAnalyticsEventFunc(p.tp)
+	return _postbehavioralanalyticsevent(collectionname, eventtype)
+}
+
+// Create or update a search application.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put
+func (p *MethodSearchApplication) Put(name string) *search_application_put.Put {
+	_put := search_application_put.NewPutFunc(p.tp)
+	return _put(name)
+}
+
+// Create a behavioral analytics collection.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics
+func (p *MethodSearchApplication) PutBehavioralAnalytics(name string) *search_application_put_behavioral_analytics.PutBehavioralAnalytics {
+	_putbehavioralanalytics := search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(p.tp)
+	return _putbehavioralanalytics(name)
+}
+
+// Render a search application query.
+// Generate an Elasticsearch query using the specified query parameters and the
+// search template associated with the search application or a default template
+// if none is specified.
+// If a parameter used in the search template is not specified in `params`, the
+// parameter's default value will be used.
+// The API returns the specific Elasticsearch query that would be generated and
+// run by calling the search application search API.
+//
+// You must have `read` privileges on the backing alias of the search
+// application.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query
+func (p *MethodSearchApplication) RenderQuery(name string) *search_application_render_query.RenderQuery {
+	_renderquery := search_application_render_query.NewRenderQueryFunc(p.tp)
+	return _renderquery(name)
+}
+
+// Run a search application search.
+// Generate and run an Elasticsearch query that uses the specified query
+// parameter and the search template associated with the search application or
+// default template.
+// Unspecified template parameters are assigned their default values if
+// applicable.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search
+func (p *MethodSearchApplication) Search(name string) *search_application_search.Search {
+	_search := search_application_search.NewSearchFunc(p.tp)
+	return _search(name)
+}
+
+// Get cache statistics.
+// Get statistics about the shared cache for partially mounted indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats
+func (p *MethodSearchableSnapshots) CacheStats() *searchable_snapshots_cache_stats.CacheStats {
+	_cachestats := searchable_snapshots_cache_stats.NewCacheStatsFunc(p.tp)
+	return _cachestats()
+}
+
+// Clear the cache.
+// Clear indices and data streams from the shared cache for partially mounted
+// indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache
+func (p *MethodSearchableSnapshots) ClearCache() *searchable_snapshots_clear_cache.ClearCache {
+	_clearcache := searchable_snapshots_clear_cache.NewClearCacheFunc(p.tp)
+	return _clearcache()
+}
+
+// Mount a snapshot.
+// Mount a snapshot as a searchable snapshot index.
+// Do not use this API for snapshots managed by index lifecycle management
+// (ILM).
+// Manually mounting ILM-managed snapshots can interfere with ILM processes.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount
+func (p *MethodSearchableSnapshots) Mount(repository, snapshot string) *searchable_snapshots_mount.Mount {
+	_mount := searchable_snapshots_mount.NewMountFunc(p.tp)
+	return _mount(repository, snapshot)
+}
+
+// Get searchable snapshot statistics.
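RenderQuery is a cheap way to debug template parameters before running the real search. A sketch (params body and `Raw(...)`/`Do(ctx)` illustrative):

```go
import (
	"context"
	"strings"
)

// searchApp renders the query a search application would run, then runs it
// with the same template parameters.
func searchApp(ctx context.Context, app *MethodSearchApplication, name string) error {
	params := `{"params": {"query_string": "kayak"}}`
	if _, err := app.RenderQuery(name).Raw(strings.NewReader(params)).Do(ctx); err != nil {
		return err
	}
	_, err := app.Search(name).Raw(strings.NewReader(params)).Do(ctx)
	return err
}
```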
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats
+func (p *MethodSearchableSnapshots) Stats() *searchable_snapshots_stats.Stats {
+	_stats := searchable_snapshots_stats.NewStatsFunc(p.tp)
+	return _stats()
+}
+
+// Activate a user profile.
+//
+// Create or update a user profile on behalf of another user.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// The calling application must have either an `access_token` or a combination
+// of `username` and `password` for the user that the profile document is
+// intended for.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// This API creates or updates a profile document for end users with information
+// that is extracted from the user's authentication object including `username`,
+// `full_name`, `roles`, and the authentication realm.
+// For example, in the JWT `access_token` case, the profile user's `username` is
+// extracted from the JWT token claim pointed to by the `claims.principal`
+// setting of the JWT realm that authenticated the token.
+//
+// When updating a profile document, the API enables the document if it was
+// disabled.
+// Any updates do not change existing content for either the `labels` or `data`
+// fields.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile
+func (p *MethodSecurity) ActivateUserProfile() *security_activate_user_profile.ActivateUserProfile {
+	_activateuserprofile := security_activate_user_profile.NewActivateUserProfileFunc(p.tp)
+	return _activateuserprofile()
+}
+
+// Authenticate a user.
+//
+// Authenticates a user and returns information about the authenticated user.
+// Include the user information in a [basic auth
+// header](https://en.wikipedia.org/wiki/Basic_access_authentication).
+// A successful call returns a JSON structure that shows user information such
+// as their username, the roles that are assigned to the user, any assigned
+// metadata, and information about the realms that authenticated and authorized
+// the user.
+// If the user cannot be authenticated, this API returns a 401 status code.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate
+func (p *MethodSecurity) Authenticate() *security_authenticate.Authenticate {
+	_authenticate := security_authenticate.NewAuthenticateFunc(p.tp)
+	return _authenticate()
+}
+
+// Bulk delete roles.
+//
+// The role management APIs are generally the preferred way to manage roles,
+// rather than using file-based role management.
+// The bulk delete roles API cannot delete roles that are defined in roles
+// files.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role
+func (p *MethodSecurity) BulkDeleteRole() *security_bulk_delete_role.BulkDeleteRole {
+	_bulkdeleterole := security_bulk_delete_role.NewBulkDeleteRoleFunc(p.tp)
+	return _bulkdeleterole()
+}
+
+// Bulk create or update roles.
+//
+// The role management APIs are generally the preferred way to manage roles,
+// rather than using file-based role management.
+// The bulk create or update roles API cannot update roles that are defined in
+// roles files.
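Authenticate is the simplest smoke test for a client's credentials: a 401 surfaces as an error. A sketch (`Do(ctx)` assumed):

```go
import (
	"context"
	"fmt"
)

// whoAmI verifies the configured credentials and prints what the cluster
// reports: username, roles, realm details.
func whoAmI(ctx context.Context, sec *MethodSecurity) error {
	res, err := sec.Authenticate().Do(ctx)
	if err != nil {
		return fmt.Errorf("authenticate: %w", err)
	}
	fmt.Printf("authenticated as: %+v\n", res)
	return nil
}
```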
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role +func (p *MethodSecurity) BulkPutRole() *security_bulk_put_role.BulkPutRole { + _bulkputrole := security_bulk_put_role.NewBulkPutRoleFunc(p.tp) + return _bulkputrole() +} + +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. +// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys +func (p *MethodSecurity) BulkUpdateApiKeys() *security_bulk_update_api_keys.BulkUpdateApiKeys { + _bulkupdateapikeys := security_bulk_update_api_keys.NewBulkUpdateApiKeysFunc(p.tp) + return _bulkupdateapikeys() +} + +// Change passwords. +// +// Change the passwords of users in the native realm and built-in users. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password +func (p *MethodSecurity) ChangePassword() *security_change_password.ChangePassword { + _changepassword := security_change_password.NewChangePasswordFunc(p.tp) + return _changepassword() +} + +// Clear the API key cache. +// +// Evict a subset of all entries from the API key cache. +// The cache is also automatically cleared on state changes of the security +// index. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache +func (p *MethodSecurity) ClearApiKeyCache(ids string) *security_clear_api_key_cache.ClearApiKeyCache { + _clearapikeycache := security_clear_api_key_cache.NewClearApiKeyCacheFunc(p.tp) + return _clearapikeycache(ids) +} + +// Clear the privileges cache. +// +// Evict privileges from the native application privilege cache. +// The cache is also automatically cleared for applications that have their +// privileges updated. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges +func (p *MethodSecurity) ClearCachedPrivileges(application string) *security_clear_cached_privileges.ClearCachedPrivileges { + _clearcachedprivileges := security_clear_cached_privileges.NewClearCachedPrivilegesFunc(p.tp) + return _clearcachedprivileges(application) +} + +// Clear the user cache. +// +// Evict users from the user cache. 
+// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. +// For more information, refer to the documentation about controlling the user +// cache. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms +func (p *MethodSecurity) ClearCachedRealms(realms string) *security_clear_cached_realms.ClearCachedRealms { + _clearcachedrealms := security_clear_cached_realms.NewClearCachedRealmsFunc(p.tp) + return _clearcachedrealms(realms) +} + +// Clear the roles cache. +// +// Evict roles from the native role cache. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles +func (p *MethodSecurity) ClearCachedRoles(name string) *security_clear_cached_roles.ClearCachedRoles { + _clearcachedroles := security_clear_cached_roles.NewClearCachedRolesFunc(p.tp) + return _clearcachedroles(name) +} + +// Clear service account token caches. +// +// Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens +func (p *MethodSecurity) ClearCachedServiceTokens(namespace, service, name string) *security_clear_cached_service_tokens.ClearCachedServiceTokens { + _clearcachedservicetokens := security_clear_cached_service_tokens.NewClearCachedServiceTokensFunc(p.tp) + return _clearcachedservicetokens(namespace, service, name) +} + +// Create an API key. +// +// Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// +// A successful request returns a JSON structure that contains the API key, its +// unique id, and its name. +// If applicable, it also returns expiration information for the API key in +// milliseconds. +// +// NOTE: By default, API keys never expire. You can specify expiration +// information when you create the API keys. +// +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key +func (p *MethodSecurity) CreateApiKey() *security_create_api_key.CreateApiKey { + _createapikey := security_create_api_key.NewCreateApiKeyFunc(p.tp) + return _createapikey() +} + +// Create a cross-cluster API key. +// +// Create an API key of the `cross_cluster` type for the API key based remote +// cluster access. +// A `cross_cluster` API key cannot be used to authenticate through the REST +// interface. 
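Minting a key with an explicit expiration sidesteps the never-expires default called out above. A sketch (body and `Raw(...)`/`Do(ctx)` illustrative; remember the caveat that a key derived from API-key authentication cannot carry privileges):

```go
import (
	"context"
	"strings"
)

// mintKey creates a named API key that expires after one day.
func mintKey(ctx context.Context, sec *MethodSecurity) error {
	body := `{"name": "ci-key", "expiration": "1d"}`
	_, err := sec.CreateApiKey().Raw(strings.NewReader(body)).Do(ctx)
	return err
}
```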
+//
+// IMPORTANT: To authenticate this request you must use a credential that is not
+// an API key. Even if you use an API key that has the required privilege, the
+// API returns an error.
+//
+// Cross-cluster API keys are created by the Elasticsearch API key service,
+// which is automatically enabled.
+//
+// NOTE: Unlike REST API keys, a cross-cluster API key does not capture
+// permissions of the authenticated user. The API key's effective permission is
+// exactly as specified with the `access` property.
+//
+// A successful request returns a JSON structure that contains the API key, its
+// unique ID, and its name. If applicable, it also returns expiration
+// information for the API key in milliseconds.
+//
+// By default, API keys never expire. You can specify expiration information
+// when you create the API keys.
+//
+// Cross-cluster API keys can only be updated with the update cross-cluster API
+// key API.
+// Attempting to update them with the update REST API key API or the bulk update
+// REST API keys API will result in an error.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key
+func (p *MethodSecurity) CreateCrossClusterApiKey() *security_create_cross_cluster_api_key.CreateCrossClusterApiKey {
+	_createcrossclusterapikey := security_create_cross_cluster_api_key.NewCreateCrossClusterApiKeyFunc(p.tp)
+	return _createcrossclusterapikey()
+}
+
+// Create a service account token.
+//
+// Create a service account token for access without requiring basic
+// authentication.
+//
+// NOTE: Service account tokens never expire.
+// You must actively delete them if they are no longer needed.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token
+func (p *MethodSecurity) CreateServiceToken(namespace, service string) *security_create_service_token.CreateServiceToken {
+	_createservicetoken := security_create_service_token.NewCreateServiceTokenFunc(p.tp)
+	return _createservicetoken(namespace, service)
+}
+
+// Delegate PKI authentication.
+//
+// This API implements the exchange of an X509Certificate chain for an
+// Elasticsearch access token.
+// The certificate chain is validated, according to RFC 5280, by sequentially
+// considering the trust configuration of every installed PKI realm that has
+// `delegation.enabled` set to `true`.
+// A successfully trusted client certificate is also subject to the validation
+// of the subject distinguished name according to the `username_pattern` of the
+// respective realm.
+//
+// This API is called by smart and trusted proxies, such as Kibana, which
+// terminate the user's TLS session but still want to authenticate the user by
+// using a PKI realm, as if the user connected directly to Elasticsearch.
+//
+// IMPORTANT: The association between the subject public key in the target
+// certificate and the corresponding private key is not validated.
+// This is part of the TLS authentication process and it is delegated to the
+// proxy that calls this API.
+// The proxy is trusted to have performed the TLS authentication and this API
+// translates that authentication into an Elasticsearch access token.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki
+func (p *MethodSecurity) DelegatePki() *security_delegate_pki.DelegatePki {
+	_delegatepki := security_delegate_pki.NewDelegatePkiFunc(p.tp)
+	return _delegatepki()
+}
+
+// Delete application privileges.
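Since service tokens never expire, creation and deletion belong to the same workflow. A rotation sketch against the only currently available account, `elastic/fleet-server` (`Do(ctx)` assumed; real code would read the new token value from the create response):

```go
import "context"

// rotateServiceToken issues a fresh token, then removes the old one.
func rotateServiceToken(ctx context.Context, sec *MethodSecurity, oldTokenName string) error {
	if _, err := sec.CreateServiceToken("elastic", "fleet-server").Do(ctx); err != nil {
		return err
	}
	_, err := sec.DeleteServiceToken("elastic", "fleet-server", oldTokenName).Do(ctx)
	return err
}
```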
+// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges +func (p *MethodSecurity) DeletePrivileges(application, name string) *security_delete_privileges.DeletePrivileges { + _deleteprivileges := security_delete_privileges.NewDeletePrivilegesFunc(p.tp) + return _deleteprivileges(application, name) +} + +// Delete roles. +// +// Delete roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role +func (p *MethodSecurity) DeleteRole(name string) *security_delete_role.DeleteRole { + _deleterole := security_delete_role.NewDeleteRoleFunc(p.tp) + return _deleterole(name) +} + +// Delete role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping +func (p *MethodSecurity) DeleteRoleMapping(name string) *security_delete_role_mapping.DeleteRoleMapping { + _deleterolemapping := security_delete_role_mapping.NewDeleteRoleMappingFunc(p.tp) + return _deleterolemapping(name) +} + +// Delete service account tokens. +// +// Delete service account tokens for a service in a specified namespace. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token +func (p *MethodSecurity) DeleteServiceToken(namespace, service, name string) *security_delete_service_token.DeleteServiceToken { + _deleteservicetoken := security_delete_service_token.NewDeleteServiceTokenFunc(p.tp) + return _deleteservicetoken(namespace, service, name) +} + +// Delete users. +// +// Delete users from the native realm. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user +func (p *MethodSecurity) DeleteUser(username string) *security_delete_user.DeleteUser { + _deleteuser := security_delete_user.NewDeleteUserFunc(p.tp) + return _deleteuser(username) +} + +// Disable users. +// +// Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user +func (p *MethodSecurity) DisableUser(username string) *security_disable_user.DisableUser { + _disableuser := security_disable_user.NewDisableUserFunc(p.tp) + return _disableuser(username) +} + +// Disable a user profile. +// +// Disable user profiles so that they are not visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. 
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// When you activate a user profile, it's automatically enabled and visible in
+// user profile searches. You can use the disable user profile API to disable a
+// user profile so it’s not visible in these searches.
+// To re-enable a disabled user profile, use the enable user profile API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile
+func (p *MethodSecurity) DisableUserProfile(uid string) *security_disable_user_profile.DisableUserProfile {
+	_disableuserprofile := security_disable_user_profile.NewDisableUserProfileFunc(p.tp)
+	return _disableuserprofile(uid)
+}
+
+// Enable users.
+//
+// Enable users in the native realm.
+// By default, when you create users, they are enabled.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user
+func (p *MethodSecurity) EnableUser(username string) *security_enable_user.EnableUser {
+	_enableuser := security_enable_user.NewEnableUserFunc(p.tp)
+	return _enableuser(username)
+}
+
+// Enable a user profile.
+//
+// Enable user profiles to make them visible in user profile searches.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// When you activate a user profile, it's automatically enabled and visible in
+// user profile searches.
+// If you later disable the user profile, you can use the enable user profile
+// API to make the profile visible in these searches again.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile
+func (p *MethodSecurity) EnableUserProfile(uid string) *security_enable_user_profile.EnableUserProfile {
+	_enableuserprofile := security_enable_user_profile.NewEnableUserProfileFunc(p.tp)
+	return _enableuserprofile(uid)
+}
+
+// Enroll Kibana.
+//
+// Enable a Kibana instance to configure itself for communication with a secured
+// Elasticsearch cluster.
+//
+// NOTE: This API is currently intended for internal use only by Kibana.
+// Kibana uses this API internally to configure itself for communications with
+// an Elasticsearch cluster that already has security features enabled.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana
+func (p *MethodSecurity) EnrollKibana() *security_enroll_kibana.EnrollKibana {
+	_enrollkibana := security_enroll_kibana.NewEnrollKibanaFunc(p.tp)
+	return _enrollkibana()
+}
+
+// Enroll a node.
+//
+// Enroll a new node to allow it to join an existing cluster with security
+// features enabled.
+//
+// The response contains all the necessary information for the joining node to
+// bootstrap discovery and security related settings so that it can successfully
+// join the cluster.
+// The response contains key and certificate material that allows the caller to
+// generate valid signed certificates for the HTTP layer of all nodes in the
+// cluster.
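+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sec`
+// is the enclosing *MethodSecurity value and the builder exposes a Do method):
+//
+//	res, err := sec.EnrollNode().Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res // key and certificate material for the joining node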
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node +func (p *MethodSecurity) EnrollNode() *security_enroll_node.EnrollNode { + _enrollnode := security_enroll_node.NewEnrollNodeFunc(p.tp) + return _enrollnode() +} + +// Get API key information. +// +// Retrieves information for one or more API keys. +// NOTE: If you have only the `manage_own_api_key` privilege, this API returns +// only the API keys that you own. +// If you have `read_security`, `manage_api_key` or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key +func (p *MethodSecurity) GetApiKey() *security_get_api_key.GetApiKey { + _getapikey := security_get_api_key.NewGetApiKeyFunc(p.tp) + return _getapikey() +} + +// Get builtin privileges. +// +// Get the list of cluster privileges and index privileges that are available in +// this version of Elasticsearch. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges +func (p *MethodSecurity) GetBuiltinPrivileges() *security_get_builtin_privileges.GetBuiltinPrivileges { + _getbuiltinprivileges := security_get_builtin_privileges.NewGetBuiltinPrivilegesFunc(p.tp) + return _getbuiltinprivileges() +} + +// Get application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges +func (p *MethodSecurity) GetPrivileges() *security_get_privileges.GetPrivileges { + _getprivileges := security_get_privileges.NewGetPrivilegesFunc(p.tp) + return _getprivileges() +} + +// Get roles. +// +// Get roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The get roles API cannot retrieve roles that are defined in roles files. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role +func (p *MethodSecurity) GetRole() *security_get_role.GetRole { + _getrole := security_get_role.NewGetRoleFunc(p.tp) + return _getrole() +} + +// Get role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The get role mappings API cannot retrieve role mappings that are defined in +// role mapping files. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping +func (p *MethodSecurity) GetRoleMapping() *security_get_role_mapping.GetRoleMapping { + _getrolemapping := security_get_role_mapping.NewGetRoleMappingFunc(p.tp) + return _getrolemapping() +} + +// Get service accounts. +// +// Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. 
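+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sec`
+// is the enclosing *MethodSecurity value and the builder exposes a Do method):
+//
+//	res, err := sec.GetServiceAccounts().Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res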
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts
+func (p *MethodSecurity) GetServiceAccounts() *security_get_service_accounts.GetServiceAccounts {
+	_getserviceaccounts := security_get_service_accounts.NewGetServiceAccountsFunc(p.tp)
+	return _getserviceaccounts()
+}
+
+// Get service account credentials.
+//
+// To use this API, you must have at least the `read_security` cluster privilege
+// (or a greater privilege such as `manage_service_account` or
+// `manage_security`).
+//
+// The response includes service account tokens that were created with the
+// create service account tokens API as well as file-backed tokens from all
+// nodes of the cluster.
+//
+// NOTE: For tokens backed by the `service_tokens` file, the API collects them
+// from all nodes of the cluster.
+// Tokens with the same name from different nodes are assumed to be the same
+// token and are only counted once towards the total number of service tokens.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials
+func (p *MethodSecurity) GetServiceCredentials(namespace, service string) *security_get_service_credentials.GetServiceCredentials {
+	_getservicecredentials := security_get_service_credentials.NewGetServiceCredentialsFunc(p.tp)
+	return _getservicecredentials(namespace, service)
+}
+
+// Get security index settings.
+//
+// Get the user-configurable settings for the security internal index
+// (`.security` and associated indices).
+// Only a subset of the index settings, those that are user-configurable, will
+// be shown.
+// This includes:
+//
+// * `index.auto_expand_replicas`
+// * `index.number_of_replicas`
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings
+func (p *MethodSecurity) GetSettings() *security_get_settings.GetSettings {
+	_getsettings := security_get_settings.NewGetSettingsFunc(p.tp)
+	return _getsettings()
+}
+
+// Get a token.
+//
+// Create a bearer token for access without requiring basic authentication.
+// The tokens are created by the Elasticsearch Token Service, which is
+// automatically enabled when you configure TLS on the HTTP interface.
+// Alternatively, you can explicitly enable the
+// `xpack.security.authc.token.enabled` setting.
+// When you are running in production mode, a bootstrap check prevents you from
+// enabling the token service unless you also enable TLS on the HTTP interface.
+//
+// The get token API takes the same parameters as a typical OAuth 2.0 token API
+// except for the use of a JSON request body.
+//
+// A successful get token API call returns a JSON structure that contains the
+// access token, the amount of time (in seconds) until the token expires, the
+// type, and the scope if available.
+//
+// The tokens returned by the get token API have a finite period of time for
+// which they are valid; after that time period, they can no longer be used.
+// That time period is defined by the `xpack.security.authc.token.timeout`
+// setting.
+// If you want to invalidate a token immediately, you can do so by using the
+// invalidate token API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token
+func (p *MethodSecurity) GetToken() *security_get_token.GetToken {
+	_gettoken := security_get_token.NewGetTokenFunc(p.tp)
+	return _gettoken()
+}
+
+// Get users.
+//
+// Get information about users in the native realm and built-in users.
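+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sec`
+// is the enclosing *MethodSecurity value and the builder exposes a Do method):
+//
+//	res, err := sec.GetUser().Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res // information about all users visible to the caller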
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user
+func (p *MethodSecurity) GetUser() *security_get_user.GetUser {
+	_getuser := security_get_user.NewGetUserFunc(p.tp)
+	return _getuser()
+}
+
+// Get user privileges.
+//
+// Get the security privileges for the logged-in user.
+// All users can use this API, but only to determine their own privileges.
+// To check the privileges of other users, you must use the run as feature.
+// To check whether a user has a specific list of privileges, use the has
+// privileges API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges
+func (p *MethodSecurity) GetUserPrivileges() *security_get_user_privileges.GetUserPrivileges {
+	_getuserprivileges := security_get_user_privileges.NewGetUserPrivilegesFunc(p.tp)
+	return _getuserprivileges()
+}
+
+// Get a user profile.
+//
+// Get a user's profile using the unique profile ID.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile
+func (p *MethodSecurity) GetUserProfile(uid string) *security_get_user_profile.GetUserProfile {
+	_getuserprofile := security_get_user_profile.NewGetUserProfileFunc(p.tp)
+	return _getuserprofile(uid)
+}
+
+// Grant an API key.
+//
+// Create an API key on behalf of another user.
+// This API is similar to the create API keys API; however, it creates the
+// API key for a user that is different from the user who runs the API.
+// The caller must have authentication credentials for the user on whose behalf
+// the API key will be created.
+// It is not possible to use this API to create an API key without that user's
+// credentials.
+// The supported user authentication credential types are:
+//
+// * username and password
+// * Elasticsearch access tokens
+// * JWTs
+//
+// The user for whom the authentication credentials are provided can optionally
+// "run as" (impersonate) another user.
+// In this case, the API key will be created on behalf of the impersonated user.
+//
+// This API is intended to be used by applications that need to create and manage
+// API keys for end users, but cannot guarantee that those users have permission
+// to create API keys on their own behalf.
+// The API keys are created by the Elasticsearch API key service, which is
+// automatically enabled.
+//
+// A successful grant API key API call returns a JSON structure that contains
+// the API key, its unique ID, and its name.
+// If applicable, it also returns expiration information for the API key in
+// milliseconds.
+//
+// By default, API keys never expire. You can specify expiration information
+// when you create the API keys.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key
+func (p *MethodSecurity) GrantApiKey() *security_grant_api_key.GrantApiKey {
+	_grantapikey := security_grant_api_key.NewGrantApiKeyFunc(p.tp)
+	return _grantapikey()
+}
+
+// Check user privileges.
+//
+// Determine whether the specified user has a specified list of privileges.
+// All users can use this API, but only to determine their own privileges.
+// To check the privileges of other users, you must use the run as feature.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges
+func (p *MethodSecurity) HasPrivileges() *security_has_privileges.HasPrivileges {
+	_hasprivileges := security_has_privileges.NewHasPrivilegesFunc(p.tp)
+	return _hasprivileges()
+}
+
+// Check user profile privileges.
+//
+// Determine whether the users associated with the specified user profile IDs
+// have all the requested privileges.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile
+func (p *MethodSecurity) HasPrivilegesUserProfile() *security_has_privileges_user_profile.HasPrivilegesUserProfile {
+	_hasprivilegesuserprofile := security_has_privileges_user_profile.NewHasPrivilegesUserProfileFunc(p.tp)
+	return _hasprivilegesuserprofile()
+}
+
+// Invalidate API keys.
+//
+// This API invalidates API keys created by the create API key or grant API key
+// APIs.
+// Invalidated API keys fail authentication, but they can still be viewed using
+// the get API key information and query API key information APIs, for at least
+// the configured retention period, until they are automatically deleted.
+//
+// To use this API, you must have at least the `manage_security`,
+// `manage_api_key`, or `manage_own_api_key` cluster privileges.
+// The `manage_security` privilege allows deleting any API key, including both
+// REST and cross cluster API keys.
+// The `manage_api_key` privilege allows deleting any REST API key, but not
+// cross cluster API keys.
+// The `manage_own_api_key` privilege only allows deleting REST API keys that
+// are owned by the user.
+// In addition, with the `manage_own_api_key` privilege, an invalidation request
+// must be issued in one of three formats:
+//
+// - Set the parameter `owner=true`.
+// - Or, set both `username` and `realm_name` to match the user's identity.
+// - Or, if the request is issued by an API key (that is, an API key
+// invalidates itself), specify its ID in the `ids` field.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key
+func (p *MethodSecurity) InvalidateApiKey() *security_invalidate_api_key.InvalidateApiKey {
+	_invalidateapikey := security_invalidate_api_key.NewInvalidateApiKeyFunc(p.tp)
+	return _invalidateapikey()
+}
+
+// Invalidate a token.
+//
+// The access tokens returned by the get token API have a finite period of time
+// for which they are valid.
+// After that time period, they can no longer be used.
+// The time period is defined by the `xpack.security.authc.token.timeout`
+// setting.
+//
+// The refresh tokens returned by the get token API are only valid for 24 hours.
+// They can also be used exactly once.
+// If you want to invalidate one or more access or refresh tokens immediately,
+// use this invalidate token API.
+//
+// NOTE: While all parameters are optional, at least one of them is required.
+// More specifically, either the `token` or the `refresh_token` parameter is
+// required.
+// If neither of them is specified, then `realm_name` and/or `username` need
+// to be specified.
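+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sec`
+// is the enclosing *MethodSecurity value, that the builder exposes a Do
+// method, and that the token, refresh token, realm name, or username is
+// supplied through the request body before calling Do):
+//
+//	res, err := sec.InvalidateToken().Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res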
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token
+func (p *MethodSecurity) InvalidateToken() *security_invalidate_token.InvalidateToken {
+	_invalidatetoken := security_invalidate_token.NewInvalidateTokenFunc(p.tp)
+	return _invalidatetoken()
+}
+
+// Authenticate OpenID Connect.
+//
+// Exchange an OpenID Connect authentication response message for an
+// Elasticsearch internal access token and refresh token that can be
+// subsequently used for authentication.
+//
+// Elasticsearch exposes all the necessary OpenID Connect related functionality
+// with the OpenID Connect APIs.
+// These APIs are used internally by Kibana in order to provide OpenID Connect
+// based authentication, but can also be used by other, custom web applications
+// or other clients.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate
+func (p *MethodSecurity) OidcAuthenticate() *security_oidc_authenticate.OidcAuthenticate {
+	_oidcauthenticate := security_oidc_authenticate.NewOidcAuthenticateFunc(p.tp)
+	return _oidcauthenticate()
+}
+
+// Logout of OpenID Connect.
+//
+// Invalidate an access token and a refresh token that were generated as a
+// response to the `/_security/oidc/authenticate` API.
+//
+// If the OpenID Connect authentication realm in Elasticsearch is accordingly
+// configured, the response to this call will contain a URI pointing to the end
+// session endpoint of the OpenID Connect Provider in order to perform single
+// logout.
+//
+// Elasticsearch exposes all the necessary OpenID Connect related functionality
+// with the OpenID Connect APIs.
+// These APIs are used internally by Kibana in order to provide OpenID Connect
+// based authentication, but can also be used by other, custom web applications
+// or other clients.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout
+func (p *MethodSecurity) OidcLogout() *security_oidc_logout.OidcLogout {
+	_oidclogout := security_oidc_logout.NewOidcLogoutFunc(p.tp)
+	return _oidclogout()
+}
+
+// Prepare OpenID Connect authentication.
+//
+// Create an OAuth 2.0 authentication request as a URL string based on the
+// configuration of the OpenID Connect authentication realm in Elasticsearch.
+//
+// The response of this API is a URL pointing to the Authorization Endpoint of
+// the configured OpenID Connect Provider, which can be used to redirect the
+// browser of the user in order to continue the authentication process.
+//
+// Elasticsearch exposes all the necessary OpenID Connect related functionality
+// with the OpenID Connect APIs.
+// These APIs are used internally by Kibana in order to provide OpenID Connect
+// based authentication, but can also be used by other, custom web applications
+// or other clients.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication
+func (p *MethodSecurity) OidcPrepareAuthentication() *security_oidc_prepare_authentication.OidcPrepareAuthentication {
+	_oidcprepareauthentication := security_oidc_prepare_authentication.NewOidcPrepareAuthenticationFunc(p.tp)
+	return _oidcprepareauthentication()
+}
+
+// Create or update application privileges.
+//
+// To use this API, you must have one of the following privileges:
+//
+// * The `manage_security` cluster privilege (or a greater privilege such as
+// `all`).
+// * The "Manage Application Privileges" global privilege for the application
+// being referenced in the request.
+//
+// Application names are formed from a prefix and an optional suffix, and must
+// conform to the following rules:
+//
+// * The prefix must begin with a lowercase ASCII letter.
+// * The prefix must contain only ASCII letters or digits.
+// * The prefix must be at least 3 characters long.
+// * If the suffix exists, it must begin with either a dash (`-`) or an
+// underscore (`_`).
+// * The suffix cannot contain any of the following characters: `\`, `/`, `*`,
+// `?`, `"`, `<`, `>`, `|`, `,`.
+// * No part of the name can contain whitespace.
+//
+// Privilege names must begin with a lowercase ASCII letter and must contain
+// only ASCII letters and digits along with the characters `_`, `-`, and `.`.
+//
+// Action names can contain any number of printable ASCII characters and must
+// contain at least one of the following characters: `/`, `*`, `:`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges
+func (p *MethodSecurity) PutPrivileges() *security_put_privileges.PutPrivileges {
+	_putprivileges := security_put_privileges.NewPutPrivilegesFunc(p.tp)
+	return _putprivileges()
+}
+
+// Create or update roles.
+//
+// The role management APIs are generally the preferred way to manage roles in
+// the native realm, rather than using file-based role management.
+// The create or update roles API cannot update roles that are defined in roles
+// files.
+// File-based role management is not available in Elastic Serverless.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role
+func (p *MethodSecurity) PutRole(name string) *security_put_role.PutRole {
+	_putrole := security_put_role.NewPutRoleFunc(p.tp)
+	return _putrole(name)
+}
+
+// Create or update role mappings.
+//
+// Role mappings define which roles are assigned to each user.
+// Each mapping has rules that identify users and a list of roles that are
+// granted to those users.
+// The role mapping APIs are generally the preferred way to manage role mappings
+// rather than using role mapping files. The create or update role mappings API
+// cannot update role mappings that are defined in role mapping files.
+//
+// NOTE: This API does not create roles. Rather, it maps users to existing
+// roles.
+// Roles can be created by using the create or update roles API or roles files.
+//
+// **Role templates**
+//
+// The most common use for role mappings is to create a mapping from a known
+// value on the user to a fixed role name.
+// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should
+// be given the superuser role in Elasticsearch.
+// The `roles` field is used for this purpose.
+//
+// For more complex needs, it is possible to use Mustache templates to
+// dynamically determine the names of the roles that should be granted to the
+// user.
+// The `role_templates` field is used for this purpose.
+//
+// NOTE: To use role templates successfully, the relevant scripting feature must
+// be enabled.
+// Otherwise, all attempts to create a role mapping with role templates fail.
+//
+// All of the user fields that are available in the role mapping rules are also
+// available in the role templates.
+// Thus it is possible to assign a user to a role that reflects their username,
+// their groups, or the name of the realm to which they authenticated.
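+//
+// For example, a role mapping body that grants each matched user a role
+// derived from their username might look like this (a sketch; the realm name
+// is a placeholder, and the field names follow the role mapping API):
+//
+//	{
+//	  "enabled": true,
+//	  "rules": { "field": { "realm.name": "ldap1" } },
+//	  "role_templates": [
+//	    { "template": { "source": "user_{{username}}" } }
+//	  ]
+//	}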
+//
+// By default, a template is evaluated to produce a single string that is the
+// name of the role which should be assigned to the user.
+// If the format of the template is set to "json", then the template is expected
+// to produce a JSON string or an array of JSON strings for the role names.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping
+func (p *MethodSecurity) PutRoleMapping(name string) *security_put_role_mapping.PutRoleMapping {
+	_putrolemapping := security_put_role_mapping.NewPutRoleMappingFunc(p.tp)
+	return _putrolemapping(name)
+}
+
+// Create or update users.
+//
+// Add and update users in the native realm.
+// A password is required for adding a new user but is optional when updating an
+// existing user.
+// To change a user's password without updating any other fields, use the change
+// password API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user
+func (p *MethodSecurity) PutUser(username string) *security_put_user.PutUser {
+	_putuser := security_put_user.NewPutUserFunc(p.tp)
+	return _putuser(username)
+}
+
+// Find API keys with a query.
+//
+// Get a paginated list of API keys and their information.
+// You can optionally filter the results with a query.
+//
+// To use this API, you must have at least the `manage_own_api_key` or the
+// `read_security` cluster privileges.
+// If you have only the `manage_own_api_key` privilege, this API returns only
+// the API keys that you own.
+// If you have the `read_security`, `manage_api_key`, or greater privileges
+// (including `manage_security`), this API returns all API keys regardless of
+// ownership.
+// Refer to the linked documentation for examples of how to find API keys:
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys
+func (p *MethodSecurity) QueryApiKeys() *security_query_api_keys.QueryApiKeys {
+	_queryapikeys := security_query_api_keys.NewQueryApiKeysFunc(p.tp)
+	return _queryapikeys()
+}
+
+// Find roles with a query.
+//
+// Get roles in a paginated manner.
+// The role management APIs are generally the preferred way to manage roles,
+// rather than using file-based role management.
+// The query roles API does not retrieve roles that are defined in roles files,
+// nor built-in ones.
+// You can optionally filter the results with a query.
+// Also, the results can be paginated and sorted.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role
+func (p *MethodSecurity) QueryRole() *security_query_role.QueryRole {
+	_queryrole := security_query_role.NewQueryRoleFunc(p.tp)
+	return _queryrole()
+}
+
+// Find users with a query.
+//
+// Get information for users in a paginated manner.
+// You can optionally filter the results with a query.
+//
+// NOTE: As opposed to the get user API, built-in users are excluded from the
+// result.
+// This API is only for native users.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user
+func (p *MethodSecurity) QueryUser() *security_query_user.QueryUser {
+	_queryuser := security_query_user.NewQueryUserFunc(p.tp)
+	return _queryuser()
+}
+
+// Authenticate SAML.
+//
+// Submit a SAML response message to Elasticsearch for consumption.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// The SAML message that is submitted can be:
+//
+// * A response to a SAML authentication request that was previously created
+// using the SAML prepare authentication API.
+// * An unsolicited SAML message in the case of an IdP-initiated single sign-on
+// (SSO) flow.
+//
+// In either case, the SAML message needs to be a base64 encoded XML document
+// with a root element of `<Response>`.
+//
+// After successful validation, Elasticsearch responds with an Elasticsearch
+// internal access token and refresh token that can be subsequently used for
+// authentication.
+// This API endpoint essentially exchanges SAML responses that indicate
+// successful authentication in the IdP for Elasticsearch access and refresh
+// tokens, which can be used for authentication against Elasticsearch.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate
+func (p *MethodSecurity) SamlAuthenticate() *security_saml_authenticate.SamlAuthenticate {
+	_samlauthenticate := security_saml_authenticate.NewSamlAuthenticateFunc(p.tp)
+	return _samlauthenticate()
+}
+
+// Logout of SAML completely.
+//
+// Verifies the logout response sent from the SAML IdP.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// The SAML IdP may send a logout response back to the SP after handling the
+// SP-initiated SAML Single Logout.
+// This API verifies the response by ensuring the content is relevant and
+// validating its signature.
+// An empty response is returned if the verification process is successful.
+// The response can be sent by the IdP with either the HTTP-Redirect or the
+// HTTP-Post binding.
+// The caller of this API must prepare the request accordingly so that this API
+// can handle either of them.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout
+func (p *MethodSecurity) SamlCompleteLogout() *security_saml_complete_logout.SamlCompleteLogout {
+	_samlcompletelogout := security_saml_complete_logout.NewSamlCompleteLogoutFunc(p.tp)
+	return _samlcompletelogout()
+}
+
+// Invalidate SAML.
+//
+// Submit a SAML LogoutRequest message to Elasticsearch for consumption.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// The logout request comes from the SAML IdP during an IdP initiated Single
+// Logout.
+// The custom web application can use this API to have Elasticsearch process the
+// `LogoutRequest`.
+// After successful validation of the request, Elasticsearch invalidates the
+// access token and refresh token that corresponds to that specific SAML
+// principal and provides a URL that contains a SAML LogoutResponse message.
+// Thus the user can be redirected back to their IdP.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate
+func (p *MethodSecurity) SamlInvalidate() *security_saml_invalidate.SamlInvalidate {
+	_samlinvalidate := security_saml_invalidate.NewSamlInvalidateFunc(p.tp)
+	return _samlinvalidate()
+}
+
+// Logout of SAML.
+//
+// Submits a request to invalidate an access token and refresh token.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// This API invalidates the tokens that were generated for a user by the SAML
+// authenticate API.
+// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP
+// supports this, the Elasticsearch response contains a URL to redirect the user
+// to the IdP that contains a SAML logout request (starting an SP-initiated SAML
+// Single Logout).
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout
+func (p *MethodSecurity) SamlLogout() *security_saml_logout.SamlLogout {
+	_samllogout := security_saml_logout.NewSamlLogoutFunc(p.tp)
+	return _samllogout()
+}
+
+// Prepare SAML authentication.
+//
+// Create a SAML authentication request (`<AuthnRequest>`) as a URL string based
+// on the configuration of the respective SAML realm in Elasticsearch.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// This API returns a URL pointing to the SAML Identity Provider.
+// You can use the URL to redirect the browser of the user in order to continue
+// the authentication process.
+// The URL includes a single parameter named `SAMLRequest`, which contains a
+// SAML Authentication request that is deflated and Base64 encoded.
+// If the configuration dictates that SAML authentication requests should be
+// signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+// These parameters contain the algorithm used for the signature and the
+// signature value itself.
+// It also returns a random string that uniquely identifies this SAML
+// Authentication request.
+// The caller of this API needs to store this identifier as it needs to be used
+// in a following step of the authentication process.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication
+func (p *MethodSecurity) SamlPrepareAuthentication() *security_saml_prepare_authentication.SamlPrepareAuthentication {
+	_samlprepareauthentication := security_saml_prepare_authentication.NewSamlPrepareAuthenticationFunc(p.tp)
+	return _samlprepareauthentication()
+}
+
+// Create SAML service provider metadata.
+//
+// Generate SAML metadata for a SAML 2.0 Service Provider.
+//
+// The SAML 2.0 specification provides a mechanism for Service Providers to
+// describe their capabilities and configuration using a metadata file.
+// This API generates Service Provider metadata based on the configuration of a
+// SAML realm in Elasticsearch.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata
+func (p *MethodSecurity) SamlServiceProviderMetadata(realmname string) *security_saml_service_provider_metadata.SamlServiceProviderMetadata {
+	_samlserviceprovidermetadata := security_saml_service_provider_metadata.NewSamlServiceProviderMetadataFunc(p.tp)
+	return _samlserviceprovidermetadata(realmname)
+}
+
+// Suggest a user profile.
+//
+// Get suggestions for user profiles that match specified search criteria.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles
+func (p *MethodSecurity) SuggestUserProfiles() *security_suggest_user_profiles.SuggestUserProfiles {
+	_suggestuserprofiles := security_suggest_user_profiles.NewSuggestUserProfilesFunc(p.tp)
+	return _suggestuserprofiles()
+}
+
+// Update an API key.
+//
+// Update attributes of an existing API key.
+// This API supports updates to an API key's access scope, expiration, and
+// metadata.
+//
+// To use this API, you must have at least the `manage_own_api_key` cluster
+// privilege.
+// Users can only update API keys that they created or that were granted to
+// them.
+// To update another user’s API key, use the `run_as` feature to submit a
+// request on behalf of another user.
+//
+// IMPORTANT: It's not possible to use an API key as the authentication
+// credential for this API. The owner user’s credentials are required.
+//
+// Use this API to update API keys created by the create API key or grant API
+// key APIs.
+// If you need to apply the same update to many API keys, you can use the bulk
+// update API keys API to reduce overhead.
+// It's not possible to update expired API keys or API keys that have been
+// invalidated by the invalidate API key API.
+//
+// The access scope of an API key is derived from the `role_descriptors` you
+// specify in the request and a snapshot of the owner user's permissions at the
+// time of the request.
+// The snapshot of the owner's permissions is updated automatically on every
+// call.
+//
+// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to
+// this API might still change the API key's access scope.
+// This change can occur if the owner user's permissions have changed since the
+// API key was created or last modified.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key
+func (p *MethodSecurity) UpdateApiKey(id string) *security_update_api_key.UpdateApiKey {
+	_updateapikey := security_update_api_key.NewUpdateApiKeyFunc(p.tp)
+	return _updateapikey(id)
+}
+
+// Update a cross-cluster API key.
+//
+// Update the attributes of an existing cross-cluster API key, which is used for
+// API key based remote cluster access.
+//
+// To use this API, you must have at least the `manage_security` cluster
+// privilege.
+// Users can only update API keys that they created.
+// To update another user's API key, use the `run_as` feature to submit a
+// request on behalf of another user.
+//
+// IMPORTANT: It's not possible to use an API key as the authentication
+// credential for this API.
+// To update an API key, the owner user's credentials are required.
+//
+// It's not possible to update expired API keys, or API keys that have been
+// invalidated by the invalidate API key API.
+//
+// This API supports updates to an API key's access scope, metadata, and
+// expiration.
+// The owner user's information, such as the `username` and `realm`, is also
+// updated automatically on every call.
+//
+// NOTE: This API cannot update REST API keys, which should be updated by either
+// the update API key or bulk update API keys API.
+//
+// To learn more about how to use this API, refer to the [Update cross-cluster
+// API key API examples
+// page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
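+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sec`
+// is the enclosing *MethodSecurity value, that the builder exposes a Do
+// method, that the key ID is a placeholder, and that the new `access`
+// definition is supplied through the request body before calling Do):
+//
+//	res, err := sec.UpdateCrossClusterApiKey("my-key-id").Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res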
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key +func (p *MethodSecurity) UpdateCrossClusterApiKey(id string) *security_update_cross_cluster_api_key.UpdateCrossClusterApiKey { + _updatecrossclusterapikey := security_update_cross_cluster_api_key.NewUpdateCrossClusterApiKeyFunc(p.tp) + return _updatecrossclusterapikey(id) +} + +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings +func (p *MethodSecurity) UpdateSettings() *security_update_settings.UpdateSettings { + _updatesettings := security_update_settings.NewUpdateSettingsFunc(p.tp) + return _updatesettings() +} + +// Update user profile data. +// +// Update specific data for the user profile that is associated with a unique +// ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data +func (p *MethodSecurity) UpdateUserProfileData(uid string) *security_update_user_profile_data.UpdateUserProfileData { + _updateuserprofiledata := security_update_user_profile_data.NewUpdateUserProfileDataFunc(p.tp) + return _updateuserprofiledata(uid) +} + +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. 
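+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `sd`
+// is the enclosing *MethodShutdown value, that the builder exposes a Do
+// method, and that the node ID is a placeholder):
+//
+//	res, err := sd.DeleteNode("node-1").Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res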
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node
+func (p *MethodShutdown) DeleteNode(nodeid string) *shutdown_delete_node.DeleteNode {
+	_deletenode := shutdown_delete_node.NewDeleteNodeFunc(p.tp)
+	return _deletenode(nodeid)
+}
+
+// Get the shutdown status.
+//
+// Get information about nodes that are ready to be shut down, have shut down
+// preparations still in progress, or have stalled.
+// The API returns status information for each part of the shut down process.
+//
+// NOTE: This feature is designed for indirect use by Elasticsearch Service,
+// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not
+// supported.
+//
+// If the operator privileges feature is enabled, you must be an operator to use
+// this API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node
+func (p *MethodShutdown) GetNode() *shutdown_get_node.GetNode {
+	_getnode := shutdown_get_node.NewGetNodeFunc(p.tp)
+	return _getnode()
+}
+
+// Prepare a node to be shut down.
+//
+// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic
+// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not
+// supported.
+//
+// If you specify a node that is offline, it will be prepared for shut down when
+// it rejoins the cluster.
+//
+// If the operator privileges feature is enabled, you must be an operator to use
+// this API.
+//
+// The API migrates ongoing tasks and index shards to other nodes as needed to
+// prepare a node to be restarted or shut down and removed from the cluster.
+// This ensures that Elasticsearch can be stopped safely with minimal disruption
+// to the cluster.
+//
+// You must specify the type of shutdown: `restart`, `remove`, or `replace`.
+// If a node is already being prepared for shutdown, you can use this API to
+// change the shutdown type.
+//
+// IMPORTANT: This API does NOT terminate the Elasticsearch process.
+// Monitor the node shutdown status to determine when it is safe to stop
+// Elasticsearch.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node
+func (p *MethodShutdown) PutNode(nodeid string) *shutdown_put_node.PutNode {
+	_putnode := shutdown_put_node.NewPutNodeFunc(p.tp)
+	return _putnode(nodeid)
+}
+
+// Simulate data ingestion.
+// Run ingest pipelines against a set of provided documents, optionally with
+// substitute pipeline definitions, to simulate ingesting data into an index.
+//
+// This API is meant to be used for troubleshooting or pipeline development, as
+// it does not actually index any data into Elasticsearch.
+//
+// The API runs the default and final pipeline for that index against a set of
+// documents provided in the body of the request.
+// If a pipeline contains a reroute processor, it follows that reroute processor
+// to the new index, running that index's pipelines as well, just as a
+// non-simulated ingest would.
+// No data is indexed into Elasticsearch.
+// Instead, the transformed document is returned, along with the list of
+// pipelines that have been run and the name of the index where the document
+// would have been indexed if this were not a simulation.
+// The transformed document is validated against the mappings that would apply
+// to this index, and any validation error is reported in the result.
+//
+// This API differs from the simulate pipeline API in that you specify a single
+// pipeline for that API, and it runs only that one pipeline.
+// The simulate pipeline API is more useful for developing a single pipeline, +// while the simulate ingest API is more useful for troubleshooting the +// interaction of the various pipelines that get applied when ingesting into an +// index. +// +// By default, the pipeline definitions that are currently in the system are +// used. +// However, you can supply substitute pipeline definitions in the body of the +// request. +// These will be used in place of the pipeline definitions that are already in +// the system. This can be used to replace existing pipeline definitions or to +// create new ones. The pipeline substitutions are used only within this +// request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest +func (p *MethodSimulate) Ingest() *simulate_ingest.Ingest { + _ingest := simulate_ingest.NewIngestFunc(p.tp) + return _ingest() +} + +// Delete a policy. +// Delete a snapshot lifecycle policy definition. +// This operation prevents any future snapshots from being taken but does not +// cancel in-progress snapshots or remove previously-taken snapshots. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle +func (p *MethodSlm) DeleteLifecycle(policyid string) *slm_delete_lifecycle.DeleteLifecycle { + _deletelifecycle := slm_delete_lifecycle.NewDeleteLifecycleFunc(p.tp) + return _deletelifecycle(policyid) +} + +// Run a policy. +// Immediately create a snapshot according to the snapshot lifecycle policy +// without waiting for the scheduled time. +// The snapshot policy is normally applied according to its schedule, but you +// might want to manually run a policy before performing an upgrade or other +// maintenance. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle +func (p *MethodSlm) ExecuteLifecycle(policyid string) *slm_execute_lifecycle.ExecuteLifecycle { + _executelifecycle := slm_execute_lifecycle.NewExecuteLifecycleFunc(p.tp) + return _executelifecycle(policyid) +} + +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention +func (p *MethodSlm) ExecuteRetention() *slm_execute_retention.ExecuteRetention { + _executeretention := slm_execute_retention.NewExecuteRetentionFunc(p.tp) + return _executeretention() +} + +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle +func (p *MethodSlm) GetLifecycle() *slm_get_lifecycle.GetLifecycle { + _getlifecycle := slm_get_lifecycle.NewGetLifecycleFunc(p.tp) + return _getlifecycle() +} + +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot +// lifecycle management. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats +func (p *MethodSlm) GetStats() *slm_get_stats.GetStats { + _getstats := slm_get_stats.NewGetStatsFunc(p.tp) + return _getstats() +} + +// Get the snapshot lifecycle management status. 
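+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `slm`
+// is the enclosing *MethodSlm value and the builder exposes a Do method):
+//
+//	res, err := slm.GetStatus().Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res // reports whether SLM is RUNNING, STOPPING, or STOPPED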
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status
+func (p *MethodSlm) GetStatus() *slm_get_status.GetStatus {
+	_getstatus := slm_get_status.NewGetStatusFunc(p.tp)
+	return _getstatus()
+}
+
+// Create or update a policy.
+// Create or update a snapshot lifecycle policy.
+// If the policy already exists, this request increments the policy version.
+// Only the latest version of a policy is stored.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle
+func (p *MethodSlm) PutLifecycle(policyid string) *slm_put_lifecycle.PutLifecycle {
+	_putlifecycle := slm_put_lifecycle.NewPutLifecycleFunc(p.tp)
+	return _putlifecycle(policyid)
+}
+
+// Start snapshot lifecycle management.
+// Snapshot lifecycle management (SLM) starts automatically when a cluster is
+// formed.
+// Manually starting SLM is necessary only if it has been stopped using the stop
+// SLM API.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start
+func (p *MethodSlm) Start() *slm_start.Start {
+	_start := slm_start.NewStartFunc(p.tp)
+	return _start()
+}
+
+// Stop snapshot lifecycle management.
+// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin.
+// This API is useful when you are performing maintenance on a cluster and need
+// to prevent SLM from performing any actions on your data streams or indices.
+// Stopping SLM does not stop any snapshots that are in progress.
+// You can manually trigger snapshots with the run snapshot lifecycle policy API
+// even if SLM is stopped.
+//
+// The API returns a response as soon as the request is acknowledged, but the
+// plugin might continue to run until in-progress operations complete and it can
+// be safely stopped.
+// Use the get snapshot lifecycle management status API to see if SLM is
+// running.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop
+func (p *MethodSlm) Stop() *slm_stop.Stop {
+	_stop := slm_stop.NewStopFunc(p.tp)
+	return _stop()
+}
+
+// Clean up the snapshot repository.
+// Trigger the review of the contents of a snapshot repository and delete any
+// stale data not referenced by existing snapshots.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository
+func (p *MethodSnapshot) CleanupRepository(repository string) *snapshot_cleanup_repository.CleanupRepository {
+	_cleanuprepository := snapshot_cleanup_repository.NewCleanupRepositoryFunc(p.tp)
+	return _cleanuprepository(repository)
+}
+
+// Clone a snapshot.
+// Clone part or all of a snapshot into another snapshot in the same repository.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone
+func (p *MethodSnapshot) Clone(repository, snapshot, targetsnapshot string) *snapshot_clone.Clone {
+	_clone := snapshot_clone.NewCloneFunc(p.tp)
+	return _clone(repository, snapshot, targetsnapshot)
+}
+
+// Create a snapshot.
+// Take a snapshot of a cluster or of data streams and indices.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create
+func (p *MethodSnapshot) Create(repository, snapshot string) *snapshot_create.Create {
+	_create := snapshot_create.NewCreateFunc(p.tp)
+	return _create(repository, snapshot)
+}
+
+// Create or update a snapshot repository.
+// IMPORTANT: If you are migrating searchable snapshots, the repository name
+// must be identical in the source and destination clusters.
+// To register a snapshot repository, the cluster's global metadata must be
+// writeable.
+// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+// and `cluster.blocks.read_only_allow_delete` settings) that prevent write
+// access.
+//
+// Several options for this API can be specified using a query parameter or a
+// request body parameter.
+// If both parameters are specified, only the query parameter is used.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository
+func (p *MethodSnapshot) CreateRepository(repository string) *snapshot_create_repository.CreateRepository {
+	_createrepository := snapshot_create_repository.NewCreateRepositoryFunc(p.tp)
+	return _createrepository(repository)
+}
+
+// Delete snapshots.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete
+func (p *MethodSnapshot) Delete(repository, snapshot string) *snapshot_delete.Delete {
+	_delete := snapshot_delete.NewDeleteFunc(p.tp)
+	return _delete(repository, snapshot)
+}
+
+// Delete snapshot repositories.
+// When a repository is unregistered, Elasticsearch removes only the reference
+// to the location where the repository is storing the snapshots.
+// The snapshots themselves are left untouched and in place.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository
+func (p *MethodSnapshot) DeleteRepository(repository string) *snapshot_delete_repository.DeleteRepository {
+	_deleterepository := snapshot_delete_repository.NewDeleteRepositoryFunc(p.tp)
+	return _deleterepository(repository)
+}
+
+// Get snapshot information.
+//
+// NOTE: The `after` parameter and `next` field enable you to iterate through
+// snapshots with some consistency guarantees regarding concurrent creation or
+// deletion of snapshots.
+// It is guaranteed that any snapshot that exists at the beginning of the
+// iteration and is not concurrently deleted will be seen during the iteration.
+// Snapshots concurrently created may be seen during an iteration.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get
+func (p *MethodSnapshot) Get(repository, snapshot string) *snapshot_get.Get {
+	_get := snapshot_get.NewGetFunc(p.tp)
+	return _get(repository, snapshot)
+}
+
+// Get snapshot repository information.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository
+func (p *MethodSnapshot) GetRepository() *snapshot_get_repository.GetRepository {
+	_getrepository := snapshot_get_repository.NewGetRepositoryFunc(p.tp)
+	return _getrepository()
+}
+
+// Analyze a snapshot repository.
+//
+// Performs operations on a snapshot repository in order to check for incorrect
+// behaviour.
+//
+// There are a large number of third-party storage systems available, not all of
+// which are suitable for use as a snapshot repository by Elasticsearch.
+// Some storage systems behave incorrectly, or perform poorly, especially when
+// accessed concurrently by multiple clients as the nodes of an Elasticsearch
+// cluster do.
+// This API performs a collection of read and write operations on your
+// repository which are designed to detect incorrect behaviour and to measure
+// the performance characteristics of your storage system.
+//
+// The default values for the parameters are deliberately low to reduce the
+// impact of running an analysis inadvertently and to provide a sensible
+// starting point for your investigations.
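+//
+// A minimal usage sketch (assuming, as in the upstream typed API, that `snap`
+// is the enclosing *MethodSnapshot value, that the analysis wrapper is named
+// RepositoryAnalyze (the name here is illustrative), and that the repository
+// name is a placeholder):
+//
+//	res, err := snap.RepositoryAnalyze("my_repository").Do(ctx)
+//	if err != nil {
+//		// handle transport or API error
+//	}
+//	_ = res // timing details for each operation, if requested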
+// Run your first analysis with the default parameter values to check for simple
+// problems.
+// Some repositories may behave correctly when lightly loaded but incorrectly
+// under production-like workloads.
+// If the first analysis is successful, run a sequence of increasingly large
+// analyses until you encounter a failure or you reach a `blob_count` of at
+// least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of
+// at least `1tb`, and a `register_operation_count` of at least `100`.
+// Always specify a generous timeout, possibly `1h` or longer, to allow time for
+// each analysis to run to completion.
+// Some repositories may behave correctly when accessed by a small number of
+// Elasticsearch nodes but incorrectly when accessed concurrently by a
+// production-scale cluster.
+// Perform the analyses using a multi-node cluster of a similar size to your
+// production cluster so that it can detect any problems that only arise when
+// the repository is accessed by many nodes at once.
+//
+// If the analysis fails, Elasticsearch detected that your repository behaved
+// unexpectedly.
+// This usually means you are using a third-party storage system with an
+// incorrect or incompatible implementation of the API it claims to support.
+// If so, this storage system is not suitable for use as a snapshot repository.
+// Repository analysis triggers conditions that occur only rarely when taking
+// snapshots in a production system.
+// Snapshotting to unsuitable storage may appear to work correctly most of the
+// time despite repository analysis failures.
+// However, your snapshot data is at risk if you store it in a snapshot
+// repository that does not reliably pass repository analysis.
+// You can demonstrate that the analysis failure is due to an incompatible
+// storage implementation by verifying that Elasticsearch does not detect the
+// same problem when analysing the reference implementation of the storage
+// protocol you are using.
+// For instance, if you are using storage that offers an API which the supplier
+// claims to be compatible with AWS S3, verify that repositories in AWS S3 do
+// not fail repository analysis.
+// This allows you to demonstrate to your storage supplier that a repository
+// analysis failure must only be caused by an incompatibility with AWS S3 and
+// cannot be attributed to a problem in Elasticsearch.
+// Please do not report Elasticsearch issues involving third-party storage
+// systems unless you can demonstrate that the same issue exists when analysing
+// a repository that uses the reference implementation of the same storage
+// protocol.
+// You will need to work with the supplier of your storage system to address the
+// incompatibilities that Elasticsearch detects.
+//
+// If the analysis is successful, the API returns details of the testing
+// process, optionally including how long each operation took.
+// You can use this information to determine the performance of your storage
+// system.
+// If any operation fails or returns an incorrect result, the API returns an
+// error.
+// If the API returns an error, it may not have removed all the data it wrote to
+// the repository.
+// The error will indicate the location of any leftover data and this path is
+// also recorded in the Elasticsearch logs.
+// You should verify that this location has been cleaned up correctly.
+// If there is still leftover data at the specified location, you should
+// manually remove it.
+//
+// If the connection from your client to Elasticsearch is closed while the
+// client is waiting for the result of the analysis, the test is cancelled.
+// Some clients are configured to close their connection if no response is
+// received within a certain timeout.
+// An analysis takes a long time to complete so you might need to relax any such
+// client-side timeouts.
+// On cancellation the analysis attempts to clean up the data it was writing,
+// but it may not be able to remove it all.
+// The path to the leftover data is recorded in the Elasticsearch logs.
+// You should verify that this location has been cleaned up correctly.
+// If there is still leftover data at the specified location, you should
+// manually remove it.
+//
+// If the analysis is successful then it detected no incorrect behaviour, but
+// this does not mean that correct behaviour is guaranteed.
+// The analysis attempts to detect common bugs but it does not offer 100%
+// coverage.
+// Additionally, it does not test the following:
+//
+// * Your repository must perform durable writes. Once a blob has been written
+// it must remain in place until it is deleted, even after a power loss or
+// similar disaster.
+// * Your repository must not suffer from silent data corruption. Once a blob
+// has been written, its contents must remain unchanged until it is deliberately
+// modified or deleted.
+// * Your repository must behave correctly even if connectivity from the cluster
+// is disrupted. Reads and writes may fail in this case, but they must not
+// return incorrect results.
+//
+// IMPORTANT: An analysis writes a substantial amount of data to your repository
+// and then reads it back again.
+// This consumes bandwidth on the network between the cluster and the
+// repository, and storage space and I/O bandwidth on the repository itself.
+// You must ensure this load does not affect other users of these systems.
+// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and
+// `max_restore_bytes_per_sec` if available, and the cluster setting
+// `indices.recovery.max_bytes_per_sec`, which you can use to limit the
+// bandwidth they consume.
+//
+// NOTE: This API is intended for exploratory use by humans.
+// You should expect the request parameters and the response format to vary in
+// future versions.
+// The response exposes implementation details of the analysis which may change
+// from version to version.
+//
+// NOTE: Different versions of Elasticsearch may perform different checks for
+// repository compatibility, with newer versions typically being stricter than
+// older ones.
+// A storage system that passes repository analysis with one version of
+// Elasticsearch may fail with a different version.
+// This indicates it behaves incorrectly in ways that the former version did not
+// detect.
+// You must work with the supplier of your storage system to address the
+// incompatibilities detected by the repository analysis API in any version of
+// Elasticsearch.
+//
+// NOTE: This API may not work correctly in a mixed-version cluster.
+//
+// *Implementation details*
+//
+// NOTE: This section of documentation describes how the repository analysis API
+// works in this version of Elasticsearch, but you should expect the
+// implementation to vary between versions.
+// The request parameters and response format depend on details of the
+// implementation so may also be different in newer versions.
+//
+// The analysis comprises a number of blob-level tasks, as set by the
+// `blob_count` parameter, and a number of compare-and-exchange operations on
+// linearizable registers, as set by the `register_operation_count` parameter.
+// These tasks are distributed over the data and master-eligible nodes in the
+// cluster for execution.
+//
+// For most blob-level tasks, the executing node first writes a blob to the
+// repository and then instructs some of the other nodes in the cluster to
+// attempt to read the data it just wrote.
+// The size of the blob is chosen randomly, according to the `max_blob_size` and
+// `max_total_data_size` parameters.
+// If any of these reads fails then the repository does not implement the
+// necessary read-after-write semantics that Elasticsearch requires.
+//
+// For some blob-level tasks, the executing node will instruct some of its peers
+// to attempt to read the data before the writing process completes.
+// These reads are permitted to fail, but must not return partial data.
+// If any read returns partial data then the repository does not implement the
+// necessary atomicity semantics that Elasticsearch requires.
+//
+// For some blob-level tasks, the executing node will overwrite the blob while
+// its peers are reading it.
+// In this case the data read may come from either the original or the
+// overwritten blob, but the read operation must not return partial data or a
+// mix of data from the two blobs.
+// If any of these reads returns partial data or a mix of the two blobs then the
+// repository does not implement the necessary atomicity semantics that
+// Elasticsearch requires for overwrites.
+//
+// The executing node will use a variety of different methods to write the blob.
+// For instance, where applicable, it will use both single-part and multi-part
+// uploads.
+// Similarly, the reading nodes will use a variety of different methods to read
+// the data back again.
+// For instance they may read the entire blob from start to end or may read only
+// a subset of the data.
+//
+// For some blob-level tasks, the executing node will cancel the write before it
+// is complete.
+// In this case, it still instructs some of the other nodes in the cluster to
+// attempt to read the blob but all of these reads must fail to find the blob.
+//
+// Linearizable registers are special blobs that Elasticsearch manipulates using
+// an atomic compare-and-exchange operation.
+// This operation ensures correct and strongly-consistent behavior even when the
+// blob is accessed by multiple nodes at the same time.
+// The detailed implementation of the compare-and-exchange operation on
+// linearizable registers varies by repository type.
+// Repository analysis verifies that uncontended compare-and-exchange
+// operations on a linearizable register blob always succeed.
+// Repository analysis also verifies that contended operations either succeed or
+// report the contention but do not return incorrect results.
+// If an operation fails due to contention, Elasticsearch retries the operation
+// until it succeeds.
+// Most of the compare-and-exchange operations performed by repository analysis
+// atomically increment a counter which is represented as an 8-byte blob.
+// Some operations also verify the behavior on small blobs with sizes other than
+// 8 bytes.
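+//
+// As an illustration only, a minimal sketch of invoking this analysis through
+// the wrapper below. It assumes the fluent `Do(ctx)` execution pattern used by
+// the vendored typed-API endpoints; the transport, context, and repository
+// name are placeholders:
+//
+// ```
+// api := NewMethodAPI(transport)
+// res, err := api.Snapshot.RepositoryAnalyze("my-backup-repo").Do(ctx)
+// ```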
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze
+func (p *MethodSnapshot) RepositoryAnalyze(repository string) *snapshot_repository_analyze.RepositoryAnalyze {
+	_repositoryanalyze := snapshot_repository_analyze.NewRepositoryAnalyzeFunc(p.tp)
+	return _repositoryanalyze(repository)
+}
+
+// Verify the repository integrity.
+// Verify the integrity of the contents of a snapshot repository.
+//
+// This API enables you to perform a comprehensive check of the contents of a
+// repository, looking for any anomalies in its data or metadata which might
+// prevent you from restoring snapshots from the repository or which might cause
+// future snapshot create or delete operations to fail.
+//
+// If you suspect the integrity of the contents of one of your snapshot
+// repositories, cease all write activity to this repository immediately, set
+// its `read_only` option to `true`, and use this API to verify its integrity.
+// Until you do so:
+//
+// * It may not be possible to restore some snapshots from this repository.
+// * Searchable snapshots may report errors when searched or may have unassigned
+// shards.
+// * Taking snapshots into this repository may fail or may appear to succeed but
+// have created a snapshot which cannot be restored.
+// * Deleting snapshots from this repository may fail or may appear to succeed
+// but leave the underlying data on disk.
+// * Continuing to write to the repository while it is in an invalid state may
+// cause additional damage to its contents.
+//
+// If the API finds any problems with the integrity of the contents of your
+// repository, Elasticsearch will not be able to repair the damage.
+// The only way to bring the repository back into a fully working state after
+// its contents have been damaged is by restoring its contents from a repository
+// backup which was taken before the damage occurred.
+// You must also identify what caused the damage and take action to prevent it
+// from happening again.
+//
+// If you cannot restore a repository backup, register a new repository and use
+// this for all future snapshot operations.
+// In some cases it may be possible to recover some of the contents of a damaged
+// repository, either by restoring as many of its snapshots as needed and taking
+// new snapshots of the restored data, or by using the reindex API to copy data
+// from any searchable snapshots mounted from the damaged repository.
+//
+// Avoid all operations which write to the repository while the verify
+// repository integrity API is running.
+// If something changes the repository contents while an integrity verification
+// is running then Elasticsearch may incorrectly report having detected some
+// anomalies in its contents due to the concurrent writes.
+// It may also incorrectly fail to report some anomalies that the concurrent
+// writes prevented it from detecting.
+//
+// NOTE: This API is intended for exploratory use by humans. You should expect
+// the request parameters and the response format to vary in future versions.
+//
+// NOTE: This API may not work correctly in a mixed-version cluster.
+//
+// The default values for the parameters of this API are designed to limit the
+// impact of the integrity verification on other activities in your cluster.
+// For instance, by default it will only use at most half of the `snapshot_meta`
+// threads to verify the integrity of each snapshot, allowing other snapshot
+// operations to use the other half of this thread pool.
+// If you modify these parameters to speed up the verification process, you risk
+// disrupting other snapshot-related operations in your cluster.
+// For large repositories, consider setting up a separate single-node
+// Elasticsearch cluster just for running the integrity verification API.
+//
+// The response exposes implementation details of the analysis which may change
+// from version to version.
+// The response body format is therefore not considered stable and may be
+// different in newer versions.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity
+func (p *MethodSnapshot) RepositoryVerifyIntegrity(repository string) *snapshot_repository_verify_integrity.RepositoryVerifyIntegrity {
+	_repositoryverifyintegrity := snapshot_repository_verify_integrity.NewRepositoryVerifyIntegrityFunc(p.tp)
+	return _repositoryverifyintegrity(repository)
+}
+
+// Restore a snapshot.
+// Restore a snapshot of a cluster or data streams and indices.
+//
+// You can restore a snapshot only to a running cluster with an elected master
+// node.
+// The snapshot repository must be registered and available to the cluster.
+// The snapshot and cluster versions must be compatible.
+//
+// To restore a snapshot, the cluster's global metadata must be writable. Ensure
+// there aren't any cluster blocks that prevent writes. The restore operation
+// ignores index blocks.
+//
+// Before you restore a data stream, ensure the cluster contains a matching
+// index template with data streams enabled. To check, use the index management
+// feature in Kibana or the get index template API:
+//
+// ```
+// GET
+// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+// ```
+//
+// If no such template exists, you can create one or restore a cluster state
+// that contains one. Without a matching index template, a data stream can't
+// roll over or create backing indices.
+//
+// If your snapshot contains data from App Search or Workplace Search, you must
+// restore the Enterprise Search encryption key before you restore the snapshot.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore
+func (p *MethodSnapshot) Restore(repository, snapshot string) *snapshot_restore.Restore {
+	_restore := snapshot_restore.NewRestoreFunc(p.tp)
+	return _restore(repository, snapshot)
+}
+
+// Get the snapshot status.
+// Get a detailed description of the current state for each shard participating
+// in the snapshot.
+//
+// Note that this API should be used only to obtain detailed shard-level
+// information for ongoing snapshots.
+// If this detail is not needed or you want to obtain information about one or
+// more existing snapshots, use the get snapshot API.
+//
+// If you omit the `<snapshot>` request path parameter, the request retrieves
+// information only for currently running snapshots.
+// This usage is preferred.
+// If needed, you can specify `<repository>` and `<snapshot>` to retrieve
+// information for specific snapshots, even if they're not currently running.
+//
+// WARNING: Using the API to return the status of any snapshots other than
+// currently running snapshots can be expensive.
+// The API requires a read from the repository for each shard in each snapshot.
+// For example, if you have 100 snapshots with 1,000 shards each, an API request
+// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000
+// shards).
+// +// Depending on the latency of your storage, such requests can take an extremely +// long time to return results. +// These requests can also tax machine resources and, when using cloud storage, +// incur high processing costs. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status +func (p *MethodSnapshot) Status() *snapshot_status.Status { + _status := snapshot_status.NewStatusFunc(p.tp) + return _status() +} + +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository +func (p *MethodSnapshot) VerifyRepository(repository string) *snapshot_verify_repository.VerifyRepository { + _verifyrepository := snapshot_verify_repository.NewVerifyRepositoryFunc(p.tp) + return _verifyrepository(repository) +} + +// Clear an SQL search cursor. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor +func (p *MethodSql) ClearCursor() *sql_clear_cursor.ClearCursor { + _clearcursor := sql_clear_cursor.NewClearCursorFunc(p.tp) + return _clearcursor() +} + +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async +func (p *MethodSql) DeleteAsync(id string) *sql_delete_async.DeleteAsync { + _deleteasync := sql_delete_async.NewDeleteAsyncFunc(p.tp) + return _deleteasync(id) +} + +// Get async SQL search results. +// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async +func (p *MethodSql) GetAsync(id string) *sql_get_async.GetAsync { + _getasync := sql_get_async.NewGetAsyncFunc(p.tp) + return _getasync(id) +} + +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status +func (p *MethodSql) GetAsyncStatus(id string) *sql_get_async_status.GetAsyncStatus { + _getasyncstatus := sql_get_async_status.NewGetAsyncStatusFunc(p.tp) + return _getasyncstatus(id) +} + +// Get SQL search results. +// Run an SQL request. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query +func (p *MethodSql) Query() *sql_query.Query { + _query := sql_query.NewQueryFunc(p.tp) + return _query() +} + +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate +func (p *MethodSql) Translate() *sql_translate.Translate { + _translate := sql_translate.NewTranslateFunc(p.tp) + return _translate() +} + +// Get SSL certificates. 
+//
+// Get information about the X.509 certificates that are used to encrypt
+// communications in the cluster.
+// The API returns a list that includes certificates from all TLS contexts
+// including:
+//
+// - Settings for transport and HTTP interfaces
+// - TLS settings that are used within authentication realms
+// - TLS settings for remote monitoring exporters
+//
+// The list includes certificates that are used for configuring trust, such as
+// those configured in the `xpack.security.transport.ssl.truststore` and
+// `xpack.security.transport.ssl.certificate_authorities` settings.
+// It also includes certificates that are used for configuring server identity,
+// such as the `xpack.security.http.ssl.keystore` and
+// `xpack.security.http.ssl.certificate` settings.
+//
+// The list does not include certificates that are sourced from the default SSL
+// context of the Java Runtime Environment (JRE), even if those certificates are
+// in use within Elasticsearch.
+//
+// NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the
+// API returns all the certificates that are included in the PKCS#11 token
+// irrespective of whether these are used in the Elasticsearch TLS
+// configuration.
+//
+// If Elasticsearch is configured to use a keystore or truststore, the API
+// output includes all certificates in that store, even though some of the
+// certificates might not be in active use within the cluster.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates
+func (p *MethodSsl) Certificates() *ssl_certificates.Certificates {
+	_certificates := ssl_certificates.NewCertificatesFunc(p.tp)
+	return _certificates()
+}
+
+// Disable the Logs Streams feature for this cluster.
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-logs-disable.html
+func (p *MethodStreams) LogsDisable() *streams_logs_disable.LogsDisable {
+	_logsdisable := streams_logs_disable.NewLogsDisableFunc(p.tp)
+	return _logsdisable()
+}
+
+// Enable the Logs Streams feature for this cluster.
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-logs-enable.html
+func (p *MethodStreams) LogsEnable() *streams_logs_enable.LogsEnable {
+	_logsenable := streams_logs_enable.NewLogsEnableFunc(p.tp)
+	return _logsenable()
+}
+
+// Return the current status of the streams feature for each streams type.
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-status.html
+func (p *MethodStreams) Status() *streams_status.Status {
+	_status := streams_status.NewStatusFunc(p.tp)
+	return _status()
+}
+
+// Delete a synonym set.
+//
+// You can only delete a synonyms set that is not in use by any index analyzer.
+//
+// Synonyms sets can be used in synonym graph token filters and synonym token
+// filters.
+// These synonym filters can be used as part of search analyzers.
+//
+// Analyzers need to be loaded when an index is restored (such as when a node
+// starts, or the index becomes open).
+// Even if the analyzer is not used on any field mapping, it still needs to be
+// loaded during the index recovery phase.
+//
+// If any analyzers cannot be loaded, the index becomes unavailable and the
+// cluster status becomes red or yellow as index shards are not available.
+// To prevent that, synonyms sets that are used in analyzers can't be deleted.
+// A delete request in this case will return a 400 response code.
+//
+// To remove a synonyms set, you must first remove all indices that contain
+// analyzers using it.
+// You can migrate an index by creating a new index that does not contain the
+// token filter with the synonyms set, and use the reindex API to copy over the
+// index data.
+// Once finished, you can delete the index.
+// When the synonyms set is not used in analyzers, you will be able to delete
+// it.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym
+func (p *MethodSynonyms) DeleteSynonym(id string) *synonyms_delete_synonym.DeleteSynonym {
+	_deletesynonym := synonyms_delete_synonym.NewDeleteSynonymFunc(p.tp)
+	return _deletesynonym(id)
+}
+
+// Delete a synonym rule.
+// Delete a synonym rule from a synonym set.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule
+func (p *MethodSynonyms) DeleteSynonymRule(setid, ruleid string) *synonyms_delete_synonym_rule.DeleteSynonymRule {
+	_deletesynonymrule := synonyms_delete_synonym_rule.NewDeleteSynonymRuleFunc(p.tp)
+	return _deletesynonymrule(setid, ruleid)
+}
+
+// Get a synonym set.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym
+func (p *MethodSynonyms) GetSynonym(id string) *synonyms_get_synonym.GetSynonym {
+	_getsynonym := synonyms_get_synonym.NewGetSynonymFunc(p.tp)
+	return _getsynonym(id)
+}
+
+// Get a synonym rule.
+// Get a synonym rule from a synonym set.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule
+func (p *MethodSynonyms) GetSynonymRule(setid, ruleid string) *synonyms_get_synonym_rule.GetSynonymRule {
+	_getsynonymrule := synonyms_get_synonym_rule.NewGetSynonymRuleFunc(p.tp)
+	return _getsynonymrule(setid, ruleid)
+}
+
+// Get all synonym sets.
+// Get a summary of all defined synonym sets.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonyms-sets
+func (p *MethodSynonyms) GetSynonymsSets() *synonyms_get_synonyms_sets.GetSynonymsSets {
+	_getsynonymssets := synonyms_get_synonyms_sets.NewGetSynonymsSetsFunc(p.tp)
+	return _getsynonymssets()
+}
+
+// Create or update a synonym set.
+// Synonyms sets are limited to a maximum of 10,000 synonym rules per set.
+// If you need to manage more synonym rules, you can create multiple synonym
+// sets.
+//
+// When an existing synonyms set is updated, the search analyzers that use the
+// synonyms set are reloaded automatically for all indices.
+// This is equivalent to invoking the reload search analyzers API for all
+// indices that use the synonyms set.
+//
+// For practical examples of how to create or update a synonyms set, refer to
+// the external documentation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym
+func (p *MethodSynonyms) PutSynonym(id string) *synonyms_put_synonym.PutSynonym {
+	_putsynonym := synonyms_put_synonym.NewPutSynonymFunc(p.tp)
+	return _putsynonym(id)
+}
+
+// Create or update a synonym rule.
+// Create or update a synonym rule in a synonym set.
+//
+// If any of the synonym rules included is invalid, the API returns an error.
+//
+// When you update a synonym rule, all analyzers using the synonyms set will be
+// reloaded automatically to reflect the new rule.
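+//
+// A minimal usage sketch, assuming the `Do(ctx)` pattern of the vendored
+// endpoints; the set and rule IDs are illustrative and the rule body itself is
+// supplied through the request's setters (omitted here):
+//
+// ```
+// res, err := api.Synonyms.PutSynonymRule("my-synonyms-set", "rule-1").Do(ctx)
+// ```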
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule
+func (p *MethodSynonyms) PutSynonymRule(setid, ruleid string) *synonyms_put_synonym_rule.PutSynonymRule {
+	_putsynonymrule := synonyms_put_synonym_rule.NewPutSynonymRuleFunc(p.tp)
+	return _putsynonymrule(setid, ruleid)
+}
+
+// Cancel a task.
+//
+// WARNING: The task management API is new and should still be considered a beta
+// feature.
+// The API may change in ways that are not backwards compatible.
+//
+// A task may continue to run for some time after it has been cancelled because
+// it may not be able to safely stop its current activity straight away.
+// It is also possible that Elasticsearch must complete its work on other tasks
+// before it can process the cancellation.
+// The get task information API will continue to list these cancelled tasks
+// until they complete.
+// The cancelled flag in the response indicates that the cancellation command
+// has been processed and the task will stop as soon as possible.
+//
+// To troubleshoot why a cancelled task does not complete promptly, use the get
+// task information API with the `?detailed` parameter to identify the other
+// tasks the system is running.
+// You can also use the node hot threads API to obtain detailed information
+// about the work the system is doing instead of completing the cancelled task.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks
+func (p *MethodTasks) Cancel() *tasks_cancel.Cancel {
+	_cancel := tasks_cancel.NewCancelFunc(p.tp)
+	return _cancel()
+}
+
+// Get task information.
+// Get information about a task currently running in the cluster.
+//
+// WARNING: The task management API is new and should still be considered a beta
+// feature.
+// The API may change in ways that are not backwards compatible.
+//
+// If the task identifier is not found, a 404 response code indicates that there
+// are no resources that match the request.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks
+func (p *MethodTasks) Get(taskid string) *tasks_get.Get {
+	_get := tasks_get.NewGetFunc(p.tp)
+	return _get(taskid)
+}
+
+// Get all tasks.
+// Get information about the tasks currently running on one or more nodes in the
+// cluster.
+//
+// WARNING: The task management API is new and should still be considered a beta
+// feature.
+// The API may change in ways that are not backwards compatible.
+//
+// **Identifying running tasks**
+//
+// The `X-Opaque-Id` header, when provided on the HTTP request, is returned as
+// a header in the response as well as in the `headers` field of the task
+// information.
+// This enables you to track certain calls or associate certain tasks with the
+// client that started them.
+// For example:
+//
+// ```
+// curl -i -H "X-Opaque-Id: 123456"
+// "http://localhost:9200/_tasks?group_by=parents"
+// ```
+//
+// The API returns the following result:
+//
+// ```
+// HTTP/1.1 200 OK
+// X-Opaque-Id: 123456
+// content-type: application/json; charset=UTF-8
+// content-length: 831
+//
+// {
+//   "tasks" : {
+//     "u5lcZHqcQhu-rUoFaqDphA:45" : {
+//       "node" : "u5lcZHqcQhu-rUoFaqDphA",
+//       "id" : 45,
+//       "type" : "transport",
+//       "action" : "cluster:monitor/tasks/lists",
+//       "start_time_in_millis" : 1513823752749,
+//       "running_time_in_nanos" : 293139,
+//       "cancellable" : false,
+//       "headers" : {
+//         "X-Opaque-Id" : "123456"
+//       },
+//       "children" : [
+//         {
+//           "node" : "u5lcZHqcQhu-rUoFaqDphA",
+//           "id" : 46,
+//           "type" : "direct",
+//           "action" : "cluster:monitor/tasks/lists[n]",
+//           "start_time_in_millis" : 1513823752750,
+//           "running_time_in_nanos" : 92133,
+//           "cancellable" : false,
+//           "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+//           "headers" : {
+//             "X-Opaque-Id" : "123456"
+//           }
+//         }
+//       ]
+//     }
+//   }
+// }
+//
+// ```
+// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response
+// header.
+// The `X-Opaque-Id` in the task `headers` is the ID for the task that was
+// initiated by the REST request.
+// The `X-Opaque-Id` in the children `headers` identifies the child task of the
+// task that was initiated by the REST request.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks
+func (p *MethodTasks) List() *tasks_list.List {
+	_list := tasks_list.NewListFunc(p.tp)
+	return _list()
+}
+
+// Find the structure of a text field.
+// Find the structure of a text field in an Elasticsearch index.
+//
+// This API provides a starting point for extracting further information from
+// log messages already ingested into Elasticsearch.
+// For example, if you have ingested data into a very simple index that has just
+// `@timestamp` and `message` fields, you can use this API to see what common
+// structure exists in the `message` field.
+//
+// The response from the API contains:
+//
+// * Sample messages.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure
+func (p *MethodTextStructure) FindFieldStructure() *text_structure_find_field_structure.FindFieldStructure {
+	_findfieldstructure := text_structure_find_field_structure.NewFindFieldStructureFunc(p.tp)
+	return _findfieldstructure()
+}
+
+// Find the structure of text messages.
+// Find the structure of a list of text messages.
+// The messages must contain data that is suitable to be ingested into
+// Elasticsearch.
+//
+// This API provides a starting point for ingesting data into Elasticsearch in a
+// format that is suitable for subsequent use with other Elastic Stack
+// functionality.
+// Use this API rather than the find text structure API if your input text has
+// already been split up into separate messages by some other process.
+//
+// The response from the API contains:
+//
+// * Sample messages.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure
+func (p *MethodTextStructure) FindMessageStructure() *text_structure_find_message_structure.FindMessageStructure {
+	_findmessagestructure := text_structure_find_message_structure.NewFindMessageStructureFunc(p.tp)
+	return _findmessagestructure()
+}
+
+// Find the structure of a text file.
+// The text file must contain data that is suitable to be ingested into
+// Elasticsearch.
+//
+// This API provides a starting point for ingesting data into Elasticsearch in a
+// format that is suitable for subsequent use with other Elastic Stack
+// functionality.
+// Unlike other Elasticsearch endpoints, the data that is posted to this
+// endpoint does not need to be UTF-8 encoded and in JSON format.
+// It must, however, be text; binary text formats are not currently supported.
+// The size is limited to the Elasticsearch HTTP receive buffer size, which
+// defaults to 100 MB.
+//
+// The response from the API contains:
+//
+// * A couple of messages from the beginning of the text.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure
+func (p *MethodTextStructure) FindStructure() *text_structure_find_structure.FindStructure {
+	_findstructure := text_structure_find_structure.NewFindStructureFunc(p.tp)
+	return _findstructure()
+}
+
+// Test a Grok pattern.
+// Test a Grok pattern on one or more lines of text.
+// The API indicates whether the lines match the pattern together with the
+// offsets and lengths of the matched substrings.
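+//
+// A hedged sketch of calling this endpoint through the wrapper below; the Grok
+// pattern and sample text are carried in the request body, which is omitted
+// here, and `Do(ctx)` follows the pattern of the vendored endpoints:
+//
+// ```
+// res, err := api.TextStructure.TestGrokPattern().Do(ctx)
+// ```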
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern +func (p *MethodTextStructure) TestGrokPattern() *text_structure_test_grok_pattern.TestGrokPattern { + _testgrokpattern := text_structure_test_grok_pattern.NewTestGrokPatternFunc(p.tp) + return _testgrokpattern() +} + +// Delete a transform. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform +func (p *MethodTransform) DeleteTransform(transformid string) *transform_delete_transform.DeleteTransform { + _deletetransform := transform_delete_transform.NewDeleteTransformFunc(p.tp) + return _deletetransform(transformid) +} + +// Retrieves transform usage information for transform nodes. +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html +func (p *MethodTransform) GetNodeStats() *transform_get_node_stats.GetNodeStats { + _getnodestats := transform_get_node_stats.NewGetNodeStatsFunc(p.tp) + return _getnodestats() +} + +// Get transforms. +// Get configuration information for transforms. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform +func (p *MethodTransform) GetTransform() *transform_get_transform.GetTransform { + _gettransform := transform_get_transform.NewGetTransformFunc(p.tp) + return _gettransform() +} + +// Get transform stats. +// +// Get usage information for transforms. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats +func (p *MethodTransform) GetTransformStats(transformid string) *transform_get_transform_stats.GetTransformStats { + _gettransformstats := transform_get_transform_stats.NewGetTransformStatsFunc(p.tp) + return _gettransformstats(transformid) +} + +// Preview a transform. +// Generates a preview of the results that you will get when you create a +// transform with the same configuration. +// +// It returns a maximum of 100 results. The calculations are based on all the +// current data in the source index. It also +// generates a list of mappings and settings for the destination index. These +// values are determined based on the field +// types of the source index and the transform aggregations. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform +func (p *MethodTransform) PreviewTransform() *transform_preview_transform.PreviewTransform { + _previewtransform := transform_preview_transform.NewPreviewTransformFunc(p.tp) + return _previewtransform() +} + +// Create a transform. +// Creates a transform. +// +// A transform copies data from source indices, transforms it, and persists it +// into an entity-centric destination index. You can also think of the +// destination index as a two-dimensional tabular data structure (known as +// a data frame). The ID for each document in the data frame is generated from a +// hash of the entity, so there is a +// unique row per entity. +// +// You must choose either the latest or pivot method for your transform; you +// cannot use both in a single transform. If +// you choose to use the pivot method for your transform, the entities are +// defined by the set of `group_by` fields in +// the pivot object. If you choose to use the latest method, the entities are +// defined by the `unique_key` field values +// in the latest object. 
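+//
+// As a sketch, registering a transform through this wrapper might look like
+// the following; the transform ID is illustrative and the pivot or latest
+// configuration travels in the request body (omitted here):
+//
+// ```
+// res, err := api.Transform.PutTransform("ecommerce-customer-transform").Do(ctx)
+// ```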
+// +// You must have `create_index`, `index`, and `read` privileges on the +// destination index and `read` and +// `view_index_metadata` privileges on the source indices. When Elasticsearch +// security features are enabled, the +// transform remembers which roles the user that created it had at the time of +// creation and uses those same roles. If +// those roles do not have the required privileges on the source and destination +// indices, the transform fails when it +// attempts unauthorized operations. +// +// NOTE: You must use Kibana or this API to create a transform. Do not add a +// transform directly into any +// `.transform-internal*` indices using the Elasticsearch index API. If +// Elasticsearch security features are enabled, do +// not give users any privileges on `.transform-internal*` indices. If you used +// transforms prior to 7.5, also do not +// give users any privileges on `.data-frame-internal*` indices. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform +func (p *MethodTransform) PutTransform(transformid string) *transform_put_transform.PutTransform { + _puttransform := transform_put_transform.NewPutTransformFunc(p.tp) + return _puttransform(transformid) +} + +// Reset a transform. +// +// Before you can reset it, you must stop it; alternatively, use the `force` +// query parameter. +// If the destination index was created by the transform, it is deleted. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform +func (p *MethodTransform) ResetTransform(transformid string) *transform_reset_transform.ResetTransform { + _resettransform := transform_reset_transform.NewResetTransformFunc(p.tp) + return _resettransform(transformid) +} + +// Schedule a transform to start now. +// +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API +// is called again in the meantime. +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform +func (p *MethodTransform) ScheduleNowTransform(transformid string) *transform_schedule_now_transform.ScheduleNowTransform { + _schedulenowtransform := transform_schedule_now_transform.NewScheduleNowTransformFunc(p.tp) + return _schedulenowtransform(transformid) +} + +// Start a transform. +// +// When you start a transform, it creates the destination index if it does not +// already exist. The `number_of_shards` is +// set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot +// transform, it deduces the mapping +// definitions for the destination index from the source indices and the +// transform aggregations. If fields in the +// destination index are derived from scripts (as in the case of +// `scripted_metric` or `bucket_script` aggregations), +// the transform uses dynamic mappings unless an index template exists. If it is +// a latest transform, it does not deduce +// mapping definitions; it uses dynamic mappings. To use explicit mappings, +// create the destination index before you +// start the transform. Alternatively, you can create an index template, though +// it does not affect the deduced mappings +// in a pivot transform. +// +// When the transform starts, a series of validations occur to ensure its +// success. 
If you deferred validation when you
+// created the transform, they occur when you start the transform, with the
+// exception of privilege checks. When
+// Elasticsearch security features are enabled, the transform remembers which
+// roles the user that created it had at the
+// time of creation and uses those same roles. If those roles do not have the
+// required privileges on the source and
+// destination indices, the transform fails when it attempts unauthorized
+// operations.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform
+func (p *MethodTransform) StartTransform(transformid string) *transform_start_transform.StartTransform {
+	_starttransform := transform_start_transform.NewStartTransformFunc(p.tp)
+	return _starttransform(transformid)
+}
+
+// Stop transforms.
+// Stops one or more transforms.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform
+func (p *MethodTransform) StopTransform(transformid string) *transform_stop_transform.StopTransform {
+	_stoptransform := transform_stop_transform.NewStopTransformFunc(p.tp)
+	return _stoptransform(transformid)
+}
+
+// Update a transform.
+// Updates certain properties of a transform.
+//
+// All updated properties except `description` do not take effect until after
+// the transform starts the next checkpoint,
+// thus there is data consistency in each checkpoint. To use this API, you must
+// have `read` and `view_index_metadata`
+// privileges for the source indices. You must also have `index` and `read`
+// privileges for the destination index. When
+// Elasticsearch security features are enabled, the transform remembers which
+// roles the user who updated it had at the
+// time of update and runs with those privileges.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform
+func (p *MethodTransform) UpdateTransform(transformid string) *transform_update_transform.UpdateTransform {
+	_updatetransform := transform_update_transform.NewUpdateTransformFunc(p.tp)
+	return _updatetransform(transformid)
+}
+
+// Upgrade all transforms.
+//
+// Transforms are compatible across minor versions and between supported major
+// versions.
+// However, over time, the format of transform configuration information may
+// change.
+// This API identifies transforms that have a legacy configuration format and
+// upgrades them to the latest version.
+// It also cleans up the internal data structures that store the transform state
+// and checkpoints.
+// The upgrade does not affect the source and destination indices.
+// The upgrade also does not affect the roles that transforms use when
+// Elasticsearch security features are enabled; the role used to read source
+// data and write to the destination index remains unchanged.
+//
+// If a transform upgrade step fails, the upgrade stops and an error is returned
+// about the underlying issue.
+// Resolve the issue, then re-run the process.
+// A summary is returned when the upgrade is finished.
+//
+// To ensure continuous transforms remain running during a major version upgrade
+// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade
+// transforms before upgrading the cluster.
+// You may want to take a cluster backup prior to the upgrade.
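+//
+// A minimal sketch of triggering the upgrade, assuming the `Do(ctx)` pattern
+// of the vendored endpoints:
+//
+// ```
+// if _, err := api.Transform.UpgradeTransforms().Do(ctx); err != nil {
+// 	log.Printf("transform upgrade failed: %v", err) // resolve, then re-run
+// }
+// ```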
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms
+func (p *MethodTransform) UpgradeTransforms() *transform_upgrade_transforms.UpgradeTransforms {
+	_upgradetransforms := transform_upgrade_transforms.NewUpgradeTransformsFunc(p.tp)
+	return _upgradetransforms()
+}
+
+// Acknowledge a watch.
+// Acknowledging a watch enables you to manually throttle the execution of the
+// watch's actions.
+//
+// The acknowledgement state of an action is stored in the
+// `status.actions.<id>.ack.state` structure.
+//
+// IMPORTANT: If the specified watch is currently being executed, this API will
+// return an error.
+// The reason for this behavior is to prevent overwriting the watch status from
+// a watch execution.
+//
+// Acknowledging an action throttles further executions of that action until its
+// `ack.state` is reset to `awaits_successful_execution`.
+// This happens when the condition of the watch is not met (the condition
+// evaluates to false).
+// To demonstrate how throttling works in practice and how it can be configured
+// for individual actions within a watch, refer to the external documentation.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch
+func (p *MethodWatcher) AckWatch(watchid string) *watcher_ack_watch.AckWatch {
+	_ackwatch := watcher_ack_watch.NewAckWatchFunc(p.tp)
+	return _ackwatch(watchid)
+}
+
+// Activate a watch.
+// A watch can be either active or inactive.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch
+func (p *MethodWatcher) ActivateWatch(watchid string) *watcher_activate_watch.ActivateWatch {
+	_activatewatch := watcher_activate_watch.NewActivateWatchFunc(p.tp)
+	return _activatewatch(watchid)
+}
+
+// Deactivate a watch.
+// A watch can be either active or inactive.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch
+func (p *MethodWatcher) DeactivateWatch(watchid string) *watcher_deactivate_watch.DeactivateWatch {
+	_deactivatewatch := watcher_deactivate_watch.NewDeactivateWatchFunc(p.tp)
+	return _deactivatewatch(watchid)
+}
+
+// Delete a watch.
+// When the watch is removed, the document representing the watch in the
+// `.watches` index is gone and it will never be run again.
+//
+// Deleting a watch does not delete any watch execution records related to this
+// watch from the watch history.
+//
+// IMPORTANT: Deleting a watch must be done by using only this API.
+// Do not delete the watch directly from the `.watches` index using the
+// Elasticsearch delete document API.
+// When Elasticsearch security features are enabled, make sure no write
+// privileges are granted to anyone for the `.watches` index.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch
+func (p *MethodWatcher) DeleteWatch(id string) *watcher_delete_watch.DeleteWatch {
+	_deletewatch := watcher_delete_watch.NewDeleteWatchFunc(p.tp)
+	return _deletewatch(id)
+}
+
+// Run a watch.
+// This API can be used to force execution of the watch outside of its
+// triggering logic or to simulate the watch execution for debugging purposes.
+//
+// For testing and debugging purposes, you also have fine-grained control on how
+// the watch runs.
+// You can run the watch without running all of its actions or alternatively by
+// simulating them.
+// You can also force execution by ignoring the watch condition and control
+// whether a watch record would be written to the watch history after it runs.
+//
+// You can use the run watch API to run watches that are not yet registered by
+// specifying the watch definition inline.
+// This serves as a great tool for testing and debugging your watches prior to
+// adding them to Watcher.
+//
+// When Elasticsearch security features are enabled on your cluster, watches are
+// run with the privileges of the user that stored the watches.
+// If your user is allowed to read index `a`, but not index `b`, then the exact
+// same set of rules will apply during execution of a watch.
+//
+// When using the run watch API, the authorization data of the user that called
+// the API will be used as a base, instead of the information of the user who
+// stored the watch.
+// Refer to the external documentation for examples of watch execution requests,
+// including existing, customized, and inline watches.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch
+func (p *MethodWatcher) ExecuteWatch() *watcher_execute_watch.ExecuteWatch {
+	_executewatch := watcher_execute_watch.NewExecuteWatchFunc(p.tp)
+	return _executewatch()
+}
+
+// Get Watcher index settings.
+// Get settings for the Watcher internal index (`.watches`).
+// Only a subset of settings are shown, for example `index.auto_expand_replicas`
+// and `index.number_of_replicas`.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings
+func (p *MethodWatcher) GetSettings() *watcher_get_settings.GetSettings {
+	_getsettings := watcher_get_settings.NewGetSettingsFunc(p.tp)
+	return _getsettings()
+}
+
+// Get a watch.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch
+func (p *MethodWatcher) GetWatch(id string) *watcher_get_watch.GetWatch {
+	_getwatch := watcher_get_watch.NewGetWatchFunc(p.tp)
+	return _getwatch(id)
+}
+
+// Create or update a watch.
+// When a watch is registered, a new document that represents the watch is added
+// to the `.watches` index and its trigger is immediately registered with the
+// relevant trigger engine.
+// Typically for the `schedule` trigger, the scheduler is the trigger engine.
+//
+// IMPORTANT: You must use Kibana or this API to create a watch.
+// Do not add a watch directly to the `.watches` index by using the
+// Elasticsearch index API.
+// If Elasticsearch security features are enabled, do not give users write
+// privileges on the `.watches` index.
+//
+// When you add a watch you can also define its initial active state by setting
+// the *active* parameter.
+//
+// When Elasticsearch security features are enabled, your watch can index or
+// search only on indices for which the user that stored the watch has
+// privileges.
+// If the user is able to read index `a`, but not index `b`, the same will apply
+// when the watch runs.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch
+func (p *MethodWatcher) PutWatch(id string) *watcher_put_watch.PutWatch {
+	_putwatch := watcher_put_watch.NewPutWatchFunc(p.tp)
+	return _putwatch(id)
+}
+
+// Query watches.
+// Get all registered watches in a paginated manner and optionally filter
+// watches by a query.
+//
+// Note that only the `_id` and `metadata.*` fields are queryable or sortable.
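+//
+// For example, a sketch assuming the `Do(ctx)` pattern of the vendored
+// endpoints; pagination and the optional query are set on the request before
+// it runs:
+//
+// ```
+// res, err := api.Watcher.QueryWatches().Do(ctx)
+// ```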
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches
+func (p *MethodWatcher) QueryWatches() *watcher_query_watches.QueryWatches {
+	_querywatches := watcher_query_watches.NewQueryWatchesFunc(p.tp)
+	return _querywatches()
+}
+
+// Start the watch service.
+// Start the Watcher service if it is not already running.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start
+func (p *MethodWatcher) Start() *watcher_start.Start {
+	_start := watcher_start.NewStartFunc(p.tp)
+	return _start()
+}
+
+// Get Watcher statistics.
+// This API always returns basic metrics.
+// You can retrieve more metrics by using the `metric` query parameter.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats
+func (p *MethodWatcher) Stats() *watcher_stats.Stats {
+	_stats := watcher_stats.NewStatsFunc(p.tp)
+	return _stats()
+}
+
+// Stop the watch service.
+// Stop the Watcher service if it is running.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop
+func (p *MethodWatcher) Stop() *watcher_stop.Stop {
+	_stop := watcher_stop.NewStopFunc(p.tp)
+	return _stop()
+}
+
+// Update Watcher index settings.
+// Update settings for the Watcher internal index (`.watches`).
+// Only a subset of settings can be modified.
+// This includes `index.auto_expand_replicas`, `index.number_of_replicas`,
+// `index.routing.allocation.exclude.*`,
+// `index.routing.allocation.include.*` and
+// `index.routing.allocation.require.*`.
+// Modification of `index.routing.allocation.include._tier_preference` is an
+// exception and is not allowed as the
+// Watcher shards must always be in the `data_content` tier.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings
+func (p *MethodWatcher) UpdateSettings() *watcher_update_settings.UpdateSettings {
+	_updatesettings := watcher_update_settings.NewUpdateSettingsFunc(p.tp)
+	return _updatesettings()
+}
+
+// Get information.
+// The information provided by the API includes:
+//
+// * Build information including the build number and timestamp.
+// * License information about the currently installed license.
+// * Feature information for the features that are currently enabled and
+// available under the current license.
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info
+func (p *MethodXpack) Info() *xpack_info.Info {
+	_info := xpack_info.NewInfoFunc(p.tp)
+	return _info()
+}
+
+// Get usage information.
+// Get information about the features that are currently enabled and available
+// under the current license.
+// The API also provides some usage statistics.
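+//
+// A minimal sketch using the wrapper constructed by `NewMethodAPI` below,
+// assuming the `Do(ctx)` pattern of the vendored endpoints:
+//
+// ```
+// api := NewMethodAPI(transport)
+// usage, err := api.Xpack.Usage().Do(ctx)
+// ```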
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack +func (p *MethodXpack) Usage() *xpack_usage.Usage { + _usage := xpack_usage.NewUsageFunc(p.tp) + return _usage() +} + +func NewMethodAPI(tp elastictransport.Interface) *MethodAPI { + return &MethodAPI{ + tp: tp, + AsyncSearch: MethodAsyncSearch{tp: tp}, + Autoscaling: MethodAutoscaling{tp: tp}, + Capabilities: MethodCapabilities{tp: tp}, + Cat: MethodCat{tp: tp}, + Ccr: MethodCcr{tp: tp}, + Cluster: MethodCluster{tp: tp}, + Connector: MethodConnector{tp: tp}, + Core: MethodCore{tp: tp}, + DanglingIndices: MethodDanglingIndices{tp: tp}, + Enrich: MethodEnrich{tp: tp}, + Eql: MethodEql{tp: tp}, + Esql: MethodEsql{tp: tp}, + Features: MethodFeatures{tp: tp}, + Fleet: MethodFleet{tp: tp}, + Graph: MethodGraph{tp: tp}, + Ilm: MethodIlm{tp: tp}, + Indices: MethodIndices{tp: tp}, + Inference: MethodInference{tp: tp}, + Ingest: MethodIngest{tp: tp}, + License: MethodLicense{tp: tp}, + Logstash: MethodLogstash{tp: tp}, + Migration: MethodMigration{tp: tp}, + Ml: MethodMl{tp: tp}, + Monitoring: MethodMonitoring{tp: tp}, + Nodes: MethodNodes{tp: tp}, + Profiling: MethodProfiling{tp: tp}, + QueryRules: MethodQueryRules{tp: tp}, + Rollup: MethodRollup{tp: tp}, + SearchApplication: MethodSearchApplication{tp: tp}, + SearchableSnapshots: MethodSearchableSnapshots{tp: tp}, + Security: MethodSecurity{tp: tp}, + Shutdown: MethodShutdown{tp: tp}, + Simulate: MethodSimulate{tp: tp}, + Slm: MethodSlm{tp: tp}, + Snapshot: MethodSnapshot{tp: tp}, + Sql: MethodSql{tp: tp}, + Ssl: MethodSsl{tp: tp}, + Streams: MethodStreams{tp: tp}, + Synonyms: MethodSynonyms{tp: tp}, + Tasks: MethodTasks{tp: tp}, + TextStructure: MethodTextStructure{tp: tp}, + Transform: MethodTransform{tp: tp}, + Watcher: MethodWatcher{tp: tp}, + Xpack: MethodXpack{tp: tp}} +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/delete.go new file mode 100644 index 000000000..1904e8312 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/delete.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an async search. +// +// If the asynchronous search is still running, it is cancelled. +// Otherwise, the saved search results are deleted. 
+// If the Elasticsearch security features are enabled, the deletion of a
+// specific async search is restricted to: the authenticated user that submitted
+// the original search request; users that have the `cancel_task` cluster
+// privilege.
+package delete
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Delete struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDelete type alias for index.
+type NewDelete func(id string) *Delete
+
+// NewDeleteFunc returns a new instance of Delete with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDeleteFunc(tp elastictransport.Interface) NewDelete {
+	return func(id string) *Delete {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Delete an async search.
+//
+// If the asynchronous search is still running, it is cancelled.
+// Otherwise, the saved search results are deleted.
+// If the Elasticsearch security features are enabled, the deletion of a
+// specific async search is restricted to: the authenticated user that submitted
+// the original search request; users that have the `cancel_task` cluster
+// privilege.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func New(tp elastictransport.Interface) *Delete {
+	r := &Delete{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_async_search")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodDelete
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "async_search.delete")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "async_search.delete")
+		if reader := instrument.RecordRequestBody(ctx, "async_search.delete", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "async_search.delete")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a delete.Response
+func (r Delete) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.delete")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.delete")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Delete headers map.
+func (r *Delete) Header(key, value string) *Delete {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A unique identifier for the async search.
+// API Name: id
+func (r *Delete) _id(id string) *Delete {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Delete) ErrorTrace(errortrace bool) *Delete {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Delete) FilterPath(filterpaths ...string) *Delete {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Delete) Human(human bool) *Delete {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Delete) Pretty(pretty bool) *Delete {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/response.go
new file mode 100644
index 000000000..9cda29905
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package delete
+
+// Response holds the response body struct for the package delete
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
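For orientation, a minimal sketch of how this generated delete endpoint is typically driven. It assumes the typed-client constructor (`elasticsearch.NewTypedClient`) from the parent go-elasticsearch/v9 module, which is not part of this diff; the address and search ID are placeholders.

	package main

	import (
		"context"
		"fmt"
		"log"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		// Placeholder address; real deployments would also configure auth/TLS.
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"},
		})
		if err != nil {
			log.Fatal(err)
		}

		// "my-async-search-id" stands in for an ID returned by a submit call.
		// Do decodes the body into delete.Response; Acknowledged reports success.
		res, err := es.AsyncSearch.Delete("my-async-search-id").Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("acknowledged:", res.Acknowledged)
	}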
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/get.go
new file mode 100644
index 000000000..d18d706b2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/get.go
@@ -0,0 +1,391 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get async search results.
+//
+// Retrieve the results of a previously submitted asynchronous search request.
+// If the Elasticsearch security features are enabled, access to the results of
+// a specific async search is restricted to the user or API key that submitted
+// it.
+package get
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Get struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGet type alias for index.
+type NewGet func(id string) *Get
+
+// NewGetFunc returns a new instance of Get with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetFunc(tp elastictransport.Interface) NewGet {
+	return func(id string) *Get {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Get async search results.
+//
+// Retrieve the results of a previously submitted asynchronous search request.
+// If the Elasticsearch security features are enabled, access to the results of
+// a specific async search is restricted to the user or API key that submitted
+// it.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func New(tp elastictransport.Interface) *Get {
+	r := &Get{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_async_search")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "async_search.get")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "async_search.get")
+		if reader := instrument.RecordRequestBody(ctx, "async_search.get", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "async_search.get")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Get query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a get.Response
+func (r Get) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.get")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	r.TypedKeys(true)
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.get")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Get headers map.
+func (r *Get) Header(key, value string) *Get {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A unique identifier for the async search.
+// API Name: id
+func (r *Get) _id(id string) *Get {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// KeepAlive The length of time that the async search should be available in the cluster.
+// When not specified, the `keep_alive` set with the corresponding submit async
+// request will be used.
+// Otherwise, it is possible to override the value and extend the validity of
+// the request.
+// When this period expires, the search, if still running, is cancelled.
+// If the search is completed, its saved results are deleted.
+// API name: keep_alive
+func (r *Get) KeepAlive(duration string) *Get {
+	r.values.Set("keep_alive", duration)
+
+	return r
+}
+
+// TypedKeys Specify whether aggregation and suggester names should be prefixed by their
+// respective types in the response
+// API name: typed_keys
+func (r *Get) TypedKeys(typedkeys bool) *Get {
+	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))
+
+	return r
+}
+
+// WaitForCompletionTimeout Specifies to wait for the search to be completed up until the provided
+// timeout.
+// Final results will be returned if available before the timeout expires,
+// otherwise the currently available results will be returned once the timeout
+// expires.
+// By default no timeout is set meaning that the currently available results
+// will be returned without any additional wait.
+// API name: wait_for_completion_timeout
+func (r *Get) WaitForCompletionTimeout(duration string) *Get {
+	r.values.Set("wait_for_completion_timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Get) ErrorTrace(errortrace bool) *Get {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Get) FilterPath(filterpaths ...string) *Get {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Get) Human(human bool) *Get {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Get) Pretty(pretty bool) *Get {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
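A hedged sketch of the polling pattern these builders enable: block briefly for completion and extend retention in one call. As above, the typed client and ID are assumptions not shown in this diff; only the KeepAlive/WaitForCompletionTimeout builders come from the generated file itself.

	package main

	import (
		"context"
		"fmt"
		"log"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // placeholder
		})
		if err != nil {
			log.Fatal(err)
		}

		// Wait up to two seconds for the search to finish, and keep the
		// stored results alive for another five minutes either way.
		res, err := es.AsyncSearch.Get("my-async-search-id").
			WaitForCompletionTimeout("2s").
			KeepAlive("5m").
			Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("running:", res.IsRunning, "partial:", res.IsPartial)
	}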
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/response.go
new file mode 100644
index 000000000..0d792f30b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get/response.go
@@ -0,0 +1,154 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package get
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package get
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L25
+type Response struct {
+
+	// CompletionTime Indicates when the async search completed.
+	// It is present only when the search has completed.
+	CompletionTime types.DateTime `json:"completion_time,omitempty"`
+	CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"`
+	// ExpirationTime Indicates when the async search will expire.
+	ExpirationTime types.DateTime `json:"expiration_time,omitempty"`
+	ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"`
+	Id *string `json:"id,omitempty"`
+	// IsPartial When the query is no longer running, this property indicates whether the
+	// search failed or was successfully completed on all shards.
+	// While the query is running, `is_partial` is always set to `true`.
+	IsPartial bool `json:"is_partial"`
+	// IsRunning Indicates whether the search is still running or has completed.
+	//
+	// > info
+	// > If the search failed after some shards returned their results or the node
+	// that is coordinating the async search dies, results may be partial even
+	// though `is_running` is `false`.
+	IsRunning bool `json:"is_running"`
+	Response types.AsyncSearch `json:"response"`
+	StartTime types.DateTime `json:"start_time,omitempty"`
+	StartTimeInMillis int64 `json:"start_time_in_millis"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
+
+func (s *Response) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "completion_time":
+			if err := dec.Decode(&s.CompletionTime); err != nil {
+				return fmt.Errorf("%s | %w", "CompletionTime", err)
+			}
+
+		case "completion_time_in_millis":
+			if err := dec.Decode(&s.CompletionTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "CompletionTimeInMillis", err)
+			}
+
+		case "expiration_time":
+			if err := dec.Decode(&s.ExpirationTime); err != nil {
+				return fmt.Errorf("%s | %w", "ExpirationTime", err)
+			}
+
+		case "expiration_time_in_millis":
+			if err := dec.Decode(&s.ExpirationTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "ExpirationTimeInMillis", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "is_partial":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsPartial", err)
+				}
+				s.IsPartial = value
+			case bool:
+				s.IsPartial = v
+			}
+
+		case "is_running":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsRunning", err)
+				}
+				s.IsRunning = value
+			case bool:
+				s.IsRunning = v
+			}
+
+		case "response":
+			if err := dec.Decode(&s.Response); err != nil {
+				return fmt.Errorf("%s | %w", "Response", err)
+			}
+
+		case "start_time":
+			if err := dec.Decode(&s.StartTime); err != nil {
+				return fmt.Errorf("%s | %w", "StartTime", err)
+			}
+
+		case "start_time_in_millis":
+			if err := dec.Decode(&s.StartTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "StartTimeInMillis", err)
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/response.go
new file mode 100644
index 000000000..4b0a267a9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/response.go
@@ -0,0 +1,185 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package status
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package status
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L42
+type Response struct {
+
+	// Clusters_ Metadata about clusters involved in the cross-cluster search.
+	// It is not shown for local-only searches.
+	Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"`
+	// CompletionStatus If the async search completed, this field shows the status code of the
+	// search.
+	// For example, `200` indicates that the async search was successfully
+	// completed.
+	// `503` indicates that the async search was completed with an error.
+	CompletionStatus *int `json:"completion_status,omitempty"`
+	// CompletionTime Indicates when the async search completed.
+	// It is present only when the search has completed.
+	CompletionTime types.DateTime `json:"completion_time,omitempty"`
+	CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"`
+	// ExpirationTime Indicates when the async search will expire.
+	ExpirationTime types.DateTime `json:"expiration_time,omitempty"`
+	ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"`
+	Id *string `json:"id,omitempty"`
+	// IsPartial When the query is no longer running, this property indicates whether the
+	// search failed or was successfully completed on all shards.
+	// While the query is running, `is_partial` is always set to `true`.
+	IsPartial bool `json:"is_partial"`
+	// IsRunning Indicates whether the search is still running or has completed.
+	//
+	// > info
+	// > If the search failed after some shards returned their results or the node
+	// that is coordinating the async search dies, results may be partial even
+	// though `is_running` is `false`.
+	IsRunning bool `json:"is_running"`
+	// Shards_ The number of shards that have run the query so far.
+	Shards_ types.ShardStatistics `json:"_shards"`
+	StartTime types.DateTime `json:"start_time,omitempty"`
+	StartTimeInMillis int64 `json:"start_time_in_millis"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
+
+func (s *Response) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "_clusters":
+			if err := dec.Decode(&s.Clusters_); err != nil {
+				return fmt.Errorf("%s | %w", "Clusters_", err)
+			}
+
+		case "completion_status":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "CompletionStatus", err)
+				}
+				s.CompletionStatus = &value
+			case float64:
+				f := int(v)
+				s.CompletionStatus = &f
+			}
+
+		case "completion_time":
+			if err := dec.Decode(&s.CompletionTime); err != nil {
+				return fmt.Errorf("%s | %w", "CompletionTime", err)
+			}
+
+		case "completion_time_in_millis":
+			if err := dec.Decode(&s.CompletionTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "CompletionTimeInMillis", err)
+			}
+
+		case "expiration_time":
+			if err := dec.Decode(&s.ExpirationTime); err != nil {
+				return fmt.Errorf("%s | %w", "ExpirationTime", err)
+			}
+
+		case "expiration_time_in_millis":
+			if err := dec.Decode(&s.ExpirationTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "ExpirationTimeInMillis", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "is_partial":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsPartial", err)
+				}
+				s.IsPartial = value
+			case bool:
+				s.IsPartial = v
+			}
+
+		case "is_running":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsRunning", err)
+				}
+				s.IsRunning = value
+			case bool:
+				s.IsRunning = v
+			}
+
+		case "_shards":
+			if err := dec.Decode(&s.Shards_); err != nil {
+				return fmt.Errorf("%s | %w", "Shards_", err)
+			}
+
+		case "start_time":
+			if err := dec.Decode(&s.StartTime); err != nil {
+				return fmt.Errorf("%s | %w", "StartTime", err)
+			}
+
+		case "start_time_in_millis":
+			if err := dec.Decode(&s.StartTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "StartTimeInMillis", err)
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/status.go
new file mode 100644
index 000000000..f3ff3996d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status/status.go
@@ -0,0 +1,370 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get the async search status.
+//
+// Get the status of a previously submitted async search request given its
+// identifier, without retrieving search results.
+// If the Elasticsearch security features are enabled, the access to the status
+// of a specific async search is restricted to:
+//
+// * The user or API key that submitted the original async search request.
+// * Users that have the `monitor` cluster privilege or greater privileges.
+package status
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Status struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewStatus type alias for index.
+type NewStatus func(id string) *Status
+
+// NewStatusFunc returns a new instance of Status with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewStatusFunc(tp elastictransport.Interface) NewStatus {
+	return func(id string) *Status {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Get the async search status.
+//
+// Get the status of a previously submitted async search request given its
+// identifier, without retrieving search results.
+// If the Elasticsearch security features are enabled, the access to the status
+// of a specific async search is restricted to:
+//
+// * The user or API key that submitted the original async search request.
+// * Users that have the `monitor` cluster privilege or greater privileges.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit
+func New(tp elastictransport.Interface) *Status {
+	r := &Status{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Status) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_async_search")
+		path.WriteString("/")
+		path.WriteString("status")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r Status) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "async_search.status")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "async_search.status")
+		if reader := instrument.RecordRequestBody(ctx, "async_search.status", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "async_search.status")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Status query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a status.Response
+func (r Status) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Status) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "async_search.status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Status query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Status headers map.
+func (r *Status) Header(key, value string) *Status {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A unique identifier for the async search.
+// API Name: id
+func (r *Status) _id(id string) *Status {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// KeepAlive The length of time that the async search needs to be available.
+// Ongoing async searches and any saved search results are deleted after this
+// period.
+// API name: keep_alive
+func (r *Status) KeepAlive(duration string) *Status {
+	r.values.Set("keep_alive", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Status) ErrorTrace(errortrace bool) *Status {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Status) FilterPath(filterpaths ...string) *Status {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Status) Human(human bool) *Status {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Status) Pretty(pretty bool) *Status {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
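For orientation, a sketch of checking an async search without fetching results, built on the Status endpoint above. The typed client and ID are again placeholders; the fields inspected (IsRunning, CompletionStatus) come from the status.Response struct in this diff.

	package main

	import (
		"context"
		"fmt"
		"log"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // placeholder
		})
		if err != nil {
			log.Fatal(err)
		}

		res, err := es.AsyncSearch.Status("my-async-search-id").Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		if !res.IsRunning && res.CompletionStatus != nil {
			// CompletionStatus is only populated once the search has finished.
			fmt.Println("finished with HTTP status:", *res.CompletionStatus)
		}
	}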
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/request.go
new file mode 100644
index 000000000..ddfdced0a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/request.go
@@ -0,0 +1,506 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package submit
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package submit
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L54-L294
+type Request struct {
+	Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"`
+	Collapse *types.FieldCollapse `json:"collapse,omitempty"`
+	// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field
+	// names matching these patterns in the hits.fields property of the response.
+	DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"`
+	// Explain If true, returns detailed information about score computation as part of a
+	// hit.
+	Explain *bool `json:"explain,omitempty"`
+	// Ext Configuration of search extensions defined by Elasticsearch plugins.
+	Ext map[string]json.RawMessage `json:"ext,omitempty"`
+	// Fields Array of wildcard (*) patterns. The request returns values for field names
+	// matching these patterns in the hits.fields property of the response.
+	Fields []types.FieldAndFormat `json:"fields,omitempty"`
+	// From Starting document offset. By default, you cannot page through more than
+	// 10,000
+	// hits using the from and size parameters. To page through more hits, use the
+	// search_after parameter.
+	From *int `json:"from,omitempty"`
+	Highlight *types.Highlight `json:"highlight,omitempty"`
+	// IndicesBoost Boosts the _score of documents from specified indices.
+	IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"`
+	// Knn Defines the approximate kNN search to run.
+	Knn []types.KnnSearch `json:"knn,omitempty"`
+	// MinScore Minimum _score for matching documents. Documents with a lower _score are
+	// not included in search results and results collected by aggregations.
+	MinScore *types.Float64 `json:"min_score,omitempty"`
+	// Pit Limits the search to a point in time (PIT). If you provide a PIT, you
+	// cannot specify an <index> in the request path.
+	Pit *types.PointInTimeReference `json:"pit,omitempty"`
+	PostFilter *types.Query `json:"post_filter,omitempty"`
+	Profile *bool `json:"profile,omitempty"`
+	// Query Defines the search definition using the Query DSL.
+	Query *types.Query `json:"query,omitempty"`
+	Rescore []types.Rescore `json:"rescore,omitempty"`
+	// RuntimeMappings Defines one or more runtime fields in the search request. These fields take
+	// precedence over mapped fields with the same name.
+	RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"`
+	// ScriptFields Retrieve a script evaluation (based on different fields) for each hit.
+	ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"`
+	SearchAfter []types.FieldValue `json:"search_after,omitempty"`
+	// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification
+	// of each hit. See Optimistic concurrency control.
+	SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"`
+	// Size The number of hits to return. By default, you cannot page through more
+	// than 10,000 hits using the from and size parameters. To page through more
+	// hits, use the search_after parameter.
+	Size *int `json:"size,omitempty"`
+	Slice *types.SlicedScroll `json:"slice,omitempty"`
+	Sort []types.SortCombinations `json:"sort,omitempty"`
+	// Source_ Indicates which source fields are returned for matching documents. These
+	// fields are returned in the hits._source property of the search response.
+	Source_ types.SourceConfig `json:"_source,omitempty"`
+	// Stats Stats groups to associate with the search. Each group maintains a statistics
+	// aggregation for its associated searches. You can retrieve these stats using
+	// the indices stats API.
+	Stats []string `json:"stats,omitempty"`
+	// StoredFields List of stored fields to return as part of a hit. If no fields are specified,
+	// no stored fields are included in the response. If this field is specified,
+	// the _source
+	// parameter defaults to false. You can pass _source: true to return both source
+	// fields
+	// and stored fields in the search response.
+	StoredFields []string `json:"stored_fields,omitempty"`
+	Suggest *types.Suggester `json:"suggest,omitempty"`
+	// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches
+	// this
+	// limit, Elasticsearch terminates the query early. Elasticsearch collects
+	// documents
+	// before sorting. Defaults to 0, which does not terminate query execution
+	// early.
+	TerminateAfter *int64 `json:"terminate_after,omitempty"`
+	// Timeout Specifies the period of time to wait for a response from each shard. If no
+	// response
+	// is received before the timeout expires, the request fails and returns an
+	// error.
+	// Defaults to no timeout.
+	Timeout *string `json:"timeout,omitempty"`
+	// TrackScores If true, calculate and return document scores, even if the scores are not
+	// used for sorting.
+	TrackScores *bool `json:"track_scores,omitempty"`
+	// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact
+	// number of hits is returned at the cost of some performance. If false, the
+	// response does not include the total number of hits matching the query.
+	// Defaults to 10,000 hits.
+	TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"`
+	// Version If true, returns document version as part of a hit.
+	Version *bool `json:"version,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{
+		Aggregations: make(map[string]types.Aggregations, 0),
+		Ext:          make(map[string]json.RawMessage, 0),
+		ScriptFields: make(map[string]types.ScriptField, 0),
+	}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Submit request: %w", err)
+	}
+
+	return &req, nil
+}
+
+func (s *Request) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "aggregations", "aggs":
+			if s.Aggregations == nil {
+				s.Aggregations = make(map[string]types.Aggregations, 0)
+			}
+			if err := dec.Decode(&s.Aggregations); err != nil {
+				return fmt.Errorf("%s | %w", "Aggregations", err)
+			}
+
+		case "collapse":
+			if err := dec.Decode(&s.Collapse); err != nil {
+				return fmt.Errorf("%s | %w", "Collapse", err)
+			}
+
+		case "docvalue_fields":
+			if err := dec.Decode(&s.DocvalueFields); err != nil {
+				return fmt.Errorf("%s | %w", "DocvalueFields", err)
+			}
+
+		case "explain":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Explain", err)
+				}
+				s.Explain = &value
+			case bool:
+				s.Explain = &v
+			}
+
+		case "ext":
+			if s.Ext == nil {
+				s.Ext = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Ext); err != nil {
+				return fmt.Errorf("%s | %w", "Ext", err)
+			}
+
+		case "fields":
+			if err := dec.Decode(&s.Fields); err != nil {
+				return fmt.Errorf("%s | %w", "Fields", err)
+			}
+
+		case "from":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "From", err)
+				}
+				s.From = &value
+			case float64:
+				f := int(v)
+				s.From = &f
+			}
+
+		case "highlight":
+			if err := dec.Decode(&s.Highlight); err != nil {
+				return fmt.Errorf("%s | %w", "Highlight", err)
+			}
+
+		case "indices_boost":
+			if err := dec.Decode(&s.IndicesBoost); err != nil {
+				return fmt.Errorf("%s | %w", "IndicesBoost", err)
+			}
+
+		case "knn":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := types.NewKnnSearch()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Knn", err)
+				}
+
+				s.Knn = append(s.Knn, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Knn); err != nil {
+					return fmt.Errorf("%s | %w", "Knn", err)
+				}
+			}
+
+		case "min_score":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MinScore", err)
+				}
+				f := types.Float64(value)
+				s.MinScore = &f
+			case float64:
+				f := types.Float64(v)
+				s.MinScore = &f
+			}
+
+		case "pit":
+			if err := dec.Decode(&s.Pit); err != nil {
+				return fmt.Errorf("%s | %w", "Pit", err)
+			}
+
+		case "post_filter":
+			if err := dec.Decode(&s.PostFilter); err != nil {
+				return fmt.Errorf("%s | %w", "PostFilter", err)
+			}
+
+		case "profile":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Profile", err)
+				}
+				s.Profile = &value
+			case bool:
+				s.Profile = &v
+			}
+
+		case "query":
+			if err := dec.Decode(&s.Query); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+
+		case "rescore":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := types.NewRescore()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Rescore", err)
+				}
+
+				s.Rescore = append(s.Rescore, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil {
+					return fmt.Errorf("%s | %w", "Rescore", err)
+				}
+			}
+
+		case "runtime_mappings":
+			if err := dec.Decode(&s.RuntimeMappings); err != nil {
+				return fmt.Errorf("%s | %w", "RuntimeMappings", err)
+			}
+
+		case "script_fields":
+			if s.ScriptFields == nil {
+				s.ScriptFields = make(map[string]types.ScriptField, 0)
+			}
+			if err := dec.Decode(&s.ScriptFields); err != nil {
+				return fmt.Errorf("%s | %w", "ScriptFields", err)
+			}
+
+		case "search_after":
+			if err := dec.Decode(&s.SearchAfter); err != nil {
+				return fmt.Errorf("%s | %w", "SearchAfter", err)
+			}
+
+		case "seq_no_primary_term":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err)
+				}
+				s.SeqNoPrimaryTerm = &value
+			case bool:
+				s.SeqNoPrimaryTerm = &v
+			}
+
+		case "size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Size", err)
+				}
+				s.Size = &value
+			case float64:
+				f := int(v)
+				s.Size = &f
+			}
+
+		case "slice":
+			if err := dec.Decode(&s.Slice); err != nil {
+				return fmt.Errorf("%s | %w", "Slice", err)
+			}
+
+		case "sort":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(types.SortCombinations)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Sort", err)
+				}
+
+				s.Sort = append(s.Sort, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil {
+					return fmt.Errorf("%s | %w", "Sort", err)
+				}
+			}
+
+		case "_source":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		source__field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Source_", err)
+				}
+
+				switch t {
+
+				case "exclude_vectors", "excludes", "includes":
+					o := types.NewSourceFilter()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Source_", err)
+					}
+					s.Source_ = o
+					break source__field
+
+				}
+			}
+			if s.Source_ == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Source_); err != nil {
+					return fmt.Errorf("%s | %w", "Source_", err)
+				}
+			}
+
+		case "stats":
+			if err := dec.Decode(&s.Stats); err != nil {
+				return fmt.Errorf("%s | %w", "Stats", err)
+			}
+
+		case "stored_fields":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "StoredFields", err)
+				}
+
+				s.StoredFields = append(s.StoredFields, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil {
+					return fmt.Errorf("%s | %w", "StoredFields", err)
+				}
+			}
+
+		case "suggest":
+			if err := dec.Decode(&s.Suggest); err != nil {
+				return fmt.Errorf("%s | %w", "Suggest", err)
+			}
+
+		case "terminate_after":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TerminateAfter", err)
+				}
+				s.TerminateAfter = &value
+			case float64:
+				f := int64(v)
+				s.TerminateAfter = &f
+			}
+
+		case "timeout":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Timeout", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Timeout = &o
+
+		case "track_scores":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TrackScores", err)
+				}
+				s.TrackScores = &value
+			case bool:
+				s.TrackScores = &v
+			}
+
+		case "track_total_hits":
+			if err := dec.Decode(&s.TrackTotalHits); err != nil {
+				return fmt.Errorf("%s | %w", "TrackTotalHits", err)
+			}
+
+		case "version":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Version", err)
+				}
+				s.Version = &value
+			case bool:
+				s.Version = &v
+			}
+
+		}
+	}
+	return nil
+}
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package submit + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package submit +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L25 +type Response struct { + + // CompletionTime Indicates when the async search completed. + // It is present only when the search has completed. + CompletionTime types.DateTime `json:"completion_time,omitempty"` + CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` + // ExpirationTime Indicates when the async search will expire. + ExpirationTime types.DateTime `json:"expiration_time,omitempty"` + ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` + Id *string `json:"id,omitempty"` + // IsPartial When the query is no longer running, this property indicates whether the + // search failed or was successfully completed on all shards. + // While the query is running, `is_partial` is always set to `true`. + IsPartial bool `json:"is_partial"` + // IsRunning Indicates whether the search is still running or has completed. + // + // > info + // > If the search failed after some shards returned their results or the node + // that is coordinating the async search dies, results may be partial even + // though `is_running` is `false`. + IsRunning bool `json:"is_running"` + Response types.AsyncSearch `json:"response"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeInMillis int64 `json:"start_time_in_millis"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "completion_time": + if err := dec.Decode(&s.CompletionTime); err != nil { + return fmt.Errorf("%s | %w", "CompletionTime", err) + } + + case "completion_time_in_millis": + if err := dec.Decode(&s.CompletionTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "CompletionTimeInMillis", err) + } + + case "expiration_time": + if err := dec.Decode(&s.ExpirationTime); err != nil { + return fmt.Errorf("%s | %w", "ExpirationTime", err) + } + + case "expiration_time_in_millis": + if err := dec.Decode(&s.ExpirationTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExpirationTimeInMillis", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "is_partial": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsPartial", err) + } + s.IsPartial = value + case bool: + s.IsPartial = v + } + + case "is_running": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsRunning", err) + } + s.IsRunning = value + case bool: + s.IsRunning = v + } + + case "response": + if err := dec.Decode(&s.Response); err != nil { + return fmt.Errorf("%s | %w", "Response", err) + } + + case "start_time": + if err := dec.Decode(&s.StartTime); 
err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/submit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/submit.go new file mode 100644 index 000000000..9687c35cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit/submit.go @@ -0,0 +1,1152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run an async search. +// +// When the primary sort of the results is an indexed field, shards get sorted +// based on minimum and maximum value that they hold for that field. Partial +// results become available following the sort criteria that was requested. +// +// Warning: Asynchronous search does not support scroll or search requests that +// include only the suggest section. +// +// By default, Elasticsearch does not allow you to store an async search +// response larger than 10Mb and an attempt to do this results in an error. +// The maximum allowed size for a stored async search response can be set by +// changing the `search.max_async_search_response_size` cluster level setting. +package submit + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Submit struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSubmit type alias for index. 
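+// A minimal sketch of how the alias is wired up (editor's illustration, not
+// generated documentation; "tp" is an assumed elastictransport.Interface):
+//
+//	newSubmit := NewSubmitFunc(tp) // a NewSubmit bound to the transport
+//	req := newSubmit()             // a fresh *Submit per call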
+type NewSubmit func() *Submit + +// NewSubmitFunc returns a new instance of Submit with the provided transport. +// Used in the index of the library; this allows every API to be retrieved from one place. +func NewSubmitFunc(tp elastictransport.Interface) NewSubmit { + return func() *Submit { + n := New(tp) + + return n + } +} + +// Run an async search. +// +// When the primary sort of the results is an indexed field, shards get sorted +// based on minimum and maximum value that they hold for that field. Partial +// results become available following the sort criteria that was requested. +// +// Warning: Asynchronous search does not support scroll or search requests that +// include only the suggest section. +// +// By default, Elasticsearch does not allow you to store an async search +// response larger than 10Mb and an attempt to do this results in an error. +// The maximum allowed size for a stored async search response can be set by +// changing the `search.max_async_search_response_size` cluster level setting. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit +func New(tp elastictransport.Interface) *Submit { + r := &Submit{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a JSON payload as input, which is then passed to the http.Request. +// If specified, Raw takes precedence over the Request method. +func (r *Submit) Raw(raw io.Reader) *Submit { + r.raw = raw + + return r +} + +// Request sets the request property with the appropriate payload. +func (r *Submit) Request(req *Request) *Submit { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters.
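+//
+// A hedged sketch of building a request without sending it ("tp" and the
+// index name are assumptions):
+//
+//	httpReq, err := New(tp).Index("my-index").HttpRequest(context.Background())
+//	// httpReq can then be inspected or dispatched with any http.Client.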
+func (r *Submit) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Submit: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_async_search") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_async_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
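+//
+// Most callers go through Do; Perform is the lower-level path. A sketch,
+// again assuming a configured transport "tp":
+//
+//	res, err := New(tp).Index("my-index").Perform(ctx)
+//	// res is the raw *http.Response; unlike Do, the caller closes res.Body.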
+func (r Submit) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "async_search.submit") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "async_search.submit") + if reader := instrument.RecordRequestBody(ctx, "async_search.submit", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "async_search.submit") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Submit query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a submit.Response +func (r Submit) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "async_search.submit") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Submit headers map. +func (r *Submit) Header(key, value string) *Submit { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of index names to search; use `_all` or empty string +// to perform the operation on all indices +// API Name: index +func (r *Submit) Index(index string) *Submit { + r.paramSet |= indexMask + r.index = index + + return r +} + +// WaitForCompletionTimeout Blocks and waits until the search is completed up to a certain timeout. +// When the async search completes within the timeout, the response won’t +// include the ID as the results are not stored in the cluster. 
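+// For example (a sketch), WaitForCompletionTimeout("2s") blocks for up to two
+// seconds; after that the search keeps running in the background.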
+// API name: wait_for_completion_timeout +func (r *Submit) WaitForCompletionTimeout(duration string) *Submit { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// KeepAlive Specifies how long the async search needs to be available. +// Ongoing async searches and any saved search results are deleted after this +// period. +// API name: keep_alive +func (r *Submit) KeepAlive(duration string) *Submit { + r.values.Set("keep_alive", duration) + + return r +} + +// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes +// within the `wait_for_completion_timeout`. +// API name: keep_on_completion +func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { + r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. (This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *Submit) AllowNoIndices(allownoindices bool) *Submit { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// AllowPartialSearchResults Indicate if an error should be returned if there is a partial search failure +// or timeout +// API name: allow_partial_search_results +func (r *Submit) AllowPartialSearchResults(allowpartialsearchresults bool) *Submit { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + +// Analyzer The analyzer to use for the query string +// API name: analyzer +func (r *Submit) Analyzer(analyzer string) *Submit { + r.values.Set("analyzer", analyzer) + + return r +} + +// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: +// false) +// API name: analyze_wildcard +func (r *Submit) AnalyzeWildcard(analyzewildcard bool) *Submit { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// BatchedReduceSize Affects how often partial results become available, which happens whenever +// shard results are reduced. +// A partial reduction is performed every time the coordinating node has +// received a certain number of new shard responses (5 by default). +// API name: batched_reduce_size +func (r *Submit) BatchedReduceSize(batchedreducesize string) *Submit { + r.values.Set("batched_reduce_size", batchedreducesize) + + return r +} + +// CcsMinimizeRoundtrips The default value is the only supported value. +// API name: ccs_minimize_roundtrips +func (r *Submit) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Submit { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// DefaultOperator The default operator for query string query (AND or OR) +// API name: default_operator +func (r *Submit) DefaultOperator(defaultoperator operator.Operator) *Submit { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// Df The field to use as default where no field prefix is given in the query +// string +// API name: df +func (r *Submit) Df(df string) *Submit { + r.values.Set("df", df) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. 
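+// For example (a sketch):
+//
+//	r.ExpandWildcards(expandwildcard.Open, expandwildcard.Hidden)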
+// API name: expand_wildcards +func (r *Submit) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Submit { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled Whether specified concrete, expanded or aliased indices should be ignored +// when throttled +// API name: ignore_throttled +func (r *Submit) IgnoreThrottled(ignorethrottled bool) *Submit { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable +// (missing or closed) +// API name: ignore_unavailable +func (r *Submit) IgnoreUnavailable(ignoreunavailable bool) *Submit { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Lenient Specify whether format-based query failures (such as providing text to a +// numeric field) should be ignored +// API name: lenient +func (r *Submit) Lenient(lenient bool) *Submit { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// MaxConcurrentShardRequests The number of concurrent shard requests per node this search executes +// concurrently. This value should be used to limit the impact of the search on +// the cluster in order to limit the number of concurrent shard requests +// API name: max_concurrent_shard_requests +func (r *Submit) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *Submit { + r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests)) + + return r +} + +// Preference Specify the node or shard the operation should be performed on (default: +// random) +// API name: preference +func (r *Submit) Preference(preference string) *Submit { + r.values.Set("preference", preference) + + return r +} + +// RequestCache Specify if request cache should be used for this request or not, defaults to +// true +// API name: request_cache +func (r *Submit) RequestCache(requestcache bool) *Submit { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) + + return r +} + +// Routing A comma-separated list of specific routing values +// API name: routing +func (r *Submit) Routing(routing string) *Submit { + r.values.Set("routing", routing) + + return r +} + +// SearchType Search operation type +// API name: search_type +func (r *Submit) SearchType(searchtype searchtype.SearchType) *Submit { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// SuggestField Specifies which field to use for suggestions. +// API name: suggest_field +func (r *Submit) SuggestField(field string) *Submit { + r.values.Set("suggest_field", field) + + return r +} + +// SuggestMode Specify suggest mode +// API name: suggest_mode +func (r *Submit) SuggestMode(suggestmode suggestmode.SuggestMode) *Submit { + r.values.Set("suggest_mode", suggestmode.String()) + + return r +} + +// SuggestSize How many suggestions to return in response +// API name: suggest_size +func (r *Submit) SuggestSize(suggestsize string) *Submit { + r.values.Set("suggest_size", suggestsize) + + return r +} + +// SuggestText The source text for which the suggestions should be returned. 
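+// For example (a sketch combining the suggest parameters; the query text is
+// deliberately misspelled):
+//
+//	r.SuggestField("title").SuggestMode(suggestmode.Missing).SuggestText("elasticsaerch")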
+// API name: suggest_text +func (r *Submit) SuggestText(suggesttext string) *Submit { + r.values.Set("suggest_text", suggesttext) + + return r +} + +// TypedKeys Specify whether aggregation and suggester names should be prefixed by their +// respective types in the response +// API name: typed_keys +func (r *Submit) TypedKeys(typedkeys bool) *Submit { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in +// the rest search response +// API name: rest_total_hits_as_int +func (r *Submit) RestTotalHitsAsInt(resttotalhitsasint bool) *Submit { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// SourceExcludes_ A list of fields to exclude from the returned _source field +// API name: _source_excludes +func (r *Submit) SourceExcludes_(fields ...string) *Submit { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A list of fields to extract and return from the _source field +// API name: _source_includes +func (r *Submit) SourceIncludes_(fields ...string) *Submit { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// Q Query in the Lucene query string syntax +// API name: q +func (r *Submit) Q(q string) *Submit { + r.values.Set("q", q) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Submit) ErrorTrace(errortrace bool) *Submit { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Submit) FilterPath(filterpaths ...string) *Submit { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Submit) Human(human bool) *Submit { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Submit) Pretty(pretty bool) *Submit { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: aggregations +func (r *Submit) Aggregations(aggregations map[string]types.Aggregations) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aggregations = aggregations + return r +} + +func (r *Submit) AddAggregation(key string, value types.AggregationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp + return r +} + +// API name: collapse +func (r *Submit) Collapse(collapse types.FieldCollapseVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Collapse = collapse.FieldCollapseCaster() + + return r +} + +// Array of wildcard (*) patterns. The request returns doc values for field +// names matching these patterns in the hits.fields property of the response. +// API name: docvalue_fields +func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docvaluefields { + + r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster()) + + } + return r +} + +// If true, returns detailed information about score computation as part of a +// hit. +// API name: explain +func (r *Submit) Explain(explain bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Explain = &explain + + return r +} + +// Configuration of search extensions defined by Elasticsearch plugins. +// API name: ext +func (r *Submit) Ext(ext map[string]json.RawMessage) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ext = ext + return r +} + +func (r *Submit) AddExt(key string, value json.RawMessage) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Ext == nil { + r.req.Ext = make(map[string]json.RawMessage) + } else { + tmp = r.req.Ext + } + + tmp[key] = value + + r.req.Ext = tmp + return r +} + +// Array of wildcard (*) patterns. The request returns values for field names +// matching these patterns in the hits.fields property of the response. +// API name: fields +func (r *Submit) Fields(fields ...types.FieldAndFormatVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range fields { + + r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster()) + + } + return r +} + +// Starting document offset. By default, you cannot page through more than +// 10,000 +// hits using the from and size parameters. To page through more hits, use the +// search_after parameter. 
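+// For example (a sketch), From(20) with Size(10) returns hits 21-30:
+//
+//	r.From(20).Size(10)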
+// API name: from +func (r *Submit) From(from int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// API name: highlight +func (r *Submit) Highlight(highlight types.HighlightVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Highlight = highlight.HighlightCaster() + + return r +} + +// Boosts the _score of documents from specified indices. +// API name: indices_boost +func (r *Submit) IndicesBoost(indicesboost []map[string]types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost + + return r +} + +// Defines the approximate kNN search to run. +// API name: knn +func (r *Submit) Knn(knns ...types.KnnSearchVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Knn = make([]types.KnnSearch, len(knns)) + for i, v := range knns { + r.req.Knn[i] = *v.KnnSearchCaster() + } + + return r +} + +// Minimum _score for matching documents. Documents with a lower _score are +// not included in search results and results collected by aggregations. +// API name: min_score +func (r *Submit) MinScore(minscore types.Float64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MinScore = &minscore + + return r +} + +// Limits the search to a point in time (PIT). If you provide a PIT, you +// cannot specify an in the request path. +// API name: pit +func (r *Submit) Pit(pit types.PointInTimeReferenceVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pit = pit.PointInTimeReferenceCaster() + + return r +} + +// API name: post_filter +func (r *Submit) PostFilter(postfilter types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PostFilter = postfilter.QueryCaster() + + return r +} + +// API name: profile +func (r *Submit) Profile(profile bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// Defines the search definition using the Query DSL. +// API name: query +func (r *Submit) Query(query types.QueryVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// API name: rescore +func (r *Submit) Rescore(rescores ...types.RescoreVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } + + return r +} + +// Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. 
+// API name: runtime_mappings +func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// Retrieve a script evaluation (based on different fields) for each hit. +// API name: script_fields +func (r *Submit) ScriptFields(scriptfields map[string]types.ScriptField) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScriptFields = scriptfields + return r +} + +func (r *Submit) AddScriptField(key string, value types.ScriptFieldVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp + return r +} + +// API name: search_after +func (r *Submit) SearchAfter(sortresults ...types.FieldValueVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } + + return r +} + +// If true, returns sequence number and primary term of the last modification +// of each hit. See Optimistic concurrency control. +// API name: seq_no_primary_term +func (r *Submit) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm + + return r +} + +// The number of hits to return. By default, you cannot page through more +// than 10,000 hits using the from and size parameters. To page through more +// hits, use the search_after parameter. +// API name: size +func (r *Submit) Size(size int) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// API name: slice +func (r *Submit) Slice(slice types.SlicedScrollVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Slice = slice.SlicedScrollCaster() + + return r +} + +// API name: sort +func (r *Submit) Sort(sorts ...types.SortCombinationsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} + +// Indicates which source fields are returned for matching documents. These +// fields are returned in the hits._source property of the search response. +// API name: _source +func (r *Submit) Source_(sourceconfig types.SourceConfigVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() + + return r +} + +// Stats groups to associate with the search. Each group maintains a statistics +// aggregation for its associated searches. You can retrieve these stats using +// the indices stats API. 
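+// For example (a sketch; the group names are arbitrary):
+//
+//	r.Stats("checkout_searches", "homepage_searches")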
+// API name: stats +func (r *Submit) Stats(stats ...string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stats { + + r.req.Stats = append(r.req.Stats, v) + + } + return r +} + +// List of stored fields to return as part of a hit. If no fields are specified, +// no stored fields are included in the response. If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. +// API name: stored_fields +func (r *Submit) StoredFields(fields ...string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.StoredFields = fields + + return r +} + +// API name: suggest +func (r *Submit) Suggest(suggest types.SuggesterVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Suggest = suggest.SuggesterCaster() + + return r +} + +// Maximum number of documents to collect for each shard. If a query reaches +// this +// limit, Elasticsearch terminates the query early. Elasticsearch collects +// documents +// before sorting. Defaults to 0, which does not terminate query execution +// early. +// API name: terminate_after +func (r *Submit) TerminateAfter(terminateafter int64) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TerminateAfter = &terminateafter + + return r +} + +// Specifies the period of time to wait for a response from each shard. If no +// response +// is received before the timeout expires, the request fails and returns an +// error. +// Defaults to no timeout. +// API name: timeout +func (r *Submit) Timeout(timeout string) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = &timeout + + return r +} + +// If true, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Submit) TrackScores(trackscores bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackScores = &trackscores + + return r +} + +// Number of hits matching the query to count accurately. If true, the exact +// number of hits is returned at the cost of some performance. If false, the +// response does not include the total number of hits matching the query. +// Defaults to 10,000 hits. +// API name: track_total_hits +func (r *Submit) TrackTotalHits(trackhits types.TrackHitsVariant) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() + + return r +} + +// If true, returns document version as part of a hit. 
+// API name: version +func (r *Submit) Version(version bool) *Submit { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &version + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go new file mode 100644 index 000000000..467ac39d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +package deleteautoscalingpolicy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteAutoscalingPolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteAutoscalingPolicy type alias for index. +type NewDeleteAutoscalingPolicy func(name string) *DeleteAutoscalingPolicy + +// NewDeleteAutoscalingPolicyFunc returns a new instance of DeleteAutoscalingPolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteAutoscalingPolicyFunc(tp elastictransport.Interface) NewDeleteAutoscalingPolicy { + return func(name string) *DeleteAutoscalingPolicy { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. 
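+//
+// A usage sketch (editor's illustration; "tp" and the policy name are
+// assumptions):
+//
+//	resp, err := NewDeleteAutoscalingPolicyFunc(tp)("my_policy").Do(ctx)
+//	// resp.Acknowledged is true on success.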
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy +func New(tp elastictransport.Interface) *DeleteAutoscalingPolicy { + r := &DeleteAutoscalingPolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteAutoscalingPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteAutoscalingPolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "autoscaling.delete_autoscaling_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.delete_autoscaling_policy") + if reader := instrument.RecordRequestBody(ctx, "autoscaling.delete_autoscaling_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.delete_autoscaling_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteAutoscalingPolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteautoscalingpolicy.Response +func (r DeleteAutoscalingPolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.delete_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
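+//
+// A sketch of the control flow it enables ("tp" assumed):
+//
+//	deleted, err := NewDeleteAutoscalingPolicyFunc(tp)("my_policy").IsSuccess(ctx)
+//	// deleted is false with a nil error when the policy is absent (HTTP 404).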
+func (r DeleteAutoscalingPolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.delete_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteAutoscalingPolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteAutoscalingPolicy headers map. +func (r *DeleteAutoscalingPolicy) Header(key, value string) *DeleteAutoscalingPolicy { + r.headers.Set(key, value) + + return r +} + +// Name the name of the autoscaling policy +// API Name: name +func (r *DeleteAutoscalingPolicy) _name(name string) *DeleteAutoscalingPolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteAutoscalingPolicy) MasterTimeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteAutoscalingPolicy) Timeout(duration string) *DeleteAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteAutoscalingPolicy) ErrorTrace(errortrace bool) *DeleteAutoscalingPolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteAutoscalingPolicy) FilterPath(filterpaths ...string) *DeleteAutoscalingPolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteAutoscalingPolicy) Human(human bool) *DeleteAutoscalingPolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteAutoscalingPolicy) Pretty(pretty bool) *DeleteAutoscalingPolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/response.go new file mode 100644 index 000000000..d737722c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteautoscalingpolicy + +// Response holds the response body struct for the package deleteautoscalingpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go new file mode 100644 index 000000000..4362ca50e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go @@ -0,0 +1,375 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the autoscaling capacity. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// This API gets the current autoscaling capacity based on the configured +// autoscaling policy. +// It will return information to size the cluster appropriately to the current +// workload. +// +// The `required_capacity` is calculated as the maximum of the +// `required_capacity` result of all individual deciders that are enabled for +// the policy. +// +// The operator should verify that the `current_nodes` match the operator’s +// knowledge of the cluster to avoid making autoscaling decisions based on stale +// or incomplete information. +// +// The response contains decider-specific information you can use to diagnose +// how and why autoscaling determined a certain capacity was required. +// This information is provided for diagnosis only. +// Do not use this information to make autoscaling decisions. +package getautoscalingcapacity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetAutoscalingCapacity struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetAutoscalingCapacity type alias for index. +type NewGetAutoscalingCapacity func() *GetAutoscalingCapacity + +// NewGetAutoscalingCapacityFunc returns a new instance of GetAutoscalingCapacity with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetAutoscalingCapacityFunc(tp elastictransport.Interface) NewGetAutoscalingCapacity { + return func() *GetAutoscalingCapacity { + n := New(tp) + + return n + } +} + +// Get the autoscaling capacity. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// This API gets the current autoscaling capacity based on the configured +// autoscaling policy. +// It will return information to size the cluster appropriately to the current +// workload. +// +// The `required_capacity` is calculated as the maximum of the +// `required_capacity` result of all individual deciders that are enabled for +// the policy. +// +// The operator should verify that the `current_nodes` match the operator’s +// knowledge of the cluster to avoid making autoscaling decisions based on stale +// or incomplete information. +// +// The response contains decider-specific information you can use to diagnose +// how and why autoscaling determined a certain capacity was required. +// This information is provided for diagnosis only. +// Do not use this information to make autoscaling decisions. 
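+//
+// A usage sketch ("tp" is an assumed elastictransport.Interface):
+//
+//	capacity, err := NewGetAutoscalingCapacityFunc(tp)().Do(ctx)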
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity +func New(tp elastictransport.Interface) *GetAutoscalingCapacity { + r := &GetAutoscalingCapacity{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetAutoscalingCapacity) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("capacity") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetAutoscalingCapacity) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "autoscaling.get_autoscaling_capacity") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.get_autoscaling_capacity") + if reader := instrument.RecordRequestBody(ctx, "autoscaling.get_autoscaling_capacity", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.get_autoscaling_capacity") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetAutoscalingCapacity query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getautoscalingcapacity.Response +func (r GetAutoscalingCapacity) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_capacity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetAutoscalingCapacity) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_capacity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetAutoscalingCapacity query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetAutoscalingCapacity headers map. +func (r *GetAutoscalingCapacity) Header(key, value string) *GetAutoscalingCapacity { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingCapacity) MasterTimeout(duration string) *GetAutoscalingCapacity { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetAutoscalingCapacity) ErrorTrace(errortrace bool) *GetAutoscalingCapacity { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetAutoscalingCapacity) FilterPath(filterpaths ...string) *GetAutoscalingCapacity { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetAutoscalingCapacity) Human(human bool) *GetAutoscalingCapacity { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetAutoscalingCapacity) Pretty(pretty bool) *GetAutoscalingCapacity { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/response.go new file mode 100644 index 000000000..bc2fd2a69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getautoscalingcapacity + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getautoscalingcapacity +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 +type Response struct { + Policies map[string]types.AutoscalingDeciders `json:"policies"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Policies: make(map[string]types.AutoscalingDeciders, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go new file mode 100644 index 000000000..3f97e0cca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get an autoscaling policy.
+//
+// NOTE: This feature is designed for indirect use by Elasticsearch Service,
+// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not
+// supported.
+package getautoscalingpolicy
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetAutoscalingPolicy struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetAutoscalingPolicy type alias for index.
+type NewGetAutoscalingPolicy func(name string) *GetAutoscalingPolicy
+
+// NewGetAutoscalingPolicyFunc returns a new instance of GetAutoscalingPolicy with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetAutoscalingPolicyFunc(tp elastictransport.Interface) NewGetAutoscalingPolicy {
+	return func(name string) *GetAutoscalingPolicy {
+		n := New(tp)
+
+		n._name(name)
+
+		return n
+	}
+}
+
+// Get an autoscaling policy.
+//
+// NOTE: This feature is designed for indirect use by Elasticsearch Service,
+// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not
+// supported.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-policy
+func New(tp elastictransport.Interface) *GetAutoscalingPolicy {
+	r := &GetAutoscalingPolicy{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
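
[Editor's note: illustrative sketch, not part of the vendored files.] The two read-side autoscaling endpoints above are reached through the typed client. The client accessor (es.Autoscaling), the placeholder address, and the policy name "my-policy" are assumptions made for this example; the Do signatures and response fields come from the generated code in this diff.

	package main

	import (
		"context"
		"fmt"
		"log"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // placeholder address
		})
		if err != nil {
			log.Fatal(err)
		}
		ctx := context.Background()

		// Current capacity for every configured policy; diagnostic use only,
		// per the package documentation above.
		capacity, err := es.Autoscaling.GetAutoscalingCapacity().Do(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for name, decision := range capacity.Policies {
			fmt.Printf("policy %q: %+v\n", name, decision)
		}

		// A single policy by name.
		policy, err := es.Autoscaling.GetAutoscalingPolicy("my-policy").Do(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("roles:", policy.Roles)
	}
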
+func (r *GetAutoscalingPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetAutoscalingPolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "autoscaling.get_autoscaling_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.get_autoscaling_policy") + if reader := instrument.RecordRequestBody(ctx, "autoscaling.get_autoscaling_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.get_autoscaling_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetAutoscalingPolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getautoscalingpolicy.Response +func (r GetAutoscalingPolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return 
nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetAutoscalingPolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.get_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetAutoscalingPolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetAutoscalingPolicy headers map. +func (r *GetAutoscalingPolicy) Header(key, value string) *GetAutoscalingPolicy { + r.headers.Set(key, value) + + return r +} + +// Name the name of the autoscaling policy +// API Name: name +func (r *GetAutoscalingPolicy) _name(name string) *GetAutoscalingPolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAutoscalingPolicy) MasterTimeout(duration string) *GetAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetAutoscalingPolicy) ErrorTrace(errortrace bool) *GetAutoscalingPolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetAutoscalingPolicy) FilterPath(filterpaths ...string) *GetAutoscalingPolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *GetAutoscalingPolicy) Human(human bool) *GetAutoscalingPolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetAutoscalingPolicy) Pretty(pretty bool) *GetAutoscalingPolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/response.go new file mode 100644 index 000000000..aa77271b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getautoscalingpolicy + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package getautoscalingpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L25 +type Response struct { + + // Deciders Decider settings. + Deciders map[string]json.RawMessage `json:"deciders"` + Roles []string `json:"roles"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Deciders: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go new file mode 100644 index 000000000..b14364184 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go @@ -0,0 +1,429 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +package putautoscalingpolicy + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAutoscalingPolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAutoscalingPolicy type alias for index. +type NewPutAutoscalingPolicy func(name string) *PutAutoscalingPolicy + +// NewPutAutoscalingPolicyFunc returns a new instance of PutAutoscalingPolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAutoscalingPolicyFunc(tp elastictransport.Interface) NewPutAutoscalingPolicy { + return func(name string) *PutAutoscalingPolicy { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update an autoscaling policy. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy +func New(tp elastictransport.Interface) *PutAutoscalingPolicy { + r := &PutAutoscalingPolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAutoscalingPolicy) Raw(raw io.Reader) *PutAutoscalingPolicy { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAutoscalingPolicy) Request(req *Request) *PutAutoscalingPolicy { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
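
[Editor's note: illustrative sketch, continuing the hypothetical client `es` and context `ctx` from the earlier example; "my-policy" and the "fixed" decider are placeholders.] A typed body is attached with Request. Request is an alias for types.AutoscalingPolicy (see request.go later in this diff), so its Roles and Deciders fields can be set directly instead of through the Roles and AddDecider builders defined further down in this file.

	req := putautoscalingpolicy.NewRequest()
	req.Roles = []string{"data_hot"}
	req.Deciders = map[string]json.RawMessage{
		"fixed": json.RawMessage(`{}`), // decider configuration is raw JSON
	}

	res, err := es.Autoscaling.PutAutoscalingPolicy("my-policy").Request(req).Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
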
+func (r *PutAutoscalingPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAutoscalingPolicy: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_autoscaling") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
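
[Editor's note: illustrative sketch, same assumptions as above.] Per the Raw documentation earlier in this file, a raw reader takes precedence over a typed Request; HttpRequest above then sends it with the Content-Type application/vnd.elasticsearch+json;compatible-with=9.

	raw := strings.NewReader(`{"roles":["data_content"],"deciders":{"fixed":{}}}`)
	res, err := es.Autoscaling.PutAutoscalingPolicy("my-policy").Raw(raw).Do(ctx)
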
+func (r PutAutoscalingPolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "autoscaling.put_autoscaling_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "autoscaling.put_autoscaling_policy") + if reader := instrument.RecordRequestBody(ctx, "autoscaling.put_autoscaling_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "autoscaling.put_autoscaling_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAutoscalingPolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putautoscalingpolicy.Response +func (r PutAutoscalingPolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "autoscaling.put_autoscaling_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAutoscalingPolicy headers map. +func (r *PutAutoscalingPolicy) Header(key, value string) *PutAutoscalingPolicy { + r.headers.Set(key, value) + + return r +} + +// Name the name of the autoscaling policy +// API Name: name +func (r *PutAutoscalingPolicy) _name(name string) *PutAutoscalingPolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout +func (r *PutAutoscalingPolicy) MasterTimeout(duration string) *PutAutoscalingPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutAutoscalingPolicy) Timeout(duration string) *PutAutoscalingPolicy { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAutoscalingPolicy) ErrorTrace(errortrace bool) *PutAutoscalingPolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAutoscalingPolicy) FilterPath(filterpaths ...string) *PutAutoscalingPolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAutoscalingPolicy) Human(human bool) *PutAutoscalingPolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAutoscalingPolicy) Pretty(pretty bool) *PutAutoscalingPolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Decider settings. +// API name: deciders +func (r *PutAutoscalingPolicy) Deciders(deciders map[string]json.RawMessage) *PutAutoscalingPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Deciders = deciders + return r +} + +func (r *PutAutoscalingPolicy) AddDecider(key string, value json.RawMessage) *PutAutoscalingPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Deciders == nil { + r.req.Deciders = make(map[string]json.RawMessage) + } else { + tmp = r.req.Deciders + } + + tmp[key] = value + + r.req.Deciders = tmp + return r +} + +// API name: roles +func (r *PutAutoscalingPolicy) Roles(roles ...string) *PutAutoscalingPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + + r.req.Roles = append(r.req.Roles, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/request.go new file mode 100644 index 000000000..ffb0695f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putautoscalingpolicy + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putautoscalingpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L25-L57 +type Request = types.AutoscalingPolicy + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewAutoscalingPolicy() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/response.go new file mode 100644 index 000000000..2e79de485 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putautoscalingpolicy + +// Response holds the response body struct for the package putautoscalingpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/capabilities/capabilities.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/capabilities/capabilities.go new file mode 100644 index 000000000..20fd4a3f8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/capabilities/capabilities.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Checks if the specified combination of method, API, parameters, and arbitrary +// capabilities are supported +package capabilities + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Capabilities struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCapabilities type alias for index. +type NewCapabilities func() *Capabilities + +// NewCapabilitiesFunc returns a new instance of Capabilities with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCapabilitiesFunc(tp elastictransport.Interface) NewCapabilities { + return func() *Capabilities { + n := New(tp) + + return n + } +} + +// Checks if the specified combination of method, API, parameters, and arbitrary +// capabilities are supported +// +// https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities +func New(tp elastictransport.Interface) *Capabilities { + r := &Capabilities{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
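
[Editor's note: illustrative sketch.] The capabilities endpoint carries no payload, so its generated Do (shown below) simply delegates to IsSuccess. Here `tp` stands for any elastictransport.Interface, for example the transport already backing a client; how you obtain it is an assumption of this sketch.

	ok, err := capabilities.New(tp).IsSuccess(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("capabilities check passed:", ok)
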
+func (r *Capabilities) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_capabilities") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Capabilities) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "capabilities") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "capabilities") + if reader := instrument.RecordRequestBody(ctx, "capabilities", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "capabilities") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Capabilities query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a capabilities.Response +func (r Capabilities) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
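
[Editor's note.] The IsSuccess body below implements the same three-way contract used throughout these generated packages: a 2xx status yields (true, nil), a 404 yields (false, nil), and any other status or transport failure yields (false, err). A sketch of consuming that contract:

	ok, err := capabilities.New(tp).IsSuccess(ctx)
	switch {
	case err != nil: // transport failure, or a non-2xx status other than 404
	case ok: // 2xx
	default: // 404, reported as (false, nil) rather than as an error
	}
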
+func (r Capabilities) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "capabilities") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Capabilities query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Capabilities headers map. +func (r *Capabilities) Header(key, value string) *Capabilities { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/aliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/aliases.go new file mode 100644 index 000000000..ae2e35ce0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/aliases.go @@ -0,0 +1,435 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get aliases. +// +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. +package aliases + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
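
[Editor's note: illustrative sketch; the es.Cat accessor, ctx, and the "logs-*" pattern are assumptions.] The cat aliases endpoint selects and sorts columns with the H and S parameters defined later in this file, and Do decodes the body into the []types.AliasesRecord slice declared in response.go below.

	aliases, err := es.Cat.Aliases().Name("logs-*").H("alias", "index").S("alias:asc").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, record := range aliases {
		fmt.Printf("%+v\n", record)
	}
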
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Aliases struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAliases type alias for index. +type NewAliases func() *Aliases + +// NewAliasesFunc returns a new instance of Aliases with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAliasesFunc(tp elastictransport.Interface) NewAliases { + return func() *Aliases { + n := New(tp) + + return n + } +} + +// Get aliases. +// +// Get the cluster's index aliases, including filter and routing information. +// This API does not return data stream aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or the Kibana console. They are not intended for use by applications. +// For application consumption, use the aliases API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases +func New(tp elastictransport.Interface) *Aliases { + r := &Aliases{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Aliases) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("aliases") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("aliases") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
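
[Editor's note: illustrative sketch, same assumptions as above.] HttpRequest is exported, so the request produced by the path-building switch above can be inspected without performing it:

	req, err := es.Cat.Aliases().Name("logs-*").V(true).HttpRequest(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path, req.URL.RawQuery)
	// GET /_cat/aliases/logs-* v=true
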
+func (r Aliases) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.aliases") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.aliases") + if reader := instrument.RecordRequestBody(ctx, "cat.aliases", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.aliases") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Aliases query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a aliases.Response +func (r Aliases) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.aliases") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
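
[Editor's note: illustrative sketch, same assumptions as above, with the errors and fmt imports implied.] When Do receives a non-2xx response, it decodes the body into types.ElasticsearchError and returns it as the error, filling in Status from the HTTP status code when the body carries none:

	if _, err := es.Cat.Aliases().Do(ctx); err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			fmt.Println("server-side failure, HTTP status:", esErr.Status)
		}
	}
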
+func (r Aliases) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.aliases") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Aliases query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Aliases headers map. +func (r *Aliases) Header(key, value string) *Aliases { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To +// retrieve all aliases, omit this parameter or use `*` or `_all`. +// API Name: name +func (r *Aliases) Name(name string) *Aliases { + r.paramSet |= nameMask + r.name = name + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Aliases) H(names ...string) *Aliases { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Aliases) S(names ...string) *Aliases { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Aliases { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicated that the request should never timeout, you can set it to `-1`. +// API name: master_timeout +func (r *Aliases) MasterTimeout(duration string) *Aliases { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Aliases) Format(format string) *Aliases { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Aliases) Help(help bool) *Aliases { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. 
+// API name: v +func (r *Aliases) V(v bool) *Aliases { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Aliases) ErrorTrace(errortrace bool) *Aliases { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Aliases) FilterPath(filterpaths ...string) *Aliases { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Aliases) Human(human bool) *Aliases { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Aliases) Pretty(pretty bool) *Aliases { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/response.go new file mode 100644 index 000000000..495db8f10 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package aliases + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package aliases +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/aliases/CatAliasesResponse.ts#L22-L25 + +type Response []types.AliasesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/allocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/allocation.go new file mode 100644 index 000000000..5fbbc120a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/allocation.go @@ -0,0 +1,434 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. +package allocation + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Allocation struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAllocation type alias for index. +type NewAllocation func() *Allocation + +// NewAllocationFunc returns a new instance of Allocation with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAllocationFunc(tp elastictransport.Interface) NewAllocation { + return func() *Allocation { + n := New(tp) + + return n + } +} + +// Get shard allocation information. +// +// Get a snapshot of the number of shards allocated to each data node and their +// disk space. 
+// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation +func New(tp elastictransport.Interface) *Allocation { + r := &Allocation{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Allocation) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("allocation") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("allocation") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
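Because Perform (below) returns the raw *http.Response without decoding, a caller can bypass the typed JSON path entirely, for example to fetch the classic column-aligned text table. A rough sketch, reusing the es client and imports from the earlier example plus io; the Accept override and column list are illustrative, and with Perform the body is the caller's to close:

```go
// printAllocationTable fetches /_cat/allocation as plain text.
func printAllocationTable(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Cat.Allocation().
		Header("Accept", "text/plain"). // override the default versioned-JSON Accept header
		Format("text").
		Perform(ctx)
	if err != nil {
		return err
	}
	defer res.Body.Close() // Perform leaves the body open for the caller

	table, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	fmt.Println(string(table)) // one row per data node: shards, disk usage, host, node, ...
	return nil
}
```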
+func (r Allocation) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.allocation") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.allocation") + if reader := instrument.RecordRequestBody(ctx, "cat.allocation", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.allocation") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Allocation query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a allocation.Response +func (r Allocation) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.allocation") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
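One practical consequence of the Do error path above: a non-2xx reply surfaces as a *types.ElasticsearchError with the HTTP status filled in, which is distinct from a transport-level failure. A sketch of telling the two apart, assuming the es client from the earlier sketches plus the errors and log packages (types here is "github.com/elastic/go-elasticsearch/v9/typedapi/types"):

```go
func reportAllocation(ctx context.Context, es *elasticsearch.TypedClient) {
	records, err := es.Cat.Allocation().Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// The server answered, but with an error document (see Do above).
			log.Printf("elasticsearch error: status=%d", esErr.Status)
			return
		}
		// Anything else is transport-level: DNS, TLS, timeout, connection refused...
		log.Printf("transport failure: %v", err)
		return
	}
	log.Printf("%d data nodes reported", len(records))
}
```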
+func (r Allocation) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.allocation") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Allocation query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Allocation headers map. +func (r *Allocation) Header(key, value string) *Allocation { + r.headers.Set(key, value) + + return r +} + +// NodeId A comma-separated list of node identifiers or names used to limit the +// returned information. +// API Name: nodeid +func (r *Allocation) NodeId(nodeid string) *Allocation { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// Bytes The unit used to display byte values. +// API name: bytes +func (r *Allocation) Bytes(bytes bytes.Bytes) *Allocation { + r.values.Set("bytes", bytes.String()) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Allocation) H(names ...string) *Allocation { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Allocation) S(names ...string) *Allocation { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Allocation) Local(local bool) *Allocation { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Allocation) MasterTimeout(duration string) *Allocation { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Allocation) Format(format string) *Allocation { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Allocation) Help(help bool) *Allocation { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Allocation) V(v bool) *Allocation { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Allocation) ErrorTrace(errortrace bool) *Allocation { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Allocation) FilterPath(filterpaths ...string) *Allocation { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Allocation) Human(human bool) *Allocation { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Allocation) Pretty(pretty bool) *Allocation { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/response.go new file mode 100644 index 000000000..9524228fa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package allocation + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package allocation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/allocation/CatAllocationResponse.ts#L22-L25 + +type Response []types.AllocationRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/component_templates.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/component_templates.go new file mode 100644 index 000000000..73454cf69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/component_templates.go @@ -0,0 +1,432 @@ +// Licensed to Elasticsearch B.V. 
+// under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get component templates. +// +// Get information about component templates in a cluster. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the get component template API. +package componenttemplates + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ComponentTemplates struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewComponentTemplates type alias for index. +type NewComponentTemplates func() *ComponentTemplates + +// NewComponentTemplatesFunc returns a new instance of ComponentTemplates with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewComponentTemplatesFunc(tp elastictransport.Interface) NewComponentTemplates { + return func() *ComponentTemplates { + n := New(tp) + + return n + } +} + +// Get component templates. +// +// Get information about component templates in a cluster. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the get component template API.
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates +func New(tp elastictransport.Interface) *ComponentTemplates { + r := &ComponentTemplates{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ComponentTemplates) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("component_templates") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("component_templates") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ComponentTemplates) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.component_templates") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.component_templates") + if reader := instrument.RecordRequestBody(ctx, "cat.component_templates", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.component_templates") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ComponentTemplates query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a componenttemplates.Response +func (r ComponentTemplates) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.component_templates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
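The IsSuccess variant below skips decoding entirely: it drains and closes the body and maps the status code to a boolean, with 404 treated as (false, nil) rather than an error. That makes it a cheap liveness-style probe when the response body is not needed; a short sketch under the same typed-client assumptions as above:

```go
// catComponentTemplatesOK reports whether the endpoint answered 2xx.
// Per IsSuccess below, a 404 yields (false, nil) and any other non-2xx
// status yields (false, err); the body is discarded either way.
func catComponentTemplatesOK(ctx context.Context, es *elasticsearch.TypedClient) (bool, error) {
	return es.Cat.ComponentTemplates().IsSuccess(ctx)
}
```

When the template listing itself matters, Do remains the right call; IsSuccess is only a control-flow shortcut.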
+func (r ComponentTemplates) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.component_templates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ComponentTemplates query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ComponentTemplates headers map. +func (r *ComponentTemplates) Header(key, value string) *ComponentTemplates { + r.headers.Set(key, value) + + return r +} + +// Name The name of the component template. +// It accepts wildcard expressions. +// If it is omitted, all component templates are returned. +// API Name: name +func (r *ComponentTemplates) Name(name string) *ComponentTemplates { + r.paramSet |= nameMask + r.name = name + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *ComponentTemplates) H(names ...string) *ComponentTemplates { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *ComponentTemplates) S(names ...string) *ComponentTemplates { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *ComponentTemplates) Local(local bool) *ComponentTemplates { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *ComponentTemplates) MasterTimeout(duration string) *ComponentTemplates { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *ComponentTemplates) Format(format string) *ComponentTemplates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *ComponentTemplates) Help(help bool) *ComponentTemplates { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *ComponentTemplates) V(v bool) *ComponentTemplates { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *ComponentTemplates) ErrorTrace(errortrace bool) *ComponentTemplates { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ComponentTemplates) FilterPath(filterpaths ...string) *ComponentTemplates { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ComponentTemplates) Human(human bool) *ComponentTemplates { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ComponentTemplates) Pretty(pretty bool) *ComponentTemplates { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/response.go new file mode 100644 index 000000000..405a5b20a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package componenttemplates + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package componenttemplates +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L25 + +type Response []types.CatComponentTemplate + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/count.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/count.go new file mode 100644 index 000000000..1687fd585 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/count.go @@ -0,0 +1,417 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a document count. +// +// Get quick access to a document count for a data stream, an index, or an +// entire cluster. +// The document count only includes live documents, not deleted documents which +// have not yet been removed by the merge process. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the count API. +package count + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Count struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCount type alias for index. +type NewCount func() *Count + +// NewCountFunc returns a new instance of Count with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCountFunc(tp elastictransport.Interface) NewCount { + return func() *Count { + n := New(tp) + + return n + } +} + +// Get a document count. 
+// +// Get quick access to a document count for a data stream, an index, or an +// entire cluster. +// The document count only includes live documents, not deleted documents which +// have not yet been removed by the merge process. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the count API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count +func New(tp elastictransport.Interface) *Count { + r := &Count{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Count) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("count") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("count") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Count) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.count") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.count") + if reader := instrument.RecordRequestBody(ctx, "cat.count", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.count") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Count query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a count.Response +func (r Count) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.count") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
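All of the instrument type assertions threaded through Perform and Do above target the elastictransport.Instrumentation interface, and its shape can be read off these call sites. Below is a minimal logging implementation sketched from those signatures (assuming imports of context, io, log, and net/http); wiring it in is typically done through the client configuration's instrumentation hook, and New above picks it up from any transport that implements elastictransport.Instrumented:

```go
// logInstrumentation is a minimal sketch of the hooks exercised above.
// Signatures are inferred from the call sites in this file.
type logInstrumentation struct{}

func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
	log.Printf("span start: %s", name) // e.g. "cat.count"
	return ctx
}
func (logInstrumentation) Close(ctx context.Context) { log.Print("span end") }
func (logInstrumentation) RecordError(ctx context.Context, err error) {
	log.Printf("span error: %v", err)
}
func (logInstrumentation) RecordPathPart(ctx context.Context, part, value string) {}
func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
	return nil // nil leaves req.Body untouched (see Perform above)
}
func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string)        {}
func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {}
```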
+func (r Count) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.count") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Count query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Count headers map. +func (r *Count) Header(key, value string) *Count { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// It supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Count) Index(index string) *Count { + r.paramSet |= indexMask + r.index = index + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Count) H(names ...string) *Count { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Count) S(names ...string) *Count { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Count) Format(format string) *Count { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Count) Help(help bool) *Count { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Count) V(v bool) *Count { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Count) ErrorTrace(errortrace bool) *Count { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Count) FilterPath(filterpaths ...string) *Count { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *Count) Human(human bool) *Count { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Count) Pretty(pretty bool) *Count { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/response.go new file mode 100644 index 000000000..c38a8f3c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/count/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package count + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package count +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/count/CatCountResponse.ts#L22-L25 + +type Response []types.CountRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/fielddata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/fielddata.go new file mode 100644 index 000000000..ce711390f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/fielddata.go @@ -0,0 +1,419 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get field data cache information. 
+// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. +// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the nodes stats API. +package fielddata + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" +) + +const ( + fieldsMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Fielddata struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + fields string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFielddata type alias for index. +type NewFielddata func() *Fielddata + +// NewFielddataFunc returns a new instance of Fielddata with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFielddataFunc(tp elastictransport.Interface) NewFielddata { + return func() *Fielddata { + n := New(tp) + + return n + } +} + +// Get field data cache information. +// +// Get the amount of heap memory currently used by the field data cache on every +// data node in the cluster. +// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the nodes stats API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata +func New(tp elastictransport.Interface) *Fielddata { + r := &Fielddata{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
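HttpRequest (below) only builds the request, so it can be used to inspect exactly what URL and headers a builder produces without touching the network. The constructor needs nothing more than an elastictransport.Interface — a single Perform method, as the transport calls above show — so a throwaway adapter over http.RoundTripper suffices. A self-contained sketch; the field names passed to Fields are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata"
)

// rtTransport adapts an http.RoundTripper to the single-method
// elastictransport.Interface that the generated constructors expect.
type rtTransport struct{ rt http.RoundTripper }

func (t rtTransport) Perform(req *http.Request) (*http.Response, error) {
	return t.rt.RoundTrip(req)
}

func main() {
	r := fielddata.New(rtTransport{http.DefaultTransport})
	req, err := r.Fields("title,body").V(true).HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The node address is filled in by the real transport at send time.
	fmt.Println(req.Method, req.URL.Path, req.URL.RawQuery)
	// Output: GET /_cat/fielddata/title,body v=true
}
```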
+func (r *Fielddata) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("fielddata") + + method = http.MethodGet + case r.paramSet == fieldsMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("fielddata") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "fields", r.fields) + } + path.WriteString(r.fields) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Fielddata) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.fielddata") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.fielddata") + if reader := instrument.RecordRequestBody(ctx, "cat.fielddata", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.fielddata") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Fielddata query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a fielddata.Response +func (r Fielddata) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.fielddata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err
+ } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Fielddata) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.fielddata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Fielddata query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Fielddata headers map. +func (r *Fielddata) Header(key, value string) *Fielddata { + r.headers.Set(key, value) + + return r +} + +// Fields Comma-separated list of fields used to limit returned information. +// To retrieve all fields, omit this parameter. +// API Name: fields +func (r *Fielddata) Fields(fields string) *Fielddata { + r.paramSet |= fieldsMask + r.fields = fields + + return r +} + +// Bytes The unit used to display byte values. +// API name: bytes +func (r *Fielddata) Bytes(bytes bytes.Bytes) *Fielddata { + r.values.Set("bytes", bytes.String()) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Fielddata) H(names ...string) *Fielddata { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Fielddata) S(names ...string) *Fielddata { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Fielddata) Format(format string) *Fielddata { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Fielddata) Help(help bool) *Fielddata { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output.
+// API name: v
+func (r *Fielddata) V(v bool) *Fielddata {
+	r.values.Set("v", strconv.FormatBool(v))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Fielddata) ErrorTrace(errortrace bool) *Fielddata {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Fielddata) FilterPath(filterpaths ...string) *Fielddata {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *Fielddata) Human(human bool) *Fielddata {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *Fielddata) Pretty(pretty bool) *Fielddata {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/response.go
new file mode 100644
index 000000000..dd7da1751
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package fielddata
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package fielddata
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/fielddata/CatFielddataResponse.ts#L22-L25
+
+type Response []types.FielddataRecord
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/health.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/health.go
new file mode 100644
index 000000000..5cc989729
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/health.go
@@ -0,0 +1,412 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get the cluster health status.
+//
+// IMPORTANT: CAT APIs are only intended for human consumption using the command
+// line or Kibana console.
+// They are not intended for use by applications. For application consumption,
+// use the cluster health API.
+// This API is often used to check malfunctioning clusters.
+// To help you track cluster health alongside log files and alerting systems,
+// the API returns timestamps in two formats:
+// `HH:MM:SS`, which is human-readable but includes no date information;
+// `Unix epoch time`, which is machine-sortable and includes date information.
+// The latter format is useful for cluster recoveries that take multiple days.
+// You can use the cat health API to verify cluster health across multiple
+// nodes.
+// You also can use the API to track the recovery of a large cluster over a
+// longer period of time.
package health
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Health struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewHealth type alias for index.
+type NewHealth func() *Health + +// NewHealthFunc returns a new instance of Health with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHealthFunc(tp elastictransport.Interface) NewHealth { + return func() *Health { + n := New(tp) + + return n + } +} + +// Get the cluster health status. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the command +// line or Kibana console. +// They are not intended for use by applications. For application consumption, +// use the cluster health API. +// This API is often used to check malfunctioning clusters. +// To help you track cluster health alongside log files and alerting systems, +// the API returns timestamps in two formats: +// `HH:MM:SS`, which is human-readable but includes no date information; +// `Unix epoch time`, which is machine-sortable and includes date information. +// The latter format is useful for cluster recoveries that take multiple days. +// You can use the cat health API to verify cluster health across multiple +// nodes. +// You also can use the API to track the recovery of a large cluster over a +// longer period of time. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health +func New(tp elastictransport.Interface) *Health { + r := &Health{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Health) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("health") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
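+//
+// Editor's sketch (not generated text): Perform hands back the raw
+// *http.Response, so the caller owns closing the body; "tp" is an assumed,
+// already-configured elastictransport.Interface.
+//
+//	res, err := health.New(tp).Perform(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()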
+func (r Health) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.health") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.health") + if reader := instrument.RecordRequestBody(ctx, "cat.health", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.health") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Health query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a health.Response +func (r Health) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
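+//
+// Editor's sketch of a quick liveness probe built on IsSuccess (illustrative
+// names; tp is an assumed transport):
+//
+//	ok, err := health.New(tp).IsSuccess(ctx)
+//	if err == nil && ok {
+//		// /_cat/health answered with a 2xx status
+//	}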
+func (r Health) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Health query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Health headers map. +func (r *Health) Header(key, value string) *Health { + r.headers.Set(key, value) + + return r +} + +// Time The unit used to display time values. +// API name: time +func (r *Health) Time(time timeunit.TimeUnit) *Health { + r.values.Set("time", time.String()) + + return r +} + +// Ts If true, returns `HH:MM:SS` and Unix epoch timestamps. +// API name: ts +func (r *Health) Ts(ts bool) *Health { + r.values.Set("ts", strconv.FormatBool(ts)) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Health) H(names ...string) *Health { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Health) S(names ...string) *Health { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Health) Format(format string) *Health { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Health) Help(help bool) *Health { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Health) V(v bool) *Health { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Health) ErrorTrace(errortrace bool) *Health { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Health) FilterPath(filterpaths ...string) *Health { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *Health) Human(human bool) *Health {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *Health) Pretty(pretty bool) *Health {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/response.go
new file mode 100644
index 000000000..5b1b698df
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/health/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package health
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package health
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/health/CatHealthResponse.ts#L22-L25
+
+type Response []types.HealthRecord
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/help.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/help.go
new file mode 100644
index 000000000..2419d1a3b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/help.go
@@ -0,0 +1,278 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get CAT help. +// +// Get help for the CAT APIs. +package help + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Help struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHelp type alias for index. +type NewHelp func() *Help + +// NewHelpFunc returns a new instance of Help with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHelpFunc(tp elastictransport.Interface) NewHelp { + return func() *Help { + n := New(tp) + + return n + } +} + +// Get CAT help. +// +// Get help for the CAT APIs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat +func New(tp elastictransport.Interface) *Help { + r := &Help{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Help) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/plain") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
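+//
+// Editor's note: this endpoint negotiates plain text (the builder sets
+// Accept: text/plain), so reading the raw body via Perform is the practical
+// way to get the catalogue; names below are illustrative.
+//
+//	res, err := help.New(tp).Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close()
+//		text, _ := io.ReadAll(res.Body)
+//		fmt.Println(string(text))
+//	}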
+func (r Help) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.help") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.help") + if reader := instrument.RecordRequestBody(ctx, "cat.help", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.help") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Help query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a help.Response +func (r Help) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.help") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
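+//
+// Editor's note: IsSuccess drains and closes the body itself, which makes it
+// the cheapest reachability check this package offers (illustrative names):
+//
+//	ok, _ := help.New(tp).IsSuccess(ctx)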
+func (r Help) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.help") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Help query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Help headers map. +func (r *Help) Header(key, value string) *Help { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/response.go new file mode 100644 index 000000000..cb41d0486 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/help/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package help + +// Response holds the response body struct for the package help +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/help/CatHelpResponse.ts#L20-L25 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/indices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/indices.go new file mode 100644 index 000000000..e8d2bc7c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/indices.go @@ -0,0 +1,506 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index information. +// +// Get high-level information about indices in a cluster, including backing +// indices for data streams. +// +// Use this request to get the following information for each index in a +// cluster: +// - shard count +// - document count +// - deleted document count +// - primary store size +// - total store size of all shards, including shard replicas +// +// These metrics are retrieved directly from Lucene, which Elasticsearch uses +// internally to power indexing and search. As a result, all document counts +// include hidden nested documents. +// To get an accurate count of Elasticsearch documents, use the cat count or +// count APIs. +// +// CAT APIs are only intended for human consumption using the command line or +// Kibana console. +// They are not intended for use by applications. For application consumption, +// use an index endpoint. +package indices + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Indices struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewIndices type alias for index. +type NewIndices func() *Indices + +// NewIndicesFunc returns a new instance of Indices with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewIndicesFunc(tp elastictransport.Interface) NewIndices { + return func() *Indices { + n := New(tp) + + return n + } +} + +// Get index information. +// +// Get high-level information about indices in a cluster, including backing +// indices for data streams. +// +// Use this request to get the following information for each index in a +// cluster: +// - shard count +// - document count +// - deleted document count +// - primary store size +// - total store size of all shards, including shard replicas +// +// These metrics are retrieved directly from Lucene, which Elasticsearch uses +// internally to power indexing and search. As a result, all document counts +// include hidden nested documents. 
+// To get an accurate count of Elasticsearch documents, use the cat count or +// count APIs. +// +// CAT APIs are only intended for human consumption using the command line or +// Kibana console. +// They are not intended for use by applications. For application consumption, +// use an index endpoint. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices +func New(tp elastictransport.Interface) *Indices { + r := &Indices{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Indices) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("indices") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("indices") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
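+//
+// Most callers never invoke Perform directly; they chain options on the
+// builder and let Do decode the records. Editor's sketch with illustrative
+// names:
+//
+//	recs, err := indices.New(tp).Index("logs-*").H("index", "docs.count").Do(ctx)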
+func (r Indices) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.indices") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.indices") + if reader := instrument.RecordRequestBody(ctx, "cat.indices", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.indices") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Indices query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a indices.Response +func (r Indices) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
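+//
+// Editor's note: a 404 yields (false, nil) rather than an error, so the
+// result doubles as a cheap index-existence probe (illustrative names):
+//
+//	exists, err := indices.New(tp).Index("my-index").IsSuccess(ctx)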
+func (r Indices) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Indices query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Indices headers map. +func (r *Indices) Header(key, value string) *Indices { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). To target all data streams and indices, omit this +// parameter or use `*` or `_all`. +// API Name: index +func (r *Indices) Index(index string) *Indices { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Bytes The unit used to display byte values. +// API name: bytes +func (r *Indices) Bytes(bytes bytes.Bytes) *Indices { + r.values.Set("bytes", bytes.String()) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// API name: expand_wildcards +func (r *Indices) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Indices { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Health The health status used to limit returned indices. By default, the response +// includes indices of any health status. +// API name: health +func (r *Indices) Health(health healthstatus.HealthStatus) *Indices { + r.values.Set("health", health.String()) + + return r +} + +// IncludeUnloadedSegments If true, the response includes information from segments that are not loaded +// into memory. +// API name: include_unloaded_segments +func (r *Indices) IncludeUnloadedSegments(includeunloadedsegments bool) *Indices { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) + + return r +} + +// Pri If true, the response only includes information from primary shards. +// API name: pri +func (r *Indices) Pri(pri bool) *Indices { + r.values.Set("pri", strconv.FormatBool(pri)) + + return r +} + +// Time The unit used to display time values. +// API name: time +func (r *Indices) Time(time timeunit.TimeUnit) *Indices { + r.values.Set("time", time.String()) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Indices) MasterTimeout(duration string) *Indices { + r.values.Set("master_timeout", duration) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Indices) H(names ...string) *Indices { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. 
+// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Indices) S(names ...string) *Indices { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Indices) Format(format string) *Indices { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Indices) Help(help bool) *Indices { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Indices) V(v bool) *Indices { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Indices) ErrorTrace(errortrace bool) *Indices { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Indices) FilterPath(filterpaths ...string) *Indices { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Indices) Human(human bool) *Indices { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Indices) Pretty(pretty bool) *Indices { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/response.go new file mode 100644 index 000000000..e9eeab15c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package indices + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package indices +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/indices/CatIndicesResponse.ts#L22-L25 + +type Response []types.IndicesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/master.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/master.go new file mode 100644 index 000000000..19253812c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/master.go @@ -0,0 +1,398 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. +// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +package master + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Master struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMaster type alias for index. +type NewMaster func() *Master + +// NewMasterFunc returns a new instance of Master with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMasterFunc(tp elastictransport.Interface) NewMaster { + return func() *Master { + n := New(tp) + + return n + } +} + +// Get master node information. +// +// Get information about the master node, including the ID, bound IP address, +// and name. +// +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. 
For +// application consumption, use the nodes info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master +func New(tp elastictransport.Interface) *Master { + r := &Master{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Master) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("master") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Master) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.master") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.master") + if reader := instrument.RecordRequestBody(ctx, "cat.master", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.master") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Master query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a master.Response +func (r Master) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.master") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { 
+ err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Master) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.master") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Master query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Master headers map. +func (r *Master) Header(key, value string) *Master { + r.headers.Set(key, value) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Master) H(names ...string) *Master { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Master) S(names ...string) *Master { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Master) Local(local bool) *Master { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Master) MasterTimeout(duration string) *Master { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Master) Format(format string) *Master { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. 
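+// For example (editor's illustration): master.New(tp).Help(true) makes the
+// server return the column catalogue for /_cat/master instead of record rows.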
+// API name: help +func (r *Master) Help(help bool) *Master { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Master) V(v bool) *Master { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Master) ErrorTrace(errortrace bool) *Master { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Master) FilterPath(filterpaths ...string) *Master { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Master) Human(human bool) *Master { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Master) Pretty(pretty bool) *Master { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/response.go new file mode 100644 index 000000000..d1743ba29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/master/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package master + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package master +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/master/CatMasterResponse.ts#L22-L25 + +type Response []types.MasterRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/ml_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/ml_datafeeds.go new file mode 100644 index 000000000..10636976c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/ml_datafeeds.go @@ -0,0 +1,452 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get datafeeds. +// +// Get configuration and usage information about datafeeds. +// This API returns a maximum of 10,000 datafeeds. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, `monitor`, `manage_ml`, or `manage` +// cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get datafeed statistics API. +package mldatafeeds + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MlDatafeeds struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMlDatafeeds type alias for index. +type NewMlDatafeeds func() *MlDatafeeds + +// NewMlDatafeedsFunc returns a new instance of MlDatafeeds with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewMlDatafeedsFunc(tp elastictransport.Interface) NewMlDatafeeds { + return func() *MlDatafeeds { + n := New(tp) + + return n + } +} + +// Get datafeeds. +// +// Get configuration and usage information about datafeeds. +// This API returns a maximum of 10,000 datafeeds. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, `monitor`, `manage_ml`, or `manage` +// cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get datafeed statistics API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds +func New(tp elastictransport.Interface) *MlDatafeeds { + r := &MlDatafeeds{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MlDatafeeds) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("datafeeds") + + method = http.MethodGet + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MlDatafeeds) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.ml_datafeeds") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_datafeeds") + if reader := instrument.RecordRequestBody(ctx, "cat.ml_datafeeds", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_datafeeds") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MlDatafeeds query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mldatafeeds.Response +func (r MlDatafeeds) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_datafeeds") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
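As context for reviewers, a minimal sketch of how the generated Do path above is typically consumed. This is illustrative only and not part of the vendored code: it assumes a typed client from `elasticsearch.NewTypedClient` exposing this endpoint as `es.Cat.MlDatafeeds()` (the entry point wired up via `NewMlDatafeedsFunc`), and the cluster address is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumption: a reachable dev cluster; address and security settings are placeholders.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("building client: %v", err)
	}

	// The builder only stages query-string values; the HTTP request is
	// built, executed, and JSON-decoded when Do runs.
	datafeeds, err := es.Cat.MlDatafeeds().
		AllowNoMatch(true).
		Do(context.Background())
	if err != nil {
		log.Fatalf("cat ml datafeeds: %v", err)
	}

	// The decoded response is a slice of types.DatafeedsRecord.
	fmt.Printf("cluster reports %d datafeed(s)\n", len(datafeeds))
}
```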
+func (r MlDatafeeds) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.ml_datafeeds")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the MlDatafeeds query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the MlDatafeeds headers map.
+func (r *MlDatafeeds) Header(key, value string) *MlDatafeeds {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// DatafeedId A numerical character string that uniquely identifies the datafeed.
+// API Name: datafeedid
+func (r *MlDatafeeds) DatafeedId(datafeedid string) *MlDatafeeds {
+	r.paramSet |= datafeedidMask
+	r.datafeedid = datafeedid
+
+	return r
+}
+
+// AllowNoMatch Specifies what to do when the request:
+//
+// * Contains wildcard expressions and there are no datafeeds that match.
+// * Contains the `_all` string or no identifiers and there are no matches.
+// * Contains wildcard expressions and there are only partial matches.
+//
+// If `true`, the API returns an empty datafeeds array when there are no matches
+// and the subset of results when
+// there are partial matches. If `false`, the API returns a 404 status code when
+// there are no matches or only
+// partial matches.
+// API name: allow_no_match
+func (r *MlDatafeeds) AllowNoMatch(allownomatch bool) *MlDatafeeds {
+	r.values.Set("allow_no_match", strconv.FormatBool(allownomatch))
+
+	return r
+}
+
+// H Comma-separated list of column names to display.
+// API name: h
+func (r *MlDatafeeds) H(catdatafeedcolumns ...catdatafeedcolumn.CatDatafeedColumn) *MlDatafeeds {
+	tmp := []string{}
+	for _, item := range catdatafeedcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S Comma-separated list of column names or column aliases used to sort the
+// response.
+// API name: s
+func (r *MlDatafeeds) S(catdatafeedcolumns ...catdatafeedcolumn.CatDatafeedColumn) *MlDatafeeds {
+	tmp := []string{}
+	for _, item := range catdatafeedcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("s", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *MlDatafeeds) Time(time timeunit.TimeUnit) *MlDatafeeds {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *MlDatafeeds) Format(format string) *MlDatafeeds {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns. This option
+// can't be combined with any other query string option.
+// API name: help +func (r *MlDatafeeds) Help(help bool) *MlDatafeeds { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *MlDatafeeds) V(v bool) *MlDatafeeds { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MlDatafeeds) ErrorTrace(errortrace bool) *MlDatafeeds { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MlDatafeeds) FilterPath(filterpaths ...string) *MlDatafeeds { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MlDatafeeds) Human(human bool) *MlDatafeeds { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MlDatafeeds) Pretty(pretty bool) *MlDatafeeds { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/response.go new file mode 100644 index 000000000..d98e529ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mldatafeeds + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mldatafeeds +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L25 + +type Response []types.DatafeedsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go new file mode 100644 index 000000000..cefbceb0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go @@ -0,0 +1,450 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data frame analytics jobs. +// +// Get configuration and usage information about data frame analytics jobs. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get data frame analytics jobs statistics +// API. +package mldataframeanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MlDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMlDataFrameAnalytics type alias for index. +type NewMlDataFrameAnalytics func() *MlDataFrameAnalytics + +// NewMlDataFrameAnalyticsFunc returns a new instance of MlDataFrameAnalytics with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewMlDataFrameAnalyticsFunc(tp elastictransport.Interface) NewMlDataFrameAnalytics { + return func() *MlDataFrameAnalytics { + n := New(tp) + + return n + } +} + +// Get data frame analytics jobs. +// +// Get configuration and usage information about data frame analytics jobs. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get data frame analytics jobs statistics +// API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics +func New(tp elastictransport.Interface) *MlDataFrameAnalytics { + r := &MlDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MlDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MlDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.ml_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "cat.ml_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MlDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mldataframeanalytics.Response +func (r MlDataFrameAnalytics) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
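The IsSuccess variant that follows strips the call down to a status probe: it drains and closes the body, maps 2xx to true and 404 to (false, nil). A short sketch of that fast path; `es` (a configured `*elasticsearch.TypedClient`) and `dfaExists` are illustrative names:

```go
package escat

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// dfaExists reports whether the cat data frame analytics endpoint answers 2xx
// for the given job ID. Per the generated IsSuccess implementation, a 404
// yields (false, nil) while other non-2xx codes surface as errors.
func dfaExists(ctx context.Context, es *elasticsearch.TypedClient, id string) (bool, error) {
	return es.Cat.MlDataFrameAnalytics().Id(id).IsSuccess(ctx)
}
```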
+func (r MlDataFrameAnalytics) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.ml_data_frame_analytics")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the MlDataFrameAnalytics query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the MlDataFrameAnalytics headers map.
+func (r *MlDataFrameAnalytics) Header(key, value string) *MlDataFrameAnalytics {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id The ID of the data frame analytics to fetch
+// API Name: id
+func (r *MlDataFrameAnalytics) Id(id string) *MlDataFrameAnalytics {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// AllowNoMatch Whether to ignore if a wildcard expression matches no configs. (This includes
+// `_all` string or when no configs have been specified)
+// API name: allow_no_match
+func (r *MlDataFrameAnalytics) AllowNoMatch(allownomatch bool) *MlDataFrameAnalytics {
+	r.values.Set("allow_no_match", strconv.FormatBool(allownomatch))
+
+	return r
+}
+
+// Bytes The unit in which to display byte values
+// API name: bytes
+func (r *MlDataFrameAnalytics) Bytes(bytes bytes.Bytes) *MlDataFrameAnalytics {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// H Comma-separated list of column names to display.
+// API name: h
+func (r *MlDataFrameAnalytics) H(catdfacolumns ...catdfacolumn.CatDfaColumn) *MlDataFrameAnalytics {
+	tmp := []string{}
+	for _, item := range catdfacolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S Comma-separated list of column names or column aliases used to sort the
+// response.
+// API name: s
+func (r *MlDataFrameAnalytics) S(catdfacolumns ...catdfacolumn.CatDfaColumn) *MlDataFrameAnalytics {
+	tmp := []string{}
+	for _, item := range catdfacolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("s", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Time Unit used to display time values.
+// API name: time
+func (r *MlDataFrameAnalytics) Time(time timeunit.TimeUnit) *MlDataFrameAnalytics {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *MlDataFrameAnalytics) Format(format string) *MlDataFrameAnalytics {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns. This option
+// can't be combined with any other query string option.
+// API name: help
+func (r *MlDataFrameAnalytics) Help(help bool) *MlDataFrameAnalytics {
+	r.values.Set("help", strconv.FormatBool(help))
+
+	return r
+}
+
+// V When set to `true` will enable verbose output.
+// API name: v +func (r *MlDataFrameAnalytics) V(v bool) *MlDataFrameAnalytics { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MlDataFrameAnalytics) ErrorTrace(errortrace bool) *MlDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MlDataFrameAnalytics) FilterPath(filterpaths ...string) *MlDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MlDataFrameAnalytics) Human(human bool) *MlDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MlDataFrameAnalytics) Pretty(pretty bool) *MlDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/response.go new file mode 100644 index 000000000..38fe27edf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mldataframeanalytics + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mldataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L25 + +type Response []types.DataFrameAnalyticsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/ml_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/ml_jobs.go new file mode 100644 index 000000000..1eccb4b82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/ml_jobs.go @@ -0,0 +1,461 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. +// This API returns a maximum of 10,000 jobs. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, +// `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get anomaly detection job statistics API. +package mljobs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MlJobs struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMlJobs type alias for index. 
+type NewMlJobs func() *MlJobs + +// NewMlJobsFunc returns a new instance of MlJobs with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMlJobsFunc(tp elastictransport.Interface) NewMlJobs { + return func() *MlJobs { + n := New(tp) + + return n + } +} + +// Get anomaly detection jobs. +// +// Get configuration and usage information for anomaly detection jobs. +// This API returns a maximum of 10,000 jobs. +// If the Elasticsearch security features are enabled, you must have +// `monitor_ml`, +// `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get anomaly detection job statistics API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs +func New(tp elastictransport.Interface) *MlJobs { + r := &MlJobs{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MlJobs) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + + method = http.MethodGet + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MlJobs) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.ml_jobs") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_jobs") + if reader := instrument.RecordRequestBody(ctx, "cat.ml_jobs", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_jobs") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MlJobs query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mljobs.Response +func (r MlJobs) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
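One detail of the Do contract worth calling out for callers: a non-2xx response is decoded into `*types.ElasticsearchError` (with `Status` backfilled from the HTTP code when the body omits it) and returned as the error. A sketch of handling that, assuming the same illustrative typed-client setup as above:

```go
package escat

import (
	"context"
	"errors"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// listAnomalyJobs distinguishes cluster-side rejections from transport failures.
func listAnomalyJobs(ctx context.Context, es *elasticsearch.TypedClient) error {
	jobs, err := es.Cat.MlJobs().AllowNoMatch(true).Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// The server answered, but with a non-2xx status.
			return fmt.Errorf("cat ml jobs rejected (HTTP %d): %w", esErr.Status, err)
		}
		return err // connection, TLS, or other transport-level failure
	}
	for i, job := range jobs {
		fmt.Printf("job %d: %+v\n", i, job) // each job is a types.JobsRecord
	}
	return nil
}
```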
+func (r MlJobs) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.ml_jobs")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the MlJobs query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the MlJobs headers map.
+func (r *MlJobs) Header(key, value string) *MlJobs {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// JobId Identifier for the anomaly detection job.
+// API Name: jobid
+func (r *MlJobs) JobId(jobid string) *MlJobs {
+	r.paramSet |= jobidMask
+	r.jobid = jobid
+
+	return r
+}
+
+// AllowNoMatch Specifies what to do when the request:
+//
+// * Contains wildcard expressions and there are no jobs that match.
+// * Contains the `_all` string or no identifiers and there are no matches.
+// * Contains wildcard expressions and there are only partial matches.
+//
+// If `true`, the API returns an empty jobs array when there are no matches and
+// the subset of results when there
+// are partial matches. If `false`, the API returns a 404 status code when there
+// are no matches or only partial
+// matches.
+// API name: allow_no_match
+func (r *MlJobs) AllowNoMatch(allownomatch bool) *MlJobs {
+	r.values.Set("allow_no_match", strconv.FormatBool(allownomatch))
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *MlJobs) Bytes(bytes bytes.Bytes) *MlJobs {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// H Comma-separated list of column names to display.
+// API name: h
+func (r *MlJobs) H(catanomalydetectorcolumns ...catanomalydetectorcolumn.CatAnomalyDetectorColumn) *MlJobs {
+	tmp := []string{}
+	for _, item := range catanomalydetectorcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S Comma-separated list of column names or column aliases used to sort the
+// response.
+// API name: s
+func (r *MlJobs) S(catanomalydetectorcolumns ...catanomalydetectorcolumn.CatAnomalyDetectorColumn) *MlJobs {
+	tmp := []string{}
+	for _, item := range catanomalydetectorcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("s", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *MlJobs) Time(time timeunit.TimeUnit) *MlJobs {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *MlJobs) Format(format string) *MlJobs {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns. This option
+// can't be combined with any other query string option.
+// API name: help +func (r *MlJobs) Help(help bool) *MlJobs { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *MlJobs) V(v bool) *MlJobs { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MlJobs) ErrorTrace(errortrace bool) *MlJobs { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MlJobs) FilterPath(filterpaths ...string) *MlJobs { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MlJobs) Human(human bool) *MlJobs { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MlJobs) Pretty(pretty bool) *MlJobs { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/response.go new file mode 100644 index 000000000..c867b37c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mljobs + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mljobs +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L25 + +type Response []types.JobsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/ml_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/ml_trained_models.go new file mode 100644 index 000000000..5a064a542 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/ml_trained_models.go @@ -0,0 +1,465 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get trained models. +// +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get trained models statistics API. +package mltrainedmodels + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MlTrainedModels struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMlTrainedModels type alias for index. +type NewMlTrainedModels func() *MlTrainedModels + +// NewMlTrainedModelsFunc returns a new instance of MlTrainedModels with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewMlTrainedModelsFunc(tp elastictransport.Interface) NewMlTrainedModels { + return func() *MlTrainedModels { + n := New(tp) + + return n + } +} + +// Get trained models. +// +// Get configuration and usage information about inference trained models. +// +// IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get trained models statistics API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models +func New(tp elastictransport.Interface) *MlTrainedModels { + r := &MlTrainedModels{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MlTrainedModels) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("trained_models") + + method = http.MethodGet + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MlTrainedModels) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.ml_trained_models") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.ml_trained_models") + if reader := instrument.RecordRequestBody(ctx, "cat.ml_trained_models", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.ml_trained_models") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MlTrainedModels query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mltrainedmodels.Response +func (r MlTrainedModels) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.ml_trained_models") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
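Because this builder also stages `from`/`size`, large model listings can be walked page by page. A sketch under the same typed-client assumption; `pageTrainedModels` and the page size are illustrative:

```go
package escat

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// pageTrainedModels lists trained models in fixed-size pages, stopping when a
// short page signals the end of the listing.
func pageTrainedModels(ctx context.Context, es *elasticsearch.TypedClient) error {
	const pageSize = 100
	for from := 0; ; from += pageSize {
		models, err := es.Cat.MlTrainedModels().
			AllowNoMatch(true).
			From(from).
			Size(pageSize).
			Do(ctx)
		if err != nil {
			return err
		}
		for _, m := range models {
			fmt.Printf("%+v\n", m) // each m is a types.TrainedModelsRecord
		}
		if len(models) < pageSize {
			return nil
		}
	}
}
```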
+func (r MlTrainedModels) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.ml_trained_models")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the MlTrainedModels query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the MlTrainedModels headers map.
+func (r *MlTrainedModels) Header(key, value string) *MlTrainedModels {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ModelId A unique identifier for the trained model.
+// API Name: modelid
+func (r *MlTrainedModels) ModelId(modelid string) *MlTrainedModels {
+	r.paramSet |= modelidMask
+	r.modelid = modelid
+
+	return r
+}
+
+// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and
+// there are no models that match; contains the `_all` string or no identifiers
+// and there are no matches; contains wildcard expressions and there are only
+// partial matches.
+// If `true`, the API returns an empty array when there are no matches and the
+// subset of results when there are partial matches.
+// If `false`, the API returns a 404 status code when there are no matches or
+// only partial matches.
+// API name: allow_no_match
+func (r *MlTrainedModels) AllowNoMatch(allownomatch bool) *MlTrainedModels {
+	r.values.Set("allow_no_match", strconv.FormatBool(allownomatch))
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *MlTrainedModels) Bytes(bytes bytes.Bytes) *MlTrainedModels {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// H A comma-separated list of column names to display.
+// API name: h
+func (r *MlTrainedModels) H(cattrainedmodelscolumns ...cattrainedmodelscolumn.CatTrainedModelsColumn) *MlTrainedModels {
+	tmp := []string{}
+	for _, item := range cattrainedmodelscolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases used to sort the response.
+// API name: s
+func (r *MlTrainedModels) S(cattrainedmodelscolumns ...cattrainedmodelscolumn.CatTrainedModelsColumn) *MlTrainedModels {
+	tmp := []string{}
+	for _, item := range cattrainedmodelscolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("s", strings.Join(tmp, ","))
+
+	return r
+}
+
+// From Skips the specified number of trained models.
+// API name: from
+func (r *MlTrainedModels) From(from int) *MlTrainedModels {
+	r.values.Set("from", strconv.Itoa(from))
+
+	return r
+}
+
+// Size The maximum number of trained models to display.
+// API name: size
+func (r *MlTrainedModels) Size(size int) *MlTrainedModels {
+	r.values.Set("size", strconv.Itoa(size))
+
+	return r
+}
+
+// Time Unit used to display time values.
+// API name: time +func (r *MlTrainedModels) Time(time timeunit.TimeUnit) *MlTrainedModels { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *MlTrainedModels) Format(format string) *MlTrainedModels { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *MlTrainedModels) Help(help bool) *MlTrainedModels { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *MlTrainedModels) V(v bool) *MlTrainedModels { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MlTrainedModels) ErrorTrace(errortrace bool) *MlTrainedModels { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MlTrainedModels) FilterPath(filterpaths ...string) *MlTrainedModels { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MlTrainedModels) Human(human bool) *MlTrainedModels { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MlTrainedModels) Pretty(pretty bool) *MlTrainedModels { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/response.go new file mode 100644 index 000000000..60e83f84e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mltrainedmodels + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mltrainedmodels +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L25 + +type Response []types.TrainedModelsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/nodeattrs.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/nodeattrs.go new file mode 100644 index 000000000..f7edae002 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/nodeattrs.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get node attribute information. +// +// Get information about custom node attributes. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +package nodeattrs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Nodeattrs struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewNodeattrs type alias for index. +type NewNodeattrs func() *Nodeattrs + +// NewNodeattrsFunc returns a new instance of Nodeattrs with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewNodeattrsFunc(tp elastictransport.Interface) NewNodeattrs { + return func() *Nodeattrs { + n := New(tp) + + return n + } +} + +// Get node attribute information. +// +// Get information about custom node attributes. 
+// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs +func New(tp elastictransport.Interface) *Nodeattrs { + r := &Nodeattrs{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Nodeattrs) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("nodeattrs") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
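Since New and the builder setters above have no side effects until a request is performed, the generated URL can be inspected without a live cluster. A small sketch using only code shown in this hunk; a nil transport is enough because HttpRequest never touches it:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs"
	)

	func main() {
		// Build the request only; Perform/Do would need a real transport.
		req, err := nodeattrs.New(nil).V(true).Format("json").HttpRequest(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(req.Method, req.URL.Path) // GET /_cat/nodeattrs
		fmt.Println(req.URL.RawQuery)         // format=json&v=true (keys sorted by url.Values.Encode)
		fmt.Println(req.Header.Get("Accept")) // application/vnd.elasticsearch+json;compatible-with=9
	}
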
+func (r Nodeattrs) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.nodeattrs") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.nodeattrs") + if reader := instrument.RecordRequestBody(ctx, "cat.nodeattrs", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.nodeattrs") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Nodeattrs query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a nodeattrs.Response +func (r Nodeattrs) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.nodeattrs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Nodeattrs) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.nodeattrs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Nodeattrs query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Nodeattrs headers map. +func (r *Nodeattrs) Header(key, value string) *Nodeattrs { + r.headers.Set(key, value) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Nodeattrs) H(names ...string) *Nodeattrs { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Nodeattrs) S(names ...string) *Nodeattrs { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Nodeattrs) Local(local bool) *Nodeattrs { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Nodeattrs) MasterTimeout(duration string) *Nodeattrs { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Nodeattrs) Format(format string) *Nodeattrs { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodeattrs) Help(help bool) *Nodeattrs { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Nodeattrs) V(v bool) *Nodeattrs { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Nodeattrs) ErrorTrace(errortrace bool) *Nodeattrs { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Nodeattrs) FilterPath(filterpaths ...string) *Nodeattrs { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Nodeattrs) Human(human bool) *Nodeattrs { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Nodeattrs) Pretty(pretty bool) *Nodeattrs { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/response.go new file mode 100644 index 000000000..55c3e8402 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package nodeattrs + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package nodeattrs +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L25 + +type Response []types.NodeAttributesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/nodes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/nodes.go new file mode 100644 index 000000000..4fac13638 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/nodes.go @@ -0,0 +1,425 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get node information. +// +// Get information about the nodes in a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +package nodes + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Nodes struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewNodes type alias for index. +type NewNodes func() *Nodes + +// NewNodesFunc returns a new instance of Nodes with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewNodesFunc(tp elastictransport.Interface) NewNodes { + return func() *Nodes { + n := New(tp) + + return n + } +} + +// Get node information. +// +// Get information about the nodes in a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes +func New(tp elastictransport.Interface) *Nodes { + r := &Nodes{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
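For the Nodes endpoint below the flow is the usual HttpRequest → Perform → Do chain. A short usage sketch, again assuming the typed-client accessor (es.Cat.Nodes()) and a placeholder address; note that FullId takes a string rather than a bool because the underlying full_id parameter is boolean-or-string in the specification:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // placeholder
		})
		if err != nil {
			log.Fatal(err)
		}

		// Sort by node name and request full node IDs; Do returns []types.NodesRecord.
		records, err := es.Cat.Nodes().
			FullId("true").
			S("name").
			Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d nodes\n", len(records))
	}
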
+func (r *Nodes) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("nodes") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Nodes) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.nodes") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.nodes") + if reader := instrument.RecordRequestBody(ctx, "cat.nodes", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.nodes") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Nodes query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a nodes.Response +func (r Nodes) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.nodes") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Nodes) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.nodes")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Nodes query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Nodes headers map.
+func (r *Nodes) Header(key, value string) *Nodes {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *Nodes) Bytes(bytes bytes.Bytes) *Nodes {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// FullId If `true`, return the full node ID. If `false`, return the shortened node ID.
+// API name: full_id
+func (r *Nodes) FullId(fullid string) *Nodes {
+	r.values.Set("full_id", fullid)
+
+	return r
+}
+
+// IncludeUnloadedSegments If true, the response includes information from segments that are not loaded
+// into memory.
+// API name: include_unloaded_segments
+func (r *Nodes) IncludeUnloadedSegments(includeunloadedsegments bool) *Nodes {
+	r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments))
+
+	return r
+}
+
+// H A comma-separated list of column names to display.
+// It supports simple wildcards.
+// API name: h
+func (r *Nodes) H(catnodecolumns ...catnodecolumn.CatNodeColumn) *Nodes {
+	tmp := []string{}
+	for _, item := range catnodecolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases that determines the sort
+// order.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *Nodes) S(names ...string) *Nodes {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// API name: master_timeout
+func (r *Nodes) MasterTimeout(duration string) *Nodes {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *Nodes) Time(time timeunit.TimeUnit) *Nodes {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *Nodes) Format(format string) *Nodes {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns.
This option +// can't be combined with any other query string option. +// API name: help +func (r *Nodes) Help(help bool) *Nodes { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Nodes) V(v bool) *Nodes { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Nodes) ErrorTrace(errortrace bool) *Nodes { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Nodes) FilterPath(filterpaths ...string) *Nodes { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Nodes) Human(human bool) *Nodes { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Nodes) Pretty(pretty bool) *Nodes { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/response.go new file mode 100644 index 000000000..ab9918efa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package nodes + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package nodes +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/nodes/CatNodesResponse.ts#L22-L25 + +type Response []types.NodesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/pending_tasks.go new file mode 100644 index 000000000..a389de9bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/pending_tasks.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the pending cluster tasks API. +package pendingtasks + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PendingTasks struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPendingTasks type alias for index. +type NewPendingTasks func() *PendingTasks + +// NewPendingTasksFunc returns a new instance of PendingTasks with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { + return func() *PendingTasks { + n := New(tp) + + return n + } +} + +// Get pending task information. +// +// Get information about cluster-level changes that have not yet taken effect. 
+// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the pending cluster tasks API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks +func New(tp elastictransport.Interface) *PendingTasks { + r := &PendingTasks{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PendingTasks) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("pending_tasks") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
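Worth calling out for consumers: on non-2xx responses, Do (below) returns a *types.ElasticsearchError with Status backfilled from the HTTP status code when the body omits it, so callers can branch on it with errors.As. A sketch assuming the es.Cat.PendingTasks() accessor and a placeholder address:

	package main

	import (
		"context"
		"errors"
		"fmt"
		"log"

		"github.com/elastic/go-elasticsearch/v9"
		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // placeholder
		})
		if err != nil {
			log.Fatal(err)
		}

		records, err := es.Cat.PendingTasks().Do(context.Background())
		if err != nil {
			// Non-2xx responses surface as *types.ElasticsearchError, as in Do above.
			var esErr *types.ElasticsearchError
			if errors.As(err, &esErr) {
				log.Fatalf("cluster rejected the request: status=%d", esErr.Status)
			}
			log.Fatal(err)
		}
		fmt.Printf("%d pending tasks\n", len(records))
	}
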
+func (r PendingTasks) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.pending_tasks") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.pending_tasks") + if reader := instrument.RecordRequestBody(ctx, "cat.pending_tasks", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.pending_tasks") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PendingTasks query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a pendingtasks.Response +func (r PendingTasks) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r PendingTasks) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PendingTasks query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PendingTasks headers map. +func (r *PendingTasks) Header(key, value string) *PendingTasks { + r.headers.Set(key, value) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *PendingTasks) H(names ...string) *PendingTasks { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *PendingTasks) S(names ...string) *PendingTasks { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *PendingTasks) Local(local bool) *PendingTasks { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PendingTasks) MasterTimeout(duration string) *PendingTasks { + r.values.Set("master_timeout", duration) + + return r +} + +// Time Unit used to display time values. +// API name: time +func (r *PendingTasks) Time(time timeunit.TimeUnit) *PendingTasks { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *PendingTasks) Format(format string) *PendingTasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *PendingTasks) Help(help bool) *PendingTasks { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *PendingTasks) V(v bool) *PendingTasks { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PendingTasks) ErrorTrace(errortrace bool) *PendingTasks { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PendingTasks) FilterPath(filterpaths ...string) *PendingTasks { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PendingTasks) Human(human bool) *PendingTasks { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PendingTasks) Pretty(pretty bool) *PendingTasks { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/response.go new file mode 100644 index 000000000..6e8406d04 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package pendingtasks + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package pendingtasks +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L25 + +type Response []types.PendingTasksRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/plugins.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/plugins.go new file mode 100644 index 000000000..ffd658276 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/plugins.go @@ -0,0 +1,402 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +package plugins + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Plugins struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPlugins type alias for index. +type NewPlugins func() *Plugins + +// NewPluginsFunc returns a new instance of Plugins with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPluginsFunc(tp elastictransport.Interface) NewPlugins { + return func() *Plugins { + n := New(tp) + + return n + } +} + +// Get plugin information. +// +// Get a list of plugins running on each node of a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins +func New(tp elastictransport.Interface) *Plugins { + r := &Plugins{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
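All of these endpoints consult r.instrument through the same two contracts: a transport may advertise an Instrumentation via Instrumented.InstrumentationEnabled(), and each request lifecycle then calls Start/Close, BeforeRequest/AfterRequest, RecordPathPart, RecordRequestBody, and RecordError. The sketch below is a minimal logging implementation whose method set is inferred from those call sites; the canonical interface lives in elastic-transport-go, and the compile-time assertion will fail if the inference is wrong:

	package main

	import (
		"context"
		"io"
		"log"
		"net/http"

		"github.com/elastic/elastic-transport-go/v8/elastictransport"
	)

	// logInstrumentation logs span starts and errors; all other hooks are no-ops.
	type logInstrumentation struct{}

	// Compile-time check that the inferred method set satisfies the real interface.
	var _ elastictransport.Instrumentation = logInstrumentation{}

	func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
		log.Printf("span start: %s", name)
		return ctx
	}

	func (logInstrumentation) Close(ctx context.Context) {}

	func (logInstrumentation) RecordError(ctx context.Context, err error) {
		log.Printf("span error: %v", err)
	}

	func (logInstrumentation) RecordPathPart(ctx context.Context, pathPart, value string) {}

	// Returning nil leaves the request body untouched, as the generated code checks.
	func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
		return nil
	}

	func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string) {}

	func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {}

	func main() {}
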
+func (r *Plugins) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("plugins") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Plugins) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.plugins") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.plugins") + if reader := instrument.RecordRequestBody(ctx, "cat.plugins", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.plugins") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Plugins query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a plugins.Response +func (r Plugins) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.plugins") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode 
+ } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Plugins) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.plugins") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Plugins query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Plugins headers map. +func (r *Plugins) Header(key, value string) *Plugins { + r.headers.Set(key, value) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Plugins) H(names ...string) *Plugins { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Plugins) S(names ...string) *Plugins { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// IncludeBootstrap Include bootstrap plugins in the response +// API name: include_bootstrap +func (r *Plugins) IncludeBootstrap(includebootstrap bool) *Plugins { + r.values.Set("include_bootstrap", strconv.FormatBool(includebootstrap)) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Plugins) Local(local bool) *Plugins { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Plugins) MasterTimeout(duration string) *Plugins { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Plugins) Format(format string) *Plugins { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Plugins) Help(help bool) *Plugins { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. 
+// API name: v +func (r *Plugins) V(v bool) *Plugins { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Plugins) ErrorTrace(errortrace bool) *Plugins { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Plugins) FilterPath(filterpaths ...string) *Plugins { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Plugins) Human(human bool) *Plugins { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Plugins) Pretty(pretty bool) *Plugins { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/response.go new file mode 100644 index 000000000..44baa3611 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package plugins + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package plugins +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/plugins/CatPluginsResponse.ts#L22-L25 + +type Response []types.PluginsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/recovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/recovery.go new file mode 100644 index 000000000..28f5c4837 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/recovery.go @@ -0,0 +1,459 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or syncing a replica shard from a primary +// shard. When a shard recovery completes, the recovered shard is available for +// search and indexing. +// For data streams, the API returns information about the stream’s backing +// indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index recovery API. +package recovery + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Recovery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRecovery type alias for index. +type NewRecovery func() *Recovery + +// NewRecoveryFunc returns a new instance of Recovery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { + return func() *Recovery { + n := New(tp) + + return n + } +} + +// Get shard recovery information. +// +// Get information about ongoing and completed shard recoveries. +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or syncing a replica shard from a primary +// shard. When a shard recovery completes, the recovered shard is available for +// search and indexing. +// For data streams, the API returns information about the stream’s backing +// indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index recovery API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery +func New(tp elastictransport.Interface) *Recovery { + r := &Recovery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Recovery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("recovery") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("recovery") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Recovery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.recovery") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.recovery") + if reader := instrument.RecordRequestBody(ctx, "cat.recovery", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.recovery") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Recovery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a recovery.Response +func (r Recovery) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.recovery") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
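Usage sketch (reviewer note, not part of the vendored file): IsSuccess, defined next, is the lightweight alternative to Do; per the code below it discards the response body, returns true for any 2xx status, false with a nil error for 404, and false with a non-nil error otherwise. Assuming the typed client `es` from the earlier sketch:

	ok, err := es.Cat.Recovery().IsSuccess(context.Background())
	if err == nil && ok {
		// the endpoint answered with a 2xx status
	}
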
+func (r Recovery) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.recovery")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Recovery query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Recovery headers map.
+func (r *Recovery) Header(key, value string) *Recovery {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams, indices, and aliases used to limit
+// the request.
+// Supports wildcards (`*`). To target all data streams and indices, omit this
+// parameter or use `*` or `_all`.
+// API Name: index
+func (r *Recovery) Index(index string) *Recovery {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// ActiveOnly If `true`, the response only includes ongoing shard recoveries.
+// API name: active_only
+func (r *Recovery) ActiveOnly(activeonly bool) *Recovery {
+	r.values.Set("active_only", strconv.FormatBool(activeonly))
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *Recovery) Bytes(bytes bytes.Bytes) *Recovery {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// Detailed If `true`, the response includes detailed information about shard recoveries.
+// API name: detailed
+func (r *Recovery) Detailed(detailed bool) *Recovery {
+	r.values.Set("detailed", strconv.FormatBool(detailed))
+
+	return r
+}
+
+// H A comma-separated list of column names to display.
+// It supports simple wildcards.
+// API name: h
+func (r *Recovery) H(catrecoverycolumns ...catrecoverycolumn.CatRecoveryColumn) *Recovery {
+	tmp := []string{}
+	for _, item := range catrecoverycolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases that determines the sort
+// order.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *Recovery) S(names ...string) *Recovery {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *Recovery) Time(time timeunit.TimeUnit) *Recovery {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *Recovery) Format(format string) *Recovery {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns. This option
+// can't be combined with any other query string option.
+// API name: help
+func (r *Recovery) Help(help bool) *Recovery {
+	r.values.Set("help", strconv.FormatBool(help))
+
+	return r
+}
+
+// V When set to `true` will enable verbose output.
+// API name: v +func (r *Recovery) V(v bool) *Recovery { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Recovery) ErrorTrace(errortrace bool) *Recovery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Recovery) FilterPath(filterpaths ...string) *Recovery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Recovery) Human(human bool) *Recovery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Recovery) Pretty(pretty bool) *Recovery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/response.go new file mode 100644 index 000000000..49ede854e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
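Usage sketch (reviewer note, not part of the vendored file): with the typed client `es` from the earlier sketch, the Recovery builder chains the query parameters defined above, and Do decodes the reply into recovery.Response ([]types.RecoveryRecord). `bytes.Mb` is assumed to be one of the generated enum values in typedapi/types/enums/bytes:

	// Ongoing recoveries for a single (illustrative) index, sizes in MB.
	recs, err := es.Cat.Recovery().
		Index("my-index").
		ActiveOnly(true).
		Bytes(bytes.Mb).
		Do(context.Background())
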
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package recovery + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package recovery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/recovery/CatRecoveryResponse.ts#L22-L25 + +type Response []types.RecoveryRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/repositories.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/repositories.go new file mode 100644 index 000000000..bb141a22e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/repositories.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get snapshot repository API. +package repositories + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Repositories struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositories type alias for index. +type NewRepositories func() *Repositories + +// NewRepositoriesFunc returns a new instance of Repositories with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRepositoriesFunc(tp elastictransport.Interface) NewRepositories { + return func() *Repositories { + n := New(tp) + + return n + } +} + +// Get snapshot repository information. +// +// Get a list of snapshot repositories for a cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. 
They are not intended for use by applications. For +// application consumption, use the get snapshot repository API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories +func New(tp elastictransport.Interface) *Repositories { + r := &Repositories{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Repositories) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("repositories") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Repositories) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.repositories") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.repositories") + if reader := instrument.RecordRequestBody(ctx, "cat.repositories", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.repositories") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Repositories query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a repositories.Response +func (r Repositories) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.repositories") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Repositories) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.repositories") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Repositories query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Repositories headers map. +func (r *Repositories) Header(key, value string) *Repositories { + r.headers.Set(key, value) + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Repositories) H(names ...string) *Repositories { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Repositories) S(names ...string) *Repositories { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Repositories) Local(local bool) *Repositories { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Repositories) MasterTimeout(duration string) *Repositories { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. 
+// API name: format +func (r *Repositories) Format(format string) *Repositories { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Repositories) Help(help bool) *Repositories { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Repositories) V(v bool) *Repositories { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Repositories) ErrorTrace(errortrace bool) *Repositories { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Repositories) FilterPath(filterpaths ...string) *Repositories { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Repositories) Human(human bool) *Repositories { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Repositories) Pretty(pretty bool) *Repositories { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/response.go new file mode 100644 index 000000000..41aedf69d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
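Usage sketch (reviewer note, not part of the vendored file): Repositories takes no path parameters, so only the query options defined above apply; the reply decodes into []types.RepositoriesRecord:

	// List snapshot repositories, waiting up to 30s for the master node.
	repos, err := es.Cat.Repositories().
		MasterTimeout("30s").
		V(true).
		Do(context.Background())
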
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package repositories + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package repositories +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L25 + +type Response []types.RepositoriesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/response.go new file mode 100644 index 000000000..ea37b8147 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package segments + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package segments +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/segments/CatSegmentsResponse.ts#L22-L25 + +type Response []types.SegmentsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/segments.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/segments.go new file mode 100644 index 000000000..5d9a267cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments/segments.go @@ -0,0 +1,444 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index segments API. +package segments + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Segments struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSegments type alias for index. +type NewSegments func() *Segments + +// NewSegmentsFunc returns a new instance of Segments with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { + return func() *Segments { + n := New(tp) + + return n + } +} + +// Get segment information. +// +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the index segments API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments +func New(tp elastictransport.Interface) *Segments { + r := &Segments{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Segments) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("segments") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("segments") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Segments) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.segments") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.segments") + if reader := instrument.RecordRequestBody(ctx, "cat.segments", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.segments") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Segments query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a segments.Response +func (r Segments) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.segments") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + 
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Segments) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.segments")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Segments query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Segments headers map.
+func (r *Segments) Header(key, value string) *Segments {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams, indices, and aliases used to limit
+// the request.
+// Supports wildcards (`*`).
+// To target all data streams and indices, omit this parameter or use `*` or
+// `_all`.
+// API Name: index
+func (r *Segments) Index(index string) *Segments {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *Segments) Bytes(bytes bytes.Bytes) *Segments {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// H A comma-separated list of column names to display.
+// It supports simple wildcards.
+// API name: h
+func (r *Segments) H(catsegmentscolumns ...catsegmentscolumn.CatSegmentsColumn) *Segments {
+	tmp := []string{}
+	for _, item := range catsegmentscolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases that determines the sort
+// order.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *Segments) S(names ...string) *Segments {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// Local If `true`, the request computes the list of selected nodes from the
+// local cluster state. If `false` the list of selected nodes is computed
+// from the cluster state of the master node. In both cases the coordinating
+// node will send requests for further information to each selected node.
+// API name: local
+func (r *Segments) Local(local bool) *Segments {
+	r.values.Set("local", strconv.FormatBool(local))
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node.
+// API name: master_timeout +func (r *Segments) MasterTimeout(duration string) *Segments { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Segments) Format(format string) *Segments { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Segments) Help(help bool) *Segments { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Segments) V(v bool) *Segments { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Segments) ErrorTrace(errortrace bool) *Segments { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Segments) FilterPath(filterpaths ...string) *Segments { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Segments) Human(human bool) *Segments { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Segments) Pretty(pretty bool) *Segments { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/response.go new file mode 100644 index 000000000..0c00c4dce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
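Usage sketch (reviewer note, not part of the vendored file): Segments follows the same shape, and the Shards endpoint vendored next differs only in its column enum. `bytes.Kb` is assumed to be one of the generated enum values in typedapi/types/enums/bytes:

	// Lucene segment statistics for one (illustrative) index, sizes in KB.
	segs, err := es.Cat.Segments().
		Index("my-index").
		Bytes(bytes.Kb).
		Do(context.Background())
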
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package shards + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package shards +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/shards/CatShardsResponse.ts#L22-L25 + +type Response []types.ShardsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/shards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/shards.go new file mode 100644 index 000000000..bd490d8ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards/shards.go @@ -0,0 +1,439 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get shard information. +// +// Get information about the shards in a cluster. +// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. +package shards + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Shards struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewShards type alias for index. +type NewShards func() *Shards + +// NewShardsFunc returns a new instance of Shards with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewShardsFunc(tp elastictransport.Interface) NewShards { + return func() *Shards { + n := New(tp) + + return n + } +} + +// Get shard information. 
+// +// Get information about the shards in a cluster. +// For data streams, the API returns information about the backing indices. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards +func New(tp elastictransport.Interface) *Shards { + r := &Shards{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Shards) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("shards") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("shards") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
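Usage sketch (reviewer note, not part of the vendored file): HttpRequest is also useful on its own when the caller wants the prepared request without sending it, for example to log it or hand it to a custom transport:

	req, err := es.Cat.Shards().Index("my-index").HttpRequest(context.Background())
	// On success, req.URL.Path is "/_cat/shards/my-index" and the Accept
	// header carries the compatible-with=9 content type set above.
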
+func (r Shards) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.shards") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.shards") + if reader := instrument.RecordRequestBody(ctx, "cat.shards", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.shards") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Shards query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a shards.Response +func (r Shards) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.shards") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Shards) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.shards")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Shards query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the Shards headers map.
+func (r *Shards) Header(key, value string) *Shards {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams, indices, and aliases used to limit
+// the request.
+// Supports wildcards (`*`).
+// To target all data streams and indices, omit this parameter or use `*` or
+// `_all`.
+// API Name: index
+func (r *Shards) Index(index string) *Shards {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// Bytes The unit used to display byte values.
+// API name: bytes
+func (r *Shards) Bytes(bytes bytes.Bytes) *Shards {
+	r.values.Set("bytes", bytes.String())
+
+	return r
+}
+
+// H List of columns to appear in the response. Supports simple wildcards.
+// API name: h
+func (r *Shards) H(catshardcolumns ...catshardcolumn.CatShardColumn) *Shards {
+	tmp := []string{}
+	for _, item := range catshardcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases that determines the sort
+// order.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *Shards) S(names ...string) *Shards {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// API name: master_timeout
+func (r *Shards) MasterTimeout(duration string) *Shards {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *Shards) Time(time timeunit.TimeUnit) *Shards {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in, can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
+// API name: format
+func (r *Shards) Format(format string) *Shards {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// Help When set to `true` will output available columns. This option
+// can't be combined with any other query string option.
+// API name: help
+func (r *Shards) Help(help bool) *Shards {
+	r.values.Set("help", strconv.FormatBool(help))
+
+	return r
+}
+
+// V When set to `true` will enable verbose output.
+// API name: v
+func (r *Shards) V(v bool) *Shards {
+	r.values.Set("v", strconv.FormatBool(v))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace +func (r *Shards) ErrorTrace(errortrace bool) *Shards { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Shards) FilterPath(filterpaths ...string) *Shards { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Shards) Human(human bool) *Shards { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Shards) Pretty(pretty bool) *Shards { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/response.go new file mode 100644 index 000000000..5c8be037e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package snapshots + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package snapshots +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L25 + +type Response []types.SnapshotsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/snapshots.go new file mode 100644 index 000000000..5bb6f0ca7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots/snapshots.go @@ -0,0 +1,440 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get snapshot information.
+//
+// Get information about the snapshots stored in one or more repositories.
+// A snapshot is a backup of an index or running Elasticsearch cluster.
+// IMPORTANT: cat APIs are only intended for human consumption using the command
+// line or Kibana console. They are not intended for use by applications. For
+// application consumption, use the get snapshot API.
+package snapshots
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit"
+)
+
+const (
+	repositoryMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Snapshots struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	repository string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSnapshots type alias for index.
+type NewSnapshots func() *Snapshots
+
+// NewSnapshotsFunc returns a new instance of Snapshots with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewSnapshotsFunc(tp elastictransport.Interface) NewSnapshots {
+	return func() *Snapshots {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get snapshot information.
+//
+// Get information about the snapshots stored in one or more repositories.
+// A snapshot is a backup of an index or running Elasticsearch cluster.
+// IMPORTANT: cat APIs are only intended for human consumption using the command
+// line or Kibana console. They are not intended for use by applications. For
+// application consumption, use the get snapshot API.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots
+func New(tp elastictransport.Interface) *Snapshots {
+	r := &Snapshots{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
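
The path-building switch is the only moving part of the HttpRequest method that follows, so it can be illustrated offline. In this sketch the transport is nil, which is safe here only because HttpRequest never touches it; the repository name is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots"
)

func main() {
	// Without a repository, paramSet stays 0 and the path is /_cat/snapshots.
	req, _ := snapshots.New(nil).HttpRequest(context.Background())
	fmt.Println(req.URL.Path) // /_cat/snapshots

	// Setting Repository flips repositoryMask and appends the path part.
	req, _ = snapshots.New(nil).Repository("my-repo").HttpRequest(context.Background())
	fmt.Println(req.URL.Path) // /_cat/snapshots/my-repo
}
```

+
+// HttpRequest returns the http.Request object built from the
+// given parameters.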
+func (r *Snapshots) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("snapshots") + + method = http.MethodGet + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Snapshots) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.snapshots") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.snapshots") + if reader := instrument.RecordRequestBody(ctx, "cat.snapshots", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.snapshots") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Snapshots query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a snapshots.Response +func (r Snapshots) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.snapshots") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows running a query with a context and retrieving the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Snapshots) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.snapshots")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Snapshots query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the Snapshots headers map.
+func (r *Snapshots) Header(key, value string) *Snapshots {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Repository A comma-separated list of snapshot repositories used to limit the request.
+// Accepts wildcard expressions.
+// `_all` returns all repositories.
+// If any repository fails during the request, Elasticsearch returns an error.
+// API Name: repository
+func (r *Snapshots) Repository(repository string) *Snapshots {
+	r.paramSet |= repositoryMask
+	r.repository = repository
+
+	return r
+}
+
+// IgnoreUnavailable If `true`, the response does not include information from unavailable
+// snapshots.
+// API name: ignore_unavailable
+func (r *Snapshots) IgnoreUnavailable(ignoreunavailable bool) *Snapshots {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// H A comma-separated list of column names to display.
+// It supports simple wildcards.
+// API name: h
+func (r *Snapshots) H(catsnapshotscolumns ...catsnapshotscolumn.CatSnapshotsColumn) *Snapshots {
+	tmp := []string{}
+	for _, item := range catsnapshotscolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S List of columns that determine how the table should be sorted.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *Snapshots) S(names ...string) *Snapshots {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node.
+// API name: master_timeout
+func (r *Snapshots) MasterTimeout(duration string) *Snapshots {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
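
The decode branches above also define the error shape callers see: on a non-2xx status, Do returns the decoded `*types.ElasticsearchError` as its error value, so it can be recovered with `errors.As`. A hedged sketch of both paths; the client construction and repository name are placeholders.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	records, err := es.Cat.Snapshots().
		Repository("my-repo"). // hypothetical repository name
		IgnoreUnavailable(true).
		Do(context.Background())
	if err != nil {
		// Non-2xx responses surface as *types.ElasticsearchError.
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			log.Fatalf("server rejected the request: status %d", esErr.Status)
		}
		log.Fatal(err) // transport-level failure
	}
	fmt.Printf("repository holds %d snapshots\n", len(records))
}
```

+
+// Time Unit used to display time values.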
+// API name: time +func (r *Snapshots) Time(time timeunit.TimeUnit) *Snapshots { + r.values.Set("time", time.String()) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Snapshots) Format(format string) *Snapshots { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Snapshots) Help(help bool) *Snapshots { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Snapshots) V(v bool) *Snapshots { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Snapshots) ErrorTrace(errortrace bool) *Snapshots { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Snapshots) FilterPath(filterpaths ...string) *Snapshots { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Snapshots) Human(human bool) *Snapshots { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Snapshots) Pretty(pretty bool) *Snapshots { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/response.go new file mode 100644 index 000000000..1f9f06e85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package tasks + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package tasks +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/tasks/CatTasksResponse.ts#L22-L25 + +type Response []types.TasksRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/tasks.go new file mode 100644 index 000000000..8ed00771e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks/tasks.go @@ -0,0 +1,442 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get task information. +// +// Get information about tasks currently running in the cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the task management API. +package tasks + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Tasks struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTasks type alias for index. +type NewTasks func() *Tasks + +// NewTasksFunc returns a new instance of Tasks with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTasksFunc(tp elastictransport.Interface) NewTasks { + return func() *Tasks { + n := New(tp) + + return n + } +} + +// Get task information. +// +// Get information about tasks currently running in the cluster. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. 
For +// application consumption, use the task management API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks +func New(tp elastictransport.Interface) *Tasks { + r := &Tasks{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Tasks) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("tasks") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Tasks) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.tasks") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.tasks") + if reader := instrument.RecordRequestBody(ctx, "cat.tasks", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.tasks") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Tasks query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a tasks.Response +func (r Tasks) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows running a query with a context and retrieving the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r Tasks) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.tasks")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Tasks query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the Tasks headers map.
+func (r *Tasks) Header(key, value string) *Tasks {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Actions The task action names, which are used to limit the response.
+// API name: actions
+func (r *Tasks) Actions(actions ...string) *Tasks {
+	tmp := []string{}
+	for _, item := range actions {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("actions", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Detailed If `true`, the response includes detailed information about the running tasks.
+// API name: detailed
+func (r *Tasks) Detailed(detailed bool) *Tasks {
+	r.values.Set("detailed", strconv.FormatBool(detailed))
+
+	return r
+}
+
+// Nodes Unique node identifiers, which are used to limit the response.
+// API name: nodes
+func (r *Tasks) Nodes(nodes ...string) *Tasks {
+	tmp := []string{}
+	for _, item := range nodes {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("nodes", strings.Join(tmp, ","))
+
+	return r
+}
+
+// ParentTaskId The parent task identifier, which is used to limit the response.
+// API name: parent_task_id
+func (r *Tasks) ParentTaskId(parenttaskid string) *Tasks {
+	r.values.Set("parent_task_id", parenttaskid)
+
+	return r
+}
+
+// H List of columns to appear in the response. Supports simple wildcards.
+// API name: h
+func (r *Tasks) H(names ...string) *Tasks {
+	r.values.Set("h", strings.Join(names, ","))
+
+	return r
+}
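
A short sketch of how the filtering setters above compose into query parameters, assuming the typed client exposes this builder as `Cat.Tasks()` as in v8; the action patterns and address are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Actions joins its variadic arguments into one comma-separated query
	// parameter; Detailed adds detailed=true.
	tasks, err := es.Cat.Tasks().
		Actions("*search*", "*bulk*").
		Detailed(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d tasks currently running\n", len(tasks))
}
```

+
+// S List of columns that determine how the table should be sorted.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.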
+// API name: s +func (r *Tasks) S(names ...string) *Tasks { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Time Unit used to display time values. +// API name: time +func (r *Tasks) Time(time timeunit.TimeUnit) *Tasks { + r.values.Set("time", time.String()) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Tasks) Timeout(duration string) *Tasks { + r.values.Set("timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the task has completed. +// API name: wait_for_completion +func (r *Tasks) WaitForCompletion(waitforcompletion bool) *Tasks { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Tasks) Format(format string) *Tasks { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Tasks) Help(help bool) *Tasks { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Tasks) V(v bool) *Tasks { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Tasks) ErrorTrace(errortrace bool) *Tasks { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Tasks) FilterPath(filterpaths ...string) *Tasks { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Tasks) Human(human bool) *Tasks { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Tasks) Pretty(pretty bool) *Tasks { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/response.go new file mode 100644 index 000000000..ee7ea0d5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package templates + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package templates +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/templates/CatTemplatesResponse.ts#L22-L25 + +type Response []types.TemplatesRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/templates.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/templates.go new file mode 100644 index 000000000..eeb7415b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates/templates.go @@ -0,0 +1,427 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index template information. +// +// Get information about the index templates in a cluster. +// You can use index templates to apply index settings and field mappings to new +// indices at creation. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get index template API. +package templates + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Templates struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTemplates type alias for index. +type NewTemplates func() *Templates + +// NewTemplatesFunc returns a new instance of Templates with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTemplatesFunc(tp elastictransport.Interface) NewTemplates { + return func() *Templates { + n := New(tp) + + return n + } +} + +// Get index template information. +// +// Get information about the index templates in a cluster. +// You can use index templates to apply index settings and field mappings to new +// indices at creation. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the get index template API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates +func New(tp elastictransport.Interface) *Templates { + r := &Templates{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Templates) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("templates") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("templates") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Templates) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "cat.templates")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "cat.templates")
+		if reader := instrument.RecordRequestBody(ctx, "cat.templates", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "cat.templates")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Templates query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a templates.Response
+func (r Templates) Do(providedCtx context.Context) (Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.templates")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
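
The IsSuccess variant defined next has a three-way contract that is easy to misread: a 2xx status yields (true, nil), a 404 yields (false, nil), and any other status yields (false, err). A sketch under the same placeholder-client assumption as the earlier examples:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	ok, err := es.Cat.Templates().Name("my-template*").IsSuccess(context.Background())
	switch {
	case err != nil:
		log.Fatal(err) // transport failure or a non-2xx, non-404 status
	case !ok:
		fmt.Println("endpoint answered 404") // the (false, nil) case
	default:
		fmt.Println("query succeeded")
	}
}
```

+
+// IsSuccess allows running a query with a context and retrieving the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.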
+func (r Templates) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.templates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Templates query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Templates headers map. +func (r *Templates) Header(key, value string) *Templates { + r.headers.Set(key, value) + + return r +} + +// Name The name of the template to return. +// Accepts wildcard expressions. If omitted, all templates are returned. +// API Name: name +func (r *Templates) Name(name string) *Templates { + r.paramSet |= nameMask + r.name = name + + return r +} + +// H List of columns to appear in the response. Supports simple wildcards. +// API name: h +func (r *Templates) H(names ...string) *Templates { + r.values.Set("h", strings.Join(names, ",")) + + return r +} + +// S List of columns that determine how the table should be sorted. +// Sorting defaults to ascending and can be changed by setting `:asc` +// or `:desc` as a suffix to the column name. +// API name: s +func (r *Templates) S(names ...string) *Templates { + r.values.Set("s", strings.Join(names, ",")) + + return r +} + +// Local If `true`, the request computes the list of selected nodes from the +// local cluster state. If `false` the list of selected nodes are computed +// from the cluster state of the master node. In both cases the coordinating +// node will send requests for further information to each selected node. +// API name: local +func (r *Templates) Local(local bool) *Templates { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Templates) MasterTimeout(duration string) *Templates { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *Templates) Format(format string) *Templates { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Templates) Help(help bool) *Templates { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Templates) V(v bool) *Templates { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Templates) ErrorTrace(errortrace bool) *Templates { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Templates) FilterPath(filterpaths ...string) *Templates { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Templates) Human(human bool) *Templates { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Templates) Pretty(pretty bool) *Templates { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/response.go new file mode 100644 index 000000000..fbfaa5a81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package threadpool + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package threadpool +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L25 + +type Response []types.ThreadPoolRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/thread_pool.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/thread_pool.go new file mode 100644 index 000000000..4f963dcea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool/thread_pool.go @@ -0,0 +1,442 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. +// Returned information includes all built-in thread pools and custom thread +// pools. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +package threadpool + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + threadpoolpatternsMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ThreadPool struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + threadpoolpatterns string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewThreadPool type alias for index. +type NewThreadPool func() *ThreadPool + +// NewThreadPoolFunc returns a new instance of ThreadPool with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewThreadPoolFunc(tp elastictransport.Interface) NewThreadPool { + return func() *ThreadPool { + n := New(tp) + + return n + } +} + +// Get thread pool statistics. +// +// Get thread pool statistics for each node in a cluster. +// Returned information includes all built-in thread pools and custom thread +// pools. +// IMPORTANT: cat APIs are only intended for human consumption using the command +// line or Kibana console. They are not intended for use by applications. For +// application consumption, use the nodes info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool +func New(tp elastictransport.Interface) *ThreadPool { + r := &ThreadPool{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ThreadPool) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("thread_pool") + + method = http.MethodGet + case r.paramSet == threadpoolpatternsMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("thread_pool") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "threadpoolpatterns", r.threadpoolpatterns) + } + path.WriteString(r.threadpoolpatterns) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ThreadPool) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.thread_pool") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.thread_pool") + if reader := instrument.RecordRequestBody(ctx, "cat.thread_pool", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.thread_pool") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ThreadPool query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a threadpool.Response +func (r ThreadPool) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.thread_pool") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows running a query with a context and retrieving the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r ThreadPool) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.thread_pool")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ThreadPool query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the ThreadPool headers map.
+func (r *ThreadPool) Header(key, value string) *ThreadPool {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ThreadPoolPatterns A comma-separated list of thread pool names used to limit the request.
+// Accepts wildcard expressions.
+// API Name: threadpoolpatterns
+func (r *ThreadPool) ThreadPoolPatterns(threadpoolpatterns string) *ThreadPool {
+	r.paramSet |= threadpoolpatternsMask
+	r.threadpoolpatterns = threadpoolpatterns
+
+	return r
+}
+
+// H List of columns to appear in the response. Supports simple wildcards.
+// API name: h
+func (r *ThreadPool) H(catthreadpoolcolumns ...catthreadpoolcolumn.CatThreadPoolColumn) *ThreadPool {
+	tmp := []string{}
+	for _, item := range catthreadpoolcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S A comma-separated list of column names or aliases that determines the sort
+// order.
+// Sorting defaults to ascending and can be changed by setting `:asc`
+// or `:desc` as a suffix to the column name.
+// API name: s
+func (r *ThreadPool) S(names ...string) *ThreadPool {
+	r.values.Set("s", strings.Join(names, ","))
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool {
+	r.values.Set("time", time.String())
+
+	return r
+}
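
A usage sketch for the pattern and sort setters above; the `Cat.ThreadPool()` accessor is assumed to mirror the v8 typed client, and the address and pool names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Limit the report to pools matching the patterns and sort by name.
	pools, err := es.Cat.ThreadPool().
		ThreadPoolPatterns("write,search"). // comma-separated patterns
		S("name").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d thread-pool rows across the cluster\n", len(pools))
}
```

+
+// Local If `true`, the request computes the list of selected nodes from the
+// local cluster state. If `false` the list of selected nodes are computed
+// from the cluster state of the master node. In both cases the coordinating
+// node will send requests for further information to each selected node.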
+// API name: local +func (r *ThreadPool) Local(local bool) *ThreadPool { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *ThreadPool) MasterTimeout(duration string) *ThreadPool { + r.values.Set("master_timeout", duration) + + return r +} + +// Format Specifies the format to return the columnar data in, can be set to +// `text`, `json`, `cbor`, `yaml`, or `smile`. +// API name: format +func (r *ThreadPool) Format(format string) *ThreadPool { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *ThreadPool) Help(help bool) *ThreadPool { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *ThreadPool) V(v bool) *ThreadPool { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ThreadPool) ErrorTrace(errortrace bool) *ThreadPool { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ThreadPool) FilterPath(filterpaths ...string) *ThreadPool { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ThreadPool) Human(human bool) *ThreadPool { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ThreadPool) Pretty(pretty bool) *ThreadPool { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/response.go new file mode 100644 index 000000000..49686677c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package transforms + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package transforms +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/transforms/CatTransformsResponse.ts#L22-L25 + +type Response []types.TransformsRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/transforms.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/transforms.go new file mode 100644 index 000000000..92560bee3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms/transforms.go @@ -0,0 +1,455 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get transform information. +// +// Get configuration and usage information about transforms. +// +// CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get transform statistics API. +package transforms + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Transforms struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTransforms type alias for index. +type NewTransforms func() *Transforms + +// NewTransformsFunc returns a new instance of Transforms with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewTransformsFunc(tp elastictransport.Interface) NewTransforms { + return func() *Transforms { + n := New(tp) + + return n + } +} + +// Get transform information. +// +// Get configuration and usage information about transforms. +// +// CAT APIs are only intended for human consumption using the Kibana +// console or command line. They are not intended for use by applications. For +// application consumption, use the get transform statistics API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms +func New(tp elastictransport.Interface) *Transforms { + r := &Transforms{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Transforms) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("transforms") + + method = http.MethodGet + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_cat") + path.WriteString("/") + path.WriteString("transforms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
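+//
+// Perform is the low-level entry point; most callers will prefer Do, which
+// decodes the body into a typed Response. A minimal sketch, assuming an
+// already configured *elasticsearch.TypedClient named es:
+//
+//	res, err := es.Cat.Transforms().Perform(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()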
+func (r Transforms) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cat.transforms") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cat.transforms") + if reader := instrument.RecordRequestBody(ctx, "cat.transforms", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cat.transforms") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Transforms query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a transforms.Response +func (r Transforms) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cat.transforms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
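+//
+// A hedged usage sketch, with client construction omitted and es assumed to
+// be an *elasticsearch.TypedClient:
+//
+//	ok, err := es.Cat.Transforms().IsSuccess(context.Background())
+//	if err == nil && ok {
+//		fmt.Println("_cat/transforms answered with a 2xx status")
+//	}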
+func (r Transforms) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cat.transforms")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Transforms query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the Transforms headers map.
+func (r *Transforms) Header(key, value string) *Transforms {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// TransformId A transform identifier or a wildcard expression.
+// If you do not specify one of these options, the API returns information for
+// all transforms.
+// API Name: transformid
+func (r *Transforms) TransformId(transformid string) *Transforms {
+	r.paramSet |= transformidMask
+	r.transformid = transformid
+
+	return r
+}
+
+// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and
+// there are no transforms that match; contains the `_all` string or no
+// identifiers and there are no matches; contains wildcard expressions and there
+// are only partial matches.
+// If `true`, it returns an empty transforms array when there are no matches and
+// the subset of results when there are partial matches.
+// If `false`, the request returns a 404 status code when there are no matches
+// or only partial matches.
+// API name: allow_no_match
+func (r *Transforms) AllowNoMatch(allownomatch bool) *Transforms {
+	r.values.Set("allow_no_match", strconv.FormatBool(allownomatch))
+
+	return r
+}
+
+// From Skips the specified number of transforms.
+// API name: from
+func (r *Transforms) From(from int) *Transforms {
+	r.values.Set("from", strconv.Itoa(from))
+
+	return r
+}
+
+// H Comma-separated list of column names to display.
+// API name: h
+func (r *Transforms) H(cattransformcolumns ...cattransformcolumn.CatTransformColumn) *Transforms {
+	tmp := []string{}
+	for _, item := range cattransformcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("h", strings.Join(tmp, ","))
+
+	return r
+}
+
+// S Comma-separated list of column names or column aliases used to sort the
+// response.
+// API name: s
+func (r *Transforms) S(cattransformcolumns ...cattransformcolumn.CatTransformColumn) *Transforms {
+	tmp := []string{}
+	for _, item := range cattransformcolumns {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("s", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Time The unit used to display time values.
+// API name: time
+func (r *Transforms) Time(time timeunit.TimeUnit) *Transforms {
+	r.values.Set("time", time.String())
+
+	return r
+}
+
+// Size The maximum number of transforms to obtain.
+// API name: size
+func (r *Transforms) Size(size int) *Transforms {
+	r.values.Set("size", strconv.Itoa(size))
+
+	return r
+}
+
+// Format Specifies the format to return the columnar data in; it can be set to
+// `text`, `json`, `cbor`, `yaml`, or `smile`.
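+// For example, Format("json") asks Elasticsearch to return the cat output as
+// JSON instead of the default tab-separated text.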
+// API name: format +func (r *Transforms) Format(format string) *Transforms { + r.values.Set("format", format) + + return r +} + +// Help When set to `true` will output available columns. This option +// can't be combined with any other query string option. +// API name: help +func (r *Transforms) Help(help bool) *Transforms { + r.values.Set("help", strconv.FormatBool(help)) + + return r +} + +// V When set to `true` will enable verbose output. +// API name: v +func (r *Transforms) V(v bool) *Transforms { + r.values.Set("v", strconv.FormatBool(v)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Transforms) ErrorTrace(errortrace bool) *Transforms { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Transforms) FilterPath(filterpaths ...string) *Transforms { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Transforms) Human(human bool) *Transforms { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Transforms) Pretty(pretty bool) *Transforms { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go new file mode 100644 index 000000000..a16128da7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. 
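+//
+// A hedged usage sketch with the typed client; the client variable es and the
+// pattern name are illustrative:
+//
+//	res, err := es.Ccr.DeleteAutoFollowPattern("my-auto-follow-pattern").Do(ctx)
+//	if err == nil {
+//		fmt.Println("acknowledged:", res.Acknowledged)
+//	}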
+package deleteautofollowpattern + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteAutoFollowPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteAutoFollowPattern type alias for index. +type NewDeleteAutoFollowPattern func(name string) *DeleteAutoFollowPattern + +// NewDeleteAutoFollowPatternFunc returns a new instance of DeleteAutoFollowPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteAutoFollowPatternFunc(tp elastictransport.Interface) NewDeleteAutoFollowPattern { + return func(name string) *DeleteAutoFollowPattern { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete auto-follow patterns. +// +// Delete a collection of cross-cluster replication auto-follow patterns. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern +func New(tp elastictransport.Interface) *DeleteAutoFollowPattern { + r := &DeleteAutoFollowPattern{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteAutoFollowPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.delete_auto_follow_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.delete_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.delete_auto_follow_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.delete_auto_follow_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteAutoFollowPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteautofollowpattern.Response +func (r DeleteAutoFollowPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.delete_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
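+//
+// Sketch (client and pattern name illustrative): a plain boolean check that
+// the delete endpoint answered with a 2xx status:
+//
+//	ok, err := es.Ccr.DeleteAutoFollowPattern("my-auto-follow-pattern").IsSuccess(ctx)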
+func (r DeleteAutoFollowPattern) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.delete_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteAutoFollowPattern query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteAutoFollowPattern headers map. +func (r *DeleteAutoFollowPattern) Header(key, value string) *DeleteAutoFollowPattern { + r.headers.Set(key, value) + + return r +} + +// Name The auto-follow pattern collection to delete. +// API Name: name +func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *DeleteAutoFollowPattern) MasterTimeout(duration string) *DeleteAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteAutoFollowPattern) ErrorTrace(errortrace bool) *DeleteAutoFollowPattern { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteAutoFollowPattern) FilterPath(filterpaths ...string) *DeleteAutoFollowPattern { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteAutoFollowPattern) Human(human bool) *DeleteAutoFollowPattern { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteAutoFollowPattern) Pretty(pretty bool) *DeleteAutoFollowPattern { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/response.go new file mode 100644 index 000000000..2b513ee51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteautofollowpattern + +// Response holds the response body struct for the package deleteautofollowpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/follow.go new file mode 100644 index 000000000..73bbff854 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/follow.go @@ -0,0 +1,583 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a follower. 
+// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. +package follow + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Follow struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFollow type alias for index. +type NewFollow func(index string) *Follow + +// NewFollowFunc returns a new instance of Follow with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFollowFunc(tp elastictransport.Interface) NewFollow { + return func(index string) *Follow { + n := New(tp) + + n._index(index) + + return n + } +} + +// Create a follower. +// Create a cross-cluster replication follower index that follows a specific +// leader index. +// When the API returns, the follower index exists and cross-cluster replication +// starts replicating operations from the leader index to the follower index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow +func New(tp elastictransport.Interface) *Follow { + r := &Follow{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Follow) Raw(raw io.Reader) *Follow { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Follow) Request(req *Request) *Follow { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
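+//
+// The body is taken from Raw when set, otherwise it is serialised from the
+// typed Request. A hedged sketch of the typed form, with illustrative index
+// and cluster names and es assumed to be an *elasticsearch.TypedClient:
+//
+//	res, err := es.Ccr.Follow("follower-index").
+//		RemoteCluster("remote-cluster").
+//		LeaderIndex("leader-index").
+//		Do(ctx)
+//	if err == nil {
+//		fmt.Println("following started:", res.IndexFollowingStarted)
+//	}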
+func (r *Follow) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Follow: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("follow") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Follow) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.follow") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow") + if reader := instrument.RecordRequestBody(ctx, "ccr.follow", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Follow query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a follow.Response +func (r Follow) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Follow headers map. +func (r *Follow) Header(key, value string) *Follow { + r.headers.Set(key, value) + + return r +} + +// Index The name of the follower index. +// API Name: index +func (r *Follow) _index(index string) *Follow { + r.paramSet |= indexMask + r.index = index + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Follow) MasterTimeout(duration string) *Follow { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForActiveShards Specifies the number of shards to wait on being active before responding. +// This defaults to waiting on none of the shards to be +// active. +// A shard must be restored from the leader index before being active. 
Restoring +// a follower shard requires transferring all the +// remote Lucene segment files to the follower index. +// API name: wait_for_active_shards +func (r *Follow) WaitForActiveShards(waitforactiveshards string) *Follow { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Follow) ErrorTrace(errortrace bool) *Follow { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Follow) FilterPath(filterpaths ...string) *Follow { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Follow) Human(human bool) *Follow { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Follow) Pretty(pretty bool) *Follow { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If the leader index is part of a data stream, the name to which the local +// data stream for the followed index should be renamed. +// API name: data_stream_name +func (r *Follow) DataStreamName(datastreamname string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataStreamName = &datastreamname + + return r +} + +// The name of the index in the leader cluster to follow. +// API name: leader_index +func (r *Follow) LeaderIndex(indexname string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LeaderIndex = indexname + + return r +} + +// The maximum number of outstanding reads requests from the remote cluster. +// API name: max_outstanding_read_requests +func (r *Follow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests + + return r +} + +// The maximum number of outstanding write requests on the follower. +// API name: max_outstanding_write_requests +func (r *Follow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests + + return r +} + +// The maximum number of operations to pull per read from the remote cluster. 
+// API name: max_read_request_operation_count +func (r *Follow) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount + + return r +} + +// The maximum size in bytes of per read of a batch of operations pulled from +// the remote cluster. +// API name: max_read_request_size +func (r *Follow) MaxReadRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster() + + return r +} + +// The maximum time to wait before retrying an operation that failed +// exceptionally. An exponential backoff strategy is employed when +// retrying. +// API name: max_retry_delay +func (r *Follow) MaxRetryDelay(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() + + return r +} + +// The maximum number of operations that can be queued for writing. When this +// limit is reached, reads from the remote cluster will be +// deferred until the number of queued operations goes below the limit. +// API name: max_write_buffer_count +func (r *Follow) MaxWriteBufferCount(maxwritebuffercount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferCount = &maxwritebuffercount + + return r +} + +// The maximum total bytes of operations that can be queued for writing. When +// this limit is reached, reads from the remote cluster will +// be deferred until the total bytes of queued operations goes below the limit. +// API name: max_write_buffer_size +func (r *Follow) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster() + + return r +} + +// The maximum number of operations per bulk write request executed on the +// follower. +// API name: max_write_request_operation_count +func (r *Follow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// The maximum total bytes of operations per bulk write request executed on the +// follower. +// API name: max_write_request_size +func (r *Follow) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() + + return r +} + +// The maximum time to wait for new operations on the remote cluster when the +// follower index is synchronized with the leader index. +// When the timeout has elapsed, the poll for operations will return to the +// follower so that it can update some statistics. +// Then the follower will immediately attempt to read from the leader again. 
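+// A typical value such as 1m keeps the long poll open for up to one minute
+// before the follower re-issues the read.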
+// API name: read_poll_timeout +func (r *Follow) ReadPollTimeout(duration types.DurationVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() + + return r +} + +// The remote cluster containing the leader index. +// API name: remote_cluster +func (r *Follow) RemoteCluster(remotecluster string) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoteCluster = remotecluster + + return r +} + +// Settings to override from the leader index. +// API name: settings +func (r *Follow) Settings(settings types.IndexSettingsVariant) *Follow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.IndexSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/request.go new file mode 100644 index 000000000..98b7e4c09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/request.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package follow + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package follow +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow/CreateFollowIndexRequest.ts#L26-L124 +type Request struct { + + // DataStreamName If the leader index is part of a data stream, the name to which the local + // data stream for the followed index should be renamed. + DataStreamName *string `json:"data_stream_name,omitempty"` + // LeaderIndex The name of the index in the leader cluster to follow. + LeaderIndex string `json:"leader_index"` + // MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. + MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` + // MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower. + MaxOutstandingWriteRequests *int `json:"max_outstanding_write_requests,omitempty"` + // MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. 
+ MaxReadRequestOperationCount *int `json:"max_read_request_operation_count,omitempty"` + // MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from + // the remote cluster. + MaxReadRequestSize types.ByteSize `json:"max_read_request_size,omitempty"` + // MaxRetryDelay The maximum time to wait before retrying an operation that failed + // exceptionally. An exponential backoff strategy is employed when + // retrying. + MaxRetryDelay types.Duration `json:"max_retry_delay,omitempty"` + // MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this + // limit is reached, reads from the remote cluster will be + // deferred until the number of queued operations goes below the limit. + MaxWriteBufferCount *int `json:"max_write_buffer_count,omitempty"` + // MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When + // this limit is reached, reads from the remote cluster will + // be deferred until the total bytes of queued operations goes below the limit. + MaxWriteBufferSize types.ByteSize `json:"max_write_buffer_size,omitempty"` + // MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the + // follower. + MaxWriteRequestOperationCount *int `json:"max_write_request_operation_count,omitempty"` + // MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the + // follower. + MaxWriteRequestSize types.ByteSize `json:"max_write_request_size,omitempty"` + // ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the + // follower index is synchronized with the leader index. + // When the timeout has elapsed, the poll for operations will return to the + // follower so that it can update some statistics. + // Then the follower will immediately attempt to read from the leader again. + ReadPollTimeout types.Duration `json:"read_poll_timeout,omitempty"` + // RemoteCluster The remote cluster containing the leader index. + RemoteCluster string `json:"remote_cluster"` + // Settings Settings to override from the leader index. 
+ Settings *types.IndexSettings `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Follow request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_stream_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataStreamName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataStreamName = &o + + case "leader_index": + if err := dec.Decode(&s.LeaderIndex); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndex", err) + } + + case "max_outstanding_read_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) + } + s.MaxOutstandingReadRequests = &value + case float64: + f := int64(v) + s.MaxOutstandingReadRequests = &f + } + + case "max_outstanding_write_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) + } + s.MaxOutstandingWriteRequests = &value + case float64: + f := int(v) + s.MaxOutstandingWriteRequests = &f + } + + case "max_read_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestOperationCount", err) + } + s.MaxReadRequestOperationCount = &value + case float64: + f := int(v) + s.MaxReadRequestOperationCount = &f + } + + case "max_read_request_size": + if err := dec.Decode(&s.MaxReadRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) + } + + case "max_retry_delay": + if err := dec.Decode(&s.MaxRetryDelay); err != nil { + return fmt.Errorf("%s | %w", "MaxRetryDelay", err) + } + + case "max_write_buffer_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) + } + s.MaxWriteBufferCount = &value + case float64: + f := int(v) + s.MaxWriteBufferCount = &f + } + + case "max_write_buffer_size": + if err := dec.Decode(&s.MaxWriteBufferSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) + } + + case "max_write_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) + } + s.MaxWriteRequestOperationCount = &value + case float64: + f := int(v) + s.MaxWriteRequestOperationCount = &f + } + + case "max_write_request_size": + if err := dec.Decode(&s.MaxWriteRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) + } + + case "read_poll_timeout": + if err := 
dec.Decode(&s.ReadPollTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadPollTimeout", err) + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/response.go new file mode 100644 index 000000000..5887ae9e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package follow + +// Response holds the response body struct for the package follow +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 +type Response struct { + FollowIndexCreated bool `json:"follow_index_created"` + FollowIndexShardsAcked bool `json:"follow_index_shards_acked"` + IndexFollowingStarted bool `json:"index_following_started"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/follow_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/follow_info.go new file mode 100644 index 000000000..c6b712371 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/follow_info.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. +package followinfo + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FollowInfo struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFollowInfo type alias for index. +type NewFollowInfo func(index string) *FollowInfo + +// NewFollowInfoFunc returns a new instance of FollowInfo with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFollowInfoFunc(tp elastictransport.Interface) NewFollowInfo { + return func(index string) *FollowInfo { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get follower information. +// +// Get information about all cross-cluster replication follower indices. +// For example, the results include follower index names, leader index names, +// replication options, and whether the follower indices are active or paused. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info +func New(tp elastictransport.Interface) *FollowInfo { + r := &FollowInfo{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
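+//
+// Most callers go through Do instead, which performs the request and decodes
+// the body. A hedged sketch with an illustrative index name:
+//
+//	info, err := es.Ccr.FollowInfo("follower-index").Do(ctx)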
+func (r *FollowInfo) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("info") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r FollowInfo) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.follow_info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow_info") + if reader := instrument.RecordRequestBody(ctx, "ccr.follow_info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow_info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the FollowInfo query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a followinfo.Response +func (r FollowInfo) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if 
err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r FollowInfo) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the FollowInfo query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the FollowInfo headers map. +func (r *FollowInfo) Header(key, value string) *FollowInfo { + r.headers.Set(key, value) + + return r +} + +// Index A comma-delimited list of follower index patterns. +// API Name: index +func (r *FollowInfo) _index(index string) *FollowInfo { + r.paramSet |= indexMask + r.index = index + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *FollowInfo) MasterTimeout(duration string) *FollowInfo { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FollowInfo) ErrorTrace(errortrace bool) *FollowInfo { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *FollowInfo) FilterPath(filterpaths ...string) *FollowInfo { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *FollowInfo) Human(human bool) *FollowInfo { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *FollowInfo) Pretty(pretty bool) *FollowInfo { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/response.go new file mode 100644 index 000000000..30e186b52 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package followinfo + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package followinfo +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 +type Response struct { + FollowerIndices []types.FollowerIndex `json:"follower_indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/follow_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/follow_stats.go new file mode 100644 index 000000000..90facdaa7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/follow_stats.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get follower stats. +// +// Get cross-cluster replication follower stats. +// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. 
+package followstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FollowStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFollowStats type alias for index. +type NewFollowStats func(index string) *FollowStats + +// NewFollowStatsFunc returns a new instance of FollowStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFollowStatsFunc(tp elastictransport.Interface) NewFollowStats { + return func(index string) *FollowStats { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get follower stats. +// +// Get cross-cluster replication follower stats. +// The API returns shard-level stats about the "following tasks" associated with +// each shard for the specified indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats +func New(tp elastictransport.Interface) *FollowStats { + r := &FollowStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *FollowStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
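The switch in `HttpRequest` above is the entire routing logic: with the index set, the builder emits `GET /<index>/_ccr/stats`. A quick way to confirm this without touching a cluster is to build the `http.Request` directly; a nil transport suffices because `HttpRequest` performs no I/O. A sketch with a placeholder index name:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats"
)

// ShowFollowStatsRequest prints the method, path, and query string the
// FollowStats builder produces. Nothing is sent, so nil is a safe transport.
func ShowFollowStatsRequest() error {
	req, err := followstats.NewFollowStatsFunc(nil)("follower-index").
		Timeout("30s").
		HttpRequest(context.Background())
	if err != nil {
		return err
	}
	// Prints: GET /follower-index/_ccr/stats timeout=30s
	fmt.Println(req.Method, req.URL.Path, req.URL.RawQuery)
	return nil
}
```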
+func (r FollowStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.follow_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.follow_stats") + if reader := instrument.RecordRequestBody(ctx, "ccr.follow_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.follow_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the FollowStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a followstats.Response +func (r FollowStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
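As the `Do` implementation above shows, a non-2xx response is decoded into a `*types.ElasticsearchError` (with `Status` back-filled from the HTTP status code when missing) and returned as the error. Assuming that type implements `error`, as it does elsewhere in this library, callers can recover the server-side detail with `errors.As`; a hedged sketch with placeholder names:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// FollowStatsOrStatus distinguishes transport failures from Elasticsearch
// error responses surfaced by Do.
func FollowStatsOrStatus(tp elastictransport.Interface) error {
	res, err := followstats.NewFollowStatsFunc(tp)("follower-index").
		Do(context.Background())
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// The server answered with an error document.
			return fmt.Errorf("elasticsearch refused the request (HTTP %d): %w", esErr.Status, esErr)
		}
		return err // transport-level failure
	}
	fmt.Printf("stats for %d follower indices\n", len(res.Indices))
	return nil
}
```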
+func (r FollowStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.follow_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the FollowStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the FollowStats headers map. +func (r *FollowStats) Header(key, value string) *FollowStats { + r.headers.Set(key, value) + + return r +} + +// Index A comma-delimited list of index patterns. +// API Name: index +func (r *FollowStats) _index(index string) *FollowStats { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *FollowStats) Timeout(duration string) *FollowStats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FollowStats) ErrorTrace(errortrace bool) *FollowStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *FollowStats) FilterPath(filterpaths ...string) *FollowStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *FollowStats) Human(human bool) *FollowStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FollowStats) Pretty(pretty bool) *FollowStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/response.go new file mode 100644 index 000000000..38498b532 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package followstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package followstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L27 +type Response struct { + + // Indices An array of follower index statistics. + Indices []types.FollowIndexStats `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/forget_follower.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/forget_follower.go new file mode 100644 index 000000000..32c395bd1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/forget_follower.go @@ -0,0 +1,464 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. +// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. 
+// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. +package forgetfollower + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ForgetFollower struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewForgetFollower type alias for index. +type NewForgetFollower func(index string) *ForgetFollower + +// NewForgetFollowerFunc returns a new instance of ForgetFollower with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewForgetFollowerFunc(tp elastictransport.Interface) NewForgetFollower { + return func(index string) *ForgetFollower { + n := New(tp) + + n._index(index) + + return n + } +} + +// Forget a follower. +// Remove the cross-cluster replication follower retention leases from the +// leader. +// +// A following index takes out retention leases on its leader index. +// These leases are used to increase the likelihood that the shards of the +// leader index retain the history of operations that the shards of the +// following index need to run replication. +// When a follower index is converted to a regular index by the unfollow API +// (either by directly calling the API or by index lifecycle management tasks), +// these leases are removed. +// However, removal of the leases can fail, for example when the remote cluster +// containing the leader index is unavailable. +// While the leases will eventually expire on their own, their extended +// existence can cause the leader index to hold more history than necessary and +// prevent index lifecycle management from performing some operations on the +// leader index. +// This API exists to enable manually removing the leases when the unfollow API +// is unable to do so. +// +// NOTE: This API does not stop replication by a following index. If you use +// this API with a follower index that is still actively following, the +// following index will add back retention leases on the leader. +// The only purpose of this API is to handle the case of failure to remove the +// following retention leases after the unfollow API is invoked. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower +func New(tp elastictransport.Interface) *ForgetFollower { + r := &ForgetFollower{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ForgetFollower) Raw(raw io.Reader) *ForgetFollower { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ForgetFollower) Request(req *Request) *ForgetFollower { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ForgetFollower) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ForgetFollower: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("forget_follower") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
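`HttpRequest` above serialises `r.req` lazily, so the body can be populated either with the fluent setters defined later in this file or loaded wholesale through `Request.FromJSON` in request.go. A sketch of the fluent form; every value below is a placeholder, and the `Shards_` field names are assumed from the `types.ShardStatistics` struct:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower"
)

// ForgetStaleFollower removes retention leases that a deleted follower left
// behind on the leader index.
func ForgetStaleFollower(tp elastictransport.Interface) error {
	res, err := forgetfollower.NewForgetFollowerFunc(tp)("leader-index").
		FollowerCluster("follower-cluster").
		FollowerIndex("follower-index").
		FollowerIndexUuid("vYpnaWPRQB6mNGKnoTNmig"). // placeholder UUID
		LeaderRemoteCluster("leader-remote").
		Do(context.Background())
	if err != nil {
		return err
	}
	fmt.Printf("leases removed on %d of %d shards\n",
		res.Shards_.Successful, res.Shards_.Total)
	return nil
}
```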
+func (r ForgetFollower) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.forget_follower") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.forget_follower") + if reader := instrument.RecordRequestBody(ctx, "ccr.forget_follower", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.forget_follower") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ForgetFollower query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a forgetfollower.Response +func (r ForgetFollower) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.forget_follower") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ForgetFollower headers map. +func (r *ForgetFollower) Header(key, value string) *ForgetFollower { + r.headers.Set(key, value) + + return r +} + +// Index the name of the leader index for which specified follower retention leases +// should be removed +// API Name: index +func (r *ForgetFollower) _index(index string) *ForgetFollower { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. 
+// API name: timeout +func (r *ForgetFollower) Timeout(duration string) *ForgetFollower { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ForgetFollower) ErrorTrace(errortrace bool) *ForgetFollower { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ForgetFollower) FilterPath(filterpaths ...string) *ForgetFollower { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ForgetFollower) Human(human bool) *ForgetFollower { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ForgetFollower) Pretty(pretty bool) *ForgetFollower { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: follower_cluster +func (r *ForgetFollower) FollowerCluster(followercluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FollowerCluster = &followercluster + + return r +} + +// API name: follower_index +func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FollowerIndex = &indexname + + return r +} + +// API name: follower_index_uuid +func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FollowerIndexUuid = &uuid + + return r +} + +// API name: leader_remote_cluster +func (r *ForgetFollower) LeaderRemoteCluster(leaderremotecluster string) *ForgetFollower { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LeaderRemoteCluster = &leaderremotecluster + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/request.go new file mode 100644 index 000000000..955b3ccdc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/request.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package forgetfollower + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package forgetfollower +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L24-L65 +type Request struct { + FollowerCluster *string `json:"follower_cluster,omitempty"` + FollowerIndex *string `json:"follower_index,omitempty"` + FollowerIndexUuid *string `json:"follower_index_uuid,omitempty"` + LeaderRemoteCluster *string `json:"leader_remote_cluster,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Forgetfollower request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "follower_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FollowerCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FollowerCluster = &o + + case "follower_index": + if err := dec.Decode(&s.FollowerIndex); err != nil { + return fmt.Errorf("%s | %w", "FollowerIndex", err) + } + + case "follower_index_uuid": + if err := dec.Decode(&s.FollowerIndexUuid); err != nil { + return fmt.Errorf("%s | %w", "FollowerIndexUuid", err) + } + + case "leader_remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LeaderRemoteCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LeaderRemoteCluster = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/response.go new file mode 100644 index 000000000..24a53d289 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package forgetfollower + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package forgetfollower +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 +type Response struct { + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go new file mode 100644 index 000000000..9c5b10861 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. +package getautofollowpattern + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetAutoFollowPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetAutoFollowPattern type alias for index. 
+type NewGetAutoFollowPattern func() *GetAutoFollowPattern + +// NewGetAutoFollowPatternFunc returns a new instance of GetAutoFollowPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetAutoFollowPatternFunc(tp elastictransport.Interface) NewGetAutoFollowPattern { + return func() *GetAutoFollowPattern { + n := New(tp) + + return n + } +} + +// Get auto-follow patterns. +// +// Get cross-cluster replication auto-follow patterns. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 +func New(tp elastictransport.Interface) *GetAutoFollowPattern { + r := &GetAutoFollowPattern{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
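With no name set, the path switch above emits `GET /_ccr/auto_follow` and returns every pattern collection; setting `Name` narrows it to one. A usage sketch with placeholder names:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern"
)

// ListAutoFollowPatterns fetches all auto-follow patterns, or a single
// collection when name is non-empty.
func ListAutoFollowPatterns(tp elastictransport.Interface, name string) error {
	b := getautofollowpattern.NewGetAutoFollowPatternFunc(tp)()
	if name != "" {
		b = b.Name(name)
	}
	res, err := b.Do(context.Background())
	if err != nil {
		return err
	}
	fmt.Printf("%d pattern(s) returned\n", len(res.Patterns))
	return nil
}
```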
+func (r GetAutoFollowPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.get_auto_follow_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.get_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.get_auto_follow_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.get_auto_follow_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetAutoFollowPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getautofollowpattern.Response +func (r GetAutoFollowPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.get_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
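Because `IsSuccess` below maps a 404 to `(false, nil)` rather than an error, it can double as a cheap existence probe, assuming the server answers 404 for an unknown pattern name. A sketch:

```go
package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern"
)

// AutoFollowPatternExists reports whether the named pattern collection
// exists, without decoding the response body.
func AutoFollowPatternExists(tp elastictransport.Interface, name string) (bool, error) {
	return getautofollowpattern.NewGetAutoFollowPatternFunc(tp)().
		Name(name).
		IsSuccess(context.Background())
}
```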
+func (r GetAutoFollowPattern) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.get_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetAutoFollowPattern query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetAutoFollowPattern headers map. +func (r *GetAutoFollowPattern) Header(key, value string) *GetAutoFollowPattern { + r.headers.Set(key, value) + + return r +} + +// Name The auto-follow pattern collection that you want to retrieve. +// If you do not specify a name, the API returns information for all +// collections. +// API Name: name +func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *GetAutoFollowPattern) MasterTimeout(duration string) *GetAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetAutoFollowPattern) ErrorTrace(errortrace bool) *GetAutoFollowPattern { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetAutoFollowPattern) FilterPath(filterpaths ...string) *GetAutoFollowPattern { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetAutoFollowPattern) Human(human bool) *GetAutoFollowPattern { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetAutoFollowPattern) Pretty(pretty bool) *GetAutoFollowPattern { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/response.go new file mode 100644 index 000000000..411e7312d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getautofollowpattern + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getautofollowpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 +type Response struct { + Patterns []types.AutoFollowPattern `json:"patterns"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go new file mode 100644 index 000000000..8bb1799bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go @@ -0,0 +1,381 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Pause an auto-follow pattern. 
+// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. +// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. +package pauseautofollowpattern + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PauseAutoFollowPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPauseAutoFollowPattern type alias for index. +type NewPauseAutoFollowPattern func(name string) *PauseAutoFollowPattern + +// NewPauseAutoFollowPatternFunc returns a new instance of PauseAutoFollowPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPauseAutoFollowPatternFunc(tp elastictransport.Interface) NewPauseAutoFollowPattern { + return func(name string) *PauseAutoFollowPattern { + n := New(tp) + + n._name(name) + + return n + } +} + +// Pause an auto-follow pattern. +// +// Pause a cross-cluster replication auto-follow pattern. +// When the API returns, the auto-follow pattern is inactive. +// New indices that are created on the remote cluster and match the auto-follow +// patterns are ignored. +// +// You can resume auto-following with the resume auto-follow pattern API. +// When it resumes, the auto-follow pattern is active again and automatically +// configures follower indices for newly created indices on the remote cluster +// that match its patterns. +// Remote indices that were created while the pattern was paused will also be +// followed, unless they have been deleted or closed in the interim. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern +func New(tp elastictransport.Interface) *PauseAutoFollowPattern { + r := &PauseAutoFollowPattern{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
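A sketch of pausing a pattern with this builder; the pattern name is a placeholder and `tp` is assumed to be a configured `elastictransport.Interface`. The response fields are left untouched so the example relies only on the error contract visible above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern"
)

// PausePattern suspends the auto-follow pattern so that newly created remote
// indices are ignored until the pattern is resumed.
func PausePattern(tp elastictransport.Interface) error {
	_, err := pauseautofollowpattern.NewPauseAutoFollowPatternFunc(tp)("my-pattern").
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		return err
	}
	fmt.Println("auto-follow pattern paused")
	return nil
}
```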
+func (r *PauseAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("pause") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PauseAutoFollowPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.pause_auto_follow_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.pause_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.pause_auto_follow_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.pause_auto_follow_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PauseAutoFollowPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a pauseautofollowpattern.Response +func (r PauseAutoFollowPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PauseAutoFollowPattern) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PauseAutoFollowPattern query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PauseAutoFollowPattern headers map. +func (r *PauseAutoFollowPattern) Header(key, value string) *PauseAutoFollowPattern { + r.headers.Set(key, value) + + return r +} + +// Name The name of the auto-follow pattern to pause. +// API Name: name +func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseAutoFollowPattern) MasterTimeout(duration string) *PauseAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PauseAutoFollowPattern) ErrorTrace(errortrace bool) *PauseAutoFollowPattern { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PauseAutoFollowPattern) FilterPath(filterpaths ...string) *PauseAutoFollowPattern { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled, the human
+// readable values will be omitted. This makes sense for responses consumed
+// only by machines.
+// API name: human
+func (r *PauseAutoFollowPattern) Human(human bool) *PauseAutoFollowPattern {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PauseAutoFollowPattern) Pretty(pretty bool) *PauseAutoFollowPattern {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/response.go
new file mode 100644
index 000000000..5e5d1d5b0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package pauseautofollowpattern
+
+// Response holds the response body struct for the package pauseautofollowpattern
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/pause_follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/pause_follow.go
new file mode 100644
index 000000000..a06460658
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/pause_follow.go
@@ -0,0 +1,369 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. +package pausefollow + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PauseFollow struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPauseFollow type alias for index. +type NewPauseFollow func(index string) *PauseFollow + +// NewPauseFollowFunc returns a new instance of PauseFollow with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPauseFollowFunc(tp elastictransport.Interface) NewPauseFollow { + return func(index string) *PauseFollow { + n := New(tp) + + n._index(index) + + return n + } +} + +// Pause a follower. +// +// Pause a cross-cluster replication follower index. +// The follower index will not fetch any additional operations from the leader +// index. +// You can resume following with the resume follower API. +// You can pause and resume a follower index to change the configuration of the +// following task. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow +func New(tp elastictransport.Interface) *PauseFollow { + r := &PauseFollow{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
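+//
+// Editorial sketch, not part of the generated API docs: tp stands for an
+// already configured elastictransport.Interface, and "follower-index" is an
+// illustrative follower index name.
+//
+//	pf := pausefollow.NewPauseFollowFunc(tp)("follower-index")
+//	req, err := pf.HttpRequest(context.Background())
+//	// on success, req is a POST to /follower-index/_ccr/pause_follow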
+func (r *PauseFollow) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("pause_follow") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PauseFollow) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.pause_follow") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.pause_follow") + if reader := instrument.RecordRequestBody(ctx, "ccr.pause_follow", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.pause_follow") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PauseFollow query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a pausefollow.Response +func (r PauseFollow) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PauseFollow) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.pause_follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PauseFollow query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PauseFollow headers map. +func (r *PauseFollow) Header(key, value string) *PauseFollow { + r.headers.Set(key, value) + + return r +} + +// Index The name of the follower index. +// API Name: index +func (r *PauseFollow) _index(index string) *PauseFollow { + r.paramSet |= indexMask + r.index = index + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PauseFollow) MasterTimeout(duration string) *PauseFollow { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PauseFollow) ErrorTrace(errortrace bool) *PauseFollow { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PauseFollow) FilterPath(filterpaths ...string) *PauseFollow { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PauseFollow) Human(human bool) *PauseFollow { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Use
+// this option for debugging only.
+// API name: pretty
+func (r *PauseFollow) Pretty(pretty bool) *PauseFollow {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/response.go
new file mode 100644
index 000000000..9bbbcd16a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package pausefollow
+
+// Response holds the response body struct for the package pausefollow
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go
new file mode 100644
index 000000000..a9440936c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go
@@ -0,0 +1,620 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create or update auto-follow patterns.
+// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. +package putautofollowpattern + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAutoFollowPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAutoFollowPattern type alias for index. +type NewPutAutoFollowPattern func(name string) *PutAutoFollowPattern + +// NewPutAutoFollowPatternFunc returns a new instance of PutAutoFollowPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAutoFollowPatternFunc(tp elastictransport.Interface) NewPutAutoFollowPattern { + return func(name string) *PutAutoFollowPattern { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update auto-follow patterns. +// Create a collection of cross-cluster replication auto-follow patterns for a +// remote cluster. +// Newly created indices on the remote cluster that match any of the patterns +// are automatically configured as follower indices. +// Indices on the remote cluster that were created before the auto-follow +// pattern was created will not be auto-followed even if they match the pattern. +// +// This API can also be used to update auto-follow patterns. +// NOTE: Follower indices that were configured automatically before updating an +// auto-follow pattern will remain unchanged even if they do not match against +// the new patterns. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern +func New(tp elastictransport.Interface) *PutAutoFollowPattern { + r := &PutAutoFollowPattern{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAutoFollowPattern) Raw(raw io.Reader) *PutAutoFollowPattern { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
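+//
+// Editorial sketch, not generated: the field names come from request.go later
+// in this diff, and the remote cluster alias "east" and pattern "leader-*"
+// are illustrative values.
+//
+//	body := putautofollowpattern.NewRequest()
+//	body.RemoteCluster = "east"
+//	body.LeaderIndexPatterns = []string{"leader-*"}
+//	r.Request(body)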
+func (r *PutAutoFollowPattern) Request(req *Request) *PutAutoFollowPattern { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAutoFollowPattern: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
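+//
+// Editorial note, not generated: the caller owns the response returned by
+// Perform and should drain and close its body (Do and IsSuccess handle this
+// themselves), e.g.:
+//
+//	res, err := r.Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}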
+func (r PutAutoFollowPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.put_auto_follow_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.put_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.put_auto_follow_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.put_auto_follow_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAutoFollowPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putautofollowpattern.Response +func (r PutAutoFollowPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.put_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAutoFollowPattern headers map. +func (r *PutAutoFollowPattern) Header(key, value string) *PutAutoFollowPattern { + r.headers.Set(key, value) + + return r +} + +// Name The name of the collection of auto-follow patterns. +// API Name: name +func (r *PutAutoFollowPattern) _name(name string) *PutAutoFollowPattern { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. 
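+// (As with the other CCR endpoints in this diff, if the master node is not
+// available before the timeout expires, the request fails and returns an
+// error.)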
+// API name: master_timeout
+func (r *PutAutoFollowPattern) MasterTimeout(duration string) *PutAutoFollowPattern {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutAutoFollowPattern) ErrorTrace(errortrace bool) *PutAutoFollowPattern {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutAutoFollowPattern) FilterPath(filterpaths ...string) *PutAutoFollowPattern {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the human
+// readable values will be omitted. This makes sense for responses consumed
+// only by machines.
+// API name: human
+func (r *PutAutoFollowPattern) Human(human bool) *PutAutoFollowPattern {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PutAutoFollowPattern) Pretty(pretty bool) *PutAutoFollowPattern {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The name of the follower index. The template {{leader_index}} can be used to
+// derive the name of the follower index from the name of the leader index. When
+// following a data stream, use {{leader_index}}; CCR does not support changes
+// to the names of a follower data stream’s backing indices.
+// API name: follow_index_pattern
+func (r *PutAutoFollowPattern) FollowIndexPattern(indexpattern string) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.FollowIndexPattern = &indexpattern
+
+	return r
+}
+
+// An array of simple index patterns that can be used to exclude indices from
+// being auto-followed. Indices in the remote cluster whose names match
+// one or more leader_index_patterns and one or more
+// leader_index_exclusion_patterns won’t be followed.
+// API name: leader_index_exclusion_patterns
+func (r *PutAutoFollowPattern) LeaderIndexExclusionPatterns(indexpatterns ...string) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.LeaderIndexExclusionPatterns = indexpatterns
+
+	return r
+}
+
+// An array of simple index patterns to match against indices in the remote
+// cluster specified by the remote_cluster field.
+// API name: leader_index_patterns
+func (r *PutAutoFollowPattern) LeaderIndexPatterns(indexpatterns ...string) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.LeaderIndexPatterns = indexpatterns
+
+	return r
+}
+
+// The maximum number of outstanding read requests from the remote cluster.
+// API name: max_outstanding_read_requests
+func (r *PutAutoFollowPattern) MaxOutstandingReadRequests(maxoutstandingreadrequests int) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests
+
+	return r
+}
+
+// The maximum number of outstanding write requests on the follower.
+// API name: max_outstanding_write_requests
+func (r *PutAutoFollowPattern) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests
+
+	return r
+}
+
+// The maximum number of operations to pull per read from the remote cluster.
+// API name: max_read_request_operation_count
+func (r *PutAutoFollowPattern) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount
+
+	return r
+}
+
+// The maximum size in bytes per read of a batch of operations pulled from
+// the remote cluster.
+// API name: max_read_request_size
+func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxReadRequestSize = *bytesize.ByteSizeCaster()
+
+	return r
+}
+
+// The maximum time to wait before retrying an operation that failed
+// exceptionally. An exponential backoff strategy is employed when retrying.
+// API name: max_retry_delay
+func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.DurationVariant) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxRetryDelay = *duration.DurationCaster()
+
+	return r
+}
+
+// The maximum number of operations that can be queued for writing. When this
+// limit is reached, reads from the remote cluster will be deferred until the
+// number of queued operations goes below the limit.
+// API name: max_write_buffer_count
+func (r *PutAutoFollowPattern) MaxWriteBufferCount(maxwritebuffercount int) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxWriteBufferCount = &maxwritebuffercount
+
+	return r
+}
+
+// The maximum total bytes of operations that can be queued for writing. When
+// this limit is reached, reads from the remote cluster will be deferred until
+// the total bytes of queued operations goes below the limit.
+// API name: max_write_buffer_size
+func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MaxWriteBufferSize = *bytesize.ByteSizeCaster()
+
+	return r
+}
+
+// The maximum number of operations per bulk write request executed on the
+// follower. 
+// API name: max_write_request_operation_count +func (r *PutAutoFollowPattern) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// The maximum total bytes of operations per bulk write request executed on the +// follower. +// API name: max_write_request_size +func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSizeVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = *bytesize.ByteSizeCaster() + + return r +} + +// The maximum time to wait for new operations on the remote cluster when the +// follower index is synchronized with the leader index. When the timeout has +// elapsed, the poll for operations will return to the follower so that it can +// update some statistics. Then the follower will immediately attempt to read +// from the leader again. +// API name: read_poll_timeout +func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.DurationVariant) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() + + return r +} + +// The remote cluster containing the leader indices to match against. +// API name: remote_cluster +func (r *PutAutoFollowPattern) RemoteCluster(remotecluster string) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoteCluster = remotecluster + + return r +} + +// Settings to override from the leader index. Note that certain settings can +// not be overrode (e.g., index.number_of_shards). +// API name: settings +func (r *PutAutoFollowPattern) Settings(settings map[string]json.RawMessage) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Settings = settings + return r +} + +func (r *PutAutoFollowPattern) AddSetting(key string, value json.RawMessage) *PutAutoFollowPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + + r.req.Settings = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/request.go new file mode 100644 index 000000000..4a13a9fbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/request.go @@ -0,0 +1,269 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putautofollowpattern
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package putautofollowpattern
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L133
+type Request struct {
+
+	// FollowIndexPattern The name of the follower index. The template {{leader_index}} can be used to
+	// derive the name of the follower index from the name of the leader index. When
+	// following a data stream, use {{leader_index}}; CCR does not support changes
+	// to the names of a follower data stream’s backing indices.
+	FollowIndexPattern *string `json:"follow_index_pattern,omitempty"`
+	// LeaderIndexExclusionPatterns An array of simple index patterns that can be used to exclude indices from
+	// being auto-followed. Indices in the remote cluster whose names match
+	// one or more leader_index_patterns and one or more
+	// leader_index_exclusion_patterns won’t be followed.
+	LeaderIndexExclusionPatterns []string `json:"leader_index_exclusion_patterns,omitempty"`
+	// LeaderIndexPatterns An array of simple index patterns to match against indices in the remote
+	// cluster specified by the remote_cluster field.
+	LeaderIndexPatterns []string `json:"leader_index_patterns,omitempty"`
+	// MaxOutstandingReadRequests The maximum number of outstanding read requests from the remote cluster.
+	MaxOutstandingReadRequests *int `json:"max_outstanding_read_requests,omitempty"`
+	// MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower.
+	MaxOutstandingWriteRequests *int `json:"max_outstanding_write_requests,omitempty"`
+	// MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster.
+	MaxReadRequestOperationCount *int `json:"max_read_request_operation_count,omitempty"`
+	// MaxReadRequestSize The maximum size in bytes per read of a batch of operations pulled from
+	// the remote cluster.
+	MaxReadRequestSize types.ByteSize `json:"max_read_request_size,omitempty"`
+	// MaxRetryDelay The maximum time to wait before retrying an operation that failed
+	// exceptionally. An exponential backoff strategy is employed when retrying.
+	MaxRetryDelay types.Duration `json:"max_retry_delay,omitempty"`
+	// MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this
+	// limit is reached, reads from the remote cluster will be deferred until the
+	// number of queued operations goes below the limit.
+	MaxWriteBufferCount *int `json:"max_write_buffer_count,omitempty"`
+	// MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. 
When + // this limit is reached, reads from the remote cluster will be deferred until + // the total bytes of queued operations goes below the limit. + MaxWriteBufferSize types.ByteSize `json:"max_write_buffer_size,omitempty"` + // MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the + // follower. + MaxWriteRequestOperationCount *int `json:"max_write_request_operation_count,omitempty"` + // MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the + // follower. + MaxWriteRequestSize types.ByteSize `json:"max_write_request_size,omitempty"` + // ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the + // follower index is synchronized with the leader index. When the timeout has + // elapsed, the poll for operations will return to the follower so that it can + // update some statistics. Then the follower will immediately attempt to read + // from the leader again. + ReadPollTimeout types.Duration `json:"read_poll_timeout,omitempty"` + // RemoteCluster The remote cluster containing the leader indices to match against. + RemoteCluster string `json:"remote_cluster"` + // Settings Settings to override from the leader index. Note that certain settings can + // not be overrode (e.g., index.number_of_shards). + Settings map[string]json.RawMessage `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Settings: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putautofollowpattern request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "follow_index_pattern": + if err := dec.Decode(&s.FollowIndexPattern); err != nil { + return fmt.Errorf("%s | %w", "FollowIndexPattern", err) + } + + case "leader_index_exclusion_patterns": + if err := dec.Decode(&s.LeaderIndexExclusionPatterns); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndexExclusionPatterns", err) + } + + case "leader_index_patterns": + if err := dec.Decode(&s.LeaderIndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndexPatterns", err) + } + + case "max_outstanding_read_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) + } + s.MaxOutstandingReadRequests = &value + case float64: + f := int(v) + s.MaxOutstandingReadRequests = &f + } + + case "max_outstanding_write_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) + } + s.MaxOutstandingWriteRequests = &value + case float64: + f := int(v) + s.MaxOutstandingWriteRequests = &f + } + + case "max_read_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", 
"MaxReadRequestOperationCount", err) + } + s.MaxReadRequestOperationCount = &value + case float64: + f := int(v) + s.MaxReadRequestOperationCount = &f + } + + case "max_read_request_size": + if err := dec.Decode(&s.MaxReadRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) + } + + case "max_retry_delay": + if err := dec.Decode(&s.MaxRetryDelay); err != nil { + return fmt.Errorf("%s | %w", "MaxRetryDelay", err) + } + + case "max_write_buffer_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) + } + s.MaxWriteBufferCount = &value + case float64: + f := int(v) + s.MaxWriteBufferCount = &f + } + + case "max_write_buffer_size": + if err := dec.Decode(&s.MaxWriteBufferSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) + } + + case "max_write_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) + } + s.MaxWriteRequestOperationCount = &value + case float64: + f := int(v) + s.MaxWriteRequestOperationCount = &f + } + + case "max_write_request_size": + if err := dec.Decode(&s.MaxWriteRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) + } + + case "read_poll_timeout": + if err := dec.Decode(&s.ReadPollTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadPollTimeout", err) + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + case "settings": + if s.Settings == nil { + s.Settings = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/response.go new file mode 100644 index 000000000..2a29a88d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putautofollowpattern + +// Response holds the response body struct for the package putautofollowpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/response.go new file mode 100644 index 000000000..e2519c6ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resumeautofollowpattern + +// Response holds the response body struct for the package resumeautofollowpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go new file mode 100644 index 000000000..28bfcf276 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. +package resumeautofollowpattern + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResumeAutoFollowPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResumeAutoFollowPattern type alias for index. +type NewResumeAutoFollowPattern func(name string) *ResumeAutoFollowPattern + +// NewResumeAutoFollowPatternFunc returns a new instance of ResumeAutoFollowPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResumeAutoFollowPatternFunc(tp elastictransport.Interface) NewResumeAutoFollowPattern { + return func(name string) *ResumeAutoFollowPattern { + n := New(tp) + + n._name(name) + + return n + } +} + +// Resume an auto-follow pattern. +// +// Resume a cross-cluster replication auto-follow pattern that was paused. +// The auto-follow pattern will resume configuring following indices for newly +// created indices that match its patterns on the remote cluster. +// Remote indices created while the pattern was paused will also be followed +// unless they have been deleted or closed in the interim. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern +func New(tp elastictransport.Interface) *ResumeAutoFollowPattern { + r := &ResumeAutoFollowPattern{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
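+//
+// Editorial sketch, not part of the generated API docs: tp stands for an
+// already configured elastictransport.Interface, and "my-pattern" is an
+// illustrative pattern name.
+//
+//	resume := resumeautofollowpattern.NewResumeAutoFollowPatternFunc(tp)("my-pattern")
+//	req, err := resume.HttpRequest(context.Background())
+//	// on success, req is a POST to /_ccr/auto_follow/my-pattern/resume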
+func (r *ResumeAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("auto_follow") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("resume") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ResumeAutoFollowPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.resume_auto_follow_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.resume_auto_follow_pattern") + if reader := instrument.RecordRequestBody(ctx, "ccr.resume_auto_follow_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.resume_auto_follow_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResumeAutoFollowPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resumeautofollowpattern.Response +func (r ResumeAutoFollowPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.resume_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ResumeAutoFollowPattern) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.resume_auto_follow_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ResumeAutoFollowPattern query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ResumeAutoFollowPattern headers map. +func (r *ResumeAutoFollowPattern) Header(key, value string) *ResumeAutoFollowPattern { + r.headers.Set(key, value) + + return r +} + +// Name The name of the auto-follow pattern to resume. +// API Name: name +func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *ResumeAutoFollowPattern) MasterTimeout(duration string) *ResumeAutoFollowPattern { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ResumeAutoFollowPattern) ErrorTrace(errortrace bool) *ResumeAutoFollowPattern { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResumeAutoFollowPattern) FilterPath(filterpaths ...string) *ResumeAutoFollowPattern { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResumeAutoFollowPattern) Human(human bool) *ResumeAutoFollowPattern { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ResumeAutoFollowPattern) Pretty(pretty bool) *ResumeAutoFollowPattern { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/request.go new file mode 100644 index 000000000..81bd135ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/request.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
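Editor's note, for orientation only (this sketch is not part of the vendored, generated file): the ccr.resume_auto_follow_pattern builder added above is normally reached through the v9 typed client rather than constructed directly. The client configuration, the address, and the pattern name "my-pattern" below are illustrative assumptions.

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// The typed client implements elastictransport.Interface, which is the
	// transport the generated builders under typedapi expect.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("building client: %v", err)
	}

	// Issues POST /_ccr/auto_follow/my-pattern/resume and decodes the
	// acknowledged body into the generated Response type.
	res, err := es.Ccr.ResumeAutoFollowPattern("my-pattern").Do(context.Background())
	if err != nil {
		log.Fatalf("resume auto-follow pattern: %v", err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}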
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resumefollow + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package resumefollow +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L65 +type Request struct { + MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` + MaxOutstandingWriteRequests *int64 `json:"max_outstanding_write_requests,omitempty"` + MaxReadRequestOperationCount *int64 `json:"max_read_request_operation_count,omitempty"` + MaxReadRequestSize *string `json:"max_read_request_size,omitempty"` + MaxRetryDelay types.Duration `json:"max_retry_delay,omitempty"` + MaxWriteBufferCount *int64 `json:"max_write_buffer_count,omitempty"` + MaxWriteBufferSize *string `json:"max_write_buffer_size,omitempty"` + MaxWriteRequestOperationCount *int64 `json:"max_write_request_operation_count,omitempty"` + MaxWriteRequestSize *string `json:"max_write_request_size,omitempty"` + ReadPollTimeout types.Duration `json:"read_poll_timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Resumefollow request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_outstanding_read_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) + } + s.MaxOutstandingReadRequests = &value + case float64: + f := int64(v) + s.MaxOutstandingReadRequests = &f + } + + case "max_outstanding_write_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) + } + s.MaxOutstandingWriteRequests = &value + case float64: + f := int64(v) + s.MaxOutstandingWriteRequests = &f + } + + case "max_read_request_operation_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestOperationCount", err) + } + s.MaxReadRequestOperationCount = &value + case float64: + f := int64(v) + s.MaxReadRequestOperationCount = &f + } + + case "max_read_request_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxReadRequestSize = &o + + case "max_retry_delay": + if err := dec.Decode(&s.MaxRetryDelay); err != nil { + return fmt.Errorf("%s | %w", "MaxRetryDelay", err) + } + + case "max_write_buffer_count": + var tmp 
any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) + } + s.MaxWriteBufferCount = &value + case float64: + f := int64(v) + s.MaxWriteBufferCount = &f + } + + case "max_write_buffer_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxWriteBufferSize = &o + + case "max_write_request_operation_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) + } + s.MaxWriteRequestOperationCount = &value + case float64: + f := int64(v) + s.MaxWriteRequestOperationCount = &f + } + + case "max_write_request_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxWriteRequestSize = &o + + case "read_poll_timeout": + if err := dec.Decode(&s.ReadPollTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadPollTimeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/response.go new file mode 100644 index 000000000..bc70654bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resumefollow + +// Response holds the response body struct for the package resumefollow +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/resume_follow.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/resume_follow.go new file mode 100644 index 000000000..c5b8d54b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow/resume_follow.go @@ -0,0 +1,498 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Resume a follower. +// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. +// When this API returns, the follower index will resume fetching operations +// from the leader index. +package resumefollow + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResumeFollow struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResumeFollow type alias for index. +type NewResumeFollow func(index string) *ResumeFollow + +// NewResumeFollowFunc returns a new instance of ResumeFollow with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResumeFollowFunc(tp elastictransport.Interface) NewResumeFollow { + return func(index string) *ResumeFollow { + n := New(tp) + + n._index(index) + + return n + } +} + +// Resume a follower. +// Resume a cross-cluster replication follower index that was paused. +// The follower index could have been paused with the pause follower API. +// Alternatively it could be paused due to replication that cannot be retried +// due to failures during following tasks. 
+// When this API returns, the follower index will resume fetching operations +// from the leader index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow +func New(tp elastictransport.Interface) *ResumeFollow { + r := &ResumeFollow{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ResumeFollow) Raw(raw io.Reader) *ResumeFollow { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ResumeFollow) Request(req *Request) *ResumeFollow { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ResumeFollow) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ResumeFollow: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("resume_follow") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
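Editor's note (illustrative aside, not part of the generated file): the Request and Raw plumbing above is how callers attach the optional throttling body. A minimal sketch, assuming the illustrative typed client from the earlier note and a follower index named "follower-index"; the chosen setting value is arbitrary.

package ccrexamples

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow"
)

func resumeFollower(es *elasticsearch.TypedClient) {
	// Build the optional body explicitly; the fluent setters further down
	// in this file (MaxReadRequestOperationCount and friends) allocate the
	// same Request lazily.
	maxOps := int64(5120)
	req := resumefollow.NewRequest()
	req.MaxReadRequestOperationCount = &maxOps

	res, err := es.Ccr.ResumeFollow("follower-index").Request(req).Do(context.Background())
	if err != nil {
		log.Fatalf("resume follow: %v", err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}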
+func (r ResumeFollow) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.resume_follow") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.resume_follow") + if reader := instrument.RecordRequestBody(ctx, "ccr.resume_follow", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.resume_follow") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResumeFollow query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resumefollow.Response +func (r ResumeFollow) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.resume_follow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ResumeFollow headers map. +func (r *ResumeFollow) Header(key, value string) *ResumeFollow { + r.headers.Set(key, value) + + return r +} + +// Index The name of the follow index to resume following. +// API Name: index +func (r *ResumeFollow) _index(index string) *ResumeFollow { + r.paramSet |= indexMask + r.index = index + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResumeFollow) MasterTimeout(duration string) *ResumeFollow { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *ResumeFollow) ErrorTrace(errortrace bool) *ResumeFollow { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResumeFollow) FilterPath(filterpaths ...string) *ResumeFollow { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResumeFollow) Human(human bool) *ResumeFollow { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ResumeFollow) Pretty(pretty bool) *ResumeFollow { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: max_outstanding_read_requests +func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests + + return r +} + +// API name: max_outstanding_write_requests +func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests + + return r +} + +// API name: max_read_request_operation_count +func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount + + return r +} + +// API name: max_read_request_size +func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxReadRequestSize = &maxreadrequestsize + + return r +} + +// API name: max_retry_delay +func (r *ResumeFollow) MaxRetryDelay(duration types.DurationVariant) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRetryDelay = *duration.DurationCaster() + + return r +} + +// API name: max_write_buffer_count +func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferCount = &maxwritebuffercount + + return r +} + +// API name: max_write_buffer_size +func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteBufferSize = &maxwritebuffersize + + return r +} + +// API name: 
max_write_request_operation_count +func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// API name: max_write_request_size +func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize string) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxWriteRequestSize = &maxwriterequestsize + + return r +} + +// API name: read_poll_timeout +func (r *ResumeFollow) ReadPollTimeout(duration types.DurationVariant) *ResumeFollow { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ReadPollTimeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/response.go new file mode 100644 index 000000000..9d4d1a6c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/stats/CcrStatsResponse.ts#L22-L29 +type Response struct { + + // AutoFollowStats Statistics for the auto-follow coordinator. + AutoFollowStats types.AutoFollowStats `json:"auto_follow_stats"` + // FollowStats Shard-level statistics for follower indices. + FollowStats types.FollowStats `json:"follow_stats"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/stats.go new file mode 100644 index 000000000..1833c2313 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats/stats.go @@ -0,0 +1,347 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get cross-cluster replication stats. +// +// This API returns stats about auto-following and the same shard-level stats as +// the get follower stats API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
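Editor's note (illustrative aside, not part of the generated file): a short sketch of reading /_ccr/stats through the typed client. It reuses the assumed client from the first note; the response field names come from the generated Response type in this diff.

package ccrexamples

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func printCcrStats(es *elasticsearch.TypedClient) {
	res, err := es.Ccr.Stats().Do(context.Background())
	if err != nil {
		log.Fatalf("ccr stats: %v", err)
	}
	// AutoFollowStats covers the auto-follow coordinator; FollowStats holds
	// the same shard-level data as the follower stats API.
	log.Printf("auto-follow stats: %+v", res.AutoFollowStats)
	log.Printf("follow stats: %+v", res.FollowStats)
}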
+func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.stats") + if reader := instrument.RecordRequestBody(ctx, "ccr.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/response.go new file mode 100644 index 000000000..6cc0b73a0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package unfollow + +// Response holds the response body struct for the package unfollow +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/unfollow.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/unfollow.go new file mode 100644 index 000000000..7b8c1a32d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow/unfollow.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. 
+// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. +package unfollow + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Unfollow struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUnfollow type alias for index. +type NewUnfollow func(index string) *Unfollow + +// NewUnfollowFunc returns a new instance of Unfollow with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUnfollowFunc(tp elastictransport.Interface) NewUnfollow { + return func(index string) *Unfollow { + n := New(tp) + + n._index(index) + + return n + } +} + +// Unfollow an index. +// +// Convert a cross-cluster replication follower index to a regular index. +// The API stops the following task associated with a follower index and removes +// index metadata and settings associated with cross-cluster replication. +// The follower index must be paused and closed before you call the unfollow +// API. +// +// > info +// > Currently cross-cluster replication does not support converting an existing +// regular index to a follower index. Converting a follower index to a regular +// index is an irreversible operation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow +func New(tp elastictransport.Interface) *Unfollow { + r := &Unfollow{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
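Editor's note (illustrative aside, not part of the generated file): the package documentation above prescribes pausing and closing the follower before unfollowing. A hedged sketch of that sequence, assuming the illustrative typed client and the pause-follow and indices-close endpoints exposed elsewhere in the typed API; per the note above, unfollow is irreversible.

package ccrexamples

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func convertToRegularIndex(es *elasticsearch.TypedClient, index string) error {
	ctx := context.Background()

	// 1. Stop replication on the follower.
	if _, err := es.Ccr.PauseFollow(index).Do(ctx); err != nil {
		return fmt.Errorf("pause follow: %w", err)
	}
	// 2. Close the index so its CCR metadata and settings can be removed.
	if _, err := es.Indices.Close(index).Do(ctx); err != nil {
		return fmt.Errorf("close index: %w", err)
	}
	// 3. Convert it to a regular index. This cannot be undone.
	if _, err := es.Ccr.Unfollow(index).Do(ctx); err != nil {
		return fmt.Errorf("unfollow: %w", err)
	}
	return nil
}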
+func (r *Unfollow) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ccr") + path.WriteString("/") + path.WriteString("unfollow") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Unfollow) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ccr.unfollow") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ccr.unfollow") + if reader := instrument.RecordRequestBody(ctx, "ccr.unfollow", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ccr.unfollow") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Unfollow query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a unfollow.Response +func (r Unfollow) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.unfollow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Unfollow) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ccr.unfollow") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Unfollow query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Unfollow headers map. +func (r *Unfollow) Header(key, value string) *Unfollow { + r.headers.Set(key, value) + + return r +} + +// Index The name of the follower index. +// API Name: index +func (r *Unfollow) _index(index string) *Unfollow { + r.paramSet |= indexMask + r.index = index + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *Unfollow) MasterTimeout(duration string) *Unfollow { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Unfollow) ErrorTrace(errortrace bool) *Unfollow { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Unfollow) FilterPath(filterpaths ...string) *Unfollow { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Unfollow) Human(human bool) *Unfollow { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Unfollow) Pretty(pretty bool) *Unfollow { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/allocation_explain.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/allocation_explain.go new file mode 100644 index 000000000..55afa418a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/allocation_explain.go @@ -0,0 +1,436 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. +// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. +// Refer to the linked documentation for examples of how to troubleshoot +// allocation issues using this API. +package allocationexplain + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AllocationExplain struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAllocationExplain type alias for index. +type NewAllocationExplain func() *AllocationExplain + +// NewAllocationExplainFunc returns a new instance of AllocationExplain with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAllocationExplainFunc(tp elastictransport.Interface) NewAllocationExplain { + return func() *AllocationExplain { + n := New(tp) + + return n + } +} + +// Explain the shard allocations. +// Get explanations for shard allocations in the cluster. 
+// For unassigned shards, it provides an explanation for why the shard is +// unassigned. +// For assigned shards, it provides an explanation for why the shard is +// remaining on its current node and has not moved or rebalanced to another +// node. +// This API can be very useful when attempting to diagnose why a shard is +// unassigned or why a shard continues to remain on its current node when you +// might expect otherwise. +// Refer to the linked documentation for examples of how to troubleshoot +// allocation issues using this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain +func New(tp elastictransport.Interface) *AllocationExplain { + r := &AllocationExplain{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *AllocationExplain) Raw(raw io.Reader) *AllocationExplain { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *AllocationExplain) Request(req *Request) *AllocationExplain { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AllocationExplain) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for AllocationExplain: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("allocation") + path.WriteString("/") + path.WriteString("explain") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
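// NOTE(editor): a minimal usage sketch for this typed API. It assumes a
// configured *elasticsearch.TypedClient named "es" and the usual "context",
// "fmt", and "log" imports; the client name and setup are illustrative and
// not part of this generated file:
//
//	res, err := es.Cluster.AllocationExplain().
//		Index("my-index").
//		Shard(0).
//		Primary(true).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	if res.CanAllocate != nil {
//		fmt.Println("can_allocate:", *res.CanAllocate)
//	}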
+func (r AllocationExplain) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.allocation_explain") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.allocation_explain") + if reader := instrument.RecordRequestBody(ctx, "cluster.allocation_explain", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.allocation_explain") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AllocationExplain query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a allocationexplain.Response +func (r AllocationExplain) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.allocation_explain") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the AllocationExplain headers map. +func (r *AllocationExplain) Header(key, value string) *AllocationExplain { + r.headers.Set(key, value) + + return r +} + +// IncludeDiskInfo If true, returns information about disk usage and shard sizes. +// API name: include_disk_info +func (r *AllocationExplain) IncludeDiskInfo(includediskinfo bool) *AllocationExplain { + r.values.Set("include_disk_info", strconv.FormatBool(includediskinfo)) + + return r +} + +// IncludeYesDecisions If true, returns YES decisions in explanation. 
+// API name: include_yes_decisions +func (r *AllocationExplain) IncludeYesDecisions(includeyesdecisions bool) *AllocationExplain { + r.values.Set("include_yes_decisions", strconv.FormatBool(includeyesdecisions)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *AllocationExplain) MasterTimeout(duration string) *AllocationExplain { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AllocationExplain) ErrorTrace(errortrace bool) *AllocationExplain { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AllocationExplain) FilterPath(filterpaths ...string) *AllocationExplain { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AllocationExplain) Human(human bool) *AllocationExplain { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AllocationExplain) Pretty(pretty bool) *AllocationExplain { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Specifies the node ID or the name of the node to only explain a shard that is +// currently located on the specified node. +// API name: current_node +func (r *AllocationExplain) CurrentNode(currentnode string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CurrentNode = ¤tnode + + return r +} + +// Specifies the name of the index that you would like an explanation for. +// API name: index +func (r *AllocationExplain) Index(indexname string) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Index = &indexname + + return r +} + +// If true, returns explanation for the primary shard for the given shard ID. +// API name: primary +func (r *AllocationExplain) Primary(primary bool) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Primary = &primary + + return r +} + +// Specifies the ID of the shard that you would like an explanation for. 
+// API name: shard +func (r *AllocationExplain) Shard(shard int) *AllocationExplain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Shard = &shard + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/request.go new file mode 100644 index 000000000..1d0707d9f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/request.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package allocationexplain + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package allocationexplain +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L25-L81 +type Request struct { + + // CurrentNode Specifies the node ID or the name of the node to only explain a shard that is + // currently located on the specified node. + CurrentNode *string `json:"current_node,omitempty"` + // Index Specifies the name of the index that you would like an explanation for. + Index *string `json:"index,omitempty"` + // Primary If true, returns explanation for the primary shard for the given shard ID. + Primary *bool `json:"primary,omitempty"` + // Shard Specifies the ID of the shard that you would like an explanation for. 
+ Shard *int `json:"shard,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Allocationexplain request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CurrentNode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CurrentNode = &o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + s.Primary = &value + case bool: + s.Primary = &v + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = &value + case float64: + f := int(v) + s.Shard = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/response.go new file mode 100644 index 000000000..87a1c3873 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain/response.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package allocationexplain + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision" +) + +// Response holds the response body struct for the package allocationexplain +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 +type Response struct { + AllocateExplanation *string `json:"allocate_explanation,omitempty"` + AllocationDelay types.Duration `json:"allocation_delay,omitempty"` + AllocationDelayInMillis *int64 `json:"allocation_delay_in_millis,omitempty"` + CanAllocate *decision.Decision `json:"can_allocate,omitempty"` + CanMoveToOtherNode *decision.Decision `json:"can_move_to_other_node,omitempty"` + CanRebalanceCluster *decision.Decision `json:"can_rebalance_cluster,omitempty"` + CanRebalanceClusterDecisions []types.AllocationDecision `json:"can_rebalance_cluster_decisions,omitempty"` + CanRebalanceToOtherNode *decision.Decision `json:"can_rebalance_to_other_node,omitempty"` + CanRemainDecisions []types.AllocationDecision `json:"can_remain_decisions,omitempty"` + CanRemainOnCurrentNode *decision.Decision `json:"can_remain_on_current_node,omitempty"` + ClusterInfo *types.ClusterInfo `json:"cluster_info,omitempty"` + ConfiguredDelay types.Duration `json:"configured_delay,omitempty"` + ConfiguredDelayInMillis *int64 `json:"configured_delay_in_millis,omitempty"` + CurrentNode *types.CurrentNode `json:"current_node,omitempty"` + CurrentState string `json:"current_state"` + Index string `json:"index"` + MoveExplanation *string `json:"move_explanation,omitempty"` + NodeAllocationDecisions []types.NodeAllocationExplanation `json:"node_allocation_decisions,omitempty"` + Note *string `json:"note,omitempty"` + Primary bool `json:"primary"` + RebalanceExplanation *string `json:"rebalance_explanation,omitempty"` + RemainingDelay types.Duration `json:"remaining_delay,omitempty"` + RemainingDelayInMillis *int64 `json:"remaining_delay_in_millis,omitempty"` + Shard int `json:"shard"` + UnassignedInfo *types.UnassignedInformation `json:"unassigned_info,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/delete_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/delete_component_template.go new file mode 100644 index 000000000..abf3e4bf8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/delete_component_template.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete component templates. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +package deletecomponenttemplate + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteComponentTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteComponentTemplate type alias for index. +type NewDeleteComponentTemplate func(name string) *DeleteComponentTemplate + +// NewDeleteComponentTemplateFunc returns a new instance of DeleteComponentTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteComponentTemplateFunc(tp elastictransport.Interface) NewDeleteComponentTemplate { + return func(name string) *DeleteComponentTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete component templates. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func New(tp elastictransport.Interface) *DeleteComponentTemplate { + r := &DeleteComponentTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
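// NOTE(editor): a hedged usage sketch for this endpoint, again assuming an
// illustrative *elasticsearch.TypedClient named "es". The required template
// name is passed when the request is created, and Do (defined below) decodes
// the acknowledgement:
//
//	res, err := es.Cluster.DeleteComponentTemplate("my-template").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("acknowledged:", res.Acknowledged)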
+func (r *DeleteComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteComponentTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.delete_component_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.delete_component_template") + if reader := instrument.RecordRequestBody(ctx, "cluster.delete_component_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.delete_component_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteComponentTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletecomponenttemplate.Response +func (r DeleteComponentTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.delete_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, 
nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteComponentTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.delete_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteComponentTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteComponentTemplate headers map. +func (r *DeleteComponentTemplate) Header(key, value string) *DeleteComponentTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list or wildcard expression of component template names used +// to limit the request. +// API Name: name +func (r *DeleteComponentTemplate) _name(name string) *DeleteComponentTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteComponentTemplate) MasterTimeout(duration string) *DeleteComponentTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeleteComponentTemplate) Timeout(duration string) *DeleteComponentTemplate { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteComponentTemplate) ErrorTrace(errortrace bool) *DeleteComponentTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DeleteComponentTemplate) FilterPath(filterpaths ...string) *DeleteComponentTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteComponentTemplate) Human(human bool) *DeleteComponentTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteComponentTemplate) Pretty(pretty bool) *DeleteComponentTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/response.go new file mode 100644 index 000000000..264ddfd11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletecomponenttemplate + +// Response holds the response body struct for the package deletecomponenttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go new file mode 100644 index 000000000..5faa6191c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go @@ -0,0 +1,293 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. +package deletevotingconfigexclusions + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteVotingConfigExclusions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteVotingConfigExclusions type alias for index. +type NewDeleteVotingConfigExclusions func() *DeleteVotingConfigExclusions + +// NewDeleteVotingConfigExclusionsFunc returns a new instance of DeleteVotingConfigExclusions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteVotingConfigExclusionsFunc(tp elastictransport.Interface) NewDeleteVotingConfigExclusions { + return func() *DeleteVotingConfigExclusions { + n := New(tp) + + return n + } +} + +// Clear cluster voting config exclusions. +// Remove master-eligible nodes from the voting configuration exclusion list. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions +func New(tp elastictransport.Interface) *DeleteVotingConfigExclusions { + r := &DeleteVotingConfigExclusions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteVotingConfigExclusions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("voting_config_exclusions") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteVotingConfigExclusions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.delete_voting_config_exclusions") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.delete_voting_config_exclusions") + if reader := instrument.RecordRequestBody(ctx, "cluster.delete_voting_config_exclusions", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.delete_voting_config_exclusions") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteVotingConfigExclusions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletevotingconfigexclusions.Response +func (r DeleteVotingConfigExclusions) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
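// NOTE(editor): for this endpoint Do simply delegates to IsSuccess (as shown
// above), so the call returns a plain bool; a sketch under the same
// illustrative "es" client assumption:
//
//	ok, err := es.Cluster.DeleteVotingConfigExclusions().
//		WaitForRemoval(true).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("exclusions cleared:", ok)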
+func (r DeleteVotingConfigExclusions) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.delete_voting_config_exclusions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteVotingConfigExclusions query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteVotingConfigExclusions headers map. +func (r *DeleteVotingConfigExclusions) Header(key, value string) *DeleteVotingConfigExclusions { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *DeleteVotingConfigExclusions) MasterTimeout(duration string) *DeleteVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForRemoval Specifies whether to wait for all excluded nodes to be removed from the +// cluster before clearing the voting configuration exclusions list. +// Defaults to true, meaning that all excluded nodes must be removed from +// the cluster before this API takes any action. If set to false then the +// voting configuration exclusions list is cleared even if some excluded +// nodes are still in the cluster. +// API name: wait_for_removal +func (r *DeleteVotingConfigExclusions) WaitForRemoval(waitforremoval bool) *DeleteVotingConfigExclusions { + r.values.Set("wait_for_removal", strconv.FormatBool(waitforremoval)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteVotingConfigExclusions) ErrorTrace(errortrace bool) *DeleteVotingConfigExclusions { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteVotingConfigExclusions) FilterPath(filterpaths ...string) *DeleteVotingConfigExclusions { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteVotingConfigExclusions) Human(human bool) *DeleteVotingConfigExclusions { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteVotingConfigExclusions) Pretty(pretty bool) *DeleteVotingConfigExclusions { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/existscomponenttemplate/exists_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/existscomponenttemplate/exists_component_template.go new file mode 100644 index 000000000..fef29743b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/existscomponenttemplate/exists_component_template.go @@ -0,0 +1,313 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check component templates. +// Returns information about whether a particular component template exists. +package existscomponenttemplate + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExistsComponentTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExistsComponentTemplate type alias for index. +type NewExistsComponentTemplate func(name string) *ExistsComponentTemplate + +// NewExistsComponentTemplateFunc returns a new instance of ExistsComponentTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsComponentTemplateFunc(tp elastictransport.Interface) NewExistsComponentTemplate { + return func(name string) *ExistsComponentTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Check component templates. +// Returns information about whether a particular component template exists. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func New(tp elastictransport.Interface) *ExistsComponentTemplate { + r := &ExistsComponentTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExistsComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExistsComponentTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.exists_component_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.exists_component_template") + if reader := instrument.RecordRequestBody(ctx, "cluster.exists_component_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.exists_component_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExistsComponentTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a existscomponenttemplate.Response +func (r ExistsComponentTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
+// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ExistsComponentTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.exists_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExistsComponentTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExistsComponentTemplate headers map. +func (r *ExistsComponentTemplate) Header(key, value string) *ExistsComponentTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of component template names used to limit the request. +// Wildcard (*) expressions are supported. +// API Name: name +func (r *ExistsComponentTemplate) _name(name string) *ExistsComponentTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *ExistsComponentTemplate) MasterTimeout(duration string) *ExistsComponentTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// Local If true, the request retrieves information from the local node only. +// Defaults to false, which means information is retrieved from the master node. +// API name: local +func (r *ExistsComponentTemplate) Local(local bool) *ExistsComponentTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExistsComponentTemplate) ErrorTrace(errortrace bool) *ExistsComponentTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExistsComponentTemplate) FilterPath(filterpaths ...string) *ExistsComponentTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExistsComponentTemplate) Human(human bool) *ExistsComponentTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *ExistsComponentTemplate) Pretty(pretty bool) *ExistsComponentTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/get_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/get_component_template.go new file mode 100644 index 000000000..b132996dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/get_component_template.go @@ -0,0 +1,396 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get component templates. +// Get information about component templates. +package getcomponenttemplate + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetComponentTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetComponentTemplate type alias for index. +type NewGetComponentTemplate func() *GetComponentTemplate + +// NewGetComponentTemplateFunc returns a new instance of GetComponentTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetComponentTemplateFunc(tp elastictransport.Interface) NewGetComponentTemplate { + return func() *GetComponentTemplate { + n := New(tp) + + return n + } +} + +// Get component templates. +// Get information about component templates. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func New(tp elastictransport.Interface) *GetComponentTemplate { + r := &GetComponentTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_component_template") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
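// NOTE(editor): on non-2xx responses, Do (defined below) returns the decoded
// *types.ElasticsearchError as the error value, so callers can branch on the
// HTTP status; a hedged sketch assuming the illustrative "es" client plus an
// "errors" import:
//
//	res, err := es.Cluster.GetComponentTemplate().
//		Name("my-template").
//		Do(context.Background())
//	if err != nil {
//		var esErr *types.ElasticsearchError
//		if errors.As(err, &esErr) && esErr.Status == 404 {
//			// the component template does not exist
//		}
//		log.Fatal(err)
//	}
//	for _, tpl := range res.ComponentTemplates {
//		fmt.Println(tpl.Name)
//	}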
+func (r GetComponentTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.get_component_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.get_component_template") + if reader := instrument.RecordRequestBody(ctx, "cluster.get_component_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.get_component_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetComponentTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getcomponenttemplate.Response +func (r GetComponentTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
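// NOTE(editor): IsSuccess (below) treats 404 as a miss rather than an error
// and returns (false, nil), which makes it convenient for quick existence
// checks; a sketch under the same illustrative "es" client assumption:
//
//	found, err := es.Cluster.GetComponentTemplate().
//		Name("my-template").
//		IsSuccess(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("template exists:", found)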
+func (r GetComponentTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetComponentTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetComponentTemplate headers map. +func (r *GetComponentTemplate) Header(key, value string) *GetComponentTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of component template names used to limit the request. +// Wildcard (`*`) expressions are supported. +// API Name: name +func (r *GetComponentTemplate) Name(name string) *GetComponentTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// FlatSettings If `true`, returns settings in flat format. +// API name: flat_settings +func (r *GetComponentTemplate) FlatSettings(flatsettings bool) *GetComponentTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// SettingsFilter Filter out results, for example to filter out sensitive information. Supports +// wildcards or full settings keys +// API name: settings_filter +func (r *GetComponentTemplate) SettingsFilter(settingsfilters ...string) *GetComponentTemplate { + tmp := []string{} + for _, item := range settingsfilters { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("settings_filter", strings.Join(tmp, ",")) + + return r +} + +// IncludeDefaults Return all default configurations for the component template (default: false) +// API name: include_defaults +func (r *GetComponentTemplate) IncludeDefaults(includedefaults bool) *GetComponentTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. +// If `false`, information is retrieved from the master node. +// API name: local +func (r *GetComponentTemplate) Local(local bool) *GetComponentTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetComponentTemplate) MasterTimeout(duration string) *GetComponentTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetComponentTemplate) ErrorTrace(errortrace bool) *GetComponentTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetComponentTemplate) FilterPath(filterpaths ...string) *GetComponentTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetComponentTemplate) Human(human bool) *GetComponentTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetComponentTemplate) Pretty(pretty bool) *GetComponentTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/response.go new file mode 100644 index 000000000..79ea5752c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcomponenttemplate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getcomponenttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 +type Response struct { + ComponentTemplates []types.ClusterComponentTemplate `json:"component_templates"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/get_settings.go new file mode 100644 index 000000000..32855214e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/get_settings.go @@ -0,0 +1,361 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cluster-wide settings. +// +// By default, it returns only settings that have been explicitly defined. +package getsettings + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSettings type alias for index. +type NewGetSettings func() *GetSettings + +// NewGetSettingsFunc returns a new instance of GetSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { + return func() *GetSettings { + n := New(tp) + + return n + } +} + +// Get cluster-wide settings. +// +// By default, it returns only settings that have been explicitly defined. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings +func New(tp elastictransport.Interface) *GetSettings { + r := &GetSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
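Since the response type for this package (shown in response.go later in this diff) exposes the `persistent`, `transient`, and optional `defaults` maps as raw JSON, a caller typically requests flat keys and unmarshals only the setting it cares about. A sketch under those assumptions (the setting key and helper name are examples, not part of the vendored code):

```go
package example

import (
	"context"
	"encoding/json"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// allocationEnable reads one persistent cluster setting, relying on
// flat_settings=true so keys arrive dotted rather than nested.
func allocationEnable(ctx context.Context, es *elasticsearch.TypedClient) (string, bool, error) {
	res, err := es.Cluster.GetSettings().FlatSettings(true).Do(ctx)
	if err != nil {
		return "", false, err
	}
	raw, ok := res.Persistent["cluster.routing.allocation.enable"]
	if !ok {
		return "", false, nil // not explicitly set; defaults were not requested
	}
	var v string
	if err := json.Unmarshal(raw, &v); err != nil {
		return "", false, err
	}
	return v, true, nil
}
```

The `/_cluster/settings` path this relies on is assembled by HttpRequest, which continues below.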
+func (r *GetSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.get_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.get_settings") + if reader := instrument.RecordRequestBody(ctx, "cluster.get_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.get_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsettings.Response +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + 
if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetSettings) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSettings query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSettings headers map. +func (r *GetSettings) Header(key, value string) *GetSettings { + r.headers.Set(key, value) + + return r +} + +// FlatSettings If `true`, returns settings in flat format. +// API name: flat_settings +func (r *GetSettings) FlatSettings(flatsettings bool) *GetSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IncludeDefaults If `true`, returns default cluster settings from the local node. +// API name: include_defaults +func (r *GetSettings) IncludeDefaults(includedefaults bool) *GetSettings { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *GetSettings) Timeout(duration string) *GetSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/response.go new file mode 100644 index 000000000..f36d433d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsettings + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L32 +type Response struct { + + // Defaults The default setting values. + Defaults map[string]json.RawMessage `json:"defaults,omitempty"` + // Persistent The settings that persist after the cluster restarts. + Persistent map[string]json.RawMessage `json:"persistent"` + // Transient The settings that do not persist after the cluster restarts. + Transient map[string]json.RawMessage `json:"transient"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Defaults: make(map[string]json.RawMessage, 0), + Persistent: make(map[string]json.RawMessage, 0), + Transient: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/health.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/health.go new file mode 100644 index 000000000..5015af39a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/health.go @@ -0,0 +1,495 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the cluster health status. +// +// You can also use the API to get the health status of only specified data +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. +// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. +package health + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/waitforevents" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Health struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHealth type alias for index. +type NewHealth func() *Health + +// NewHealthFunc returns a new instance of Health with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHealthFunc(tp elastictransport.Interface) NewHealth { + return func() *Health { + n := New(tp) + + return n + } +} + +// Get the cluster health status. +// +// You can also use the API to get the health status of only specified data +// streams and indices. +// For data streams, the API retrieves the health status of the stream’s backing +// indices. +// +// The cluster health status is: green, yellow or red. +// On the shard level, a red status indicates that the specific shard is not +// allocated in the cluster. Yellow means that the primary shard is allocated +// but replicas are not. Green means that all shards are allocated. +// The index level status is controlled by the worst shard status. 
+// +// One of the main benefits of the API is the ability to wait until the cluster +// reaches a certain high watermark health level. +// The cluster status is controlled by the worst index status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health +func New(tp elastictransport.Interface) *Health { + r := &Health{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Health) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("health") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("health") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Health) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.health") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.health") + if reader := instrument.RecordRequestBody(ctx, "cluster.health", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.health") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Health query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a health.Response +func (r Health) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 || slices.Contains([]int{408}, res.StatusCode) { + + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
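Note the one deviation from the other endpoints in Do above: 408 is accepted alongside 2xx (`slices.Contains([]int{408}, res.StatusCode)`), because a health request whose `wait_for_*` condition is not met before the timeout responds 408 with an ordinary health body and `timed_out: true`, which Do still decodes. A sketch that leans on this behavior (the status choice and timeout are illustrative assumptions):

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus"
)

// waitAtLeastYellow blocks server-side until the cluster reaches yellow
// (or better) or 30s elapse; on timeout Do still returns a decoded response.
func waitAtLeastYellow(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Cluster.Health().
		WaitForStatus(healthstatus.Yellow).
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	if res.TimedOut {
		return fmt.Errorf("cluster still %s after 30s", res.Status)
	}
	return nil
}
```

IsSuccess, defined next, is the payload-free shortcut when only a boolean is needed.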
+func (r Health) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.health") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Health query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Health headers map. +func (r *Health) Header(key, value string) *Health { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and index aliases used to +// limit the request. Wildcard expressions (`*`) are supported. To target all +// data streams and indices in a cluster, omit this parameter or use _all or +// `*`. +// API Name: index +func (r *Health) Index(index string) *Health { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. +// API name: expand_wildcards +func (r *Health) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Health { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Level Can be one of cluster, indices or shards. Controls the details level of the +// health information returned. +// API name: level +func (r *Health) Level(level level.Level) *Health { + r.values.Set("level", level.String()) + + return r +} + +// Local If true, the request retrieves information from the local node only. Defaults +// to false, which means information is retrieved from the master node. +// API name: local +func (r *Health) Local(local bool) *Health { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *Health) MasterTimeout(duration string) *Health { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Health) Timeout(duration string) *Health { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards A number controlling to how many active shards to wait for, all to wait for +// all shards in the cluster to be active, or 0 to not wait. +// API name: wait_for_active_shards +func (r *Health) WaitForActiveShards(waitforactiveshards string) *Health { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// WaitForEvents Can be one of immediate, urgent, high, normal, low, languid. Wait until all +// currently queued events with the given priority are processed. 

+// API name: wait_for_events
+func (r *Health) WaitForEvents(waitforevents waitforevents.WaitForEvents) *Health {
+	r.values.Set("wait_for_events", waitforevents.String())
+
+	return r
+}
+
+// WaitForNodes The request waits until the specified number N of nodes is available. It also
+// accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and
+// lt(N) notation.
+// API name: wait_for_nodes
+func (r *Health) WaitForNodes(waitfornodes string) *Health {
+	r.values.Set("wait_for_nodes", waitfornodes)
+
+	return r
+}
+
+// WaitForNoInitializingShards A boolean value which controls whether to wait (until the timeout
+// provided) for the cluster to have no shard initializations. Defaults to false, which means it
+// will not wait for initializing shards.
+// API name: wait_for_no_initializing_shards
+func (r *Health) WaitForNoInitializingShards(waitfornoinitializingshards bool) *Health {
+	r.values.Set("wait_for_no_initializing_shards", strconv.FormatBool(waitfornoinitializingshards))
+
+	return r
+}
+
+// WaitForNoRelocatingShards A boolean value which controls whether to wait (until the timeout
+// provided) for the cluster to have no shard relocations. Defaults to false, which means it
+// will not wait for relocating shards.
+// API name: wait_for_no_relocating_shards
+func (r *Health) WaitForNoRelocatingShards(waitfornorelocatingshards bool) *Health {
+	r.values.Set("wait_for_no_relocating_shards", strconv.FormatBool(waitfornorelocatingshards))
+
+	return r
+}
+
+// WaitForStatus One of green, yellow or red. Will wait (until the timeout provided) until the
+// status of the cluster changes to the one provided or better, i.e. green > yellow > red.
+// By default, will not wait for any status.
+// API name: wait_for_status
+func (r *Health) WaitForStatus(waitforstatus healthstatus.HealthStatus) *Health {
+	r.values.Set("wait_for_status", waitforstatus.String())
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Health) ErrorTrace(errortrace bool) *Health {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Health) FilterPath(filterpaths ...string) *Health {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Health) Human(human bool) *Health {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Health) Pretty(pretty bool) *Health {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/response.go
new file mode 100644
index 000000000..226fb9eb4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health/response.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package health + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// Response holds the response body struct for the package health +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/health/ClusterHealthResponse.ts#L26-L38 +type Response struct { + + // ActivePrimaryShards The number of active primary shards. + ActivePrimaryShards int `json:"active_primary_shards"` + // ActiveShards The total number of active primary and replica shards. + ActiveShards int `json:"active_shards"` + // ActiveShardsPercent The ratio of active shards in the cluster expressed as a string formatted + // percentage. + ActiveShardsPercent *string `json:"active_shards_percent,omitempty"` + // ActiveShardsPercentAsNumber The ratio of active shards in the cluster expressed as a percentage. + ActiveShardsPercentAsNumber types.Float64 `json:"active_shards_percent_as_number"` + // ClusterName The name of the cluster. + ClusterName string `json:"cluster_name"` + // DelayedUnassignedShards The number of shards whose allocation has been delayed by the timeout + // settings. + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + Indices map[string]types.IndexHealthStats `json:"indices,omitempty"` + // InitializingShards The number of shards that are under initialization. + InitializingShards int `json:"initializing_shards"` + // NumberOfDataNodes The number of nodes that are dedicated data nodes. + NumberOfDataNodes int `json:"number_of_data_nodes"` + // NumberOfInFlightFetch The number of unfinished fetches. + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + // NumberOfNodes The number of nodes within the cluster. + NumberOfNodes int `json:"number_of_nodes"` + // NumberOfPendingTasks The number of cluster-level changes that have not yet been executed. + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + // RelocatingShards The number of shards that are under relocation. + RelocatingShards int `json:"relocating_shards"` + Status healthstatus.HealthStatus `json:"status"` + // TaskMaxWaitingInQueue The time since the earliest initiated task is waiting for being performed. + TaskMaxWaitingInQueue types.Duration `json:"task_max_waiting_in_queue,omitempty"` + // TaskMaxWaitingInQueueMillis The time expressed in milliseconds since the earliest initiated task is + // waiting for being performed. + TaskMaxWaitingInQueueMillis int64 `json:"task_max_waiting_in_queue_millis"` + // TimedOut If false the response returned within the period of time that is specified by + // the timeout parameter (30s by default) + TimedOut bool `json:"timed_out"` + // UnassignedPrimaryShards The number of primary shards that are not allocated. + UnassignedPrimaryShards int `json:"unassigned_primary_shards"` + // UnassignedShards The number of shards that are not allocated. 
+ UnassignedShards int `json:"unassigned_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.IndexHealthStats, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/info.go new file mode 100644 index 000000000..0c97c3a3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/info.go @@ -0,0 +1,345 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cluster info. +// Returns basic information about the cluster. +package info + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + targetMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + target string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInfo type alias for index. +type NewInfo func(target string) *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func(target string) *Info { + n := New(tp) + + n._target(target) + + return n + } +} + +// Get cluster info. +// Returns basic information about the cluster. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
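The target passed to `/_info/<target>` determines which of the optional response blocks (`http`, `ingest`, `script`, `thread_pool`, shown in this package's response.go later in the diff) is populated, so callers should nil-check the block they asked for. A hedged sketch under that assumption (the helper name and field access follow the types package as I understand it):

```go
package example

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// openHTTPConnections requests only the "http" block and nil-checks it,
// since every section of the cluster info response is optional.
func openHTTPConnections(ctx context.Context, es *elasticsearch.TypedClient) (int, error) {
	res, err := es.Cluster.Info("http").Do(ctx)
	if err != nil {
		return 0, err
	}
	if res.Http == nil || res.Http.CurrentOpen == nil {
		return 0, nil
	}
	return *res.Http.CurrentOpen, nil
}
```

The `_info/{target}` path itself is assembled by HttpRequest, which continues below.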
+func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == targetMask: + path.WriteString("/") + path.WriteString("_info") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.target) + } + path.WriteString(r.target) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Info) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.info") + if reader := instrument.RecordRequestBody(ctx, "cluster.info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Info query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { 
+ instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Info) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Info query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// Target Limits the information returned to the specific target. Supports a +// comma-separated list, such as http,ingest. +// API Name: target +func (r *Info) _target(target string) *Info { + r.paramSet |= targetMask + r.target = target + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Info) ErrorTrace(errortrace bool) *Info { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Info) FilterPath(filterpaths ...string) *Info { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Info) Pretty(pretty bool) *Info { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/response.go new file mode 100644 index 000000000..229716ce3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package info + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 +type Response struct { + ClusterName string `json:"cluster_name"` + Http *types.Http `json:"http,omitempty"` + Ingest *types.NodesIngest `json:"ingest,omitempty"` + Script *types.Scripting `json:"script,omitempty"` + ThreadPool map[string]types.ThreadCount `json:"thread_pool,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + ThreadPool: make(map[string]types.ThreadCount, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/pending_tasks.go new file mode 100644 index 000000000..2cf88879a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/pending_tasks.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// +// NOTE: This API returns a list of any pending updates to the cluster state. +// These are distinct from the tasks reported by the task management API which +// include periodic tasks and tasks initiated by the user, such as node stats, +// search queries, or create index requests. 
+// However, if a user-initiated task such as a create index command causes a +// cluster state update, the activity of this task might be reported by both +// task api and pending cluster tasks API. +package pendingtasks + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PendingTasks struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPendingTasks type alias for index. +type NewPendingTasks func() *PendingTasks + +// NewPendingTasksFunc returns a new instance of PendingTasks with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPendingTasksFunc(tp elastictransport.Interface) NewPendingTasks { + return func() *PendingTasks { + n := New(tp) + + return n + } +} + +// Get the pending cluster tasks. +// Get information about cluster-level changes (such as create index, update +// mapping, allocate or fail shard) that have not yet taken effect. +// +// NOTE: This API returns a list of any pending updates to the cluster state. +// These are distinct from the tasks reported by the task management API which +// include periodic tasks and tasks initiated by the user, such as node stats, +// search queries, or create index requests. +// However, if a user-initiated task such as a create index command causes a +// cluster state update, the activity of this task might be reported by both +// task api and pending cluster tasks API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks +func New(tp elastictransport.Interface) *PendingTasks { + r := &PendingTasks{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
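As the package comment notes, these are queued cluster-state updates rather than user tasks from the task management API. A small hedged sketch that surfaces them, for example while diagnosing a slow master (the helper name and output format are illustrative):

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// printPendingTasks lists cluster-state updates that have not yet been applied.
func printPendingTasks(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Cluster.PendingTasks().Do(ctx)
	if err != nil {
		return err
	}
	for _, t := range res.Tasks {
		fmt.Printf("priority=%s queued=%dms source=%q\n",
			t.Priority, t.TimeInQueueMillis, t.Source)
	}
	return nil
}
```

Request construction for `/_cluster/pending_tasks` continues in HttpRequest below.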
+func (r *PendingTasks) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("pending_tasks") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PendingTasks) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.pending_tasks") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.pending_tasks") + if reader := instrument.RecordRequestBody(ctx, "cluster.pending_tasks", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.pending_tasks") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PendingTasks query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a pendingtasks.Response +func (r PendingTasks) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return 
nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PendingTasks) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.pending_tasks") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PendingTasks query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PendingTasks headers map. +func (r *PendingTasks) Header(key, value string) *PendingTasks { + r.headers.Set(key, value) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. +// If `false`, information is retrieved from the master node. +// API name: local +func (r *PendingTasks) Local(local bool) *PendingTasks { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PendingTasks) MasterTimeout(duration string) *PendingTasks { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PendingTasks) ErrorTrace(errortrace bool) *PendingTasks { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PendingTasks) FilterPath(filterpaths ...string) *PendingTasks { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PendingTasks) Human(human bool) *PendingTasks { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
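The query-parameter setters in this file all return the receiver, so calls chain fluently and end with `Do`. A minimal usage sketch, assuming the typed client constructor `elasticsearch.NewTypedClient` from this module; the address is hypothetical, and the `Priority`/`Source` fields on `types.PendingTask` are assumptions to verify against the vendored types package:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Hypothetical client configuration; adjust the address and credentials.
	client, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Chain query parameters, then execute; Do decodes the body into a Response.
	res, err := client.Cluster.PendingTasks().
		Local(true).
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	for _, task := range res.Tasks {
		fmt.Printf("priority=%v source=%v\n", task.Priority, task.Source)
	}
}
```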
+// API name: pretty +func (r *PendingTasks) Pretty(pretty bool) *PendingTasks { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/response.go new file mode 100644 index 000000000..9142db077 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package pendingtasks + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package pendingtasks +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 +type Response struct { + Tasks []types.PendingTask `json:"tasks"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go new file mode 100644 index 000000000..6caa60143 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go @@ -0,0 +1,384 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update voting configuration exclusions. 
+// Update the cluster voting config exclusions by node IDs or node names. +// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. +// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. +// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. +// +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. +package postvotingconfigexclusions + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostVotingConfigExclusions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostVotingConfigExclusions type alias for index. +type NewPostVotingConfigExclusions func() *PostVotingConfigExclusions + +// NewPostVotingConfigExclusionsFunc returns a new instance of PostVotingConfigExclusions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostVotingConfigExclusionsFunc(tp elastictransport.Interface) NewPostVotingConfigExclusions { + return func() *PostVotingConfigExclusions { + n := New(tp) + + return n + } +} + +// Update voting configuration exclusions. +// Update the cluster voting config exclusions by node IDs or node names. 
+// By default, if there are more than three master-eligible nodes in the cluster +// and you remove fewer than half of the master-eligible nodes in the cluster at +// once, the voting configuration automatically shrinks. +// If you want to shrink the voting configuration to contain fewer than three +// nodes or to remove half or more of the master-eligible nodes in the cluster +// at once, use this API to remove departing nodes from the voting configuration +// manually. +// The API adds an entry for each specified node to the cluster’s voting +// configuration exclusions list. +// It then waits until the cluster has reconfigured its voting configuration to +// exclude the specified nodes. +// +// Clusters should have no voting configuration exclusions in normal operation. +// Once the excluded nodes have stopped, clear the voting configuration +// exclusions with `DELETE /_cluster/voting_config_exclusions`. +// This API waits for the nodes to be fully removed from the cluster before it +// returns. +// If your cluster has voting configuration exclusions for nodes that you no +// longer intend to remove, use `DELETE +// /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the +// voting configuration exclusions without waiting for the nodes to leave the +// cluster. +// +// A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +// code of 200 OK guarantees that the node has been removed from the voting +// configuration and will not be reinstated until the voting configuration +// exclusions are cleared by calling `DELETE +// /_cluster/voting_config_exclusions`. +// If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +// response with an HTTP status code other than 200 OK then the node may not +// have been removed from the voting configuration. +// In that case, you may safely retry the call. +// +// NOTE: Voting exclusions are required only when you remove at least half of +// the master-eligible nodes from a cluster in a short time period. +// They are not required when removing master-ineligible nodes or when removing +// fewer than half of the master-eligible nodes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions +func New(tp elastictransport.Interface) *PostVotingConfigExclusions { + r := &PostVotingConfigExclusions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PostVotingConfigExclusions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("voting_config_exclusions") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PostVotingConfigExclusions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.post_voting_config_exclusions") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.post_voting_config_exclusions") + if reader := instrument.RecordRequestBody(ctx, "cluster.post_voting_config_exclusions", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.post_voting_config_exclusions") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostVotingConfigExclusions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postvotingconfigexclusions.Response +func (r PostVotingConfigExclusions) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
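For this endpoint `Do` simply delegates to `IsSuccess` (below) and yields a bool, matching the empty response body of the underlying HTTP call. A short sketch reusing the hypothetical `client` (and imports) from the earlier pending-tasks example; the node names are illustrative, and `NodeNames`/`Timeout` are the setters defined later in this file:

```go
// Exclude two departing master-eligible nodes from the voting configuration.
ok, err := client.Cluster.PostVotingConfigExclusions().
	NodeNames("node-1", "node-2").
	Timeout("60s").
	Do(context.Background())
if err != nil {
	// Per the API documentation above, a failed call can be retried safely.
	log.Fatal(err)
}
fmt.Println("nodes excluded:", ok)
```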
+func (r PostVotingConfigExclusions) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.post_voting_config_exclusions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PostVotingConfigExclusions query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PostVotingConfigExclusions headers map. +func (r *PostVotingConfigExclusions) Header(key, value string) *PostVotingConfigExclusions { + r.headers.Set(key, value) + + return r +} + +// NodeNames A comma-separated list of the names of the nodes to exclude from the +// voting configuration. If specified, you may not also specify node_ids. +// API name: node_names +func (r *PostVotingConfigExclusions) NodeNames(names ...string) *PostVotingConfigExclusions { + r.values.Set("node_names", strings.Join(names, ",")) + + return r +} + +// NodeIds A comma-separated list of the persistent ids of the nodes to exclude +// from the voting configuration. If specified, you may not also specify +// node_names. +// API name: node_ids +func (r *PostVotingConfigExclusions) NodeIds(ids ...string) *PostVotingConfigExclusions { + r.values.Set("node_ids", strings.Join(ids, ",")) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostVotingConfigExclusions) MasterTimeout(duration string) *PostVotingConfigExclusions { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout When adding a voting configuration exclusion, the API waits for the +// specified nodes to be excluded from the voting configuration before +// returning. If the timeout expires before the appropriate condition +// is satisfied, the request fails and returns an error. +// API name: timeout +func (r *PostVotingConfigExclusions) Timeout(duration string) *PostVotingConfigExclusions { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostVotingConfigExclusions) ErrorTrace(errortrace bool) *PostVotingConfigExclusions { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostVotingConfigExclusions) FilterPath(filterpaths ...string) *PostVotingConfigExclusions { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostVotingConfigExclusions) Human(human bool) *PostVotingConfigExclusions { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostVotingConfigExclusions) Pretty(pretty bool) *PostVotingConfigExclusions { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/put_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/put_component_template.go new file mode 100644 index 000000000..f28278df5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/put_component_template.go @@ -0,0 +1,512 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a component template. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// +// An index template can be composed of multiple component templates. +// To use a component template, specify it in an index template’s `composed_of` +// list. +// Component templates are only applied to new data streams and indices as part +// of a matching index template. +// +// Settings and mappings specified directly in the index template or the create +// index request override any settings or mappings specified in a component +// template. +// +// Component templates are only used during index creation. +// For data streams, this includes data stream creation and the creation of a +// stream’s backing indices. +// Changes to component templates do not affect existing indices, including a +// stream’s backing indices. +// +// You can use C-style `/* *\/` block comments in component templates. +// You can include comments anywhere in the request body except before the +// opening curly bracket. +// +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. 
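To make the composition model described above concrete, here is a sketch that creates a component template through this package's `FromJSON` helper (defined in the accompanying request.go); the template name, settings, and `_meta` contents are illustrative, and `client` is the hypothetical typed client from the earlier examples:

```go
// Build the request body from raw JSON; FromJSON returns a *Request.
req, err := putcomponenttemplate.NewRequest().FromJSON(`{
  "template": {
    "settings": { "index.number_of_shards": 1 },
    "mappings": { "properties": { "ts": { "type": "date" } } }
  },
  "version": 1,
  "_meta": { "owner": "platform-team" }
}`)
if err != nil {
	log.Fatal(err)
}

// PUT _component_template/ct-base; an index template would then list
// "ct-base" in its composed_of array to apply it.
resp, err := client.Cluster.PutComponentTemplate("ct-base").
	Request(req).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("acknowledged:", resp.Acknowledged)
```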
+package putcomponenttemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutComponentTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutComponentTemplate type alias for index. +type NewPutComponentTemplate func(name string) *PutComponentTemplate + +// NewPutComponentTemplateFunc returns a new instance of PutComponentTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentTemplate { + return func(name string) *PutComponentTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update a component template. +// Component templates are building blocks for constructing index templates that +// specify index mappings, settings, and aliases. +// +// An index template can be composed of multiple component templates. +// To use a component template, specify it in an index template’s `composed_of` +// list. +// Component templates are only applied to new data streams and indices as part +// of a matching index template. +// +// Settings and mappings specified directly in the index template or the create +// index request override any settings or mappings specified in a component +// template. +// +// Component templates are only used during index creation. +// For data streams, this includes data stream creation and the creation of a +// stream’s backing indices. +// Changes to component templates do not affect existing indices, including a +// stream’s backing indices. +// +// You can use C-style `/* *\/` block comments in component templates. +// You can include comments anywhere in the request body except before the +// opening curly bracket. +// +// **Applying component templates** +// +// You cannot directly apply a component template to a data stream or index. +// To be applied, a component template must be included in an index template's +// `composed_of` list. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template +func New(tp elastictransport.Interface) *PutComponentTemplate { + r := &PutComponentTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutComponentTemplate) Raw(raw io.Reader) *PutComponentTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *PutComponentTemplate) Request(req *Request) *PutComponentTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutComponentTemplate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_component_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutComponentTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.put_component_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.put_component_template") + if reader := instrument.RecordRequestBody(ctx, "cluster.put_component_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.put_component_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutComponentTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putcomponenttemplate.Response +func (r PutComponentTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.put_component_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutComponentTemplate headers map. +func (r *PutComponentTemplate) Header(key, value string) *PutComponentTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Name of the component template to create. +// Elasticsearch includes the following built-in component templates: +// `logs-mappings`; `logs-settings`; `metrics-mappings`; +// `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +// Elastic Agent uses these templates to configure backing indices for its data +// streams. +// If you use Elastic Agent and want to overwrite one of these templates, set +// the `version` for your replacement template higher than the current version. 
+// If you don’t use Elastic Agent and want to disable all built-in component and +// index templates, set `stack.templates.enabled` to `false` using the cluster +// update settings API. +// API Name: name +func (r *PutComponentTemplate) _name(name string) *PutComponentTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create If `true`, this request cannot replace or update existing component +// templates. +// API name: create +func (r *PutComponentTemplate) Create(create bool) *PutComponentTemplate { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// Cause User-defined reason for creating the component template. +// API name: cause +func (r *PutComponentTemplate) Cause(cause string) *PutComponentTemplate { + r.values.Set("cause", cause) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutComponentTemplate) MasterTimeout(duration string) *PutComponentTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutComponentTemplate) ErrorTrace(errortrace bool) *PutComponentTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutComponentTemplate) FilterPath(filterpaths ...string) *PutComponentTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true`, returns statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled, the human-readable +// values will be omitted. This makes sense for responses consumed +// only by machines. +// API name: human +func (r *PutComponentTemplate) Human(human bool) *PutComponentTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use +// this option for debugging only. +// API name: pretty +func (r *PutComponentTemplate) Pretty(pretty bool) *PutComponentTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Marks this component template as deprecated. When creating or updating a +// non-deprecated index template +// that uses deprecated components, Elasticsearch will emit a deprecation +// warning. +// API name: deprecated +func (r *PutComponentTemplate) Deprecated(deprecated bool) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Deprecated = &deprecated + + return r +} + +// Optional user metadata about the component template. +// It may have any contents. This map is not automatically generated by +// Elasticsearch. +// This information is stored in the cluster state, so keeping it short is +// preferable. +// To unset `_meta`, replace the template without specifying this information.
+// API name: _meta +func (r *PutComponentTemplate) Meta_(metadata types.MetadataVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// The template to be applied which includes mappings, settings, or aliases +// configuration. +// API name: template +func (r *PutComponentTemplate) Template(template types.IndexStateVariant) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Template = *template.IndexStateCaster() + + return r +} + +// Version number used to manage component templates externally. +// This number isn't automatically generated or incremented by Elasticsearch. +// To unset a version, replace the template without specifying a version. +// API name: version +func (r *PutComponentTemplate) Version(versionnumber int64) *PutComponentTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &versionnumber + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/request.go new file mode 100644 index 000000000..a21cc2e15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/request.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcomponenttemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putcomponenttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L110 +type Request struct { + + // Deprecated Marks this index template as deprecated. When creating or updating a + // non-deprecated index template + // that uses deprecated components, Elasticsearch will emit a deprecation + // warning. + Deprecated *bool `json:"deprecated,omitempty"` + // Meta_ Optional user metadata about the component template. + // It may have any contents. This map is not automatically generated by + // Elasticsearch. + // This information is stored in the cluster state, so keeping it short is + // preferable. 
+ // To unset `_meta`, replace the template without specifying this information. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Template The template to be applied which includes mappings, settings, or aliases + // configuration. + Template types.IndexState `json:"template"` + // Version Version number used to manage component templates externally. + // This number isn't automatically generated or incremented by Elasticsearch. + // To unset a version, replace the template without specifying a version. + Version *int64 `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putcomponenttemplate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/response.go new file mode 100644 index 000000000..2f35835f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcomponenttemplate + +// Response holds the response body struct for the package putcomponenttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/put_settings.go new file mode 100644 index 000000000..6e1f0d82a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/put_settings.go @@ -0,0 +1,483 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the cluster settings. +// +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. +// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. +// +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. 
+// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. +package putsettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutSettings type alias for index. +type NewPutSettings func() *PutSettings + +// NewPutSettingsFunc returns a new instance of PutSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { + return func() *PutSettings { + n := New(tp) + + return n + } +} + +// Update the cluster settings. +// +// Configure and update dynamic settings on a running cluster. +// You can also configure dynamic settings locally on an unstarted or shut down +// node in `elasticsearch.yml`. +// +// Updates made with this API can be persistent, which apply across cluster +// restarts, or transient, which reset after a cluster restart. +// You can also reset transient or persistent settings by assigning them a null +// value. +// +// If you configure the same setting using multiple methods, Elasticsearch +// applies the settings in following order of precedence: 1) Transient setting; +// 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting +// value. +// For example, you can apply a transient setting to override a persistent +// setting or `elasticsearch.yml` setting. +// However, a change to an `elasticsearch.yml` setting will not override a +// defined transient or persistent setting. +// +// TIP: In Elastic Cloud, use the user settings feature to configure all cluster +// settings. This method automatically rejects unsafe settings that could break +// your cluster. +// If you run Elasticsearch on your own hardware, use this API to configure +// dynamic cluster settings. +// Only use `elasticsearch.yml` for static cluster settings and node settings. +// The API doesn’t require a restart and ensures a setting’s value is the same +// on all nodes. +// +// WARNING: Transient cluster settings are no longer recommended. Use persistent +// cluster settings instead. +// If a cluster becomes unstable, transient settings can clear unexpectedly, +// resulting in a potentially undesired cluster configuration. 
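To make the reset semantics above concrete: because a transient value keeps overriding its persistent counterpart until cleared, migrating a setting from transient to persistent storage means writing the persistent key and nulling the transient one in the same request. A sketch using this package's Request type (the setting name is illustrative; `json` is encoding/json):

```go
// NewRequest pre-initializes both maps; json.RawMessage values pass through
// unmodified, and a literal null clears the setting on the cluster.
req := putsettings.NewRequest()
req.Persistent["cluster.routing.allocation.enable"] = json.RawMessage(`"all"`)
req.Transient["cluster.routing.allocation.enable"] = json.RawMessage(`null`)
```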
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings +func New(tp elastictransport.Interface) *PutSettings { + r := &PutSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSettings) Raw(raw io.Reader) *PutSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSettings) Request(req *Request) *PutSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.put_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.put_settings") + if reader := instrument.RecordRequestBody(ctx, "cluster.put_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.put_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsettings.Response +func (r PutSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.put_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutSettings headers map. 
+func (r *PutSettings) Header(key, value string) *PutSettings { + r.headers.Set(key, value) + + return r +} + +// FlatSettings Return settings in flat format (default: false) +// API name: flat_settings +func (r *PutSettings) FlatSettings(flatsettings bool) *PutSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// MasterTimeout Explicit operation timeout for connection to master node +// API name: master_timeout +func (r *PutSettings) MasterTimeout(duration string) *PutSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Explicit operation timeout +// API name: timeout +func (r *PutSettings) Timeout(duration string) *PutSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutSettings) ErrorTrace(errortrace bool) *PutSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutSettings) FilterPath(filterpaths ...string) *PutSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutSettings) Human(human bool) *PutSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutSettings) Pretty(pretty bool) *PutSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The settings that persist after the cluster restarts. +// API name: persistent +func (r *PutSettings) Persistent(persistent map[string]json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Persistent = persistent + return r +} + +func (r *PutSettings) AddPersistent(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Persistent == nil { + r.req.Persistent = make(map[string]json.RawMessage) + } else { + tmp = r.req.Persistent + } + + tmp[key] = value + + r.req.Persistent = tmp + return r +} + +// The settings that do not persist after the cluster restarts. 
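The `Persistent`/`AddPersistent` pair above (mirrored for transient settings by the setters that follow) also supports building the body incrementally without constructing a Request by hand. A sketch reusing the hypothetical `client` from the earlier examples, with an illustrative setting name:

```go
res, err := client.Cluster.PutSettings().
	// Persists across cluster restarts, per the doc comment above.
	AddPersistent("indices.recovery.max_bytes_per_sec", json.RawMessage(`"100mb"`)).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("acknowledged:", res.Acknowledged)
```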
+// API name: transient +func (r *PutSettings) Transient(transient map[string]json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Transient = transient + return r +} + +func (r *PutSettings) AddTransient(key string, value json.RawMessage) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Transient == nil { + r.req.Transient = make(map[string]json.RawMessage) + } else { + tmp = r.req.Transient + } + + tmp[key] = value + + r.req.Transient = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/request.go new file mode 100644 index 000000000..af253e5e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsettings + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package putsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L71 +type Request struct { + + // Persistent The settings that persist after the cluster restarts. + Persistent map[string]json.RawMessage `json:"persistent,omitempty"` + // Transient The settings that do not persist after the cluster restarts. 
+ Transient map[string]json.RawMessage `json:"transient,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Persistent: make(map[string]json.RawMessage, 0), + Transient: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putsettings request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/response.go new file mode 100644 index 000000000..120a9f643 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsettings + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package putsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Persistent map[string]json.RawMessage `json:"persistent"` + Transient map[string]json.RawMessage `json:"transient"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Persistent: make(map[string]json.RawMessage, 0), + Transient: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/remote_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/remote_info.go new file mode 100644 index 000000000..4b7f6f534 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/remote_info.go @@ -0,0 +1,353 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. +// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). +package remoteinfo + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RemoteInfo struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRemoteInfo type alias for index. +type NewRemoteInfo func() *RemoteInfo + +// NewRemoteInfoFunc returns a new instance of RemoteInfo with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRemoteInfoFunc(tp elastictransport.Interface) NewRemoteInfo { + return func() *RemoteInfo { + n := New(tp) + + return n + } +} + +// Get remote cluster information. +// +// Get information about configured remote clusters. +// The API returns connection and endpoint information keyed by the configured +// remote cluster alias. +// +// > info +// > This API returns information that reflects current state on the local +// cluster. +// > The `connected` field does not necessarily reflect whether a remote cluster +// is down or unavailable, only whether there is currently an open connection to +// it. +// > Elasticsearch does not spontaneously try to reconnect to a disconnected +// remote cluster. +// > To trigger a reconnection, attempt a cross-cluster search, ES|QL +// cross-cluster search, or try the [resolve cluster +// endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info +func New(tp elastictransport.Interface) *RemoteInfo { + r := &RemoteInfo{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RemoteInfo) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_remote") + path.WriteString("/") + path.WriteString("info") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RemoteInfo) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.remote_info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.remote_info") + if reader := instrument.RecordRequestBody(ctx, "cluster.remote_info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.remote_info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RemoteInfo query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a remoteinfo.Response +func (r RemoteInfo) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.remote_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r RemoteInfo) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "cluster.remote_info")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the RemoteInfo query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the RemoteInfo headers map.
+func (r *RemoteInfo) Header(key, value string) *RemoteInfo {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *RemoteInfo) ErrorTrace(errortrace bool) *RemoteInfo {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *RemoteInfo) FilterPath(filterpaths ...string) *RemoteInfo {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *RemoteInfo) Human(human bool) *RemoteInfo {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
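Because `Do` returns the map alias keyed by remote-cluster alias, and the `UnmarshalJSON` above stores a pointer to the mode-specific type, a caller can recover the variant with a type switch. A sketch under the same assumed client wiring as the earlier example; the `Connected` field name follows the generated `types` package and should be verified against it.

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// UnmarshalJSON (above) has already selected the concrete variant per
	// each entry's "mode" field, so a type switch distinguishes them.
	info, err := es.Cluster.RemoteInfo().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for alias, remote := range info {
		switch v := remote.(type) {
		case *types.ClusterRemoteSniffInfo:
			log.Printf("%s: sniff mode, connected=%v", alias, v.Connected)
		case *types.ClusterRemoteProxyInfo:
			log.Printf("%s: proxy mode, connected=%v", alias, v.Connected)
		}
	}
}
```

+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.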
+// API name: pretty +func (r *RemoteInfo) Pretty(pretty bool) *RemoteInfo { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/response.go new file mode 100644 index 000000000..20e3e017e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo/response.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package remoteinfo + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package remoteinfo +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L27 + +type Response map[string]types.ClusterRemoteInfo + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} + +func (r Response) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + o := make(map[string]any, 0) + dec.Decode(&o) + dec = json.NewDecoder(bytes.NewReader(data)) + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + key := fmt.Sprintf("%s", t) + if target, ok := o[key]; ok { + if t, ok := target.(map[string]any)["mode"]; ok { + + switch t { + + case "sniff": + oo := types.NewClusterRemoteSniffInfo() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "proxy": + oo := types.NewClusterRemoteProxyInfo() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + } + } + } + + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/request.go new file mode 100644 index 000000000..492033ac5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
+// licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package reroute
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package reroute
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L91
+type Request struct {
+
+	// Commands Defines the commands to perform.
+	Commands []types.Command `json:"commands,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Reroute request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/reroute.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/reroute.go
new file mode 100644
index 000000000..2982784c4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/reroute.go
@@ -0,0 +1,462 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Reroute the cluster.
+// Manually change the allocation of individual shards in the cluster.
+// For example, a shard can be moved from one node to another explicitly, an
+// allocation can be canceled, and an unassigned shard can be explicitly
+// allocated to a specific node.
+//
+// It is important to note that after processing any reroute commands
+// Elasticsearch will perform rebalancing as normal (respecting the values of
+// settings such as `cluster.routing.rebalance.enable`) in order to remain in a
+// balanced state.
+// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. +// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. +// +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. +// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. +package reroute + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Reroute struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewReroute type alias for index. +type NewReroute func() *Reroute + +// NewRerouteFunc returns a new instance of Reroute with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRerouteFunc(tp elastictransport.Interface) NewReroute { + return func() *Reroute { + n := New(tp) + + return n + } +} + +// Reroute the cluster. +// Manually change the allocation of individual shards in the cluster. +// For example, a shard can be moved from one node to another explicitly, an +// allocation can be canceled, and an unassigned shard can be explicitly +// allocated to a specific node. +// +// It is important to note that after processing any reroute commands +// Elasticsearch will perform rebalancing as normal (respecting the values of +// settings such as `cluster.routing.rebalance.enable`) in order to remain in a +// balanced state. +// For example, if the requested allocation includes moving a shard from node1 +// to node2 then this may cause a shard to be moved from node2 back to node1 to +// even things out. +// +// The cluster can be set to disable allocations using the +// `cluster.routing.allocation.enable` setting. +// If allocations are disabled then the only allocations that will be performed +// are explicit ones given using the reroute command, and consequent allocations +// due to rebalancing. +// +// The cluster will attempt to allocate a shard a maximum of +// `index.allocation.max_retries` times in a row (defaults to `5`), before +// giving up and leaving the shard unallocated. +// This scenario can be caused by structural problems such as having an analyzer +// which refers to a stopwords file which doesn’t exist on all nodes. 
+// +// Once the problem has been corrected, allocation can be manually retried by +// calling the reroute API with the `?retry_failed` URI query parameter, which +// will attempt a single retry round for these shards. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute +func New(tp elastictransport.Interface) *Reroute { + r := &Reroute{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Reroute) Raw(raw io.Reader) *Reroute { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Reroute) Request(req *Request) *Reroute { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Reroute) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Reroute: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("reroute") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Reroute) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.reroute") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.reroute") + if reader := instrument.RecordRequestBody(ctx, "cluster.reroute", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.reroute") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Reroute query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a reroute.Response +func (r Reroute) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.reroute") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Reroute headers map. +func (r *Reroute) Header(key, value string) *Reroute { + r.headers.Set(key, value) + + return r +} + +// DryRun If true, then the request simulates the operation. +// It will calculate the result of applying the commands to the current cluster +// state and return the resulting cluster state after the commands (and +// rebalancing) have been applied; it will not actually perform the requested +// changes. +// API name: dry_run +func (r *Reroute) DryRun(dryrun bool) *Reroute { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// Explain If true, then the response contains an explanation of why the commands can or +// cannot run. 
+// API name: explain +func (r *Reroute) Explain(explain bool) *Reroute { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Metric Limits the information returned to the specified metrics. +// API name: metric +func (r *Reroute) Metric(metrics ...string) *Reroute { + r.values.Set("metric", strings.Join(metrics, ",")) + + return r +} + +// RetryFailed If true, then retries allocation of shards that are blocked due to too many +// subsequent allocation failures. +// API name: retry_failed +func (r *Reroute) RetryFailed(retryfailed bool) *Reroute { + r.values.Set("retry_failed", strconv.FormatBool(retryfailed)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *Reroute) MasterTimeout(duration string) *Reroute { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Reroute) Timeout(duration string) *Reroute { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Reroute) ErrorTrace(errortrace bool) *Reroute { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Reroute) FilterPath(filterpaths ...string) *Reroute { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Reroute) Human(human bool) *Reroute { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Reroute) Pretty(pretty bool) *Reroute { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Defines the commands to perform. +// API name: commands +func (r *Reroute) Commands(commands ...types.CommandVariant) *Reroute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range commands { + + r.req.Commands = append(r.req.Commands, *v.CommandCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/response.go new file mode 100644 index 000000000..f519179d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
+// Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package reroute
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package reroute
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34
+type Response struct {
+	Acknowledged bool                       `json:"acknowledged"`
+	Explanations []types.RerouteExplanation `json:"explanations,omitempty"`
+	// State There aren't any guarantees on the output/structure of the raw cluster state.
+	// Here you will find the internal representation of the cluster, which can
+	// differ from the external representation.
+	State json.RawMessage `json:"state,omitempty"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
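A hedged sketch of driving this endpoint end to end: commands supplied via `FromJSON`, executed as a dry run first. The index and node names are placeholders, and the client wiring is assumed as in the earlier examples.

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// FromJSON mirrors what a REST body would carry; "my-index", "node-1"
	// and "node-2" are placeholders.
	req, err := reroute.NewRequest().FromJSON(
		`{"commands":[{"move":{"index":"my-index","shard":0,"from_node":"node-1","to_node":"node-2"}}]}`)
	if err != nil {
		log.Fatal(err)
	}

	// Dry-run plus explain: the response reflects the would-be cluster state
	// without actually applying the move.
	res, err := es.Cluster.Reroute().
		Request(req).
		DryRun(true).
		Explain(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged=%v explanations=%d", res.Acknowledged, len(res.Explanations))
}
```

diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/response.go
new file mode 100644
index 000000000..a60a61f53
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/response.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.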
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package state + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package state +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/state/ClusterStateResponse.ts#L22-L31 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/state.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/state.go new file mode 100644 index 000000000..60eee4628 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state/state.go @@ -0,0 +1,517 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. +// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. +// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. 
+package state + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + metricMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type State struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + metric string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewState type alias for index. +type NewState func() *State + +// NewStateFunc returns a new instance of State with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStateFunc(tp elastictransport.Interface) NewState { + return func() *State { + n := New(tp) + + return n + } +} + +// Get the cluster state. +// Get comprehensive information about the state of the cluster. +// +// The cluster state is an internal data structure which keeps track of a +// variety of information needed by every node, including the identity and +// attributes of the other nodes in the cluster; cluster-wide settings; index +// metadata, including the mapping and settings for each index; the location and +// status of every shard copy in the cluster. +// +// The elected master node ensures that every node in the cluster has a copy of +// the same cluster state. +// This API lets you retrieve a representation of this internal state for +// debugging or diagnostic purposes. +// You may need to consult the Elasticsearch source code to determine the +// precise meaning of the response. +// +// By default the API will route requests to the elected master node since this +// node is the authoritative source of cluster states. +// You can also retrieve the cluster state held on the node handling the API +// request by adding the `?local=true` query parameter. +// +// Elasticsearch may need to expend significant effort to compute a response to +// this API in larger clusters, and the response may comprise a very large +// quantity of data. +// If you use this API repeatedly, your cluster may become unstable. +// +// WARNING: The response is a representation of an internal data structure. +// Its format is not subject to the same compatibility guarantees as other more +// stable APIs and may change from version to version. +// Do not query this API using external monitoring tools. +// Instead, obtain the information you require using other more stable cluster +// APIs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state +func New(tp elastictransport.Interface) *State { + r := &State{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *State) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_cluster")
+		path.WriteString("/")
+		path.WriteString("state")
+
+		method = http.MethodGet
+	case r.paramSet == metricMask:
+		path.WriteString("/")
+		path.WriteString("_cluster")
+		path.WriteString("/")
+		path.WriteString("state")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "metric", r.metric)
+		}
+		path.WriteString(r.metric)
+
+		method = http.MethodGet
+	case r.paramSet == metricMask|indexMask:
+		path.WriteString("/")
+		path.WriteString("_cluster")
+		path.WriteString("/")
+		path.WriteString("state")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "metric", r.metric)
+		}
+		path.WriteString(r.metric)
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.index)
+		}
+		path.WriteString(r.index)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
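Since this package's `Response` is a `json.RawMessage` alias (see `state/response.go` above), the caller decides how much of the intentionally unstable cluster-state payload to decode. A sketch under the same assumed client wiring; the metric and filter path are examples only.

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Restrict the payload server-side (metric + filter_path), then decode
	// the raw bytes selectively on the client.
	raw, err := es.Cluster.State().
		Metric("metadata").
		FilterPath("metadata.cluster_uuid").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	var out map[string]any
	if err := json.Unmarshal(raw, &out); err != nil { // raw is a json.RawMessage
		log.Fatal(err)
	}
	log.Printf("%v", out)
}
```

+// Perform runs the http.Request through the provided transport and returns an http.Response.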
+func (r State) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.state") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.state") + if reader := instrument.RecordRequestBody(ctx, "cluster.state", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.state") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the State query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a state.Response +func (r State) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.state") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r State) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.state") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the State query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the State headers map. +func (r *State) Header(key, value string) *State { + r.headers.Set(key, value) + + return r +} + +// Metric Limit the information returned to the specified metrics +// API Name: metric +func (r *State) Metric(metric string) *State { + r.paramSet |= metricMask + r.metric = metric + + return r +} + +// Index A comma-separated list of index names; use `_all` or empty string to perform +// the operation on all indices +// API Name: index +func (r *State) Index(index string) *State { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. (This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *State) AllowNoIndices(allownoindices bool) *State { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. 
+// API name: expand_wildcards +func (r *State) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *State { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// FlatSettings Return settings in flat format (default: false) +// API name: flat_settings +func (r *State) FlatSettings(flatsettings bool) *State { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable +// (missing or closed) +// API name: ignore_unavailable +func (r *State) IgnoreUnavailable(ignoreunavailable bool) *State { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Local Return local information, do not retrieve the state from master node +// (default: false) +// API name: local +func (r *State) Local(local bool) *State { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *State) MasterTimeout(duration string) *State { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForMetadataVersion Wait for the metadata version to be equal or greater than the specified +// metadata version +// API name: wait_for_metadata_version +func (r *State) WaitForMetadataVersion(versionnumber string) *State { + r.values.Set("wait_for_metadata_version", versionnumber) + + return r +} + +// WaitForTimeout The maximum time to wait for wait_for_metadata_version before timing out +// API name: wait_for_timeout +func (r *State) WaitForTimeout(duration string) *State { + r.values.Set("wait_for_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *State) ErrorTrace(errortrace bool) *State { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *State) FilterPath(filterpaths ...string) *State { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *State) Human(human bool) *State { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *State) Pretty(pretty bool) *State { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/response.go new file mode 100644 index 000000000..076fc3493 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/response.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/ClusterStatsResponse.ts#L71-L74 +type Response struct { + + // Ccs Cross-cluster stats + Ccs types.CCSStats `json:"ccs"` + // ClusterName Name of the cluster, based on the cluster name setting. + ClusterName string `json:"cluster_name"` + // ClusterUuid Unique identifier for the cluster. + ClusterUuid string `json:"cluster_uuid"` + // Indices Contains statistics about indices with shards assigned to selected nodes. + Indices types.ClusterIndices `json:"indices"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + // Nodes Contains statistics about nodes selected by the request’s node filters. + Nodes types.ClusterNodes `json:"nodes"` + // Repositories Contains stats on repository feature usage exposed in cluster stats for + // telemetry. + Repositories map[string]map[string]int64 `json:"repositories"` + // Snapshots Contains stats cluster snapshots. + Snapshots types.ClusterSnapshotStats `json:"snapshots"` + // Status Health status of the cluster, based on the state of its primary and replica + // shards. + Status *healthstatus.HealthStatus `json:"status,omitempty"` + // Timestamp Unix timestamp, in milliseconds, for the last time the cluster statistics + // were refreshed. 
+ Timestamp int64 `json:"timestamp"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Repositories: make(map[string]map[string]int64, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/stats.go new file mode 100644 index 000000000..f9d127165 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats/stats.go @@ -0,0 +1,378 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cluster statistics. +// Get basic index metrics (shard numbers, store size, memory usage) and +// information about the current nodes that form the cluster (number, roles, os, +// jvm versions, memory usage, cpu and installed plugins). +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get cluster statistics. +// Get basic index metrics (shard numbers, store size, memory usage) and +// information about the current nodes that form the cluster (number, roles, os, +// jvm versions, memory usage, cpu and installed plugins). 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_cluster") + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + path.WriteString("nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
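The paramSet switch above is the entire routing story for this endpoint: NodeId flips nodeidMask, which selects the /_cluster/stats/nodes/{nodeid} branch. A minimal sketch that only exercises the request builder; nothing here is sent, so the nil transport is never touched, and the node filter is a hypothetical value:

package main

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats"
)

func main() {
	// New accepts an elastictransport.Interface; nil is tolerable here
	// because HttpRequest only builds the *http.Request without sending it.
	req, err := stats.New(nil).
		NodeId("node-1,node-2"). // hypothetical node filter
		HttpRequest(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.Path) // GET /_cluster/stats/nodes/node-1,node-2
}
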
+func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "cluster.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "cluster.stats") + if reader := instrument.RecordRequestBody(ctx, "cluster.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "cluster.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
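A minimal end-to-end sketch of the Do path above. The cluster address is a placeholder, and es.Transport assumes the base client's exported elastictransport.Interface field, as in recent go-elasticsearch releases; the response fields read here are the ones declared in this package's response.go:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// NewStatsFunc mirrors how the typed-client index wires endpoints:
	// it captures the transport once and returns a constructor.
	res, err := stats.NewStatsFunc(es.Transport)().Do(context.Background())
	if err != nil {
		log.Fatal(err) // on non-2xx responses this is a *types.ElasticsearchError
	}
	fmt.Printf("cluster %s (%s), stats refreshed at %d\n",
		res.ClusterName, res.ClusterUuid, res.Timestamp)
}
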
+func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "cluster.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// NodeId Comma-separated list of node filters used to limit returned information. +// Defaults to all nodes in the cluster. +// API Name: nodeid +func (r *Stats) NodeId(nodeid string) *Stats { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// IncludeRemotes Include remote cluster data into the response +// API name: include_remotes +func (r *Stats) IncludeRemotes(includeremotes bool) *Stats { + r.values.Set("include_remotes", strconv.FormatBool(includeremotes)) + + return r +} + +// Timeout Period to wait for each node to respond. +// If a node does not respond before its timeout expires, the response does not +// include its stats. +// However, timed out nodes are included in the response’s `_nodes.failed` +// property. Defaults to no timeout. +// API name: timeout +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/check_in.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/check_in.go new file mode 100644 index 000000000..ca7014fce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/check_in.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check in a connector. +// +// Update the `last_seen` field in the connector and set it to the current +// timestamp. +package checkin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CheckIn struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCheckIn type alias for index. +type NewCheckIn func(connectorid string) *CheckIn + +// NewCheckInFunc returns a new instance of CheckIn with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCheckInFunc(tp elastictransport.Interface) NewCheckIn { + return func(connectorid string) *CheckIn { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Check in a connector. +// +// Update the `last_seen` field in the connector and set it to the current +// timestamp. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in +func New(tp elastictransport.Interface) *CheckIn { + r := &CheckIn{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
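A short usage sketch for the check-in endpoint; the address and connector id are placeholders, and es.Transport carries the same assumption as the earlier sketches. Note that the connector id is required at construction time, since NewCheckInFunc returns a constructor that takes it and sets the path mask via _connectorid:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// PUT /_connector/my-connector/_check_in, updating the last_seen field.
	res, err := checkin.NewCheckInFunc(es.Transport)("my-connector").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("check-in result:", res.Result) // e.g. "updated"
}
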
+func (r *CheckIn) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_check_in") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r CheckIn) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.check_in") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.check_in") + if reader := instrument.RecordRequestBody(ctx, "connector.check_in", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.check_in") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CheckIn query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a checkin.Response +func (r CheckIn) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r CheckIn) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CheckIn query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CheckIn headers map. +func (r *CheckIn) Header(key, value string) *CheckIn { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be checked in +// API Name: connectorid +func (r *CheckIn) _connectorid(connectorid string) *CheckIn { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CheckIn) ErrorTrace(errortrace bool) *CheckIn { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CheckIn) FilterPath(filterpaths ...string) *CheckIn { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CheckIn) Human(human bool) *CheckIn { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *CheckIn) Pretty(pretty bool) *CheckIn { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/response.go new file mode 100644 index 000000000..1092874ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package checkin + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package checkin +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/check_in/ConnectorCheckInResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/delete.go new file mode 100644 index 000000000..d781996d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/delete.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a connector. +// +// Removes a connector and associated sync jobs. +// This is a destructive action that is not recoverable. 
+// NOTE: This action doesn’t delete any API keys, ingest pipelines, or data +// indices associated with the connector. +// These need to be removed manually. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(connectorid string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(connectorid string) *Delete { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Delete a connector. +// +// Removes a connector and associated sync jobs. +// This is a destructive action that is not recoverable. +// NOTE: This action doesn’t delete any API keys, ingest pipelines, or data +// indices associated with the connector. +// These need to be removed manually. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.delete") + if reader := instrument.RecordRequestBody(ctx, "connector.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
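A usage sketch combining Do with the query parameters defined further below (DeleteSyncJobs, Hard), which are plain query-string flags on the DELETE request; the address and connector id are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// delete_sync_jobs=true also removes the connector's sync jobs;
	// this does not touch API keys, pipelines, or data indices.
	res, err := delete.NewDeleteFunc(es.Transport)("my-connector").
		DeleteSyncJobs(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
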
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be deleted +// API Name: connectorid +func (r *Delete) _connectorid(connectorid string) *Delete { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// DeleteSyncJobs A flag indicating if associated sync jobs should be also removed. Defaults to +// false. +// API name: delete_sync_jobs +func (r *Delete) DeleteSyncJobs(deletesyncjobs bool) *Delete { + r.values.Set("delete_sync_jobs", strconv.FormatBool(deletesyncjobs)) + + return r +} + +// Hard A flag indicating if the connector should be hard deleted. +// API name: hard +func (r *Delete) Hard(hard bool) *Delete { + r.values.Set("hard", strconv.FormatBool(hard)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/response.go new file mode 100644 index 000000000..749748e53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/delete/ConnectorDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/get.go new file mode 100644 index 000000000..aad8e974f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/get.go @@ -0,0 +1,355 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a connector. +// +// Get the details about a connector. +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(connectorid string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(connectorid string) *Get { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Get a connector. +// +// Get the details about a connector. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
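Because Do (below) decodes non-2xx bodies into types.ElasticsearchError and returns that value as the error, callers can branch on the HTTP status with errors.As. A sketch under the same placeholder-client assumption as the earlier examples:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/get"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = get.NewGetFunc(es.Transport)("no-such-connector").
		Do(context.Background())

	var esErr *types.ElasticsearchError
	if errors.As(err, &esErr) {
		// Status is backfilled from the HTTP status code when the
		// response body does not carry one (see Do below).
		fmt.Println("elasticsearch error, status:", esErr.Status)
	} else if err != nil {
		log.Fatal(err) // transport-level failure
	}
}
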
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.get") + if reader := instrument.RecordRequestBody(ctx, "connector.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
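On the happy path, the optional response fields are pointers in the get Response struct declared later in this diff, so a typical read guards before dereferencing; IncludeDeleted, defined further below, widens the lookup to soft-deleted connectors. Placeholders as above:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/get"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := get.NewGetFunc(es.Transport)("my-connector").
		IncludeDeleted(true). // also match soft-deleted connectors
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Optional fields are pointers; guard before dereferencing.
	if res.IndexName != nil {
		fmt.Println("writes to index:", *res.IndexName)
	}
	fmt.Println("status:", res.Status)
}
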
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector +// API Name: connectorid +func (r *Get) _connectorid(connectorid string) *Get { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// IncludeDeleted A flag to indicate if the desired connector should be fetched, even if it was +// soft-deleted. +// API name: include_deleted +func (r *Get) IncludeDeleted(includedeleted bool) *Get { + r.values.Set("include_deleted", strconv.FormatBool(includedeleted)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/response.go new file mode 100644 index 000000000..3253b21df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/get/response.go @@ -0,0 +1,361 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/get/ConnectorGetResponse.ts#L22-L25 +type Response struct { + ApiKeyId *string `json:"api_key_id,omitempty"` + ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` + Configuration types.ConnectorConfiguration `json:"configuration"` + CustomScheduling types.ConnectorCustomScheduling `json:"custom_scheduling"` + Deleted bool `json:"deleted"` + Description *string `json:"description,omitempty"` + Error *string `json:"error,omitempty"` + Features *types.ConnectorFeatures `json:"features,omitempty"` + Filtering []types.FilteringConfig `json:"filtering"` + Id *string `json:"id,omitempty"` + IndexName *string `json:"index_name,omitempty"` + IsNative bool `json:"is_native"` + Language *string `json:"language,omitempty"` + LastAccessControlSyncError *string `json:"last_access_control_sync_error,omitempty"` + LastAccessControlSyncScheduledAt types.DateTime `json:"last_access_control_sync_scheduled_at,omitempty"` + LastAccessControlSyncStatus *syncstatus.SyncStatus `json:"last_access_control_sync_status,omitempty"` + LastDeletedDocumentCount *int64 `json:"last_deleted_document_count,omitempty"` + LastIncrementalSyncScheduledAt types.DateTime `json:"last_incremental_sync_scheduled_at,omitempty"` + LastIndexedDocumentCount *int64 `json:"last_indexed_document_count,omitempty"` + LastSeen types.DateTime `json:"last_seen,omitempty"` + LastSyncError *string `json:"last_sync_error,omitempty"` + LastSyncScheduledAt types.DateTime `json:"last_sync_scheduled_at,omitempty"` + LastSyncStatus *syncstatus.SyncStatus `json:"last_sync_status,omitempty"` + LastSynced types.DateTime `json:"last_synced,omitempty"` + Name *string `json:"name,omitempty"` + Pipeline *types.IngestPipelineParams `json:"pipeline,omitempty"` + Scheduling types.SchedulingConfiguration `json:"scheduling"` + ServiceType *string `json:"service_type,omitempty"` + Status connectorstatus.ConnectorStatus `json:"status"` + SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` + SyncNow bool `json:"sync_now"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + 
break + } + return err + } + + switch t { + + case "api_key_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKeyId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKeyId = &o + + case "api_key_secret_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKeySecretId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKeySecretId = &o + + case "configuration": + if err := dec.Decode(&s.Configuration); err != nil { + return fmt.Errorf("%s | %w", "Configuration", err) + } + + case "custom_scheduling": + if err := dec.Decode(&s.CustomScheduling); err != nil { + return fmt.Errorf("%s | %w", "CustomScheduling", err) + } + + case "deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deleted", err) + } + s.Deleted = value + case bool: + s.Deleted = v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "features": + if err := dec.Decode(&s.Features); err != nil { + return fmt.Errorf("%s | %w", "Features", err) + } + + case "filtering": + if err := dec.Decode(&s.Filtering); err != nil { + return fmt.Errorf("%s | %w", "Filtering", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "is_native": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsNative", err) + } + s.IsNative = value + case bool: + s.IsNative = v + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "last_access_control_sync_error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncError", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastAccessControlSyncError = &o + + case "last_access_control_sync_scheduled_at": + if err := dec.Decode(&s.LastAccessControlSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncScheduledAt", err) + } + + case "last_access_control_sync_status": + if err := dec.Decode(&s.LastAccessControlSyncStatus); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncStatus", err) + } + + case "last_deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastDeletedDocumentCount", 
err) + } + s.LastDeletedDocumentCount = &value + case float64: + f := int64(v) + s.LastDeletedDocumentCount = &f + } + + case "last_incremental_sync_scheduled_at": + if err := dec.Decode(&s.LastIncrementalSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastIncrementalSyncScheduledAt", err) + } + + case "last_indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastIndexedDocumentCount", err) + } + s.LastIndexedDocumentCount = &value + case float64: + f := int64(v) + s.LastIndexedDocumentCount = &f + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "last_sync_error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastSyncError", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastSyncError = &o + + case "last_sync_scheduled_at": + if err := dec.Decode(&s.LastSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastSyncScheduledAt", err) + } + + case "last_sync_status": + if err := dec.Decode(&s.LastSyncStatus); err != nil { + return fmt.Errorf("%s | %w", "LastSyncStatus", err) + } + + case "last_synced": + if err := dec.Decode(&s.LastSynced); err != nil { + return fmt.Errorf("%s | %w", "LastSynced", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "pipeline": + if err := dec.Decode(&s.Pipeline); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + + case "scheduling": + if err := dec.Decode(&s.Scheduling); err != nil { + return fmt.Errorf("%s | %w", "Scheduling", err) + } + + case "service_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceType = &o + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "sync_cursor": + if err := dec.Decode(&s.SyncCursor); err != nil { + return fmt.Errorf("%s | %w", "SyncCursor", err) + } + + case "sync_now": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SyncNow", err) + } + s.SyncNow = value + case bool: + s.SyncNow = v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/last_sync.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/last_sync.go new file mode 100644 index 000000000..701fd692f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/last_sync.go @@ -0,0 +1,515 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector last sync stats. +// +// Update the fields related to the last sync of a connector. +// This action is used for analytics and monitoring. +package lastsync + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type LastSync struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewLastSync type alias for index. +type NewLastSync func(connectorid string) *LastSync + +// NewLastSyncFunc returns a new instance of LastSync with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewLastSyncFunc(tp elastictransport.Interface) NewLastSync { + return func(connectorid string) *LastSync { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector last sync stats. +// +// Update the fields related to the last sync of a connector. +// This action is used for analytics and monitoring. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync +func New(tp elastictransport.Interface) *LastSync { + r := &LastSync{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *LastSync) Raw(raw io.Reader) *LastSync { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *LastSync) Request(req *Request) *LastSync { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *LastSync) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for LastSync: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_last_sync") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
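
Because HttpRequest is exposed separately from Perform, a request can be built and inspected without touching a cluster. A sketch, assuming the typed client wires this package up as es.Connector.LastSync (as the NewLastSyncFunc alias above suggests); the address and connector ID are placeholders:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build PUT /_connector/{connector_id}/_last_sync without sending it.
	req, err := es.Connector.LastSync("my-connector").
		LastIndexedDocumentCount(1200).
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	body, err := io.ReadAll(req.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path)       // PUT /_connector/my-connector/_last_sync
	fmt.Println(req.Header.Get("Content-Type")) // application/vnd.elasticsearch+json;compatible-with=9
	fmt.Println(string(body))                   // {"last_indexed_document_count":1200}
}
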
+func (r LastSync) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.last_sync") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.last_sync") + if reader := instrument.RecordRequestBody(ctx, "connector.last_sync", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.last_sync") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the LastSync query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a lastsync.Response +func (r LastSync) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.last_sync") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the LastSync headers map. +func (r *LastSync) Header(key, value string) *LastSync { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *LastSync) _connectorid(connectorid string) *LastSync { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *LastSync) ErrorTrace(errortrace bool) *LastSync { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *LastSync) FilterPath(filterpaths ...string) *LastSync { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *LastSync) Human(human bool) *LastSync { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *LastSync) Pretty(pretty bool) *LastSync { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: last_access_control_sync_error +func (r *LastSync) LastAccessControlSyncError(lastaccesscontrolsyncerror string) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastAccessControlSyncError = &lastaccesscontrolsyncerror + + return r +} + +// API name: last_access_control_sync_scheduled_at +func (r *LastSync) LastAccessControlSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastAccessControlSyncScheduledAt = *datetime.DateTimeCaster() + + return r +} + +// API name: last_access_control_sync_status +func (r *LastSync) LastAccessControlSyncStatus(lastaccesscontrolsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LastAccessControlSyncStatus = &lastaccesscontrolsyncstatus + return r +} + +// API name: last_deleted_document_count +func (r *LastSync) LastDeletedDocumentCount(lastdeleteddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastDeletedDocumentCount = &lastdeleteddocumentcount + + return r +} + +// API name: last_incremental_sync_scheduled_at +func (r *LastSync) LastIncrementalSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastIncrementalSyncScheduledAt = *datetime.DateTimeCaster() + + return r +} + +// API name: last_indexed_document_count +func (r *LastSync) LastIndexedDocumentCount(lastindexeddocumentcount int64) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastIndexedDocumentCount = &lastindexeddocumentcount + + return r +} + +// API name: last_seen +func (r *LastSync) LastSeen(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *datetime.DateTimeCaster() + + return r +} + +// API name: last_sync_error +func (r *LastSync) LastSyncError(lastsyncerror string) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSyncError = &lastsyncerror + + return r +} + +// API name: 
last_sync_scheduled_at +func (r *LastSync) LastSyncScheduledAt(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSyncScheduledAt = *datetime.DateTimeCaster() + + return r +} + +// API name: last_sync_status +func (r *LastSync) LastSyncStatus(lastsyncstatus syncstatus.SyncStatus) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.LastSyncStatus = &lastsyncstatus + return r +} + +// API name: last_synced +func (r *LastSync) LastSynced(datetime types.DateTimeVariant) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSynced = *datetime.DateTimeCaster() + + return r +} + +// API name: sync_cursor +func (r *LastSync) SyncCursor(synccursor any) *LastSync { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := synccursor.(type) { + case json.RawMessage: + r.req.SyncCursor = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(synccursor) + if err != nil { + return err + } + r.req.SyncCursor = data + return nil + }) + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/request.go new file mode 100644 index 000000000..0d8bf15c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/request.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
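
Putting the builder methods above together, a hedged end-to-end sketch of reporting sync bookkeeping after an external sync run; the cluster address, connector ID, and counts are illustrative, and error handling is kept minimal:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// PUT /_connector/my-connector/_last_sync with the stats gathered by
	// an external sync job; the API exists for analytics and monitoring.
	res, err := es.Connector.LastSync("my-connector").
		LastIndexedDocumentCount(1200).
		LastDeletedDocumentCount(13).
		Do(context.Background())
	if err != nil {
		log.Fatal(err) // non-2xx responses surface as *types.ElasticsearchError
	}
	fmt.Println("result:", res.Result)
}
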
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package lastsync + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +// Request holds the request body struct for the package lastsync +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/last_sync/ConnectorUpdateLastSyncRequest.ts#L26-L66 +type Request struct { + LastAccessControlSyncError *string `json:"last_access_control_sync_error,omitempty"` + LastAccessControlSyncScheduledAt types.DateTime `json:"last_access_control_sync_scheduled_at,omitempty"` + LastAccessControlSyncStatus *syncstatus.SyncStatus `json:"last_access_control_sync_status,omitempty"` + LastDeletedDocumentCount *int64 `json:"last_deleted_document_count,omitempty"` + LastIncrementalSyncScheduledAt types.DateTime `json:"last_incremental_sync_scheduled_at,omitempty"` + LastIndexedDocumentCount *int64 `json:"last_indexed_document_count,omitempty"` + LastSeen types.DateTime `json:"last_seen,omitempty"` + LastSyncError *string `json:"last_sync_error,omitempty"` + LastSyncScheduledAt types.DateTime `json:"last_sync_scheduled_at,omitempty"` + LastSyncStatus *syncstatus.SyncStatus `json:"last_sync_status,omitempty"` + LastSynced types.DateTime `json:"last_synced,omitempty"` + SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Lastsync request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "last_access_control_sync_error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncError", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastAccessControlSyncError = &o + + case "last_access_control_sync_scheduled_at": + if err := dec.Decode(&s.LastAccessControlSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncScheduledAt", err) + } + + case "last_access_control_sync_status": + if err := dec.Decode(&s.LastAccessControlSyncStatus); err != nil { + return fmt.Errorf("%s | %w", "LastAccessControlSyncStatus", err) + } + + case "last_deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastDeletedDocumentCount", err) + } + s.LastDeletedDocumentCount = &value + case float64: + f := int64(v) + s.LastDeletedDocumentCount = &f + } + + case "last_incremental_sync_scheduled_at": + if err := dec.Decode(&s.LastIncrementalSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastIncrementalSyncScheduledAt", err) + } + + case "last_indexed_document_count": + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastIndexedDocumentCount", err) + } + s.LastIndexedDocumentCount = &value + case float64: + f := int64(v) + s.LastIndexedDocumentCount = &f + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "last_sync_error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastSyncError", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastSyncError = &o + + case "last_sync_scheduled_at": + if err := dec.Decode(&s.LastSyncScheduledAt); err != nil { + return fmt.Errorf("%s | %w", "LastSyncScheduledAt", err) + } + + case "last_sync_status": + if err := dec.Decode(&s.LastSyncStatus); err != nil { + return fmt.Errorf("%s | %w", "LastSyncStatus", err) + } + + case "last_synced": + if err := dec.Decode(&s.LastSynced); err != nil { + return fmt.Errorf("%s | %w", "LastSynced", err) + } + + case "sync_cursor": + if err := dec.Decode(&s.SyncCursor); err != nil { + return fmt.Errorf("%s | %w", "SyncCursor", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/response.go new file mode 100644 index 000000000..1d62ebae0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package lastsync + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package lastsync +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/last_sync/ConnectorUpdateLastSyncResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/list.go new file mode 100644 index 000000000..bd103ee7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/list.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get all connectors. +// +// Get information about all connectors. +package list + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type List struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewList type alias for index. +type NewList func() *List + +// NewListFunc returns a new instance of List with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListFunc(tp elastictransport.Interface) NewList { + return func() *List { + n := New(tp) + + return n + } +} + +// Get all connectors. +// +// Get information about all connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list +func New(tp elastictransport.Interface) *List { + r := &List{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
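
Once the transport is wired up, listing reduces to the fluent builder; the pagination and filtering methods defined further down (From, Size, Query, and friends) map one-to-one onto query-string parameters. A sketch, assuming the typed client exposes this package as es.Connector.List; the query term is a placeholder, and each connector document is dumped as raw JSON since many fields of types.Connector are optional:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// GET /_connector?query=crawler&from=0&size=20
	res, err := es.Connector.List().
		Query("crawler"). // matches name, description, or index name
		From(0).
		Size(20).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%d connector(s) matched\n", res.Count)
	for _, c := range res.Results {
		b, err := json.Marshal(c)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(b))
	}
}
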
+func (r *List) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r List) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.list") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.list") + if reader := instrument.RecordRequestBody(ctx, "connector.list", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.list") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the List query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a list.Response +func (r List) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r List) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the List query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the List headers map. +func (r *List) Header(key, value string) *List { + r.headers.Set(key, value) + + return r +} + +// From Starting offset (default: 0) +// API name: from +func (r *List) From(from int) *List { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies a max number of results to get +// API name: size +func (r *List) Size(size int) *List { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// IndexName A comma-separated list of connector index names to fetch connector documents +// for +// API name: index_name +func (r *List) IndexName(indices ...string) *List { + r.values.Set("index_name", strings.Join(indices, ",")) + + return r +} + +// ConnectorName A comma-separated list of connector names to fetch connector documents for +// API name: connector_name +func (r *List) ConnectorName(names ...string) *List { + r.values.Set("connector_name", strings.Join(names, ",")) + + return r +} + +// ServiceType A comma-separated list of connector service types to fetch connector +// documents for +// API name: service_type +func (r *List) ServiceType(names ...string) *List { + r.values.Set("service_type", strings.Join(names, ",")) + + return r +} + +// IncludeDeleted A flag to indicate if the desired connector should be fetched, even if it was +// soft-deleted. +// API name: include_deleted +func (r *List) IncludeDeleted(includedeleted bool) *List { + r.values.Set("include_deleted", strconv.FormatBool(includedeleted)) + + return r +} + +// Query A wildcard query string that filters connectors with matching name, +// description or index name +// API name: query +func (r *List) Query(query string) *List { + r.values.Set("query", query) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *List) ErrorTrace(errortrace bool) *List { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *List) FilterPath(filterpaths ...string) *List { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *List) Human(human bool) *List { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *List) Pretty(pretty bool) *List { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/response.go new file mode 100644 index 000000000..853620bce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/list/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package list + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package list +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/list/ConnectorListResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Results []types.Connector `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/post.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/post.go new file mode 100644 index 000000000..3e8c97e7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/post.go @@ -0,0 +1,421 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a connector. +// +// Connectors are Elasticsearch integrations that bring content from third-party +// data sources, which can be deployed on Elastic Cloud or hosted on your own +// infrastructure. +// Elastic managed connectors (Native connectors) are a managed service on +// Elastic Cloud. +// Self-managed connectors (Connector clients) are self-managed on your +// infrastructure. +package post + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Post struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPost type alias for index. +type NewPost func() *Post + +// NewPostFunc returns a new instance of Post with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostFunc(tp elastictransport.Interface) NewPost { + return func() *Post { + n := New(tp) + + return n + } +} + +// Create a connector. +// +// Connectors are Elasticsearch integrations that bring content from third-party +// data sources, which can be deployed on Elastic Cloud or hosted on your own +// infrastructure. +// Elastic managed connectors (Native connectors) are a managed service on +// Elastic Cloud. +// Self-managed connectors (Connector clients) are self-managed on your +// infrastructure. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put +func New(tp elastictransport.Interface) *Post { + r := &Post{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Post) Raw(raw io.Reader) *Post { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Post) Request(req *Request) *Post { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
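
A sketch of the create flow this file implements, assuming the typed client exposes it as es.Connector.Post; the service type, index name, and address are placeholders. POST /_connector leaves ID generation to Elasticsearch, and the generated ID comes back in the response:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// POST /_connector with a minimal self-managed connector definition.
	res, err := es.Connector.Post().
		IndexName("search-sharepoint").
		Name("SharePoint connector").
		ServiceType("sharepoint_online").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created connector", res.Id, "with result", res.Result)
}
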
+func (r *Post) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Post: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Post) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.post") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.post") + if reader := instrument.RecordRequestBody(ctx, "connector.post", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.post") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Post query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a post.Response +func (r Post) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err 
!= nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Post headers map. +func (r *Post) Header(key, value string) *Post { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Post) ErrorTrace(errortrace bool) *Post { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Post) FilterPath(filterpaths ...string) *Post { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Post) Human(human bool) *Post { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Post) Pretty(pretty bool) *Post { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: description +func (r *Post) Description(description string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// API name: index_name +func (r *Post) IndexName(indexname string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexName = &indexname + + return r +} + +// API name: is_native +func (r *Post) IsNative(isnative bool) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IsNative = &isnative + + return r +} + +// API name: language +func (r *Post) Language(language string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Language = &language + + return r +} + +// API name: name +func (r *Post) Name(name string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// API name: service_type +func (r *Post) ServiceType(servicetype string) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceType = &servicetype + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/request.go new file mode 100644 index 000000000..6bcb0853b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/request.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package post + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package post +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/post/ConnectorPostRequest.ts#L22-L52 +type Request struct { + Description *string `json:"description,omitempty"` + IndexName *string `json:"index_name,omitempty"` + IsNative *bool `json:"is_native,omitempty"` + Language *string `json:"language,omitempty"` + Name *string `json:"name,omitempty"` + ServiceType *string `json:"service_type,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Post request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "is_native": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsNative", err) + } + s.IsNative = &value + case bool: + s.IsNative = &v + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "service_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceType = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/response.go new file mode 100644 index 000000000..663443b30 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/post/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package post + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package post +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/post/ConnectorPostResponse.ts#L23-L28 +type Response struct { + Id string `json:"id"` + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/put.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/put.go new file mode 100644 index 000000000..758687f42 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/put.go @@ -0,0 +1,432 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a connector. +package put + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Put struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPut type alias for index. 
+type NewPut func() *Put + +// NewPutFunc returns a new instance of Put with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutFunc(tp elastictransport.Interface) NewPut { + return func() *Put { + n := New(tp) + + return n + } +} + +// Create or update a connector. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put +func New(tp elastictransport.Interface) *Put { + r := &Put{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Put) Raw(raw io.Reader) *Put { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Put) Request(req *Request) *Put { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Put) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Put: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + + method = http.MethodPut + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
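
The path selection in HttpRequest above gives Put two shapes: with ConnectorId set (connectoridMask in paramSet) it issues PUT /_connector/{connector_id}; otherwise it falls back to PUT /_connector and the ID is auto-generated server-side. A sketch of the explicit-ID form, again assuming es.Connector.Put wiring; all identifiers are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// PUT /_connector/my-connector: idempotent create-or-update under a
	// caller-chosen ID. Dropping ConnectorId switches to PUT /_connector.
	res, err := es.Connector.Put().
		ConnectorId("my-connector").
		IndexName("search-my-index").
		ServiceType("google_drive").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Result)
}
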
+func (r Put) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.put") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.put") + if reader := instrument.RecordRequestBody(ctx, "connector.put", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.put") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Put query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a put.Response +func (r Put) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Put headers map. +func (r *Put) Header(key, value string) *Put { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be created or updated. ID is +// auto-generated if not provided. +// API Name: connectorid +func (r *Put) ConnectorId(connectorid string) *Put { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Put) ErrorTrace(errortrace bool) *Put { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Put) FilterPath(filterpaths ...string) *Put { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Put) Human(human bool) *Put { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Put) Pretty(pretty bool) *Put { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: description +func (r *Put) Description(description string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// API name: index_name +func (r *Put) IndexName(indexname string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexName = &indexname + + return r +} + +// API name: is_native +func (r *Put) IsNative(isnative bool) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IsNative = &isnative + + return r +} + +// API name: language +func (r *Put) Language(language string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Language = &language + + return r +} + +// API name: name +func (r *Put) Name(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// API name: service_type +func (r *Put) ServiceType(servicetype string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceType = &servicetype + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/request.go new file mode 100644 index 000000000..b27c8287d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/request.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
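The per-field setters above build the request body incrementally, allocating it on first use. An end-to-end sketch, assuming the v9 typed client wires this endpoint under es.Connector.Put as in the previous major; the address, connector ID, and service type are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Illustrative single-node address; credentials omitted.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each setter lazily allocates the body via NewRequest.
	res, err := es.Connector.Put().
		ConnectorId("my-connector"). // assumed ID; auto-generated if omitted
		IndexName("search-my-connector").
		Name("My connector").
		ServiceType("google_drive").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Id, res.Result)
}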
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/put/ConnectorPutRequest.ts#L22-L58 +type Request struct { + Description *string `json:"description,omitempty"` + IndexName *string `json:"index_name,omitempty"` + IsNative *bool `json:"is_native,omitempty"` + Language *string `json:"language,omitempty"` + Name *string `json:"name,omitempty"` + ServiceType *string `json:"service_type,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Put request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "is_native": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsNative", err) + } + s.IsNative = &value + case bool: + s.IsNative = &v + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "service_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceType = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/response.go new file mode 100644 index 000000000..3e7b012c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/put/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/put/ConnectorPutResponse.ts#L23-L28 +type Response struct { + Id string `json:"id"` + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost/secret_post.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost/secret_post.go new file mode 100644 index 000000000..661674af0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost/secret_post.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Creates a secret for a Connector. +package secretpost + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SecretPost struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSecretPost type alias for index. +type NewSecretPost func() *SecretPost + +// NewSecretPostFunc returns a new instance of SecretPost with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewSecretPostFunc(tp elastictransport.Interface) NewSecretPost { + return func() *SecretPost { + n := New(tp) + + return n + } +} + +// Creates a secret for a Connector. +func New(tp elastictransport.Interface) *SecretPost { + r := &SecretPost{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SecretPost) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_secret") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SecretPost) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.secret_post") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.secret_post") + if reader := instrument.RecordRequestBody(ctx, "connector.secret_post", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.secret_post") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SecretPost query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a secretpost.Response +func (r SecretPost) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
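Before the implementation below, a sketch of that boolean control flow; note that this generated surface exposes no body setter, so a payload, if the endpoint requires one, would have to be attached at another layer:

package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Do delegates to IsSuccess: any 2xx becomes true, a 404 becomes
	// (false, nil), and other statuses surface as errors.
	ok, err := secretpost.New(tp).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("secret created:", ok)
}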
+func (r SecretPost) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.secret_post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SecretPost query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SecretPost headers map. +func (r *SecretPost) Header(key, value string) *SecretPost { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/response.go new file mode 100644 index 000000000..7d0bbbf8f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobcancel + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package syncjobcancel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_cancel/SyncJobCancelResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/sync_job_cancel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/sync_job_cancel.go new file mode 100644 index 000000000..8b06d48a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel/sync_job_cancel.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Cancel a connector sync job. +// +// Cancel a connector sync job, which sets the status to cancelling and updates +// `cancellation_requested_at` to the current time. +// The connector service is then responsible for setting the status of connector +// sync jobs to cancelled. +package syncjobcancel + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobCancel struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobCancel type alias for index. +type NewSyncJobCancel func(connectorsyncjobid string) *SyncJobCancel + +// NewSyncJobCancelFunc returns a new instance of SyncJobCancel with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobCancelFunc(tp elastictransport.Interface) NewSyncJobCancel { + return func(connectorsyncjobid string) *SyncJobCancel { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Cancel a connector sync job. +// +// Cancel a connector sync job, which sets the status to cancelling and updates +// `cancellation_requested_at` to the current time. +// The connector service is then responsible for setting the status of connector +// sync jobs to cancelled. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel +func New(tp elastictransport.Interface) *SyncJobCancel { + r := &SyncJobCancel{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
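Ahead of the request construction below, a usage sketch for the cancel endpoint; the package name connectorops, the transport, and the job ID are illustrative, with tp built for example as in the earlier Put sketch:

package connectorops // hypothetical helper package

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel"
)

// CancelJob issues PUT /_connector/_sync_job/{id}/_cancel.
func CancelJob(ctx context.Context, tp elastictransport.Interface, id string) error {
	res, err := syncjobcancel.NewSyncJobCancelFunc(tp)(id).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("cancel result:", res.Result)
	return nil
}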
+func (r *SyncJobCancel) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SyncJobCancel) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_cancel") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_cancel") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_cancel", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_cancel") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobCancel query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobcancel.Response +func (r SyncJobCancel) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_cancel") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SyncJobCancel) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_cancel") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobCancel query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobCancel headers map. +func (r *SyncJobCancel) Header(key, value string) *SyncJobCancel { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job +// API Name: connectorsyncjobid +func (r *SyncJobCancel) _connectorsyncjobid(connectorsyncjobid string) *SyncJobCancel { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobCancel) ErrorTrace(errortrace bool) *SyncJobCancel { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobCancel) FilterPath(filterpaths ...string) *SyncJobCancel { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobCancel) Human(human bool) *SyncJobCancel { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *SyncJobCancel) Pretty(pretty bool) *SyncJobCancel { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/response.go new file mode 100644 index 000000000..b4d15380c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobcheckin + +// Response holds the response body struct for the package syncjobcheckin +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_check_in/SyncJobCheckInResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/sync_job_check_in.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/sync_job_check_in.go new file mode 100644 index 000000000..bbe82d67a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin/sync_job_check_in.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. 
+// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobcheckin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobCheckIn struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobCheckIn type alias for index. +type NewSyncJobCheckIn func(connectorsyncjobid string) *SyncJobCheckIn + +// NewSyncJobCheckInFunc returns a new instance of SyncJobCheckIn with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobCheckInFunc(tp elastictransport.Interface) NewSyncJobCheckIn { + return func(connectorsyncjobid string) *SyncJobCheckIn { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Check in a connector sync job. +// Check in a connector sync job and set the `last_seen` field to the current +// time before updating it in the internal index. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in +func New(tp elastictransport.Interface) *SyncJobCheckIn { + r := &SyncJobCheckIn{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
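As with the cancel endpoint, a short sketch before the request construction below; the helper package and job ID remain illustrative:

package connectorops // hypothetical helper package

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin"
)

// CheckIn bumps last_seen for a running job. Because the response body
// is empty, the boolean IsSuccess form is the convenient entry point.
func CheckIn(ctx context.Context, tp elastictransport.Interface, id string) (bool, error) {
	return syncjobcheckin.NewSyncJobCheckInFunc(tp)(id).IsSuccess(ctx)
}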
+func (r *SyncJobCheckIn) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_check_in") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SyncJobCheckIn) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_check_in") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_check_in", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_check_in") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobCheckIn query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobcheckin.Response +func (r SyncJobCheckIn) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SyncJobCheckIn) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_check_in") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobCheckIn query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobCheckIn headers map. +func (r *SyncJobCheckIn) Header(key, value string) *SyncJobCheckIn { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job to be checked in. +// API Name: connectorsyncjobid +func (r *SyncJobCheckIn) _connectorsyncjobid(connectorsyncjobid string) *SyncJobCheckIn { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobCheckIn) ErrorTrace(errortrace bool) *SyncJobCheckIn { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobCheckIn) FilterPath(filterpaths ...string) *SyncJobCheckIn { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobCheckIn) Human(human bool) *SyncJobCheckIn { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *SyncJobCheckIn) Pretty(pretty bool) *SyncJobCheckIn { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/request.go new file mode 100644 index 000000000..c4273f239 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobclaim + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package syncjobclaim +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_claim/SyncJobClaimRequest.ts#L23-L61 +type Request struct { + + // SyncCursor The cursor object from the last incremental sync job. + // This should reference the `sync_cursor` field in the connector state for + // which the job runs. + SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` + // WorkerHostname The host name of the current system that will run the job. + WorkerHostname string `json:"worker_hostname"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobclaim request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/response.go new file mode 100644 index 000000000..87369f542 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobclaim + +// Response holds the response body struct for the package syncjobclaim +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_claim/SyncJobClaimResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/sync_job_claim.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/sync_job_claim.go new file mode 100644 index 000000000..20e10f0f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim/sync_job_claim.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Claim a connector sync job. +// This action updates the job status to `in_progress` and sets the `last_seen` +// and `started_at` timestamps to the current time. +// Additionally, it can set the `sync_cursor` property for the sync job. +// +// This API is not intended for direct connector management by users. +// It supports the implementation of services that utilize the connector +// protocol to communicate with Elasticsearch. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobclaim + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
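Unlike the check-in and cancel endpoints, SyncJobClaim carries a typed body, defined in request.go above. A sketch of building that body directly, ahead of the builder type below; the hostname and cursor values are illustrative:

package connectorops // hypothetical helper package

import (
	"encoding/json"

	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim"
)

// NewClaimBody builds the claim payload directly. The same body could
// also be produced from serialized JSON via the generated FromJSON.
func NewClaimBody() *syncjobclaim.Request {
	req := syncjobclaim.NewRequest()
	req.WorkerHostname = "worker-01"
	// SyncCursor is a json.RawMessage, so pre-encoded state passes through as-is.
	req.SyncCursor = json.RawMessage(`{"last_doc_id":"42"}`)
	return req
}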
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobClaim struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobClaim type alias for index. +type NewSyncJobClaim func(connectorsyncjobid string) *SyncJobClaim + +// NewSyncJobClaimFunc returns a new instance of SyncJobClaim with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobClaimFunc(tp elastictransport.Interface) NewSyncJobClaim { + return func(connectorsyncjobid string) *SyncJobClaim { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Claim a connector sync job. +// This action updates the job status to `in_progress` and sets the `last_seen` +// and `started_at` timestamps to the current time. +// Additionally, it can set the `sync_cursor` property for the sync job. +// +// This API is not intended for direct connector management by users. +// It supports the implementation of services that utilize the connector +// protocol to communicate with Elasticsearch. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim +func New(tp elastictransport.Interface) *SyncJobClaim { + r := &SyncJobClaim{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobClaim) Raw(raw io.Reader) *SyncJobClaim { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobClaim) Request(req *Request) *SyncJobClaim { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobClaim) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobClaim: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_claim") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobClaim) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_claim") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_claim", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_claim") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobClaim query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobclaim.Response +func (r SyncJobClaim) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_claim") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobClaim headers map. +func (r *SyncJobClaim) Header(key, value string) *SyncJobClaim { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobClaim) _connectorsyncjobid(connectorsyncjobid string) *SyncJobClaim { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *SyncJobClaim) ErrorTrace(errortrace bool) *SyncJobClaim { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobClaim) FilterPath(filterpaths ...string) *SyncJobClaim { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobClaim) Human(human bool) *SyncJobClaim { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobClaim) Pretty(pretty bool) *SyncJobClaim { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The cursor object from the last incremental sync job. +// This should reference the `sync_cursor` field in the connector state for +// which the job runs. +// API name: sync_cursor +func (r *SyncJobClaim) SyncCursor(synccursor any) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := synccursor.(type) { + case json.RawMessage: + r.req.SyncCursor = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(synccursor) + if err != nil { + return err + } + r.req.SyncCursor = data + return nil + }) + } + return r +} + +// The host name of the current system that will run the job. +// API name: worker_hostname +func (r *SyncJobClaim) WorkerHostname(workerhostname string) *SyncJobClaim { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WorkerHostname = workerhostname + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/response.go new file mode 100644 index 000000000..077ed7e40 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
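The SyncCursor setter above defers JSON encoding of arbitrary cursor values until the request is built. A sketch of a complete claim call under the same illustrative assumptions as the earlier helpers:

package connectorops // hypothetical helper package

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim"
)

// ClaimJob marks a pending job in_progress. Any JSON-marshalable cursor
// works: values that are not a json.RawMessage are encoded lazily by the
// deferred function queued in the SyncCursor setter.
func ClaimJob(ctx context.Context, tp elastictransport.Interface, id string) error {
	_, err := syncjobclaim.NewSyncJobClaimFunc(tp)(id).
		WorkerHostname("worker-01").
		SyncCursor(map[string]string{"last_doc_id": "42"}).
		Do(ctx)
	return err
}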
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobdelete + +// Response holds the response body struct for the package syncjobdelete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_delete/SyncJobDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/sync_job_delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/sync_job_delete.go new file mode 100644 index 000000000..c6d0e1cda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete/sync_job_delete.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a connector sync job. +// +// Remove a connector sync job and its associated data. +// This is a destructive action that is not recoverable. +package syncjobdelete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobDelete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobDelete type alias for index. +type NewSyncJobDelete func(connectorsyncjobid string) *SyncJobDelete + +// NewSyncJobDeleteFunc returns a new instance of SyncJobDelete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
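
A hedged usage sketch of the delete endpoint, pairing the constructor documented above with the Response defined earlier in this file; tp is an assumed, already-configured elastictransport.Interface and the job ID is illustrative (imports omitted).

    res, err := syncjobdelete.NewSyncJobDeleteFunc(tp)("my-sync-job-id").Do(context.Background())
    if err != nil {
        log.Fatalf("delete failed: %v", err)
    }
    // On success the body is {"acknowledged": true}; failures surface as an error instead.
    fmt.Println("acknowledged:", res.Acknowledged)
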
+func NewSyncJobDeleteFunc(tp elastictransport.Interface) NewSyncJobDelete { + return func(connectorsyncjobid string) *SyncJobDelete { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Delete a connector sync job. +// +// Remove a connector sync job and its associated data. +// This is a destructive action that is not recoverable. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete +func New(tp elastictransport.Interface) *SyncJobDelete { + r := &SyncJobDelete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SyncJobDelete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
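
Because HttpRequest is exported, the built request can be inspected (or executed by custom plumbing) without going through Perform. A short sketch under the same tp assumption; the expected method and path follow from the switch above.

    req, err := syncjobdelete.NewSyncJobDeleteFunc(tp)("my-sync-job-id").HttpRequest(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(req.Method)   // "DELETE"
    fmt.Println(req.URL.Path) // "/_connector/_sync_job/my-sync-job-id"
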
+func (r SyncJobDelete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_delete") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobDelete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobdelete.Response +func (r SyncJobDelete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
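
Do decodes non-2xx replies into *types.ElasticsearchError and returns them as the error value, so callers can branch on the HTTP status. A sketch, again assuming the tp transport:

    _, err := syncjobdelete.NewSyncJobDeleteFunc(tp)("missing-job").Do(context.Background())
    var esErr *types.ElasticsearchError
    if errors.As(err, &esErr) {
        // Status is backfilled from the HTTP status code when the body omits it.
        fmt.Println("server-side failure, status:", esErr.Status)
    }
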
+func (r SyncJobDelete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobDelete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobDelete headers map. +func (r *SyncJobDelete) Header(key, value string) *SyncJobDelete { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job to be deleted +// API Name: connectorsyncjobid +func (r *SyncJobDelete) _connectorsyncjobid(connectorsyncjobid string) *SyncJobDelete { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobDelete) ErrorTrace(errortrace bool) *SyncJobDelete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobDelete) FilterPath(filterpaths ...string) *SyncJobDelete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobDelete) Human(human bool) *SyncJobDelete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobDelete) Pretty(pretty bool) *SyncJobDelete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/request.go new file mode 100644 index 000000000..8b1dbad69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjoberror + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package syncjoberror +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_error/SyncJobErrorRequest.ts#L23-L52 +type Request struct { + + // Error The error for the connector sync job error field. + Error string `json:"error"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjoberror request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/response.go new file mode 100644 index 000000000..ab9888a89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjoberror + +// Response holds the response body struct for the package syncjoberror +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_error/SyncJobErrorResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/sync_job_error.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/sync_job_error.go new file mode 100644 index 000000000..c4bab0375 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror/sync_job_error.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Set a connector sync job error. +// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjoberror + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobError struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobError type alias for index. +type NewSyncJobError func(connectorsyncjobid string) *SyncJobError + +// NewSyncJobErrorFunc returns a new instance of SyncJobError with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
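
A usage sketch for this endpoint, assuming the usual tp transport; the Error setter defined further down fills the request body, and the call flips the job's status to error (imports omitted).

    _, err := syncjoberror.NewSyncJobErrorFunc(tp)("my-sync-job-id").
        Error("upstream content source timed out"). // illustrative message
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
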
+func NewSyncJobErrorFunc(tp elastictransport.Interface) NewSyncJobError { + return func(connectorsyncjobid string) *SyncJobError { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set a connector sync job error. +// Set the `error` field for a connector sync job and set its `status` to +// `error`. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error +func New(tp elastictransport.Interface) *SyncJobError { + r := &SyncJobError{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobError) Raw(raw io.Reader) *SyncJobError { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobError) Request(req *Request) *SyncJobError { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SyncJobError) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobError: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_error") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
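
The body can also be supplied wholesale: FromJSON, defined in request.go earlier in this diff, validates an arbitrary payload into a Request, while Raw bypasses serialisation entirely and takes precedence over Request. A sketch under the same tp assumption:

    req, err := syncjoberror.NewRequest().FromJSON(`{"error":"boom"}`)
    if err != nil {
        log.Fatal(err)
    }
    _, err = syncjoberror.NewSyncJobErrorFunc(tp)("my-sync-job-id").
        Request(req).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
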
+func (r SyncJobError) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_error") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_error", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_error") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobError query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjoberror.Response +func (r SyncJobError) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobError headers map. +func (r *SyncJobError) Header(key, value string) *SyncJobError { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier for the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobError) _connectorsyncjobid(connectorsyncjobid string) *SyncJobError { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *SyncJobError) ErrorTrace(errortrace bool) *SyncJobError { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobError) FilterPath(filterpaths ...string) *SyncJobError { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobError) Human(human bool) *SyncJobError { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobError) Pretty(pretty bool) *SyncJobError { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The error for the connector sync job error field. +// API name: error +func (r *SyncJobError) Error(error string) *SyncJobError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Error = error + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/response.go new file mode 100644 index 000000000..c3cf27afa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/response.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobget + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +// Response holds the response body struct for the package syncjobget +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_get/SyncJobGetResponse.ts#L22-L25 +type Response struct { + CancelationRequestedAt types.DateTime `json:"cancelation_requested_at,omitempty"` + CanceledAt types.DateTime `json:"canceled_at,omitempty"` + CompletedAt types.DateTime `json:"completed_at,omitempty"` + Connector types.SyncJobConnectorReference `json:"connector"` + CreatedAt types.DateTime `json:"created_at"` + DeletedDocumentCount int64 `json:"deleted_document_count"` + Error *string `json:"error,omitempty"` + Id string `json:"id"` + IndexedDocumentCount int64 `json:"indexed_document_count"` + IndexedDocumentVolume int64 `json:"indexed_document_volume"` + JobType syncjobtype.SyncJobType `json:"job_type"` + LastSeen types.DateTime `json:"last_seen,omitempty"` + Metadata map[string]json.RawMessage `json:"metadata"` + StartedAt types.DateTime `json:"started_at,omitempty"` + Status syncstatus.SyncStatus `json:"status"` + TotalDocumentCount int64 `json:"total_document_count"` + TriggerMethod syncjobtriggermethod.SyncJobTriggerMethod `json:"trigger_method"` + WorkerHostname *string `json:"worker_hostname,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Metadata: make(map[string]json.RawMessage, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cancelation_requested_at": + if err := dec.Decode(&s.CancelationRequestedAt); err != nil { + return fmt.Errorf("%s | %w", "CancelationRequestedAt", err) + } + + case "canceled_at": + if err := dec.Decode(&s.CanceledAt); err != nil { + return fmt.Errorf("%s | %w", "CanceledAt", err) + } + + case "completed_at": + if err := dec.Decode(&s.CompletedAt); err != nil { + return fmt.Errorf("%s | %w", "CompletedAt", err) + } + + case "connector": + if err := dec.Decode(&s.Connector); err != nil { + return fmt.Errorf("%s | %w", "Connector", err) + } + + case "created_at": + if err := dec.Decode(&s.CreatedAt); err != nil { + return fmt.Errorf("%s | %w", "CreatedAt", err) + } + + case "deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocumentCount", err) + } + s.DeletedDocumentCount = value + case float64: + f := int64(v) + s.DeletedDocumentCount = f + } + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", 
err) + } + + case "indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentCount", err) + } + s.IndexedDocumentCount = value + case float64: + f := int64(v) + s.IndexedDocumentCount = f + } + + case "indexed_document_volume": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentVolume", err) + } + s.IndexedDocumentVolume = value + case float64: + f := int64(v) + s.IndexedDocumentVolume = f + } + + case "job_type": + if err := dec.Decode(&s.JobType); err != nil { + return fmt.Errorf("%s | %w", "JobType", err) + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "metadata": + if s.Metadata == nil { + s.Metadata = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "started_at": + if err := dec.Decode(&s.StartedAt); err != nil { + return fmt.Errorf("%s | %w", "StartedAt", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "total_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocumentCount", err) + } + s.TotalDocumentCount = value + case float64: + f := int64(v) + s.TotalDocumentCount = f + } + + case "trigger_method": + if err := dec.Decode(&s.TriggerMethod); err != nil { + return fmt.Errorf("%s | %w", "TriggerMethod", err) + } + + case "worker_hostname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WorkerHostname", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WorkerHostname = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/sync_job_get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/sync_job_get.go new file mode 100644 index 000000000..98c333a2c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget/sync_job_get.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a connector sync job. 
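
The custom UnmarshalJSON above deliberately accepts the document counters either as JSON numbers or as quoted strings. A standalone sketch of that leniency (imports omitted):

    var r syncjobget.Response
    data := []byte(`{"id":"j1","deleted_document_count":"7","total_document_count":12}`)
    if err := json.Unmarshal(data, &r); err != nil {
        log.Fatal(err)
    }
    fmt.Println(r.DeletedDocumentCount, r.TotalDocumentCount) // 7 12
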
+package syncjobget + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobGet struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobGet type alias for index. +type NewSyncJobGet func(connectorsyncjobid string) *SyncJobGet + +// NewSyncJobGetFunc returns a new instance of SyncJobGet with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobGetFunc(tp elastictransport.Interface) NewSyncJobGet { + return func(connectorsyncjobid string) *SyncJobGet { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Get a connector sync job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get +func New(tp elastictransport.Interface) *SyncJobGet { + r := &SyncJobGet{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SyncJobGet) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobGet) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_get") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobGet query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobget.Response +func (r SyncJobGet) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
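
IsSuccess gives a lightweight existence probe: any 2xx yields true, a 404 yields false with a nil error, and other statuses are reported as errors. A sketch with the usual tp assumption:

    exists, err := syncjobget.NewSyncJobGetFunc(tp)("my-sync-job-id").IsSuccess(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if !exists {
        fmt.Println("sync job not found")
    }
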
+func (r SyncJobGet) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobGet query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobGet headers map. +func (r *SyncJobGet) Header(key, value string) *SyncJobGet { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job +// API Name: connectorsyncjobid +func (r *SyncJobGet) _connectorsyncjobid(connectorsyncjobid string) *SyncJobGet { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobGet) ErrorTrace(errortrace bool) *SyncJobGet { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobGet) FilterPath(filterpaths ...string) *SyncJobGet { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobGet) Human(human bool) *SyncJobGet { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobGet) Pretty(pretty bool) *SyncJobGet { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/response.go new file mode 100644 index 000000000..f88a688e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjoblist + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package syncjoblist +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_list/SyncJobListResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Results []types.ConnectorSyncJob `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/sync_job_list.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/sync_job_list.go new file mode 100644 index 000000000..d9c40100f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist/sync_job_list.go @@ -0,0 +1,373 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get all connector sync jobs. +// +// Get information about all stored connector sync jobs listed by their creation +// date in ascending order. +package syncjoblist + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobList struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobList type alias for index. 
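
A hedged sketch of listing sync jobs with the query-string filters defined further down (ConnectorId, Status, From, Size). It assumes the usual tp transport and that the generated syncstatus package exports a Completed constant; identifiers are illustrative.

    resp, err := syncjoblist.NewSyncJobListFunc(tp)().
        ConnectorId("my-connector").
        Status(syncstatus.Completed). // assumed enum constant for "completed"
        From(0).
        Size(20).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("showing %d of %d jobs\n", len(resp.Results), resp.Count)
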
+type NewSyncJobList func() *SyncJobList + +// NewSyncJobListFunc returns a new instance of SyncJobList with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobListFunc(tp elastictransport.Interface) NewSyncJobList { + return func() *SyncJobList { + n := New(tp) + + return n + } +} + +// Get all connector sync jobs. +// +// Get information about all stored connector sync jobs listed by their creation +// date in ascending order. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list +func New(tp elastictransport.Interface) *SyncJobList { + r := &SyncJobList{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SyncJobList) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobList) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_list") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_list") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_list", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_list") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobList query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjoblist.Response +func (r SyncJobList) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r SyncJobList) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SyncJobList query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SyncJobList headers map. +func (r *SyncJobList) Header(key, value string) *SyncJobList { + r.headers.Set(key, value) + + return r +} + +// From Starting offset (default: 0) +// API name: from +func (r *SyncJobList) From(from int) *SyncJobList { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies a max number of results to get +// API name: size +func (r *SyncJobList) Size(size int) *SyncJobList { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Status A sync job status to fetch connector sync jobs for +// API name: status +func (r *SyncJobList) Status(status syncstatus.SyncStatus) *SyncJobList { + r.values.Set("status", status.String()) + + return r +} + +// ConnectorId A connector id to fetch connector sync jobs for +// API name: connector_id +func (r *SyncJobList) ConnectorId(id string) *SyncJobList { + r.values.Set("connector_id", id) + + return r +} + +// JobType A comma-separated list of job types to fetch the sync jobs for +// API name: job_type +func (r *SyncJobList) JobType(jobtypes ...syncjobtype.SyncJobType) *SyncJobList { + tmp := []string{} + for _, item := range jobtypes { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("job_type", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobList) ErrorTrace(errortrace bool) *SyncJobList { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobList) FilterPath(filterpaths ...string) *SyncJobList { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobList) Human(human bool) *SyncJobList { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *SyncJobList) Pretty(pretty bool) *SyncJobList { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/request.go new file mode 100644 index 000000000..b0c4c46f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/request.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobpost + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype" +) + +// Request holds the request body struct for the package syncjobpost +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_post/SyncJobPostRequest.ts#L23-L51 +type Request struct { + + // Id The id of the associated connector + Id string `json:"id"` + JobType *syncjobtype.SyncJobType `json:"job_type,omitempty"` + TriggerMethod *syncjobtriggermethod.SyncJobTriggerMethod `json:"trigger_method,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobpost request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "job_type": + if err := dec.Decode(&s.JobType); err != nil { + return fmt.Errorf("%s | %w", "JobType", err) + } + + case "trigger_method": + if err := dec.Decode(&s.TriggerMethod); err != nil { + return fmt.Errorf("%s | %w", "TriggerMethod", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/response.go new file mode 100644 index 000000000..e5b56d599 --- 
/dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobpost + +// Response holds the response body struct for the package syncjobpost +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_post/SyncJobPostResponse.ts#L22-L26 +type Response struct { + Id string `json:"id"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/sync_job_post.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/sync_job_post.go new file mode 100644 index 000000000..4ec7cd211 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost/sync_job_post.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a connector sync job. +// +// Create a connector sync job document in the internal index and initialize its +// counters and timestamps with default values. 
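Worth noting before the builder code: the Request type defined above can be populated from raw JSON via FromJSON. A hedged sketch follows; the "full" and "on_demand" strings are assumed serializations of the syncjobtype and syncjobtriggermethod enums, and the id is hypothetical.

req, err := syncjobpost.NewRequest().FromJSON(`{
	"id": "my-connector",
	"job_type": "full",
	"trigger_method": "on_demand"
}`)
if err != nil {
	log.Fatal(err)
}
_ = req // handed to the builder in the next sketch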
+package syncjobpost + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobPost struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobPost type alias for index. +type NewSyncJobPost func() *SyncJobPost + +// NewSyncJobPostFunc returns a new instance of SyncJobPost with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobPostFunc(tp elastictransport.Interface) NewSyncJobPost { + return func() *SyncJobPost { + n := New(tp) + + return n + } +} + +// Create a connector sync job. +// +// Create a connector sync job document in the internal index and initialize its +// counters and timestamps with default values. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post +func New(tp elastictransport.Interface) *SyncJobPost { + r := &SyncJobPost{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobPost) Raw(raw io.Reader) *SyncJobPost { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobPost) Request(req *Request) *SyncJobPost { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobPost) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobPost: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SyncJobPost) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_post") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_post") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_post", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_post") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobPost query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobpost.Response +func (r SyncJobPost) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobPost headers map. +func (r *SyncJobPost) Header(key, value string) *SyncJobPost { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SyncJobPost) ErrorTrace(errortrace bool) *SyncJobPost { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobPost) FilterPath(filterpaths ...string) *SyncJobPost { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobPost) Human(human bool) *SyncJobPost { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
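Putting Request and Do together: a hedged sketch reusing the es client and the req value from the earlier sketches (the es.Connector.SyncJobPost accessor name is an assumption). The typed Id, JobType, and TriggerMethod setters defined just below are the fluent alternative to Request.

created, err := es.Connector.SyncJobPost().
	Request(req).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
// The typed Response carries the id of the newly created sync job document.
fmt.Println("new sync job id:", created.Id)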
+// API name: pretty +func (r *SyncJobPost) Pretty(pretty bool) *SyncJobPost { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The id of the associated connector +// API name: id +func (r *SyncJobPost) Id(id string) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Id = id + + return r +} + +// API name: job_type +func (r *SyncJobPost) JobType(jobtype syncjobtype.SyncJobType) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.JobType = &jobtype + return r +} + +// API name: trigger_method +func (r *SyncJobPost) TriggerMethod(triggermethod syncjobtriggermethod.SyncJobTriggerMethod) *SyncJobPost { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TriggerMethod = &triggermethod + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/request.go new file mode 100644 index 000000000..101f66d0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/request.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobupdatestats + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_update_stats/SyncJobUpdateStatsRequest.ts#L24-L78 +type Request struct { + + // DeletedDocumentCount The number of documents the sync job deleted. + DeletedDocumentCount int64 `json:"deleted_document_count"` + // IndexedDocumentCount The number of documents the sync job indexed. + IndexedDocumentCount int64 `json:"indexed_document_count"` + // IndexedDocumentVolume The total size of the data (in MiB) the sync job indexed. + IndexedDocumentVolume int64 `json:"indexed_document_volume"` + // LastSeen The timestamp to use in the `last_seen` property for the connector sync job. + LastSeen types.Duration `json:"last_seen,omitempty"` + // Metadata The connector-specific metadata. 
+ Metadata types.Metadata `json:"metadata,omitempty"` + // TotalDocumentCount The total number of documents in the target index after the sync job + // finished. + TotalDocumentCount *int `json:"total_document_count,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Syncjobupdatestats request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocumentCount", err) + } + s.DeletedDocumentCount = value + case float64: + f := int64(v) + s.DeletedDocumentCount = f + } + + case "indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentCount", err) + } + s.IndexedDocumentCount = value + case float64: + f := int64(v) + s.IndexedDocumentCount = f + } + + case "indexed_document_volume": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentVolume", err) + } + s.IndexedDocumentVolume = value + case float64: + f := int64(v) + s.IndexedDocumentVolume = f + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "total_document_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocumentCount", err) + } + s.TotalDocumentCount = &value + case float64: + f := int(v) + s.TotalDocumentCount = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/response.go new file mode 100644 index 000000000..7155cdace --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package syncjobupdatestats + +// Response holds the response body struct for the package syncjobupdatestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/sync_job_update_stats/SyncJobUpdateStatsResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go new file mode 100644 index 000000000..f7b994ad4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats/sync_job_update_stats.go @@ -0,0 +1,459 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. +// You can also update `last_seen`. +// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package syncjobupdatestats + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectorsyncjobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SyncJobUpdateStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorsyncjobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSyncJobUpdateStats type alias for index. +type NewSyncJobUpdateStats func(connectorsyncjobid string) *SyncJobUpdateStats + +// NewSyncJobUpdateStatsFunc returns a new instance of SyncJobUpdateStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSyncJobUpdateStatsFunc(tp elastictransport.Interface) NewSyncJobUpdateStats { + return func(connectorsyncjobid string) *SyncJobUpdateStats { + n := New(tp) + + n._connectorsyncjobid(connectorsyncjobid) + + return n + } +} + +// Set the connector sync job stats. +// Stats include: `deleted_document_count`, `indexed_document_count`, +// `indexed_document_volume`, and `total_document_count`. +// You can also update `last_seen`. +// This API is mainly used by the connector service for updating sync job +// information. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats +func New(tp elastictransport.Interface) *SyncJobUpdateStats { + r := &SyncJobUpdateStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SyncJobUpdateStats) Raw(raw io.Reader) *SyncJobUpdateStats { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SyncJobUpdateStats) Request(req *Request) *SyncJobUpdateStats { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SyncJobUpdateStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SyncJobUpdateStats: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectorsyncjobidMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + path.WriteString("_sync_job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorsyncjobid", r.connectorsyncjobid) + } + path.WriteString(r.connectorsyncjobid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SyncJobUpdateStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.sync_job_update_stats") + if reader := instrument.RecordRequestBody(ctx, "connector.sync_job_update_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.sync_job_update_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SyncJobUpdateStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a syncjobupdatestats.Response +func (r SyncJobUpdateStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.sync_job_update_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SyncJobUpdateStats headers map. +func (r *SyncJobUpdateStats) Header(key, value string) *SyncJobUpdateStats { + r.headers.Set(key, value) + + return r +} + +// ConnectorSyncJobId The unique identifier of the connector sync job. +// API Name: connectorsyncjobid +func (r *SyncJobUpdateStats) _connectorsyncjobid(connectorsyncjobid string) *SyncJobUpdateStats { + r.paramSet |= connectorsyncjobidMask + r.connectorsyncjobid = connectorsyncjobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *SyncJobUpdateStats) ErrorTrace(errortrace bool) *SyncJobUpdateStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SyncJobUpdateStats) FilterPath(filterpaths ...string) *SyncJobUpdateStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SyncJobUpdateStats) Human(human bool) *SyncJobUpdateStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SyncJobUpdateStats) Pretty(pretty bool) *SyncJobUpdateStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The number of documents the sync job deleted. +// API name: deleted_document_count +func (r *SyncJobUpdateStats) DeletedDocumentCount(deleteddocumentcount int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DeletedDocumentCount = deleteddocumentcount + + return r +} + +// The number of documents the sync job indexed. +// API name: indexed_document_count +func (r *SyncJobUpdateStats) IndexedDocumentCount(indexeddocumentcount int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexedDocumentCount = indexeddocumentcount + + return r +} + +// The total size of the data (in MiB) the sync job indexed. +// API name: indexed_document_volume +func (r *SyncJobUpdateStats) IndexedDocumentVolume(indexeddocumentvolume int64) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexedDocumentVolume = indexeddocumentvolume + + return r +} + +// The timestamp to use in the `last_seen` property for the connector sync job. +// API name: last_seen +func (r *SyncJobUpdateStats) LastSeen(duration types.DurationVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastSeen = *duration.DurationCaster() + + return r +} + +// The connector-specific metadata. +// API name: metadata +func (r *SyncJobUpdateStats) Metadata(metadata types.MetadataVariant) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The total number of documents in the target index after the sync job +// finished. 
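A hedged sketch of the progress-reporting flow these setters support, reusing the client and imports from the first sketch ("my-sync-job-id" is hypothetical and the accessor name is an assumption; TotalDocumentCount is the setter defined right after this aside).

// The response body for this endpoint is empty, so only the error matters.
_, err := es.Connector.SyncJobUpdateStats("my-sync-job-id").
	IndexedDocumentCount(1200).
	IndexedDocumentVolume(42). // MiB, per the field documentation above
	DeletedDocumentCount(3).
	TotalDocumentCount(1200).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}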
+// API name: total_document_count +func (r *SyncJobUpdateStats) TotalDocumentCount(totaldocumentcount int) *SyncJobUpdateStats { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TotalDocumentCount = &totaldocumentcount + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/response.go new file mode 100644 index 000000000..fde104bfb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateactivefiltering + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateactivefiltering +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_active_filtering/ConnectorUpdateActiveFilteringResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/update_active_filtering.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/update_active_filtering.go new file mode 100644 index 000000000..2cea1804a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering/update_active_filtering.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Activate the connector draft filter. +// +// Activates the valid draft filtering for a connector. +package updateactivefiltering + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateActiveFiltering struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateActiveFiltering type alias for index. +type NewUpdateActiveFiltering func(connectorid string) *UpdateActiveFiltering + +// NewUpdateActiveFilteringFunc returns a new instance of UpdateActiveFiltering with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateActiveFilteringFunc(tp elastictransport.Interface) NewUpdateActiveFiltering { + return func(connectorid string) *UpdateActiveFiltering { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Activate the connector draft filter. +// +// Activates the valid draft filtering for a connector. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering +func New(tp elastictransport.Interface) *UpdateActiveFiltering { + r := &UpdateActiveFiltering{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateActiveFiltering) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_filtering") + path.WriteString("/") + path.WriteString("_activate") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r UpdateActiveFiltering) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_active_filtering") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_active_filtering") + if reader := instrument.RecordRequestBody(ctx, "connector.update_active_filtering", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_active_filtering") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateActiveFiltering query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateactivefiltering.Response +func (r UpdateActiveFiltering) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_active_filtering") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r UpdateActiveFiltering) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_active_filtering") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the UpdateActiveFiltering query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the UpdateActiveFiltering headers map. +func (r *UpdateActiveFiltering) Header(key, value string) *UpdateActiveFiltering { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateActiveFiltering) _connectorid(connectorid string) *UpdateActiveFiltering { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateActiveFiltering) ErrorTrace(errortrace bool) *UpdateActiveFiltering { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateActiveFiltering) FilterPath(filterpaths ...string) *UpdateActiveFiltering { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
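A hedged sketch for the activation endpoint above, reusing the es client from the first sketch. The Response's Result field uses the result enum imported by this package; result.Updated is an assumed value of that enum.

res, err := es.Connector.UpdateActiveFiltering("my-connector").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("draft filtering applied:", res.Result == result.Updated)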
+// API name: human +func (r *UpdateActiveFiltering) Human(human bool) *UpdateActiveFiltering { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateActiveFiltering) Pretty(pretty bool) *UpdateActiveFiltering { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/request.go new file mode 100644 index 000000000..768ce1b1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateapikeyid + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updateapikeyid +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDRequest.ts#L21-L53 +type Request struct { + ApiKeyId *string `json:"api_key_id,omitempty"` + ApiKeySecretId *string `json:"api_key_secret_id,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateapikeyid request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/response.go new file mode 100644 index 000000000..f4e73a6ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateapikeyid + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateapikeyid +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_api_key_id/ConnectorUpdateAPIKeyIDResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/update_api_key_id.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/update_api_key_id.go new file mode 100644 index 000000000..a1a17bcbc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid/update_api_key_id.go @@ -0,0 +1,396 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector API key ID. +// +// Update the `api_key_id` and `api_key_secret_id` fields of a connector. +// You can specify the ID of the API key used for authorization and the ID of +// the connector secret where the API key is stored. +// The connector secret ID is required only for Elastic managed (native) +// connectors. +// Self-managed connectors (connector clients) do not use this field. +package updateapikeyid + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateApiKeyId struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateApiKeyId type alias for index. +type NewUpdateApiKeyId func(connectorid string) *UpdateApiKeyId + +// NewUpdateApiKeyIdFunc returns a new instance of UpdateApiKeyId with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateApiKeyIdFunc(tp elastictransport.Interface) NewUpdateApiKeyId { + return func(connectorid string) *UpdateApiKeyId { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector API key ID. +// +// Update the `api_key_id` and `api_key_secret_id` fields of a connector. +// You can specify the ID of the API key used for authorization and the ID of +// the connector secret where the API key is stored. +// The connector secret ID is required only for Elastic managed (native) +// connectors. +// Self-managed connectors (connector clients) do not use this field. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id +func New(tp elastictransport.Interface) *UpdateApiKeyId { + r := &UpdateApiKeyId{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateApiKeyId) Raw(raw io.Reader) *UpdateApiKeyId { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateApiKeyId) Request(req *Request) *UpdateApiKeyId { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateApiKeyId) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateApiKeyId: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_api_key_id") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateApiKeyId) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_api_key_id") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_api_key_id") + if reader := instrument.RecordRequestBody(ctx, "connector.update_api_key_id", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_api_key_id") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateApiKeyId query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateapikeyid.Response +func (r UpdateApiKeyId) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_api_key_id") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateApiKeyId headers map. +func (r *UpdateApiKeyId) Header(key, value string) *UpdateApiKeyId { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateApiKeyId) _connectorid(connectorid string) *UpdateApiKeyId { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateApiKeyId) ErrorTrace(errortrace bool) *UpdateApiKeyId { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateApiKeyId) FilterPath(filterpaths ...string) *UpdateApiKeyId { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateApiKeyId) Human(human bool) *UpdateApiKeyId { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateApiKeyId) Pretty(pretty bool) *UpdateApiKeyId { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: api_key_id +func (r *UpdateApiKeyId) ApiKeyId(apikeyid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ApiKeyId = &apikeyid + + return r +} + +// API name: api_key_secret_id +func (r *UpdateApiKeyId) ApiKeySecretId(apikeysecretid string) *UpdateApiKeyId { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ApiKeySecretId = &apikeysecretid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/request.go new file mode 100644 index 000000000..d07915a3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/request.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateconfiguration + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updateconfiguration +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_configuration/ConnectorUpdateConfigurationRequest.ts#L25-L55 +type Request struct { + Configuration types.ConnectorConfiguration `json:"configuration,omitempty"` + Values map[string]json.RawMessage `json:"values,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Values: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateconfiguration request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "configuration": + if err := dec.Decode(&s.Configuration); err != nil { + return fmt.Errorf("%s | %w", "Configuration", err) + } + + case "values": + if s.Values == nil { + s.Values = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Values); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/response.go new file mode 100644 index 000000000..689dc869e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
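Reviewer note: because `updateconfiguration.Request` keeps everything under `values` as `json.RawMessage`, payloads can be loaded wholesale or assembled key by key. A small sketch using only what the request.go above defines (`NewRequest`, `FromJSON`, the `Values` map); the config keys are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration"
)

func main() {
	// Load a full payload; entries under "values" stay as raw JSON rather
	// than being decoded into concrete types.
	req, err := updateconfiguration.NewRequest().FromJSON(`{
		"values": {"host": "127.0.0.1", "port": 5432}
	}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(req.Values["port"])) // 5432

	// Or assemble the map directly; NewRequest pre-allocates Values.
	req2 := updateconfiguration.NewRequest()
	req2.Values["enabled"] = json.RawMessage(`true`)
	fmt.Println(string(req2.Values["enabled"])) // true
}
```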
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateconfiguration + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateconfiguration +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_configuration/ConnectorUpdateConfigurationResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/update_configuration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/update_configuration.go new file mode 100644 index 000000000..55af8d058 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration/update_configuration.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector configuration. +// +// Update the configuration field in the connector document. +package updateconfiguration + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateConfiguration struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateConfiguration type alias for index. +type NewUpdateConfiguration func(connectorid string) *UpdateConfiguration + +// NewUpdateConfigurationFunc returns a new instance of UpdateConfiguration with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewUpdateConfigurationFunc(tp elastictransport.Interface) NewUpdateConfiguration { + return func(connectorid string) *UpdateConfiguration { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector configuration. +// +// Update the configuration field in the connector document. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration +func New(tp elastictransport.Interface) *UpdateConfiguration { + r := &UpdateConfiguration{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateConfiguration) Raw(raw io.Reader) *UpdateConfiguration { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateConfiguration) Request(req *Request) *UpdateConfiguration { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateConfiguration) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateConfiguration: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_configuration") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateConfiguration) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_configuration") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_configuration") + if reader := instrument.RecordRequestBody(ctx, "connector.update_configuration", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_configuration") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateConfiguration query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateconfiguration.Response +func (r UpdateConfiguration) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_configuration") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateConfiguration headers map. +func (r *UpdateConfiguration) Header(key, value string) *UpdateConfiguration { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateConfiguration) _connectorid(connectorid string) *UpdateConfiguration { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateConfiguration) ErrorTrace(errortrace bool) *UpdateConfiguration { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateConfiguration) FilterPath(filterpaths ...string) *UpdateConfiguration { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateConfiguration) Human(human bool) *UpdateConfiguration { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateConfiguration) Pretty(pretty bool) *UpdateConfiguration { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: configuration +func (r *UpdateConfiguration) Configuration(connectorconfiguration types.ConnectorConfigurationVariant) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Configuration = *connectorconfiguration.ConnectorConfigurationCaster() + + return r +} + +// API name: values +func (r *UpdateConfiguration) Values(values map[string]json.RawMessage) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Values = values + return r +} + +func (r *UpdateConfiguration) AddValue(key string, value json.RawMessage) *UpdateConfiguration { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Values == nil { + r.req.Values = make(map[string]json.RawMessage) + } else { + tmp = r.req.Values + } + + tmp[key] = value + + r.req.Values = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/request.go new file mode 100644 index 000000000..d96d1578d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/request.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
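Reviewer note: the `AddValue` helper above lazily initializes both the request and its `Values` map, which keeps partial configuration updates terse. A hedged end-to-end sketch, again assuming the `client.Connector` wiring, with placeholder address, connector id, and keys:

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	client, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each AddValue call initializes the request and map on first use, so a
	// partial update needs no explicit NewRequest.
	res, err := client.Connector.UpdateConfiguration("my-connector").
		AddValue("host", json.RawMessage(`"127.0.0.1"`)).
		AddValue("port", json.RawMessage(`5432`)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update result: %s", res.Result)
}
```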
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateerror + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package updateerror +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_error/ConnectorUpdateErrorRequest.ts#L23-L54 +type Request struct { + Error any `json:"error"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateerror request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/response.go new file mode 100644 index 000000000..3cb07213d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateerror + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateerror +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_error/ConnectorUpdateErrorResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/update_error.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/update_error.go new file mode 100644 index 000000000..5ef36e0e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror/update_error.go @@ -0,0 +1,382 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector error field. +// +// Set the error field for the connector. +// If the error provided in the request body is non-null, the connector’s status +// is updated to error. +// Otherwise, if the error is reset to null, the connector status is updated to +// connected. +package updateerror + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateError struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateError type alias for index. +type NewUpdateError func(connectorid string) *UpdateError + +// NewUpdateErrorFunc returns a new instance of UpdateError with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
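Reviewer note: the package comment above spells out the status semantics of this endpoint: a non-null error moves the connector to `error`, and resetting it to null moves it back to `connected`. Since `Request.Error` is `any` with no `omitempty` tag, passing nil really does serialise as JSON null. A sketch under the same `client.Connector` assumption as the earlier examples:

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	client, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Non-null error: connector status becomes "error".
	if _, err := client.Connector.UpdateError("my-connector").
		Error("sync failed: upstream unreachable").
		Do(ctx); err != nil {
		log.Fatal(err)
	}

	// Null error: status goes back to "connected". Error takes `any`,
	// so a plain nil marshals to a literal JSON null.
	if _, err := client.Connector.UpdateError("my-connector").
		Error(nil).
		Do(ctx); err != nil {
		log.Fatal(err)
	}
}
```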
+func NewUpdateErrorFunc(tp elastictransport.Interface) NewUpdateError { + return func(connectorid string) *UpdateError { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector error field. +// +// Set the error field for the connector. +// If the error provided in the request body is non-null, the connector’s status +// is updated to error. +// Otherwise, if the error is reset to null, the connector status is updated to +// connected. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error +func New(tp elastictransport.Interface) *UpdateError { + r := &UpdateError{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateError) Raw(raw io.Reader) *UpdateError { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateError) Request(req *Request) *UpdateError { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateError) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateError: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_error") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateError) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_error") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_error") + if reader := instrument.RecordRequestBody(ctx, "connector.update_error", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_error") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateError query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateerror.Response +func (r UpdateError) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_error") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateError headers map. +func (r *UpdateError) Header(key, value string) *UpdateError { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateError) _connectorid(connectorid string) *UpdateError { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateError) ErrorTrace(errortrace bool) *UpdateError { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateError) FilterPath(filterpaths ...string) *UpdateError { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateError) Human(human bool) *UpdateError { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateError) Pretty(pretty bool) *UpdateError { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: error +func (r *UpdateError) Error(error any) *UpdateError { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Error = error + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/request.go new file mode 100644 index 000000000..4b5f059d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefeatures + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatefeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_features/ConnectorUpdateFeaturesRequest.ts#L23-L61 +type Request struct { + Features types.ConnectorFeatures `json:"features"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefeatures request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/response.go new file mode 100644 index 000000000..4fdaedac5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefeatures + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatefeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_features/ConnectorUpdateFeaturesResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/update_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/update_features.go new file mode 100644 index 000000000..3b424e6c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures/update_features.go @@ -0,0 +1,400 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. +package updatefeatures + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFeatures struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFeatures type alias for index. +type NewUpdateFeatures func(connectorid string) *UpdateFeatures + +// NewUpdateFeaturesFunc returns a new instance of UpdateFeatures with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFeaturesFunc(tp elastictransport.Interface) NewUpdateFeatures { + return func(connectorid string) *UpdateFeatures { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector features. +// Update the connector features in the connector document. +// This API can be used to control the following aspects of a connector: +// +// * document-level security +// * incremental syncs +// * advanced sync rules +// * basic sync rules +// +// Normally, the running connector service automatically manages these features. +// However, you can use this API to override the default behavior. +// +// To sync data using self-managed connectors, you need to deploy the Elastic +// connector service on your own infrastructure. +// This service runs automatically on Elastic Cloud for Elastic managed +// connectors. 
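Reviewer note: rather than hand-building `types.ConnectorFeatures`, the generated `FromJSON` in the request.go above accepts the wire format directly. A sketch; the feature keys below follow the connector features API documentation and should be treated as assumptions, as should the `client.Connector` wiring:

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures"
)

func main() {
	client, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Feature keys are taken from the connector features API docs and are
	// assumptions, not guaranteed by this vendored code.
	req, err := updatefeatures.NewRequest().FromJSON(`{
		"features": {
			"document_level_security": {"enabled": true},
			"incremental_sync": {"enabled": true}
		}
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := client.Connector.UpdateFeatures("my-connector").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update result: %s", res.Result)
}
```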
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features +func New(tp elastictransport.Interface) *UpdateFeatures { + r := &UpdateFeatures{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFeatures) Raw(raw io.Reader) *UpdateFeatures { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFeatures) Request(req *Request) *UpdateFeatures { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateFeatures) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFeatures: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_features") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateFeatures) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_features") + if reader := instrument.RecordRequestBody(ctx, "connector.update_features", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_features") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFeatures query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefeatures.Response +func (r UpdateFeatures) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFeatures headers map. +func (r *UpdateFeatures) Header(key, value string) *UpdateFeatures { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated. +// API Name: connectorid +func (r *UpdateFeatures) _connectorid(connectorid string) *UpdateFeatures { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateFeatures) ErrorTrace(errortrace bool) *UpdateFeatures { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateFeatures) FilterPath(filterpaths ...string) *UpdateFeatures { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFeatures) Human(human bool) *UpdateFeatures { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateFeatures) Pretty(pretty bool) *UpdateFeatures { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: features +func (r *UpdateFeatures) Features(features types.ConnectorFeaturesVariant) *UpdateFeatures { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Features = *features.ConnectorFeaturesCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/request.go new file mode 100644 index 000000000..d3db92842 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefiltering + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatefiltering +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_filtering/ConnectorUpdateFilteringRequest.ts#L27-L60 +type Request struct { + AdvancedSnippet *types.FilteringAdvancedSnippet `json:"advanced_snippet,omitempty"` + Filtering []types.FilteringConfig `json:"filtering,omitempty"` + Rules []types.FilteringRule `json:"rules,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefiltering request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/response.go new file mode 100644 index 000000000..561c5a6db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefiltering + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatefiltering +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_filtering/ConnectorUpdateFilteringResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/update_filtering.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/update_filtering.go new file mode 100644 index 000000000..34e30b8d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering/update_filtering.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector filtering. +// +// Update the draft filtering configuration of a connector and marks the draft +// validation state as edited. +// The filtering draft is activated once validated by the running Elastic +// connector service. +// The filtering property is used to configure sync rules (both basic and +// advanced) for a connector. +package updatefiltering + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFiltering struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFiltering type alias for index. +type NewUpdateFiltering func(connectorid string) *UpdateFiltering + +// NewUpdateFilteringFunc returns a new instance of UpdateFiltering with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFilteringFunc(tp elastictransport.Interface) NewUpdateFiltering { + return func(connectorid string) *UpdateFiltering { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector filtering. +// +// Update the draft filtering configuration of a connector and marks the draft +// validation state as edited. +// The filtering draft is activated once validated by the running Elastic +// connector service. +// The filtering property is used to configure sync rules (both basic and +// advanced) for a connector. 
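Reviewer note: for draft filtering rules, the `Raw` reader that every endpoint here exposes is often the path of least resistance: it bypasses the typed `Request` and is sent verbatim, taking precedence when both are set. A sketch; the rule object mirrors the documented default connector sync rule and is an assumption, as are the address and connector id:

```go
package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	client, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Raw bodies skip request marshalling entirely; per the package comment
	// above, the draft only activates once the running connector service
	// validates it.
	payload := strings.NewReader(`{
		"rules": [{
			"field": "_id", "id": "DEFAULT", "order": 0,
			"policy": "include", "rule": "regex", "value": ".*"
		}]
	}`)

	res, err := client.Connector.UpdateFiltering("my-connector").
		Raw(payload).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update result: %s", res.Result)
}
```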
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering +func New(tp elastictransport.Interface) *UpdateFiltering { + r := &UpdateFiltering{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFiltering) Raw(raw io.Reader) *UpdateFiltering { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFiltering) Request(req *Request) *UpdateFiltering { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateFiltering) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFiltering: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_filtering") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateFiltering) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_filtering") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_filtering") + if reader := instrument.RecordRequestBody(ctx, "connector.update_filtering", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_filtering") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFiltering query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefiltering.Response +func (r UpdateFiltering) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_filtering") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFiltering headers map. +func (r *UpdateFiltering) Header(key, value string) *UpdateFiltering { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateFiltering) _connectorid(connectorid string) *UpdateFiltering { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateFiltering) ErrorTrace(errortrace bool) *UpdateFiltering { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateFiltering) FilterPath(filterpaths ...string) *UpdateFiltering { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFiltering) Human(human bool) *UpdateFiltering { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateFiltering) Pretty(pretty bool) *UpdateFiltering { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: advanced_snippet +func (r *UpdateFiltering) AdvancedSnippet(advancedsnippet types.FilteringAdvancedSnippetVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdvancedSnippet = advancedsnippet.FilteringAdvancedSnippetCaster() + + return r +} + +// API name: filtering +func (r *UpdateFiltering) Filtering(filterings ...types.FilteringConfigVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filterings { + + r.req.Filtering = append(r.req.Filtering, *v.FilteringConfigCaster()) + + } + return r +} + +// API name: rules +func (r *UpdateFiltering) Rules(rules ...types.FilteringRuleVariant) *UpdateFiltering { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range rules { + + r.req.Rules = append(r.req.Rules, *v.FilteringRuleCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/request.go new file mode 100644 index 000000000..a68ef5c80 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefilteringvalidation + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatefilteringvalidation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationRequest.ts#L23-L48 +type Request struct { + Validation types.FilteringRulesValidation `json:"validation"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefilteringvalidation request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/response.go new file mode 100644 index 000000000..3fa1df7fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefilteringvalidation + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatefilteringvalidation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_filtering_validation/ConnectorUpdateFilteringValidationResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go new file mode 100644 index 000000000..4963c9050 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation/update_filtering_validation.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector draft filtering validation. +// +// Update the draft filtering validation info for a connector. +package updatefilteringvalidation + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFilteringValidation struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFilteringValidation type alias for index. +type NewUpdateFilteringValidation func(connectorid string) *UpdateFilteringValidation + +// NewUpdateFilteringValidationFunc returns a new instance of UpdateFilteringValidation with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
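
A sketch of the same pattern against this endpoint, reusing the `es` client and imports from the earlier sketch. The body shape (an error list plus a validation state) is an assumption about the spec's FilteringRulesValidation type, which this diff only references.

// Issues PUT /_connector/my-connector/_filtering/_validation.
req, err := updatefilteringvalidation.NewRequest().
	FromJSON(`{"validation": {"errors": [], "state": "valid"}}`) // assumed field shape
if err != nil {
	panic(err)
}
res, err := es.Connector.UpdateFilteringValidation("my-connector").
	Request(req).
	Do(context.Background())
if err == nil {
	fmt.Println(res.Result)
}
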
+func NewUpdateFilteringValidationFunc(tp elastictransport.Interface) NewUpdateFilteringValidation { + return func(connectorid string) *UpdateFilteringValidation { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector draft filtering validation. +// +// Update the draft filtering validation info for a connector. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation +func New(tp elastictransport.Interface) *UpdateFilteringValidation { + r := &UpdateFilteringValidation{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFilteringValidation) Raw(raw io.Reader) *UpdateFilteringValidation { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFilteringValidation) Request(req *Request) *UpdateFilteringValidation { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateFilteringValidation) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFilteringValidation: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_filtering") + path.WriteString("/") + path.WriteString("_validation") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateFilteringValidation) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_filtering_validation") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_filtering_validation") + if reader := instrument.RecordRequestBody(ctx, "connector.update_filtering_validation", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_filtering_validation") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFilteringValidation query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefilteringvalidation.Response +func (r UpdateFilteringValidation) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_filtering_validation") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFilteringValidation headers map. +func (r *UpdateFilteringValidation) Header(key, value string) *UpdateFilteringValidation { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateFilteringValidation) _connectorid(connectorid string) *UpdateFilteringValidation { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateFilteringValidation) ErrorTrace(errortrace bool) *UpdateFilteringValidation { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateFilteringValidation) FilterPath(filterpaths ...string) *UpdateFilteringValidation { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFilteringValidation) Human(human bool) *UpdateFilteringValidation { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateFilteringValidation) Pretty(pretty bool) *UpdateFilteringValidation { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: validation +func (r *UpdateFilteringValidation) Validation(validation types.FilteringRulesValidationVariant) *UpdateFilteringValidation { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Validation = *validation.FilteringRulesValidationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/request.go new file mode 100644 index 000000000..e35af35d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/request.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateindexname + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package updateindexname +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_index_name/ConnectorUpdateIndexNameRequest.ts#L23-L51 +type Request struct { + IndexName any `json:"index_name"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateindexname request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/response.go new file mode 100644 index 000000000..563f76fad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateindexname + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateindexname +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_index_name/ConnectorUpdateIndexNameResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/update_index_name.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/update_index_name.go new file mode 100644 index 000000000..7fceb7bfb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname/update_index_name.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector index name. +// +// Update the `index_name` field of a connector, specifying the index where the +// data ingested by the connector is stored. +package updateindexname + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateIndexName struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateIndexName type alias for index. +type NewUpdateIndexName func(connectorid string) *UpdateIndexName + +// NewUpdateIndexNameFunc returns a new instance of UpdateIndexName with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
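
A sketch for this endpoint, again assuming the `es` typed client from the first example. Since IndexName is typed `any` in the request above, a plain string suffices; the IndexName setter used here is defined later in this file, and the index name is a placeholder.

// Issues PUT /_connector/my-connector/_index_name.
res, err := es.Connector.UpdateIndexName("my-connector").
	IndexName("my-destination-index"). // placeholder index name
	Do(context.Background())
if err == nil {
	fmt.Println(res.Result)
}
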
+func NewUpdateIndexNameFunc(tp elastictransport.Interface) NewUpdateIndexName { + return func(connectorid string) *UpdateIndexName { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector index name. +// +// Update the `index_name` field of a connector, specifying the index where the +// data ingested by the connector is stored. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name +func New(tp elastictransport.Interface) *UpdateIndexName { + r := &UpdateIndexName{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateIndexName) Raw(raw io.Reader) *UpdateIndexName { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateIndexName) Request(req *Request) *UpdateIndexName { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateIndexName) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateIndexName: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_index_name") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateIndexName) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_index_name") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_index_name") + if reader := instrument.RecordRequestBody(ctx, "connector.update_index_name", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_index_name") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateIndexName query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateindexname.Response +func (r UpdateIndexName) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_index_name") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateIndexName headers map. +func (r *UpdateIndexName) Header(key, value string) *UpdateIndexName { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateIndexName) _connectorid(connectorid string) *UpdateIndexName { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateIndexName) ErrorTrace(errortrace bool) *UpdateIndexName { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateIndexName) FilterPath(filterpaths ...string) *UpdateIndexName { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateIndexName) Human(human bool) *UpdateIndexName { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateIndexName) Pretty(pretty bool) *UpdateIndexName { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: index_name +func (r *UpdateIndexName) IndexName(indexname any) *UpdateIndexName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexName = indexname + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/request.go new file mode 100644 index 000000000..3f25785d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatename + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatename +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_name/ConnectorUpdateNameRequest.ts#L22-L49 +type Request struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatename request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/response.go new file mode 100644 index 000000000..03bf7732b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatename + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatename +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_name/ConnectorUpdateNameResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/update_name.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/update_name.go new file mode 100644 index 000000000..e3f980da9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename/update_name.go @@ -0,0 +1,382 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector name and description. +package updatename + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateName struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateName type alias for index. +type NewUpdateName func(connectorid string) *UpdateName + +// NewUpdateNameFunc returns a new instance of UpdateName with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateNameFunc(tp elastictransport.Interface) NewUpdateName { + return func(connectorid string) *UpdateName { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector name and description. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name +func New(tp elastictransport.Interface) *UpdateName { + r := &UpdateName{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateName) Raw(raw io.Reader) *UpdateName { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateName) Request(req *Request) *UpdateName { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
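
A sketch for this endpoint with the `es` client from the first example. Both setters appear later in this file and simply fill the optional Description and Name fields of the request; the values are placeholders.

// Issues PUT /_connector/my-connector/_name.
res, err := es.Connector.UpdateName("my-connector").
	Name("My connector").                      // placeholder name
	Description("Connector for example data"). // placeholder description
	Do(context.Background())
if err == nil {
	fmt.Println(res.Result)
}
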
+func (r *UpdateName) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateName: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_name") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateName) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_name") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_name") + if reader := instrument.RecordRequestBody(ctx, "connector.update_name", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_name") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateName query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatename.Response +func (r UpdateName) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_name") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateName headers map. +func (r *UpdateName) Header(key, value string) *UpdateName { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateName) _connectorid(connectorid string) *UpdateName { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateName) ErrorTrace(errortrace bool) *UpdateName { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateName) FilterPath(filterpaths ...string) *UpdateName { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateName) Human(human bool) *UpdateName { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateName) Pretty(pretty bool) *UpdateName { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: description +func (r *UpdateName) Description(description string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// API name: name +func (r *UpdateName) Name(name string) *UpdateName { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/request.go new file mode 100644 index 000000000..cbe1518c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/request.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatenative + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatenative +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_native/ConnectorUpdateNativeRequest.ts#L22-L48 +type Request struct { + IsNative bool `json:"is_native"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatenative request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/response.go new file mode 100644 index 000000000..13b681d42 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatenative + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatenative +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_native/ConnectorUpdateNativeResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/update_native.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/update_native.go new file mode 100644 index 000000000..0b103e66f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative/update_native.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector is_native flag. +package updatenative + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateNative struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateNative type alias for index. +type NewUpdateNative func(connectorid string) *UpdateNative + +// NewUpdateNativeFunc returns a new instance of UpdateNative with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateNativeFunc(tp elastictransport.Interface) NewUpdateNative { + return func(connectorid string) *UpdateNative { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector is_native flag. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native +func New(tp elastictransport.Interface) *UpdateNative { + r := &UpdateNative{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateNative) Raw(raw io.Reader) *UpdateNative { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateNative) Request(req *Request) *UpdateNative { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
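+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp and a placeholder connector ID; the request is only built here, never
+// sent:
+//
+//	httpReq, err := NewUpdateNativeFunc(tp)("my-connector").
+//		IsNative(true).
+//		HttpRequest(context.Background())
+//	if err != nil {
+//		// handle the build error (for example ErrBuildPath)
+//	}
+//	_ = httpReq // inspect method, URL and body before sending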
+func (r *UpdateNative) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateNative: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_native") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
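+//
+// A minimal sketch, again assuming a configured elastictransport.Interface
+// named tp; unlike Do, Perform hands back the raw *http.Response and leaves
+// status handling and body decoding to the caller:
+//
+//	res, err := NewUpdateNativeFunc(tp)("my-connector").
+//		IsNative(false).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}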
+func (r UpdateNative) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_native") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_native") + if reader := instrument.RecordRequestBody(ctx, "connector.update_native", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_native") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateNative query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatenative.Response +func (r UpdateNative) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_native") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateNative headers map. +func (r *UpdateNative) Header(key, value string) *UpdateNative { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateNative) _connectorid(connectorid string) *UpdateNative { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *UpdateNative) ErrorTrace(errortrace bool) *UpdateNative {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the
+// response returned by Elasticsearch.
+// API name: filter_path
+func (r *UpdateNative) FilterPath(filterpaths ...string) *UpdateNative {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans: for example `"exists_time": "1h"` instead of
+// `"exists_time_in_millis": 3600000`. When disabled, the human-readable
+// values are omitted, which makes sense for responses consumed only by
+// machines.
+// API name: human
+func (r *UpdateNative) Human(human bool) *UpdateNative {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON is "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *UpdateNative) Pretty(pretty bool) *UpdateNative {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: is_native
+func (r *UpdateNative) IsNative(isnative bool) *UpdateNative {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.IsNative = isnative
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/request.go
new file mode 100644
index 000000000..b4a391eab
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/request.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatepipeline + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatepipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_pipeline/ConnectorUpdatePipelineRequest.ts#L23-L52 +type Request struct { + Pipeline types.IngestPipelineParams `json:"pipeline"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatepipeline request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/response.go new file mode 100644 index 000000000..c196ee2d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatepipeline + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatepipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_pipeline/ConnectorUpdatePipelineResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/update_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/update_pipeline.go new file mode 100644 index 000000000..0259bc725 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline/update_pipeline.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector pipeline. +// +// When you create a new connector, the configuration of an ingest pipeline is +// populated with default settings. +package updatepipeline + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdatePipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdatePipeline type alias for index. +type NewUpdatePipeline func(connectorid string) *UpdatePipeline + +// NewUpdatePipelineFunc returns a new instance of UpdatePipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdatePipelineFunc(tp elastictransport.Interface) NewUpdatePipeline { + return func(connectorid string) *UpdatePipeline { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector pipeline. +// +// When you create a new connector, the configuration of an ingest pipeline is +// populated with default settings. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline +func New(tp elastictransport.Interface) *UpdatePipeline { + r := &UpdatePipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdatePipeline) Raw(raw io.Reader) *UpdatePipeline { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdatePipeline) Request(req *Request) *UpdatePipeline { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
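+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp; the JSON body is a placeholder loaded through FromJSON before the
+// request is built:
+//
+//	req, err := NewRequest().FromJSON(`{"pipeline": {"name": "my-pipeline"}}`)
+//	if err != nil {
+//		// handle the decode error
+//	}
+//	httpReq, err := NewUpdatePipelineFunc(tp)("my-connector").
+//		Request(req).
+//		HttpRequest(context.Background())
+//	_ = httpReq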
+func (r *UpdatePipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdatePipeline: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_pipeline") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
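+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp; Raw sends a pre-encoded placeholder payload as-is, bypassing the
+// typed Request struct:
+//
+//	res, err := NewUpdatePipelineFunc(tp)("my-connector").
+//		Raw(strings.NewReader(`{"pipeline": {"name": "my-pipeline"}}`)).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}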
+func (r UpdatePipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_pipeline") + if reader := instrument.RecordRequestBody(ctx, "connector.update_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdatePipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatepipeline.Response +func (r UpdatePipeline) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdatePipeline headers map. +func (r *UpdatePipeline) Header(key, value string) *UpdatePipeline { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdatePipeline) _connectorid(connectorid string) *UpdatePipeline { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *UpdatePipeline) ErrorTrace(errortrace bool) *UpdatePipeline {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the
+// response returned by Elasticsearch.
+// API name: filter_path
+func (r *UpdatePipeline) FilterPath(filterpaths ...string) *UpdatePipeline {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans: for example `"exists_time": "1h"` instead of
+// `"exists_time_in_millis": 3600000`. When disabled, the human-readable
+// values are omitted, which makes sense for responses consumed only by
+// machines.
+// API name: human
+func (r *UpdatePipeline) Human(human bool) *UpdatePipeline {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON is "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *UpdatePipeline) Pretty(pretty bool) *UpdatePipeline {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: pipeline
+func (r *UpdatePipeline) Pipeline(pipeline types.IngestPipelineParamsVariant) *UpdatePipeline {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Pipeline = *pipeline.IngestPipelineParamsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/request.go
new file mode 100644
index 000000000..753827f31
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/request.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatescheduling + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatescheduling +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_scheduling/ConnectorUpdateSchedulingRequest.ts#L23-L50 +type Request struct { + Scheduling types.SchedulingConfiguration `json:"scheduling"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatescheduling request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/response.go new file mode 100644 index 000000000..6c90132db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatescheduling + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatescheduling +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_scheduling/ConnectorUpdateSchedulingResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/update_scheduling.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/update_scheduling.go new file mode 100644 index 000000000..4ed713a1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling/update_scheduling.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector scheduling. +package updatescheduling + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateScheduling struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateScheduling type alias for index. +type NewUpdateScheduling func(connectorid string) *UpdateScheduling + +// NewUpdateSchedulingFunc returns a new instance of UpdateScheduling with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateSchedulingFunc(tp elastictransport.Interface) NewUpdateScheduling { + return func(connectorid string) *UpdateScheduling { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector scheduling. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling +func New(tp elastictransport.Interface) *UpdateScheduling { + r := &UpdateScheduling{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateScheduling) Raw(raw io.Reader) *UpdateScheduling { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateScheduling) Request(req *Request) *UpdateScheduling { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
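+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp; the scheduling body is an empty placeholder seeded via FromJSON, which
+// is the shortest way to populate types.SchedulingConfiguration here:
+//
+//	req, err := NewRequest().FromJSON(`{"scheduling": {}}`)
+//	if err != nil {
+//		// handle the decode error
+//	}
+//	httpReq, err := NewUpdateSchedulingFunc(tp)("my-connector").
+//		Request(req).
+//		HttpRequest(context.Background())
+//	_ = httpReq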
+func (r *UpdateScheduling) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateScheduling: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_scheduling") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
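+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp; the returned *http.Response must be closed by the caller:
+//
+//	res, err := NewUpdateSchedulingFunc(tp)("my-connector").
+//		Raw(strings.NewReader(`{"scheduling": {}}`)).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}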
+func (r UpdateScheduling) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_scheduling") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_scheduling") + if reader := instrument.RecordRequestBody(ctx, "connector.update_scheduling", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_scheduling") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateScheduling query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatescheduling.Response +func (r UpdateScheduling) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_scheduling") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateScheduling headers map. +func (r *UpdateScheduling) Header(key, value string) *UpdateScheduling { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateScheduling) _connectorid(connectorid string) *UpdateScheduling { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *UpdateScheduling) ErrorTrace(errortrace bool) *UpdateScheduling {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the
+// response returned by Elasticsearch.
+// API name: filter_path
+func (r *UpdateScheduling) FilterPath(filterpaths ...string) *UpdateScheduling {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans: for example `"exists_time": "1h"` instead of
+// `"exists_time_in_millis": 3600000`. When disabled, the human-readable
+// values are omitted, which makes sense for responses consumed only by
+// machines.
+// API name: human
+func (r *UpdateScheduling) Human(human bool) *UpdateScheduling {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON is "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *UpdateScheduling) Pretty(pretty bool) *UpdateScheduling {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: scheduling
+func (r *UpdateScheduling) Scheduling(scheduling types.SchedulingConfigurationVariant) *UpdateScheduling {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Scheduling = *scheduling.SchedulingConfigurationCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/request.go
new file mode 100644
index 000000000..6ec8734b7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/request.go
@@ -0,0 +1,52 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateservicetype + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updateservicetype +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_service_type/ConnectorUpdateServiceTypeRequest.ts#L22-L48 +type Request struct { + ServiceType string `json:"service_type"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateservicetype request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/response.go new file mode 100644 index 000000000..397ca85f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateservicetype + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updateservicetype +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_service_type/ConnectorUpdateServiceTypeResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/update_service_type.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/update_service_type.go new file mode 100644 index 000000000..06a033c63 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype/update_service_type.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector service type. +package updateservicetype + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateServiceType struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateServiceType type alias for index. +type NewUpdateServiceType func(connectorid string) *UpdateServiceType + +// NewUpdateServiceTypeFunc returns a new instance of UpdateServiceType with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateServiceTypeFunc(tp elastictransport.Interface) NewUpdateServiceType { + return func(connectorid string) *UpdateServiceType { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector service type. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type +func New(tp elastictransport.Interface) *UpdateServiceType { + r := &UpdateServiceType{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateServiceType) Raw(raw io.Reader) *UpdateServiceType { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateServiceType) Request(req *Request) *UpdateServiceType { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
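+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp and "sharepoint_online" as a placeholder service type:
+//
+//	httpReq, err := NewUpdateServiceTypeFunc(tp)("my-connector").
+//		ServiceType("sharepoint_online").
+//		HttpRequest(context.Background())
+//	if err != nil {
+//		// handle the build error
+//	}
+//	_ = httpReq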
+func (r *UpdateServiceType) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateServiceType: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_service_type") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
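+//
+// A minimal sketch, assuming a configured elastictransport.Interface named
+// tp; unlike Do, Perform does not interpret the HTTP status code:
+//
+//	res, err := NewUpdateServiceTypeFunc(tp)("my-connector").
+//		ServiceType("sharepoint_online").
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}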
+func (r UpdateServiceType) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_service_type") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_service_type") + if reader := instrument.RecordRequestBody(ctx, "connector.update_service_type", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_service_type") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateServiceType query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateservicetype.Response +func (r UpdateServiceType) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_service_type") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateServiceType headers map. +func (r *UpdateServiceType) Header(key, value string) *UpdateServiceType { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateServiceType) _connectorid(connectorid string) *UpdateServiceType { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *UpdateServiceType) ErrorTrace(errortrace bool) *UpdateServiceType {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the
+// response returned by Elasticsearch.
+// API name: filter_path
+func (r *UpdateServiceType) FilterPath(filterpaths ...string) *UpdateServiceType {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans: for example `"exists_time": "1h"` instead of
+// `"exists_time_in_millis": 3600000`. When disabled, the human-readable
+// values are omitted, which makes sense for responses consumed only by
+// machines.
+// API name: human
+func (r *UpdateServiceType) Human(human bool) *UpdateServiceType {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON is "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *UpdateServiceType) Pretty(pretty bool) *UpdateServiceType {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: service_type
+func (r *UpdateServiceType) ServiceType(servicetype string) *UpdateServiceType {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceType = servicetype
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/request.go
new file mode 100644
index 000000000..6f1dca7e9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/request.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatestatus + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus" +) + +// Request holds the request body struct for the package updatestatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_status/ConnectorUpdateStatusRequest.ts#L23-L49 +type Request struct { + Status connectorstatus.ConnectorStatus `json:"status"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatestatus request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/response.go new file mode 100644 index 000000000..1b0d3973d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatestatus + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package updatestatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/update_status/ConnectorUpdateStatusResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/update_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/update_status.go new file mode 100644 index 000000000..afe4819dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus/update_status.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the connector status. +package updatestatus + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus" +) + +const ( + connectoridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + connectorid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateStatus type alias for index. +type NewUpdateStatus func(connectorid string) *UpdateStatus + +// NewUpdateStatusFunc returns a new instance of UpdateStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateStatusFunc(tp elastictransport.Interface) NewUpdateStatus { + return func(connectorid string) *UpdateStatus { + n := New(tp) + + n._connectorid(connectorid) + + return n + } +} + +// Update the connector status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status +func New(tp elastictransport.Interface) *UpdateStatus { + r := &UpdateStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateStatus) Raw(raw io.Reader) *UpdateStatus { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateStatus) Request(req *Request) *UpdateStatus { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateStatus: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == connectoridMask: + path.WriteString("/") + path.WriteString("_connector") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "connectorid", r.connectorid) + } + path.WriteString(r.connectorid) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
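Editor's note: `HttpRequest` is useful on its own for inspecting what the builder produces before anything is sent (the URL carries only a path; the transport supplies the host), while `Do`, defined just below, decodes non-2xx responses into `*types.ElasticsearchError`. A sketch of both paths; the typed-client entry point and the `connectorstatus.Connected` enum name are assumptions, not confirmed by this diff.

```
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Build without sending, to inspect the path and payload.
	req, err := es.Connector.UpdateStatus("my-connector").
		Status(connectorstatus.Connected). // assumed enum value name
		HttpRequest(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path) // PUT /_connector/my-connector/_status
	body, _ := io.ReadAll(req.Body)
	fmt.Println(string(body)) // {"status":"connected"}

	// Execute for real; non-2xx responses surface as *types.ElasticsearchError.
	res, err := es.Connector.UpdateStatus("my-connector").
		Status(connectorstatus.Connected).
		Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			log.Fatalf("rejected with HTTP %d: %v", esErr.Status, esErr)
		}
		log.Fatal(err)
	}
	fmt.Println(res.Result)
}
```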
+func (r UpdateStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "connector.update_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "connector.update_status") + if reader := instrument.RecordRequestBody(ctx, "connector.update_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "connector.update_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatestatus.Response +func (r UpdateStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "connector.update_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateStatus headers map. +func (r *UpdateStatus) Header(key, value string) *UpdateStatus { + r.headers.Set(key, value) + + return r +} + +// ConnectorId The unique identifier of the connector to be updated +// API Name: connectorid +func (r *UpdateStatus) _connectorid(connectorid string) *UpdateStatus { + r.paramSet |= connectoridMask + r.connectorid = connectorid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateStatus) ErrorTrace(errortrace bool) *UpdateStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateStatus) FilterPath(filterpaths ...string) *UpdateStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateStatus) Human(human bool) *UpdateStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateStatus) Pretty(pretty bool) *UpdateStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: status +func (r *UpdateStatus) Status(status connectorstatus.ConnectorStatus) *UpdateStatus { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Status = status + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/bulk.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/bulk.go new file mode 100644 index 000000000..e23a991f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/bulk.go @@ -0,0 +1,811 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Bulk index or delete documents. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. +// This reduces overhead and can greatly increase indexing speed. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. +// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. 
+// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. +// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. +// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). +// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. +// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. +// +// If you provide a target in the request path, it is used for any actions that +// don't explicitly specify an `_index` argument. +// +// A note on the format: the idea here is to make processing as fast as +// possible. +// As some of the actions are redirected to other shards on other nodes, only +// `action_meta_data` is parsed on the receiving node side. +// +// Client libraries using this protocol should try and strive to do something +// similar on the client side, and reduce buffering as much as possible. +// +// There is no "correct" number of actions to perform in a single bulk request. +// Experiment with different settings to find the optimal size for your +// particular workload. +// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by +// default so clients must ensure that no request exceeds this size. +// It is not possible to index a single document that exceeds the size limit, so +// you must pre-process any such documents into smaller pieces before sending +// them to Elasticsearch. +// For instance, split documents into pages or chapters before indexing them, or +// store raw binary data in a system outside Elasticsearch and replace the raw +// data with a link to the external system in the documents that you send to +// Elasticsearch. 
+// +// **Client suppport for bulk requests** +// +// Some of the officially supported clients provide helpers to assist with bulk +// requests and reindexing: +// +// * Go: Check out `esutil.BulkIndexer` +// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and +// `Search::Elasticsearch::Client::5_0::Scroll` +// * Python: Check out `elasticsearch.helpers.*` +// * JavaScript: Check out `client.helpers.*` +// * .NET: Check out `BulkAllObservable` +// * PHP: Check out bulk indexing. +// +// **Submitting bulk requests with cURL** +// +// If you're providing text file input to `curl`, you must use the +// `--data-binary` flag instead of plain `-d`. +// The latter doesn't preserve newlines. For example: +// +// ``` +// $ cat requests +// { "index" : { "_index" : "test", "_id" : "1" } } +// { "field1" : "value1" } +// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk +// --data-binary "@requests"; echo +// {"took":7, "errors": false, +// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +// ``` +// +// **Optimistic concurrency control** +// +// Each `index` and `delete` action within a bulk API call may include the +// `if_seq_no` and `if_primary_term` parameters in their respective action and +// meta data lines. +// The `if_seq_no` and `if_primary_term` parameters control how operations are +// run, based on the last modification to existing documents. See Optimistic +// concurrency control for more details. +// +// **Versioning** +// +// Each bulk item can include the version value using the `version` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_version` mapping. +// It also support the `version_type`. +// +// **Routing** +// +// Each bulk item can include the routing value using the `routing` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_routing` mapping. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. +// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. +// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. +// +// You might want to disable the refresh interval temporarily to improve +// indexing throughput for large bulk requests. +// Refer to the linked documentation for step-by-step instructions using the +// index settings API. 
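Editor's note: tying the NDJSON rules above to this package, the `Raw` method defined below accepts a prebuilt payload verbatim, which is the closest Go analogue of the cURL example. A sketch, assuming the typed client `es` from the earlier note; the index and documents are placeholders.

```
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// One meta line per action, each source on its own line, no pretty
	// printing, and a mandatory trailing newline.
	ndjson := `{"index":{"_index":"test","_id":"1"}}
{"field1":"value1"}
{"delete":{"_index":"test","_id":"2"}}
`

	res, err := es.Bulk().Raw(strings.NewReader(ndjson)).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("took=%dms errors=%v\n", res.Took, res.Errors)
}
```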
+package bulk + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Bulk struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewBulk type alias for index. +type NewBulk func() *Bulk + +// NewBulkFunc returns a new instance of Bulk with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkFunc(tp elastictransport.Interface) NewBulk { + return func() *Bulk { + n := New(tp) + + return n + } +} + +// Bulk index or delete documents. +// Perform multiple `index`, `create`, `delete`, and `update` actions in a +// single request. +// This reduces overhead and can greatly increase indexing speed. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To use the `create` action, you must have the `create_doc`, `create`, +// `index`, or `write` index privilege. Data streams support only the `create` +// action. +// * To use the `index` action, you must have the `create`, `index`, or `write` +// index privilege. +// * To use the `delete` action, you must have the `delete` or `write` index +// privilege. +// * To use the `update` action, you must have the `index` or `write` index +// privilege. +// * To automatically create a data stream or index with a bulk API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// * To make the result of a bulk operation visible to search using the +// `refresh` parameter, you must have the `maintenance` or `manage` index +// privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The actions are specified in the request body using a newline delimited JSON +// (NDJSON) structure: +// +// ``` +// action_and_meta_data\n +// optional_source\n +// action_and_meta_data\n +// optional_source\n +// .... +// action_and_meta_data\n +// optional_source\n +// ``` +// +// The `index` and `create` actions expect a source on the next line and have +// the same semantics as the `op_type` parameter in the standard index API. +// A `create` action fails if a document with the same ID already exists in the +// target +// An `index` action adds or replaces a document as necessary. +// +// NOTE: Data streams support only the `create` action. +// To update or delete a document in a data stream, you must target the backing +// index containing the document. +// +// An `update` action expects that the partial doc, upsert, and script and its +// options are specified on the next line. +// +// A `delete` action does not expect a source on the next line and has the same +// semantics as the standard delete API. 
+// +// NOTE: The final line of data must end with a newline character (`\n`). +// Each newline character may be preceded by a carriage return (`\r`). +// When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header +// of `application/json` or `application/x-ndjson`. +// Because this format uses literal newline characters (`\n`) as delimiters, +// make sure that the JSON actions and sources are not pretty printed. +// +// If you provide a target in the request path, it is used for any actions that +// don't explicitly specify an `_index` argument. +// +// A note on the format: the idea here is to make processing as fast as +// possible. +// As some of the actions are redirected to other shards on other nodes, only +// `action_meta_data` is parsed on the receiving node side. +// +// Client libraries using this protocol should try and strive to do something +// similar on the client side, and reduce buffering as much as possible. +// +// There is no "correct" number of actions to perform in a single bulk request. +// Experiment with different settings to find the optimal size for your +// particular workload. +// Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by +// default so clients must ensure that no request exceeds this size. +// It is not possible to index a single document that exceeds the size limit, so +// you must pre-process any such documents into smaller pieces before sending +// them to Elasticsearch. +// For instance, split documents into pages or chapters before indexing them, or +// store raw binary data in a system outside Elasticsearch and replace the raw +// data with a link to the external system in the documents that you send to +// Elasticsearch. +// +// **Client suppport for bulk requests** +// +// Some of the officially supported clients provide helpers to assist with bulk +// requests and reindexing: +// +// * Go: Check out `esutil.BulkIndexer` +// * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and +// `Search::Elasticsearch::Client::5_0::Scroll` +// * Python: Check out `elasticsearch.helpers.*` +// * JavaScript: Check out `client.helpers.*` +// * .NET: Check out `BulkAllObservable` +// * PHP: Check out bulk indexing. +// +// **Submitting bulk requests with cURL** +// +// If you're providing text file input to `curl`, you must use the +// `--data-binary` flag instead of plain `-d`. +// The latter doesn't preserve newlines. For example: +// +// ``` +// $ cat requests +// { "index" : { "_index" : "test", "_id" : "1" } } +// { "field1" : "value1" } +// $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk +// --data-binary "@requests"; echo +// {"took":7, "errors": false, +// "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +// ``` +// +// **Optimistic concurrency control** +// +// Each `index` and `delete` action within a bulk API call may include the +// `if_seq_no` and `if_primary_term` parameters in their respective action and +// meta data lines. +// The `if_seq_no` and `if_primary_term` parameters control how operations are +// run, based on the last modification to existing documents. See Optimistic +// concurrency control for more details. +// +// **Versioning** +// +// Each bulk item can include the version value using the `version` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_version` mapping. +// It also support the `version_type`. 
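Editor's note: the concurrency-control fields just described live on the individual action's metadata line, not on the request as a whole. A fragment continuing the previous sketch (same client and imports); the sequence number and primary term are hypothetical values taken from an earlier read of the document.

```
// Fails with a per-item version conflict if the document changed after the
// read that produced seq_no 42 / primary_term 1.
ndjson := `{"index":{"_index":"test","_id":"1","if_seq_no":42,"if_primary_term":1}}
{"field1":"updated"}
`
res, err := es.Bulk().Raw(strings.NewReader(ndjson)).Do(context.Background())
if err != nil {
	log.Fatal(err)
}
// A conflicting item is reported inside res.Items, not as a Go error.
fmt.Println("errors:", res.Errors)
```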
+// +// **Routing** +// +// Each bulk item can include the routing value using the `routing` field. +// It automatically follows the behavior of the index or delete operation based +// on the `_routing` mapping. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Wait for active shards** +// +// When making bulk calls, you can set the `wait_for_active_shards` parameter to +// require a minimum number of shard copies to be active before starting to +// process the bulk request. +// +// **Refresh** +// +// Control when the changes made by this request are visible to search. +// +// NOTE: Only the shards that receive the bulk request will be affected by +// refresh. +// Imagine a `_bulk?refresh=wait_for` request with three documents in it that +// happen to be routed to different shards in an index with five shards. +// The request will only wait for those three shards to refresh. +// The other two shards that make up the index do not participate in the `_bulk` +// request at all. +// +// You might want to disable the refresh interval temporarily to improve +// indexing throughput for large bulk requests. +// Refer to the linked documentation for step-by-step instructions using the +// index settings API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk +func New(tp elastictransport.Interface) *Bulk { + r := &Bulk{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Bulk) Raw(raw io.Reader) *Bulk { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Bulk) Request(req *Request) *Bulk { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Bulk) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Bulk: %w", err) + } + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_bulk") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_bulk") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
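Editor's note: the serialization loop above explains the shape of the body type, which this diff later defines as `type Request = []any`: each element, whether an action header or a source document, is marshaled onto its own line with a newline appended, so a well-formed body simply alternates headers and sources. A fragment continuing the earlier sketch (additionally importing `typedapi/core/bulk`); `Perform` and `Do` follow below.

```
req := bulk.Request{
	map[string]any{"index": map[string]any{"_index": "test", "_id": "1"}},
	map[string]any{"field1": "value1"},
	map[string]any{"delete": map[string]any{"_index": "test", "_id": "2"}},
}
res, err := es.Bulk().Request(&req).Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("errors:", res.Errors)
```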
+func (r Bulk) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "bulk") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "bulk") + if reader := instrument.RecordRequestBody(ctx, "bulk", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "bulk") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Bulk query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulk.Response +func (r Bulk) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "bulk") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Bulk headers map. +func (r *Bulk) Header(key, value string) *Bulk { + r.headers.Set(key, value) + + return r +} + +// Index The name of the data stream, index, or index alias to perform bulk actions +// on. +// API Name: index +func (r *Bulk) Index(index string) *Bulk { + r.paramSet |= indexMask + r.index = index + + return r +} + +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Bulk) IncludeSourceOnError(includesourceonerror bool) *Bulk { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// ListExecutedPipelines If `true`, the response will include the ingest pipelines that were run for +// each index or create. 
+// API name: list_executed_pipelines +func (r *Bulk) ListExecutedPipelines(listexecutedpipelines bool) *Bulk { + r.values.Set("list_executed_pipelines", strconv.FormatBool(listexecutedpipelines)) + + return r +} + +// Pipeline The pipeline identifier to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, setting the value to +// `_none` turns off the default ingest pipeline for this request. +// If a final pipeline is configured, it will always run regardless of the value +// of this parameter. +// API name: pipeline +func (r *Bulk) Pipeline(pipeline string) *Bulk { + r.values.Set("pipeline", pipeline) + + return r +} + +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If `wait_for`, wait for a refresh to make this operation visible to search. +// If `false`, do nothing with refreshes. +// Valid values: `true`, `false`, `wait_for`. +// API name: refresh +func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Routing A custom value that is used to route operations to a specific shard. +// API name: routing +func (r *Bulk) Routing(routing string) *Bulk { + r.values.Set("routing", routing) + + return r +} + +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or +// contains a list of fields to return. +// API name: _source +func (r *Bulk) Source_(sourceconfigparam string) *Bulk { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_excludes +func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_includes +func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// Timeout The period each action waits for the following operations: automatic index +// creation, dynamic mapping updates, and waiting for active shards. +// The default is `1m` (one minute), which guarantees Elasticsearch waits for at +// least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// API name: timeout +func (r *Bulk) Timeout(duration string) *Bulk { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// The default is `1`, which waits for each primary shard to be active. 
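Editor's note: the delivery-related parameters combine naturally on one builder. A fragment continuing the earlier sketch, reusing its `ndjson` payload; the `refresh.Waitfor` enum value name is assumed from the generated enums package rather than confirmed by this diff.

```
res, err := es.Bulk().
	Index("test").                  // default _index for actions that omit it
	Raw(strings.NewReader(ndjson)). // payload from the earlier sketch
	Refresh(refresh.Waitfor).       // block until the writes are searchable
	Timeout("2m").                  // per-action wait budget
	WaitForActiveShards("all").     // require every shard copy to be active
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("errors:", res.Errors)
```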
+// API name: wait_for_active_shards +func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// RequireAlias If `true`, the request's actions must target an index alias. +// API name: require_alias +func (r *Bulk) RequireAlias(requirealias bool) *Bulk { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Bulk) RequireDataStream(requiredatastream bool) *Bulk { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Bulk) ErrorTrace(errortrace bool) *Bulk { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Bulk) FilterPath(filterpaths ...string) *Bulk { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Bulk) Human(human bool) *Bulk { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Bulk) Pretty(pretty bool) *Bulk { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/helpers.go new file mode 100644 index 000000000..becff08c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/helpers.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package bulk + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// CreateOp is a helper function to add a CreateOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. 
+func (r *Bulk) CreateOp(op types.CreateOperation, doc interface{}) error { + operation := types.OperationContainer{Create: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + return nil +} + +// IndexOp is a helper function to add an IndexOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. +func (r *Bulk) IndexOp(op types.IndexOperation, doc interface{}) error { + operation := types.OperationContainer{Index: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + return nil +} + +// UpdateOp is a helper function to add an UpdateOperation with and UpdateAction to the current bulk request. +// update is optional, if both doc and update.Doc are provided, update.Doc has precedence. 
+func (r *Bulk) UpdateOp(op types.UpdateOperation, doc interface{}, update *types.UpdateAction) error { + operation := types.OperationContainer{Update: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if update == nil { + update = types.NewUpdateAction() + } + + if len(update.Doc) == 0 { + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + default: + //doc can be nil if passed in script + if doc == nil { + break + } + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + update.Doc = body + } + } + + body, err := json.Marshal(update) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + return nil +} + +// DeleteOp is a helper function to add a DeleteOperation to the current bulk request. +func (r *Bulk) DeleteOp(op types.DeleteOperation) error { + operation := types.OperationContainer{Delete: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/request.go new file mode 100644 index 000000000..49c8b7d64 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/request.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
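Editor's note: the four helpers above take care of the header/source pairing and buffer hygiene, so callers never hand-build NDJSON. A sketch of indexing, updating, and deleting through them, continuing the earlier client setup; the pointer field names on the operation structs (`Id_` and similar) are assumed from the generated types package.

```
bulkReq := es.Bulk().Index("test")

id := "1"
doc := struct {
	Title string `json:"title"`
}{Title: "hello"}

// Writes the header {"index":{"_id":"1"}} plus the marshaled document.
if err := bulkReq.IndexOp(types.IndexOperation{Id_: &id}, doc); err != nil {
	log.Fatal(err)
}

// Partial update; passing update == nil wraps the doc as {"doc": ...}.
patch := map[string]any{"title": "hello again"}
if err := bulkReq.UpdateOp(types.UpdateOperation{Id_: &id}, patch, nil); err != nil {
	log.Fatal(err)
}

// Delete needs only the header line.
gone := "2"
if err := bulkReq.DeleteOp(types.DeleteOperation{Id_: &gone}); err != nil {
	log.Fatal(err)
}

res, err := bulkReq.Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("errors:", res.Errors)
```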
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulk + +// Request holds the request body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/BulkRequest.ts#L32-L251 +type Request = []any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/response.go new file mode 100644 index 000000000..80c5eee36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulk + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype" +) + +// Response holds the response body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/BulkResponse.ts#L24-L45 +type Response struct { + + // Errors If `true`, one or more of the operations in the bulk request did not complete + // successfully. + Errors bool `json:"errors"` + IngestTook *int64 `json:"ingest_took,omitempty"` + // Items The result of each operation in the bulk request, in the order they were + // submitted. + Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` + // Took The length of time, in milliseconds, it took to process the bulk request. + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/clear_scroll.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/clear_scroll.go new file mode 100644 index 000000000..7972503ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/clear_scroll.go @@ -0,0 +1,355 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
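Editor's note: because `Errors` is a single rollup flag, per-item inspection goes through `Items`, where each entry maps the operation type to its result. A hedged fragment continuing the earlier sketch; the `ResponseItem` field names used here (`Status`, `Error`) are assumed from the generated types package.

```
if res.Errors {
	for i, item := range res.Items {
		for op, detail := range item {
			if detail.Error != nil {
				fmt.Printf("item %d: %s failed with status %d\n", i, op, detail.Status)
			}
		}
	}
}
```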
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear a scrolling search. +// Clear the search context and results for a scrolling search. +package clearscroll + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearScroll struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + scrollid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearScroll type alias for index. +type NewClearScroll func() *ClearScroll + +// NewClearScrollFunc returns a new instance of ClearScroll with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearScrollFunc(tp elastictransport.Interface) NewClearScroll { + return func() *ClearScroll { + n := New(tp) + + return n + } +} + +// Clear a scrolling search. +// Clear the search context and results for a scrolling search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll +func New(tp elastictransport.Interface) *ClearScroll { + r := &ClearScroll{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ClearScroll) Raw(raw io.Reader) *ClearScroll { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ClearScroll) Request(req *Request) *ClearScroll { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ClearScroll) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ClearScroll: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("scroll") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearScroll) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "clear_scroll") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "clear_scroll") + if reader := instrument.RecordRequestBody(ctx, "clear_scroll", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "clear_scroll") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearScroll query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearscroll.Response +func (r ClearScroll) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "clear_scroll") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + 
if res.StatusCode < 299 || slices.Contains([]int{404}, res.StatusCode) { + + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ClearScroll headers map. +func (r *ClearScroll) Header(key, value string) *ClearScroll { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearScroll) ErrorTrace(errortrace bool) *ClearScroll { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearScroll) FilterPath(filterpaths ...string) *ClearScroll { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearScroll) Human(human bool) *ClearScroll { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearScroll) Pretty(pretty bool) *ClearScroll { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The scroll IDs to clear. +// To clear all scroll IDs, use `_all`. +// API name: scroll_id +func (r *ClearScroll) ScrollId(scrollids ...string) *ClearScroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ScrollId = scrollids + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/request.go new file mode 100644 index 000000000..fa80484ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/request.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearscroll + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package clearscroll +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L61 +type Request struct { + + // ScrollId The scroll IDs to clear. + // To clear all scroll IDs, use `_all`. + ScrollId []string `json:"scroll_id,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Clearscroll request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scroll_id": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ScrollId", err) + } + + s.ScrollId = append(s.ScrollId, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ScrollId); err != nil { + return fmt.Errorf("%s | %w", "ScrollId", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/response.go new file mode 100644 index 000000000..9f846d379 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearscroll + +// Response holds the response body struct for the package clearscroll +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L43 +type Response struct { + + // NumFreed The number of scrolling search requests cleared. + NumFreed int `json:"num_freed"` + // Succeeded If `true`, the request succeeded. + // This does not indicate whether any scrolling search requests were cleared. + Succeeded bool `json:"succeeded"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/close_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/close_point_in_time.go new file mode 100644 index 000000000..13e18918e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/close_point_in_time.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Close a point in time. +// A point in time must be opened explicitly before being used in search +// requests. +// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// A point in time is automatically closed when the `keep_alive` period has +// elapsed. +// However, keeping points in time has a cost; close them as soon as they are no +// longer required for search requests. +package closepointintime + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClosePointInTime struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClosePointInTime type alias for index. 
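+//
+// A minimal usage sketch, assuming `transport` is any configured
+// elastictransport.Interface and `pitID` holds the id returned when the
+// point in time was opened (both names are illustrative):
+//
+//	res, err := closepointintime.New(transport).
+//		Id(pitID).
+//		Do(context.Background())
+//	if err == nil {
+//		fmt.Println(res.Succeeded, res.NumFreed)
+//	}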
+type NewClosePointInTime func() *ClosePointInTime + +// NewClosePointInTimeFunc returns a new instance of ClosePointInTime with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClosePointInTimeFunc(tp elastictransport.Interface) NewClosePointInTime { + return func() *ClosePointInTime { + n := New(tp) + + return n + } +} + +// Close a point in time. +// A point in time must be opened explicitly before being used in search +// requests. +// The `keep_alive` parameter tells Elasticsearch how long it should persist. +// A point in time is automatically closed when the `keep_alive` period has +// elapsed. +// However, keeping points in time has a cost; close them as soon as they are no +// longer required for search requests. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time +func New(tp elastictransport.Interface) *ClosePointInTime { + r := &ClosePointInTime{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ClosePointInTime) Raw(raw io.Reader) *ClosePointInTime { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ClosePointInTime) Request(req *Request) *ClosePointInTime { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ClosePointInTime) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ClosePointInTime: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_pit") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
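+// Most callers go through Do, which wraps Perform and decodes the body into a
+// typed Response; Perform returns the raw *http.Response and leaves status
+// handling and body decoding to the caller.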
+func (r ClosePointInTime) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "close_point_in_time") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "close_point_in_time") + if reader := instrument.RecordRequestBody(ctx, "close_point_in_time", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "close_point_in_time") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClosePointInTime query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a closepointintime.Response +func (r ClosePointInTime) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "close_point_in_time") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 || slices.Contains([]int{404}, res.StatusCode) { + + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ClosePointInTime headers map. +func (r *ClosePointInTime) Header(key, value string) *ClosePointInTime { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClosePointInTime) ErrorTrace(errortrace bool) *ClosePointInTime { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
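+// For example, FilterPath("succeeded", "num_freed") asks Elasticsearch to
+// return only those two top-level fields of the response body.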
+// API name: filter_path +func (r *ClosePointInTime) FilterPath(filterpaths ...string) *ClosePointInTime { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClosePointInTime) Human(human bool) *ClosePointInTime { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClosePointInTime) Pretty(pretty bool) *ClosePointInTime { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The ID of the point-in-time. +// API name: id +func (r *ClosePointInTime) Id(id string) *ClosePointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Id = id + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/request.go new file mode 100644 index 000000000..14c3b00c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/request.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package closepointintime + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package closepointintime +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L48 +type Request struct { + + // Id The ID of the point-in-time. 
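+	// This is the `id` string returned when the point in time was opened.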
+ Id string `json:"id"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Closepointintime request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/response.go new file mode 100644 index 000000000..63e133b1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package closepointintime + +// Response holds the response body struct for the package closepointintime +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L42 +type Response struct { + + // NumFreed The number of search contexts that were successfully closed. + NumFreed int `json:"num_freed"` + // Succeeded If `true`, all search contexts associated with the point-in-time ID were + // successfully closed. + Succeeded bool `json:"succeeded"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/count.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/count.go new file mode 100644 index 000000000..42ff6c41e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/count.go @@ -0,0 +1,553 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. +package count + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Count struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCount type alias for index. +type NewCount func() *Count + +// NewCountFunc returns a new instance of Count with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCountFunc(tp elastictransport.Interface) NewCount { + return func() *Count { + n := New(tp) + + return n + } +} + +// Count search results. +// Get the number of documents matching a query. +// +// The query can be provided either by using a simple query string as a +// parameter, or by defining Query DSL within the request body. +// The query is optional. When no query is provided, the API uses `match_all` to +// count all the documents. +// +// The count API supports multi-target syntax. You can run a single count API +// search across multiple data streams and indices. +// +// The operation is broadcast across all shards. +// For each shard ID group, a replica is chosen and the search is run against +// it. +// This means that replicas increase the scalability of the count. 
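+//
+// A minimal usage sketch, assuming `transport` is any configured
+// elastictransport.Interface and the index name is illustrative:
+//
+//	n, err := count.New(transport).
+//		Index("my-index").
+//		Q("status:active").
+//		Do(context.Background())
+//	if err == nil {
+//		fmt.Println(n.Count)
+//	}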
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count +func New(tp elastictransport.Interface) *Count { + r := &Count{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Count) Raw(raw io.Reader) *Count { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Count) Request(req *Request) *Count { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Count) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Count: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_count") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_count") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Count) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "count") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "count") + if reader := instrument.RecordRequestBody(ctx, "count", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "count") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Count query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a count.Response +func (r Count) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "count") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Count headers map. +func (r *Count) Header(key, value string) *Count { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Count) Index(index string) *Count { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
+// API name: allow_no_indices +func (r *Count) AllowNoIndices(allownoindices bool) *Count { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyzer +func (r *Count) Analyzer(analyzer string) *Count { + r.values.Set("analyzer", analyzer) + + return r +} + +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyze_wildcard +func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: default_operator +func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: df +func (r *Count) Df(df string) *Count { + r.values.Set("df", df) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Count { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, concrete, expanded, or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *Count) IgnoreThrottled(ignorethrottled bool) *Count { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Count) IgnoreUnavailable(ignoreunavailable bool) *Count { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: lenient +func (r *Count) Lenient(lenient bool) *Count { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// MinScore The minimum `_score` value that documents must have to be included in the +// result. +// API name: min_score +func (r *Count) MinScore(minscore string) *Count { + r.values.Set("min_score", minscore) + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, it is random. +// API name: preference +func (r *Count) Preference(preference string) *Count { + r.values.Set("preference", preference) + + return r +} + +// Routing A custom value used to route operations to a specific shard. 
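+// For example, Routing("user-123") restricts the count to the shard that the
+// routing value `user-123` hashes to.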
+// API name: routing +func (r *Count) Routing(routing string) *Count { + r.values.Set("routing", routing) + + return r +} + +// TerminateAfter The maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. +// API name: terminate_after +func (r *Count) TerminateAfter(terminateafter string) *Count { + r.values.Set("terminate_after", terminateafter) + + return r +} + +// Q The query in Lucene query string syntax. This parameter cannot be used with a +// request body. +// API name: q +func (r *Count) Q(q string) *Count { + r.values.Set("q", q) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Count) ErrorTrace(errortrace bool) *Count { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Count) FilterPath(filterpaths ...string) *Count { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Count) Human(human bool) *Count { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Count) Pretty(pretty bool) *Count { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Defines the search query using Query DSL. A request body query cannot be used +// with the `q` query string parameter. +// API name: query +func (r *Count) Query(query types.QueryVariant) *Count { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/request.go new file mode 100644 index 000000000..1cec99c53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/request.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package count + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package count +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/count/CountRequest.ts#L26-L154 +type Request struct { + + // Query Defines the search query using Query DSL. A request body query cannot be used + // with the `q` query string parameter. + Query *types.Query `json:"query,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Count request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/response.go new file mode 100644 index 000000000..c6ce44474 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/count/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package count + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package count +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/count/CountResponse.ts#L23-L25 +type Response struct { + Count int64 `json:"count"` + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/create.go new file mode 100644 index 000000000..5793fb2ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/create.go @@ -0,0 +1,748 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. 
+// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. 
+// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +package create + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Create struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req any + deferred []func(request any) error + buf *gobytes.Buffer + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreate type alias for index. +type NewCreate func(index, id string) *Create + +// NewCreateFunc returns a new instance of Create with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFunc(tp elastictransport.Interface) NewCreate { + return func(index, id string) *Create { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Create a new document in the index. +// +// You can index a new JSON document with the `//_doc/` or +// `//_create/<_id>` APIs +// Using `_create` guarantees that the document is indexed only if it does not +// already exist. +// It returns a 409 response when a document with a same ID already exists in +// the index. +// To update an existing document, you must use the `//_doc/` API. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add a document using the `PUT //_create/<_id>` or `POST +// //_create/<_id>` request formats, you must have the `create_doc`, +// `create`, `index`, or `write` index privilege. 
+// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). 
+// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create +func New(tp elastictransport.Interface) *Create { + r := &Create{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Create) Raw(raw io.Reader) *Create { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Create) Request(req any) *Create { + r.req = req + + return r +} + +// Document allows to set the request property with the appropriate payload. +func (r *Create) Document(document any) *Create { + r.req = document + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
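+// For this endpoint that is a `PUT /{index}/_create/{id}` request, with
+// Content-Type and Accept defaulting to the Elasticsearch v9 compatibility
+// media type when the caller has not already set them.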
+func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Create: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_create") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
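+//
+// Unlike Do, Perform returns the raw response and leaves status checking and
+// body handling to the caller; a minimal sketch (hypothetical names):
+//
+// ```
+// res, err := es.Create("my-index", "1").Document(doc).Perform(ctx)
+// if err == nil {
+//     defer res.Body.Close()
+// }
+// ```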
+func (r Create) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "create")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "create")
+		if reader := instrument.RecordRequestBody(ctx, "create", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "create")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Create query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a create.Response
+func (r Create) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "create")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the Create headers map.
+func (r *Create) Header(key, value string) *Create {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A unique identifier for the document.
+// To automatically generate a document ID, use the `POST /<target>/_doc/`
+// request format.
+// API Name: id
+func (r *Create) _id(id string) *Create {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// Index The name of the data stream or index to target.
+// If the target doesn't exist and matches the name or wildcard (`*`) pattern of
+// an index template with a `data_stream` definition, this request creates the
+// data stream.
+// If the target doesn't exist and doesn't match a data stream template, this
+// request creates the index.
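+// For example (illustrative), with the built-in `logs-*-*` index template, a
+// first write to `logs-myapp-default` creates a data stream rather than a
+// plain index.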
+// API Name: index
+func (r *Create) _index(index string) *Create {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// IncludeSourceOnError Whether to include the document source in the error
+// message in case of parsing errors.
+// API name: include_source_on_error
+func (r *Create) IncludeSourceOnError(includesourceonerror bool) *Create {
+	r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror))
+
+	return r
+}
+
+// Pipeline The ID of the pipeline to use to preprocess incoming documents.
+// If the index has a default ingest pipeline specified, setting the value to
+// `_none` turns off the default ingest pipeline for this request.
+// If a final pipeline is configured, it will always run regardless of the value
+// of this parameter.
+// API name: pipeline
+func (r *Create) Pipeline(pipeline string) *Create {
+	r.values.Set("pipeline", pipeline)
+
+	return r
+}
+
+// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation
+// visible to search.
+// If `wait_for`, it waits for a refresh to make this operation visible to
+// search.
+// If `false`, it does nothing with refreshes.
+// API name: refresh
+func (r *Create) Refresh(refresh refresh.Refresh) *Create {
+	r.values.Set("refresh", refresh.String())
+
+	return r
+}
+
+// RequireAlias If `true`, the destination must be an index alias.
+// API name: require_alias
+func (r *Create) RequireAlias(requirealias bool) *Create {
+	r.values.Set("require_alias", strconv.FormatBool(requirealias))
+
+	return r
+}
+
+// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be
+// created).
+// API name: require_data_stream
+func (r *Create) RequireDataStream(requiredatastream bool) *Create {
+	r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream))
+
+	return r
+}
+
+// Routing A custom value that is used to route operations to a specific shard.
+// API name: routing
+func (r *Create) Routing(routing string) *Create {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// Timeout The period the request waits for the following operations: automatic index
+// creation, dynamic mapping updates, waiting for active shards.
+// Elasticsearch waits for at least the specified timeout period before failing.
+// The actual wait time could be longer, particularly when multiple waits occur.
+//
+// This parameter is useful for situations where the primary shard assigned to
+// perform the operation might not be available when the operation runs.
+// Some reasons for this might be that the primary shard is currently recovering
+// from a gateway or undergoing relocation.
+// By default, the operation will wait on the primary shard to become available
+// for at least 1 minute before failing and responding with an error.
+// API name: timeout
+func (r *Create) Timeout(duration string) *Create {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// Version The explicit version number for concurrency control.
+// It must be a non-negative long number.
+// API name: version
+func (r *Create) Version(versionnumber string) *Create {
+	r.values.Set("version", versionnumber)
+
+	return r
+}
+
+// VersionType The version type.
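+// For example (a hedged sketch; values are hypothetical), an `external`
+// version maintained outside Elasticsearch could be supplied as:
+//
+// ```
+// res, err := es.Create("my-index", "1").
+//     Document(doc).
+//     Version("42").
+//     VersionType(versiontype.External).
+//     Do(ctx)
+// ```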
+// API name: version_type +func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. +// API name: wait_for_active_shards +func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Create) ErrorTrace(errortrace bool) *Create { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Create) FilterPath(filterpaths ...string) *Create { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Create) Human(human bool) *Create { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Create) Pretty(pretty bool) *Create { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/request.go new file mode 100644 index 000000000..b3f8d22c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/create/CreateRequest.ts#L32-L199 +type Request = json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := new(json.RawMessage) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/response.go new file mode 100644 index 000000000..4b23635c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/create/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/create/CreateResponse.ts#L22-L25 +type Response struct { + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/delete.go new file mode 100644 index 000000000..7aa52e730 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/delete.go @@ -0,0 +1,551 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a document. +// +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. 
+// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(index, id string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(index, id string) *Delete { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Delete a document. +// +// Remove a JSON document from the specified index. +// +// NOTE: You cannot send deletion requests directly to a data stream. +// To delete a document in a data stream, you must target the backing index +// containing the document. +// +// **Optimistic concurrency control** +// +// Delete operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Versioning** +// +// Each document indexed is versioned. +// When deleting a document, the version can be specified to make sure the +// relevant document you are trying to delete is actually being deleted and it +// has not changed in the meantime. +// Every write operation run on a document, deletes included, causes its version +// to be incremented. +// The version number of a deleted document remains available for a short time +// after deletion to allow for control of concurrent operations. +// The length of time for which a deleted document's version remains available +// is determined by the `index.gc_deletes` index setting. +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to delete a document. +// +// If the `_routing` mapping is set to `required` and no routing value is +// specified, the delete API throws a `RoutingMissingException` and rejects the +// request. +// +// For example: +// +// ``` +// DELETE /my-index-000001/_doc/1?routing=shard-1 +// ``` +// +// This request deletes the document with ID 1, but it is routed based on the +// user. +// The document is not deleted if the correct routing is not specified. +// +// **Distributed** +// +// The delete operation gets hashed into a specific shard ID. 
+// It then gets redirected into the primary shard within that ID group and +// replicated (if needed) to shard replicas within that ID group. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "delete")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "delete")
+		if reader := instrument.RecordRequestBody(ctx, "delete", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "delete")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a delete.Response
+func (r Delete) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "delete")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 || slices.Contains([]int{404}, res.StatusCode) {
+
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
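+//
+// A minimal sketch (hypothetical names): a `404` yields `false` with a `nil`
+// error, so a missing document can be told apart from a transport failure:
+//
+// ```
+// deleted, err := es.Delete("my-index", "1").IsSuccess(ctx)
+// ```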
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "delete")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the Delete headers map.
+func (r *Delete) Header(key, value string) *Delete {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A unique identifier for the document.
+// API Name: id
+func (r *Delete) _id(id string) *Delete {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// Index The name of the target index.
+// API Name: index
+func (r *Delete) _index(index string) *Delete {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// IfPrimaryTerm Only perform the operation if the document has this primary term.
+// API name: if_primary_term
+func (r *Delete) IfPrimaryTerm(ifprimaryterm string) *Delete {
+	r.values.Set("if_primary_term", ifprimaryterm)
+
+	return r
+}
+
+// IfSeqNo Only perform the operation if the document has this sequence number.
+// API name: if_seq_no
+func (r *Delete) IfSeqNo(sequencenumber string) *Delete {
+	r.values.Set("if_seq_no", sequencenumber)
+
+	return r
+}
+
+// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation
+// visible to search.
+// If `wait_for`, it waits for a refresh to make this operation visible to
+// search.
+// If `false`, it does nothing with refreshes.
+// API name: refresh
+func (r *Delete) Refresh(refresh refresh.Refresh) *Delete {
+	r.values.Set("refresh", refresh.String())
+
+	return r
+}
+
+// Routing A custom value used to route operations to a specific shard.
+// API name: routing
+func (r *Delete) Routing(routing string) *Delete {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// Timeout The period to wait for active shards.
+//
+// This parameter is useful for situations where the primary shard assigned to
+// perform the delete operation might not be available when the delete operation
+// runs.
+// Some reasons for this might be that the primary shard is currently recovering
+// from a store or undergoing relocation.
+// By default, the delete operation will wait on the primary shard to become
+// available for up to 1 minute before failing and responding with an error.
+// API name: timeout
+func (r *Delete) Timeout(duration string) *Delete {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// Version An explicit version number for concurrency control.
+// It must match the current version of the document for the request to succeed.
+// API name: version
+func (r *Delete) Version(versionnumber string) *Delete {
+	r.values.Set("version", versionnumber)
+
+	return r
+}
+
+// VersionType The version type.
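+// For example (a sketch), combined with the `Version` option above, the
+// default internal version type makes the delete succeed only while the
+// stored document version still matches the supplied one.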
+// API name: version_type +func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// WaitForActiveShards The minimum number of shard copies that must be active before proceeding with +// the operation. +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. +// API name: wait_for_active_shards +func (r *Delete) WaitForActiveShards(waitforactiveshards string) *Delete { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/response.go new file mode 100644 index 000000000..2f73f2713 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/delete/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package delete
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result"
+)
+
+// Response holds the response body struct for the package delete
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/delete/DeleteResponse.ts#L22-L35
+type Response struct {
+	ForcedRefresh *bool `json:"forced_refresh,omitempty"`
+	// Id_ The unique identifier for the deleted document.
+	Id_ string `json:"_id"`
+	// Index_ The name of the index the document was removed from.
+	Index_ string `json:"_index"`
+	// PrimaryTerm_ The primary term assigned to the document for the delete operation.
+	PrimaryTerm_ *int64 `json:"_primary_term,omitempty"`
+	// Result The result of the delete operation: `deleted` or `not_found`.
+	Result result.Result `json:"result"`
+	// SeqNo_ The sequence number assigned to the document for the delete operation.
+	// Sequence numbers are used to ensure an older version of a document doesn't
+	// overwrite a newer version.
+	SeqNo_ *int64 `json:"_seq_no,omitempty"`
+	// Shards_ Information about the replication process of the operation.
+	Shards_ types.ShardStatistics `json:"_shards"`
+	// Version_ The document version, which is incremented each time the document is updated.
+	Version_ int64 `json:"_version"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/delete_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/delete_by_query.go
new file mode 100644
index 000000000..86f52bd41
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/delete_by_query.go
@@ -0,0 +1,937 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delete documents.
+//
+// Deletes documents that match the specified query.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or alias:
+//
+// * `read`
+// * `delete` or `write`
+//
+// You can specify the query criteria in the request URI or the request body
+// using the same syntax as the search API.
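+// For example (an illustrative request):
+//
+// ```
+// POST /my-index-000001/_delete_by_query
+// {
+//   "query": {
+//     "match": {
+//       "user.id": "elkbee"
+//     }
+//   }
+// }
+// ```
+//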
+// When you submit a delete by query request, Elasticsearch gets a snapshot of
+// the data stream or index when it begins processing the request and deletes
+// matching documents using internal versioning.
+// If a document changes between the time that the snapshot is taken and the
+// delete operation is processed, it results in a version conflict and the
+// delete operation fails.
+//
+// NOTE: Documents with a version equal to 0 cannot be deleted using delete by
+// query because internal versioning does not support 0 as a valid version
+// number.
+//
+// While processing a delete by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents to delete.
+// A bulk delete request is performed for each batch of matching documents.
+// If a search or bulk request is rejected, the requests are retried up to 10
+// times, with exponential back off.
+// If the maximum retry limit is reached, processing halts and all failed
+// requests are returned in the response.
+// Any delete requests that completed successfully still stick; they are not
+// rolled back.
+//
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts the operation could attempt
+// to delete more documents from the source than `max_docs` until it has
+// successfully deleted `max_docs` documents, or it has gone through every
+// document in the source query.
+//
+// **Throttling delete requests**
+//
+// To control the rate at which delete by query issues batches of delete
+// operations, you can set `requests_per_second` to any positive decimal number.
+// This pads each batch with a wait time to throttle the rate.
+// Set `requests_per_second` to `-1` to disable throttling.
+//
+// Throttling uses a wait time between batches so that the internal scroll
+// requests can be given a timeout that takes the request padding into account.
+// The padding time is the difference between the batch size divided by the
+// `requests_per_second` and the time spent writing.
+// By default the batch size is `1000`, so if `requests_per_second` is set to
+// `500`:
+//
+// ```
+// target_time = 1000 / 500 per second = 2 seconds
+// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+// ```
+//
+// Since the batch is issued as a single `_bulk` request, large batch sizes
+// cause Elasticsearch to create many requests and wait before starting the next
+// set.
+// This is "bursty" instead of "smooth".
+//
+// **Slicing**
+//
+// Delete by query supports sliced scroll to parallelize the delete process.
+// This can improve efficiency and provide a convenient way to break the request
+// down into smaller parts.
+//
+// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to
+// use.
+// This setting will use one slice per shard, up to a certain limit.
+// If there are multiple source data streams or indices, it will choose the
+// number of slices based on the index or backing index with the smallest number
+// of shards.
+// Adding slices to the delete by query operation creates sub-requests which
+// means it has some quirks:
+//
+// * You can see these requests in the tasks APIs. These sub-requests are
+// "child" tasks of the task for the request with slices.
+// * Fetching the status of the task for the request with slices only contains
+// the status of completed slices.
+// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. +package deletebyquery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteByQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteByQuery type alias for index. 
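+//
+// In a typed client this constructor is typically reached as
+// `es.DeleteByQuery("my-index")` (a hedged note; `es` is an assumed
+// `elasticsearch.TypedClient`).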
+type NewDeleteByQuery func(index string) *DeleteByQuery
+
+// NewDeleteByQueryFunc returns a new instance of DeleteByQuery with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDeleteByQueryFunc(tp elastictransport.Interface) NewDeleteByQuery {
+	return func(index string) *DeleteByQuery {
+		n := New(tp)
+
+		n._index(index)
+
+		return n
+	}
+}
+
+// Delete documents.
+//
+// Deletes documents that match the specified query.
+//
+// If the Elasticsearch security features are enabled, you must have the
+// following index privileges for the target data stream, index, or alias:
+//
+// * `read`
+// * `delete` or `write`
+//
+// You can specify the query criteria in the request URI or the request body
+// using the same syntax as the search API.
+// When you submit a delete by query request, Elasticsearch gets a snapshot of
+// the data stream or index when it begins processing the request and deletes
+// matching documents using internal versioning.
+// If a document changes between the time that the snapshot is taken and the
+// delete operation is processed, it results in a version conflict and the
+// delete operation fails.
+//
+// NOTE: Documents with a version equal to 0 cannot be deleted using delete by
+// query because internal versioning does not support 0 as a valid version
+// number.
+//
+// While processing a delete by query request, Elasticsearch performs multiple
+// search requests sequentially to find all of the matching documents to delete.
+// A bulk delete request is performed for each batch of matching documents.
+// If a search or bulk request is rejected, the requests are retried up to 10
+// times, with exponential back off.
+// If the maximum retry limit is reached, processing halts and all failed
+// requests are returned in the response.
+// Any delete requests that completed successfully still stick; they are not
+// rolled back.
+//
+// You can opt to count version conflicts instead of halting and returning by
+// setting `conflicts` to `proceed`.
+// Note that if you opt to count version conflicts the operation could attempt
+// to delete more documents from the source than `max_docs` until it has
+// successfully deleted `max_docs` documents, or it has gone through every
+// document in the source query.
+//
+// **Throttling delete requests**
+//
+// To control the rate at which delete by query issues batches of delete
+// operations, you can set `requests_per_second` to any positive decimal number.
+// This pads each batch with a wait time to throttle the rate.
+// Set `requests_per_second` to `-1` to disable throttling.
+//
+// Throttling uses a wait time between batches so that the internal scroll
+// requests can be given a timeout that takes the request padding into account.
+// The padding time is the difference between the batch size divided by the
+// `requests_per_second` and the time spent writing.
+// By default the batch size is `1000`, so if `requests_per_second` is set to
+// `500`:
+//
+// ```
+// target_time = 1000 / 500 per second = 2 seconds
+// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+// ```
+//
+// Since the batch is issued as a single `_bulk` request, large batch sizes
+// cause Elasticsearch to create many requests and wait before starting the next
+// set.
+// This is "bursty" instead of "smooth".
+//
+// **Slicing**
+//
+// Delete by query supports sliced scroll to parallelize the delete process.
+// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` lets Elasticsearch choose the number of slices to +// use. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// Adding slices to the delete by query operation creates sub-requests which +// means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with slices only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with `slices` will cancel each sub-request. +// * Due to the nature of `slices` each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// `slices` are distributed proportionally to each sub-request. Combine that +// with the earlier point about distribution being uneven and you should +// conclude that using `max_docs` with `slices` might not result in exactly +// `max_docs` documents being deleted. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many `slices` hurts +// performance. Setting `slices` higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Delete performance scales linearly across available resources with the +// number of slices. +// +// Whether query or delete performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// +// **Cancel a delete by query operation** +// +// Any delete by query can be canceled using the task cancel API. For example: +// +// ``` +// POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +// ``` +// +// The task ID can be found by using the get tasks API. +// +// Cancellation should happen quickly but might take a few seconds. +// The get task status API will continue to list the delete by query task until +// this task checks that it has been cancelled and terminates itself. 
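+//
+// A hedged end-to-end sketch (hypothetical names; the `Query` body helper is
+// assumed to be generated further down in this file):
+//
+// ```
+// res, err := es.DeleteByQuery("my-index").
+//     Query(&types.Query{
+//         Match: map[string]types.MatchQuery{
+//             "user.id": {Query: "elkbee"},
+//         },
+//     }).
+//     Conflicts(conflicts.Proceed).
+//     WaitForCompletion(false).
+//     Do(ctx)
+// ```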
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query +func New(tp elastictransport.Interface) *DeleteByQuery { + r := &DeleteByQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *DeleteByQuery) Raw(raw io.Reader) *DeleteByQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *DeleteByQuery) Request(req *Request) *DeleteByQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteByQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for DeleteByQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_delete_by_query") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteByQuery) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "delete_by_query")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "delete_by_query")
+		if reader := instrument.RecordRequestBody(ctx, "delete_by_query", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "delete_by_query")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the DeleteByQuery query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a deletebyquery.Response
+func (r DeleteByQuery) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "delete_by_query")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the DeleteByQuery headers map.
+func (r *DeleteByQuery) Header(key, value string) *DeleteByQuery {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams, indices, and aliases to search.
+// It supports wildcards (`*`).
+// To search all data streams or indices, omit this parameter or use `*` or
+// `_all`.
+// API Name: index
+func (r *DeleteByQuery) _index(index string) *DeleteByQuery {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index
+// alias, or `_all` value targets only missing or closed indices.
+// This behavior applies even if the request targets other open indices.
+// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *DeleteByQuery) AllowNoIndices(allownoindices bool) *DeleteByQuery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// Analyzer Analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyzer +func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { + r.values.Set("analyzer", analyzer) + + return r +} + +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyze_wildcard +func (r *DeleteByQuery) AnalyzeWildcard(analyzewildcard bool) *DeleteByQuery { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// Conflicts What to do if delete by query hits version conflicts: `abort` or `proceed`. +// API name: conflicts +func (r *DeleteByQuery) Conflicts(conflicts conflicts.Conflicts) *DeleteByQuery { + r.values.Set("conflicts", conflicts.String()) + + return r +} + +// DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: default_operator +func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *DeleteByQuery { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// Df The field to use as default where no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: df +func (r *DeleteByQuery) Df(df string) *DeleteByQuery { + r.values.Set("df", df) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *DeleteByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteByQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// From Skips the specified number of documents. +// API name: from +func (r *DeleteByQuery) From(from string) *DeleteByQuery { + r.values.Set("from", from) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *DeleteByQuery) IgnoreUnavailable(ignoreunavailable bool) *DeleteByQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: lenient +func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. 
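+// For example (a sketch), `.Preference("_local")` favors shards allocated on
+// the local node when possible.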
+// API name: preference
+func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery {
+	r.values.Set("preference", preference)
+
+	return r
+}
+
+// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query
+// after the request completes.
+// This is different than the delete API's `refresh` parameter, which causes
+// just the shard that received the delete request to be refreshed.
+// Unlike the delete API, it does not support `wait_for`.
+// API name: refresh
+func (r *DeleteByQuery) Refresh(refresh bool) *DeleteByQuery {
+	r.values.Set("refresh", strconv.FormatBool(refresh))
+
+	return r
+}
+
+// RequestCache If `true`, the request cache is used for this request.
+// Defaults to the index-level setting.
+// API name: request_cache
+func (r *DeleteByQuery) RequestCache(requestcache bool) *DeleteByQuery {
+	r.values.Set("request_cache", strconv.FormatBool(requestcache))
+
+	return r
+}
+
+// RequestsPerSecond The throttle for this request in sub-requests per second.
+// API name: requests_per_second
+func (r *DeleteByQuery) RequestsPerSecond(requestspersecond string) *DeleteByQuery {
+	r.values.Set("requests_per_second", requestspersecond)
+
+	return r
+}
+
+// Routing A custom value used to route operations to a specific shard.
+// API name: routing
+func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// Q A query in the Lucene query string syntax.
+// API name: q
+func (r *DeleteByQuery) Q(q string) *DeleteByQuery {
+	r.values.Set("q", q)
+
+	return r
+}
+
+// Scroll The period to retain the search context for scrolling.
+// API name: scroll
+func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery {
+	r.values.Set("scroll", duration)
+
+	return r
+}
+
+// ScrollSize The size of the scroll request that powers the operation.
+// API name: scroll_size
+func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery {
+	r.values.Set("scroll_size", scrollsize)
+
+	return r
+}
+
+// SearchTimeout The explicit timeout for each search request.
+// It defaults to no timeout.
+// API name: search_timeout
+func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery {
+	r.values.Set("search_timeout", duration)
+
+	return r
+}
+
+// SearchType The type of the search operation.
+// Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+// API name: search_type
+func (r *DeleteByQuery) SearchType(searchtype searchtype.SearchType) *DeleteByQuery {
+	r.values.Set("search_type", searchtype.String())
+
+	return r
+}
+
+// Slices The number of slices this task should be divided into.
+// API name: slices
+func (r *DeleteByQuery) Slices(slices string) *DeleteByQuery {
+	r.values.Set("slices", slices)
+
+	return r
+}
+
+// Sort A comma-separated list of `<field>:<direction>` pairs.
+// API name: sort
+func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery {
+	tmp := []string{}
+	for _, item := range sorts {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("sort", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Stats The specific `tag` of the request for logging and statistical purposes.
+// API name: stats
+func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery {
+	tmp := []string{}
+	for _, item := range stats {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("stats", strings.Join(tmp, ","))
+
+	return r
+}
+
+// TerminateAfter The maximum number of documents to collect for each shard.
+// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// +// Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. +// API name: terminate_after +func (r *DeleteByQuery) TerminateAfter(terminateafter string) *DeleteByQuery { + r.values.Set("terminate_after", terminateafter) + + return r +} + +// Timeout The period each deletion request waits for active shards. +// API name: timeout +func (r *DeleteByQuery) Timeout(duration string) *DeleteByQuery { + r.values.Set("timeout", duration) + + return r +} + +// Version If `true`, returns the document version as part of a hit. +// API name: version +func (r *DeleteByQuery) Version(version bool) *DeleteByQuery { + r.values.Set("version", strconv.FormatBool(version)) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// The `timeout` value controls how long each write request waits for +// unavailable shards to become available. +// API name: wait_for_active_shards +func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteByQuery { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the operation is complete. +// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task you can use to cancel or get the status of the +// task. Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. When you are done with a task, you should delete the +// task document so Elasticsearch can reclaim the space. +// API name: wait_for_completion +func (r *DeleteByQuery) WaitForCompletion(waitforcompletion bool) *DeleteByQuery { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteByQuery) ErrorTrace(errortrace bool) *DeleteByQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteByQuery) FilterPath(filterpaths ...string) *DeleteByQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteByQuery) Human(human bool) *DeleteByQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
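//
// A hypothetical sketch of the asynchronous pattern described under
// WaitForCompletion above (client, types, and context are assumed):
//
//	res, err := client.DeleteByQuery("my-index").
//		Query(&types.Query{MatchAll: &types.MatchAllQuery{}}).
//		WaitForCompletion(false).
//		Do(context.Background())
//	if err == nil && res.Task != nil {
//		// *res.Task identifies the background task to poll, cancel, or rethrottle.
//	}
//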
+// API name: pretty +func (r *DeleteByQuery) Pretty(pretty bool) *DeleteByQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The maximum number of documents to delete. +// API name: max_docs +func (r *DeleteByQuery) MaxDocs(maxdocs int64) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxDocs = &maxdocs + + return r +} + +// The documents to delete specified with Query DSL. +// API name: query +func (r *DeleteByQuery) Query(query types.QueryVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// Slice the request manually using the provided slice ID and total number of +// slices. +// API name: slice +func (r *DeleteByQuery) Slice(slice types.SlicedScrollVariant) *DeleteByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Slice = slice.SlicedScrollCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/request.go new file mode 100644 index 000000000..76b724125 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletebyquery + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package deletebyquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L314 +type Request struct { + + // MaxDocs The maximum number of documents to delete. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Query The documents to delete specified with Query DSL. + Query *types.Query `json:"query,omitempty"` + // Slice Slice the request manually using the provided slice ID and total number of + // slices. 
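//
// As a hypothetical illustration, a body that sets all three fields could
// serialize to:
//
//	{
//	  "max_docs": 1000,
//	  "query": { "term": { "user.id": { "value": "kimchy" } } },
//	  "slice": { "id": "0", "max": 2 }
//	}
//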
+ Slice *types.SlicedScroll `json:"slice,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Deletebyquery request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/response.go new file mode 100644 index 000000000..f209ee408 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery/response.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletebyquery + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package deletebyquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L88 +type Response struct { + + // Batches The number of scroll responses pulled back by the delete by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures An array of failures if there were any unrecoverable errors during the + // process. + // If this array is not empty, the request ended abnormally because of those + // failures. + // Delete by query is implemented using batches and any failures cause the + // entire process to end but all failures in the current batch are collected + // into the array. + // You can use the `conflicts` option to prevent reindex from ending on version + // conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops This field is always equal to zero for delete by query. + // It exists only so that delete by query, update by query, and reindex APIs + // return responses with the same structure. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the delete by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by delete by query. + // `bulk` is the number of bulk actions retried. 
+ // `search` is the number of search actions retried. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task *string `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a `_delete_by_query` response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If `true`, some requests run during the delete by query operation timed out. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // VersionConflicts The number of version conflicts that the delete by query hit. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go new file mode 100644 index 000000000..194c76078 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Throttle a delete by query operation. +// +// Change the number of requests per second for a particular delete by query +// operation. +// Rethrottling that speeds up the query takes effect immediately but +// rethrotting that slows down the query takes effect after completing the +// current batch to prevent scroll timeouts. 
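//
// A hypothetical usage sketch (client is an assumed typedapi TypedClient and
// the task ID comes from an earlier delete by query started with
// wait_for_completion=false):
//
//	res, err := client.DeleteByQueryRethrottle("oTUltX4IQMOUUVeiohTt8A:12345").
//		RequestsPerSecond("-1"). // -1 disables throttling
//		Do(context.Background())
//	if err == nil {
//		_ = res.Nodes // task information, grouped by node by default
//	}
//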
+package deletebyqueryrethrottle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + taskidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteByQueryRethrottle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + taskid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteByQueryRethrottle type alias for index. +type NewDeleteByQueryRethrottle func(taskid string) *DeleteByQueryRethrottle + +// NewDeleteByQueryRethrottleFunc returns a new instance of DeleteByQueryRethrottle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteByQueryRethrottleFunc(tp elastictransport.Interface) NewDeleteByQueryRethrottle { + return func(taskid string) *DeleteByQueryRethrottle { + n := New(tp) + + n._taskid(taskid) + + return n + } +} + +// Throttle a delete by query operation. +// +// Change the number of requests per second for a particular delete by query +// operation. +// Rethrottling that speeds up the query takes effect immediately but +// rethrotting that slows down the query takes effect after completing the +// current batch to prevent scroll timeouts. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle +func New(tp elastictransport.Interface) *DeleteByQueryRethrottle { + r := &DeleteByQueryRethrottle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteByQueryRethrottle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == taskidMask: + path.WriteString("/") + path.WriteString("_delete_by_query") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "taskid", r.taskid) + } + path.WriteString(r.taskid) + path.WriteString("/") + path.WriteString("_rethrottle") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
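//
// Unlike Do below, Perform hands back the raw *http.Response, so the caller
// must drain and close the body. A hypothetical sketch (client, taskID, and
// ctx are assumed to be in scope):
//
//	res, err := client.DeleteByQueryRethrottle(taskID).Perform(ctx)
//	if err != nil {
//		return err
//	}
//	defer res.Body.Close()
//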
+func (r DeleteByQueryRethrottle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "delete_by_query_rethrottle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "delete_by_query_rethrottle") + if reader := instrument.RecordRequestBody(ctx, "delete_by_query_rethrottle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "delete_by_query_rethrottle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteByQueryRethrottle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletebyqueryrethrottle.Response +func (r DeleteByQueryRethrottle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
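//
// As the implementation below shows, a 2xx status yields (true, nil), a 404
// yields (false, nil), and any other failure yields (false, err). A
// hypothetical sketch (client, taskID, and ctx are assumed):
//
//	if ok, err := client.DeleteByQueryRethrottle(taskID).IsSuccess(ctx); err != nil {
//		// transport failure or a non-404 error status
//	} else if !ok {
//		// the task was not found (HTTP 404)
//	}
//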
+func (r DeleteByQueryRethrottle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteByQueryRethrottle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteByQueryRethrottle headers map. +func (r *DeleteByQueryRethrottle) Header(key, value string) *DeleteByQueryRethrottle { + r.headers.Set(key, value) + + return r +} + +// TaskId The ID for the task. +// API Name: taskid +func (r *DeleteByQueryRethrottle) _taskid(taskid string) *DeleteByQueryRethrottle { + r.paramSet |= taskidMask + r.taskid = taskid + + return r +} + +// RequestsPerSecond The throttle for this request in sub-requests per second. +// To disable throttling, set it to `-1`. +// API name: requests_per_second +func (r *DeleteByQueryRethrottle) RequestsPerSecond(requestspersecond string) *DeleteByQueryRethrottle { + r.values.Set("requests_per_second", requestspersecond) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteByQueryRethrottle) ErrorTrace(errortrace bool) *DeleteByQueryRethrottle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteByQueryRethrottle) FilterPath(filterpaths ...string) *DeleteByQueryRethrottle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteByQueryRethrottle) Human(human bool) *DeleteByQueryRethrottle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteByQueryRethrottle) Pretty(pretty bool) *DeleteByQueryRethrottle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/response.go new file mode 100644 index 000000000..959d58211 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle/response.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletebyqueryrethrottle + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package deletebyqueryrethrottle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L25 +type Response struct { + NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` + // Nodes Task information grouped by node, if `group_by` was set to `node` (the + // default). + Nodes map[string]types.NodeTasks `json:"nodes,omitempty"` + TaskFailures []types.TaskFailure `json:"task_failures,omitempty"` + // Tasks Either a flat list of tasks if `group_by` was set to `none`, or grouped by + // parents if + // `group_by` was set to `parents`. 
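//
// Because the shape depends on `group_by`, a hypothetical consumer would
// type-switch on the decoded union (res is an assumed *Response):
//
//	switch tasks := res.Tasks.(type) {
//	case map[string]types.ParentTaskInfo:
//		_ = tasks // grouped by parents
//	case []types.TaskInfo:
//		_ = tasks // flat list (`group_by` was `none`)
//	}
//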
+ Tasks types.TaskInfos `json:"tasks,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeTasks, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_failures": + if err := dec.Decode(&s.NodeFailures); err != nil { + return fmt.Errorf("%s | %w", "NodeFailures", err) + } + + case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } + if err := dec.Decode(&s.Nodes); err != nil { + return fmt.Errorf("%s | %w", "Nodes", err) + } + + case "task_failures": + if err := dec.Decode(&s.TaskFailures); err != nil { + return fmt.Errorf("%s | %w", "TaskFailures", err) + } + + case "tasks": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/delete_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/delete_script.go new file mode 100644 index 000000000..cc7ade979 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/delete_script.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a script or search template. +// Deletes a stored script or search template. +package deletescript + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
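//
// A hypothetical check for this sentinel; note the typed constructor always
// sets the required script ID, so the error mainly matters when building
// requests by hand:
//
//	if _, err := r.HttpRequest(ctx); errors.Is(err, ErrBuildPath) {
//		// the `id` path parameter was never set
//	}
//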
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteScript struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteScript type alias for index. +type NewDeleteScript func(id string) *DeleteScript + +// NewDeleteScriptFunc returns a new instance of DeleteScript with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteScriptFunc(tp elastictransport.Interface) NewDeleteScript { + return func(id string) *DeleteScript { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete a script or search template. +// Deletes a stored script or search template. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script +func New(tp elastictransport.Interface) *DeleteScript { + r := &DeleteScript{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteScript) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteScript) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "delete_script") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "delete_script") + if reader := instrument.RecordRequestBody(ctx, "delete_script", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "delete_script") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteScript query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletescript.Response +func (r DeleteScript) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
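//
// A hypothetical sketch (client is an assumed typedapi TypedClient):
//
//	if deleted, err := client.DeleteScript("my-search-template").IsSuccess(ctx); err == nil && !deleted {
//		// the script or search template did not exist (HTTP 404)
//	}
//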
+func (r DeleteScript) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "delete_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteScript query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteScript headers map. +func (r *DeleteScript) Header(key, value string) *DeleteScript { + r.headers.Set(key, value) + + return r +} + +// Id The identifier for the stored script or search template. +// API Name: id +func (r *DeleteScript) _id(id string) *DeleteScript { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: timeout +func (r *DeleteScript) Timeout(duration string) *DeleteScript { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteScript) ErrorTrace(errortrace bool) *DeleteScript { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteScript) FilterPath(filterpaths ...string) *DeleteScript { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteScript) Human(human bool) *DeleteScript { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteScript) Pretty(pretty bool) *DeleteScript { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/response.go new file mode 100644 index 000000000..022b6047a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletescript + +// Response holds the response body struct for the package deletescript +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/exists/exists.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/exists/exists.go new file mode 100644 index 000000000..a518b6953 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/exists/exists.go @@ -0,0 +1,461 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check a document. +// +// Verify that a document exists. 
+// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. +package exists + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Exists struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExists type alias for index. +type NewExists func(index, id string) *Exists + +// NewExistsFunc returns a new instance of Exists with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsFunc(tp elastictransport.Interface) NewExists { + return func(index, id string) *Exists { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Check a document. +// +// Verify that a document exists. +// For example, check to see if a document with the `_id` 0 exists: +// +// ``` +// HEAD my-index-000001/_doc/0 +// ``` +// +// If the document exists, the API returns a status code of `200 - OK`. +// If the document doesn’t exist, the API returns `404 - Not Found`. +// +// **Versioning support** +// +// You can use the `version` parameter to check the document only if its current +// version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func New(tp elastictransport.Interface) *Exists { + r := &Exists{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Exists) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Exists) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "exists") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "exists") + if reader := instrument.RecordRequestBody(ctx, "exists", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "exists") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Exists query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a exists.Response +func (r Exists) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
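//
// For this endpoint Do simply delegates to IsSuccess, so the two methods are
// interchangeable. A hypothetical sketch (client is an assumed typedapi
// TypedClient):
//
//	found, err := client.Exists("my-index-000001", "0").Do(ctx)
//	if err == nil && found {
//		// the document exists (HTTP 200)
//	}
//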
+func (r Exists) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "exists") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Exists query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Exists headers map. +func (r *Exists) Header(key, value string) *Exists { + r.headers.Set(key, value) + + return r +} + +// Id A unique document identifier. +// API Name: id +func (r *Exists) _id(id string) *Exists { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). +// API Name: index +func (r *Exists) _index(index string) *Exists { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// +// If it is set to `_local`, the operation will prefer to be run on a local +// allocated shard when possible. +// If it is set to a custom value, the value is used to guarantee that the same +// shards will be used for the same custom value. +// This can help with "jumping values" when hitting different shards in +// different refresh states. +// A sample value can be something like the web session ID or the user name. +// API name: preference +func (r *Exists) Preference(preference string) *Exists { + r.values.Set("preference", preference) + + return r +} + +// Realtime If `true`, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *Exists) Realtime(realtime bool) *Exists { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). +// API name: refresh +func (r *Exists) Refresh(refresh bool) *Exists { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *Exists) Routing(routing string) *Exists { + r.values.Set("routing", routing) + + return r +} + +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. +// API name: _source +func (r *Exists) Source_(sourceconfigparam string) *Exists { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. 
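//
// A hypothetical sketch combining the source filters (client is an assumed
// typedapi TypedClient):
//
//	found, err := client.Exists("my-index-000001", "0").
//		Source_("true").
//		SourceExcludes_("metadata.*").
//		IsSuccess(ctx)
//	if err == nil && found {
//		// the document exists
//	}
//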
+// API name: _source_excludes +func (r *Exists) SourceExcludes_(fields ...string) *Exists { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_includes +func (r *Exists) SourceIncludes_(fields ...string) *Exists { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// StoredFields A comma-separated list of stored fields to return as part of a hit. +// If no fields are specified, no stored fields are included in the response. +// If this field is specified, the `_source` parameter defaults to `false`. +// API name: stored_fields +func (r *Exists) StoredFields(fields ...string) *Exists { + r.values.Set("stored_fields", strings.Join(fields, ",")) + + return r +} + +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. +// API name: version +func (r *Exists) Version(versionnumber string) *Exists { + r.values.Set("version", versionnumber) + + return r +} + +// VersionType The version type. +// API name: version_type +func (r *Exists) VersionType(versiontype versiontype.VersionType) *Exists { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Exists) ErrorTrace(errortrace bool) *Exists { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Exists) FilterPath(filterpaths ...string) *Exists { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Exists) Human(human bool) *Exists { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Exists) Pretty(pretty bool) *Exists { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/existssource/exists_source.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/existssource/exists_source.go new file mode 100644 index 000000000..dde0a283b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/existssource/exists_source.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check for a document source. +// +// Check whether a document source exists in an index. +// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. +package existssource + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExistsSource struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExistsSource type alias for index. +type NewExistsSource func(index, id string) *ExistsSource + +// NewExistsSourceFunc returns a new instance of ExistsSource with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsSourceFunc(tp elastictransport.Interface) NewExistsSource { + return func(index, id string) *ExistsSource { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Check for a document source. +// +// Check whether a document source exists in an index. +// For example: +// +// ``` +// HEAD my-index-000001/_source/1 +// ``` +// +// A document's source is not available if it is disabled in the mapping. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func New(tp elastictransport.Interface) *ExistsSource { + r := &ExistsSource{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ExistsSource) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_source") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExistsSource) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "exists_source") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "exists_source") + if reader := instrument.RecordRequestBody(ctx, "exists_source", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "exists_source") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExistsSource query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a existssource.Response +func (r ExistsSource) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
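+//
+// A minimal usage sketch (assuming a configured elastictransport.Interface
+// named `tp`; the index name and document ID are illustrative):
+//
+// ```
+// found, err := existssource.NewExistsSourceFunc(tp)("my-index-000001", "1").IsSuccess(context.Background())
+// ```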
+func (r ExistsSource) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "exists_source") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExistsSource query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExistsSource headers map. +func (r *ExistsSource) Header(key, value string) *ExistsSource { + r.headers.Set(key, value) + + return r +} + +// Id A unique identifier for the document. +// API Name: id +func (r *ExistsSource) _id(id string) *ExistsSource { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases. +// It supports wildcards (`*`). +// API Name: index +func (r *ExistsSource) _index(index string) *ExistsSource { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// API name: preference +func (r *ExistsSource) Preference(preference string) *ExistsSource { + r.values.Set("preference", preference) + + return r +} + +// Realtime If `true`, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). +// API name: refresh +func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *ExistsSource) Routing(routing string) *ExistsSource { + r.values.Set("routing", routing) + + return r +} + +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. +// API name: _source +func (r *ExistsSource) Source_(sourceconfigparam string) *ExistsSource { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// API name: _source_excludes +func (r *ExistsSource) SourceExcludes_(fields ...string) *ExistsSource { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// API name: _source_includes +func (r *ExistsSource) SourceIncludes_(fields ...string) *ExistsSource { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// Version The version number for concurrency control. 
+// It must match the current version of the document for the request to succeed.
+// API name: version
+func (r *ExistsSource) Version(versionnumber string) *ExistsSource {
+	r.values.Set("version", versionnumber)
+
+	return r
+}
+
+// VersionType The version type.
+// API name: version_type
+func (r *ExistsSource) VersionType(versiontype versiontype.VersionType) *ExistsSource {
+	r.values.Set("version_type", versiontype.String())
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of
+// errors when they occur.
+// API name: error_trace
+func (r *ExistsSource) ErrorTrace(errortrace bool) *ExistsSource {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *ExistsSource) FilterPath(filterpaths ...string) *ExistsSource {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, Elasticsearch will return statistics in a format
+// suitable for humans. For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the
+// human-readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *ExistsSource) Human(human bool) *ExistsSource {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *ExistsSource) Pretty(pretty bool) *ExistsSource {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/explain.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/explain.go
new file mode 100644
index 000000000..69c128f05
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/explain.go
@@ -0,0 +1,514 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Explain a document match result.
+// Get information about why a specific document matches, or doesn't match, a
+// query.
+// It computes a score explanation for a query and a specific document.
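+//
+// A typical request is built and executed in one chain, as in this sketch
+// (`tp` is an elastictransport.Interface; all values are illustrative):
+//
+// ```
+// res, err := explain.NewExplainFunc(tp)("my-index-000001", "0").
+//	Q("user.id:kimchy").
+//	Do(context.Background())
+// ```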
+package explain + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Explain struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExplain type alias for index. +type NewExplain func(index, id string) *Explain + +// NewExplainFunc returns a new instance of Explain with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExplainFunc(tp elastictransport.Interface) NewExplain { + return func(index, id string) *Explain { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Explain a document match result. +// Get information about why a specific document matches, or doesn't match, a +// query. +// It computes a score explanation for a query and a specific document. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain +func New(tp elastictransport.Interface) *Explain { + r := &Explain{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Explain) Raw(raw io.Reader) *Explain { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Explain) Request(req *Request) *Explain { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Explain) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Explain: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_explain") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
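+// Most callers go through Do, which decodes the response body; Perform is the
+// lower-level entry point for when the raw *http.Response is needed, as in
+// this sketch (error handling elided):
+//
+// ```
+// res, _ := r.Perform(context.Background())
+// defer res.Body.Close()
+// ```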
+func (r Explain) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "explain") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "explain") + if reader := instrument.RecordRequestBody(ctx, "explain", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "explain") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Explain query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explain.Response +func (r Explain) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "explain") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Explain headers map. +func (r *Explain) Header(key, value string) *Explain { + r.headers.Set(key, value) + + return r +} + +// Id The document identifier. +// API Name: id +func (r *Explain) _id(id string) *Explain { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index Index names that are used to limit the request. +// Only a single index name can be provided to this parameter. +// API Name: index +func (r *Explain) _index(index string) *Explain { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. 
+// API name: analyzer
+func (r *Explain) Analyzer(analyzer string) *Explain {
+	r.values.Set("analyzer", analyzer)
+
+	return r
+}
+
+// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: analyze_wildcard
+func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain {
+	r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard))
+
+	return r
+}
+
+// DefaultOperator The default operator for a query string query: `AND` or `OR`.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: default_operator
+func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain {
+	r.values.Set("default_operator", defaultoperator.String())
+
+	return r
+}
+
+// Df The field to use as the default where no field prefix is given in the query
+// string.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: df
+func (r *Explain) Df(df string) *Explain {
+	r.values.Set("df", df)
+
+	return r
+}
+
+// Lenient If `true`, format-based query failures (such as providing text to a numeric
+// field) in the query string will be ignored.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: lenient
+func (r *Explain) Lenient(lenient bool) *Explain {
+	r.values.Set("lenient", strconv.FormatBool(lenient))
+
+	return r
+}
+
+// Preference The node or shard the operation should be performed on.
+// It is random by default.
+// API name: preference
+func (r *Explain) Preference(preference string) *Explain {
+	r.values.Set("preference", preference)
+
+	return r
+}
+
+// Routing A custom value used to route operations to a specific shard.
+// API name: routing
+func (r *Explain) Routing(routing string) *Explain {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists
+// the fields to return.
+// API name: _source
+func (r *Explain) Source_(sourceconfigparam string) *Explain {
+	r.values.Set("_source", sourceconfigparam)
+
+	return r
+}
+
+// SourceExcludes_ A comma-separated list of source fields to exclude from the response.
+// You can also use this parameter to exclude fields from the subset specified
+// in `_source_includes` query parameter.
+// If the `_source` parameter is `false`, this parameter is ignored.
+// API name: _source_excludes
+func (r *Explain) SourceExcludes_(fields ...string) *Explain {
+	r.values.Set("_source_excludes", strings.Join(fields, ","))
+
+	return r
+}
+
+// SourceIncludes_ A comma-separated list of source fields to include in the response.
+// If this parameter is specified, only these source fields are returned.
+// You can exclude fields from this subset using the `_source_excludes` query
+// parameter.
+// If the `_source` parameter is `false`, this parameter is ignored.
+// API name: _source_includes
+func (r *Explain) SourceIncludes_(fields ...string) *Explain {
+	r.values.Set("_source_includes", strings.Join(fields, ","))
+
+	return r
+}
+
+// StoredFields A comma-separated list of stored fields to return in the response.
+// API name: stored_fields
+func (r *Explain) StoredFields(fields ...string) *Explain {
+	r.values.Set("stored_fields", strings.Join(fields, ","))
+
+	return r
+}
+
+// Q The query in the Lucene query string syntax.
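+// For example (illustrative):
+//
+// ```
+// r.Q("user.id:kimchy")
+// ```
+//
+// When a request body is preferred, supply a Query DSL query through the
+// Query option on this builder instead.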
+// API name: q +func (r *Explain) Q(q string) *Explain { + r.values.Set("q", q) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Explain) ErrorTrace(errortrace bool) *Explain { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Explain) FilterPath(filterpaths ...string) *Explain { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Explain) Human(human bool) *Explain { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Explain) Pretty(pretty bool) *Explain { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Defines the search definition using the Query DSL. +// API name: query +func (r *Explain) Query(query types.QueryVariant) *Explain { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/request.go new file mode 100644 index 000000000..28b2784de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explain + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package explain +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/explain/ExplainRequest.ts#L26-L125 +type Request struct { + + // Query Defines the search definition using the Query DSL. 
+ Query *types.Query `json:"query,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Explain request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/response.go new file mode 100644 index 000000000..ffb4e50aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/explain/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explain + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package explain +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/explain/ExplainResponse.ts#L23-L31 +type Response struct { + Explanation *types.ExplanationDetail `json:"explanation,omitempty"` + Get *types.InlineGet `json:"get,omitempty"` + Id_ string `json:"_id"` + Index_ string `json:"_index"` + Matched bool `json:"matched"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/field_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/field_caps.go new file mode 100644 index 000000000..5daf91b76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/field_caps.go @@ -0,0 +1,504 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the field capabilities. +// +// Get information about the capabilities of fields among multiple indices. +// +// For data streams, the API returns field capabilities among the stream’s +// backing indices. +// It returns runtime fields like any other field. +// For example, a runtime field with a type of keyword is returned the same as +// any other field that belongs to the `keyword` family. +package fieldcaps + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FieldCaps struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFieldCaps type alias for index. +type NewFieldCaps func() *FieldCaps + +// NewFieldCapsFunc returns a new instance of FieldCaps with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFieldCapsFunc(tp elastictransport.Interface) NewFieldCaps { + return func() *FieldCaps { + n := New(tp) + + return n + } +} + +// Get the field capabilities. +// +// Get information about the capabilities of fields among multiple indices. +// +// For data streams, the API returns field capabilities among the stream’s +// backing indices. +// It returns runtime fields like any other field. +// For example, a runtime field with a type of keyword is returned the same as +// any other field that belongs to the `keyword` family. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps +func New(tp elastictransport.Interface) *FieldCaps { + r := &FieldCaps{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *FieldCaps) Raw(raw io.Reader) *FieldCaps { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *FieldCaps) Request(req *Request) *FieldCaps { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *FieldCaps) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for FieldCaps: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_field_caps") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_field_caps") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
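+// As with the other typed endpoints, Do wraps Perform and decodes the body
+// into a Response; a complete call might look like this sketch (`tp` is an
+// elastictransport.Interface; the field names are illustrative):
+//
+// ```
+// res, err := fieldcaps.NewFieldCapsFunc(tp)().Fields("rating", "title").Do(context.Background())
+// ```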
+func (r FieldCaps) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "field_caps") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "field_caps") + if reader := instrument.RecordRequestBody(ctx, "field_caps", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "field_caps") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the FieldCaps query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a fieldcaps.Response +func (r FieldCaps) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "field_caps") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the FieldCaps headers map. +func (r *FieldCaps) Header(key, value string) *FieldCaps { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (*). To target all data streams and indices, +// omit this parameter or use * or _all. +// API Name: index +func (r *FieldCaps) Index(index string) *FieldCaps { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, +// or `_all` value targets only missing or closed indices. This behavior applies +// even if the request targets other open indices. 
For example, a request +// targeting `foo*,bar*` returns an error if an index starts with foo but no +// index starts with bar. +// API name: allow_no_indices +func (r *FieldCaps) AllowNoIndices(allownoindices bool) *FieldCaps { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *FieldCaps) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *FieldCaps { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `true`, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *FieldCaps) IgnoreUnavailable(ignoreunavailable bool) *FieldCaps { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeUnmapped If true, unmapped fields are included in the response. +// API name: include_unmapped +func (r *FieldCaps) IncludeUnmapped(includeunmapped bool) *FieldCaps { + r.values.Set("include_unmapped", strconv.FormatBool(includeunmapped)) + + return r +} + +// Filters A comma-separated list of filters to apply to the response. +// API name: filters +func (r *FieldCaps) Filters(filters string) *FieldCaps { + r.values.Set("filters", filters) + + return r +} + +// Types A comma-separated list of field types to include. +// Any fields that do not match one of these types will be excluded from the +// results. +// It defaults to empty, meaning that all field types are returned. +// API name: types +func (r *FieldCaps) Types(types ...string) *FieldCaps { + tmp := []string{} + for _, item := range types { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("types", strings.Join(tmp, ",")) + + return r +} + +// IncludeEmptyFields If false, empty fields are not included in the response. +// API name: include_empty_fields +func (r *FieldCaps) IncludeEmptyFields(includeemptyfields bool) *FieldCaps { + r.values.Set("include_empty_fields", strconv.FormatBool(includeemptyfields)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FieldCaps) ErrorTrace(errortrace bool) *FieldCaps { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *FieldCaps) FilterPath(filterpaths ...string) *FieldCaps { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human
+func (r *FieldCaps) Human(human bool) *FieldCaps {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use this
+// option only for debugging.
+// API name: pretty
+func (r *FieldCaps) Pretty(pretty bool) *FieldCaps {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are
+// supported.
+// API name: fields
+func (r *FieldCaps) Fields(fields ...string) *FieldCaps {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Fields = fields
+
+	return r
+}
+
+// Filter indices if the provided query rewrites to `match_none` on every shard.
+//
+// IMPORTANT: The filtering is done on a best-effort basis; it uses index
+// statistics and mappings to rewrite queries to `match_none` instead of fully
+// running the request.
+// For instance a range query over a date field can rewrite to `match_none` if
+// all documents within a shard (including deleted documents) are outside of the
+// provided range.
+// However, not all queries can rewrite to `match_none` so this API may return
+// an index even if the provided filter matches no document.
+// API name: index_filter
+func (r *FieldCaps) IndexFilter(indexfilter types.QueryVariant) *FieldCaps {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.IndexFilter = indexfilter.QueryCaster()
+
+	return r
+}
+
+// Define ad-hoc runtime fields in the request similar to the way it is done in
+// search requests.
+// These fields exist only as part of the query and take precedence over fields
+// defined with the same name in the index mappings.
+// API name: runtime_mappings
+func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *FieldCaps {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/request.go
new file mode 100644
index 000000000..b8eb2db52
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/request.go
@@ -0,0 +1,121 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package fieldcaps + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package fieldcaps +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L130 +type Request struct { + + // Fields A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are + // supported. + Fields []string `json:"fields,omitempty"` + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. + // + // IMPORTANT: The filtering is done on a best-effort basis, it uses index + // statistics and mappings to rewrite queries to `match_none` instead of fully + // running the request. + // For instance a range query over a date field can rewrite to `match_none` if + // all documents within a shard (including deleted documents) are outside of the + // provided range. + // However, not all queries can rewrite to `match_none` so this API may return + // an index even if the provided filter matches no document. + IndexFilter *types.Query `json:"index_filter,omitempty"` + // RuntimeMappings Define ad-hoc runtime fields in the request similar to the way it is done in + // search requests. + // These fields exist only as part of the query and take precedence over fields + // defined with the same name in the index mappings. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Fieldcaps request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "index_filter": + if err := dec.Decode(&s.IndexFilter); err != nil { + return fmt.Errorf("%s | %w", "IndexFilter", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/response.go new file mode 100644 index 000000000..d69f7fa7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps/response.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package fieldcaps + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package fieldcaps +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L38 +type Response struct { + Fields map[string]map[string]types.FieldCapability `json:"fields"` + // Indices The list of indices where this field has the same type family, or null if all + // indices have the same type family for the field. + Indices []string `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Fields: make(map[string]map[string]types.FieldCapability, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]map[string]types.FieldCapability, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/get.go new file mode 100644 index 000000000..a275e2993 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/get.go @@ -0,0 +1,628 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a document by its ID. +// +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). +// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. +// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. +// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. +// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. +// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. 
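+//
+// Assuming the same fluent helpers as the sibling endpoints (a sketch with
+// illustrative values; `tp` is an elastictransport.Interface):
+//
+// ```
+// res, err := get.NewGetFunc(tp)("my-index-000001", "2").
+//	Routing("user1").
+//	Do(context.Background())
+// ```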
+package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(index, id string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(index, id string) *Get { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Get a document by its ID. +// +// Get a document and its source or stored fields from an index. +// +// By default, this API is realtime and is not affected by the refresh rate of +// the index (when data will become visible for search). +// In the case where stored fields are requested with the `stored_fields` +// parameter and the document has been updated but is not yet refreshed, the API +// will have to parse and analyze the source to extract the stored fields. +// To turn off realtime behavior, set the `realtime` parameter to false. +// +// **Source filtering** +// +// By default, the API returns the contents of the `_source` field unless you +// have used the `stored_fields` parameter or the `_source` field is turned off. +// You can turn off `_source` retrieval by using the `_source` parameter: +// +// ``` +// GET my-index-000001/_doc/0?_source=false +// ``` +// +// If you only need one or two fields from the `_source`, use the +// `_source_includes` or `_source_excludes` parameters to include or filter out +// particular fields. +// This can be helpful with large documents where partial retrieval can save on +// network overhead +// Both parameters take a comma separated list of fields or wildcard +// expressions. +// For example: +// +// ``` +// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +// ``` +// +// If you only want to specify includes, you can use a shorter notation: +// +// ``` +// GET my-index-000001/_doc/0?_source=*.id +// ``` +// +// **Routing** +// +// If routing is used during indexing, the routing value also needs to be +// specified to retrieve a document. +// For example: +// +// ``` +// GET my-index-000001/_doc/2?routing=user1 +// ``` +// +// This request gets the document with ID 2, but it is routed based on the user. +// The document is not fetched if the correct routing is not specified. +// +// **Distributed** +// +// The GET operation is hashed into a specific shard ID. +// It is then redirected to one of the replicas within that shard ID and returns +// the result. +// The replicas are the primary shard and its replicas within that shard ID +// group. +// This means that the more replicas you have, the better your GET scaling will +// be. 
+// +// **Versioning support** +// +// You can use the `version` parameter to retrieve the document only if its +// current version is equal to the specified one. +// +// Internally, Elasticsearch has marked the old document as deleted and added an +// entirely new document. +// The old version of the document doesn't disappear immediately, although you +// won't be able to access it. +// Elasticsearch cleans up deleted documents in the background as you continue +// to index more data. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "get") + if reader := instrument.RecordRequestBody(ctx, "get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 || slices.Contains([]int{404}, res.StatusCode) { + + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Id A unique document identifier. +// API Name: id +func (r *Get) _id(id string) *Get { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index The name of the index that contains the document. +// API Name: index +func (r *Get) _index(index string) *Get { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ForceSyntheticSource Indicates whether the request forces synthetic `_source`. +// Use this parameter to test if the mapping supports synthetic `_source` and to +// get a sense of the worst case performance. +// Fetches with this parameter enabled will be slower than enabling synthetic +// source natively in the index. +// API name: force_synthetic_source +func (r *Get) ForceSyntheticSource(forcesyntheticsource bool) *Get { + r.values.Set("force_synthetic_source", strconv.FormatBool(forcesyntheticsource)) + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// +// If it is set to `_local`, the operation will prefer to be run on a local +// allocated shard when possible. +// If it is set to a custom value, the value is used to guarantee that the same +// shards will be used for the same custom value. +// This can help with "jumping values" when hitting different shards in +// different refresh states. +// A sample value can be something like the web session ID or the user name. +// API name: preference +func (r *Get) Preference(preference string) *Get { + r.values.Set("preference", preference) + + return r +} + +// Realtime If `true`, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *Get) Realtime(realtime bool) *Get { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). +// API name: refresh +func (r *Get) Refresh(refresh bool) *Get { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *Get) Routing(routing string) *Get { + r.values.Set("routing", routing) + + return r +} + +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. 
+// API name: _source +func (r *Get) Source_(sourceconfigparam string) *Get { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_excludes +func (r *Get) SourceExcludes_(fields ...string) *Get { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_includes +func (r *Get) SourceIncludes_(fields ...string) *Get { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// StoredFields A comma-separated list of stored fields to return as part of a hit. +// If no fields are specified, no stored fields are included in the response. +// If this field is specified, the `_source` parameter defaults to `false`. +// Only leaf fields can be retrieved with the `stored_fields` option. +// Object fields can't be returned; if specified, the request fails. +// API name: stored_fields +func (r *Get) StoredFields(fields ...string) *Get { + r.values.Set("stored_fields", strings.Join(fields, ",")) + + return r +} + +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. +// API name: version +func (r *Get) Version(versionnumber string) *Get { + r.values.Set("version", versionnumber) + + return r +} + +// VersionType The version type. +// API name: version_type +func (r *Get) VersionType(versiontype versiontype.VersionType) *Get { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/response.go new file mode 100644 index 000000000..e7a441b51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/get/response.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get/GetResponse.ts#L23-L35 +type Response struct { + + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Found Indicates whether the document exists. + Found bool `json:"found"` + // Id_ The unique identifier for the document. + Id_ string `json:"_id"` + Ignored_ []string `json:"_ignored,omitempty"` + // Index_ The name of the index the document belongs to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Routing_ The explicit routing, if set. + Routing_ *string `json:"_routing,omitempty"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Source_ If `found` is `true`, it contains the document data formatted in JSON. + // If the `_source` parameter is set to `false` or the `stored_fields` parameter + // is set to `true`, it is excluded. + Source_ json.RawMessage `json:"_source,omitempty"` + // Version_ The document version, which is incremented each time the document is updated. 
+ Version_ *int64 `json:"_version,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Fields: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/get_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/get_script.go new file mode 100644 index 000000000..d6b14a44b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/get_script.go @@ -0,0 +1,355 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a script or search template. +// Retrieves a stored script or search template. +package getscript + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetScript struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetScript type alias for index. +type NewGetScript func(id string) *GetScript + +// NewGetScriptFunc returns a new instance of GetScript with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetScriptFunc(tp elastictransport.Interface) NewGetScript { + return func(id string) *GetScript { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get a script or search template. +// Retrieves a stored script or search template. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script +func New(tp elastictransport.Interface) *GetScript { + r := &GetScript{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetScript) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetScript) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "get_script") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "get_script") + if reader := instrument.RecordRequestBody(ctx, "get_script", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetScript query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getscript.Response +func (r GetScript) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok 
{ + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetScript) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetScript query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetScript headers map. +func (r *GetScript) Header(key, value string) *GetScript { + r.headers.Set(key, value) + + return r +} + +// Id The identifier for the stored script or search template. +// API Name: id +func (r *GetScript) _id(id string) *GetScript { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *GetScript) MasterTimeout(duration string) *GetScript { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetScript) ErrorTrace(errortrace bool) *GetScript { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetScript) FilterPath(filterpaths ...string) *GetScript { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetScript) Human(human bool) *GetScript { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetScript) Pretty(pretty bool) *GetScript { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/response.go new file mode 100644 index 000000000..670f75e5a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getscript + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getscript +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script/GetScriptResponse.ts#L23-L29 +type Response struct { + Found bool `json:"found"` + Id_ string `json:"_id"` + Script *types.StoredScript `json:"script,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/get_script_context.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/get_script_context.go new file mode 100644 index 000000000..9375ef20f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/get_script_context.go @@ -0,0 +1,323 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get script contexts. +// +// Get a list of supported script contexts and their methods. 
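Like the document GET earlier in this diff, the script read endpoints vendored here hang off the same typed client. A short sketch follows; it is not part of the vendored code, it reuses the illustrative client setup from the earlier example, it assumes the GetScript and GetScriptContext endpoints are promoted to the client root (as in the v8 client), and the stored-script ID is hypothetical:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative address, as before
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Fetch a stored script by ID; Found mirrors getscript/response.go.
	script, err := es.GetScript("my-stored-script").Do(ctx) // hypothetical script ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("stored script found:", script.Found)

	// List the script contexts the cluster supports; Contexts mirrors
	// getscriptcontext/response.go (field names follow typedapi/types).
	ctxs, err := es.GetScriptContext().Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range ctxs.Contexts {
		fmt.Println("context:", c.Name)
	}
}
```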
+package getscriptcontext + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetScriptContext struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetScriptContext type alias for index. +type NewGetScriptContext func() *GetScriptContext + +// NewGetScriptContextFunc returns a new instance of GetScriptContext with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetScriptContextFunc(tp elastictransport.Interface) NewGetScriptContext { + return func() *GetScriptContext { + n := New(tp) + + return n + } +} + +// Get script contexts. +// +// Get a list of supported script contexts and their methods. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context +func New(tp elastictransport.Interface) *GetScriptContext { + r := &GetScriptContext{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetScriptContext) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_script_context") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetScriptContext) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "get_script_context") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "get_script_context") + if reader := instrument.RecordRequestBody(ctx, "get_script_context", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script_context") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetScriptContext query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getscriptcontext.Response +func (r GetScriptContext) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_context") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetScriptContext) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_context") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetScriptContext query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetScriptContext headers map. +func (r *GetScriptContext) Header(key, value string) *GetScriptContext { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetScriptContext) ErrorTrace(errortrace bool) *GetScriptContext { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetScriptContext) FilterPath(filterpaths ...string) *GetScriptContext { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetScriptContext) Human(human bool) *GetScriptContext { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetScriptContext) Pretty(pretty bool) *GetScriptContext { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/response.go new file mode 100644 index 000000000..cb9975446 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getscriptcontext + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getscriptcontext +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 +type Response struct { + Contexts []types.GetScriptContext `json:"contexts"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/get_script_languages.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/get_script_languages.go new file mode 100644 index 000000000..98cf89037 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/get_script_languages.go @@ -0,0 +1,323 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get script languages. +// +// Get a list of available script types, languages, and contexts. +package getscriptlanguages + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetScriptLanguages struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetScriptLanguages type alias for index. +type NewGetScriptLanguages func() *GetScriptLanguages + +// NewGetScriptLanguagesFunc returns a new instance of GetScriptLanguages with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetScriptLanguagesFunc(tp elastictransport.Interface) NewGetScriptLanguages { + return func() *GetScriptLanguages { + n := New(tp) + + return n + } +} + +// Get script languages. +// +// Get a list of available script types, languages, and contexts. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages +func New(tp elastictransport.Interface) *GetScriptLanguages { + r := &GetScriptLanguages{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetScriptLanguages) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_script_language") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetScriptLanguages) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "get_script_languages") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "get_script_languages") + if reader := instrument.RecordRequestBody(ctx, "get_script_languages", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_script_languages") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetScriptLanguages query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getscriptlanguages.Response +func (r GetScriptLanguages) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_languages") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetScriptLanguages) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_script_languages") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetScriptLanguages query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetScriptLanguages headers map. +func (r *GetScriptLanguages) Header(key, value string) *GetScriptLanguages { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetScriptLanguages) ErrorTrace(errortrace bool) *GetScriptLanguages { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetScriptLanguages) FilterPath(filterpaths ...string) *GetScriptLanguages { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetScriptLanguages) Human(human bool) *GetScriptLanguages { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetScriptLanguages) Pretty(pretty bool) *GetScriptLanguages { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/response.go new file mode 100644 index 000000000..b2b0e99a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getscriptlanguages + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getscriptlanguages +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 +type Response struct { + LanguageContexts []types.LanguageContext `json:"language_contexts"` + TypesAllowed []string `json:"types_allowed"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/get_source.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/get_source.go new file mode 100644 index 000000000..c4ad5a049 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/get_source.go @@ -0,0 +1,471 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a document's source. +// +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` +package getsource + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSource struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSource type alias for index. +type NewGetSource func(index, id string) *GetSource + +// NewGetSourceFunc returns a new instance of GetSource with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSourceFunc(tp elastictransport.Interface) NewGetSource { + return func(index, id string) *GetSource { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Get a document's source. +// +// Get the source of a document. +// For example: +// +// ``` +// GET my-index-000001/_source/1 +// ``` +// +// You can use the source filtering parameters to control which parts of the +// `_source` are returned: +// +// ``` +// GET +// my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +// ``` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get +func New(tp elastictransport.Interface) *GetSource { + r := &GetSource{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSource) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_source") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSource) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "get_source") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "get_source") + if reader := instrument.RecordRequestBody(ctx, "get_source", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "get_source") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSource query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsource.Response +func (r GetSource) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_source") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
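For a sense of how these pieces fit together, here is a minimal sketch of calling the endpoint through this builder. It assumes a configured `elastictransport.Interface` (named `tp` here); the index and document ID mirror the documentation examples above, and the source-filtering setters appear later in this file.

```
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource"
)

// fetchSource is a sketch only; tp must be a working transport.
func fetchSource(tp elastictransport.Interface) {
	src, err := getsource.NewGetSourceFunc(tp)("my-index-000001", "1").
		SourceIncludes_("*.id").
		SourceExcludes_("entities").
		Do(context.Background())
	if err != nil {
		// Non-2xx replies surface as a decoded *types.ElasticsearchError.
		log.Fatal(err)
	}
	// Response is a json.RawMessage holding the bare `_source` body.
	fmt.Println(string(src))
}
```

For a quick existence-style check without decoding the body, `IsSuccess` (defined below) performs the same request and reports only whether it returned a 2xx status.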
+func (r GetSource) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "get_source") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSource query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSource headers map. +func (r *GetSource) Header(key, value string) *GetSource { + r.headers.Set(key, value) + + return r +} + +// Id A unique document identifier. +// API Name: id +func (r *GetSource) _id(id string) *GetSource { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index The name of the index that contains the document. +// API Name: index +func (r *GetSource) _index(index string) *GetSource { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, the operation is randomized between the shard replicas. +// API name: preference +func (r *GetSource) Preference(preference string) *GetSource { + r.values.Set("preference", preference) + + return r +} + +// Realtime If `true`, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *GetSource) Realtime(realtime bool) *GetSource { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Refresh If `true`, the request refreshes the relevant shards before retrieving the +// document. +// Setting it to `true` should be done after careful thought and verification +// that this does not cause a heavy load on the system (and slow down indexing). +// API name: refresh +func (r *GetSource) Refresh(refresh bool) *GetSource { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *GetSource) Routing(routing string) *GetSource { + r.values.Set("routing", routing) + + return r +} + +// Source_ Indicates whether to return the `_source` field (`true` or `false`) or lists +// the fields to return. +// API name: _source +func (r *GetSource) Source_(sourceconfigparam string) *GetSource { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude in the response. +// API name: _source_excludes +func (r *GetSource) SourceExcludes_(fields ...string) *GetSource { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// API name: _source_includes +func (r *GetSource) SourceIncludes_(fields ...string) *GetSource { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// Version The version number for concurrency control. +// It must match the current version of the document for the request to succeed. 
+// API name: version +func (r *GetSource) Version(versionnumber string) *GetSource { + r.values.Set("version", versionnumber) + + return r +} + +// VersionType The version type. +// API name: version_type +func (r *GetSource) VersionType(versiontype versiontype.VersionType) *GetSource { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSource) ErrorTrace(errortrace bool) *GetSource { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSource) FilterPath(filterpaths ...string) *GetSource { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSource) Human(human bool) *GetSource { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSource) Pretty(pretty bool) *GetSource { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/response.go new file mode 100644 index 000000000..3aa5dec8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsource + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package getsource +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_source/SourceResponse.ts#L20-L23 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/health_report.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/health_report.go new file mode 100644 index 000000000..1183b46a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/health_report.go @@ -0,0 +1,427 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. +// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. +// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. +// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. 
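Following the NOTE above, an automated polling loop should disable the verbose analysis. A minimal sketch, assuming a configured `elastictransport.Interface` named `tp`; the response fields used here are defined in this package's response.go further down.

```
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport"
)

// pollHealth is a sketch only; Verbose(false) skips the more expensive
// root-cause analysis, as recommended above for automated polling.
func pollHealth(tp elastictransport.Interface) error {
	rep, err := healthreport.NewHealthReportFunc(tp)().
		Verbose(false).
		Do(context.Background())
	if err != nil {
		return err
	}
	if rep.Status != nil {
		fmt.Println(rep.ClusterName, rep.Status)
	}
	return nil
}
```

The report can also be narrowed to a single indicator with the `Feature` setter defined below, for example `Feature("shards_availability")`, using one of the indicator names returned by the top-level report.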
+package healthreport + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + featureMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type HealthReport struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + feature string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHealthReport type alias for index. +type NewHealthReport func() *HealthReport + +// NewHealthReportFunc returns a new instance of HealthReport with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHealthReportFunc(tp elastictransport.Interface) NewHealthReport { + return func() *HealthReport { + n := New(tp) + + return n + } +} + +// Get the cluster health. +// Get a report with the health status of an Elasticsearch cluster. +// The report contains a list of indicators that compose Elasticsearch +// functionality. +// +// Each indicator has a health status of: green, unknown, yellow or red. +// The indicator will provide an explanation and metadata describing the reason +// for its current health status. +// +// The cluster’s status is controlled by the worst indicator status. +// +// In the event that an indicator’s status is non-green, a list of impacts may +// be present in the indicator result which detail the functionalities that are +// negatively affected by the health issue. +// Each impact carries with it a severity level, an area of the system that is +// affected, and a simple description of the impact on the system. +// +// Some health indicators can determine the root cause of a health problem and +// prescribe a set of steps that can be performed in order to improve the health +// of the system. +// The root cause and remediation steps are encapsulated in a diagnosis. +// A diagnosis contains a cause detailing a root cause analysis, an action +// containing a brief description of the steps to take to fix the problem, the +// list of affected resources (if applicable), and a detailed step-by-step +// troubleshooting guide to fix the diagnosed problem. +// +// NOTE: The health indicators perform root cause analysis of non-green health +// statuses. This can be computationally expensive when called frequently. +// When setting up automated polling of the API for health status, set verbose +// to false to disable the more expensive analysis logic. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report +func New(tp elastictransport.Interface) *HealthReport { + r := &HealthReport{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *HealthReport) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_health_report") + + method = http.MethodGet + case r.paramSet == featureMask: + path.WriteString("/") + path.WriteString("_health_report") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "feature", r.feature) + } + path.WriteString(r.feature) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r HealthReport) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "health_report") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "health_report") + if reader := instrument.RecordRequestBody(ctx, "health_report", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "health_report") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the HealthReport query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a healthreport.Response +func (r HealthReport) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "health_report") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r HealthReport) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "health_report") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the HealthReport query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the HealthReport headers map. +func (r *HealthReport) Header(key, value string) *HealthReport { + r.headers.Set(key, value) + + return r +} + +// Feature A feature of the cluster, as returned by the top-level health report API. +// API Name: feature +func (r *HealthReport) Feature(features ...string) *HealthReport { + r.paramSet |= featureMask + r.feature = strings.Join(features, ",") + + return r +} + +// Timeout Explicit operation timeout. +// API name: timeout +func (r *HealthReport) Timeout(duration string) *HealthReport { + r.values.Set("timeout", duration) + + return r +} + +// Verbose Opt-in for more information about the health of the system. +// API name: verbose +func (r *HealthReport) Verbose(verbose bool) *HealthReport { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// Size Limit the number of affected resources the health report API returns. +// API name: size +func (r *HealthReport) Size(size int) *HealthReport { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *HealthReport) ErrorTrace(errortrace bool) *HealthReport { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *HealthReport) FilterPath(filterpaths ...string) *HealthReport { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *HealthReport) Human(human bool) *HealthReport { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *HealthReport) Pretty(pretty bool) *HealthReport { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/response.go new file mode 100644 index 000000000..79628d1d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package healthreport + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// Response holds the response body struct for the package healthreport +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/Response.ts#L22-L28 +type Response struct { + ClusterName string `json:"cluster_name"` + Indicators types.Indicators `json:"indicators"` + Status *indicatorhealthstatus.IndicatorHealthStatus `json:"status,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/index.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/index.go new file mode 100644 index 000000000..7fd88ea56 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/index.go @@ -0,0 +1,934 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a document in an index. +// +// Add a JSON document to the specified data stream or index and make it +// searchable. +// If the target is an index and the document already exists, the request +// updates the document and increments its version. +// +// NOTE: You cannot use this API to send update requests for existing documents +// in a data stream. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add or overwrite a document using the `PUT //_doc/<_id>` request +// format, you must have the `create`, `index`, or `write` index privilege. +// * To add a document using the `POST //_doc/` request format, you must +// have the `create_doc`, `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// NOTE: Replica shards might not all be started when an indexing operation +// returns successfully. +// By default, only the primary is required. Set `wait_for_active_shards` to +// change this default behavior. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. 
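As a concrete illustration of the auto-creation and auto-ID behavior just described, a minimal sketch using the builder defined later in this file (the `POST /<target>/_doc` request form, with no explicit ID, assuming a configured `elastictransport.Interface` named `tp`):

```
package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/index"
)

// indexAutoID is a sketch only. With no Id set, the request is sent as
// POST my-index-000001/_doc and Elasticsearch generates the document ID;
// if the target does not exist, the auto-create rules above apply.
func indexAutoID(tp elastictransport.Interface, doc any) (string, error) {
	res, err := index.NewIndexFunc(tp)("my-index-000001").
		Document(doc).
		Do(context.Background())
	if err != nil {
		return "", err
	}
	return res.Id_, nil // the generated identifier, from the response body
}
```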
+// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). +// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. 
+// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. +// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. +// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. +// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. +// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. 
+// Even the simple case of updating the Elasticsearch index using data from a +// database is simplified if external versioning is used, as only the latest +// version will be used if the index operations arrive out of order. +package index + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Index struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req any + deferred []func(request any) error + buf *gobytes.Buffer + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewIndex type alias for index. +type NewIndex func(index string) *Index + +// NewIndexFunc returns a new instance of Index with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewIndexFunc(tp elastictransport.Interface) NewIndex { + return func(index string) *Index { + n := New(tp) + + n._index(index) + + return n + } +} + +// Create or update a document in an index. +// +// Add a JSON document to the specified data stream or index and make it +// searchable. +// If the target is an index and the document already exists, the request +// updates the document and increments its version. +// +// NOTE: You cannot use this API to send update requests for existing documents +// in a data stream. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or index alias: +// +// * To add or overwrite a document using the `PUT //_doc/<_id>` request +// format, you must have the `create`, `index`, or `write` index privilege. +// * To add a document using the `POST //_doc/` request format, you must +// have the `create_doc`, `create`, `index`, or `write` index privilege. +// * To automatically create a data stream or index with this API request, you +// must have the `auto_configure`, `create_index`, or `manage` index privilege. +// +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// NOTE: Replica shards might not all be started when an indexing operation +// returns successfully. +// By default, only the primary is required. Set `wait_for_active_shards` to +// change this default behavior. +// +// **Automatically create data streams and indices** +// +// If the request's target doesn't exist and matches an index template with a +// `data_stream` definition, the index operation automatically creates the data +// stream. +// +// If the target doesn't exist and doesn't match a data stream template, the +// operation automatically creates the index and applies any matching index +// templates. +// +// NOTE: Elasticsearch includes several built-in index templates. 
To avoid +// naming collisions with these templates, refer to index pattern documentation. +// +// If no mapping exists, the index operation creates a dynamic mapping. +// By default, new fields and objects are automatically added to the mapping if +// needed. +// +// Automatic index creation is controlled by the `action.auto_create_index` +// setting. +// If it is `true`, any index can be created automatically. +// You can modify this setting to explicitly allow or block automatic creation +// of indices that match specified patterns or set it to `false` to turn off +// automatic index creation entirely. +// Specify a comma-separated list of patterns you want to allow or prefix each +// pattern with `+` or `-` to indicate whether it should be allowed or blocked. +// When a list is specified, the default behaviour is to disallow. +// +// NOTE: The `action.auto_create_index` setting affects the automatic creation +// of indices only. +// It does not affect the creation of data streams. +// +// **Optimistic concurrency control** +// +// Index operations can be made conditional and only be performed if the last +// modification to the document was assigned the sequence number and primary +// term specified by the `if_seq_no` and `if_primary_term` parameters. +// If a mismatch is detected, the operation will result in a +// `VersionConflictException` and a status code of `409`. +// +// **Routing** +// +// By default, shard placement — or routing — is controlled by using a hash of +// the document's ID value. +// For more explicit control, the value fed into the hash function used by the +// router can be directly specified on a per-operation basis using the `routing` +// parameter. +// +// When setting up explicit mapping, you can also use the `_routing` field to +// direct the index operation to extract the routing value from the document +// itself. +// This does come at the (very minimal) cost of an additional document parsing +// pass. +// If the `_routing` mapping is defined and set to be required, the index +// operation will fail if no routing value is provided or extracted. +// +// NOTE: Data streams do not support custom routing unless they were created +// with the `allow_custom_routing` setting enabled in the template. +// +// **Distributed** +// +// The index operation is directed to the primary shard based on its route and +// performed on the actual node containing this shard. +// After the primary shard completes the operation, if needed, the update is +// distributed to applicable replicas. +// +// **Active shards** +// +// To improve the resiliency of writes to the system, indexing operations can be +// configured to wait for a certain number of active shard copies before +// proceeding with the operation. +// If the requisite number of active shard copies are not available, then the +// write operation must wait and retry, until either the requisite shard copies +// have started or a timeout occurs. +// By default, write operations only wait for the primary shards to be active +// before proceeding (that is to say `wait_for_active_shards` is `1`). +// This default can be overridden in the index settings dynamically by setting +// `index.write.wait_for_active_shards`. +// To alter this behavior per operation, use the `wait_for_active_shards +// request` parameter. +// +// Valid values are all or any positive integer up to the total number of +// configured copies per shard in the index (which is `number_of_replicas`+1). 
+// Specifying a negative value or a number greater than the number of shard +// copies will throw an error. +// +// For example, suppose you have a cluster of three nodes, A, B, and C and you +// create an index index with the number of replicas set to 3 (resulting in 4 +// shard copies, one more copy than there are nodes). +// If you attempt an indexing operation, by default the operation will only +// ensure the primary copy of each shard is available before proceeding. +// This means that even if B and C went down and A hosted the primary shard +// copies, the indexing operation would still proceed with only one copy of the +// data. +// If `wait_for_active_shards` is set on the request to `3` (and all three nodes +// are up), the indexing operation will require 3 active shard copies before +// proceeding. +// This requirement should be met because there are 3 active nodes in the +// cluster, each one holding a copy of the shard. +// However, if you set `wait_for_active_shards` to `all` (or to `4`, which is +// the same in this situation), the indexing operation will not proceed as you +// do not have all 4 copies of each shard active in the index. +// The operation will timeout unless a new node is brought up in the cluster to +// host the fourth copy of the shard. +// +// It is important to note that this setting greatly reduces the chances of the +// write operation not writing to the requisite number of shard copies, but it +// does not completely eliminate the possibility, because this check occurs +// before the write operation starts. +// After the write operation is underway, it is still possible for replication +// to fail on any number of shard copies but still succeed on the primary. +// The `_shards` section of the API response reveals the number of shard copies +// on which replication succeeded and failed. +// +// **No operation (noop) updates** +// +// When updating a document by using this API, a new version of the document is +// always created even if the document hasn't changed. +// If this isn't acceptable use the `_update` API with `detect_noop` set to +// `true`. +// The `detect_noop` option isn't available on this API because it doesn’t fetch +// the old source and isn't able to compare it against the new source. +// +// There isn't a definitive rule for when noop updates aren't acceptable. +// It's a combination of lots of factors like how frequently your data source +// sends updates that are actually noops and how many queries per second +// Elasticsearch runs on the shard receiving the updates. +// +// **Versioning** +// +// Each indexed document is given a version number. +// By default, internal versioning is used that starts at 1 and increments with +// each update, deletes included. +// Optionally, the version number can be set to an external value (for example, +// if maintained in a database). +// To enable this functionality, `version_type` should be set to `external`. +// The value provided must be a numeric, long value greater than or equal to 0, +// and less than around `9.2e+18`. +// +// NOTE: Versioning is completely real time, and is not affected by the near +// real time aspects of search operations. +// If no version is provided, the operation runs without any version checks. +// +// When using the external version type, the system checks to see if the version +// number passed to the index request is greater than the version of the +// currently stored document. +// If true, the document will be indexed and the new version number used. 
+// If the value provided is less than or equal to the stored document's version +// number, a version conflict will occur and the index operation will fail. For +// example: +// +// ``` +// PUT my-index-000001/_doc/1?version=2&version_type=external +// +// { +// "user": { +// "id": "elkbee" +// } +// } +// +// In this example, the operation will succeed since the supplied version of 2 +// is higher than the current document version of 1. +// If the document was already updated and its version was set to 2 or higher, +// the indexing command will fail and result in a conflict (409 HTTP status +// code). +// +// A nice side effect is that there is no need to maintain strict ordering of +// async indexing operations run as a result of changes to a source database, as +// long as version numbers from the source database are used. +// Even the simple case of updating the Elasticsearch index using data from a +// database is simplified if external versioning is used, as only the latest +// version will be used if the index operations arrive out of order. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create +func New(tp elastictransport.Interface) *Index { + r := &Index{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Index) Raw(raw io.Reader) *Index { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Index) Request(req any) *Index { + r.req = req + + return r +} + +// Document allows to set the request property with the appropriate payload. +func (r *Index) Document(document any) *Index { + r.req = document + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
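The external-versioning walkthrough above (`PUT my-index-000001/_doc/1?version=2&version_type=external`) maps onto this builder as follows. A sketch, assuming a configured `elastictransport.Interface` named `tp`; the `External` value is assumed to be exposed by the `versiontype` enums package imported above.

```
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/index"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype"
)

// indexExternalVersion is a sketch only. The write succeeds while the
// supplied version stays above the stored one; otherwise the request
// fails with a version conflict (HTTP 409).
func indexExternalVersion(tp elastictransport.Interface) error {
	res, err := index.NewIndexFunc(tp)("my-index-000001").
		Id("1").
		Document(map[string]any{"user": map[string]string{"id": "elkbee"}}).
		Version("2").
		VersionType(versiontype.External).
		Do(context.Background())
	if err != nil {
		return err // a conflict arrives as *types.ElasticsearchError with Status 409
	}
	fmt.Println(res.Result) // `created` or `updated`
	return nil
}
```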
+func (r *Index) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Index: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_doc") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_doc") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
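When the payload is already serialized, `Raw` (shown above) can replace `Document`; as `HttpRequest` shows, a raw reader takes precedence and skips the `json.Marshal` step, with the Content-Type header still set. A sketch under the same `tp` assumption:

```
package example

import (
	"context"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/index"
)

// indexRaw is a sketch only: the pre-encoded JSON body is passed
// through to the request untouched.
func indexRaw(tp elastictransport.Interface) error {
	_, err := index.NewIndexFunc(tp)("my-index-000001").
		Id("1").
		Raw(strings.NewReader(`{"user":{"id":"elkbee"}}`)).
		Do(context.Background())
	return err
}
```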
+func (r Index) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "index") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "index") + if reader := instrument.RecordRequestBody(ctx, "index", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "index") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Index query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a index.Response +func (r Index) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Index headers map. +func (r *Index) Header(key, value string) *Index { + r.headers.Set(key, value) + + return r +} + +// Id A unique identifier for the document. +// To automatically generate a document ID, use the `POST //_doc/` +// request format and omit this parameter. +// API Name: id +func (r *Index) Id(id string) *Index { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index The name of the data stream or index to target. +// If the target doesn't exist and matches the name or wildcard (`*`) pattern of +// an index template with a `data_stream` definition, this request creates the +// data stream. +// If the target doesn't exist and doesn't match a data stream template, this +// request creates the index. +// You can check for existing targets with the resolve index API. 
+// API Name: index +func (r *Index) _index(index string) *Index { + r.paramSet |= indexMask + r.index = index + + return r +} + +// IfPrimaryTerm Only perform the operation if the document has this primary term. +// API name: if_primary_term +func (r *Index) IfPrimaryTerm(ifprimaryterm string) *Index { + r.values.Set("if_primary_term", ifprimaryterm) + + return r +} + +// IfSeqNo Only perform the operation if the document has this sequence number. +// API name: if_seq_no +func (r *Index) IfSeqNo(sequencenumber string) *Index { + r.values.Set("if_seq_no", sequencenumber) + + return r +} + +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Index) IncludeSourceOnError(includesourceonerror bool) *Index { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// OpType Set to `create` to only index the document if it does not already exist (put +// if absent). +// If a document with the specified `_id` already exists, the indexing operation +// will fail. +// The behavior is the same as using the `/_create` endpoint. +// If a document ID is specified, this paramater defaults to `index`. +// Otherwise, it defaults to `create`. +// If the request targets a data stream, an `op_type` of `create` is required. +// API name: op_type +func (r *Index) OpType(optype optype.OpType) *Index { + r.values.Set("op_type", optype.String()) + + return r +} + +// Pipeline The ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. +// API name: pipeline +func (r *Index) Pipeline(pipeline string) *Index { + r.values.Set("pipeline", pipeline) + + return r +} + +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If `wait_for`, it waits for a refresh to make this operation visible to +// search. +// If `false`, it does nothing with refreshes. +// API name: refresh +func (r *Index) Refresh(refresh refresh.Refresh) *Index { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Routing A custom value that is used to route operations to a specific shard. +// API name: routing +func (r *Index) Routing(routing string) *Index { + r.values.Set("routing", routing) + + return r +} + +// Timeout The period the request waits for the following operations: automatic index +// creation, dynamic mapping updates, waiting for active shards. +// +// This parameter is useful for situations where the primary shard assigned to +// perform the operation might not be available when the operation runs. +// Some reasons for this might be that the primary shard is currently recovering +// from a gateway or undergoing relocation. +// By default, the operation will wait on the primary shard to become available +// for at least 1 minute before failing and responding with an error. +// The actual wait time could be longer, particularly when multiple waits occur. +// API name: timeout +func (r *Index) Timeout(duration string) *Index { + r.values.Set("timeout", duration) + + return r +} + +// Version An explicit version number for concurrency control. +// It must be a non-negative long number. 
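The `IfSeqNo` and `IfPrimaryTerm` setters above implement the optimistic concurrency control described in the package documentation. A sketch, with placeholder values for the sequence number and primary term; in practice both come from a prior read of the document.

```
package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/index"
)

// updateIfUnchanged is a sketch only. The write is applied solely if the
// document's current seq_no and primary_term still match; otherwise
// Elasticsearch answers with a version conflict (HTTP 409).
func updateIfUnchanged(tp elastictransport.Interface, doc any) error {
	_, err := index.NewIndexFunc(tp)("my-index-000001").
		Id("1").
		Document(doc).
		IfSeqNo("362").
		IfPrimaryTerm("2").
		Do(context.Background())
	return err
}
```

The same builder also exposes `WaitForActiveShards` (for example `"3"` or `"all"`), defined further down in this file, for the active-shards behavior discussed earlier.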
+// API name: version +func (r *Index) Version(versionnumber string) *Index { + r.values.Set("version", versionnumber) + + return r +} + +// VersionType The version type. +// API name: version_type +func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// You can set it to `all` or any positive integer up to the total number of +// shards in the index (`number_of_replicas+1`). +// The default value of `1` means it waits for each primary shard to be active. +// API name: wait_for_active_shards +func (r *Index) WaitForActiveShards(waitforactiveshards string) *Index { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// RequireAlias If `true`, the destination must be an index alias. +// API name: require_alias +func (r *Index) RequireAlias(requirealias bool) *Index { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// RequireDataStream If `true`, the request's actions must target a data stream (existing or to be +// created). +// API name: require_data_stream +func (r *Index) RequireDataStream(requiredatastream bool) *Index { + r.values.Set("require_data_stream", strconv.FormatBool(requiredatastream)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Index) ErrorTrace(errortrace bool) *Index { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Index) FilterPath(filterpaths ...string) *Index { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Index) Human(human bool) *Index { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Index) Pretty(pretty bool) *Index { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/request.go new file mode 100644 index 000000000..c75babefb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package index + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package index +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/index/IndexRequest.ts#L35-L273 +type Request = json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := new(json.RawMessage) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/response.go new file mode 100644 index 000000000..bd8d433fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/index/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package index + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package index +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/index/IndexResponse.ts#L22-L25 +type Response struct { + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. 
+ Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/info.go new file mode 100644 index 000000000..e75ee2b46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/info.go @@ -0,0 +1,323 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cluster info. +// Get basic build, version, and cluster information. +// ::: In Serverless, this API is retained for backward compatibility only. Some +// response fields, such as the version number, should be ignored. +package info + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInfo type alias for index. +type NewInfo func() *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func() *Info { + n := New(tp) + + return n + } +} + +// Get cluster info. +// Get basic build, version, and cluster information. +// ::: In Serverless, this API is retained for backward compatibility only. Some +// response fields, such as the version number, should be ignored. 
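+//
+// A quick connectivity check built on this endpoint (sketch; assumes a
+// TypedClient `es` created elsewhere):
+//
+//	res, err := es.Info().Do(context.Background())
+//	if err == nil {
+//		fmt.Println(res.ClusterName, res.Version.Number)
+//	}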
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Info) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "info") + if reader := instrument.RecordRequestBody(ctx, "info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Info query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + 
return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Info) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Info query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Info) ErrorTrace(errortrace bool) *Info { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Info) FilterPath(filterpaths ...string) *Info { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Info) Pretty(pretty bool) *Info { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/response.go new file mode 100644 index 000000000..ec98bd110 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/info/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package info + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/info/RootNodeInfoResponse.ts#L23-L40 +type Response struct { + + // ClusterName The responding cluster's name. + ClusterName string `json:"cluster_name"` + ClusterUuid string `json:"cluster_uuid"` + // Name The responding node's name. + Name string `json:"name"` + Tagline string `json:"tagline"` + // Version The running version of Elasticsearch. + Version types.ElasticsearchVersionInfo `json:"version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/mget.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/mget.go new file mode 100644 index 000000000..64c4c7829 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/mget.go @@ -0,0 +1,521 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get multiple documents. +// +// Get multiple JSON documents by ID from one or more indices. +// If you specify an index in the request URI, you only need to specify the +// document IDs in the request body. +// To ensure fast responses, this multi get (mget) API responds with partial +// results if one or more shards fail. +// +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). 
+// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. +// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. +package mget + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Mget struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMget type alias for index. +type NewMget func() *Mget + +// NewMgetFunc returns a new instance of Mget with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMgetFunc(tp elastictransport.Interface) NewMget { + return func() *Mget { + n := New(tp) + + return n + } +} + +// Get multiple documents. +// +// Get multiple JSON documents by ID from one or more indices. +// If you specify an index in the request URI, you only need to specify the +// document IDs in the request body. +// To ensure fast responses, this multi get (mget) API responds with partial +// results if one or more shards fail. +// +// **Filter source fields** +// +// By default, the `_source` field is returned for every document (if stored). +// Use the `_source` and `_source_include` or `source_exclude` attributes to +// filter what fields are returned for a particular document. +// You can include the `_source`, `_source_includes`, and `_source_excludes` +// query parameters in the request URI to specify the defaults to use when there +// are no per-document instructions. +// +// **Get stored fields** +// +// Use the `stored_fields` attribute to specify the set of stored fields you +// want to retrieve. +// Any requested fields that are not stored are ignored. +// You can include the `stored_fields` query parameter in the request URI to +// specify the defaults to use when there are no per-document instructions. 
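+//
+// An end-to-end sketch (assumes a TypedClient `es`; the index name and IDs
+// are illustrative, and the `types` and `errors` packages are imported by
+// the caller):
+//
+//	res, err := es.Mget().
+//		Index("articles").
+//		Ids("1", "2").
+//		SourceIncludes_("title").
+//		Do(context.Background())
+//	var esErr *types.ElasticsearchError
+//	if errors.As(err, &esErr) {
+//		fmt.Println("request rejected with status", esErr.Status)
+//	}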
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget +func New(tp elastictransport.Interface) *Mget { + r := &Mget{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Mget) Raw(raw io.Reader) *Mget { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Mget) Request(req *Request) *Mget { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Mget) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Mget: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_mget") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mget") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
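+// Unlike Do, Perform leaves the body undecoded, which suits callers that
+// forward the raw payload. A sketch (assumes a TypedClient `es` and imports
+// of `io` and `os` by the caller):
+//
+//	httpRes, err := es.Mget().Index("articles").Ids("1").Perform(context.Background())
+//	if err == nil {
+//		defer httpRes.Body.Close()
+//		io.Copy(os.Stdout, httpRes.Body)
+//	}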
+func (r Mget) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "mget") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "mget") + if reader := instrument.RecordRequestBody(ctx, "mget", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "mget") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Mget query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mget.Response +func (r Mget) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "mget") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Mget headers map. +func (r *Mget) Header(key, value string) *Mget { + r.headers.Set(key, value) + + return r +} + +// Index Name of the index to retrieve documents from when `ids` are specified, or +// when a document in the `docs` array does not specify an index. +// API Name: index +func (r *Mget) Index(index string) *Mget { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ForceSyntheticSource Should this request force synthetic _source? +// Use this to test if the mapping supports synthetic _source and to get a sense +// of the worst case performance. +// Fetches with this enabled will be slower the enabling synthetic source +// natively in the index. 
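+//
+// For example (sketch; the index name and ID are illustrative):
+//
+//	res, err := es.Mget().Index("logs").Ids("1").ForceSyntheticSource(true).Do(context.Background())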
+// API name: force_synthetic_source +func (r *Mget) ForceSyntheticSource(forcesyntheticsource bool) *Mget { + r.values.Set("force_synthetic_source", strconv.FormatBool(forcesyntheticsource)) + + return r +} + +// Preference Specifies the node or shard the operation should be performed on. Random by +// default. +// API name: preference +func (r *Mget) Preference(preference string) *Mget { + r.values.Set("preference", preference) + + return r +} + +// Realtime If `true`, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *Mget) Realtime(realtime bool) *Mget { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Refresh If `true`, the request refreshes relevant shards before retrieving documents. +// API name: refresh +func (r *Mget) Refresh(refresh bool) *Mget { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// Routing Custom value used to route operations to a specific shard. +// API name: routing +func (r *Mget) Routing(routing string) *Mget { + r.values.Set("routing", routing) + + return r +} + +// Source_ True or false to return the `_source` field or not, or a list of fields to +// return. +// API name: _source +func (r *Mget) Source_(sourceconfigparam string) *Mget { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// API name: _source_excludes +func (r *Mget) SourceExcludes_(fields ...string) *Mget { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. You +// can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_includes +func (r *Mget) SourceIncludes_(fields ...string) *Mget { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// StoredFields If `true`, retrieves the document fields stored in the index rather than the +// document `_source`. +// API name: stored_fields +func (r *Mget) StoredFields(fields ...string) *Mget { + r.values.Set("stored_fields", strings.Join(fields, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Mget) ErrorTrace(errortrace bool) *Mget { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Mget) FilterPath(filterpaths ...string) *Mget { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
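+//
+// The generic parameters compose with the request-specific ones; a sketch
+// that trims an mget response down to the fields of interest:
+//
+//	res, err := es.Mget().
+//		Index("articles").
+//		Ids("1", "2").
+//		FilterPath("docs._id", "docs._source").
+//		Human(true).
+//		Do(context.Background())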
+// API name: human +func (r *Mget) Human(human bool) *Mget { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Mget) Pretty(pretty bool) *Mget { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The documents you want to retrieve. Required if no index is specified in the +// request URI. +// API name: docs +func (r *Mget) Docs(docs ...types.MgetOperationVariant) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.MgetOperationCaster()) + + } + return r +} + +// The IDs of the documents you want to retrieve. Allowed when the index is +// specified in the request URI. +// API name: ids +func (r *Mget) Ids(ids ...string) *Mget { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ids = ids + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/request.go new file mode 100644 index 000000000..e77f93212 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/request.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mget + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package mget +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mget/MultiGetRequest.ts#L25-L127 +type Request struct { + + // Docs The documents you want to retrieve. Required if no index is specified in the + // request URI. + Docs []types.MgetOperation `json:"docs,omitempty"` + // Ids The IDs of the documents you want to retrieve. Allowed when the index is + // specified in the request URI. 
+ Ids []string `json:"ids,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Mget request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/response.go new file mode 100644 index 000000000..f95332b14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mget/response.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mget + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mget +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mget/MultiGetResponse.ts#L22-L31 +type Response struct { + + // Docs The response includes a docs array that contains the documents in the order + // specified in the request. + // The structure of the returned documents is similar to that returned by the + // get API. + // If there is a failure getting a particular document, the error is included in + // place of the document. 
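+	//
+	// Each entry decodes to a concrete item type (see UnmarshalJSON below);
+	// a consumption sketch, given a Response `res` returned by Do (field
+	// names follow typedapi/types):
+	//
+	//	for _, doc := range res.Docs {
+	//		switch d := doc.(type) {
+	//		case *types.GetResult:
+	//			fmt.Println(d.Id_, d.Found)
+	//		case *types.MultiGetError:
+	//			fmt.Println("lookup failed for", d.Id_)
+	//		}
+	//	}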
+ Docs []types.MgetResponseItem `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + docs_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Docs", err) + } + + switch t { + + case "fields", "found", "_ignored", "_primary_term", "_routing", "_seq_no", "_source", "_version": + o := types.NewGetResult() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + continue docs_field + + case "error": + o := types.NewMultiGetError() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + continue docs_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/helpers.go new file mode 100644 index 000000000..cd063729a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/helpers.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package msearch + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// AddSearch is a helper function to add a new search to the buffer of the current msearch request. 
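+// A sketch of pairing it with Do (assumes a TypedClient `es`; the index,
+// query, and the field names on the header/body literals are illustrative):
+//
+//	req := es.Msearch()
+//	if err := req.AddSearch(
+//		types.MultisearchHeader{Index: []string{"articles"}},
+//		types.SearchRequestBody{Query: &types.Query{MatchAll: &types.MatchAllQuery{}}},
+//	); err != nil {
+//		panic(err) // sketch only; handle the error properly in real code
+//	}
+//	res, err := req.Do(context.Background())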
+func (r *Msearch) AddSearch(header types.MultisearchHeader, body types.SearchRequestBody) error { + h, err := json.Marshal(header) + if err != nil { + return fmt.Errorf("msearch.AddSearch cannot serialize header: %w", err) + } + + b, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("msearch.AddSearch cannot serialize body: %w", err) + } + + r.buf.Write(h) + r.buf.Write([]byte("\n")) + r.buf.Write(b) + r.buf.Write([]byte("\n")) + + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/msearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/msearch.go new file mode 100644 index 000000000..d2745a8cd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/msearch.go @@ -0,0 +1,538 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run multiple searches. +// +// The format of the request is similar to the bulk API format and makes use of +// the newline delimited JSON (NDJSON) format. +// The structure is as follows: +// +// ``` +// header\n +// body\n +// header\n +// body\n +// ``` +// +// This structure is specifically optimized to reduce parsing if a specific +// search ends up redirected to another node. +// +// IMPORTANT: The final line of data must end with a newline character `\n`. +// Each newline character may be preceded by a carriage return `\r`. +// When sending requests to this endpoint the `Content-Type` header should be +// set to `application/x-ndjson`. +package msearch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Msearch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMsearch type alias for index. 
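+//
+// The NDJSON format described in the package documentation can also be
+// supplied verbatim through Raw (defined below); a sketch, assuming a
+// TypedClient `es`:
+//
+//	body := strings.NewReader("{}\n" + `{"query":{"match_all":{}}}` + "\n")
+//	res, err := es.Msearch().Raw(body).Do(context.Background())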
+type NewMsearch func() *Msearch + +// NewMsearchFunc returns a new instance of Msearch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { + return func() *Msearch { + n := New(tp) + + return n + } +} + +// Run multiple searches. +// +// The format of the request is similar to the bulk API format and makes use of +// the newline delimited JSON (NDJSON) format. +// The structure is as follows: +// +// ``` +// header\n +// body\n +// header\n +// body\n +// ``` +// +// This structure is specifically optimized to reduce parsing if a specific +// search ends up redirected to another node. +// +// IMPORTANT: The final line of data must end with a newline character `\n`. +// Each newline character may be preceded by a carriage return `\r`. +// When sending requests to this endpoint the `Content-Type` header should be +// set to `application/x-ndjson`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch +func New(tp elastictransport.Interface) *Msearch { + r := &Msearch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Msearch) Raw(raw io.Reader) *Msearch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Msearch) Request(req *Request) *Msearch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Msearch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Msearch: %w", err) + } + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_msearch") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_msearch") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Msearch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "msearch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "msearch") + if reader := instrument.RecordRequestBody(ctx, "msearch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "msearch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Msearch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearch.Response +func (r Msearch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "msearch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Msearch headers map. +func (r *Msearch) Header(key, value string) *Msearch { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and index aliases to search. +// API Name: index +func (r *Msearch) Index(index string) *Msearch { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all value targets only missing or closed indices. This behavior +// applies even if the request targets other open indices. For example, a +// request targeting foo*,bar* returns an error if an index starts with foo but +// no index starts with bar. 
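+//
+// For example (sketch; the wildcard pattern is illustrative and `body` is an
+// NDJSON reader as in the Raw sketch above):
+//
+//	res, err := es.Msearch().
+//		Index("logs-*").
+//		AllowNoIndices(true).
+//		ExpandWildcards(expandwildcard.Open).
+//		Raw(body).
+//		Do(context.Background())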
+// API name: allow_no_indices +func (r *Msearch) AllowNoIndices(allownoindices bool) *Msearch { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// CcsMinimizeRoundtrips If true, network roundtrips between the coordinating node and remote clusters +// are minimized for cross-cluster search requests. +// API name: ccs_minimize_roundtrips +func (r *Msearch) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Msearch { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// ExpandWildcards Type of index that wildcard expressions can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. +// API name: expand_wildcards +func (r *Msearch) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Msearch { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *Msearch) IgnoreThrottled(ignorethrottled bool) *Msearch { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *Msearch) IgnoreUnavailable(ignoreunavailable bool) *Msearch { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeNamedQueriesScore Indicates whether hit.matched_queries should be rendered as a map that +// includes +// the name of the matched query associated with its score (true) +// or as an array containing the name of the matched queries (false) +// This functionality reruns each named query on every hit in a search response. +// Typically, this adds a small overhead to a request. +// However, using computationally expensive named queries on a large number of +// hits may add significant overhead. +// API name: include_named_queries_score +func (r *Msearch) IncludeNamedQueriesScore(includenamedqueriesscore bool) *Msearch { + r.values.Set("include_named_queries_score", strconv.FormatBool(includenamedqueriesscore)) + + return r +} + +// MaxConcurrentSearches Maximum number of concurrent searches the multi search API can execute. +// Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. +// API name: max_concurrent_searches +func (r *Msearch) MaxConcurrentSearches(maxconcurrentsearches int) *Msearch { + r.values.Set("max_concurrent_searches", strconv.Itoa(maxconcurrentsearches)) + + return r +} + +// MaxConcurrentShardRequests Maximum number of concurrent shard requests that each sub-search request +// executes per node. +// API name: max_concurrent_shard_requests +func (r *Msearch) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *Msearch { + r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests)) + + return r +} + +// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search +// shards based on query rewriting if the number of shards the search request +// expands to exceeds the threshold. 
This filter roundtrip can limit the number +// of shards significantly if for instance a shard can not match any documents +// based on its rewrite method i.e., if date filters are mandatory to match but +// the shard bounds and the query are disjoint. +// API name: pre_filter_shard_size +func (r *Msearch) PreFilterShardSize(prefiltershardsize string) *Msearch { + r.values.Set("pre_filter_shard_size", prefiltershardsize) + + return r +} + +// RestTotalHitsAsInt If true, hits.total are returned as an integer in the response. Defaults to +// false, which returns an object. +// API name: rest_total_hits_as_int +func (r *Msearch) RestTotalHitsAsInt(resttotalhitsasint bool) *Msearch { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// Routing Custom routing value used to route search operations to a specific shard. +// API name: routing +func (r *Msearch) Routing(routing string) *Msearch { + r.values.Set("routing", routing) + + return r +} + +// SearchType Indicates whether global term and document frequencies should be used when +// scoring returned documents. +// API name: search_type +func (r *Msearch) SearchType(searchtype searchtype.SearchType) *Msearch { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// TypedKeys Specifies whether aggregation and suggester names should be prefixed by their +// respective types in the response. +// API name: typed_keys +func (r *Msearch) TypedKeys(typedkeys bool) *Msearch { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Msearch) ErrorTrace(errortrace bool) *Msearch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Msearch) FilterPath(filterpaths ...string) *Msearch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Msearch) Human(human bool) *Msearch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Msearch) Pretty(pretty bool) *Msearch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/request.go new file mode 100644 index 000000000..cb7c72c3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/MultiSearchRequest.ts#L25-L140 +type Request = []types.MsearchRequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/response.go new file mode 100644 index 000000000..7f74b3100 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch/response.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/MultiSearchResponse.ts#L25-L28 +type Response struct { + Responses []types.MsearchResponseItem `json:"responses"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "responses": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + responses_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Responses", err) + } + + switch t { + + case "aggregations", "_clusters", "fields", "hits", "max_score", "num_reduce_phases", "pit_id", "profile", "_scroll_id", "_shards", "suggest", "terminated_early", "timed_out", "took": + o := types.NewMultiSearchItem() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + continue responses_field + + case "error": + o := types.NewErrorResponseBase() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + continue responses_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/msearch_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/msearch_template.go new file mode 100644 index 000000000..3a913014d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/msearch_template.go @@ -0,0 +1,450 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
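
Editor's note: Responses is deliberately a slice of the any-typed union types.MsearchResponseItem. The UnmarshalJSON above sniffs the keys of each array element and appends either a *types.MultiSearchItem (success) or a *types.ErrorResponseBase (per-item failure), so callers need a type switch. A sketch, reusing the res value from the earlier Msearch example; field names follow typedapi/types at this vendored revision.

// Requires "fmt", "github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch",
// and "github.com/elastic/go-elasticsearch/v9/typedapi/types".
func reportMsearch(res *msearch.Response) {
	for i, item := range res.Responses {
		switch v := item.(type) {
		case *types.MultiSearchItem:
			// A successful sub-search; Total is only an object when
			// rest_total_hits_as_int is left at its default (false).
			if v.Hits.Total != nil {
				fmt.Printf("sub-search %d: %d hits\n", i, v.Hits.Total.Value)
			}
		case *types.ErrorResponseBase:
			// One failed sub-search does not invalidate the others.
			fmt.Printf("sub-search %d failed with status %d\n", i, v.Status)
		default:
			fmt.Printf("sub-search %d: unexpected element %T\n", i, item)
		}
	}
}
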
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. +// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` +package msearchtemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MsearchTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMsearchTemplate type alias for index. +type NewMsearchTemplate func() *MsearchTemplate + +// NewMsearchTemplateFunc returns a new instance of MsearchTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMsearchTemplateFunc(tp elastictransport.Interface) NewMsearchTemplate { + return func() *MsearchTemplate { + n := New(tp) + + return n + } +} + +// Run multiple templated searches. +// +// Run multiple templated searches with a single request. +// If you are providing a text file or text input to `curl`, use the +// `--data-binary` flag instead of `-d` to preserve newlines. 
+// For example: +// +// ``` +// $ cat requests +// { "index": "my-index" } +// { "id": "my-search-template", "params": { "query_string": "hello world", +// "from": 0, "size": 10 }} +// { "index": "my-other-index" } +// { "id": "my-other-search-template", "params": { "query_type": "match_all" }} +// +// $ curl -H "Content-Type: application/x-ndjson" -XGET +// localhost:9200/_msearch/template --data-binary "@requests"; echo +// ``` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template +func New(tp elastictransport.Interface) *MsearchTemplate { + r := &MsearchTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MsearchTemplate) Raw(raw io.Reader) *MsearchTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MsearchTemplate) Request(req *Request) *MsearchTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MsearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MsearchTemplate: %w", err) + } + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_msearch") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_msearch") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r MsearchTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "msearch_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "msearch_template") + if reader := instrument.RecordRequestBody(ctx, "msearch_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "msearch_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MsearchTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearchtemplate.Response +func (r MsearchTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "msearch_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the MsearchTemplate headers map. +func (r *MsearchTemplate) Header(key, value string) *MsearchTemplate { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*`. +// API Name: index +func (r *MsearchTemplate) Index(index string) *MsearchTemplate { + r.paramSet |= indexMask + r.index = index + + return r +} + +// CcsMinimizeRoundtrips If `true`, network round-trips are minimized for cross-cluster search +// requests. 
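
Editor's note: mirroring the curl example in the package comment above, here is the same NDJSON round trip in Go. The client is the one from the first sketch; template IDs and parameters are illustrative. Note that MaxConcurrentSearches on this builder (defined just below) takes a string, unlike the int-typed setter on Msearch.

// Requires "context", "fmt", "strings", and the es client from the first sketch.
func runTemplates(ctx context.Context, es *elasticsearch.TypedClient) error {
	ndjson := strings.NewReader(`{"index":"my-index"}
{"id":"my-search-template","params":{"query_string":"hello world","from":0,"size":10}}
{"index":"my-other-index"}
{"id":"my-other-search-template","params":{"query_type":"match_all"}}
`)

	res, err := es.MsearchTemplate().
		Raw(ndjson).                // sent as x-ndjson, per HttpRequest above
		MaxConcurrentSearches("8"). // string-typed on this builder
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("templated msearch: %d sub-responses\n", len(res.Responses))
	return nil
}
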
+// API name: ccs_minimize_roundtrips +func (r *MsearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *MsearchTemplate { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// MaxConcurrentSearches The maximum number of concurrent searches the API can run. +// API name: max_concurrent_searches +func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *MsearchTemplate { + r.values.Set("max_concurrent_searches", maxconcurrentsearches) + + return r +} + +// SearchType The type of the search operation. +// API name: search_type +func (r *MsearchTemplate) SearchType(searchtype searchtype.SearchType) *MsearchTemplate { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// RestTotalHitsAsInt If `true`, the response returns `hits.total` as an integer. +// If `false`, it returns `hits.total` as an object. +// API name: rest_total_hits_as_int +func (r *MsearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *MsearchTemplate { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// TypedKeys If `true`, the response prefixes aggregation and suggester names with their +// respective types. +// API name: typed_keys +func (r *MsearchTemplate) TypedKeys(typedkeys bool) *MsearchTemplate { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MsearchTemplate) ErrorTrace(errortrace bool) *MsearchTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MsearchTemplate) FilterPath(filterpaths ...string) *MsearchTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MsearchTemplate) Human(human bool) *MsearchTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MsearchTemplate) Pretty(pretty bool) *MsearchTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/request.go new file mode 100644 index 000000000..b31a418cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearchtemplate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package msearchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L116 +type Request = []types.RequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/response.go new file mode 100644 index 000000000..5d71cbd82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate/response.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
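
Editor's note: because Request is a bare slice alias, a typed msearch_template payload is just alternating types.MultisearchHeader and types.TemplateConfig values; HttpRequest marshals each item onto its own NDJSON line. A sketch, assuming those two types keep the field shapes they have at this vendored revision (Id as *string, Params as map[string]json.RawMessage):

// Requires "context", "encoding/json", "fmt", and the packages used above.
func runTypedTemplate(ctx context.Context, es *elasticsearch.TypedClient) error {
	id := "my-search-template" // illustrative stored template ID
	req := msearchtemplate.Request{
		types.MultisearchHeader{Index: []string{"my-index"}},
		types.TemplateConfig{
			Id: &id,
			Params: map[string]json.RawMessage{
				"query_string": json.RawMessage(`"hello world"`),
				"size":         json.RawMessage(`10`),
			},
		},
	}
	res, err := es.MsearchTemplate().Request(&req).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d sub-responses in %d ms\n", len(res.Responses), res.Took)
	return nil
}
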
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearchtemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package msearchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L33 +type Response struct { + Responses []types.MsearchResponseItem `json:"responses"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "responses": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + responses_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Responses", err) + } + + switch t { + + case "aggregations", "_clusters", "fields", "hits", "max_score", "num_reduce_phases", "pit_id", "profile", "_scroll_id", "_shards", "suggest", "terminated_early", "timed_out", "took": + o := types.NewMultiSearchItem() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + continue responses_field + + case "error": + o := types.NewErrorResponseBase() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + continue responses_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Responses", err) + } + s.Responses = append(s.Responses, o) + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/mtermvectors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/mtermvectors.go new file mode 100644 index 000000000..8ad642dda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/mtermvectors.go @@ -0,0 +1,510 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
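
Editor's note: one behavioral detail worth calling out is that Do on this builder unconditionally calls r.TypedKeys(true), so aggregation and suggester names come back prefixed with their type (for example sterms#my_agg rather than my_agg); that prefix is what lets the typed decoder pick concrete aggregate types. The response union is the same as plain msearch, so handling is identical; a compressed sketch:

// Requires "fmt" and the msearchtemplate and types packages above.
func reportTemplated(res *msearchtemplate.Response) {
	for i, item := range res.Responses {
		if failure, ok := item.(*types.ErrorResponseBase); ok {
			fmt.Printf("template %d failed: status %d\n", i, failure.Status)
			continue
		}
		if hit, ok := item.(*types.MultiSearchItem); ok && hit.Hits.Total != nil {
			fmt.Printf("template %d: %d hits\n", i, hit.Hits.Total.Value)
		}
	}
}
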
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get multiple term vectors. +// +// Get multiple term vectors with a single request. +// You can specify existing documents by index and ID or provide artificial +// documents in the body of the request. +// You can specify the index in the request body or request URI. +// The response contains a `docs` array with all the fetched termvectors. +// Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. +package mtermvectors + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Mtermvectors struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMtermvectors type alias for index. +type NewMtermvectors func() *Mtermvectors + +// NewMtermvectorsFunc returns a new instance of Mtermvectors with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMtermvectorsFunc(tp elastictransport.Interface) NewMtermvectors { + return func() *Mtermvectors { + n := New(tp) + + return n + } +} + +// Get multiple term vectors. +// +// Get multiple term vectors with a single request. +// You can specify existing documents by index and ID or provide artificial +// documents in the body of the request. +// You can specify the index in the request body or request URI. +// The response contains a `docs` array with all the fetched termvectors. +// Each element has the structure provided by the termvectors API. +// +// **Artificial documents** +// +// You can also use `mtermvectors` to generate term vectors for artificial +// documents provided in the body of the request. +// The mapping used is determined by the specified `_index`. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors +func New(tp elastictransport.Interface) *Mtermvectors { + r := &Mtermvectors{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Mtermvectors) Raw(raw io.Reader) *Mtermvectors { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Mtermvectors) Request(req *Request) *Mtermvectors { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Mtermvectors) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Mtermvectors: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_mtermvectors") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mtermvectors") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Mtermvectors) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "mtermvectors") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "mtermvectors") + if reader := instrument.RecordRequestBody(ctx, "mtermvectors", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "mtermvectors") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Mtermvectors query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mtermvectors.Response +func (r Mtermvectors) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "mtermvectors") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Mtermvectors headers map. +func (r *Mtermvectors) Header(key, value string) *Mtermvectors { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index that contains the documents. +// API Name: index +func (r *Mtermvectors) Index(index string) *Mtermvectors { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Fields A comma-separated list or wildcard expressions of fields to include in the +// statistics. +// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. 
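
Editor's note: a basic call against existing documents, using the Index helper above together with the Ids body setter and the field-level switches defined just below. The index name, document IDs, and field are illustrative.

// Requires "context", "fmt", and the es client from the first sketch.
func termStats(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Mtermvectors().
		Index("my-index").
		Ids("1", "2", "3").   // body: {"ids":["1","2","3"]}
		Fields("text").       // ?fields=text
		TermStatistics(true). // include term and document frequencies
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("fetched term vectors for %d docs\n", len(res.Docs))
	return nil
}
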
+// API name: fields +func (r *Mtermvectors) Fields(fields ...string) *Mtermvectors { + r.values.Set("fields", strings.Join(fields, ",")) + + return r +} + +// FieldStatistics If `true`, the response includes the document count, sum of document +// frequencies, and sum of total term frequencies. +// API name: field_statistics +func (r *Mtermvectors) FieldStatistics(fieldstatistics bool) *Mtermvectors { + r.values.Set("field_statistics", strconv.FormatBool(fieldstatistics)) + + return r +} + +// Offsets If `true`, the response includes term offsets. +// API name: offsets +func (r *Mtermvectors) Offsets(offsets bool) *Mtermvectors { + r.values.Set("offsets", strconv.FormatBool(offsets)) + + return r +} + +// Payloads If `true`, the response includes term payloads. +// API name: payloads +func (r *Mtermvectors) Payloads(payloads bool) *Mtermvectors { + r.values.Set("payloads", strconv.FormatBool(payloads)) + + return r +} + +// Positions If `true`, the response includes term positions. +// API name: positions +func (r *Mtermvectors) Positions(positions bool) *Mtermvectors { + r.values.Set("positions", strconv.FormatBool(positions)) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. +// API name: preference +func (r *Mtermvectors) Preference(preference string) *Mtermvectors { + r.values.Set("preference", preference) + + return r +} + +// Realtime If true, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *Mtermvectors) Realtime(realtime bool) *Mtermvectors { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *Mtermvectors) Routing(routing string) *Mtermvectors { + r.values.Set("routing", routing) + + return r +} + +// TermStatistics If true, the response includes term frequency and document frequency. +// API name: term_statistics +func (r *Mtermvectors) TermStatistics(termstatistics bool) *Mtermvectors { + r.values.Set("term_statistics", strconv.FormatBool(termstatistics)) + + return r +} + +// Version If `true`, returns the document version as part of a hit. +// API name: version +func (r *Mtermvectors) Version(versionnumber string) *Mtermvectors { + r.values.Set("version", versionnumber) + + return r +} + +// VersionType The version type. +// API name: version_type +func (r *Mtermvectors) VersionType(versiontype versiontype.VersionType) *Mtermvectors { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Mtermvectors) ErrorTrace(errortrace bool) *Mtermvectors { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Mtermvectors) FilterPath(filterpaths ...string) *Mtermvectors { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Mtermvectors) Human(human bool) *Mtermvectors { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Mtermvectors) Pretty(pretty bool) *Mtermvectors { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// An array of existing or artificial documents. +// API name: docs +func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperationVariant) *Mtermvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.MTermVectorsOperationCaster()) + + } + return r +} + +// A simplified syntax to specify documents by their ID if they're in the same +// index. +// API name: ids +func (r *Mtermvectors) Ids(ids ...string) *Mtermvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + + r.req.Ids = append(r.req.Ids, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/request.go new file mode 100644 index 000000000..70f531d5a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mtermvectors + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package mtermvectors +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L134 +type Request struct { + + // Docs An array of existing or artificial documents. + Docs []types.MTermVectorsOperation `json:"docs,omitempty"` + // Ids A simplified syntax to specify documents by their ID if they're in the same + // index. 
+ Ids []string `json:"ids,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Mtermvectors request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/response.go new file mode 100644 index 000000000..206c7d317 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mtermvectors + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mtermvectors +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 +type Response struct { + Docs []types.TermVectorsResult `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/open_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/open_point_in_time.go new file mode 100644 index 000000000..4bd85164f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/open_point_in_time.go @@ -0,0 +1,566 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
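
Editor's note: FromJSON is the escape hatch when the body already exists as JSON, for example the artificial-document form described in the package comment; the resulting *Request plugs into the builder's Request method. A sketch, assuming MTermVectorsOperation keeps its _index/doc JSON tags at this revision; the document body is illustrative.

// Requires "context", "fmt", and the mtermvectors package above.
func artificialDocs(ctx context.Context, es *elasticsearch.TypedClient) error {
	// An artificial document: term vectors are computed on the fly using
	// my-index's mappings, without the document ever being indexed.
	req, err := mtermvectors.NewRequest().FromJSON(`{
	  "docs": [
	    {"_index": "my-index", "doc": {"text": "the quick brown fox"}}
	  ]
	}`)
	if err != nil {
		return err
	}
	res, err := es.Mtermvectors().Request(req).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d term vector results\n", len(res.Docs))
	return nil
}
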
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Open a point in time. +// +// A search request by default runs against the most recent visible data of the +// target indices, +// which is called point in time. Elasticsearch pit (point in time) is a +// lightweight view into the +// state of the data as it existed when initiated. In some cases, it’s preferred +// to perform multiple +// search requests using the same point in time. For example, if refreshes +// happen between +// `search_after` requests, then the results of those requests might not be +// consistent as changes happening +// between searches are only visible to the more recent point in time. +// +// A point in time must be opened explicitly before being used in search +// requests. +// +// A subsequent search request with the `pit` parameter must not specify +// `index`, `routing`, or `preference` values as these parameters are copied +// from the point in time. +// +// Just like regular searches, you can use `from` and `size` to page through +// point in time search results, up to the first 10,000 hits. +// If you want to retrieve more hits, use PIT with `search_after`. +// +// IMPORTANT: The open point in time request and each subsequent search request +// can return different identifiers; always use the most recently received ID +// for the next search request. +// +// When a PIT that contains shard failures is used in a search request, the +// missing are always reported in the search response as a +// `NoShardAvailableActionException` exception. +// To get rid of these exceptions, a new PIT needs to be created so that shards +// missing from the previous PIT can be handled, assuming they become available +// in the meantime. +// +// **Keeping point in time alive** +// +// The `keep_alive` parameter, which is passed to a open point in time request +// and search request, extends the time to live of the corresponding point in +// time. +// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. +// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. 
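
Editor's note: in code, the lifecycle the package comment describes reduces to open, thread the most recent PIT ID through subsequent searches, and close when done. A sketch using the helpers defined in this file; the Id field on the response and the close call live outside this hunk and are assumed from the rest of the package.

// Requires "context", "fmt", "strings", and the es client from the first sketch.
func pitSearch(ctx context.Context, es *elasticsearch.TypedClient) error {
	// keep_alive only needs to outlive the next request, not all the work.
	pit, err := es.OpenPointInTime("my-index").
		KeepAlive("1m").
		AllowPartialSearchResults(false). // fail rather than open a partial view
		Do(ctx)
	if err != nil {
		return err
	}
	pitID := pit.Id // always replace this with the freshest ID a search returns

	// ... run searches that reference pitID instead of an index name ...

	// Release the search contexts when finished (assumed sibling API).
	_, err = es.ClosePointInTime().Raw(strings.NewReader(
		fmt.Sprintf(`{"id":%q}`, pitID))).Do(ctx)
	return err
}
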
+package openpointintime + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type OpenPointInTime struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOpenPointInTime type alias for index. +type NewOpenPointInTime func(index string) *OpenPointInTime + +// NewOpenPointInTimeFunc returns a new instance of OpenPointInTime with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewOpenPointInTimeFunc(tp elastictransport.Interface) NewOpenPointInTime { + return func(index string) *OpenPointInTime { + n := New(tp) + + n._index(index) + + return n + } +} + +// Open a point in time. +// +// A search request by default runs against the most recent visible data of the +// target indices, +// which is called point in time. Elasticsearch pit (point in time) is a +// lightweight view into the +// state of the data as it existed when initiated. In some cases, it’s preferred +// to perform multiple +// search requests using the same point in time. For example, if refreshes +// happen between +// `search_after` requests, then the results of those requests might not be +// consistent as changes happening +// between searches are only visible to the more recent point in time. +// +// A point in time must be opened explicitly before being used in search +// requests. +// +// A subsequent search request with the `pit` parameter must not specify +// `index`, `routing`, or `preference` values as these parameters are copied +// from the point in time. +// +// Just like regular searches, you can use `from` and `size` to page through +// point in time search results, up to the first 10,000 hits. +// If you want to retrieve more hits, use PIT with `search_after`. +// +// IMPORTANT: The open point in time request and each subsequent search request +// can return different identifiers; always use the most recently received ID +// for the next search request. +// +// When a PIT that contains shard failures is used in a search request, the +// missing are always reported in the search response as a +// `NoShardAvailableActionException` exception. +// To get rid of these exceptions, a new PIT needs to be created so that shards +// missing from the previous PIT can be handled, assuming they become available +// in the meantime. +// +// **Keeping point in time alive** +// +// The `keep_alive` parameter, which is passed to a open point in time request +// and search request, extends the time to live of the corresponding point in +// time. +// The value does not need to be long enough to process all data — it just needs +// to be long enough for the next request. 
+// +// Normally, the background merge process optimizes the index by merging +// together smaller segments to create new, bigger segments. +// Once the smaller segments are no longer needed they are deleted. +// However, open point-in-times prevent the old segments from being deleted +// since they are still in use. +// +// TIP: Keeping older segments alive means that more disk space and file handles +// are needed. +// Ensure that you have configured your nodes to have ample free file handles. +// +// Additionally, if a segment contains deleted or updated documents then the +// point in time must keep track of whether each document in the segment was +// live at the time of the initial search request. +// Ensure that your nodes have sufficient heap space if you have many open +// point-in-times on an index that is subject to ongoing deletes or updates. +// Note that a point-in-time doesn't prevent its associated indices from being +// deleted. +// You can check how many point-in-times (that is, search contexts) are open +// with the nodes stats API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time +func New(tp elastictransport.Interface) *OpenPointInTime { + r := &OpenPointInTime{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OpenPointInTime) Raw(raw io.Reader) *OpenPointInTime { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OpenPointInTime) Request(req *Request) *OpenPointInTime { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *OpenPointInTime) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OpenPointInTime: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_pit") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r OpenPointInTime) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "open_point_in_time") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "open_point_in_time") + if reader := instrument.RecordRequestBody(ctx, "open_point_in_time", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "open_point_in_time") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the OpenPointInTime query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a openpointintime.Response +func (r OpenPointInTime) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "open_point_in_time") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the OpenPointInTime headers map. +func (r *OpenPointInTime) Header(key, value string) *OpenPointInTime { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of index names to open point in time; use `_all` or +// empty string to perform the operation on all indices +// API Name: index +func (r *OpenPointInTime) _index(index string) *OpenPointInTime { + r.paramSet |= indexMask + r.index = index + + return r +} + +// KeepAlive Extend the length of time that the point in time persists. +// API name: keep_alive +func (r *OpenPointInTime) KeepAlive(duration string) *OpenPointInTime { + r.values.Set("keep_alive", duration) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *OpenPointInTime) IgnoreUnavailable(ignoreunavailable bool) *OpenPointInTime { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Preference The node or shard the operation should be performed on. +// By default, it is random. +// API name: preference +func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { + r.values.Set("preference", preference) + + return r +} + +// Routing A custom value that is used to route operations to a specific shard. +// API name: routing +func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { + r.values.Set("routing", routing) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *OpenPointInTime { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// AllowPartialSearchResults Indicates whether the point in time tolerates unavailable shards or shard +// failures when initially creating the PIT. +// If `false`, creating a point in time request when a shard is missing or +// unavailable will throw an exception. +// If `true`, the point in time will contain all the shards that are available +// at the time of the request. 
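
Editor's note: the error path in Do above never leaks raw transport bytes; non-2xx responses are decoded into *types.ElasticsearchError (with Status backfilled from the HTTP code when the body omits it) and returned as the error. That makes errors.As the idiomatic way to separate server-side failures from transport failures; a sketch:

// Requires "context", "errors", "fmt", and the types package above.
func openOrExplain(ctx context.Context, es *elasticsearch.TypedClient) {
	_, err := es.OpenPointInTime("no-such-index").KeepAlive("30s").Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// Server-side failure, e.g. a 404 index_not_found_exception.
			fmt.Printf("status %d: %s\n", esErr.Status, esErr.ErrorCause.Type)
			return
		}
		// Transport-level failure (connection refused, context cancelled, ...).
		fmt.Println("transport error:", err)
	}
}
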
+// API name: allow_partial_search_results +func (r *OpenPointInTime) AllowPartialSearchResults(allowpartialsearchresults bool) *OpenPointInTime { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + +// MaxConcurrentShardRequests Maximum number of concurrent shard requests that each sub-search request +// executes per node. +// API name: max_concurrent_shard_requests +func (r *OpenPointInTime) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *OpenPointInTime { + r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OpenPointInTime) ErrorTrace(errortrace bool) *OpenPointInTime { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OpenPointInTime) FilterPath(filterpaths ...string) *OpenPointInTime { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OpenPointInTime) Human(human bool) *OpenPointInTime { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OpenPointInTime) Pretty(pretty bool) *OpenPointInTime { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Filter indices if the provided query rewrites to `match_none` on every shard. +// API name: index_filter +func (r *OpenPointInTime) IndexFilter(indexfilter types.QueryVariant) *OpenPointInTime { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexFilter = indexfilter.QueryCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/request.go new file mode 100644 index 000000000..eac839385 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
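The OpenPointInTime builder above follows the library's fluent pattern: the required index is wired in through the private `_index` setter, query parameters go through setters such as `KeepAlive`, and the optional body through `IndexFilter`. A minimal usage sketch, assuming a configured `elastictransport.Interface` and the package's conventional `NewOpenPointInTimeFunc` constructor (defined earlier in the file, not shown in this hunk); the index name and keep-alive are placeholders:

```go
import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime"
)

// openPIT opens a point in time against "my-index" and returns its id.
func openPIT(ctx context.Context, tp elastictransport.Interface) (string, error) {
	res, err := openpointintime.NewOpenPointInTimeFunc(tp)("my-index").
		KeepAlive("1m").        // how long the PIT should persist
		IgnoreUnavailable(true). // tolerate missing or closed indices
		Do(ctx)
	if err != nil {
		return "", err
	}
	return res.Id, nil
}
```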
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package openpointintime + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package openpointintime +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/open_point_in_time/OpenPointInTimeRequest.ts#L26-L127 +type Request struct { + + // IndexFilter Filter indices if the provided query rewrites to `match_none` on every shard. + IndexFilter *types.Query `json:"index_filter,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Openpointintime request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/response.go new file mode 100644 index 000000000..3e0d253d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
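The request body is deliberately small: `index_filter` is its only field. When the filter already exists as raw JSON, `FromJSON` avoids building `types.Query` values by hand; a sketch (the term filter is illustrative):

```go
import "github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime"

// pitRequestFromJSON decodes a raw JSON body into the typed request struct.
func pitRequestFromJSON() (*openpointintime.Request, error) {
	return openpointintime.NewRequest().FromJSON(
		`{"index_filter": {"term": {"status": "active"}}}`,
	)
}
```

The decoded request can then be attached to the builder through its `Request` method, which follows the same shape as `PutScript.Request` shown later in this diff.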
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package openpointintime + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package openpointintime +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L23-L29 +type Response struct { + Id string `json:"id"` + // Shards_ Shards used to create the PIT + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/ping/ping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/ping/ping.go new file mode 100644 index 000000000..1aa770959 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/ping/ping.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Ping the cluster. +// Get information about whether the cluster is running. +package ping + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Ping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPing type alias for index. +type NewPing func() *Ping + +// NewPingFunc returns a new instance of Ping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPingFunc(tp elastictransport.Interface) NewPing { + return func() *Ping { + n := New(tp) + + return n + } +} + +// Ping the cluster. +// Get information about whether the cluster is running. 
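The point-in-time response carries only the PIT `id` and the `_shards` bookkeeping. Because `AllowPartialSearchResults(true)` lets a PIT open with missing shards, the shard statistics are the caller's only signal that the view is partial; a short continuation of the earlier sketch:

```go
import (
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime"
)

// reportPartialPIT logs when a point in time was opened with failed shards.
// res is the *openpointintime.Response returned by Do in the sketch above.
func reportPartialPIT(res *openpointintime.Response) {
	if res.Shards_.Failed > 0 {
		log.Printf("PIT %s is partial: %d shard(s) failed", res.Id, res.Shards_.Failed)
	}
}
```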
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster +func New(tp elastictransport.Interface) *Ping { + r := &Ping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Ping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Ping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ping") + if reader := instrument.RecordRequestBody(ctx, "ping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Ping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a ping.Response +func (r Ping) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Ping) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Ping query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Ping headers map. +func (r *Ping) Header(key, value string) *Ping { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Ping) ErrorTrace(errortrace bool) *Ping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Ping) FilterPath(filterpaths ...string) *Ping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Ping) Human(human bool) *Ping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Ping) Pretty(pretty bool) *Ping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/put_script.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/put_script.go new file mode 100644 index 000000000..9106c5a40 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/put_script.go @@ -0,0 +1,425 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
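ping.go is the simplest endpoint in this batch: a body-less `HEAD /`, with `Do` delegating to `IsSuccess`, which drains and closes the response body and maps any 2xx status to `true` and a 404 to `false` without error. A typical readiness check built on the constructors shown above:

```go
import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/ping"
)

// clusterUp reports whether the cluster answers the ping endpoint.
func clusterUp(ctx context.Context, tp elastictransport.Interface) (bool, error) {
	return ping.NewPingFunc(tp)().IsSuccess(ctx)
}
```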
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a script or search template. +// Creates or updates a stored script or search template. +package putscript + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 + + contextMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutScript struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + context string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutScript type alias for index. +type NewPutScript func(id string) *PutScript + +// NewPutScriptFunc returns a new instance of PutScript with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutScriptFunc(tp elastictransport.Interface) NewPutScript { + return func(id string) *PutScript { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a script or search template. +// Creates or updates a stored script or search template. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script +func New(tp elastictransport.Interface) *PutScript { + r := &PutScript{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutScript) Raw(raw io.Reader) *PutScript { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutScript) Request(req *Request) *PutScript { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutScript) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutScript: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + case r.paramSet == idMask|contextMask: + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "context", r.context) + } + path.WriteString(r.context) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutScript) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "put_script") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "put_script") + if reader := instrument.RecordRequestBody(ctx, "put_script", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "put_script") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutScript query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putscript.Response +func (r PutScript) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "put_script") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutScript headers map. +func (r *PutScript) Header(key, value string) *PutScript { + r.headers.Set(key, value) + + return r +} + +// Id The identifier for the stored script or search template. +// It must be unique within the cluster. +// API Name: id +func (r *PutScript) _id(id string) *PutScript { + r.paramSet |= idMask + r.id = id + + return r +} + +// Context The context in which the script or search template should run. +// To prevent errors, the API immediately compiles the script or template in +// this context. +// API Name: context +func (r *PutScript) Context(context string) *PutScript { + r.paramSet |= contextMask + r.context = context + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *PutScript) MasterTimeout(duration string) *PutScript { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: timeout +func (r *PutScript) Timeout(duration string) *PutScript { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutScript) ErrorTrace(errortrace bool) *PutScript { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutScript) FilterPath(filterpaths ...string) *PutScript { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutScript) Human(human bool) *PutScript { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutScript) Pretty(pretty bool) *PutScript { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The script or search template, its parameters, and its language. +// API name: script +func (r *PutScript) Script(script types.StoredScriptVariant) *PutScript { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Script = *script.StoredScriptCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/request.go new file mode 100644 index 000000000..7e4277b9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
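Note the two path shapes built in `HttpRequest` above: `PUT /_scripts/{id}` when only the id mask is set, and `PUT /_scripts/{id}/{context}` once `Context` adds `contextMask` to `paramSet`, which makes Elasticsearch compile-check the script in that context. A usage sketch that stores a painless script through the `FromJSON` helper defined in request.go just below; the script body and context are illustrative:

```go
import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript"
)

// storeScript uploads a stored script under the given id.
func storeScript(ctx context.Context, tp elastictransport.Interface, id string) error {
	req, err := putscript.NewRequest().FromJSON(
		`{"script": {"lang": "painless", "source": "Math.log(_score * 2) + params['factor']"}}`,
	)
	if err != nil {
		return err
	}
	_, err = putscript.NewPutScriptFunc(tp)(id).
		Context("score"). // optional: compile-check in the score context
		Request(req).
		Do(ctx)
	return err
}
```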
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putscript + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putscript +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/put_script/PutScriptRequest.ts#L25-L87 +type Request struct { + + // Script The script or search template, its parameters, and its language. + Script types.StoredScript `json:"script"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putscript request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/response.go new file mode 100644 index 000000000..a16412258 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putscript + +// Response holds the response body struct for the package putscript +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/put_script/PutScriptResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/rank_eval.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/rank_eval.go new file mode 100644 index 000000000..3557626be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/rank_eval.go @@ -0,0 +1,439 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
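putscript's response is the plain acknowledgement envelope, and like every `Do` in this diff, non-2xx bodies surface as `*types.ElasticsearchError` with the HTTP status patched in when the body omits it. Callers can therefore branch on the typed error; a sketch:

```go
import (
	"errors"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// isNotFound reports whether err is an Elasticsearch-level 404,
// as opposed to a transport failure.
func isNotFound(err error) bool {
	var esErr *types.ElasticsearchError
	return errors.As(err, &esErr) && esErr.Status == 404
}
```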
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Evaluate ranked search results. +// +// Evaluate the quality of ranked search results over a set of typical search +// queries. +package rankeval + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RankEval struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRankEval type alias for index. +type NewRankEval func() *RankEval + +// NewRankEvalFunc returns a new instance of RankEval with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRankEvalFunc(tp elastictransport.Interface) NewRankEval { + return func() *RankEval { + n := New(tp) + + return n + } +} + +// Evaluate ranked search results. +// +// Evaluate the quality of ranked search results over a set of typical search +// queries. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval +func New(tp elastictransport.Interface) *RankEval { + r := &RankEval{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RankEval) Raw(raw io.Reader) *RankEval { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RankEval) Request(req *Request) *RankEval { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *RankEval) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RankEval: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_rank_eval") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_rank_eval") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RankEval) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "rank_eval") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "rank_eval") + if reader := instrument.RecordRequestBody(ctx, "rank_eval", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rank_eval") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RankEval query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a rankeval.Response +func (r RankEval) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rank_eval") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the RankEval headers map. +func (r *RankEval) Header(key, value string) *RankEval { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and index aliases used to +// limit the request. +// Wildcard (`*`) expressions are supported. +// To target all data streams and indices in a cluster, omit this parameter or +// use `_all` or `*`. +// API Name: index +func (r *RankEval) Index(index string) *RankEval { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. This behavior +// applies even if the request targets other open indices. 
For example, a +// request targeting `foo*,bar*` returns an error if an index starts with `foo` +// but no index starts with `bar`. +// API name: allow_no_indices +func (r *RankEval) AllowNoIndices(allownoindices bool) *RankEval { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. +// API name: expand_wildcards +func (r *RankEval) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *RankEval { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `true`, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *RankEval) IgnoreUnavailable(ignoreunavailable bool) *RankEval { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// SearchType Search operation type +// API name: search_type +func (r *RankEval) SearchType(searchtype string) *RankEval { + r.values.Set("search_type", searchtype) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RankEval) ErrorTrace(errortrace bool) *RankEval { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RankEval) FilterPath(filterpaths ...string) *RankEval { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RankEval) Human(human bool) *RankEval { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RankEval) Pretty(pretty bool) *RankEval { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Definition of the evaluation metric to calculate. +// API name: metric +func (r *RankEval) Metric(metric types.RankEvalMetricVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metric = metric.RankEvalMetricCaster() + + return r +} + +// A set of typical search requests, together with their provided ratings. 
+// API name: requests +func (r *RankEval) Requests(requests ...types.RankEvalRequestItemVariant) *RankEval { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range requests { + + r.req.Requests = append(r.req.Requests, *v.RankEvalRequestItemCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/request.go new file mode 100644 index 000000000..c899100c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rankeval + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package rankeval +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/RankEvalRequest.ts#L24-L76 +type Request struct { + + // Metric Definition of the evaluation metric to calculate. + Metric *types.RankEvalMetric `json:"metric,omitempty"` + // Requests A set of typical search requests, together with their provided ratings. + Requests []types.RankEvalRequestItem `json:"requests"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Rankeval request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/response.go new file mode 100644 index 000000000..7a7f07d84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval/response.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
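rank_eval.go follows the same builder shape; its body (a `requests` list plus an optional `metric`) is usually easiest to express through `FromJSON`. A sketch with one rated query and precision-at-10, where the index name, query, and ratings are illustrative:

```go
import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval"
)

// evalPrecision runs a single rated query and returns the overall metric score.
func evalPrecision(ctx context.Context, tp elastictransport.Interface) (float64, error) {
	req, err := rankeval.NewRequest().FromJSON(`{
	  "requests": [{
	    "id": "query_1",
	    "request": {"query": {"match": {"title": "golang"}}},
	    "ratings": [{"_index": "docs", "_id": "1", "rating": 1}]
	  }],
	  "metric": {"precision": {"k": 10}}
	}`)
	if err != nil {
		return 0, err
	}
	res, err := rankeval.NewRankEvalFunc(tp)().Index("docs").Request(req).Do(ctx)
	if err != nil {
		return 0, err
	}
	return float64(res.MetricScore), nil
}
```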
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rankeval + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package rankeval +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 +type Response struct { + + // Details The details section contains one entry for every query in the original + // requests section, keyed by the search request id + Details map[string]types.RankEvalMetricDetail `json:"details"` + Failures map[string]json.RawMessage `json:"failures"` + // MetricScore The overall evaluation quality calculated by the defined metric + MetricScore types.Float64 `json:"metric_score"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Details: make(map[string]types.RankEvalMetricDetail, 0), + Failures: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/reindex.go new file mode 100644 index 000000000..b106e0221 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/reindex.go @@ -0,0 +1,641 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reindex documents. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. +// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. 
+// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. +// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// Refer to the linked documentation for examples of how to reindex documents. +package reindex + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Reindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewReindex type alias for index. +type NewReindex func() *Reindex + +// NewReindexFunc returns a new instance of Reindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewReindexFunc(tp elastictransport.Interface) NewReindex { + return func() *Reindex { + n := New(tp) + + return n + } +} + +// Reindex documents. +// +// Copy documents from a source to a destination. +// You can copy all documents to the destination index or reindex a subset of +// the documents. +// The source can be any existing index, alias, or data stream. +// The destination must differ from the source. +// For example, you cannot reindex a data stream into itself. +// +// IMPORTANT: Reindex requires `_source` to be enabled for all documents in the +// source. +// The destination should be configured as wanted before calling the reindex +// API. +// Reindex does not copy the settings from the source or its associated +// template. +// Mappings, shard counts, and replicas, for example, must be configured ahead +// of time. +// +// If the Elasticsearch security features are enabled, you must have the +// following security privileges: +// +// * The `read` index privilege for the source data stream, index, or alias. +// * The `write` index privilege for the destination data stream, index, or +// index alias. +// * To automatically create a data stream or index with a reindex API request, +// you must have the `auto_configure`, `create_index`, or `manage` index +// privilege for the destination data stream, index, or alias. +// * If reindexing from a remote cluster, the `source.remote.user` must have the +// `monitor` cluster privilege and the `read` index privilege for the source +// data stream, index, or alias. +// +// If reindexing from a remote cluster, you must explicitly allow the remote +// host in the `reindex.remote.whitelist` setting. +// Automatic data stream creation requires a matching index template with data +// stream enabled. +// +// The `dest` element can be configured like the index API to control optimistic +// concurrency control. +// Omitting `version_type` or setting it to `internal` causes Elasticsearch to +// blindly dump documents into the destination, overwriting any that happen to +// have the same ID. +// +// Setting `version_type` to `external` causes Elasticsearch to preserve the +// `version` from the source, create any documents that are missing, and update +// any documents that have an older version in the destination than they do in +// the source. +// +// Setting `op_type` to `create` causes the reindex API to create only missing +// documents in the destination. +// All existing documents will cause a version conflict. +// +// IMPORTANT: Because data streams are append-only, any reindex request to a +// destination data stream must have an `op_type` of `create`. +// A reindex can only add new documents to a destination data stream. +// It cannot update existing documents in a destination data stream. +// +// By default, version conflicts abort the reindex process. 
+// To continue reindexing if there are conflicts, set the `conflicts` request +// body property to `proceed`. +// In this case, the response includes a count of the version conflicts that +// were encountered. +// Note that the handling of other error types is unaffected by the `conflicts` +// property. +// Additionally, if you opt to count version conflicts, the operation could +// attempt to reindex more documents from the source than `max_docs` until it +// has successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// +// Refer to the linked documentation for examples of how to reindex documents. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex +func New(tp elastictransport.Interface) *Reindex { + r := &Reindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Reindex) Raw(raw io.Reader) *Reindex { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Reindex) Request(req *Request) *Reindex { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Reindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Reindex: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_reindex") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Reindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "reindex") + if reader := instrument.RecordRequestBody(ctx, "reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Reindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a reindex.Response +func (r Reindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Reindex headers map. +func (r *Reindex) Header(key, value string) *Reindex { + r.headers.Set(key, value) + + return r +} + +// Refresh If `true`, the request refreshes affected shards to make this operation +// visible to search. +// API name: refresh +func (r *Reindex) Refresh(refresh bool) *Reindex { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// RequestsPerSecond The throttle for this request in sub-requests per second. +// By default, there is no throttle. +// API name: requests_per_second +func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { + r.values.Set("requests_per_second", requestspersecond) + + return r +} + +// Scroll The period of time that a consistent view of the index should be maintained +// for scrolled search. 
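+// For example, `Scroll("10m")` keeps a consistent view of the source alive
+// for ten minutes between batches (the value is only an illustration).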
+// API name: scroll +func (r *Reindex) Scroll(duration string) *Reindex { + r.values.Set("scroll", duration) + + return r +} + +// Slices The number of slices this task should be divided into. +// It defaults to one slice, which means the task isn't sliced into subtasks. +// +// Reindex supports sliced scroll to parallelize the reindexing process. +// This parallelization can improve efficiency and provide a convenient way to +// break the request down into smaller parts. +// +// NOTE: Reindexing from remote clusters does not support manual or automatic +// slicing. +// +// If set to `auto`, Elasticsearch chooses the number of slices to use. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple sources, it will choose the number of slices based on +// the index or backing index with the smallest number of shards. +// API name: slices +func (r *Reindex) Slices(slices string) *Reindex { + r.values.Set("slices", slices) + + return r +} + +// Timeout The period each indexing waits for automatic index creation, dynamic mapping +// updates, and waiting for active shards. +// By default, Elasticsearch waits for at least one minute before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// API name: timeout +func (r *Reindex) Timeout(duration string) *Reindex { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set it to `all` or any positive integer up to the total number of shards in +// the index (`number_of_replicas+1`). +// The default value is one, which means it waits for each primary shard to be +// active. +// API name: wait_for_active_shards +func (r *Reindex) WaitForActiveShards(waitforactiveshards string) *Reindex { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the operation is complete. +// API name: wait_for_completion +func (r *Reindex) WaitForCompletion(waitforcompletion bool) *Reindex { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// RequireAlias If `true`, the destination must be an index alias. +// API name: require_alias +func (r *Reindex) RequireAlias(requirealias bool) *Reindex { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Reindex) ErrorTrace(errortrace bool) *Reindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Reindex) FilterPath(filterpaths ...string) *Reindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *Reindex) Human(human bool) *Reindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Reindex) Pretty(pretty bool) *Reindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Indicates whether to continue reindexing even when there are conflicts. +// API name: conflicts +func (r *Reindex) Conflicts(conflicts conflicts.Conflicts) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Conflicts = &conflicts + return r +} + +// The destination you are copying to. +// API name: dest +func (r *Reindex) Dest(dest types.ReindexDestinationVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = *dest.ReindexDestinationCaster() + + return r +} + +// The maximum number of documents to reindex. +// By default, all documents are reindexed. +// If it is a value less then or equal to `scroll_size`, a scroll will not be +// used to retrieve the results for the operation. +// +// If `conflicts` is set to `proceed`, the reindex operation could attempt to +// reindex more documents from the source than `max_docs` until it has +// successfully indexed `max_docs` documents into the target or it has gone +// through every document in the source query. +// API name: max_docs +func (r *Reindex) MaxDocs(maxdocs int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxDocs = &maxdocs + + return r +} + +// The script to run to update the document source or metadata when reindexing. +// API name: script +func (r *Reindex) Script(script types.ScriptVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Script = script.ScriptCaster() + + return r +} + +// API name: size +func (r *Reindex) Size(size int64) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// The source you are copying from. +// API name: source +func (r *Reindex) Source(source types.ReindexSourceVariant) *Reindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.ReindexSourceCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/request.go new file mode 100644 index 000000000..6f82d0dc9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/request.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reindex + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts" +) + +// Request holds the request body struct for the package reindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex/ReindexRequest.ts#L27-L178 +type Request struct { + + // Conflicts Indicates whether to continue reindexing even when there are conflicts. + Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` + // Dest The destination you are copying to. + Dest types.ReindexDestination `json:"dest"` + // MaxDocs The maximum number of documents to reindex. + // By default, all documents are reindexed. + // If it is a value less then or equal to `scroll_size`, a scroll will not be + // used to retrieve the results for the operation. + // + // If `conflicts` is set to `proceed`, the reindex operation could attempt to + // reindex more documents from the source than `max_docs` until it has + // successfully indexed `max_docs` documents into the target or it has gone + // through every document in the source query. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Script The script to run to update the document source or metadata when reindexing. + Script *types.Script `json:"script,omitempty"` + Size *int64 `json:"size,omitempty"` + // Source The source you are copying from. + Source types.ReindexSource `json:"source"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Reindex request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/response.go new file mode 100644 index 000000000..37e64596e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex/response.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reindex + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package reindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex/ReindexResponse.ts#L26-L92 +type Response struct { + + // Batches The number of scroll responses that were pulled back by the reindex. + Batches *int64 `json:"batches,omitempty"` + // Created The number of documents that were successfully created. + Created *int64 `json:"created,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures If there were any unrecoverable errors during the process, it is an array of + // those failures. + // If this array is not empty, the request ended because of those failures. + // Reindex is implemented using batches and any failure causes the entire + // process to end but all failures in the current batch are collected into the + // array. + // You can use the `conflicts` option to prevent the reindex from ending on + // version conflicts. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // reindex returned a `noop` value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the reindex. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by reindex. + Retries *types.Retries `json:"retries,omitempty"` + SliceId *int `json:"slice_id,omitempty"` + Task *string `json:"task,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a reindex response. + // It has meaning only when using the task API, where it indicates the next time + // (in milliseconds since epoch) that a throttled request will be run again in + // order to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If any of the requests that ran during the reindex timed out, it is `true`. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The total milliseconds the entire operation took. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. + // That is to say, a document with the same ID already existed before the + // reindex updated it. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that occurred. 
+ VersionConflicts *int64 `json:"version_conflicts,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/reindex_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/reindex_rethrottle.go new file mode 100644 index 000000000..4eb827324 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/reindex_rethrottle.go @@ -0,0 +1,378 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. +// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. +package reindexrethrottle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + taskidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ReindexRethrottle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + taskid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewReindexRethrottle type alias for index. +type NewReindexRethrottle func(taskid string) *ReindexRethrottle + +// NewReindexRethrottleFunc returns a new instance of ReindexRethrottle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewReindexRethrottleFunc(tp elastictransport.Interface) NewReindexRethrottle { + return func(taskid string) *ReindexRethrottle { + n := New(tp) + + n._taskid(taskid) + + return n + } +} + +// Throttle a reindex operation. +// +// Change the number of requests per second for a particular reindex operation. 
+// For example: +// +// ``` +// POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +// ``` +// +// Rethrottling that speeds up the query takes effect immediately. +// Rethrottling that slows down the query will take effect after completing the +// current batch. +// This behavior prevents scroll timeouts. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex +func New(tp elastictransport.Interface) *ReindexRethrottle { + r := &ReindexRethrottle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ReindexRethrottle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == taskidMask: + path.WriteString("/") + path.WriteString("_reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "taskid", r.taskid) + } + path.WriteString(r.taskid) + path.WriteString("/") + path.WriteString("_rethrottle") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ReindexRethrottle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "reindex_rethrottle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "reindex_rethrottle") + if reader := instrument.RecordRequestBody(ctx, "reindex_rethrottle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "reindex_rethrottle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ReindexRethrottle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a reindexrethrottle.Response +func (r ReindexRethrottle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "reindex_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
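+// A hedged usage sketch only (the `es` typed client variable and the task ID
+// are assumptions, not part of this file):
+//
+// ```
+// ok, err := es.ReindexRethrottle("r1A2WoRbTwKZ516z6NEs5A:36619").
+//	RequestsPerSecond("-1").
+//	IsSuccess(context.Background())
+// ```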
+func (r ReindexRethrottle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "reindex_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ReindexRethrottle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ReindexRethrottle headers map. +func (r *ReindexRethrottle) Header(key, value string) *ReindexRethrottle { + r.headers.Set(key, value) + + return r +} + +// TaskId The task identifier, which can be found by using the tasks API. +// API Name: taskid +func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { + r.paramSet |= taskidMask + r.taskid = taskid + + return r +} + +// RequestsPerSecond The throttle for this request in sub-requests per second. +// It can be either `-1` to turn off throttling or any decimal number like `1.7` +// or `12` to throttle to that level. +// API name: requests_per_second +func (r *ReindexRethrottle) RequestsPerSecond(requestspersecond string) *ReindexRethrottle { + r.values.Set("requests_per_second", requestspersecond) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ReindexRethrottle) ErrorTrace(errortrace bool) *ReindexRethrottle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ReindexRethrottle) FilterPath(filterpaths ...string) *ReindexRethrottle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ReindexRethrottle) Human(human bool) *ReindexRethrottle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ReindexRethrottle) Pretty(pretty bool) *ReindexRethrottle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/response.go new file mode 100644 index 000000000..e68cdfe9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reindexrethrottle + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package reindexrethrottle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 +type Response struct { + Nodes map[string]types.ReindexNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ReindexNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/render_search_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/render_search_template.go new file mode 100644 index 000000000..275b32155 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/render_search_template.go @@ -0,0 +1,418 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Render a search template. 
+// +// Render a search template as a search request body. +package rendersearchtemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RenderSearchTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRenderSearchTemplate type alias for index. +type NewRenderSearchTemplate func() *RenderSearchTemplate + +// NewRenderSearchTemplateFunc returns a new instance of RenderSearchTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRenderSearchTemplateFunc(tp elastictransport.Interface) NewRenderSearchTemplate { + return func() *RenderSearchTemplate { + n := New(tp) + + return n + } +} + +// Render a search template. +// +// Render a search template as a search request body. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template +func New(tp elastictransport.Interface) *RenderSearchTemplate { + r := &RenderSearchTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RenderSearchTemplate) Raw(raw io.Reader) *RenderSearchTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RenderSearchTemplate) Request(req *Request) *RenderSearchTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
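+// It builds the request without executing it, which can be useful for
+// inspection or dry runs. A hedged sketch only (`tp` is an assumed
+// elastictransport.Interface):
+//
+// ```
+// req, err := rendersearchtemplate.New(tp).Id("my-template").HttpRequest(context.Background())
+// ```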
+func (r *RenderSearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for RenderSearchTemplate: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_render")
+		path.WriteString("/")
+		path.WriteString("template")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r RenderSearchTemplate) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "render_search_template")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "render_search_template")
+		if reader := instrument.RecordRequestBody(ctx, "render_search_template", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "render_search_template")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the RenderSearchTemplate query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a rendersearchtemplate.Response
+func (r RenderSearchTemplate) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "render_search_template")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the RenderSearchTemplate headers map.
+func (r *RenderSearchTemplate) Header(key, value string) *RenderSearchTemplate {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *RenderSearchTemplate) ErrorTrace(errortrace bool) *RenderSearchTemplate {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *RenderSearchTemplate) FilterPath(filterpaths ...string) *RenderSearchTemplate {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *RenderSearchTemplate) Human(human bool) *RenderSearchTemplate {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *RenderSearchTemplate) Pretty(pretty bool) *RenderSearchTemplate {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: file
+func (r *RenderSearchTemplate) File(file string) *RenderSearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.File = &file
+
+	return r
+}
+
+// The ID of the search template to render.
+// If no `source` is specified, this or the `<template-id>` request path
+// parameter is required.
+// If you specify both this parameter and the `<template-id>` parameter, the API
+// uses only `<template-id>`.
+// API name: id
+func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Id = &id
+
+	return r
+}
+
+// Key-value pairs used to replace Mustache variables in the template.
+// The key is the variable name.
+// The value is the variable value.
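+// As a hedged sketch only (the variable name and value are assumptions):
+//
+// ```
+// r.Params(map[string]json.RawMessage{
+//	"query_string": json.RawMessage(`"hello world"`),
+// })
+// ```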
+// API name: params
+func (r *RenderSearchTemplate) Params(params map[string]json.RawMessage) *RenderSearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Params = params
+	return r
+}
+
+func (r *RenderSearchTemplate) AddParam(key string, value json.RawMessage) *RenderSearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Ensure tmp always references an initialized map so the write below
+	// cannot panic on a nil map.
+	var tmp map[string]json.RawMessage
+	if r.req.Params == nil {
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.Params
+	}
+
+	tmp[key] = value
+
+	r.req.Params = tmp
+	return r
+}
+
+// An inline search template.
+// It supports the same parameters as the search API's request body.
+// These parameters also support Mustache variables.
+// If no `id` or `<template-id>` is specified, this parameter is required.
+// API name: source
+func (r *RenderSearchTemplate) Source(scriptsource types.ScriptSourceVariant) *RenderSearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Source = *scriptsource.ScriptSourceCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/request.go
new file mode 100644
index 000000000..6277b0609
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/request.go
@@ -0,0 +1,155 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package rendersearchtemplate
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package rendersearchtemplate
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L26-L77
+type Request struct {
+	File *string `json:"file,omitempty"`
+	// Id The ID of the search template to render.
+	// If no `source` is specified, this or the `<template-id>` request path
+	// parameter is required.
+	// If you specify both this parameter and the `<template-id>` parameter, the API
+	// uses only `<template-id>`.
+	Id *string `json:"id,omitempty"`
+	// Params Key-value pairs used to replace Mustache variables in the template.
+	// The key is the variable name.
+	// The value is the variable value.
+	Params map[string]json.RawMessage `json:"params,omitempty"`
+	// Source An inline search template.
+	// It supports the same parameters as the search API's request body.
+	// These parameters also support Mustache variables.
+	// If no `id` or `<template-id>` is specified, this parameter is required.
+	Source types.ScriptSource `json:"source,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{
+		Params: make(map[string]json.RawMessage, 0),
+	}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Rendersearchtemplate request: %w", err)
+	}
+
+	return &req, nil
+}
+
+func (s *Request) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "file":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "File", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.File = &o
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "params":
+			if s.Params == nil {
+				s.Params = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Params); err != nil {
+				return fmt.Errorf("%s | %w", "Params", err)
+			}
+
+		case "source":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Source", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		source_field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Source", err)
+				}
+
+				switch t {
+
+				case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version":
+					o := types.NewSearchRequestBody()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Source", err)
+					}
+					s.Source = o
+					break source_field
+
+				}
+			}
+			if s.Source == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Source); err != nil {
+					return fmt.Errorf("%s | %w", "Source", err)
+				}
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/response.go
new file mode 100644
index 000000000..e71baea20
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate/response.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package rendersearchtemplate
+
+import (
+	"encoding/json"
+)
+
+// Response holds the response body struct for the package rendersearchtemplate
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25
+type Response struct {
+	TemplateOutput map[string]json.RawMessage `json:"template_output"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{
+		TemplateOutput: make(map[string]json.RawMessage, 0),
+	}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/request.go
new file mode 100644
index 000000000..066f8e44c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/request.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package scriptspainlessexecute
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext"
+)
+
+// Request holds the request body struct for the package scriptspainlessexecute
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L64
+type Request struct {
+
+	// Context The context that the script should run in.
+	// NOTE: Result ordering in the field contexts is not guaranteed.
+	Context *painlesscontext.PainlessContext `json:"context,omitempty"`
+	// ContextSetup Additional parameters for the `context`.
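+	// For example, the `filter` context usually takes an index and a sample
+	// document; as a hedged sketch only (names and values are assumptions):
+	//
+	//	{ "index": "my-index", "document": { "message": "hello world" } }
+	//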
+ // NOTE: This parameter is required for all contexts except `painless_test`, + // which is the default if no value is provided for `context`. + ContextSetup *types.PainlessContextSetup `json:"context_setup,omitempty"` + // Script The Painless script to run. + Script *types.Script `json:"script,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Scriptspainlessexecute request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/response.go new file mode 100644 index 000000000..143666c67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package scriptspainlessexecute + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package scriptspainlessexecute +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 +type Response struct { + Result json.RawMessage `json:"result,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go new file mode 100644 index 000000000..cc414447d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go @@ -0,0 +1,402 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a script. +// +// Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. +// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. +package scriptspainlessexecute + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ScriptsPainlessExecute struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewScriptsPainlessExecute type alias for index. +type NewScriptsPainlessExecute func() *ScriptsPainlessExecute + +// NewScriptsPainlessExecuteFunc returns a new instance of ScriptsPainlessExecute with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewScriptsPainlessExecuteFunc(tp elastictransport.Interface) NewScriptsPainlessExecute { + return func() *ScriptsPainlessExecute { + n := New(tp) + + return n + } +} + +// Run a script. +// +// Runs a script and returns a result. +// Use this API to build and test scripts, such as when defining a script for a +// runtime field. +// This API requires very few dependencies and is especially useful if you don't +// have permissions to write documents on a cluster. +// +// The API uses several _contexts_, which control how scripts are run, what +// variables are available at runtime, and what the return type is. +// +// Each context requires a script, but additional parameters depend on the +// context you're using for that script. 
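+//
+// A hedged `painless_test` sketch only (the `es` typed client variable is an
+// assumption, not part of this file):
+//
+// ```
+// res, err := es.ScriptsPainlessExecute().
+//	Raw(strings.NewReader(`{"script":{"source":"params.count / 2","params":{"count":100.0}}}`)).
+//	Do(context.Background())
+// ```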
+// +// https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples +func New(tp elastictransport.Interface) *ScriptsPainlessExecute { + r := &ScriptsPainlessExecute{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ScriptsPainlessExecute) Raw(raw io.Reader) *ScriptsPainlessExecute { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ScriptsPainlessExecute) Request(req *Request) *ScriptsPainlessExecute { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ScriptsPainlessExecute) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ScriptsPainlessExecute: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_scripts") + path.WriteString("/") + path.WriteString("painless") + path.WriteString("/") + path.WriteString("_execute") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
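
The `HttpRequest` method is also useful on its own for debugging, since it builds the request without sending it. A sketch, assuming `tp` is any `elastictransport.Interface` (for example, the transport of an existing client): per the path switch above, the output should be `POST /_scripts/painless/_execute` with the versioned media type.

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute"
)

// inspectRequest builds the *http.Request without executing it.
func inspectRequest(tp elastictransport.Interface, req *scriptspainlessexecute.Request) error {
	httpReq, err := scriptspainlessexecute.New(tp).Request(req).HttpRequest(context.Background())
	if err != nil {
		return err
	}
	fmt.Println(httpReq.Method, httpReq.URL.Path, httpReq.Header.Get("Content-Type"))
	return nil
}
```
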
+func (r ScriptsPainlessExecute) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "scripts_painless_execute") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "scripts_painless_execute") + if reader := instrument.RecordRequestBody(ctx, "scripts_painless_execute", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "scripts_painless_execute") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ScriptsPainlessExecute query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a scriptspainlessexecute.Response +func (r ScriptsPainlessExecute) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "scripts_painless_execute") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ScriptsPainlessExecute headers map. +func (r *ScriptsPainlessExecute) Header(key, value string) *ScriptsPainlessExecute { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ScriptsPainlessExecute) ErrorTrace(errortrace bool) *ScriptsPainlessExecute { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
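
On the caller side, `Do` returns the decoded error body as the `error` value for non-2xx responses, so the HTTP status and reason can be recovered with `errors.As`. A sketch, assuming the usual shape of `types.ElasticsearchError` (an `ErrorCause` plus the HTTP `Status`), which is defined outside this diff hunk:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// runScript distinguishes transport failures from Elasticsearch-level
// error responses returned by Do.
func runScript(ctx context.Context, tp elastictransport.Interface, req *scriptspainlessexecute.Request) {
	res, err := scriptspainlessexecute.New(tp).Request(req).Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			fmt.Printf("elasticsearch error, status %d: %s\n", esErr.Status, esErr.ErrorCause.Type)
			return
		}
		fmt.Println("transport error:", err)
		return
	}
	fmt.Println("result:", string(res.Result))
}
```
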
+// API name: filter_path +func (r *ScriptsPainlessExecute) FilterPath(filterpaths ...string) *ScriptsPainlessExecute { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ScriptsPainlessExecute) Human(human bool) *ScriptsPainlessExecute { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ScriptsPainlessExecute) Pretty(pretty bool) *ScriptsPainlessExecute { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The context that the script should run in. +// NOTE: Result ordering in the field contexts is not guaranteed. +// API name: context +func (r *ScriptsPainlessExecute) Context(context painlesscontext.PainlessContext) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Context = &context + return r +} + +// Additional parameters for the `context`. +// NOTE: This parameter is required for all contexts except `painless_test`, +// which is the default if no value is provided for `context`. +// API name: context_setup +func (r *ScriptsPainlessExecute) ContextSetup(contextsetup types.PainlessContextSetupVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ContextSetup = contextsetup.PainlessContextSetupCaster() + + return r +} + +// The Painless script to run. +// API name: script +func (r *ScriptsPainlessExecute) Script(script types.ScriptVariant) *ScriptsPainlessExecute { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Script = script.ScriptCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/request.go new file mode 100644 index 000000000..fe8936b5f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/request.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
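
Before moving on to the scroll package below, a sketch tying the pieces of `scriptspainlessexecute` together end to end. `Result` is a `json.RawMessage` whose shape depends on the context; the body here reuses the grounded `FromJSON` path rather than the variant setters.

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute"
)

// executePainless runs a trivial script through the fluent API above.
func executePainless(ctx context.Context, tp elastictransport.Interface) error {
	req, err := scriptspainlessexecute.NewRequest().FromJSON(
		`{"script": {"source": "params.a + params.b", "params": {"a": 1, "b": 2}}}`)
	if err != nil {
		return err
	}
	res, err := scriptspainlessexecute.New(tp).Pretty(true).Request(req).Do(ctx)
	if err != nil {
		return err
	}
	// For the default painless_test context the raw result is the
	// script's rendered output.
	fmt.Println(string(res.Result))
	return nil
}
```
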
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package scroll + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package scroll +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scroll/ScrollRequest.ts#L24-L88 +type Request struct { + + // Scroll The period to retain the search context for scrolling. + Scroll types.Duration `json:"scroll,omitempty"` + // ScrollId The scroll ID of the search. + ScrollId string `json:"scroll_id"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Scroll request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scroll": + if err := dec.Decode(&s.Scroll); err != nil { + return fmt.Errorf("%s | %w", "Scroll", err) + } + + case "scroll_id": + if err := dec.Decode(&s.ScrollId); err != nil { + return fmt.Errorf("%s | %w", "ScrollId", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/response.go new file mode 100644 index 000000000..d1c501bc7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/response.go @@ -0,0 +1,813 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
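
The scroll request body defined above is tiny, and its `FromJSON` helper and custom `UnmarshalJSON` accept the same two fields. A sketch; the scroll ID below is a placeholder, not a real ID.

```go
package example

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll"
)

// buildScrollRequest round-trips the two-field scroll body.
func buildScrollRequest() (*scroll.Request, error) {
	req, err := scroll.NewRequest().FromJSON(`{"scroll": "1m", "scroll_id": "<scroll-id>"}`)
	if err != nil {
		return nil, err
	}
	fmt.Println(req.ScrollId, req.Scroll)
	return req, nil
}
```
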
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package scroll + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package scroll +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scroll/ScrollResponse.ts#L22-L25 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. 
+ // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := 
types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + 
s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/scroll.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/scroll.go new file mode 100644 index 000000000..c190ed676 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll/scroll.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a scrolling search. +// +// IMPORTANT: The scroll API is no longer recommend for deep pagination. If you +// need to preserve the index state while paging through more than 10,000 hits, +// use the `search_after` parameter with a point in time (PIT). +// +// The scroll API gets large sets of results from a single scrolling search +// request. +// To get the necessary scroll ID, submit a search API request that includes an +// argument for the `scroll` query parameter. +// The `scroll` parameter indicates how long Elasticsearch should retain the +// search context for the request. +// The search response returns a scroll ID in the `_scroll_id` response body +// parameter. +// You can then use the scroll ID with the scroll API to retrieve the next batch +// of results for the request. 
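
The `UnmarshalJSON` above relies on the typed-keys convention: each aggregation or suggester key arrives as `<type>#<name>` (for example `sterms#tags`) and is decoded into the matching concrete type, then stored under the bare name. A consumer-side sketch; `tags` is a hypothetical aggregation name.

```go
package example

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// readTerms looks up a decoded aggregate by its bare name and narrows
// it with a type assertion, mirroring the "sterms" branch above.
func readTerms(aggs map[string]types.Aggregate) {
	if agg, ok := aggs["tags"]; ok {
		if terms, ok := agg.(*types.StringTermsAggregate); ok {
			fmt.Println("decoded string terms aggregate:", terms != nil)
		}
	}
}
```
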
+// If the Elasticsearch security features are enabled, the access to the results +// of a specific scroll ID is restricted to the user or API key that submitted +// the search. +// +// You can also use the scroll API to specify a new scroll parameter that +// extends or shortens the retention period for the search context. +// +// IMPORTANT: Results from a scrolling search reflect the state of the index at +// the time of the initial search request. Subsequent indexing or document +// changes only affect later search and scroll requests. +package scroll + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Scroll struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + scrollid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewScroll type alias for index. +type NewScroll func() *Scroll + +// NewScrollFunc returns a new instance of Scroll with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewScrollFunc(tp elastictransport.Interface) NewScroll { + return func() *Scroll { + n := New(tp) + + return n + } +} + +// Run a scrolling search. +// +// IMPORTANT: The scroll API is no longer recommend for deep pagination. If you +// need to preserve the index state while paging through more than 10,000 hits, +// use the `search_after` parameter with a point in time (PIT). +// +// The scroll API gets large sets of results from a single scrolling search +// request. +// To get the necessary scroll ID, submit a search API request that includes an +// argument for the `scroll` query parameter. +// The `scroll` parameter indicates how long Elasticsearch should retain the +// search context for the request. +// The search response returns a scroll ID in the `_scroll_id` response body +// parameter. +// You can then use the scroll ID with the scroll API to retrieve the next batch +// of results for the request. +// If the Elasticsearch security features are enabled, the access to the results +// of a specific scroll ID is restricted to the user or API key that submitted +// the search. +// +// You can also use the scroll API to specify a new scroll parameter that +// extends or shortens the retention period for the search context. +// +// IMPORTANT: Results from a scrolling search reflect the state of the index at +// the time of the initial search request. Subsequent indexing or document +// changes only affect later search and scroll requests. 
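
A sketch of the pagination pattern this package comment describes: an initial search request (not shown) supplies the first scroll ID, then each scroll call returns the next batch plus the ID for the one after it. Field access assumes `resp.Hits.Hits` and `resp.ScrollId_` as declared in the response type above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll"
)

// drainScroll pages through a scrolling search until no hits remain.
func drainScroll(ctx context.Context, tp elastictransport.Interface, scrollID string) error {
	for {
		resp, err := scroll.New(tp).ScrollId(scrollID).Do(ctx)
		if err != nil {
			return err
		}
		if len(resp.Hits.Hits) == 0 {
			return nil // search context exhausted
		}
		for _, hit := range resp.Hits.Hits {
			fmt.Println(string(hit.Source_))
		}
		// Use the freshest scroll ID for the next round trip.
		if resp.ScrollId_ != nil {
			scrollID = *resp.ScrollId_
		}
	}
}
```

Production code would also pass a `scroll` keepalive on each call, since, as the comment above notes, each scroll request can extend or shorten the retention period of the search context.
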
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll +func New(tp elastictransport.Interface) *Scroll { + r := &Scroll{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Scroll) Raw(raw io.Reader) *Scroll { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Scroll) Request(req *Request) *Scroll { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Scroll) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Scroll: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("scroll") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
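
As documented on `Raw` above, a pre-serialized body bypasses the typed `Request` entirely and takes precedence when both are set. A short sketch:

```go
package example

import (
	"context"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll"
)

// scrollRaw sends an already-serialized JSON body; the Content-Type
// header is still filled in automatically by HttpRequest.
func scrollRaw(ctx context.Context, tp elastictransport.Interface, body string) error {
	_, err := scroll.New(tp).
		Raw(strings.NewReader(body)).
		Do(ctx)
	return err
}
```
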
+func (r Scroll) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "scroll") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "scroll") + if reader := instrument.RecordRequestBody(ctx, "scroll", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "scroll") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Scroll query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a scroll.Response +func (r Scroll) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "scroll") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Scroll headers map. +func (r *Scroll) Header(key, value string) *Scroll { + r.headers.Set(key, value) + + return r +} + +// RestTotalHitsAsInt If true, the API response’s hit.total property is returned as an integer. If +// false, the API response’s hit.total property is returned as an object. +// API name: rest_total_hits_as_int +func (r *Scroll) RestTotalHitsAsInt(resttotalhitsasint bool) *Scroll { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Scroll) ErrorTrace(errortrace bool) *Scroll { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Scroll) FilterPath(filterpaths ...string) *Scroll { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Scroll) Human(human bool) *Scroll { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Scroll) Pretty(pretty bool) *Scroll { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The period to retain the search context for scrolling. +// API name: scroll +func (r *Scroll) Scroll(duration types.DurationVariant) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Scroll = *duration.DurationCaster() + + return r +} + +// The scroll ID of the search. +// API name: scroll_id +func (r *Scroll) ScrollId(scrollid string) *Scroll { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ScrollId = scrollid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/request.go new file mode 100644 index 000000000..c22fca598 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/request.go @@ -0,0 +1,559 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
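
The search request type added below is much larger, but it ships the same `FromJSON` helper as the other body types in this diff, so a query written as raw JSON can be lifted into the typed struct. A sketch; the `title` field and match query are illustrative.

```go
package example

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
)

// buildSearchRequest parses a raw search body into search.Request.
func buildSearchRequest() (*search.Request, error) {
	req, err := search.NewRequest().FromJSON(`{
	  "size": 10,
	  "query": { "match": { "title": { "query": "painless" } } },
	  "track_total_hits": true
	}`)
	if err != nil {
		return nil, err
	}
	fmt.Println("query set:", req.Query != nil, "size set:", req.Size != nil)
	return req, nil
}
```
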
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/SearchRequest.ts#L53-L588 +type Request struct { + + // Aggregations Defines the aggregations that are run as part of the search request. + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + // Collapse Collapses search results the values of the specified field. + Collapse *types.FieldCollapse `json:"collapse,omitempty"` + // DocvalueFields An array of wildcard (`*`) field patterns. + // The request returns doc values for field names matching these patterns in the + // `hits.fields` property of the response. + DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` + // Explain If `true`, the request returns detailed information about score computation + // as part of a hit. + Explain *bool `json:"explain,omitempty"` + // Ext Configuration of search extensions defined by Elasticsearch plugins. + Ext map[string]json.RawMessage `json:"ext,omitempty"` + // Fields An array of wildcard (`*`) field patterns. + // The request returns values for field names matching these patterns in the + // `hits.fields` property of the response. + Fields []types.FieldAndFormat `json:"fields,omitempty"` + // From The starting document offset, which must be non-negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one + // or more fields in your search results. + Highlight *types.Highlight `json:"highlight,omitempty"` + // IndicesBoost Boost the `_score` of documents from specified indices. + // The boost value is the factor by which scores are multiplied. + // A boost value greater than `1.0` increases the score. + // A boost value between `0` and `1.0` decreases the score. + IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` + // Knn The approximate kNN search to run. + Knn []types.KnnSearch `json:"knn,omitempty"` + // MinScore The minimum `_score` for matching documents. + // Documents with a lower `_score` are not included in search results and + // results collected by aggregations. + MinScore *types.Float64 `json:"min_score,omitempty"` + // Pit Limit the search to a point in time (PIT). + // If you provide a PIT, you cannot specify an `` in the request path. + Pit *types.PointInTimeReference `json:"pit,omitempty"` + // PostFilter Use the `post_filter` parameter to filter search results. + // The search hits are filtered after the aggregations are calculated. + // A post filter has no impact on the aggregation results. + PostFilter *types.Query `json:"post_filter,omitempty"` + // Profile Set to `true` to return detailed timing information about the execution of + // individual components in a search request. + // NOTE: This is a debugging tool and adds significant overhead to search + // execution. + Profile *bool `json:"profile,omitempty"` + // Query The search definition using the Query DSL. 
+ Query *types.Query `json:"query,omitempty"` + // Rank The Reciprocal Rank Fusion (RRF) to use. + Rank *types.RankContainer `json:"rank,omitempty"` + // Rescore Can be used to improve precision by reordering just the top (for example 100 + // - 500) documents returned by the `query` and `post_filter` phases. + Rescore []types.Rescore `json:"rescore,omitempty"` + // Retriever A retriever is a specification to describe top documents returned from a + // search. + // A retriever replaces other elements of the search API that also return top + // documents such as `query` and `knn`. + Retriever *types.RetrieverContainer `json:"retriever,omitempty"` + // RuntimeMappings One or more runtime fields in the search request. + // These fields take precedence over mapped fields with the same name. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. + ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + // SearchAfter Used to retrieve the next page of hits using a set of sort values from the + // previous page. + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // SeqNoPrimaryTerm If `true`, the request returns sequence number and primary term of the last + // modification of each hit. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The number of hits to return, which must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` property. + Size *int `json:"size,omitempty"` + // Slice Split a scrolled search into multiple slices that can be consumed + // independently. + Slice *types.SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of : pairs. + Sort []types.SortCombinations `json:"sort,omitempty"` + // Source_ The source fields that are returned for matching documents. + // These fields are returned in the `hits._source` property of the search + // response. + // If the `stored_fields` property is specified, the `_source` property defaults + // to `false`. + // Otherwise, it defaults to `true`. + Source_ types.SourceConfig `json:"_source,omitempty"` + // Stats The stats groups to associate with the search. + // Each group maintains a statistics aggregation for its associated searches. + // You can retrieve these stats using the indices stats API. + Stats []string `json:"stats,omitempty"` + // StoredFields A comma-separated list of stored fields to return as part of a hit. + // If no fields are specified, no stored fields are included in the response. + // If this field is specified, the `_source` property defaults to `false`. + // You can pass `_source: true` to return both source fields and stored fields + // in the search response. + StoredFields []string `json:"stored_fields,omitempty"` + // Suggest Defines a suggester that provides similar looking terms based on a provided + // text. + Suggest *types.Suggester `json:"suggest,omitempty"` + // TerminateAfter The maximum number of documents to collect for each shard. + // If a query reaches this limit, Elasticsearch terminates the query early. + // Elasticsearch collects documents before sorting. + // + // IMPORTANT: Use with caution. + // Elasticsearch applies this property to each shard handling the request. + // When possible, let Elasticsearch perform early termination automatically. 
+ // Avoid specifying this property for requests that target data streams with + // backing indices across multiple data tiers. + // + // If set to `0` (default), the query does not terminate early. + TerminateAfter *int64 `json:"terminate_after,omitempty"` + // Timeout The period of time to wait for a response from each shard. + // If no response is received before the timeout expires, the request fails and + // returns an error. + // Defaults to no timeout. + Timeout *string `json:"timeout,omitempty"` + // TrackScores If `true`, calculate and return document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // TrackTotalHits Number of hits matching the query to count accurately. + // If `true`, the exact number of hits is returned at the cost of some + // performance. + // If `false`, the response does not include the total number of hits matching + // the query. + TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"` + // Version If `true`, the request returns the document version as part of a hit. + Version *bool `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.Aggregations, 0), + Ext: make(map[string]json.RawMessage, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Search request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return fmt.Errorf("%s | %w", "DocvalueFields", err) + } + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "ext": + if s.Ext == nil { + s.Ext = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Ext); err != nil { + return fmt.Errorf("%s | %w", "Ext", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "indices_boost": + if err := dec.Decode(&s.IndicesBoost); err != nil { + return fmt.Errorf("%s | %w", "IndicesBoost", err) + } + + case "knn": + rawMsg := 
json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewKnnSearch() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Knn", err) + } + + s.Knn = append(s.Knn, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Knn); err != nil { + return fmt.Errorf("%s | %w", "Knn", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := types.Float64(value) + s.MinScore = &f + case float64: + f := types.Float64(v) + s.MinScore = &f + } + + case "pit": + if err := dec.Decode(&s.Pit); err != nil { + return fmt.Errorf("%s | %w", "Pit", err) + } + + case "post_filter": + if err := dec.Decode(&s.PostFilter); err != nil { + return fmt.Errorf("%s | %w", "PostFilter", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rank": + if err := dec.Decode(&s.Rank); err != nil { + return fmt.Errorf("%s | %w", "Rank", err) + } + + case "rescore": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewRescore() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + + s.Rescore = append(s.Rescore, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]types.ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "seq_no_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err) + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "slice": + if err := dec.Decode(&s.Slice); err != nil { + return fmt.Errorf("%s | %w", "Slice", err) + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := types.NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + } + + case "suggest": + if err := dec.Decode(&s.Suggest); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + + case "terminate_after": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TerminateAfter", err) + } + s.TerminateAfter = &value + case float64: + f := int64(v) + s.TerminateAfter = &f + } + + case "timeout": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timeout = &o + + case "track_scores": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrackScores", err) + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "track_total_hits": + if err := dec.Decode(&s.TrackTotalHits); err != nil { + return fmt.Errorf("%s | %w", "TrackTotalHits", err) + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/response.go new file mode 100644 index 000000000..12ab06245 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/response.go @@ -0,0 +1,813 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/SearchResponse.ts#L34-L37 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. 
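A sketch of consuming this Response once Do has returned it. It assumes the usual shape of the vendored types package (types.Hit carries `_source` as a json.RawMessage in its Source_ field), which is not itself shown in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
)

// printHits walks a search.Response and decodes each hit's raw _source.
func printHits(res *search.Response) error {
	fmt.Printf("took=%dms timed_out=%v\n", res.Took, res.TimedOut)
	for _, hit := range res.Hits.Hits {
		var doc map[string]any
		if err := json.Unmarshal(hit.Source_, &doc); err != nil {
			return err
		}
		fmt.Println(doc)
	}
	return nil
}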
+ // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := 
types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + 
s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/search.go new file mode 100644 index 000000000..5fb51766e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/search/search.go @@ -0,0 +1,1344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a search. +// +// Get search hits that match the query defined in the request. +// You can provide search queries using the `q` query string parameter or the +// request body. +// If both are specified, only the query parameter is used. +// +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. 
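A sketch of the slicing described above, assuming a PIT ID already obtained from the open point-in-time API; the SlicedScroll and PointInTimeReference field shapes are assumptions about the vendored types package:

package main

import (
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// sliceRequest builds slice i of max slices over one shared PIT; the caller
// would issue one such request per slice, typically one goroutine each.
func sliceRequest(pitID string, i string, max int) *search.Request {
	req := search.NewRequest()
	req.Pit = &types.PointInTimeReference{Id: pitID, KeepAlive: "1m"}
	req.Slice = &types.SlicedScroll{Id: i, Max: max}
	req.Sort = []types.SortCombinations{"_doc"} // cheapest stable order for slicing
	return req
}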
+// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. +// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. +package search + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Search struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearch type alias for index. +type NewSearch func() *Search + +// NewSearchFunc returns a new instance of Search with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchFunc(tp elastictransport.Interface) NewSearch { + return func() *Search { + n := New(tp) + + return n + } +} + +// Run a search. +// +// Get search hits that match the query defined in the request. +// You can provide search queries using the `q` query string parameter or the +// request body. +// If both are specified, only the query parameter is used. +// +// If the Elasticsearch security features are enabled, you must have the read +// index privilege for the target data stream, index, or alias. For +// cross-cluster search, refer to the documentation about configuring CCS +// privileges. +// To search a point in time (PIT) for an alias, you must have the `read` index +// privilege for the alias's data streams or indices. +// +// **Search slicing** +// +// When paging through a large number of documents, it can be helpful to split +// the search into multiple slices to consume them independently with the +// `slice` and `pit` properties. +// By default the splitting is done first on the shards, then locally on each +// shard. +// The local splitting partitions the shard into contiguous ranges based on +// Lucene document IDs. +// +// For instance if the number of shards is equal to 2 and you request 4 slices, +// the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are +// assigned to the second shard. +// +// IMPORTANT: The same point-in-time ID should be used for all slices. 
+// If different PIT IDs are used, slices can overlap and miss documents. +// This situation can occur because the splitting criterion is based on Lucene +// document IDs, which are not stable across changes to the index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search +func New(tp elastictransport.Interface) *Search { + r := &Search{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Search) Raw(raw io.Reader) *Search { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Search) Request(req *Request) *Search { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Search: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_search") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
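Of the two input paths above, Request marshals the typed body while Raw short-circuits serialization entirely. A sketch of the raw path, assuming `tp` comes from an existing client configuration and the index pattern is hypothetical:

package main

import (
	"context"
	"net/http"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
)

// rawSearch sends a hand-written JSON body via Raw, bypassing the typed
// Request, and returns the raw *http.Response from Perform.
func rawSearch(tp elastictransport.Interface) (*http.Response, error) {
	return search.New(tp).
		Index("logs-*"). // hypothetical index pattern
		Raw(strings.NewReader(`{"query":{"match_all":{}},"size":0}`)).
		Perform(context.Background())
}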
+func (r Search) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search") + if reader := instrument.RecordRequestBody(ctx, "search", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Search query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a search.Response +func (r Search) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Search headers map. +func (r *Search) Header(key, value string) *Search { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Search) Index(index string) *Search { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
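The Do method above decodes 2xx responses into a Response and everything else into a *types.ElasticsearchError, so both outcomes can be handled from a single error branch. A sketch, with the index name hypothetical:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// doSearch shows the happy path of Do plus the typed error branch that Do
// builds via types.NewElasticsearchError on non-2xx responses.
func doSearch(tp elastictransport.Interface) error {
	res, err := search.New(tp).Index("logs-*").Do(context.Background())
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			return fmt.Errorf("search rejected with HTTP %d: %w", esErr.Status, esErr)
		}
		return err
	}
	if total := res.Hits.Total; total != nil {
		fmt.Printf("matched %d hits in %dms\n", total.Value, res.Took)
	}
	return nil
}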
+// API name: allow_no_indices +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// AllowPartialSearchResults If `true` and there are shard request timeouts or shard failures, the request +// returns partial results. +// If `false`, it returns an error with no partial results. +// +// To override the default behavior, you can set the +// `search.default_allow_partial_results` cluster setting to `false`. +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + +// Analyzer The analyzer to use for the query string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyzer +func (r *Search) Analyzer(analyzer string) *Search { + r.values.Set("analyzer", analyzer) + + return r +} + +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: analyze_wildcard +func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// BatchedReduceSize The number of shard results that should be reduced at once on the +// coordinating node. +// If the potential number of shards in the request can be large, this value +// should be used as a protection mechanism to reduce the memory overhead per +// search request. +// API name: batched_reduce_size +func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { + r.values.Set("batched_reduce_size", batchedreducesize) + + return r +} + +// CcsMinimizeRoundtrips If `true`, network round-trips between the coordinating node and the remote +// clusters are minimized when running cross-cluster search (CCS) requests. +// API name: ccs_minimize_roundtrips +func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// DefaultOperator The default operator for the query string query: `AND` or `OR`. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: default_operator +func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// Df The field to use as a default when no field prefix is given in the query +// string. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: df +func (r *Search) Df(df string) *Search { + r.values.Set("df", df) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values such as `open,hidden`. 
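The query-string (`q`) parameters above compose naturally; a sketch of a Lucene-syntax "lite" search, with the index and default field names hypothetical:

package main

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator"
)

// liteSearch exercises the q-style parameters documented above: a query
// string, a default field, an AND default operator, and analyzed wildcards.
func liteSearch(tp elastictransport.Interface) (*search.Response, error) {
	return search.New(tp).
		Index("articles").
		Q("go AND elasticsearch").
		Df("title").
		DefaultOperator(operator.And).
		AnalyzeWildcard(true).
		Do(context.Background())
}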
+// API name: expand_wildcards
+func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search {
+	tmp := []string{}
+	for _, item := range expandwildcards {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("expand_wildcards", strings.Join(tmp, ","))
+
+	return r
+}
+
+// IgnoreThrottled If `true`, concrete, expanded or aliased indices will be ignored when frozen.
+// API name: ignore_throttled
+func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search {
+	r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled))
+
+	return r
+}
+
+// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed
+// index.
+// API name: ignore_unavailable
+func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// IncludeNamedQueriesScore If `true`, the response includes the score contribution from any named
+// queries.
+//
+// This functionality reruns each named query on every hit in a search response.
+// Typically, this adds a small overhead to a request.
+// However, using computationally expensive named queries on a large number of
+// hits may add significant overhead.
+// API name: include_named_queries_score
+func (r *Search) IncludeNamedQueriesScore(includenamedqueriesscore bool) *Search {
+	r.values.Set("include_named_queries_score", strconv.FormatBool(includenamedqueriesscore))
+
+	return r
+}
+
+// Lenient If `true`, format-based query failures (such as providing text to a numeric
+// field) in the query string will be ignored.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: lenient
+func (r *Search) Lenient(lenient bool) *Search {
+	r.values.Set("lenient", strconv.FormatBool(lenient))
+
+	return r
+}
+
+// MaxConcurrentShardRequests The number of concurrent shard requests per node that the search runs
+// concurrently.
+// This value should be used to limit the impact of the search on the cluster in
+// order to limit the number of concurrent shard requests.
+// API name: max_concurrent_shard_requests
+func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *Search {
+	r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests))
+
+	return r
+}
+
+// Preference The nodes and shards used for the search.
+// By default, Elasticsearch selects from eligible nodes and shards using
+// adaptive replica selection, accounting for allocation awareness.
+// Valid values are:
+//
+// * `_only_local` to run the search only on shards on the local node.
+// * `_local` to, if possible, run the search on shards on the local node, or if
+// not, select shards using the default method.
+// * `_only_nodes:<node-id>,<node-id>` to run the search only on the specified
+// node IDs. If suitable shards exist on more than one selected node, use
+// shards on those nodes using the default method. If none of the specified
+// nodes are available, select shards from any available node using the default
+// method.
+// * `_prefer_nodes:<node-id>,<node-id>` to run the search, if possible, on the
+// specified node IDs. If not, select shards using the default method.
+// * `_shards:<shard>,<shard>` to run the search only on the specified shards.
+// You can combine this value with other `preference` values. However, the
+// `_shards` value must come first. For example: `_shards:2,3|_local`.
+// * `<custom-string>` (any string that does not start with `_`) to route
+// searches with the same `<custom-string>` to the same shards in the same
+// order.
+// API name: preference
+func (r *Search) Preference(preference string) *Search {
+	r.values.Set("preference", preference)
+
+	return r
+}
+
+// PreFilterShardSize A threshold that enforces a pre-filter roundtrip to prefilter search shards
+// based on query rewriting if the number of shards the search request expands
+// to exceeds the threshold.
+// This filter roundtrip can limit the number of shards significantly if for
+// instance a shard can not match any documents based on its rewrite method (if
+// date filters are mandatory to match but the shard bounds and the query are
+// disjoint).
+// When unspecified, the pre-filter phase is executed if any of these conditions
+// is met:
+//
+// * The request targets more than 128 shards.
+// * The request targets one or more read-only indices.
+// * The primary sort of the query targets an indexed field.
+// API name: pre_filter_shard_size
+func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search {
+	r.values.Set("pre_filter_shard_size", prefiltershardsize)
+
+	return r
+}
+
+// RequestCache If `true`, the caching of search results is enabled for requests where `size`
+// is `0`.
+// It defaults to index level settings.
+// API name: request_cache
+func (r *Search) RequestCache(requestcache bool) *Search {
+	r.values.Set("request_cache", strconv.FormatBool(requestcache))
+
+	return r
+}
+
+// Routing A custom value that is used to route operations to a specific shard.
+// API name: routing
+func (r *Search) Routing(routing string) *Search {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// Scroll The period to retain the search context for scrolling.
+// By default, this value cannot exceed `1d` (24 hours).
+// You can change this limit by using the `search.max_keep_alive` cluster-level
+// setting.
+// API name: scroll
+func (r *Search) Scroll(duration string) *Search {
+	r.values.Set("scroll", duration)
+
+	return r
+}
+
+// SearchType Indicates how distributed term frequencies are calculated for relevance
+// scoring.
+// API name: search_type
+func (r *Search) SearchType(searchtype searchtype.SearchType) *Search {
+	r.values.Set("search_type", searchtype.String())
+
+	return r
+}
+
+// SuggestField The field to use for suggestions.
+// API name: suggest_field
+func (r *Search) SuggestField(field string) *Search {
+	r.values.Set("suggest_field", field)
+
+	return r
+}
+
+// SuggestMode The suggest mode.
+// This parameter can be used only when the `suggest_field` and `suggest_text`
+// query string parameters are specified.
+// API name: suggest_mode
+func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search {
+	r.values.Set("suggest_mode", suggestmode.String())
+
+	return r
+}
+
+// SuggestSize The number of suggestions to return.
+// This parameter can be used only when the `suggest_field` and `suggest_text`
+// query string parameters are specified.
+// API name: suggest_size
+func (r *Search) SuggestSize(suggestsize string) *Search {
+	r.values.Set("suggest_size", suggestsize)
+
+	return r
+}
+
+// SuggestText The source text for which the suggestions should be returned.
+// This parameter can be used only when the `suggest_field` and `suggest_text`
+// query string parameters are specified.
+// API name: suggest_text
+func (r *Search) SuggestText(suggesttext string) *Search {
+	r.values.Set("suggest_text", suggesttext)
+
+	return r
+}
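The three suggest_* parameters above only take effect together; a sketch of a did-you-mean style request, with the field name and input text hypothetical:

package main

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode"
)

// didYouMean wires suggest_field, suggest_text, suggest_mode, and
// suggest_size together, as the parameter docs above require.
func didYouMean(tp elastictransport.Interface) (*search.Response, error) {
	return search.New(tp).
		Index("articles").
		SuggestField("title").
		SuggestText("elasticsaerch"). // deliberately misspelled input
		SuggestMode(suggestmode.Popular).
		SuggestSize("5").
		Do(context.Background())
}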
+
+// TypedKeys If `true`, aggregation and suggester names are prefixed by their
+// respective types in the response.
+// API name: typed_keys
+func (r *Search) TypedKeys(typedkeys bool) *Search {
+	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))
+
+	return r
+}
+
+// RestTotalHitsAsInt Indicates whether `hits.total` should be rendered as an integer or an object
+// in the rest search response.
+// API name: rest_total_hits_as_int
+func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search {
+	r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint))
+
+	return r
+}
+
+// SourceExcludes_ A comma-separated list of source fields to exclude from the response.
+// You can also use this parameter to exclude fields from the subset specified
+// in `_source_includes` query parameter.
+// If the `_source` parameter is `false`, this parameter is ignored.
+// API name: _source_excludes
+func (r *Search) SourceExcludes_(fields ...string) *Search {
+	r.values.Set("_source_excludes", strings.Join(fields, ","))
+
+	return r
+}
+
+// SourceIncludes_ A comma-separated list of source fields to include in the response.
+// If this parameter is specified, only these source fields are returned.
+// You can exclude fields from this subset using the `_source_excludes` query
+// parameter.
+// If the `_source` parameter is `false`, this parameter is ignored.
+// API name: _source_includes
+func (r *Search) SourceIncludes_(fields ...string) *Search {
+	r.values.Set("_source_includes", strings.Join(fields, ","))
+
+	return r
+}
+
+// Q A query in the Lucene query string syntax.
+// Query parameter searches do not support the full Elasticsearch Query DSL but
+// are handy for testing.
+//
+// IMPORTANT: This parameter overrides the query parameter in the request body.
+// If both parameters are specified, documents matching the query request body
+// parameter are not returned.
+// API name: q
+func (r *Search) Q(q string) *Search {
+	r.values.Set("q", q)
+
+	return r
+}
+
+// ForceSyntheticSource Should this request force synthetic _source?
+// Use this to test if the mapping supports synthetic _source and to get a sense
+// of the worst case performance.
+// Fetches with this enabled will be slower than enabling synthetic source
+// natively in the index.
+// API name: force_synthetic_source
+func (r *Search) ForceSyntheticSource(forcesyntheticsource bool) *Search {
+	r.values.Set("force_synthetic_source", strconv.FormatBool(forcesyntheticsource))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Search) ErrorTrace(errortrace bool) *Search {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Search) FilterPath(filterpaths ...string) *Search {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the
+// human-readable values are omitted. This makes sense for responses consumed
+// only by machines.
+// API name: human
+func (r *Search) Human(human bool) *Search {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use this
+// option for debugging only.
+// API name: pretty
+func (r *Search) Pretty(pretty bool) *Search {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Defines the aggregations that are run as part of the search request.
+// API name: aggregations
+func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aggregations = aggregations
+	return r
+}
+
+func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.Aggregations
+	if r.req.Aggregations == nil {
+		// Allocate into tmp so the write below never targets a nil map.
+		tmp = make(map[string]types.Aggregations)
+	} else {
+		tmp = r.req.Aggregations
+	}
+
+	tmp[key] = *value.AggregationsCaster()
+
+	r.req.Aggregations = tmp
+	return r
+}
+
+// Collapses search results by the values of the specified field.
+// API name: collapse
+func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Collapse = collapse.FieldCollapseCaster()
+
+	return r
+}
+
+// An array of wildcard (`*`) field patterns.
+// The request returns doc values for field names matching these patterns in the
+// `hits.fields` property of the response.
+// API name: docvalue_fields
+func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range docvaluefields {
+		r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster())
+	}
+	return r
+}
+
+// If `true`, the request returns detailed information about score computation
+// as part of a hit.
+// API name: explain
+func (r *Search) Explain(explain bool) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Explain = &explain
+
+	return r
+}
+
+// Configuration of search extensions defined by Elasticsearch plugins.
+// API name: ext
+func (r *Search) Ext(ext map[string]json.RawMessage) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Ext = ext
+	return r
+}
+
+func (r *Search) AddExt(key string, value json.RawMessage) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]json.RawMessage
+	if r.req.Ext == nil {
+		// Allocate into tmp so the write below never targets a nil map.
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.Ext
+	}
+
+	tmp[key] = value
+
+	r.req.Ext = tmp
+	return r
+}
+
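A sketch of the AddAggregation helper above, registering a terms aggregation; the `some` pointer-helper package and the TermsAggregation shape are assumptions about the vendored library rather than something shown in this diff:

package main

import (
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/some"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// topTags registers a terms aggregation under the key "top_tags"; the field
// name "tags.keyword" is hypothetical.
func topTags(s *search.Search) *search.Search {
	return s.AddAggregation("top_tags", &types.Aggregations{
		Terms: &types.TermsAggregation{
			Field: some.String("tags.keyword"),
			Size:  some.Int(10),
		},
	})
}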
+
+// An array of wildcard (`*`) field patterns.
+// The request returns values for field names matching these patterns in the
+// `hits.fields` property of the response.
+// API name: fields
+func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range fields {
+
+		r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster())
+
+	}
+	return r
+}
+
+// The starting document offset, which must be non-negative.
+// By default, you cannot page through more than 10,000 hits using the `from`
+// and `size` parameters.
+// To page through more hits, use the `search_after` parameter.
+// API name: from
+func (r *Search) From(from int) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.From = &from
+
+	return r
+}
+
+// Specifies the highlighter to use for retrieving highlighted snippets from one
+// or more fields in your search results.
+// API name: highlight
+func (r *Search) Highlight(highlight types.HighlightVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Highlight = highlight.HighlightCaster()
+
+	return r
+}
+
+// Boost the `_score` of documents from specified indices.
+// The boost value is the factor by which scores are multiplied.
+// A boost value greater than `1.0` increases the score.
+// A boost value between `0` and `1.0` decreases the score.
+// API name: indices_boost
+func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.IndicesBoost = indicesboost
+
+	return r
+}
+
+// The approximate kNN search to run.
+// API name: knn
+func (r *Search) Knn(knns ...types.KnnSearchVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Knn = make([]types.KnnSearch, len(knns))
+	for i, v := range knns {
+		r.req.Knn[i] = *v.KnnSearchCaster()
+	}
+
+	return r
+}
+
+// The minimum `_score` for matching documents.
+// Documents with a lower `_score` are not included in search results and
+// results collected by aggregations.
+// API name: min_score
+func (r *Search) MinScore(minscore types.Float64) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MinScore = &minscore
+
+	return r
+}
+
+// Limit the search to a point in time (PIT).
+// If you provide a PIT, you cannot specify an `<index>` in the request path.
+// API name: pit
+func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Pit = pit.PointInTimeReferenceCaster()
+
+	return r
+}
+
+// Use the `post_filter` parameter to filter search results.
+// The search hits are filtered after the aggregations are calculated.
+// A post filter has no impact on the aggregation results.
+// API name: post_filter
+func (r *Search) PostFilter(postfilter types.QueryVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.PostFilter = postfilter.QueryCaster()
+
+	return r
+}
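For illustration, a hedged sketch of a Query DSL body built with these setters; the index and field names are placeholders:

	q := types.Query{
		Term: map[string]types.TermQuery{
			"user.id": {Value: "kimchy"},
		},
	}
	res, err := es.Search().
		Index("my-index").
		Query(&q).
		From(0).
		Size(20).
		Do(context.Background())

+
+// Set to `true` to return detailed timing information about the execution of
+// individual components in a search request.
+// NOTE: This is a debugging tool and adds significant overhead to search
+// execution.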
+// API name: profile
+func (r *Search) Profile(profile bool) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Profile = &profile
+
+	return r
+}
+
+// The search definition using the Query DSL.
+// API name: query
+func (r *Search) Query(query types.QueryVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Query = query.QueryCaster()
+
+	return r
+}
+
+// The Reciprocal Rank Fusion (RRF) to use.
+// API name: rank
+func (r *Search) Rank(rank types.RankContainerVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Rank = rank.RankContainerCaster()
+
+	return r
+}
+
+// Can be used to improve precision by reordering just the top (for example 100
+// - 500) documents returned by the `query` and `post_filter` phases.
+// API name: rescore
+func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Rescore = make([]types.Rescore, len(rescores))
+	for i, v := range rescores {
+		r.req.Rescore[i] = *v.RescoreCaster()
+	}
+
+	return r
+}
+
+// A retriever is a specification to describe top documents returned from a
+// search.
+// A retriever replaces other elements of the search API that also return top
+// documents such as `query` and `knn`.
+// API name: retriever
+func (r *Search) Retriever(retriever types.RetrieverContainerVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Retriever = retriever.RetrieverContainerCaster()
+
+	return r
+}
+
+// One or more runtime fields in the search request.
+// These fields take precedence over mapped fields with the same name.
+// API name: runtime_mappings
+func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster()
+
+	return r
+}
+
+// Retrieve a script evaluation (based on different fields) for each hit.
+// API name: script_fields
+func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.ScriptFields = scriptfields
+	return r
+}
+
+func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Reuse the existing map when present, otherwise allocate one so the
+	// assignment below never writes to a nil map.
+	tmp := r.req.ScriptFields
+	if tmp == nil {
+		tmp = make(map[string]types.ScriptField)
+	}
+
+	tmp[key] = *value.ScriptFieldCaster()
+
+	r.req.ScriptFields = tmp
+	return r
+}
+
+// Used to retrieve the next page of hits using a set of sort values from the
+// previous page.
+// API name: search_after
+func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	for _, v := range sortresults {
+		r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster())
+	}
+
+	return r
+}
+
+// If `true`, the request returns sequence number and primary term of the last
+// modification of each hit.
+// API name: seq_no_primary_term
+func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.SeqNoPrimaryTerm = &seqnoprimaryterm
+
+	return r
+}
+
+// The number of hits to return, which must not be negative.
+// By default, you cannot page through more than 10,000 hits using the `from`
+// and `size` parameters.
+// To page through more hits, use the `search_after` property.
+// API name: size
+func (r *Search) Size(size int) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Size = &size
+
+	return r
+}
+
+// Split a scrolled search into multiple slices that can be consumed
+// independently.
+// API name: slice
+func (r *Search) Slice(slice types.SlicedScrollVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Slice = slice.SlicedScrollCaster()
+
+	return r
+}
+
+// A comma-separated list of `<field>:<direction>` pairs.
+// API name: sort
+func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	for _, v := range sorts {
+		r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster())
+	}
+
+	return r
+}
+
+// The source fields that are returned for matching documents.
+// These fields are returned in the `hits._source` property of the search
+// response.
+// If the `stored_fields` property is specified, the `_source` property defaults
+// to `false`.
+// Otherwise, it defaults to `true`.
+// API name: _source
+func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Source_ = *sourceconfig.SourceConfigCaster()
+
+	return r
+}
+
+// The stats groups to associate with the search.
+// Each group maintains a statistics aggregation for its associated searches.
+// You can retrieve these stats using the indices stats API.
+// API name: stats
+func (r *Search) Stats(stats ...string) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range stats {
+
+		r.req.Stats = append(r.req.Stats, v)
+
+	}
+	return r
+}
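A hedged sketch of deep pagination with `Sort` plus `SearchAfter`, as described above; exact variant types and helper names can differ slightly between minor client versions:

	sortByTime := types.SortOptions{
		SortOptions: map[string]types.FieldSort{
			"@timestamp": {Order: &sortorder.Desc},
		},
	}
	page, err := es.Search().
		Index("my-index").
		Size(100).
		Sort(&sortByTime).
		Do(context.Background())
	// To fetch the next page, feed the `sort` values of the last hit in
	// page.Hits.Hits back into SearchAfter on an otherwise identical request.

+
+// A comma-separated list of stored fields to return as part of a hit.
+// If no fields are specified, no stored fields are included in the response.
+// If this field is specified, the `_source` property defaults to `false`.
+// You can pass `_source: true` to return both source fields and stored fields
+// in the search response.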
+// API name: stored_fields +func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.StoredFields = fields + + return r +} + +// Defines a suggester that provides similar looking terms based on a provided +// text. +// API name: suggest +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Suggest = suggest.SuggesterCaster() + + return r +} + +// The maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this property to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this property for requests that target data streams with +// backing indices across multiple data tiers. +// +// If set to `0` (default), the query does not terminate early. +// API name: terminate_after +func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TerminateAfter = &terminateafter + + return r +} + +// The period of time to wait for a response from each shard. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// Defaults to no timeout. +// API name: timeout +func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = &timeout + + return r +} + +// If `true`, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackScores = &trackscores + + return r +} + +// Number of hits matching the query to count accurately. +// If `true`, the exact number of hits is returned at the cost of some +// performance. +// If `false`, the response does not include the total number of hits matching +// the query. +// API name: track_total_hits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() + + return r +} + +// If `true`, the request returns the document version as part of a hit. +// API name: version +func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &version + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/request.go new file mode 100644 index 000000000..59aaba720 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/request.go @@ -0,0 +1,331 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package searchmvt
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype"
+)
+
+// Request holds the request body struct for the package searchmvt
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L334
+type Request struct {
+
+	// Aggs Sub-aggregations for the geotile_grid.
+	//
+	// It supports the following aggregation types:
+	//
+	// - `avg`
+	// - `boxplot`
+	// - `cardinality`
+	// - `extended stats`
+	// - `max`
+	// - `median absolute deviation`
+	// - `min`
+	// - `percentile`
+	// - `percentile-rank`
+	// - `stats`
+	// - `sum`
+	// - `value count`
+	//
+	// The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is
+	// reserved for internal aggregations.
+	Aggs map[string]types.Aggregations `json:"aggs,omitempty"`
+	// Buffer The size, in pixels, of a clipping buffer outside the tile. This allows
+	// renderers to avoid outline artifacts from geometries that extend past the
+	// extent of the tile.
+	Buffer *int `json:"buffer,omitempty"`
+	// ExactBounds If `false`, the meta layer's feature is the bounding box of the tile.
+	// If `true`, the meta layer's feature is a bounding box resulting from a
+	// `geo_bounds` aggregation. The aggregation runs on values that intersect
+	// the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting
+	// bounding box may be larger than the vector tile.
+	ExactBounds *bool `json:"exact_bounds,omitempty"`
+	// Extent The size, in pixels, of a side of the tile. Vector tiles are square with
+	// equal sides.
+	Extent *int `json:"extent,omitempty"`
+	// Fields The fields to return in the `hits` layer.
+	// It supports wildcards (`*`).
+	// This parameter does not support fields with array values. Fields with array
+	// values may return inconsistent results.
+	Fields []string `json:"fields,omitempty"`
+	// GridAgg The aggregation used to create a grid for the `field`.
+	GridAgg *gridaggregationtype.GridAggregationType `json:"grid_agg,omitempty"`
+	// GridPrecision Additional zoom levels available through the aggs layer. For example, if
+	// `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15.
+	// Accepts 0-8. If 0, results don't include the aggs layer.
+	GridPrecision *int `json:"grid_precision,omitempty"`
+	// GridType Determines the geometry type for features in the aggs layer. In the aggs
+	// layer, each feature represents a `geotile_grid` cell. If `grid`, each
+	// feature is a polygon of the cell's bounding box. If `point`, each feature
+	// is a Point that is the centroid of the cell.
+	GridType *gridtype.GridType `json:"grid_type,omitempty"`
+	// Query The query DSL used to filter documents for the search.
+	Query *types.Query `json:"query,omitempty"`
+	// RuntimeMappings Defines one or more runtime fields in the search request. These fields take
+	// precedence over mapped fields with the same name.
+	RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"`
+	// Size The maximum number of features to return in the hits layer. Accepts 0-10000.
+	// If 0, results don't include the hits layer.
+	Size *int `json:"size,omitempty"`
+	// Sort Sort the features in the hits layer. By default, the API calculates a
+	// bounding box for each feature. It sorts features based on this box's
+	// diagonal length, from longest to shortest.
+	Sort []types.SortCombinations `json:"sort,omitempty"`
+	// TrackTotalHits The number of hits matching the query to count accurately. If `true`, the
+	// exact number of hits is returned at the cost of some performance. If
+	// `false`, the response does not include the total number of hits matching
+	// the query.
+	TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"`
+	// WithLabels If `true`, the hits and aggs layers will contain additional point features
+	// representing suggested label positions for the original features.
+	//
+	// * `Point` and `MultiPoint` features will have one of the points selected.
+	// * `Polygon` and `MultiPolygon` features will have a single point generated,
+	// either the centroid, if it is within the polygon, or another point within the
+	// polygon selected from the sorted triangle-tree.
+	// * `LineString` features will likewise provide a roughly central point
+	// selected from the triangle-tree.
+	// * The aggregation results will provide one central point for each aggregation
+	// bucket.
+	//
+	// All attributes from the original features will also be copied to the new
+	// label features.
+	// In addition, the new features will be distinguishable using the tag
+	// `_mvt_label_position`.
+ WithLabels *bool `json:"with_labels,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggs: make(map[string]types.Aggregations, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Searchmvt request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggs": + if s.Aggs == nil { + s.Aggs = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggs); err != nil { + return fmt.Errorf("%s | %w", "Aggs", err) + } + + case "buffer": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Buffer", err) + } + s.Buffer = &value + case float64: + f := int(v) + s.Buffer = &f + } + + case "exact_bounds": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExactBounds", err) + } + s.ExactBounds = &value + case bool: + s.ExactBounds = &v + } + + case "extent": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Extent", err) + } + s.Extent = &value + case float64: + f := int(v) + s.Extent = &f + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "grid_agg": + if err := dec.Decode(&s.GridAgg); err != nil { + return fmt.Errorf("%s | %w", "GridAgg", err) + } + + case "grid_precision": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "GridPrecision", err) + } + s.GridPrecision = &value + case float64: + f := int(v) + s.GridPrecision = &f + } + + case "grid_type": + if err := dec.Decode(&s.GridType); err != nil { + return fmt.Errorf("%s | %w", "GridType", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = 
append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "track_total_hits": + if err := dec.Decode(&s.TrackTotalHits); err != nil { + return fmt.Errorf("%s | %w", "TrackTotalHits", err) + } + + case "with_labels": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WithLabels", err) + } + s.WithLabels = &value + case bool: + s.WithLabels = &v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/response.go new file mode 100644 index 000000000..c7d903818 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package searchmvt + +// Response holds the response body struct for the package searchmvt +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/search_mvt.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/search_mvt.go new file mode 100644 index 000000000..7b925d6fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt/search_mvt.go @@ -0,0 +1,942 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

+// Search a vector tile.
+//
+// Search a vector tile for geospatial values.
+// Before using this API, you should be familiar with the Mapbox vector tile
+// specification.
+// The API returns results as a binary Mapbox vector tile.
+//
+// Internally, Elasticsearch translates a vector tile search API request into a
+// search containing:
+//
+// * A `geo_bounding_box` query on the `<field>`. The query uses the
+// `<zoom>/<x>/<y>` tile as a bounding box.
+// * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The
+// `grid_agg` parameter determines the aggregation type. The aggregation uses
+// the `<zoom>/<x>/<y>` tile as a bounding box.
+// * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only
+// includes this aggregation if the `exact_bounds` parameter is `true`.
+// * If the optional parameter `with_labels` is `true`, the internal search will
+// include a dynamic runtime field that calls the `getLabelPosition` function of
+// the geometry doc value. This enables the generation of new point features
+// containing suggested geometry labels, so that, for example, multi-polygons
+// will have only one label.
+//
+// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the
+// tile contains three layers:
+//
+// * A `hits` layer containing a feature for each `<field>` value matching the
+// `geo_bounding_box` query.
+// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or
+// `geohex_grid`. The layer only contains features for cells with matching data.
+// * A meta layer containing:
+//   - A feature containing a bounding box. By default, this is the bounding box
+//     of the tile.
+//   - Value ranges for any sub-aggregations on the `geotile_grid` or
+//     `geohex_grid`.
+//   - Metadata for the search.
+//
+// The API only returns features that can display at its zoom level.
+// For example, if a polygon feature has no area at its zoom level, the API
+// omits it.
+// The API returns errors as UTF-8 encoded JSON.
+//
+// IMPORTANT: You can specify several options for this API as either a query
+// parameter or request body parameter.
+// If you specify both parameters, the query parameter takes precedence.
+//
+// **Grid precision for geotile**
+//
+// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles
+// for lower zoom levels.
+// `grid_precision` represents the additional zoom levels available through
+// these cells. The final precision is computed as follows:
+// `<zoom> + grid_precision`.
+// For example, if `<zoom>` is 7 and `grid_precision` is 8, then the
+// `geotile_grid` aggregation will use a precision of 15.
+// The maximum final precision is 29.
+// The `grid_precision` also determines the number of cells for the grid as
+// follows: `(2^grid_precision) x (2^grid_precision)`.
+// For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+// The `aggs` layer only contains features for cells with matching data.
+//
+// **Grid precision for geohex**
+//
+// For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and
+// `grid_precision` to calculate a final precision as follows:
+// `<zoom> + grid_precision`.
+//
+// This precision determines the H3 resolution of the hexagonal cells produced
+// by the `geohex` aggregation.
+// The following table maps the H3 resolution for each precision.
+// For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+// At a precision of 6, hexagonal cells have an H3 resolution of 2.
+// If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+// At a precision of 7, hexagonal cells have an H3 resolution of 3.
+//
+// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+// | --------- | ---------------- | ------------- | ----------------| ----- |
+// | 1 | 4 | 0 | 122 | 30.5 |
+// | 2 | 16 | 0 | 122 | 7.625 |
+// | 3 | 64 | 1 | 842 | 13.15625 |
+// | 4 | 256 | 1 | 842 | 3.2890625 |
+// | 5 | 1024 | 2 | 5882 | 5.744140625 |
+// | 6 | 4096 | 2 | 5882 | 1.436035156 |
+// | 7 | 16384 | 3 | 41162 | 2.512329102 |
+// | 8 | 65536 | 3 | 41162 | 0.6280822754 |
+// | 9 | 262144 | 4 | 288122 | 1.099098206 |
+// | 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+//
+// Hexagonal cells don't align perfectly on a vector tile.
+// Some cells may intersect more than one vector tile.
+// To compute the H3 resolution for each precision, Elasticsearch compares the
+// average density of hexagonal bins at each resolution with the average density
+// of tile bins at each zoom level.
+// Elasticsearch uses the H3 resolution that is closest to the corresponding
+// geotile density.
+//
+// Learn how to use the vector tile search API with practical examples in the
+// [Vector tile search
+// examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search)
+// guide.
+package searchmvt
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype"
+)
+
+const (
+	indexMask = iota + 1
+
+	fieldMask
+
+	zoomMask
+
+	xMask
+
+	yMask
+)
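As a quick, hedged illustration of the grid-precision arithmetic described in the package comment above:

	zoom, gridPrecision := 7, 8
	finalPrecision := zoom + gridPrecision    // 15; geotile precision caps at 29
	cellsPerSide := 1 << gridPrecision        // 2^8 = 256
	totalCells := cellsPerSide * cellsPerSide // a 256 x 256 aggs grid
	_, _ = finalPrecision, totalCells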
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SearchMvt struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	index string
+	field string
+	zoom  string
+	x     string
+	y     string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSearchMvt type alias for index.
+type NewSearchMvt func(index, field, zoom, x, y string) *SearchMvt
+
+// NewSearchMvtFunc returns a new instance of SearchMvt with the provided transport.
+// Used in the index of the library, this allows to retrieve every API in one place.
+func NewSearchMvtFunc(tp elastictransport.Interface) NewSearchMvt {
+	return func(index, field, zoom, x, y string) *SearchMvt {
+		n := New(tp)
+
+		n._index(index)
+
+		n._field(field)
+
+		n._zoom(zoom)
+
+		n._x(x)
+
+		n._y(y)
+
+		return n
+	}
+}
+
+// Search a vector tile.
+//
+// Search a vector tile for geospatial values.
+// Before using this API, you should be familiar with the Mapbox vector tile
+// specification.
+// The API returns results as a binary Mapbox vector tile.
+//
+// Internally, Elasticsearch translates a vector tile search API request into a
+// search containing:
+//
+// * A `geo_bounding_box` query on the `<field>`. The query uses the
+// `<zoom>/<x>/<y>` tile as a bounding box.
+// * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The
+// `grid_agg` parameter determines the aggregation type. The aggregation uses
+// the `<zoom>/<x>/<y>` tile as a bounding box.
+// * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only
+// includes this aggregation if the `exact_bounds` parameter is `true`.
+// * If the optional parameter `with_labels` is `true`, the internal search will
+// include a dynamic runtime field that calls the `getLabelPosition` function of
+// the geometry doc value. This enables the generation of new point features
+// containing suggested geometry labels, so that, for example, multi-polygons
+// will have only one label.
+//
+// Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the
+// tile contains three layers:
+//
+// * A `hits` layer containing a feature for each `<field>` value matching the
+// `geo_bounding_box` query.
+// * An `aggs` layer containing a feature for each cell of the `geotile_grid` or
+// `geohex_grid`. The layer only contains features for cells with matching data.
+// * A meta layer containing:
+//   - A feature containing a bounding box. By default, this is the bounding box
+//     of the tile.
+//   - Value ranges for any sub-aggregations on the `geotile_grid` or
+//     `geohex_grid`.
+//   - Metadata for the search.
+//
+// The API only returns features that can display at its zoom level.
+// For example, if a polygon feature has no area at its zoom level, the API
+// omits it.
+// The API returns errors as UTF-8 encoded JSON.
+//
+// IMPORTANT: You can specify several options for this API as either a query
+// parameter or request body parameter.
+// If you specify both parameters, the query parameter takes precedence.
+//
+// **Grid precision for geotile**
+//
+// For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles
+// for lower zoom levels.
+// `grid_precision` represents the additional zoom levels available through
+// these cells. The final precision is computed as follows:
+// `<zoom> + grid_precision`.
+// For example, if `<zoom>` is 7 and `grid_precision` is 8, then the
+// `geotile_grid` aggregation will use a precision of 15.
+// The maximum final precision is 29.
+// The `grid_precision` also determines the number of cells for the grid as
+// follows: `(2^grid_precision) x (2^grid_precision)`.
+// For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+// The `aggs` layer only contains features for cells with matching data.
+//
+// **Grid precision for geohex**
+//
+// For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and
+// `grid_precision` to calculate a final precision as follows:
+// `<zoom> + grid_precision`.
+//
+// This precision determines the H3 resolution of the hexagonal cells produced
+// by the `geohex` aggregation.
+// The following table maps the H3 resolution for each precision.
+// For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+// At a precision of 6, hexagonal cells have an H3 resolution of 2.
+// If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+// At a precision of 7, hexagonal cells have an H3 resolution of 3.
+//
+// | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+// | --------- | ---------------- | ------------- | ----------------| ----- |
+// | 1 | 4 | 0 | 122 | 30.5 |
+// | 2 | 16 | 0 | 122 | 7.625 |
+// | 3 | 64 | 1 | 842 | 13.15625 |
+// | 4 | 256 | 1 | 842 | 3.2890625 |
+// | 5 | 1024 | 2 | 5882 | 5.744140625 |
+// | 6 | 4096 | 2 | 5882 | 1.436035156 |
+// | 7 | 16384 | 3 | 41162 | 2.512329102 |
+// | 8 | 65536 | 3 | 41162 | 0.6280822754 |
+// | 9 | 262144 | 4 | 288122 | 1.099098206 |
+// | 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+// | 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+// | 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+// | 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+// | 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+// | 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+// | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+// | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+// | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+// | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+// | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+// | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+// | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+// | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+// | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+// | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+// | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+// | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+// | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+// | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+//
+// Hexagonal cells don't align perfectly on a vector tile.
+// Some cells may intersect more than one vector tile.
+// To compute the H3 resolution for each precision, Elasticsearch compares the
+// average density of hexagonal bins at each resolution with the average density
+// of tile bins at each zoom level.
+// Elasticsearch uses the H3 resolution that is closest to the corresponding
+// geotile density.
+//
+// Learn how to use the vector tile search API with practical examples in the
+// [Vector tile search
+// examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search)
+// guide.
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt +func New(tp elastictransport.Interface) *SearchMvt { + r := &SearchMvt{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SearchMvt) Raw(raw io.Reader) *SearchMvt { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SearchMvt) Request(req *Request) *SearchMvt { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SearchMvt) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SearchMvt: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|fieldMask|zoomMask|xMask|yMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mvt") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "field", r.field) + } + path.WriteString(r.field) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "zoom", r.zoom) + } + path.WriteString(r.zoom) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "x", r.x) + } + path.WriteString(r.x) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "y", r.y) + } + path.WriteString(r.y) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/json") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.mapbox-vector-tile") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SearchMvt) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_mvt") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_mvt") + if reader := instrument.RecordRequestBody(ctx, "search_mvt", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_mvt") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SearchMvt query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a searchmvt.Response +func (r SearchMvt) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_mvt") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SearchMvt headers map. 
+func (r *SearchMvt) Header(key, value string) *SearchMvt {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index Comma-separated list of data streams, indices, or aliases to search
+// API Name: index
+func (r *SearchMvt) _index(index string) *SearchMvt {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// Field Field containing geospatial data to return
+// API Name: field
+func (r *SearchMvt) _field(field string) *SearchMvt {
+	r.paramSet |= fieldMask
+	r.field = field
+
+	return r
+}
+
+// Zoom Zoom level for the vector tile to search
+// API Name: zoom
+func (r *SearchMvt) _zoom(zoom string) *SearchMvt {
+	r.paramSet |= zoomMask
+	r.zoom = zoom
+
+	return r
+}
+
+// X X coordinate for the vector tile to search
+// API Name: x
+func (r *SearchMvt) _x(x string) *SearchMvt {
+	r.paramSet |= xMask
+	r.x = x
+
+	return r
+}
+
+// Y Y coordinate for the vector tile to search
+// API Name: y
+func (r *SearchMvt) _y(y string) *SearchMvt {
+	r.paramSet |= yMask
+	r.y = y
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *SearchMvt) ErrorTrace(errortrace bool) *SearchMvt {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *SearchMvt) FilterPath(filterpaths ...string) *SearchMvt {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the
+// human readable values are omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *SearchMvt) Human(human bool) *SearchMvt {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging.
+// API name: pretty
+func (r *SearchMvt) Pretty(pretty bool) *SearchMvt {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
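A hedged usage sketch for this endpoint; the index and field are placeholders, the tile coordinates follow the `{zoom}/{x}/{y}` scheme, and depending on the client version the constructor may instead live under `es.Core.SearchMvt`:

	tile, err := es.SearchMvt("museums", "location", "13", "4207", "2692").
		GridPrecision(2).
		GridType(gridtype.Grid).
		Do(context.Background())
	// On success, tile holds the raw binary Mapbox vector tile ([]byte).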
+
+// Sub-aggregations for the geotile_grid.
+//
+// It supports the following aggregation types:
+//
+// - `avg`
+// - `boxplot`
+// - `cardinality`
+// - `extended stats`
+// - `max`
+// - `median absolute deviation`
+// - `min`
+// - `percentile`
+// - `percentile-rank`
+// - `stats`
+// - `sum`
+// - `value count`
+//
+// The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is
+// reserved for internal aggregations.
+// API name: aggs
+func (r *SearchMvt) Aggs(aggs map[string]types.Aggregations) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aggs = aggs
+	return r
+}
+
+func (r *SearchMvt) AddAgg(key string, value types.AggregationsVariant) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Reuse the existing map when present, otherwise allocate one so the
+	// assignment below never writes to a nil map.
+	tmp := r.req.Aggs
+	if tmp == nil {
+		tmp = make(map[string]types.Aggregations)
+	}
+
+	tmp[key] = *value.AggregationsCaster()
+
+	r.req.Aggs = tmp
+	return r
+}
+
+// The size, in pixels, of a clipping buffer outside the tile. This allows
+// renderers to avoid outline artifacts from geometries that extend past the
+// extent of the tile.
+// API name: buffer
+func (r *SearchMvt) Buffer(buffer int) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Buffer = &buffer
+
+	return r
+}
+
+// If `false`, the meta layer's feature is the bounding box of the tile.
+// If `true`, the meta layer's feature is a bounding box resulting from a
+// `geo_bounds` aggregation. The aggregation runs on values that intersect
+// the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting
+// bounding box may be larger than the vector tile.
+// API name: exact_bounds
+func (r *SearchMvt) ExactBounds(exactbounds bool) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ExactBounds = &exactbounds
+
+	return r
+}
+
+// The size, in pixels, of a side of the tile. Vector tiles are square with
+// equal sides.
+// API name: extent
+func (r *SearchMvt) Extent(extent int) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Extent = &extent
+
+	return r
+}
+
+// The fields to return in the `hits` layer.
+// It supports wildcards (`*`).
+// This parameter does not support fields with array values. Fields with array
+// values may return inconsistent results.
+// API name: fields
+func (r *SearchMvt) Fields(fields ...string) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Fields = fields
+
+	return r
+}
+
+// The aggregation used to create a grid for the `field`.
+// API name: grid_agg
+func (r *SearchMvt) GridAgg(gridagg gridaggregationtype.GridAggregationType) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.GridAgg = &gridagg
+	return r
+}
+
+// Additional zoom levels available through the aggs layer. For example, if
+// `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15.
+// Accepts 0-8. If 0, results don't include the aggs layer.
+// API name: grid_precision
+func (r *SearchMvt) GridPrecision(gridprecision int) *SearchMvt {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.GridPrecision = &gridprecision
+
+	return r
+}
+
+// Determines the geometry type for features in the aggs layer. In the aggs
+// layer, each feature represents a `geotile_grid` cell. If `grid`, each
+// feature is a polygon of the cell's bounding box. If `point`, each feature is
+// a Point that is the centroid of the cell.
+// API name: grid_type +func (r *SearchMvt) GridType(gridtype gridtype.GridType) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GridType = &gridtype + return r +} + +// The query DSL used to filter documents for the search. +// API name: query +func (r *SearchMvt) Query(query types.QueryVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// The maximum number of features to return in the hits layer. Accepts 0-10000. +// If 0, results don't include the hits layer. +// API name: size +func (r *SearchMvt) Size(size int) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// Sort the features in the hits layer. By default, the API calculates a +// bounding +// box for each feature. It sorts features based on this box's diagonal length, +// from longest to shortest. +// API name: sort +func (r *SearchMvt) Sort(sorts ...types.SortCombinationsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} + +// The number of hits matching the query to count accurately. If `true`, the +// exact number +// of hits is returned at the cost of some performance. If `false`, the response +// does +// not include the total number of hits matching the query. +// API name: track_total_hits +func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHitsVariant) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() + + return r +} + +// If `true`, the hits and aggs layers will contain additional point features +// representing +// suggested label positions for the original features. +// +// * `Point` and `MultiPoint` features will have one of the points selected. +// * `Polygon` and `MultiPolygon` features will have a single point generated, +// either the centroid, if it is within the polygon, or another point within the +// polygon selected from the sorted triangle-tree. +// * `LineString` features will likewise provide a roughly central point +// selected from the triangle-tree. +// * The aggregation results will provide one central point for each aggregation +// bucket. +// +// All attributes from the original features will also be copied to the new +// label features. +// In addition, the new features will be distinguishable using the tag +// `_mvt_label_position`. 
+// API name: with_labels +func (r *SearchMvt) WithLabels(withlabels bool) *SearchMvt { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WithLabels = &withlabels + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/response.go new file mode 100644 index 000000000..c16e3479e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package searchshards + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package searchshards +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_shards/SearchShardsResponse.ts#L34-L40 +type Response struct { + Indices map[string]types.ShardStoreIndex `json:"indices"` + Nodes map[string]types.SearchShardsNodeAttributes `json:"nodes"` + Shards [][]types.NodeShard `json:"shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.ShardStoreIndex, 0), + Nodes: make(map[string]types.SearchShardsNodeAttributes, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/search_shards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/search_shards.go new file mode 100644 index 000000000..a4e2af56c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards/search_shards.go @@ -0,0 +1,441 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the search shards. +// +// Get the indices and shards that a search request would be run against. +// This information can be useful for working out issues or planning +// optimizations with routing and shard preferences. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. +package searchshards + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SearchShards struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearchShards type alias for index. +type NewSearchShards func() *SearchShards + +// NewSearchShardsFunc returns a new instance of SearchShards with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchShardsFunc(tp elastictransport.Interface) NewSearchShards { + return func() *SearchShards { + n := New(tp) + + return n + } +} + +// Get the search shards. +// +// Get the indices and shards that a search request would be run against. +// This information can be useful for working out issues or planning +// optimizations with routing and shard preferences. +// When filtered aliases are used, the filter is returned as part of the +// `indices` section. +// +// If the Elasticsearch security features are enabled, you must have the +// `view_index_metadata` or `manage` index privilege for the target data stream, +// index, or alias. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards +func New(tp elastictransport.Interface) *SearchShards { + r := &SearchShards{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
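+//
+// Illustrative sketch only; `transport` is an assumed
+// elastictransport.Interface value and the index name is made up:
+//
+//	sh := searchshards.New(transport).Index("my-index")
+//	req, err := sh.HttpRequest(context.Background())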
+func (r *SearchShards) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_search_shards") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_search_shards") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SearchShards) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_shards") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_shards") + if reader := instrument.RecordRequestBody(ctx, "search_shards", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_shards") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SearchShards query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a searchshards.Response +func (r SearchShards) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_shards") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SearchShards) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_shards") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SearchShards query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SearchShards headers map. +func (r *SearchShards) Header(key, value string) *SearchShards { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *SearchShards) Index(index string) *SearchShards { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *SearchShards) AllowNoIndices(allownoindices bool) *SearchShards { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *SearchShards) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *SearchShards { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
+// API name: ignore_unavailable
+func (r *SearchShards) IgnoreUnavailable(ignoreunavailable bool) *SearchShards {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// Local If `true`, the request retrieves information from the local node only.
+// API name: local
+func (r *SearchShards) Local(local bool) *SearchShards {
+	r.values.Set("local", strconv.FormatBool(local))
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// If the master node is not available before the timeout expires, the request
+// fails and returns an error.
+// It can also be set to `-1` to indicate that the request should never time out.
+// API name: master_timeout
+func (r *SearchShards) MasterTimeout(duration string) *SearchShards {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Preference The node or shard the operation should be performed on.
+// It is random by default.
+// API name: preference
+func (r *SearchShards) Preference(preference string) *SearchShards {
+	r.values.Set("preference", preference)
+
+	return r
+}
+
+// Routing A custom value used to route operations to a specific shard.
+// API name: routing
+func (r *SearchShards) Routing(routing string) *SearchShards {
+	r.values.Set("routing", routing)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *SearchShards) ErrorTrace(errortrace bool) *SearchShards {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *SearchShards) FilterPath(filterpaths ...string) *SearchShards {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, Elasticsearch will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *SearchShards) Human(human bool) *SearchShards {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *SearchShards) Pretty(pretty bool) *SearchShards {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/request.go
new file mode 100644
index 000000000..0d0a23e93
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/request.go
@@ -0,0 +1,175 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package searchtemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package searchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_template/SearchTemplateRequest.ts#L33-L153 +type Request struct { + + // Explain If `true`, returns detailed information about score calculation as part of + // each hit. + // If you specify both this and the `explain` query parameter, the API uses only + // the query parameter. + Explain *bool `json:"explain,omitempty"` + // Id The ID of the search template to use. If no `source` is specified, + // this parameter is required. + Id *string `json:"id,omitempty"` + // Params Key-value pairs used to replace Mustache variables in the template. + // The key is the variable name. + // The value is the variable value. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Profile If `true`, the query execution is profiled. + Profile *bool `json:"profile,omitempty"` + // Source An inline search template. Supports the same parameters as the search API's + // request body. It also supports Mustache variables. If no `id` is specified, + // this + // parameter is required. 
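+	// For illustration only, a minimal inline template might look like
+	// {"query":{"match":{"message":"{{query_string}}"}}} (the field and
+	// variable names here are assumptions, not part of the API).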
+ Source types.ScriptSource `json:"source,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Searchtemplate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := types.NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/response.go new file mode 100644 index 000000000..ceba19944 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/response.go @@ -0,0 +1,789 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package searchtemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package searchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o 
:= types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := 
types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + 
s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/search_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/search_template.go new file mode 100644 index 000000000..309b38f6a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate/search_template.go @@ -0,0 +1,565 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a search with a search template. +package searchtemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SearchTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearchTemplate type alias for index. +type NewSearchTemplate func() *SearchTemplate + +// NewSearchTemplateFunc returns a new instance of SearchTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchTemplateFunc(tp elastictransport.Interface) NewSearchTemplate { + return func() *SearchTemplate { + n := New(tp) + + return n + } +} + +// Run a search with a search template. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template +func New(tp elastictransport.Interface) *SearchTemplate { + r := &SearchTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SearchTemplate) Raw(raw io.Reader) *SearchTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SearchTemplate) Request(req *Request) *SearchTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
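+//
+// Illustrative sketch only; `transport`, the index name, and the template id
+// are assumptions:
+//
+//	st := searchtemplate.New(transport).Index("my-index").Id("my-template")
+//	req, err := st.HttpRequest(context.Background())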
+func (r *SearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SearchTemplate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_search") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
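+//
+// Illustrative sketch only (the template id is an assumption); the caller is
+// responsible for closing the returned body:
+//
+//	res, err := searchtemplate.New(transport).Id("my-template").Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}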
+func (r SearchTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_template") + if reader := instrument.RecordRequestBody(ctx, "search_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SearchTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a searchtemplate.Response +func (r SearchTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SearchTemplate headers map. +func (r *SearchTemplate) Header(key, value string) *SearchTemplate { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to search. +// It supports wildcards (`*`). +// API Name: index +func (r *SearchTemplate) Index(index string) *SearchTemplate { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
+// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *SearchTemplate) AllowNoIndices(allownoindices bool) *SearchTemplate { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// CcsMinimizeRoundtrips If `true`, network round-trips are minimized for cross-cluster search +// requests. +// API name: ccs_minimize_roundtrips +func (r *SearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *SearchTemplate { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *SearchTemplate) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *SearchTemplate { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, specified concrete, expanded, or aliased indices are not included +// in the response when throttled. +// API name: ignore_throttled +func (r *SearchTemplate) IgnoreThrottled(ignorethrottled bool) *SearchTemplate { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *SearchTemplate) IgnoreUnavailable(ignoreunavailable bool) *SearchTemplate { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. +// API name: preference +func (r *SearchTemplate) Preference(preference string) *SearchTemplate { + r.values.Set("preference", preference) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *SearchTemplate) Routing(routing string) *SearchTemplate { + r.values.Set("routing", routing) + + return r +} + +// Scroll Specifies how long a consistent view of the index +// should be maintained for scrolled search. +// API name: scroll +func (r *SearchTemplate) Scroll(duration string) *SearchTemplate { + r.values.Set("scroll", duration) + + return r +} + +// SearchType The type of the search operation. +// API name: search_type +func (r *SearchTemplate) SearchType(searchtype searchtype.SearchType) *SearchTemplate { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// RestTotalHitsAsInt If `true`, `hits.total` is rendered as an integer in the response. +// If `false`, it is rendered as an object. +// API name: rest_total_hits_as_int +func (r *SearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *SearchTemplate { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// TypedKeys If `true`, the response prefixes aggregation and suggester names with their +// respective types. 
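+// Note that `Do` always sets this parameter to `true` before performing the
+// request, so that the typed aggregate and suggester variants in the response
+// can be decoded.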
+// API name: typed_keys
+func (r *SearchTemplate) TypedKeys(typedkeys bool) *SearchTemplate {
+	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *SearchTemplate) ErrorTrace(errortrace bool) *SearchTemplate {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *SearchTemplate) FilterPath(filterpaths ...string) *SearchTemplate {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, Elasticsearch will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *SearchTemplate) Human(human bool) *SearchTemplate {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *SearchTemplate) Pretty(pretty bool) *SearchTemplate {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// If `true`, returns detailed information about score calculation as part of
+// each hit.
+// If you specify both this and the `explain` query parameter, the API uses only
+// the query parameter.
+// API name: explain
+func (r *SearchTemplate) Explain(explain bool) *SearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Explain = &explain
+
+	return r
+}
+
+// The ID of the search template to use. If no `source` is specified,
+// this parameter is required.
+// API name: id
+func (r *SearchTemplate) Id(id string) *SearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Id = &id
+
+	return r
+}
+
+// Key-value pairs used to replace Mustache variables in the template.
+// The key is the variable name.
+// The value is the variable value.
+// API name: params
+func (r *SearchTemplate) Params(params map[string]json.RawMessage) *SearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Params = params
+	return r
+}
+
+// AddParam adds a single key/value pair to the request's `params` map,
+// creating the map first if it does not exist yet.
+func (r *SearchTemplate) AddParam(key string, value json.RawMessage) *SearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]json.RawMessage
+	if r.req.Params == nil {
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.Params
+	}
+
+	tmp[key] = value
+
+	r.req.Params = tmp
+	return r
+}
+
+// If `true`, the query execution is profiled.
+// API name: profile
+func (r *SearchTemplate) Profile(profile bool) *SearchTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Profile = &profile
+
+	return r
+}
+
+// An inline search template.
Supports the same parameters as the search API's +// request body. It also supports Mustache variables. If no `id` is specified, +// this +// parameter is required. +// API name: source +func (r *SearchTemplate) Source(scriptsource types.ScriptSourceVariant) *SearchTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *scriptsource.ScriptSourceCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/request.go new file mode 100644 index 000000000..81713c5d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/request.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package termsenum + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package termsenum +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L93 +type Request struct { + + // CaseInsensitive When `true`, the provided search string is matched against index terms + // without case sensitivity. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + // Field The string to match at the start of indexed terms. If not provided, all terms + // in the field are considered. + Field string `json:"field"` + // IndexFilter Filter an index shard if the provided query rewrites to `match_none`. + IndexFilter *types.Query `json:"index_filter,omitempty"` + // SearchAfter The string after which terms in the index should be returned. + // It allows for a form of pagination if the last result from one request is + // passed as the `search_after` parameter for a subsequent request. + SearchAfter *string `json:"search_after,omitempty"` + // Size The number of matching terms to return. + Size *int `json:"size,omitempty"` + // String The string to match at the start of indexed terms. + // If it is not provided, all terms in the field are considered. + // + // > info + // > The prefix string cannot be larger than the largest possible keyword value, + // which is Lucene's term byte-length limit of 32766. + String *string `json:"string,omitempty"` + // Timeout The maximum length of time to spend collecting results. 
+ // If the timeout is exceeded the `complete` flag set to `false` in the response + // and the results may be partial or empty. + Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Termsenum request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "case_insensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseInsensitive", err) + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "index_filter": + if err := dec.Decode(&s.IndexFilter); err != nil { + return fmt.Errorf("%s | %w", "IndexFilter", err) + } + + case "search_after": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAfter = &o + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "String", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.String = &o + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/response.go new file mode 100644 index 000000000..e9cc174b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package termsenum + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package termsenum +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L32 +type Response struct { + + // Complete If `false`, the returned terms set may be incomplete and should be treated as + // approximate. + // This can occur due to a few reasons, such as a request timeout or a node + // error. + Complete bool `json:"complete"` + Shards_ types.ShardStatistics `json:"_shards"` + Terms []string `json:"terms"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/terms_enum.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/terms_enum.go new file mode 100644 index 000000000..01a4f2922 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum/terms_enum.go @@ -0,0 +1,481 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get terms in an index. +// +// Discover terms that match a partial string in an index. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. +// +// > info +// > The terms enum API may return terms from deleted documents. Deleted +// documents are initially only marked as deleted. It is not until their +// segments are merged that documents are actually deleted. Until that happens, +// the terms enum API will return terms from these documents. +package termsenum + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
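+//
+// A caller-side sketch of checking for it with the standard library's
+// errors.Is (illustrative only; `r` and `ctx` are assumed to be an existing
+// builder and context):
+//
+//	if _, err := r.HttpRequest(ctx); errors.Is(err, termsenum.ErrBuildPath) {
+//		// a required path parameter, such as the index name, was missing
+//	}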
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type TermsEnum struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTermsEnum type alias for index. +type NewTermsEnum func(index string) *TermsEnum + +// NewTermsEnumFunc returns a new instance of TermsEnum with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTermsEnumFunc(tp elastictransport.Interface) NewTermsEnum { + return func(index string) *TermsEnum { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get terms in an index. +// +// Discover terms that match a partial string in an index. +// This API is designed for low-latency look-ups used in auto-complete +// scenarios. +// +// > info +// > The terms enum API may return terms from deleted documents. Deleted +// documents are initially only marked as deleted. It is not until their +// segments are merged that documents are actually deleted. Until that happens, +// the terms enum API will return terms from these documents. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum +func New(tp elastictransport.Interface) *TermsEnum { + r := &TermsEnum{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *TermsEnum) Raw(raw io.Reader) *TermsEnum { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *TermsEnum) Request(req *Request) *TermsEnum { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
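+// The built request can then be driven by any *http.Client instead of the
+// transport; a sketch under assumed names (`transport` and `ctx` are the
+// caller's, the index name is illustrative):
+//
+//	httpReq, err := termsenum.NewTermsEnumFunc(transport)("my-index").HttpRequest(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	res, err := http.DefaultClient.Do(httpReq)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()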
+func (r *TermsEnum) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for TermsEnum: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_terms_enum") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r TermsEnum) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "terms_enum") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "terms_enum") + if reader := instrument.RecordRequestBody(ctx, "terms_enum", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "terms_enum") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the TermsEnum query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a termsenum.Response +func (r TermsEnum) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "terms_enum") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the TermsEnum headers map. +func (r *TermsEnum) Header(key, value string) *TermsEnum { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and index aliases to search. +// Wildcard (`*`) expressions are supported. +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *TermsEnum) _index(index string) *TermsEnum { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *TermsEnum) ErrorTrace(errortrace bool) *TermsEnum { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *TermsEnum) FilterPath(filterpaths ...string) *TermsEnum { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *TermsEnum) Human(human bool) *TermsEnum { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *TermsEnum) Pretty(pretty bool) *TermsEnum { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// When `true`, the provided search string is matched against index terms +// without case sensitivity. +// API name: case_insensitive +func (r *TermsEnum) CaseInsensitive(caseinsensitive bool) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CaseInsensitive = &caseinsensitive + + return r +} + +// The string to match at the start of indexed terms. If not provided, all terms +// in the field are considered. 
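+//
+// A minimal end-to-end sketch (transport, index, field, and prefix are
+// assumptions for illustration):
+//
+//	enum := termsenum.NewTermsEnumFunc(transport)("stocks")
+//	res, err := enum.Field("ticker").String("IB").Size(10).Do(context.Background())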
+// API name: field +func (r *TermsEnum) Field(field string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Field = field + + return r +} + +// Filter an index shard if the provided query rewrites to `match_none`. +// API name: index_filter +func (r *TermsEnum) IndexFilter(indexfilter types.QueryVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexFilter = indexfilter.QueryCaster() + + return r +} + +// The string after which terms in the index should be returned. +// It allows for a form of pagination if the last result from one request is +// passed as the `search_after` parameter for a subsequent request. +// API name: search_after +func (r *TermsEnum) SearchAfter(searchafter string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SearchAfter = &searchafter + + return r +} + +// The number of matching terms to return. +// API name: size +func (r *TermsEnum) Size(size int) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// The string to match at the start of indexed terms. +// If it is not provided, all terms in the field are considered. +// +// > info +// > The prefix string cannot be larger than the largest possible keyword value, +// which is Lucene's term byte-length limit of 32766. +// API name: string +func (r *TermsEnum) String(string string) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.String = &string + + return r +} + +// The maximum length of time to spend collecting results. +// If the timeout is exceeded the `complete` flag set to `false` in the response +// and the results may be partial or empty. +// API name: timeout +func (r *TermsEnum) Timeout(duration types.DurationVariant) *TermsEnum { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/request.go new file mode 100644 index 000000000..be39c309e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/request.go @@ -0,0 +1,247 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package termvectors + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// Request holds the request body struct for the package termvectors +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/TermVectorsRequest.ts#L33-L241 +type Request struct { + + // Doc An artificial document (a document not present in the index) for which you + // want to retrieve term vectors. + Doc json.RawMessage `json:"doc,omitempty"` + // FieldStatistics If `true`, the response includes: + // + // * The document count (how many documents contain this field). + // * The sum of document frequencies (the sum of document frequencies for all + // terms in this field). + // * The sum of total term frequencies (the sum of total term frequencies of + // each term in this field). + FieldStatistics *bool `json:"field_statistics,omitempty"` + // Fields A list of fields to include in the statistics. + // It is used as the default list unless a specific field list is provided in + // the `completion_fields` or `fielddata_fields` parameters. + Fields []string `json:"fields,omitempty"` + // Filter Filter terms based on their tf-idf scores. + // This could be useful in order find out a good characteristic vector of a + // document. + // This feature works in a similar manner to the second phase of the More Like + // This Query. + Filter *types.TermVectorsFilter `json:"filter,omitempty"` + // Offsets If `true`, the response includes term offsets. + Offsets *bool `json:"offsets,omitempty"` + // Payloads If `true`, the response includes term payloads. + Payloads *bool `json:"payloads,omitempty"` + // PerFieldAnalyzer Override the default per-field analyzer. + // This is useful in order to generate term vectors in any fashion, especially + // when using artificial documents. + // When providing an analyzer for a field that already stores term vectors, the + // term vectors will be regenerated. + PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` + // Positions If `true`, the response includes term positions. + Positions *bool `json:"positions,omitempty"` + // Routing A custom value that is used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // TermStatistics If `true`, the response includes: + // + // * The total term frequency (how often a term occurs in all documents). + // * The document frequency (the number of documents containing the current + // term). + // + // By default these values are not returned since term statistics can have a + // serious performance impact. + TermStatistics *bool `json:"term_statistics,omitempty"` + // Version If `true`, returns the document version as part of a hit. + Version *int64 `json:"version,omitempty"` + // VersionType The version type. 
+ VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + PerFieldAnalyzer: make(map[string]string, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Termvectors request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "field_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FieldStatistics", err) + } + s.FieldStatistics = &value + case bool: + s.FieldStatistics = &v + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "offsets": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offsets", err) + } + s.Offsets = &value + case bool: + s.Offsets = &v + } + + case "payloads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Payloads", err) + } + s.Payloads = &value + case bool: + s.Payloads = &v + } + + case "per_field_analyzer": + if s.PerFieldAnalyzer == nil { + s.PerFieldAnalyzer = make(map[string]string, 0) + } + if err := dec.Decode(&s.PerFieldAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "PerFieldAnalyzer", err) + } + + case "positions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Positions", err) + } + s.Positions = &value + case bool: + s.Positions = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "term_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TermStatistics", err) + } + s.TermStatistics = &value + case bool: + s.TermStatistics = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/response.go 
b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/response.go new file mode 100644 index 000000000..5a9c1e9a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package termvectors + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package termvectors +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 +type Response struct { + Found bool `json:"found"` + Id_ *string `json:"_id,omitempty"` + Index_ string `json:"_index"` + TermVectors map[string]types.TermVector `json:"term_vectors,omitempty"` + Took int64 `json:"took"` + Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + TermVectors: make(map[string]types.TermVector, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/termvectors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/termvectors.go new file mode 100644 index 000000000..f7ef507c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors/termvectors.go @@ -0,0 +1,718 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get term vector information. +// +// Get information and statistics about terms in the fields of a particular +// document. 
+// +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. +// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. +// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. +// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. +// Refer to the linked documentation for detailed examples of how to use this +// API. +package termvectors + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +const ( + indexMask = iota + 1 + + idMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Termvectors struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTermvectors type alias for index. +type NewTermvectors func(index string) *Termvectors + +// NewTermvectorsFunc returns a new instance of Termvectors with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
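+//
+// A usage sketch (transport, index, document ID, and `ctx` are assumed):
+//
+//	newTV := termvectors.NewTermvectorsFunc(transport)
+//	res, err := newTV("my-index-000001").Id("1").Fields("message").Positions(true).Do(ctx)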
+func NewTermvectorsFunc(tp elastictransport.Interface) NewTermvectors { + return func(index string) *Termvectors { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get term vector information. +// +// Get information and statistics about terms in the fields of a particular +// document. +// +// You can retrieve term vectors for documents stored in the index or for +// artificial documents passed in the body of the request. +// You can specify the fields you are interested in through the `fields` +// parameter or by adding the fields to the request body. +// For example: +// +// ``` +// GET /my-index-000001/_termvectors/1?fields=message +// ``` +// +// Fields can be specified using wildcards, similar to the multi match query. +// +// Term vectors are real-time by default, not near real-time. +// This can be changed by setting `realtime` parameter to `false`. +// +// You can request three types of values: _term information_, _term statistics_, +// and _field statistics_. +// By default, all term information and field statistics are returned for all +// fields but term statistics are excluded. +// +// **Term information** +// +// * term frequency in the field (always returned) +// * term positions (`positions: true`) +// * start and end offsets (`offsets: true`) +// * term payloads (`payloads: true`), as base64 encoded bytes +// +// If the requested information wasn't stored in the index, it will be computed +// on the fly if possible. +// Additionally, term vectors could be computed for documents not even existing +// in the index, but instead provided by the user. +// +// > warn +// > Start and end offsets assume UTF-16 encoding is being used. If you want to +// use these offsets in order to get the original text that produced this token, +// you should make sure that the string you are taking a sub-string of is also +// encoded using UTF-16. +// +// **Behaviour** +// +// The term and field statistics are not accurate. +// Deleted documents are not taken into account. +// The information is only retrieved for the shard the requested document +// resides in. +// The term and field statistics are therefore only useful as relative measures +// whereas the absolute numbers have no meaning in this context. +// By default, when requesting term vectors of artificial documents, a shard to +// get the statistics from is randomly selected. +// Use `routing` only to hit a particular shard. +// Refer to the linked documentation for detailed examples of how to use this +// API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors +func New(tp elastictransport.Interface) *Termvectors { + r := &Termvectors{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Termvectors) Raw(raw io.Reader) *Termvectors { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Termvectors) Request(req *Request) *Termvectors { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Termvectors) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Termvectors: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_termvectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_termvectors") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
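+// The response body is returned unread and must be closed by the caller; a
+// sketch (`tv` is an assumed, fully configured builder):
+//
+//	res, err := tv.Perform(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()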
+func (r Termvectors) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "termvectors") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "termvectors") + if reader := instrument.RecordRequestBody(ctx, "termvectors", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "termvectors") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Termvectors query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a termvectors.Response +func (r Termvectors) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "termvectors") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Termvectors headers map. +func (r *Termvectors) Header(key, value string) *Termvectors { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index that contains the document. +// API Name: index +func (r *Termvectors) _index(index string) *Termvectors { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Id A unique identifier for the document. +// API Name: id +func (r *Termvectors) Id(id string) *Termvectors { + r.paramSet |= idMask + r.id = id + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. 
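+// For example, `_local` prefers shards allocated on the local node (a
+// standard Elasticsearch preference value, cited here for illustration;
+// `tv` is an assumed builder):
+//
+//	tv.Preference("_local")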
+// API name: preference +func (r *Termvectors) Preference(preference string) *Termvectors { + r.values.Set("preference", preference) + + return r +} + +// Realtime If true, the request is real-time as opposed to near-real-time. +// API name: realtime +func (r *Termvectors) Realtime(realtime bool) *Termvectors { + r.values.Set("realtime", strconv.FormatBool(realtime)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Termvectors) ErrorTrace(errortrace bool) *Termvectors { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Termvectors) FilterPath(filterpaths ...string) *Termvectors { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Termvectors) Human(human bool) *Termvectors { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Termvectors) Pretty(pretty bool) *Termvectors { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// An artificial document (a document not present in the index) for which you +// want to retrieve term vectors. +// API name: doc +func (r *Termvectors) Doc(doc any) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := doc.(type) { + case json.RawMessage: + r.req.Doc = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(doc) + if err != nil { + return err + } + r.req.Doc = data + return nil + }) + } + return r +} + +// If `true`, the response includes: +// +// * The document count (how many documents contain this field). +// * The sum of document frequencies (the sum of document frequencies for all +// terms in this field). +// * The sum of total term frequencies (the sum of total term frequencies of +// each term in this field). +// API name: field_statistics +func (r *Termvectors) FieldStatistics(fieldstatistics bool) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FieldStatistics = &fieldstatistics + + return r +} + +// A list of fields to include in the statistics. +// It is used as the default list unless a specific field list is provided in +// the `completion_fields` or `fielddata_fields` parameters. +// API name: fields +func (r *Termvectors) Fields(fields ...string) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Fields = fields + + return r +} + +// Filter terms based on their tf-idf scores. +// This could be useful in order find out a good characteristic vector of a +// document. 
+// This feature works in a similar manner to the second phase of the More Like
+// This Query.
+// API name: filter
+func (r *Termvectors) Filter(filter types.TermVectorsFilterVariant) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Filter = filter.TermVectorsFilterCaster()
+
+	return r
+}
+
+// If `true`, the response includes term offsets.
+// API name: offsets
+func (r *Termvectors) Offsets(offsets bool) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Offsets = &offsets
+
+	return r
+}
+
+// If `true`, the response includes term payloads.
+// API name: payloads
+func (r *Termvectors) Payloads(payloads bool) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Payloads = &payloads
+
+	return r
+}
+
+// Override the default per-field analyzer.
+// This is useful in order to generate term vectors in any fashion, especially
+// when using artificial documents.
+// When providing an analyzer for a field that already stores term vectors, the
+// term vectors will be regenerated.
+// API name: per_field_analyzer
+func (r *Termvectors) PerFieldAnalyzer(perfieldanalyzer map[string]string) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.PerFieldAnalyzer = perfieldanalyzer
+	return r
+}
+
+// AddPerFieldAnalyzer sets the analyzer override for a single field.
+func (r *Termvectors) AddPerFieldAnalyzer(key string, value string) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Allocate the map lazily and write to it directly; writing through a
+	// separate nil map variable here would panic on first use.
+	if r.req.PerFieldAnalyzer == nil {
+		r.req.PerFieldAnalyzer = make(map[string]string)
+	}
+
+	r.req.PerFieldAnalyzer[key] = value
+
+	return r
+}
+
+// If `true`, the response includes term positions.
+// API name: positions
+func (r *Termvectors) Positions(positions bool) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Positions = &positions
+
+	return r
+}
+
+// A custom value that is used to route operations to a specific shard.
+// API name: routing
+func (r *Termvectors) Routing(routing string) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Routing = &routing
+
+	return r
+}
+
+// If `true`, the response includes:
+//
+// * The total term frequency (how often a term occurs in all documents).
+// * The document frequency (the number of documents containing the current
+// term).
+//
+// By default these values are not returned since term statistics can have a
+// serious performance impact.
+// API name: term_statistics
+func (r *Termvectors) TermStatistics(termstatistics bool) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.TermStatistics = &termstatistics
+
+	return r
+}
+
+// If `true`, returns the document version as part of a hit.
+// API name: version
+func (r *Termvectors) Version(versionnumber int64) *Termvectors {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Version = &versionnumber
+
+	return r
+}
+
+// The version type.
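+// Commonly documented values are `internal`, `external`, and `external_gte`;
+// see the versiontype enum package for the authoritative list.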
+// API name: version_type +func (r *Termvectors) VersionType(versiontype versiontype.VersionType) *Termvectors { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.VersionType = &versiontype + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/request.go new file mode 100644 index 000000000..b4e29cfe6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/request.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package update + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update/UpdateRequest.ts#L38-L196 +type Request struct { + + // DetectNoop If `true`, the `result` in the response is set to `noop` (no operation) when + // there are no changes to the document. + DetectNoop *bool `json:"detect_noop,omitempty"` + // Doc A partial update to an existing document. + // If both `doc` and `script` are specified, `doc` is ignored. + Doc json.RawMessage `json:"doc,omitempty"` + // DocAsUpsert If `true`, use the contents of 'doc' as the value of 'upsert'. + // NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. + DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` + // Script The script to run to update the document. + Script *types.Script `json:"script,omitempty"` + // ScriptedUpsert If `true`, run the script whether or not the document exists. + ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` + // Source_ If `false`, turn off source retrieval. + // You can also specify a comma-separated list of the fields you want to + // retrieve. + Source_ types.SourceConfig `json:"_source,omitempty"` + // Upsert If the document does not already exist, the contents of 'upsert' are inserted + // as a new document. + // If the document exists, the 'script' is run. 
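+	//
+	// An illustrative body combining `script` and `upsert` (a JSON sketch,
+	// not generated content):
+	//
+	//	{
+	//	  "script": { "source": "ctx._source.counter += params.count", "params": { "count": 4 } },
+	//	  "upsert": { "counter": 1 }
+	//	}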
+ Upsert json.RawMessage `json:"upsert,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Update request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "detect_noop": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectNoop", err) + } + s.DetectNoop = &value + case bool: + s.DetectNoop = &v + } + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "doc_as_upsert": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocAsUpsert", err) + } + s.DocAsUpsert = &value + case bool: + s.DocAsUpsert = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "scripted_upsert": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptedUpsert", err) + } + s.ScriptedUpsert = &value + case bool: + s.ScriptedUpsert = &v + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := types.NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "upsert": + if err := dec.Decode(&s.Upsert); err != nil { + return fmt.Errorf("%s | %w", "Upsert", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/response.go new file mode 100644 index 000000000..c39ede28b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package update + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update/UpdateResponse.ts#L27-L30 +type Response struct { + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *types.InlineGet `json:"get,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ types.ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/update.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/update.go new file mode 100644 index 000000000..06e86cf95 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/update/update.go @@ -0,0 +1,656 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a document. 
+// +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. +// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. +// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). +// For usage examples such as partial updates, upserts, and scripted updates, +// see the External documentation. +package update + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + idMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Update struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdate type alias for index. +type NewUpdate func(index, id string) *Update + +// NewUpdateFunc returns a new instance of Update with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { + return func(index, id string) *Update { + n := New(tp) + + n._id(id) + + n._index(index) + + return n + } +} + +// Update a document. +// +// Update a document by running a script or passing a partial document. +// +// If the Elasticsearch security features are enabled, you must have the `index` +// or `write` index privilege for the target index or index alias. +// +// The script can update, delete, or skip modifying the document. +// The API also supports passing a partial document, which is merged into the +// existing document. +// To fully replace an existing document, use the index API. +// This operation: +// +// * Gets the document (collocated with the shard) from the index. +// * Runs the specified script. +// * Indexes the result. +// +// The document must still be reindexed, but using this API removes some network +// roundtrips and reduces chances of version conflicts between the GET and the +// index operation. +// +// The `_source` field must be enabled to use this API. 
+// In addition to `_source`, you can access the following variables through the +// `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the +// current timestamp). +// For usage examples such as partial updates, upserts, and scripted updates, +// see the External documentation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update +func New(tp elastictransport.Interface) *Update { + r := &Update{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Update) Raw(raw io.Reader) *Update { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Update) Request(req *Request) *Update { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Update: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|idMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_update") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
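+//
+// As an editorial illustration (not part of the generated documentation), a
+// raw round trip through Perform might look as follows, assuming an
+// *elasticsearch.TypedClient named `client`; most callers will prefer the
+// typed Do helper defined below:
+//
+// ```
+// res, err := client.Update("my-index", "doc-1").
+//     Doc(map[string]any{"counter": 2}).
+//     Perform(context.Background())
+// if err == nil {
+//     defer res.Body.Close()
+// }
+// ```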
+func (r Update) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "update") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "update") + if reader := instrument.RecordRequestBody(ctx, "update", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "update") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Update query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a update.Response +func (r Update) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Update headers map. +func (r *Update) Header(key, value string) *Update { + r.headers.Set(key, value) + + return r +} + +// Id A unique identifier for the document to be updated. +// API Name: id +func (r *Update) _id(id string) *Update { + r.paramSet |= idMask + r.id = id + + return r +} + +// Index The name of the target index. +// By default, the index is created automatically if it doesn't exist. +// API Name: index +func (r *Update) _index(index string) *Update { + r.paramSet |= indexMask + r.index = index + + return r +} + +// IfPrimaryTerm Only perform the operation if the document has this primary term. +// API name: if_primary_term +func (r *Update) IfPrimaryTerm(ifprimaryterm string) *Update { + r.values.Set("if_primary_term", ifprimaryterm) + + return r +} + +// IfSeqNo Only perform the operation if the document has this sequence number. 
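+// Together with `if_primary_term`, this enables optimistic concurrency
+// control: pass the `_seq_no` and `_primary_term` returned by a previous
+// read, and the update fails with a version conflict if the document has
+// changed in the meantime. An illustrative (editorial) sketch, assuming a
+// typed client named `client`:
+//
+// ```
+// res, err := client.Update("my-index", "doc-1").
+//     IfSeqNo("42").
+//     IfPrimaryTerm("1").
+//     Doc(map[string]any{"status": "shipped"}).
+//     Do(ctx)
+// ```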
+// API name: if_seq_no +func (r *Update) IfSeqNo(sequencenumber string) *Update { + r.values.Set("if_seq_no", sequencenumber) + + return r +} + +// IncludeSourceOnError True or false if to include the document source in the error message in case +// of parsing errors. +// API name: include_source_on_error +func (r *Update) IncludeSourceOnError(includesourceonerror bool) *Update { + r.values.Set("include_source_on_error", strconv.FormatBool(includesourceonerror)) + + return r +} + +// Lang The script language. +// API name: lang +func (r *Update) Lang(lang string) *Update { + r.values.Set("lang", lang) + + return r +} + +// Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', it does nothing with refreshes. +// API name: refresh +func (r *Update) Refresh(refresh refresh.Refresh) *Update { + r.values.Set("refresh", refresh.String()) + + return r +} + +// RequireAlias If `true`, the destination must be an index alias. +// API name: require_alias +func (r *Update) RequireAlias(requirealias bool) *Update { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// RetryOnConflict The number of times the operation should be retried when a conflict occurs. +// API name: retry_on_conflict +func (r *Update) RetryOnConflict(retryonconflict int) *Update { + r.values.Set("retry_on_conflict", strconv.Itoa(retryonconflict)) + + return r +} + +// Routing A custom value used to route operations to a specific shard. +// API name: routing +func (r *Update) Routing(routing string) *Update { + r.values.Set("routing", routing) + + return r +} + +// Timeout The period to wait for the following operations: dynamic mapping updates and +// waiting for active shards. +// Elasticsearch waits for at least the timeout period before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// API name: timeout +func (r *Update) Timeout(duration string) *Update { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of copies of each shard that must be active before proceeding with +// the operation. +// Set to 'all' or any positive integer up to the total number of shards in the +// index (`number_of_replicas`+1). +// The default value of `1` means it waits for each primary shard to be active. +// API name: wait_for_active_shards +func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// SourceExcludes_ The source fields you want to exclude. +// API name: _source_excludes +func (r *Update) SourceExcludes_(fields ...string) *Update { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ The source fields you want to retrieve. +// API name: _source_includes +func (r *Update) SourceIncludes_(fields ...string) *Update { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Update) ErrorTrace(errortrace bool) *Update { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
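+// For example, `FilterPath("result", "_seq_no", "_primary_term")` would trim
+// the update response down to just those fields (illustrative; any
+// dot-notation paths into the response body may be used).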
+// API name: filter_path +func (r *Update) FilterPath(filterpaths ...string) *Update { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Update) Human(human bool) *Update { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Update) Pretty(pretty bool) *Update { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If `true`, the `result` in the response is set to `noop` (no operation) when +// there are no changes to the document. +// API name: detect_noop +func (r *Update) DetectNoop(detectnoop bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DetectNoop = &detectnoop + + return r +} + +// A partial update to an existing document. +// If both `doc` and `script` are specified, `doc` is ignored. +// API name: doc +func (r *Update) Doc(doc any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := doc.(type) { + case json.RawMessage: + r.req.Doc = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(doc) + if err != nil { + return err + } + r.req.Doc = data + return nil + }) + } + return r +} + +// If `true`, use the contents of 'doc' as the value of 'upsert'. +// NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. +// API name: doc_as_upsert +func (r *Update) DocAsUpsert(docasupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DocAsUpsert = &docasupsert + + return r +} + +// The script to run to update the document. +// API name: script +func (r *Update) Script(script types.ScriptVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Script = script.ScriptCaster() + + return r +} + +// If `true`, run the script whether or not the document exists. +// API name: scripted_upsert +func (r *Update) ScriptedUpsert(scriptedupsert bool) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ScriptedUpsert = &scriptedupsert + + return r +} + +// If `false`, turn off source retrieval. +// You can also specify a comma-separated list of the fields you want to +// retrieve. +// API name: _source +func (r *Update) Source_(sourceconfig types.SourceConfigVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = *sourceconfig.SourceConfigCaster() + + return r +} + +// If the document does not already exist, the contents of 'upsert' are inserted +// as a new document. +// If the document exists, the 'script' is run. 
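+// An illustrative (editorial) sketch, assuming a typed client named `client`
+// and a types.ScriptVariant named `script` built elsewhere:
+//
+// ```
+// res, err := client.Update("my-index", "doc-1").
+//     Script(script).                        // runs when the document exists
+//     Upsert(map[string]any{"counter": 0}).  // indexed when it does not
+//     Do(ctx)
+// ```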
+// API name: upsert +func (r *Update) Upsert(upsert any) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := upsert.(type) { + case json.RawMessage: + r.req.Upsert = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(upsert) + if err != nil { + return err + } + r.req.Upsert = data + return nil + }) + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/request.go new file mode 100644 index 000000000..d6a378991 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/request.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatebyquery + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts" +) + +// Request holds the request body struct for the package updatebyquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L349 +type Request struct { + + // Conflicts The preferred behavior when update by query hits version conflicts: `abort` + // or `proceed`. + Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` + // MaxDocs The maximum number of documents to update. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Query The documents to update using the Query DSL. + Query *types.Query `json:"query,omitempty"` + // Script The script to run to update the document source or metadata when updating. + Script *types.Script `json:"script,omitempty"` + // Slice Slice the request manually using the provided slice ID and total number of + // slices. 
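+	// For example, slice 0 of 2 corresponds to the body fragment
+	// `{"slice": {"id": 0, "max": 2}}` (illustrative; each slice processes a
+	// disjoint subset of the matching documents).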
+ Slice *types.SlicedScroll `json:"slice,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatebyquery request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/response.go new file mode 100644 index 000000000..a71e18084 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/response.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatebyquery + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatebyquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L67 +type Response struct { + + // Batches The number of scroll responses pulled back by the update by query. + Batches *int64 `json:"batches,omitempty"` + // Deleted The number of documents that were successfully deleted. + Deleted *int64 `json:"deleted,omitempty"` + // Failures Array of failures if there were any unrecoverable errors during the process. + // If this is non-empty then the request ended because of those failures. + // Update by query is implemented using batches. + // Any failure causes the entire process to end, but all failures in the current + // batch are collected into the array. + // You can use the `conflicts` option to prevent reindex from ending when + // version conflicts occur. + Failures []types.BulkIndexByScrollFailure `json:"failures,omitempty"` + // Noops The number of documents that were ignored because the script used for the + // update by query returned a noop value for `ctx.op`. + Noops *int64 `json:"noops,omitempty"` + // RequestsPerSecond The number of requests per second effectively run during the update by query. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Retries The number of retries attempted by update by query. + // `bulk` is the number of bulk actions retried. + // `search` is the number of search actions retried. 
+ Retries *types.Retries `json:"retries,omitempty"` + Task *string `json:"task,omitempty"` + Throttled types.Duration `json:"throttled,omitempty"` + // ThrottledMillis The number of milliseconds the request slept to conform to + // `requests_per_second`. + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` + ThrottledUntil types.Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in an _update_by_query response. + // It only has meaning when using the task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be run again in order + // to conform to `requests_per_second`. + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` + // TimedOut If true, some requests timed out during the update by query. + TimedOut *bool `json:"timed_out,omitempty"` + // Took The number of milliseconds from start to end of the whole operation. + Took *int64 `json:"took,omitempty"` + // Total The number of documents that were successfully processed. + Total *int64 `json:"total,omitempty"` + // Updated The number of documents that were successfully updated. + Updated *int64 `json:"updated,omitempty"` + // VersionConflicts The number of version conflicts that the update by query hit. + VersionConflicts *int64 `json:"version_conflicts,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/update_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/update_by_query.go new file mode 100644 index 000000000..132fb05ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery/update_by_query.go @@ -0,0 +1,1027 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update documents. +// Updates documents that match the specified query. +// If no query is specified, performs an update on every document in the data +// stream or index without modifying the source, which is useful for picking up +// mapping changes. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. 
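+//
+// For example, the URI parameter `q=user:kimchy` (the Lucene query string
+// syntax accepted via the `q` option) is equivalent to sending the
+// corresponding `query_string` query in the request body.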
+// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. +// A bulk update request is performed for each batch of matching documents. +// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Refreshing shards** +// +// Specifying the `refresh` parameter refreshes all shards once the request +// completes. +// This is different to the update API's `refresh` parameter, which causes only +// the shard +// that received the request to be refreshed. Unlike the update API, it does not +// support +// `wait_for`. +// +// **Running update by query asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch +// performs some preflight checks, launches the request, and returns a +// [task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) +// you can use to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. +// +// **Waiting for active shards** +// +// `wait_for_active_shards` controls how many copies of a shard must be active +// before proceeding with the request. See +// [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards) +// for details. `timeout` controls how long each write request waits for +// unavailable +// shards to become available. Both work exactly the way they work in the +// [Bulk +// API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). +// Update by query uses scrolled searches, so you can also +// specify the `scroll` parameter to control how long it keeps the search +// context +// alive, for example `?scroll=10m`. The default is 5 minutes. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. 
+// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. +// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. +// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. 
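+//
+// As an editorial illustration (not generated from the specification), a
+// throttled, conflict-tolerant update by query might be assembled like this,
+// assuming a typed client named `client` and a *types.Query named `q` built
+// elsewhere:
+//
+// ```
+// res, err := client.UpdateByQuery("my-index").
+//     Query(q).
+//     Conflicts(conflicts.Proceed).
+//     RequestsPerSecond("100").
+//     Slices("auto").
+//     Do(context.Background())
+// ```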
+// Refer to the linked documentation for examples of how to update documents +// using the `_update_by_query` API: +package updatebyquery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateByQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateByQuery type alias for index. +type NewUpdateByQuery func(index string) *UpdateByQuery + +// NewUpdateByQueryFunc returns a new instance of UpdateByQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateByQueryFunc(tp elastictransport.Interface) NewUpdateByQuery { + return func(index string) *UpdateByQuery { + n := New(tp) + + n._index(index) + + return n + } +} + +// Update documents. +// Updates documents that match the specified query. +// If no query is specified, performs an update on every document in the data +// stream or index without modifying the source, which is useful for picking up +// mapping changes. +// +// If the Elasticsearch security features are enabled, you must have the +// following index privileges for the target data stream, index, or alias: +// +// * `read` +// * `index` or `write` +// +// You can specify the query criteria in the request URI or the request body +// using the same syntax as the search API. +// +// When you submit an update by query request, Elasticsearch gets a snapshot of +// the data stream or index when it begins processing the request and updates +// matching documents using internal versioning. +// When the versions match, the document is updated and the version number is +// incremented. +// If a document changes between the time that the snapshot is taken and the +// update operation is processed, it results in a version conflict and the +// operation fails. +// You can opt to count version conflicts instead of halting and returning by +// setting `conflicts` to `proceed`. +// Note that if you opt to count version conflicts, the operation could attempt +// to update more documents from the source than `max_docs` until it has +// successfully updated `max_docs` documents or it has gone through every +// document in the source query. +// +// NOTE: Documents with a version equal to 0 cannot be updated using update by +// query because internal versioning does not support 0 as a valid version +// number. +// +// While processing an update by query request, Elasticsearch performs multiple +// search requests sequentially to find all of the matching documents. 
+// A bulk update request is performed for each batch of matching documents. +// Any query or update failures cause the update by query request to fail and +// the failures are shown in the response. +// Any update requests that completed successfully still stick, they are not +// rolled back. +// +// **Refreshing shards** +// +// Specifying the `refresh` parameter refreshes all shards once the request +// completes. +// This is different to the update API's `refresh` parameter, which causes only +// the shard +// that received the request to be refreshed. Unlike the update API, it does not +// support +// `wait_for`. +// +// **Running update by query asynchronously** +// +// If the request contains `wait_for_completion=false`, Elasticsearch +// performs some preflight checks, launches the request, and returns a +// [task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) +// you can use to cancel or get the status of the task. +// Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. +// +// **Waiting for active shards** +// +// `wait_for_active_shards` controls how many copies of a shard must be active +// before proceeding with the request. See +// [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards) +// for details. `timeout` controls how long each write request waits for +// unavailable +// shards to become available. Both work exactly the way they work in the +// [Bulk +// API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). +// Update by query uses scrolled searches, so you can also +// specify the `scroll` parameter to control how long it keeps the search +// context +// alive, for example `?scroll=10m`. The default is 5 minutes. +// +// **Throttling update requests** +// +// To control the rate at which update by query issues batches of update +// operations, you can set `requests_per_second` to any positive decimal number. +// This pads each batch with a wait time to throttle the rate. +// Set `requests_per_second` to `-1` to turn off throttling. +// +// Throttling uses a wait time between batches so that the internal scroll +// requests can be given a timeout that takes the request padding into account. +// The padding time is the difference between the batch size divided by the +// `requests_per_second` and the time spent writing. +// By default the batch size is 1000, so if `requests_per_second` is set to +// `500`: +// +// ``` +// target_time = 1000 / 500 per second = 2 seconds +// wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +// ``` +// +// Since the batch is issued as a single _bulk request, large batch sizes cause +// Elasticsearch to create many requests and wait before starting the next set. +// This is "bursty" instead of "smooth". +// +// **Slicing** +// +// Update by query supports sliced scroll to parallelize the update process. +// This can improve efficiency and provide a convenient way to break the request +// down into smaller parts. +// +// Setting `slices` to `auto` chooses a reasonable number for most data streams +// and indices. +// This setting will use one slice per shard, up to a certain limit. +// If there are multiple source data streams or indices, it will choose the +// number of slices based on the index or backing index with the smallest number +// of shards. 
+// +// Adding `slices` to `_update_by_query` just automates the manual process of +// creating sub-requests, which means it has some quirks: +// +// * You can see these requests in the tasks APIs. These sub-requests are +// "child" tasks of the task for the request with slices. +// * Fetching the status of the task for the request with `slices` only contains +// the status of completed slices. +// * These sub-requests are individually addressable for things like +// cancellation and rethrottling. +// * Rethrottling the request with `slices` will rethrottle the unfinished +// sub-request proportionally. +// * Canceling the request with slices will cancel each sub-request. +// * Due to the nature of slices each sub-request won't get a perfectly even +// portion of the documents. All documents will be addressed, but some slices +// may be larger than others. Expect larger slices to have a more even +// distribution. +// * Parameters like `requests_per_second` and `max_docs` on a request with +// slices are distributed proportionally to each sub-request. Combine that with +// the point above about distribution being uneven and you should conclude that +// using `max_docs` with `slices` might not result in exactly `max_docs` +// documents being updated. +// * Each sub-request gets a slightly different snapshot of the source data +// stream or index though these are all taken at approximately the same time. +// +// If you're slicing manually or otherwise tuning automatic slicing, keep in +// mind that: +// +// * Query performance is most efficient when the number of slices is equal to +// the number of shards in the index or backing index. If that number is large +// (for example, 500), choose a lower number as too many slices hurts +// performance. Setting slices higher than the number of shards generally does +// not improve efficiency and adds overhead. +// * Update performance scales linearly across available resources with the +// number of slices. +// +// Whether query or update performance dominates the runtime depends on the +// documents being reindexed and cluster resources. +// Refer to the linked documentation for examples of how to update documents +// using the `_update_by_query` API: +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query +func New(tp elastictransport.Interface) *UpdateByQuery { + r := &UpdateByQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateByQuery) Raw(raw io.Reader) *UpdateByQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateByQuery) Request(req *Request) *UpdateByQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
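+//
+// Note that a body supplied via Raw is used verbatim and takes precedence
+// over anything set through Request or the fluent setters. An illustrative
+// (editorial) sketch that only builds the request without sending it:
+//
+// ```
+// req, err := client.UpdateByQuery("my-index").
+//     Raw(strings.NewReader(`{"conflicts":"proceed"}`)).
+//     HttpRequest(context.Background())
+// ```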
+func (r *UpdateByQuery) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for UpdateByQuery: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == indexMask:
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.index)
+		}
+		path.WriteString(r.index)
+		path.WriteString("/")
+		path.WriteString("_update_by_query")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r UpdateByQuery) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "update_by_query")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "update_by_query")
+		if reader := instrument.RecordRequestBody(ctx, "update_by_query", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "update_by_query")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the UpdateByQuery query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns an updatebyquery.Response
+func (r UpdateByQuery) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "update_by_query")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the UpdateByQuery headers map.
+func (r *UpdateByQuery) Header(key, value string) *UpdateByQuery {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams, indices, and aliases to search.
+// It supports wildcards (`*`).
+// To search all data streams or indices, omit this parameter or use `*` or
+// `_all`.
+// API Name: index
+func (r *UpdateByQuery) _index(index string) *UpdateByQuery {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index
+// alias, or `_all` value targets only missing or closed indices.
+// This behavior applies even if the request targets other open indices.
+// For example, a request targeting `foo*,bar*` returns an error if an index
+// starts with `foo` but no index starts with `bar`.
+// API name: allow_no_indices
+func (r *UpdateByQuery) AllowNoIndices(allownoindices bool) *UpdateByQuery {
+	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))
+
+	return r
+}
+
+// Analyzer The analyzer to use for the query string.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: analyzer
+func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery {
+	r.values.Set("analyzer", analyzer)
+
+	return r
+}
+
+// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: analyze_wildcard
+func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery {
+	r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard))
+
+	return r
+}
+
+// DefaultOperator The default operator for query string query: `AND` or `OR`.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: default_operator
+func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *UpdateByQuery {
+	r.values.Set("default_operator", defaultoperator.String())
+
+	return r
+}
+
+// Df The field to use as default where no field prefix is given in the query
+// string.
+// This parameter can be used only when the `q` query string parameter is
+// specified.
+// API name: df
+func (r *UpdateByQuery) Df(df string) *UpdateByQuery {
+	r.values.Set("df", df)
+
+	return r
+}
+
+// ExpandWildcards The type of index that wildcard patterns can match.
+// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *UpdateByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateByQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// From Skips the specified number of documents. +// API name: from +func (r *UpdateByQuery) From(from string) *UpdateByQuery { + r.values.Set("from", from) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *UpdateByQuery) IgnoreUnavailable(ignoreunavailable bool) *UpdateByQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. +// This parameter can be used only when the `q` query string parameter is +// specified. +// API name: lenient +func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// Pipeline The ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. +// API name: pipeline +func (r *UpdateByQuery) Pipeline(pipeline string) *UpdateByQuery { + r.values.Set("pipeline", pipeline) + + return r +} + +// Preference The node or shard the operation should be performed on. +// It is random by default. +// API name: preference +func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { + r.values.Set("preference", preference) + + return r +} + +// Q A query in the Lucene query string syntax. +// API name: q +func (r *UpdateByQuery) Q(q string) *UpdateByQuery { + r.values.Set("q", q) + + return r +} + +// Refresh If `true`, Elasticsearch refreshes affected shards to make the operation +// visible to search after the request completes. +// This is different than the update API's `refresh` parameter, which causes +// just the shard that received the request to be refreshed. +// API name: refresh +func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// RequestCache If `true`, the request cache is used for this request. +// It defaults to the index-level setting. +// API name: request_cache +func (r *UpdateByQuery) RequestCache(requestcache bool) *UpdateByQuery { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) + + return r +} + +// RequestsPerSecond The throttle for this request in sub-requests per second. +// API name: requests_per_second +func (r *UpdateByQuery) RequestsPerSecond(requestspersecond string) *UpdateByQuery { + r.values.Set("requests_per_second", requestspersecond) + + return r +} + +// Routing A custom value used to route operations to a specific shard. 
+// API name: routing +func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { + r.values.Set("routing", routing) + + return r +} + +// Scroll The period to retain the search context for scrolling. +// API name: scroll +func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { + r.values.Set("scroll", duration) + + return r +} + +// ScrollSize The size of the scroll request that powers the operation. +// API name: scroll_size +func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { + r.values.Set("scroll_size", scrollsize) + + return r +} + +// SearchTimeout An explicit timeout for each search request. +// By default, there is no timeout. +// API name: search_timeout +func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { + r.values.Set("search_timeout", duration) + + return r +} + +// SearchType The type of the search operation. Available options include +// `query_then_fetch` and `dfs_query_then_fetch`. +// API name: search_type +func (r *UpdateByQuery) SearchType(searchtype searchtype.SearchType) *UpdateByQuery { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// Slices The number of slices this task should be divided into. +// API name: slices +func (r *UpdateByQuery) Slices(slices string) *UpdateByQuery { + r.values.Set("slices", slices) + + return r +} + +// Sort A comma-separated list of : pairs. +// API name: sort +func (r *UpdateByQuery) Sort(sorts ...string) *UpdateByQuery { + tmp := []string{} + for _, item := range sorts { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("sort", strings.Join(tmp, ",")) + + return r +} + +// Stats The specific `tag` of the request for logging and statistical purposes. +// API name: stats +func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { + tmp := []string{} + for _, item := range stats { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("stats", strings.Join(tmp, ",")) + + return r +} + +// TerminateAfter The maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// +// IMPORTANT: Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. +// API name: terminate_after +func (r *UpdateByQuery) TerminateAfter(terminateafter string) *UpdateByQuery { + r.values.Set("terminate_after", terminateafter) + + return r +} + +// Timeout The period each update request waits for the following operations: dynamic +// mapping updates, waiting for active shards. +// By default, it is one minute. +// This guarantees Elasticsearch waits for at least the timeout before failing. +// The actual wait time could be longer, particularly when multiple waits occur. +// API name: timeout +func (r *UpdateByQuery) Timeout(duration string) *UpdateByQuery { + r.values.Set("timeout", duration) + + return r +} + +// Version If `true`, returns the document version as part of a hit. 
+// API name: version +func (r *UpdateByQuery) Version(version bool) *UpdateByQuery { + r.values.Set("version", strconv.FormatBool(version)) + + return r +} + +// VersionType Should the document increment the version number (internal) on hit or not +// (reindex) +// API name: version_type +func (r *UpdateByQuery) VersionType(versiontype bool) *UpdateByQuery { + r.values.Set("version_type", strconv.FormatBool(versiontype)) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// The `timeout` parameter controls how long each write request waits for +// unavailable shards to become available. +// Both work exactly the way they work in the bulk API. +// API name: wait_for_active_shards +func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateByQuery { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the operation is complete. +// If `false`, Elasticsearch performs some preflight checks, launches the +// request, and returns a task ID that you can use to cancel or get the status +// of the task. +// Elasticsearch creates a record of this task as a document at +// `.tasks/task/${taskId}`. +// API name: wait_for_completion +func (r *UpdateByQuery) WaitForCompletion(waitforcompletion bool) *UpdateByQuery { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateByQuery) ErrorTrace(errortrace bool) *UpdateByQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateByQuery) FilterPath(filterpaths ...string) *UpdateByQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateByQuery) Human(human bool) *UpdateByQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateByQuery) Pretty(pretty bool) *UpdateByQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The preferred behavior when update by query hits version conflicts: `abort` +// or `proceed`. +// API name: conflicts +func (r *UpdateByQuery) Conflicts(conflicts conflicts.Conflicts) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Conflicts = &conflicts + return r +} + +// The maximum number of documents to update. 
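+// As an illustrative sketch, a capped, conflict-tolerant run combines this
+// setter with Conflicts (both shown in this file):
+//
+//	r.Conflicts(conflicts.Proceed).MaxDocs(1000)
+//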
+// API name: max_docs +func (r *UpdateByQuery) MaxDocs(maxdocs int64) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxDocs = &maxdocs + + return r +} + +// The documents to update using the Query DSL. +// API name: query +func (r *UpdateByQuery) Query(query types.QueryVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// The script to run to update the document source or metadata when updating. +// API name: script +func (r *UpdateByQuery) Script(script types.ScriptVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Script = script.ScriptCaster() + + return r +} + +// Slice the request manually using the provided slice ID and total number of +// slices. +// API name: slice +func (r *UpdateByQuery) Slice(slice types.SlicedScrollVariant) *UpdateByQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Slice = slice.SlicedScrollCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/response.go new file mode 100644 index 000000000..c2bf9ddfa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

package updatebyqueryrethrottle

import (
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// Response holds the response body struct for the package updatebyqueryrethrottle
//
// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25
type Response struct {
	Nodes map[string]types.UpdateByQueryRethrottleNode `json:"nodes"`
}

// NewResponse returns a Response
func NewResponse() *Response {
	r := &Response{
		Nodes: make(map[string]types.UpdateByQueryRethrottleNode, 0),
	}
	return r
}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go
new file mode 100644
index 000000000..df381bbe7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go
@@ -0,0 +1,365 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Throttle an update by query operation.
+//
+// Change the number of requests per second for a particular update by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+package updatebyqueryrethrottle
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	taskidMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type UpdateByQueryRethrottle struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	taskid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewUpdateByQueryRethrottle type alias for index.
+type NewUpdateByQueryRethrottle func(taskid string) *UpdateByQueryRethrottle
+
+// NewUpdateByQueryRethrottleFunc returns a new instance of UpdateByQueryRethrottle with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewUpdateByQueryRethrottleFunc(tp elastictransport.Interface) NewUpdateByQueryRethrottle {
+	return func(taskid string) *UpdateByQueryRethrottle {
+		n := New(tp)
+
+		n._taskid(taskid)
+
+		return n
+	}
+}
+
+// Throttle an update by query operation.
+//
+// Change the number of requests per second for a particular update by query
+// operation.
+// Rethrottling that speeds up the query takes effect immediately but
+// rethrottling that slows down the query takes effect after completing the
+// current batch to prevent scroll timeouts.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle
+func New(tp elastictransport.Interface) *UpdateByQueryRethrottle {
+	r := &UpdateByQueryRethrottle{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *UpdateByQueryRethrottle) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == taskidMask:
+		path.WriteString("/")
+		path.WriteString("_update_by_query")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "taskid", r.taskid)
+		}
+		path.WriteString(r.taskid)
+		path.WriteString("/")
+		path.WriteString("_rethrottle")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r UpdateByQueryRethrottle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "update_by_query_rethrottle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "update_by_query_rethrottle") + if reader := instrument.RecordRequestBody(ctx, "update_by_query_rethrottle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "update_by_query_rethrottle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateByQueryRethrottle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatebyqueryrethrottle.Response +func (r UpdateByQueryRethrottle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
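+//
+// An illustrative sketch, assuming a configured transport tp and a
+// hypothetical task ID, that disables throttling and checks only the status
+// code:
+//
+//	ok, err := NewUpdateByQueryRethrottleFunc(tp)("oTUltX4IQMOUUVeiohTt8A:124").
+//		RequestsPerSecond("-1").
+//		IsSuccess(context.Background())
+//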
+func (r UpdateByQueryRethrottle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "update_by_query_rethrottle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the UpdateByQueryRethrottle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the UpdateByQueryRethrottle headers map. +func (r *UpdateByQueryRethrottle) Header(key, value string) *UpdateByQueryRethrottle { + r.headers.Set(key, value) + + return r +} + +// TaskId The ID for the task. +// API Name: taskid +func (r *UpdateByQueryRethrottle) _taskid(taskid string) *UpdateByQueryRethrottle { + r.paramSet |= taskidMask + r.taskid = taskid + + return r +} + +// RequestsPerSecond The throttle for this request in sub-requests per second. +// To turn off throttling, set it to `-1`. +// API name: requests_per_second +func (r *UpdateByQueryRethrottle) RequestsPerSecond(requestspersecond string) *UpdateByQueryRethrottle { + r.values.Set("requests_per_second", requestspersecond) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateByQueryRethrottle) ErrorTrace(errortrace bool) *UpdateByQueryRethrottle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateByQueryRethrottle) FilterPath(filterpaths ...string) *UpdateByQueryRethrottle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateByQueryRethrottle) Human(human bool) *UpdateByQueryRethrottle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *UpdateByQueryRethrottle) Pretty(pretty bool) *UpdateByQueryRethrottle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go new file mode 100644 index 000000000..3c0ec581a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go @@ -0,0 +1,378 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a dangling index. +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +package deletedanglingindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexuuidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDanglingIndex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + indexuuid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDanglingIndex type alias for index. +type NewDeleteDanglingIndex func(indexuuid string) *DeleteDanglingIndex + +// NewDeleteDanglingIndexFunc returns a new instance of DeleteDanglingIndex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDanglingIndexFunc(tp elastictransport.Interface) NewDeleteDanglingIndex { + return func(indexuuid string) *DeleteDanglingIndex { + n := New(tp) + + n._indexuuid(indexuuid) + + return n + } +} + +// Delete a dangling index. +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. 
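+//
+// An illustrative sketch, assuming a configured transport tp and a
+// hypothetical index UUID taken from the list dangling indices API:
+//
+//	res, err := NewDeleteDanglingIndexFunc(tp)("zmM4e0JtBkeUjiHD-MihPQ").
+//		AcceptDataLoss(true).
+//		Do(context.Background())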
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index +func New(tp elastictransport.Interface) *DeleteDanglingIndex { + r := &DeleteDanglingIndex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDanglingIndex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexuuidMask: + path.WriteString("/") + path.WriteString("_dangling") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "indexuuid", r.indexuuid) + } + path.WriteString(r.indexuuid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
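+// Most callers use Do, which decodes the body into a typed Response and
+// closes it; Perform returns the raw *http.Response, whose body the caller
+// must close. For example:
+//
+//	raw, err := r.Perform(ctx) // the caller owns raw.Body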
+func (r DeleteDanglingIndex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "dangling_indices.delete_dangling_index") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "dangling_indices.delete_dangling_index") + if reader := instrument.RecordRequestBody(ctx, "dangling_indices.delete_dangling_index", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.delete_dangling_index") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteDanglingIndex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedanglingindex.Response +func (r DeleteDanglingIndex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.delete_dangling_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r DeleteDanglingIndex) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "dangling_indices.delete_dangling_index")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteDanglingIndex query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteDanglingIndex headers map.
+func (r *DeleteDanglingIndex) Header(key, value string) *DeleteDanglingIndex {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// IndexUuid The UUID of the index to delete. Use the get dangling indices API to find the
+// UUID.
+// API Name: indexuuid
+func (r *DeleteDanglingIndex) _indexuuid(indexuuid string) *DeleteDanglingIndex {
+	r.paramSet |= indexuuidMask
+	r.indexuuid = indexuuid
+
+	return r
+}
+
+// AcceptDataLoss This parameter must be set to true to acknowledge that it will no longer be
+// possible to recover data from the dangling index.
+// API name: accept_data_loss
+func (r *DeleteDanglingIndex) AcceptDataLoss(acceptdataloss bool) *DeleteDanglingIndex {
+	r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss))
+
+	return r
+}
+
+// MasterTimeout Specify timeout for connection to master
+// API name: master_timeout
+func (r *DeleteDanglingIndex) MasterTimeout(duration string) *DeleteDanglingIndex {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout Explicit operation timeout
+// API name: timeout
+func (r *DeleteDanglingIndex) Timeout(duration string) *DeleteDanglingIndex {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteDanglingIndex) ErrorTrace(errortrace bool) *DeleteDanglingIndex {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *DeleteDanglingIndex) FilterPath(filterpaths ...string) *DeleteDanglingIndex {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *DeleteDanglingIndex) Human(human bool) *DeleteDanglingIndex {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty +func (r *DeleteDanglingIndex) Pretty(pretty bool) *DeleteDanglingIndex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/response.go new file mode 100644 index 000000000..4ef1919ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedanglingindex + +// Response holds the response body struct for the package deletedanglingindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/import_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/import_dangling_index.go new file mode 100644 index 000000000..2e8b6f016 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/import_dangling_index.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Import a dangling index. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +package importdanglingindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexuuidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ImportDanglingIndex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + indexuuid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewImportDanglingIndex type alias for index. +type NewImportDanglingIndex func(indexuuid string) *ImportDanglingIndex + +// NewImportDanglingIndexFunc returns a new instance of ImportDanglingIndex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewImportDanglingIndexFunc(tp elastictransport.Interface) NewImportDanglingIndex { + return func(indexuuid string) *ImportDanglingIndex { + n := New(tp) + + n._indexuuid(indexuuid) + + return n + } +} + +// Import a dangling index. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index +func New(tp elastictransport.Interface) *ImportDanglingIndex { + r := &ImportDanglingIndex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ImportDanglingIndex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexuuidMask: + path.WriteString("/") + path.WriteString("_dangling") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "indexuuid", r.indexuuid) + } + path.WriteString(r.indexuuid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ImportDanglingIndex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "dangling_indices.import_dangling_index") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "dangling_indices.import_dangling_index") + if reader := instrument.RecordRequestBody(ctx, "dangling_indices.import_dangling_index", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.import_dangling_index") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ImportDanglingIndex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a importdanglingindex.Response +func (r ImportDanglingIndex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.import_dangling_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + 
return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ImportDanglingIndex) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.import_dangling_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ImportDanglingIndex query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ImportDanglingIndex headers map. +func (r *ImportDanglingIndex) Header(key, value string) *ImportDanglingIndex { + r.headers.Set(key, value) + + return r +} + +// IndexUuid The UUID of the index to import. Use the get dangling indices API to locate +// the UUID. +// API Name: indexuuid +func (r *ImportDanglingIndex) _indexuuid(indexuuid string) *ImportDanglingIndex { + r.paramSet |= indexuuidMask + r.indexuuid = indexuuid + + return r +} + +// AcceptDataLoss This parameter must be set to true to import a dangling index. +// Because Elasticsearch cannot know where the dangling index data came from or +// determine which shard copies are fresh and which are stale, it cannot +// guarantee that the imported data represents the latest state of the index +// when it was last in the cluster. +// API name: accept_data_loss +func (r *ImportDanglingIndex) AcceptDataLoss(acceptdataloss bool) *ImportDanglingIndex { + r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss)) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *ImportDanglingIndex) MasterTimeout(duration string) *ImportDanglingIndex { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Explicit operation timeout +// API name: timeout +func (r *ImportDanglingIndex) Timeout(duration string) *ImportDanglingIndex { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ImportDanglingIndex) ErrorTrace(errortrace bool) *ImportDanglingIndex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
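+// For example, to keep only the acknowledgement flag in the response body:
+//
+//	r.FilterPath("acknowledged")
+//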
+// API name: filter_path +func (r *ImportDanglingIndex) FilterPath(filterpaths ...string) *ImportDanglingIndex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ImportDanglingIndex) Human(human bool) *ImportDanglingIndex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ImportDanglingIndex) Pretty(pretty bool) *ImportDanglingIndex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/response.go new file mode 100644 index 000000000..0825ad959 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package importdanglingindex + +// Response holds the response body struct for the package importdanglingindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go new file mode 100644 index 000000000..1a8d73245 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the dangling indices. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +// +// Use this API to list dangling indices, which you can then import or delete. +package listdanglingindices + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ListDanglingIndices struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewListDanglingIndices type alias for index. +type NewListDanglingIndices func() *ListDanglingIndices + +// NewListDanglingIndicesFunc returns a new instance of ListDanglingIndices with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListDanglingIndicesFunc(tp elastictransport.Interface) NewListDanglingIndices { + return func() *ListDanglingIndices { + n := New(tp) + + return n + } +} + +// Get the dangling indices. +// +// If Elasticsearch encounters index data that is absent from the current +// cluster state, those indices are considered to be dangling. +// For example, this can happen if you delete more than +// `cluster.indices.tombstones.size` indices while an Elasticsearch node is +// offline. +// +// Use this API to list dangling indices, which you can then import or delete. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices +func New(tp elastictransport.Interface) *ListDanglingIndices { + r := &ListDanglingIndices{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
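+//
+// It builds the request without executing it; for this endpoint the path is
+// always "/_dangling". An illustrative sketch, assuming a configured
+// transport tp:
+//
+//	req, err := NewListDanglingIndicesFunc(tp)().HttpRequest(context.Background())
+//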
+func (r *ListDanglingIndices) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_dangling") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ListDanglingIndices) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "dangling_indices.list_dangling_indices") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "dangling_indices.list_dangling_indices") + if reader := instrument.RecordRequestBody(ctx, "dangling_indices.list_dangling_indices", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "dangling_indices.list_dangling_indices") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ListDanglingIndices query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a listdanglingindices.Response +func (r ListDanglingIndices) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.list_dangling_indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ListDanglingIndices) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "dangling_indices.list_dangling_indices") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ListDanglingIndices query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ListDanglingIndices headers map. +func (r *ListDanglingIndices) Header(key, value string) *ListDanglingIndices { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ListDanglingIndices) ErrorTrace(errortrace bool) *ListDanglingIndices { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ListDanglingIndices) FilterPath(filterpaths ...string) *ListDanglingIndices { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ListDanglingIndices) Human(human bool) *ListDanglingIndices { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ListDanglingIndices) Pretty(pretty bool) *ListDanglingIndices { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/response.go new file mode 100644 index 000000000..5644daa1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package listdanglingindices + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package listdanglingindices +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 +type Response struct { + DanglingIndices []types.DanglingIndex `json:"dangling_indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/delete_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/delete_policy.go new file mode 100644 index 000000000..9d9b9d512 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/delete_policy.go @@ -0,0 +1,354 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an enrich policy. 
+// Deletes an existing enrich policy and its enrich index. +package deletepolicy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeletePolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeletePolicy type alias for index. +type NewDeletePolicy func(name string) *DeletePolicy + +// NewDeletePolicyFunc returns a new instance of DeletePolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeletePolicyFunc(tp elastictransport.Interface) NewDeletePolicy { + return func(name string) *DeletePolicy { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete an enrich policy. +// Deletes an existing enrich policy and its enrich index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy +func New(tp elastictransport.Interface) *DeletePolicy { + r := &DeletePolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeletePolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeletePolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "enrich.delete_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.delete_policy") + if reader := instrument.RecordRequestBody(ctx, "enrich.delete_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.delete_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeletePolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletepolicy.Response +func (r DeletePolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.delete_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
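The `IsSuccess` contract documented above treats a 404 as `(false, nil)` rather than an error, which makes it convenient for idempotent cleanup. A minimal sketch, assuming a `*elasticsearch.TypedClient` whose index exposes this package as `es.Enrich.DeletePolicy`:

```go
package enrichdemo

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// deleteEnrichPolicy is a hypothetical helper around the generated builder.
// Per the generated implementation, IsSuccess returns (true, nil) for 2xx,
// (false, nil) for 404, and an error for anything else.
func deleteEnrichPolicy(es *elasticsearch.TypedClient, name string) error {
	ok, err := es.Enrich.DeletePolicy(name).IsSuccess(context.Background())
	if err != nil {
		return err // transport failure or a non-2xx, non-404 status
	}
	if !ok {
		log.Printf("enrich policy %q did not exist, nothing to delete", name)
	}
	return nil
}
```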
+func (r DeletePolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.delete_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeletePolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeletePolicy headers map. +func (r *DeletePolicy) Header(key, value string) *DeletePolicy { + r.headers.Set(key, value) + + return r +} + +// Name Enrich policy to delete. +// API Name: name +func (r *DeletePolicy) _name(name string) *DeletePolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *DeletePolicy) MasterTimeout(duration string) *DeletePolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeletePolicy) ErrorTrace(errortrace bool) *DeletePolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeletePolicy) FilterPath(filterpaths ...string) *DeletePolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeletePolicy) Human(human bool) *DeletePolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeletePolicy) Pretty(pretty bool) *DeletePolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/response.go new file mode 100644 index 000000000..8af6f8460 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletepolicy + +// Response holds the response body struct for the package deletepolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/execute_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/execute_policy.go new file mode 100644 index 000000000..fac69a2c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/execute_policy.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. +package executepolicy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExecutePolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExecutePolicy type alias for index. +type NewExecutePolicy func(name string) *ExecutePolicy + +// NewExecutePolicyFunc returns a new instance of ExecutePolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExecutePolicyFunc(tp elastictransport.Interface) NewExecutePolicy { + return func(name string) *ExecutePolicy { + n := New(tp) + + n._name(name) + + return n + } +} + +// Run an enrich policy. +// Create the enrich index for an existing enrich policy. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy +func New(tp elastictransport.Interface) *ExecutePolicy { + r := &ExecutePolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExecutePolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_execute") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ExecutePolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "enrich.execute_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.execute_policy") + if reader := instrument.RecordRequestBody(ctx, "enrich.execute_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.execute_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExecutePolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a executepolicy.Response +func (r ExecutePolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.execute_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
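Note the `WaitForCompletion` query option this builder adds further below, next to the usual `master_timeout`: executing a policy builds the enrich index, which can be slow, and passing `false` turns the call into a fire-and-forget task. A sketch, with `es.Enrich.ExecutePolicy` assumed as the typed-client accessor:

```go
package enrichdemo

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// executeEnrichPolicy kicks off enrich-index creation without blocking on it.
func executeEnrichPolicy(es *elasticsearch.TypedClient, name string) error {
	res, err := es.Enrich.ExecutePolicy(name).
		WaitForCompletion(false). // ask for a task handle instead of blocking
		Do(context.Background())
	if err != nil {
		return err
	}
	// With wait_for_completion=false the Response's optional Task field is
	// populated; with the default blocking call, Status is set instead.
	if res.Task != nil {
		fmt.Printf("enrich policy %q executing as task %s\n", name, *res.Task)
	}
	return nil
}
```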
+func (r ExecutePolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.execute_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExecutePolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExecutePolicy headers map. +func (r *ExecutePolicy) Header(key, value string) *ExecutePolicy { + r.headers.Set(key, value) + + return r +} + +// Name Enrich policy to execute. +// API Name: name +func (r *ExecutePolicy) _name(name string) *ExecutePolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ExecutePolicy) MasterTimeout(duration string) *ExecutePolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request blocks other enrich policy execution requests until +// complete. +// API name: wait_for_completion +func (r *ExecutePolicy) WaitForCompletion(waitforcompletion bool) *ExecutePolicy { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExecutePolicy) ErrorTrace(errortrace bool) *ExecutePolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExecutePolicy) FilterPath(filterpaths ...string) *ExecutePolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExecutePolicy) Human(human bool) *ExecutePolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ExecutePolicy) Pretty(pretty bool) *ExecutePolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/response.go new file mode 100644 index 000000000..924deea5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package executepolicy + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package executepolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 +type Response struct { + Status *types.ExecuteEnrichPolicyStatus `json:"status,omitempty"` + Task *string `json:"task,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/get_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/get_policy.go new file mode 100644 index 000000000..a5184dc7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/get_policy.go @@ -0,0 +1,360 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get an enrich policy. +// Returns information about an enrich policy. 
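Unlike the delete and execute builders, the get-policy builder that follows makes the policy name optional (its `HttpRequest` has a `paramSet == 0` branch targeting `/_enrich/policy`), so one call can list every policy. A sketch under the same typed-client assumptions as above:

```go
package enrichdemo

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// listEnrichPolicies fetches every enrich policy; chaining .Name("a,b")
// before Do would narrow the result to the named policies.
func listEnrichPolicies(es *elasticsearch.TypedClient) error {
	res, err := es.Enrich.GetPolicy().Do(context.Background())
	if err != nil {
		return err
	}
	for _, policy := range res.Policies {
		// types.Summary is the generated wrapper type; %+v sidesteps
		// guessing at its field names here.
		fmt.Printf("%+v\n", policy)
	}
	return nil
}
```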
+package getpolicy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetPolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetPolicy type alias for index. +type NewGetPolicy func() *GetPolicy + +// NewGetPolicyFunc returns a new instance of GetPolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetPolicyFunc(tp elastictransport.Interface) NewGetPolicy { + return func() *GetPolicy { + n := New(tp) + + return n + } +} + +// Get an enrich policy. +// Returns information about an enrich policy. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy +func New(tp elastictransport.Interface) *GetPolicy { + r := &GetPolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetPolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "enrich.get_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.get_policy") + if reader := instrument.RecordRequestBody(ctx, "enrich.get_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.get_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetPolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getpolicy.Response +func (r GetPolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.get_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
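All of these generated `Do` methods share one error path: a non-2xx body is decoded into `types.NewElasticsearchError()`, `Status` is back-filled from the HTTP status code when the body omits it, and that value is returned as the `error`. Callers can therefore branch on status with `errors.As`; a sketch, assuming the returned value is the `*types.ElasticsearchError` built in that branch:

```go
package enrichdemo

import (
	"context"
	"errors"
	"log"
	"net/http"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// getEnrichPolicy distinguishes "policy not found" from real failures by
// unwrapping the structured error the generated Do methods return.
func getEnrichPolicy(es *elasticsearch.TypedClient, name string) error {
	res, err := es.Enrich.GetPolicy().Name(name).Do(context.Background())
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) && esErr.Status == http.StatusNotFound {
			log.Printf("no enrich policy named %q", name)
			return nil
		}
		return err
	}
	log.Printf("fetched %d policies", len(res.Policies))
	return nil
}
```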
+func (r GetPolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.get_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetPolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetPolicy headers map. +func (r *GetPolicy) Header(key, value string) *GetPolicy { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of enrich policy names used to limit the request. +// To return information for all enrich policies, omit this parameter. +// API Name: name +func (r *GetPolicy) Name(name string) *GetPolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *GetPolicy) MasterTimeout(duration string) *GetPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetPolicy) ErrorTrace(errortrace bool) *GetPolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetPolicy) FilterPath(filterpaths ...string) *GetPolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetPolicy) Human(human bool) *GetPolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetPolicy) Pretty(pretty bool) *GetPolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/response.go new file mode 100644 index 000000000..5a658d69f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getpolicy + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 +type Response struct { + Policies []types.Summary `json:"policies"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/put_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/put_policy.go new file mode 100644 index 000000000..6434bad36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/put_policy.go @@ -0,0 +1,408 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an enrich policy. +// Creates an enrich policy. +package putpolicy + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutPolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutPolicy type alias for index. 
+type NewPutPolicy func(name string) *PutPolicy + +// NewPutPolicyFunc returns a new instance of PutPolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutPolicyFunc(tp elastictransport.Interface) NewPutPolicy { + return func(name string) *PutPolicy { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create an enrich policy. +// Creates an enrich policy. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy +func New(tp elastictransport.Interface) *PutPolicy { + r := &PutPolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutPolicy) Raw(raw io.Reader) *PutPolicy { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutPolicy) Request(req *Request) *PutPolicy { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutPolicy: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutPolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "enrich.put_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.put_policy") + if reader := instrument.RecordRequestBody(ctx, "enrich.put_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.put_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutPolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putpolicy.Response +func (r PutPolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.put_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutPolicy headers map. +func (r *PutPolicy) Header(key, value string) *PutPolicy { + r.headers.Set(key, value) + + return r +} + +// Name Name of the enrich policy to create or update. +// API Name: name +func (r *PutPolicy) _name(name string) *PutPolicy { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PutPolicy) MasterTimeout(duration string) *PutPolicy { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PutPolicy) ErrorTrace(errortrace bool) *PutPolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutPolicy) FilterPath(filterpaths ...string) *PutPolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutPolicy) Human(human bool) *PutPolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutPolicy) Pretty(pretty bool) *PutPolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Matches enrich data to incoming documents based on a `geo_shape` query. +// API name: geo_match +func (r *PutPolicy) GeoMatch(geomatch types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.GeoMatch = geomatch.EnrichPolicyCaster() + + return r +} + +// Matches enrich data to incoming documents based on a `term` query. +// API name: match +func (r *PutPolicy) Match(match types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Match = match.EnrichPolicyCaster() + + return r +} + +// Matches a number, date, or IP address in incoming documents to a range in the +// enrich index based on a `term` query. +// API name: range +func (r *PutPolicy) Range(range_ types.EnrichPolicyVariant) *PutPolicy { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Range = range_.EnrichPolicyCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/request.go new file mode 100644 index 000000000..8e7b93b7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
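Before request.go continues below, a sketch of driving the put-policy builder above. Two body styles are supported: the typed `Match`/`GeoMatch`/`Range` setters (via `types.EnrichPolicyVariant`) or a raw JSON payload loaded through the `FromJSON` helper defined in this file. The example uses the latter; the `es.Enrich.PutPolicy` accessor, index, and field names are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// FromJSON (defined in request.go) deserializes an arbitrary JSON body
	// into the generated Request; index and field names are made up.
	req, err := putpolicy.NewRequest().FromJSON(`{
	  "match": {
	    "indices": ["users"],
	    "match_field": "email",
	    "enrich_fields": ["first_name", "last_name"]
	  }
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Enrich.PutPolicy("users-policy").Request(req).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```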
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putpolicy + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L25-L67 +type Request struct { + + // GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. + GeoMatch *types.EnrichPolicy `json:"geo_match,omitempty"` + // Match Matches enrich data to incoming documents based on a `term` query. + Match *types.EnrichPolicy `json:"match,omitempty"` + // Range Matches a number, date, or IP address in incoming documents to a range in the + // enrich index based on a `term` query. + Range *types.EnrichPolicy `json:"range,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putpolicy request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/response.go new file mode 100644 index 000000000..5dc236831 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putpolicy + +// Response holds the response body struct for the package putpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/response.go new file mode 100644 index 000000000..82fa73776 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/response.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 +type Response struct { + + // CacheStats Objects containing information about the enrich cache stats on each ingest + // node. + CacheStats []types.CacheStats `json:"cache_stats,omitempty"` + // CoordinatorStats Objects containing information about each coordinating ingest node for + // configured enrich processors. + CoordinatorStats []types.CoordinatorStats `json:"coordinator_stats"` + // ExecutingPolicies Objects containing information about each enrich policy that is currently + // executing. + ExecutingPolicies []types.ExecutingPolicy `json:"executing_policies"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/stats.go new file mode 100644 index 000000000..04033c218 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats/stats.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get enrich stats. +// Returns enrich coordinator statistics and information about enrich policies +// that are currently executing. +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get enrich stats. +// Returns enrich coordinator statistics and information about enrich policies +// that are currently executing. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_enrich") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
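Every builder in this diff carries the same `instrument elastictransport.Instrumentation` plumbing: `Start`/`Close` spans in `Perform`, `Do`, and `IsSuccess`, plus `RecordPathPart`, `BeforeRequest`, `AfterRequest`, and `RecordError` hooks. It stays inert unless the transport opts in at construction time. A sketch of enabling it with the transport's OpenTelemetry helper; the `NewOtelInstrumentation` signature and the `Instrumentation` config field are assumptions about elastic-transport-go and the v9 client, not part of this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumed helper: with a nil provider, the transport's OpenTelemetry
	// instrumentation falls back to the global tracer provider.
	instrumentation := elastictransport.NewOtelInstrumentation(nil, false, "9.1.0")

	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses:       []string{"http://localhost:9200"}, // hypothetical address
		Instrumentation: instrumentation,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each generated endpoint now opens a span named after the API (for
	// example "enrich.stats"), records path parts and errors, and closes
	// it, matching the instrument branches in the methods above and below.
	if _, err := es.Enrich.Stats().Do(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```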
+func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "enrich.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "enrich.stats") + if reader := instrument.RecordRequestBody(ctx, "enrich.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "enrich.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
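+//
+// A minimal sketch (tp is an assumed, already-configured
+// elastictransport.Interface):
+//
+//	ok, err := stats.New(tp).IsSuccess(context.Background())
+//	// ok is true for any 2xx status; a 404 yields (false, nil).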
+func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "enrich.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/delete.go new file mode 100644 index 000000000..5a4aa6111 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/delete.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. +// The API also deletes results for the search. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(id string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(id string) *Delete { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete an async EQL search. +// Delete an async EQL search or a stored synchronous EQL search. +// The API also deletes results for the search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
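+//
+// Useful for inspecting or dispatching the request manually. A sketch, where
+// tp is an assumed elastictransport.Interface and the search ID is
+// illustrative:
+//
+//	req, err := delete.NewDeleteFunc(tp)("my-search-id").HttpRequest(context.Background())
+//	// req.Method is http.MethodDelete; req.URL.Path is "/_eql/search/my-search-id".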
+func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "eql.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "eql.delete") + if reader := instrument.RecordRequestBody(ctx, "eql.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the search to delete. +// A search ID is provided in the EQL search API's response for an async search. +// A search ID is also provided if the request’s `keep_on_completion` parameter +// is `true`. +// API Name: id +func (r *Delete) _id(id string) *Delete { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/response.go new file mode 100644 index 000000000..277868ffb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/delete/EqlDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/get.go new file mode 100644 index 000000000..f69e975d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/get.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a +// stored synchronous EQL search. 
+package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(id string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(id string) *Get { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get async EQL search results. +// Get the current status and available results for an async EQL search or a +// stored synchronous EQL search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
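+//
+// The returned error is non-nil only for transport-level failures; HTTP error
+// statuses still come back as a normal response, so the caller must check
+// them. Sketch (tp and the search ID are illustrative assumptions):
+//
+//	res, err := get.NewGetFunc(tp)("my-search-id").Perform(context.Background())
+//	if err != nil {
+//		return err // network/transport failure only
+//	}
+//	defer res.Body.Close()
+//	if res.StatusCode >= 300 { /* decode the Elasticsearch error body */ }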
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "eql.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "eql.get") + if reader := instrument.RecordRequestBody(ctx, "eql.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the search. +// API Name: id +func (r *Get) _id(id string) *Get { + r.paramSet |= idMask + r.id = id + + return r +} + +// KeepAlive Period for which the search and its results are stored on the cluster. +// Defaults to the keep_alive value set by the search’s EQL search API request. +// API name: keep_alive +func (r *Get) KeepAlive(duration string) *Get { + r.values.Set("keep_alive", duration) + + return r +} + +// WaitForCompletionTimeout Timeout duration to wait for the request to finish. +// Defaults to no timeout, meaning the request waits for complete search +// results. +// API name: wait_for_completion_timeout +func (r *Get) WaitForCompletionTimeout(duration string) *Get { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/response.go new file mode 100644 index 000000000..2ac4d159d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/get/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/get/EqlGetResponse.ts#L22-L25 +type Response struct { + + // Hits Contains matching events and sequences. Also contains related metadata. + Hits types.EqlHits `json:"hits"` + // Id Identifier for the search. + Id *string `json:"id,omitempty"` + // IsPartial If true, the response does not contain complete search results. + IsPartial *bool `json:"is_partial,omitempty"` + // IsRunning If true, the search request is still executing. + IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` + // TimedOut If true, the request timed out before completion. + TimedOut *bool `json:"timed_out,omitempty"` + // Took Milliseconds it took Elasticsearch to execute the request. + Took *int64 `json:"took,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/get_status.go new file mode 100644 index 000000000..af1be6275 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/get_status.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. +package getstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetStatus type alias for index. +type NewGetStatus func(id string) *GetStatus + +// NewGetStatusFunc returns a new instance of GetStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { + return func(id string) *GetStatus { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get the async EQL status. +// Get the current status for an async EQL search or a stored synchronous EQL +// search without returning results. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status +func New(tp elastictransport.Interface) *GetStatus { + r := &GetStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + path.WriteString("/") + path.WriteString("status") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
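+//
+// Sketch: GetStatus issues GET /_eql/search/status/<id>. Here tp and the
+// search ID are illustrative assumptions:
+//
+//	res, err := getstatus.NewGetStatusFunc(tp)("my-search-id").Perform(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()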
+func (r GetStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "eql.get_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "eql.get_status") + if reader := instrument.RecordRequestBody(ctx, "eql.get_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.get_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getstatus.Response +func (r GetStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
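+//
+// Sketch of the quick existence check (tp and the ID are illustrative):
+//
+//	found, err := getstatus.NewGetStatusFunc(tp)("my-search-id").IsSuccess(context.Background())
+//	// (false, nil) means the cluster answered 404: no such search ID.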
+func (r GetStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetStatus headers map. +func (r *GetStatus) Header(key, value string) *GetStatus { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the search. +// API Name: id +func (r *GetStatus) _id(id string) *GetStatus { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetStatus) ErrorTrace(errortrace bool) *GetStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetStatus) FilterPath(filterpaths ...string) *GetStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetStatus) Human(human bool) *GetStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetStatus) Pretty(pretty bool) *GetStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/response.go new file mode 100644 index 000000000..a5ad62927 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getstatus + +// Response holds the response body struct for the package getstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 +type Response struct { + + // CompletionStatus For a completed search shows the http status code of the completed search. + CompletionStatus *int `json:"completion_status,omitempty"` + // ExpirationTimeInMillis Shows a timestamp when the eql search will be expired, in milliseconds since + // the Unix epoch. When this time is reached, the search and its results are + // deleted, even if the search is still ongoing. + ExpirationTimeInMillis *int64 `json:"expiration_time_in_millis,omitempty"` + // Id Identifier for the search. + Id string `json:"id"` + // IsPartial If true, the search request is still executing. If false, the search is + // completed. + IsPartial bool `json:"is_partial"` + // IsRunning If true, the response does not contain complete search results. This could be + // because either the search is still running (is_running status is false), or + // because it is already completed (is_running status is true) and results are + // partial due to failures or timeouts. + IsRunning bool `json:"is_running"` + // StartTimeInMillis For a running search shows a timestamp when the eql search started, in + // milliseconds since the Unix epoch. + StartTimeInMillis *int64 `json:"start_time_in_millis,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/request.go new file mode 100644 index 000000000..77702d3f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/request.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition" +) + +// Request holds the request body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/search/EqlSearchRequest.ts#L28-L166 +type Request struct { + + // AllowPartialSearchResults Allow query execution also in case of shard failures. + // If true, the query will keep running and will return results based on the + // available shards. + // For sequences, the behavior can be further refined using + // allow_partial_sequence_results + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // AllowPartialSequenceResults This flag applies only to sequences and has effect only if + // allow_partial_search_results=true. + // If true, the sequence query will return results based on the available + // shards, ignoring the others. + // If false, the sequence query will return successfully, but will always have + // empty results. + AllowPartialSequenceResults *bool `json:"allow_partial_sequence_results,omitempty"` + CaseSensitive *bool `json:"case_sensitive,omitempty"` + // EventCategoryField Field containing the event classification, such as process, file, or network. + EventCategoryField *string `json:"event_category_field,omitempty"` + // FetchSize Maximum number of events to search at a time for sequence queries. + FetchSize *uint `json:"fetch_size,omitempty"` + // Fields Array of wildcard (*) patterns. The response returns values for field names + // matching these patterns in the fields property of each hit. + Fields []types.FieldAndFormat `json:"fields,omitempty"` + // Filter Query, written in Query DSL, used to filter the events on which the EQL query + // runs. + Filter []types.Query `json:"filter,omitempty"` + KeepAlive types.Duration `json:"keep_alive,omitempty"` + KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` + // MaxSamplesPerKey By default, the response of a sample query contains up to `10` samples, with + // one sample per unique set of join keys. Use the `size` + // parameter to get a smaller or larger set of samples. To retrieve more than + // one sample per set of join keys, use the + // `max_samples_per_key` parameter. Pipes are not supported for sample queries. + MaxSamplesPerKey *int `json:"max_samples_per_key,omitempty"` + // Query EQL query you wish to run. + Query string `json:"query"` + ResultPosition *resultposition.ResultPosition `json:"result_position,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // Size For basic queries, the maximum number of matching events to return. Defaults + // to 10 + Size *uint `json:"size,omitempty"` + // TiebreakerField Field used to sort hits with the same timestamp in ascending order + TiebreakerField *string `json:"tiebreaker_field,omitempty"` + // TimestampField Field containing event timestamp. 
Default "@timestamp" + TimestampField *string `json:"timestamp_field,omitempty"` + WaitForCompletionTimeout types.Duration `json:"wait_for_completion_timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Search request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + + case "allow_partial_sequence_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSequenceResults", err) + } + s.AllowPartialSequenceResults = &value + case bool: + s.AllowPartialSequenceResults = &v + } + + case "case_sensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseSensitive", err) + } + s.CaseSensitive = &value + case bool: + s.CaseSensitive = &v + } + + case "event_category_field": + if err := dec.Decode(&s.EventCategoryField); err != nil { + return fmt.Errorf("%s | %w", "EventCategoryField", err) + } + + case "fetch_size": + if err := dec.Decode(&s.FetchSize); err != nil { + return fmt.Errorf("%s | %w", "FetchSize", err) + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewFieldAndFormat() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + + case "keep_on_completion": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepOnCompletion", err) + } + s.KeepOnCompletion = &value + case bool: + s.KeepOnCompletion = &v + } + + case "max_samples_per_key": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err 
!= nil { + return fmt.Errorf("%s | %w", "MaxSamplesPerKey", err) + } + s.MaxSamplesPerKey = &value + case float64: + f := int(v) + s.MaxSamplesPerKey = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "result_position": + if err := dec.Decode(&s.ResultPosition); err != nil { + return fmt.Errorf("%s | %w", "ResultPosition", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "tiebreaker_field": + if err := dec.Decode(&s.TiebreakerField); err != nil { + return fmt.Errorf("%s | %w", "TiebreakerField", err) + } + + case "timestamp_field": + if err := dec.Decode(&s.TimestampField); err != nil { + return fmt.Errorf("%s | %w", "TimestampField", err) + } + + case "wait_for_completion_timeout": + if err := dec.Decode(&s.WaitForCompletionTimeout); err != nil { + return fmt.Errorf("%s | %w", "WaitForCompletionTimeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/response.go new file mode 100644 index 000000000..0dbb80343 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/search/EqlSearchResponse.ts#L22-L25 +type Response struct { + + // Hits Contains matching events and sequences. Also contains related metadata. + Hits types.EqlHits `json:"hits"` + // Id Identifier for the search. + Id *string `json:"id,omitempty"` + // IsPartial If true, the response does not contain complete search results. + IsPartial *bool `json:"is_partial,omitempty"` + // IsRunning If true, the search request is still executing. 
+ IsRunning *bool `json:"is_running,omitempty"` + // ShardFailures Contains information about shard failures (if any), in case + // allow_partial_search_results=true + ShardFailures []types.ShardFailure `json:"shard_failures,omitempty"` + // TimedOut If true, the request timed out before completion. + TimedOut *bool `json:"timed_out,omitempty"` + // Took Milliseconds it took Elasticsearch to execute the request. + Took *int64 `json:"took,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/search.go new file mode 100644 index 000000000..08950ab82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/eql/search/search.go @@ -0,0 +1,636 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. +// EQL assumes each document in a data stream or index corresponds to an event. +package search + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Search struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearch type alias for index. +type NewSearch func(index string) *Search + +// NewSearchFunc returns a new instance of Search with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchFunc(tp elastictransport.Interface) NewSearch { + return func(index string) *Search { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get EQL search results. +// Returns search results for an Event Query Language (EQL) query. 
+// EQL assumes each document in a data stream or index corresponds to an event. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search +func New(tp elastictransport.Interface) *Search { + r := &Search{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Search) Raw(raw io.Reader) *Search { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Search) Request(req *Request) *Search { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Search: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_eql") + path.WriteString("/") + path.WriteString("search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
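+//
+// Most callers will use Do instead, which wraps Perform and decodes the
+// response body. A minimal, hypothetical usage sketch (the transport value
+// and index name are assumptions, not part of this generated file):
+//
+//	res, err := NewSearchFunc(transport)("my-index").
+//		Query(`process where process.name == "regsvr32.exe"`).
+//		Do(context.Background())
+//	if err == nil {
+//		_ = res.Hits // matching events and sequences
+//	}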
+func (r Search) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "eql.search") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "eql.search") + if reader := instrument.RecordRequestBody(ctx, "eql.search", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "eql.search") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Search query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a search.Response +func (r Search) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "eql.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Search headers map. +func (r *Search) Header(key, value string) *Search { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index to scope the operation +// API Name: index +func (r *Search) _index(index string) *Search { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. (This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. 
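+// For example, to expand wildcards to both open and closed indices
+// (an illustrative sketch; Open and Closed are assumed values of the
+// imported expandwildcard enum package):
+//
+//	r.ExpandWildcards(expandwildcard.Open, expandwildcard.Closed)
+//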
+// API name: expand_wildcards +func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// CcsMinimizeRoundtrips Indicates whether network round-trips should be minimized as part of +// cross-cluster search requests execution +// API name: ccs_minimize_roundtrips +func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Search) ErrorTrace(errortrace bool) *Search { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Search) FilterPath(filterpaths ...string) *Search { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Search) Human(human bool) *Search { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Search) Pretty(pretty bool) *Search { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Allow query execution also in case of shard failures. +// If true, the query will keep running and will return results based on the +// available shards. +// For sequences, the behavior can be further refined using +// allow_partial_sequence_results +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// This flag applies only to sequences and has effect only if +// allow_partial_search_results=true. +// If true, the sequence query will return results based on the available +// shards, ignoring the others. +// If false, the sequence query will return successfully, but will always have +// empty results. 
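+// A hypothetical sketch enabling both partial-result flags together:
+//
+//	r.AllowPartialSearchResults(true).AllowPartialSequenceResults(true)
+//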
+// API name: allow_partial_sequence_results +func (r *Search) AllowPartialSequenceResults(allowpartialsequenceresults bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSequenceResults = &allowpartialsequenceresults + + return r +} + +// API name: case_sensitive +func (r *Search) CaseSensitive(casesensitive bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CaseSensitive = &casesensitive + + return r +} + +// Field containing the event classification, such as process, file, or network. +// API name: event_category_field +func (r *Search) EventCategoryField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.EventCategoryField = &field + + return r +} + +// Maximum number of events to search at a time for sequence queries. +// API name: fetch_size +func (r *Search) FetchSize(fetchsize uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FetchSize = &fetchsize + + return r +} + +// Array of wildcard (*) patterns. The response returns values for field names +// matching these patterns in the fields property of each hit. +// API name: fields +func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Fields = make([]types.FieldAndFormat, len(fields)) + for i, v := range fields { + r.req.Fields[i] = *v.FieldAndFormatCaster() + } + + return r +} + +// Query, written in Query DSL, used to filter the events on which the EQL query +// runs. +// API name: filter +func (r *Search) Filter(filters ...types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Filter = make([]types.Query, len(filters)) + for i, v := range filters { + r.req.Filter[i] = *v.QueryCaster() + } + + return r +} + +// API name: keep_alive +func (r *Search) KeepAlive(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() + + return r +} + +// API name: keep_on_completion +func (r *Search) KeepOnCompletion(keeponcompletion bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepOnCompletion = &keeponcompletion + + return r +} + +// By default, the response of a sample query contains up to `10` samples, with +// one sample per unique set of join keys. Use the `size` +// parameter to get a smaller or larger set of samples. To retrieve more than +// one sample per set of join keys, use the +// `max_samples_per_key` parameter. Pipes are not supported for sample queries. +// API name: max_samples_per_key +func (r *Search) MaxSamplesPerKey(maxsamplesperkey int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxSamplesPerKey = &maxsamplesperkey + + return r +} + +// EQL query you wish to run. 
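+// For example, a sequence query (illustrative only; see the EQL syntax
+// reference for the full grammar):
+//
+//	r.Query(`sequence by host.name [process where true] [network where true]`)
+//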
+// API name: query +func (r *Search) Query(query string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// API name: result_position +func (r *Search) ResultPosition(resultposition resultposition.ResultPosition) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ResultPosition = &resultposition + return r +} + +// API name: runtime_mappings +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// For basic queries, the maximum number of matching events to return. Defaults +// to 10 +// API name: size +func (r *Search) Size(size uint) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// Field used to sort hits with the same timestamp in ascending order +// API name: tiebreaker_field +func (r *Search) TiebreakerField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TiebreakerField = &field + + return r +} + +// Field containing event timestamp. Default "@timestamp" +// API name: timestamp_field +func (r *Search) TimestampField(field string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TimestampField = &field + + return r +} + +// API name: wait_for_completion_timeout +func (r *Search) WaitForCompletionTimeout(duration types.DurationVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/async_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/async_query.go new file mode 100644 index 000000000..967e992c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/async_query.go @@ -0,0 +1,568 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run an async ES|QL query. 
+// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. +package asyncquery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQuery type alias for index. +type NewAsyncQuery func() *AsyncQuery + +// NewAsyncQueryFunc returns a new instance of AsyncQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryFunc(tp elastictransport.Interface) NewAsyncQuery { + return func() *AsyncQuery { + n := New(tp) + + return n + } +} + +// Run an async ES|QL query. +// Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its +// progress, and retrieve results when they become available. +// +// The API accepts the same parameters and request body as the synchronous query +// API, along with additional async related properties. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query +func New(tp elastictransport.Interface) *AsyncQuery { + r := &AsyncQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *AsyncQuery) Raw(raw io.Reader) *AsyncQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *AsyncQuery) Request(req *Request) *AsyncQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
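+//
+// In typical use Do (below) builds and performs the request in one step; a
+// hypothetical sketch (the transport value is an assumption, and the
+// returned Response is the raw body bytes):
+//
+//	raw, err := NewAsyncQueryFunc(transport)().
+//		Query(`FROM my-index | LIMIT 10`).
+//		KeepOnCompletion(true).
+//		Do(context.Background())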
+func (r *AsyncQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for AsyncQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AsyncQuery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQuery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquery.Response +func (r AsyncQuery) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the AsyncQuery headers map. +func (r *AsyncQuery) Header(key, value string) *AsyncQuery { + r.headers.Set(key, value) + + return r +} + +// AllowPartialResults If `true`, partial results will be returned if there are shard failures, but +// the query can continue to execute on other clusters and shards. +// If `false`, the query will fail if there are any failures. +// +// To override the default behavior, you can set the +// `esql.query.allow_partial_results` cluster setting to `false`. +// API name: allow_partial_results +func (r *AsyncQuery) AllowPartialResults(allowpartialresults bool) *AsyncQuery { + r.values.Set("allow_partial_results", strconv.FormatBool(allowpartialresults)) + + return r +} + +// Delimiter The character to use between values within a CSV row. +// It is valid only for the CSV format. +// API name: delimiter +func (r *AsyncQuery) Delimiter(delimiter string) *AsyncQuery { + r.values.Set("delimiter", delimiter) + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQuery) DropNullColumns(dropnullcolumns bool) *AsyncQuery { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// Format A short version of the Accept header, e.g. json, yaml. +// +// `csv`, `tsv`, and `txt` formats will return results in a tabular format, +// excluding other metadata fields from the response. +// +// For async requests, nothing will be returned if the async query doesn't +// finish within the timeout. +// The query ID and running status are available in the +// `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP +// headers of the response, respectively. +// API name: format +func (r *AsyncQuery) Format(format esqlformat.EsqlFormat) *AsyncQuery { + r.values.Set("format", format.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQuery) ErrorTrace(errortrace bool) *AsyncQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
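+// For example, to keep only the tabular parts of an ES|QL response
+// (illustrative; the field names are assumptions about the response shape):
+//
+//	r.FilterPath("columns", "values")
+//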
+// API name: filter_path +func (r *AsyncQuery) FilterPath(filterpaths ...string) *AsyncQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQuery) Human(human bool) *AsyncQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQuery) Pretty(pretty bool) *AsyncQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// By default, ES|QL returns results as rows. For example, FROM returns each +// individual document as one row. For the JSON, YAML, CBOR and smile formats, +// ES|QL can return the results in a columnar fashion where one row represents +// all the values of a certain column in the results. +// API name: columnar +func (r *AsyncQuery) Columnar(columnar bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Columnar = &columnar + + return r +} + +// Specify a Query DSL query in the filter parameter to filter the set of +// documents that an ES|QL query runs on. +// API name: filter +func (r *AsyncQuery) Filter(filter types.QueryVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. +// API name: include_ccs_metadata +func (r *AsyncQuery) IncludeCcsMetadata(includeccsmetadata bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeCcsMetadata = &includeccsmetadata + + return r +} + +// The period for which the query and its results are stored in the cluster. +// The default period is five days. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// If the `keep_on_completion` parameter is false, Elasticsearch only stores +// async queries that do not complete within the period set by the +// `wait_for_completion_timeout` parameter, regardless of this value. +// API name: keep_alive +func (r *AsyncQuery) KeepAlive(duration types.DurationVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() + + return r +} + +// Indicates whether the query and its results are stored in the cluster. +// If false, the query and its results are stored in the cluster only if the +// request does not complete during the period set by the +// `wait_for_completion_timeout` parameter. 
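+// For example, r.KeepOnCompletion(true) stores the results for the
+// keep_alive period even when the query finishes within
+// wait_for_completion_timeout (illustrative note).
+//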
+// API name: keep_on_completion +func (r *AsyncQuery) KeepOnCompletion(keeponcompletion bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepOnCompletion = &keeponcompletion + + return r +} + +// API name: locale +func (r *AsyncQuery) Locale(locale string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Locale = &locale + + return r +} + +// To avoid any attempts of hacking or code injection, extract the values in a +// separate list of parameters. Use question mark placeholders (?) in the query +// string for each of the parameters. +// API name: params +func (r *AsyncQuery) Params(params ...types.FieldValueVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + + } + return r +} + +// If provided and `true` the response will include an extra `profile` object +// with information on how the query was executed. This information is for human +// debugging +// and its format can change at any time but it can give some insight into the +// performance +// of each part of the query. +// API name: profile +func (r *AsyncQuery) Profile(profile bool) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// The ES|QL query API accepts an ES|QL query string in the query parameter, +// runs it, and returns the results. +// API name: query +func (r *AsyncQuery) Query(query string) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Tables to use with the LOOKUP operation. The top level key is the table +// name and the next level key is the column name. +// API name: tables +func (r *AsyncQuery) Tables(tables map[string]map[string]types.TableValuesContainer) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Tables = tables + return r +} + +// The period to wait for the request to finish. +// By default, the request waits for 1 second for the query results. +// If the query completes during this period, results are returned +// Otherwise, a query ID is returned that can later be used to retrieve the +// results. +// API name: wait_for_completion_timeout +func (r *AsyncQuery) WaitForCompletionTimeout(duration types.DurationVariant) *AsyncQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/request.go new file mode 100644 index 000000000..d66242da6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/request.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package asyncquery + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package asyncquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/async_query/AsyncQueryRequest.ts#L28-L138 +type Request struct { + + // Columnar By default, ES|QL returns results as rows. For example, FROM returns each + // individual document as one row. For the JSON, YAML, CBOR and smile formats, + // ES|QL can return the results in a columnar fashion where one row represents + // all the values of a certain column in the results. + Columnar *bool `json:"columnar,omitempty"` + // Filter Specify a Query DSL query in the filter parameter to filter the set of + // documents that an ES|QL query runs on. + Filter *types.Query `json:"filter,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + // KeepAlive The period for which the query and its results are stored in the cluster. + // The default period is five days. + // When this period expires, the query and its results are deleted, even if the + // query is still ongoing. + // If the `keep_on_completion` parameter is false, Elasticsearch only stores + // async queries that do not complete within the period set by the + // `wait_for_completion_timeout` parameter, regardless of this value. + KeepAlive types.Duration `json:"keep_alive,omitempty"` + // KeepOnCompletion Indicates whether the query and its results are stored in the cluster. + // If false, the query and its results are stored in the cluster only if the + // request does not complete during the period set by the + // `wait_for_completion_timeout` parameter. + KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` + Locale *string `json:"locale,omitempty"` + // Params To avoid any attempts of hacking or code injection, extract the values in a + // separate list of parameters. Use question mark placeholders (?) in the query + // string for each of the parameters. + Params []types.FieldValue `json:"params,omitempty"` + // Profile If provided and `true` the response will include an extra `profile` object + // with information on how the query was executed. This information is for human + // debugging + // and its format can change at any time but it can give some insight into the + // performance + // of each part of the query. 
+ Profile *bool `json:"profile,omitempty"` + // Query The ES|QL query API accepts an ES|QL query string in the query parameter, + // runs it, and returns the results. + Query string `json:"query"` + // Tables Tables to use with the LOOKUP operation. The top level key is the table + // name and the next level key is the column name. + Tables map[string]map[string]types.TableValuesContainer `json:"tables,omitempty"` + // WaitForCompletionTimeout The period to wait for the request to finish. + // By default, the request waits for 1 second for the query results. + // If the query completes during this period, results are returned + // Otherwise, a query ID is returned that can later be used to retrieve the + // results. + WaitForCompletionTimeout types.Duration `json:"wait_for_completion_timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Tables: make(map[string]map[string]types.TableValuesContainer, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Asyncquery request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "columnar": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Columnar", err) + } + s.Columnar = &value + case bool: + s.Columnar = &v + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "include_ccs_metadata": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeCcsMetadata", err) + } + s.IncludeCcsMetadata = &value + case bool: + s.IncludeCcsMetadata = &v + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + + case "keep_on_completion": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepOnCompletion", err) + } + s.KeepOnCompletion = &value + case bool: + s.KeepOnCompletion = &v + } + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "params": + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "tables": + if s.Tables 
== nil { + s.Tables = make(map[string]map[string]types.TableValuesContainer, 0) + } + if err := dec.Decode(&s.Tables); err != nil { + return fmt.Errorf("%s | %w", "Tables", err) + } + + case "wait_for_completion_timeout": + if err := dec.Decode(&s.WaitForCompletionTimeout); err != nil { + return fmt.Errorf("%s | %w", "WaitForCompletionTimeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/response.go new file mode 100644 index 000000000..f66d98442 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/509f88ad09a267354d02bf5f9e960503fa9236b7 + +package asyncquery + +// Response holds the response body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/509f88ad09a267354d02bf5f9e960503fa9236b7/specification/esql/query/QueryResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/async_query_delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/async_query_delete.go new file mode 100644 index 000000000..1abac8d2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/async_query_delete.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. 
+// Otherwise, the stored results are deleted. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +package asyncquerydelete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryDelete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryDelete type alias for index. +type NewAsyncQueryDelete func(id string) *AsyncQueryDelete + +// NewAsyncQueryDeleteFunc returns a new instance of AsyncQueryDelete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryDeleteFunc(tp elastictransport.Interface) NewAsyncQueryDelete { + return func(id string) *AsyncQueryDelete { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete an async ES|QL query. +// If the query is still running, it is cancelled. +// Otherwise, the stored results are deleted. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a query: +// +// * The authenticated user that submitted the original query request +// * Users with the `cancel_task` cluster privilege +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete +func New(tp elastictransport.Interface) *AsyncQueryDelete { + r := &AsyncQueryDelete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
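+//
+// For quick control flow, IsSuccess (below) discards the response body and
+// reports only success; a hypothetical sketch (the transport value and query
+// ID are assumptions):
+//
+//	ok, err := NewAsyncQueryDeleteFunc(transport)("<query-id>").
+//		IsSuccess(context.Background())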
+func (r *AsyncQueryDelete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AsyncQueryDelete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_delete") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryDelete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerydelete.Response +func (r AsyncQueryDelete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AsyncQueryDelete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryDelete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryDelete headers map. +func (r *AsyncQueryDelete) Header(key, value string) *AsyncQueryDelete { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryDelete) _id(id string) *AsyncQueryDelete { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryDelete) ErrorTrace(errortrace bool) *AsyncQueryDelete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryDelete) FilterPath(filterpaths ...string) *AsyncQueryDelete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryDelete) Human(human bool) *AsyncQueryDelete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *AsyncQueryDelete) Pretty(pretty bool) *AsyncQueryDelete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/response.go new file mode 100644 index 000000000..033475eba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package asyncquerydelete + +// Response holds the response body struct for the package asyncquerydelete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/async_query_delete/AsyncQueryDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/async_query_get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/async_query_get.go new file mode 100644 index 000000000..b4a4c8f1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/async_query_get.go @@ -0,0 +1,399 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get async ES|QL query results. 
+// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. +package asyncqueryget + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryGet struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryGet type alias for index. +type NewAsyncQueryGet func(id string) *AsyncQueryGet + +// NewAsyncQueryGetFunc returns a new instance of AsyncQueryGet with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryGetFunc(tp elastictransport.Interface) NewAsyncQueryGet { + return func(id string) *AsyncQueryGet { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get async ES|QL query results. +// Get the current status and available results or stored results for an ES|QL +// asynchronous query. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can retrieve the results using this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get +func New(tp elastictransport.Interface) *AsyncQueryGet { + r := &AsyncQueryGet{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
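+//
+// As elsewhere, Do (below) performs the request and returns the raw body
+// bytes; a hypothetical polling sketch (the transport value and query ID are
+// assumptions):
+//
+//	raw, err := NewAsyncQueryGetFunc(transport)("<query-id>").
+//		KeepAlive("5d").
+//		Do(context.Background())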
+func (r *AsyncQueryGet) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AsyncQueryGet) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_get") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryGet query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncqueryget.Response +func (r AsyncQueryGet) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AsyncQueryGet) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryGet query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryGet headers map. +func (r *AsyncQueryGet) Header(key, value string) *AsyncQueryGet { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryGet) _id(id string) *AsyncQueryGet { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQueryGet) DropNullColumns(dropnullcolumns bool) *AsyncQueryGet { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// Format A short version of the Accept header, for example `json` or `yaml`. +// API name: format +func (r *AsyncQueryGet) Format(format esqlformat.EsqlFormat) *AsyncQueryGet { + r.values.Set("format", format.String()) + + return r +} + +// KeepAlive The period for which the query and its results are stored in the cluster. +// When this period expires, the query and its results are deleted, even if the +// query is still ongoing. +// API name: keep_alive +func (r *AsyncQueryGet) KeepAlive(duration string) *AsyncQueryGet { + r.values.Set("keep_alive", duration) + + return r +} + +// WaitForCompletionTimeout The period to wait for the request to finish. +// By default, the request waits for complete query results. +// If the request completes during the period specified in this parameter, +// complete query results are returned. +// Otherwise, the response returns an `is_running` value of `true` and no +// results. 
+// API name: wait_for_completion_timeout +func (r *AsyncQueryGet) WaitForCompletionTimeout(duration string) *AsyncQueryGet { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryGet) ErrorTrace(errortrace bool) *AsyncQueryGet { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryGet) FilterPath(filterpaths ...string) *AsyncQueryGet { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryGet) Human(human bool) *AsyncQueryGet { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AsyncQueryGet) Pretty(pretty bool) *AsyncQueryGet { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/response.go new file mode 100644 index 000000000..d4fe0c31e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/509f88ad09a267354d02bf5f9e960503fa9236b7 + +package asyncqueryget + +// Response holds the response body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/509f88ad09a267354d02bf5f9e960503fa9236b7/specification/esql/query/QueryResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/async_query_stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/async_query_stop.go new file mode 100644 index 000000000..608087b9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/async_query_stop.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. +package asyncquerystop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AsyncQueryStop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAsyncQueryStop type alias for index. +type NewAsyncQueryStop func(id string) *AsyncQueryStop + +// NewAsyncQueryStopFunc returns a new instance of AsyncQueryStop with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAsyncQueryStopFunc(tp elastictransport.Interface) NewAsyncQueryStop { + return func(id string) *AsyncQueryStop { + n := New(tp) + + n._id(id) + + return n + } +} + +// Stop async ES|QL query. +// +// This API interrupts the query execution and returns the results so far. +// If the Elasticsearch security features are enabled, only the user who first +// submitted the ES|QL query can stop it. 
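Note that asyncqueryget's Response is a plain []byte alias, so Do hands back the raw JSON body rather than a decoded struct. A minimal sketch of fetching stored results, using only the constructor and query-parameter setters defined above; the package clause is elided as in the first sketch, and the ID is whatever the async query API returned earlier.

    import (
    	"context"

    	"github.com/elastic/elastic-transport-go/v8/elastictransport"
    	"github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget"
    )

    // fetchAsyncResults waits up to two seconds for the async query to finish
    // and asks the cluster to retain the results for another five minutes.
    func fetchAsyncResults(ctx context.Context, tp elastictransport.Interface, id string) ([]byte, error) {
    	return asyncqueryget.NewAsyncQueryGetFunc(tp)(id).
    		WaitForCompletionTimeout("2s").
    		KeepAlive("5m").
    		DropNullColumns(true).
    		Do(ctx)
    }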
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop +func New(tp elastictransport.Interface) *AsyncQueryStop { + r := &AsyncQueryStop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *AsyncQueryStop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r AsyncQueryStop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.async_query_stop") + if reader := instrument.RecordRequestBody(ctx, "esql.async_query_stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.async_query_stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AsyncQueryStop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a asyncquerystop.Response +func (r AsyncQueryStop) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r AsyncQueryStop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.async_query_stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AsyncQueryStop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AsyncQueryStop headers map. +func (r *AsyncQueryStop) Header(key, value string) *AsyncQueryStop { + r.headers.Set(key, value) + + return r +} + +// Id The unique identifier of the query. +// A query ID is provided in the ES|QL async query API response for a query that +// does not complete in the designated time. +// A query ID is also provided when the request was submitted with the +// `keep_on_completion` parameter set to `true`. +// API Name: id +func (r *AsyncQueryStop) _id(id string) *AsyncQueryStop { + r.paramSet |= idMask + r.id = id + + return r +} + +// DropNullColumns Indicates whether columns that are entirely `null` will be removed from the +// `columns` and `values` portion of the results. +// If `true`, the response will include an extra section under the name +// `all_columns` which has the name of all the columns. +// API name: drop_null_columns +func (r *AsyncQueryStop) DropNullColumns(dropnullcolumns bool) *AsyncQueryStop { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AsyncQueryStop) ErrorTrace(errortrace bool) *AsyncQueryStop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AsyncQueryStop) FilterPath(filterpaths ...string) *AsyncQueryStop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AsyncQueryStop) Human(human bool) *AsyncQueryStop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *AsyncQueryStop) Pretty(pretty bool) *AsyncQueryStop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/response.go new file mode 100644 index 000000000..d63261793 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/509f88ad09a267354d02bf5f9e960503fa9236b7 + +package asyncquerystop + +// Response holds the response body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/509f88ad09a267354d02bf5f9e960503fa9236b7/specification/esql/query/QueryResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/get_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/get_query.go new file mode 100644 index 000000000..3f351b947 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/get_query.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a specific running ES|QL query information. +// Returns an object extended information about a running ES|QL query. 
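asyncquerystop, completed above, mirrors the same shape: its Response is also a []byte alias, here containing whatever partial results the query had produced before the interrupt. A sketch under the same tp assumption:

    import (
    	"context"

    	"github.com/elastic/elastic-transport-go/v8/elastictransport"
    	"github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop"
    )

    // stopAsyncQuery interrupts a running async ES|QL query and returns the
    // partial results accumulated so far as raw JSON.
    func stopAsyncQuery(ctx context.Context, tp elastictransport.Interface, id string) ([]byte, error) {
    	return asyncquerystop.NewAsyncQueryStopFunc(tp)(id).Do(ctx)
    }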
+package getquery + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetQuery type alias for index. +type NewGetQuery func(id string) *GetQuery + +// NewGetQueryFunc returns a new instance of GetQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetQueryFunc(tp elastictransport.Interface) NewGetQuery { + return func(id string) *GetQuery { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get a specific running ES|QL query information. +// Returns an object extended information about a running ES|QL query. +func New(tp elastictransport.Interface) *GetQuery { + r := &GetQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("queries") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetQuery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.get_query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.get_query") + if reader := instrument.RecordRequestBody(ctx, "esql.get_query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.get_query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetQuery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getquery.Response +func (r GetQuery) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.get_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
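Every Do in these files shares one error path: a non-2xx body is decoded into the value from types.NewElasticsearchError, Status is backfilled from the HTTP status code when the body omits it, and that value is returned as the error. Callers can therefore branch with errors.As; a sketch (the *types.ElasticsearchError dynamic type follows from the return statements above):

    import (
    	"errors"
    	"net/http"

    	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    // isNotFound reports whether err is an Elasticsearch-level 404, e.g. an
    // async query ID that has already expired or been deleted.
    func isNotFound(err error) bool {
    	var esErr *types.ElasticsearchError
    	if errors.As(err, &esErr) {
    		return esErr.Status == http.StatusNotFound
    	}
    	return false
    }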
+func (r GetQuery) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.get_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetQuery query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetQuery headers map. +func (r *GetQuery) Header(key, value string) *GetQuery { + r.headers.Set(key, value) + + return r +} + +// Id The query ID +// API Name: id +func (r *GetQuery) _id(id string) *GetQuery { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetQuery) ErrorTrace(errortrace bool) *GetQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetQuery) FilterPath(filterpaths ...string) *GetQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetQuery) Human(human bool) *GetQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetQuery) Pretty(pretty bool) *GetQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/response.go new file mode 100644 index 000000000..c7a7c788c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getquery + +// Response holds the response body struct for the package getquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/get_query/GetQueryResponse.ts#L23-L33 +type Response struct { + CoordinatingNode string `json:"coordinating_node"` + DataNodes []string `json:"data_nodes"` + Id int64 `json:"id"` + Node string `json:"node"` + Query string `json:"query"` + RunningTimeNanos int64 `json:"running_time_nanos"` + StartTimeMillis int64 `json:"start_time_millis"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/list_queries.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/list_queries.go new file mode 100644 index 000000000..523ab2224 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/list_queries.go @@ -0,0 +1,329 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get running ES|QL queries information. +// Returns an object containing IDs and other information about the running +// ES|QL queries. +package listqueries + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ListQueries struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewListQueries type alias for index. 
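getquery is the one endpoint in this batch with a fully typed response, so the metadata of a running query is directly addressable through the struct fields above. A sketch, again assuming tp and an in-flight query ID:

    import (
    	"context"
    	"fmt"

    	"github.com/elastic/elastic-transport-go/v8/elastictransport"
    	"github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery"
    )

    // describeRunningQuery prints where a running ES|QL query is executing
    // and for how long it has been running.
    func describeRunningQuery(ctx context.Context, tp elastictransport.Interface, id string) error {
    	res, err := getquery.NewGetQueryFunc(tp)(id).Do(ctx)
    	if err != nil {
    		return err
    	}
    	fmt.Printf("%q coordinated by %s, running for %d ns\n",
    		res.Query, res.CoordinatingNode, res.RunningTimeNanos)
    	return nil
    }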
+type NewListQueries func() *ListQueries + +// NewListQueriesFunc returns a new instance of ListQueries with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListQueriesFunc(tp elastictransport.Interface) NewListQueries { + return func() *ListQueries { + n := New(tp) + + return n + } +} + +// Get running ES|QL queries information. +// Returns an object containing IDs and other information about the running +// ES|QL queries. +func New(tp elastictransport.Interface) *ListQueries { + r := &ListQueries{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ListQueries) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("queries") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ListQueries) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.list_queries") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.list_queries") + if reader := instrument.RecordRequestBody(ctx, "esql.list_queries", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.list_queries") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ListQueries query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a listqueries.Response +func (r ListQueries) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.list_queries") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
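listqueries takes no path parameters and, per its response.go further down in this diff, decodes into a Queries map keyed by query ID. A sketch that pairs naturally with getquery above:

    import (
    	"context"
    	"fmt"

    	"github.com/elastic/elastic-transport-go/v8/elastictransport"
    	"github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries"
    )

    // listRunning prints the IDs of all currently running ES|QL queries.
    func listRunning(ctx context.Context, tp elastictransport.Interface) error {
    	res, err := listqueries.NewListQueriesFunc(tp)().Do(ctx)
    	if err != nil {
    		return err
    	}
    	for id := range res.Queries {
    		fmt.Println("running ES|QL query:", id)
    	}
    	return nil
    }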
+func (r ListQueries) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.list_queries") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ListQueries query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ListQueries headers map. +func (r *ListQueries) Header(key, value string) *ListQueries { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ListQueries) ErrorTrace(errortrace bool) *ListQueries { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ListQueries) FilterPath(filterpaths ...string) *ListQueries { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ListQueries) Human(human bool) *ListQueries { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ListQueries) Pretty(pretty bool) *ListQueries { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/response.go new file mode 100644 index 000000000..6d8009882 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package listqueries + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package listqueries +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/list_queries/ListQueriesResponse.ts#L32-L36 +type Response struct { + Queries map[string]types.Body `json:"queries"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Queries: make(map[string]types.Body, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/helpers.go new file mode 100644 index 000000000..0d3da7a68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/helpers.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package query + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat" +) + +type metadata struct { + Name string `json:"name"` + Type string `json:"type"` +} + +type esqlResponse struct { + Columns []metadata `json:"columns"` + Values [][]any `json:"values"` +} + +// Helper takes a generic type T, a context.Context and an esql.Query request. +// Returns an array of T using the json.Unmarshaler of the type. +func Helper[T any](ctx context.Context, esqlQuery *Query) ([]T, error) { + response, err := esqlQuery. + Columnar(false). + Format(esqlformat.Json). + Header("x-elastic-client-meta", "h=qo"). 
+ Do(ctx) + if err != nil { + return nil, err + } + + var eR esqlResponse + err = json.Unmarshal(response, &eR) + if err != nil { + return nil, fmt.Errorf("cannot read ES|QL response: %w", err) + } + + buf := bytes.NewBuffer(nil) + buf.WriteByte('[') + for rowNum, row := range eR.Values { + buf.WriteByte('{') + for i := 0; i < len(row); i++ { + buf.WriteString(`"` + eR.Columns[i].Name + `":`) + data, err := json.Marshal(row[i]) + if err != nil { + return nil, fmt.Errorf("error while parsing ES|QL response: %w", err) + } + buf.Write(data) + if i != len(row)-1 { + buf.WriteByte(',') + } + } + buf.WriteByte('}') + if rowNum != len(eR.Values)-1 { + buf.WriteByte(',') + } + } + buf.WriteByte(']') + + target := []T{} + err = json.Unmarshal(buf.Bytes(), &target) + if err != nil { + return nil, fmt.Errorf("cannot deserialize ES|QL response: %w", err) + } + + return target, nil +} + +type EsqlIterator[T any] interface { + Next() (*T, error) + More() bool +} + +type iterator[T any] struct { + reader []byte + decoder *json.Decoder + keys []string + skipComma bool +} + +func (d iterator[T]) More() bool { + return d.decoder.More() +} + +func (d iterator[T]) Next() (*T, error) { + var t T + var tmp []any + + if d.skipComma { + d.decoder.Token() + } + + err := d.decoder.Decode(&tmp) + if err != nil { + return nil, err + } + + buf := bytes.Buffer{} + + buf.WriteByte('{') + for index, key := range d.keys { + buf.WriteString(`"` + key + `":`) + value, _ := json.Marshal(tmp[index]) + buf.Write(value) + + if index != len(d.keys)-1 { + buf.WriteByte(',') + } + } + buf.WriteByte('}') + + err = json.Unmarshal(buf.Bytes(), &t) + if err != nil { + return nil, err + } + + d.skipComma = true + return &t, nil +} + +// Helper takes a generic type T, a context.Context and an esql.Query request +// buffer the response and provides an API to consume one item at a time. +func NewIteratorHelper[T any](ctx context.Context, query *Query) (EsqlIterator[T], error) { + response, err := query. + Columnar(false). + Format(esqlformat.Json). + Header("x-elastic-client-meta", "h=qo"). + Perform(ctx) + if err != nil { + return nil, err + } + defer response.Body.Close() + + d := &iterator[T]{} + d.reader, err = io.ReadAll(response.Body) + d.decoder = json.NewDecoder(bytes.NewReader(d.reader)) + + var metas []metadata +OUTER: + for { + t, err := d.decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, err + } + switch t { + case "columns": + err := d.decoder.Decode(&metas) + if err != nil { + return nil, err + } + for _, m := range metas { + d.keys = append(d.keys, m.Name) + } + + case "values": + t, _ := d.decoder.Token() + if t != json.Delim(91) { + return nil, fmt.Errorf("cannot read response from ES|QL, expected ARRAY_START: %w", err) + } + break OUTER + } + } + + if err != nil { + return nil, fmt.Errorf("cannot read response from ES|QL: %w", err) + } + + return d, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/query.go new file mode 100644 index 000000000..3b1d8ca55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/query.go @@ -0,0 +1,500 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. +package query + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Query struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQuery type alias for index. +type NewQuery func() *Query + +// NewQueryFunc returns a new instance of Query with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryFunc(tp elastictransport.Interface) NewQuery { + return func() *Query { + n := New(tp) + + return n + } +} + +// Run an ES|QL query. +// Get search results for an ES|QL (Elasticsearch query language) query. +// +// https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest +func New(tp elastictransport.Interface) *Query { + r := &Query{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Query) Raw(raw io.Reader) *Query { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Query) Request(req *Request) *Query { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
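The hand-written Helper in helpers.go above re-keys the columnar wire format (a columns array plus row values) into one JSON object per row and unmarshals the result into []T, so struct tags must match the ES|QL column names. A sketch against a hypothetical employees index; the Query setter it uses is defined later in this file:

    import (
    	"context"

    	"github.com/elastic/elastic-transport-go/v8/elastictransport"
    	"github.com/elastic/go-elasticsearch/v9/typedapi/esql/query"
    )

    // employee mirrors the columns kept by the ES|QL statement below; the
    // json tags must equal the column names Helper writes.
    type employee struct {
    	Name   string  `json:"emp_name"`
    	Salary float64 `json:"salary"`
    }

    // topSalaries decodes the ten best-paid employees straight into structs.
    func topSalaries(ctx context.Context, tp elastictransport.Interface) ([]employee, error) {
    	q := query.NewQueryFunc(tp)().
    		Query("FROM employees | KEEP emp_name, salary | SORT salary DESC | LIMIT 10")
    	return query.Helper[employee](ctx, q)
    }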
+func (r *Query) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Query: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_query") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Query) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "esql.query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "esql.query") + if reader := instrument.RecordRequestBody(ctx, "esql.query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "esql.query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Query query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a query.Response +func (r Query) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "esql.query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok 
:= r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Query headers map. +func (r *Query) Header(key, value string) *Query { + r.headers.Set(key, value) + + return r +} + +// Format A short version of the Accept header, e.g. json, yaml. +// +// `csv`, `tsv`, and `txt` formats will return results in a tabular format, +// excluding other metadata fields from the response. +// API name: format +func (r *Query) Format(format esqlformat.EsqlFormat) *Query { + r.values.Set("format", format.String()) + + return r +} + +// Delimiter The character to use between values within a CSV row. Only valid for the CSV +// format. +// API name: delimiter +func (r *Query) Delimiter(delimiter string) *Query { + r.values.Set("delimiter", delimiter) + + return r +} + +// DropNullColumns Should columns that are entirely `null` be removed from the `columns` and +// `values` portion of the results? +// Defaults to `false`. If `true` then the response will include an extra +// section under the name `all_columns` which has the name of all columns. +// API name: drop_null_columns +func (r *Query) DropNullColumns(dropnullcolumns bool) *Query { + r.values.Set("drop_null_columns", strconv.FormatBool(dropnullcolumns)) + + return r +} + +// AllowPartialResults If `true`, partial results will be returned if there are shard failures, but +// the query can continue to execute on other clusters and shards. +// If `false`, the query will fail if there are any failures. +// +// To override the default behavior, you can set the +// `esql.query.allow_partial_results` cluster setting to `false`. +// API name: allow_partial_results +func (r *Query) AllowPartialResults(allowpartialresults bool) *Query { + r.values.Set("allow_partial_results", strconv.FormatBool(allowpartialresults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Query) ErrorTrace(errortrace bool) *Query { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Query) FilterPath(filterpaths ...string) *Query { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *Query) Human(human bool) *Query { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Query) Pretty(pretty bool) *Query { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// By default, ES|QL returns results as rows. For example, FROM returns each +// individual document as one row. For the JSON, YAML, CBOR and smile formats, +// ES|QL can return the results in a columnar fashion where one row represents +// all the values of a certain column in the results. +// API name: columnar +func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Columnar = &columnar + + return r +} + +// Specify a Query DSL query in the filter parameter to filter the set of +// documents that an ES|QL query runs on. +// API name: filter +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// When set to `true` and performing a cross-cluster query, the response will +// include an extra `_clusters` +// object with information about the clusters that participated in the search +// along with info such as shards +// count. +// API name: include_ccs_metadata +func (r *Query) IncludeCcsMetadata(includeccsmetadata bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeCcsMetadata = &includeccsmetadata + + return r +} + +// API name: locale +func (r *Query) Locale(locale string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Locale = &locale + + return r +} + +// To avoid any attempts of hacking or code injection, extract the values in a +// separate list of parameters. Use question mark placeholders (?) in the query +// string for each of the parameters. +// API name: params +func (r *Query) Params(params ...types.FieldValueVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, *v.FieldValueCaster()) + + } + return r +} + +// If provided and `true` the response will include an extra `profile` object +// with information on how the query was executed. This information is for human +// debugging +// and its format can change at any time but it can give some insight into the +// performance +// of each part of the query. +// API name: profile +func (r *Query) Profile(profile bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// The ES|QL query API accepts an ES|QL query string in the query parameter, +// runs it, and returns the results. +// API name: query +func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Tables to use with the LOOKUP operation. The top level key is the table +// name and the next level key is the column name. 
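Taken together, the setters above map one-to-one onto the request body fields defined in `request.go` below (the `Tables` setter resumes immediately after this aside). As a usage sketch, assuming the v9 typed client (`elasticsearch.NewTypedClient`) and a cluster at an illustrative address; note that for this endpoint `Do` returns the raw response body (`Response` is `[]byte`, see `response.go` further down):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}

	raw, err := es.Esql.Query().
		Query(`FROM logs-* | STATS hits = COUNT(*) BY host.name | LIMIT 10`).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Default JSON shape: {"columns":[{"name":...,"type":...}], "values":[[...], ...]}.
	var out struct {
		Columns []struct {
			Name string `json:"name"`
			Type string `json:"type"`
		} `json:"columns"`
		Values [][]any `json:"values"`
	}
	if err := json.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d columns, %d rows\n", len(out.Columns), len(out.Values))
}
```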
+// API name: tables +func (r *Query) Tables(tables map[string]map[string]types.TableValuesContainer) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Tables = tables + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/request.go new file mode 100644 index 000000000..95603bfc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/request.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package query + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/query/QueryRequest.ts#L27-L115 +type Request struct { + + // Columnar By default, ES|QL returns results as rows. For example, FROM returns each + // individual document as one row. For the JSON, YAML, CBOR and smile formats, + // ES|QL can return the results in a columnar fashion where one row represents + // all the values of a certain column in the results. + Columnar *bool `json:"columnar,omitempty"` + // Filter Specify a Query DSL query in the filter parameter to filter the set of + // documents that an ES|QL query runs on. + Filter *types.Query `json:"filter,omitempty"` + // IncludeCcsMetadata When set to `true` and performing a cross-cluster query, the response will + // include an extra `_clusters` + // object with information about the clusters that participated in the search + // along with info such as shards + // count. + IncludeCcsMetadata *bool `json:"include_ccs_metadata,omitempty"` + Locale *string `json:"locale,omitempty"` + // Params To avoid any attempts of hacking or code injection, extract the values in a + // separate list of parameters. Use question mark placeholders (?) in the query + // string for each of the parameters. + Params []types.FieldValue `json:"params,omitempty"` + // Profile If provided and `true` the response will include an extra `profile` object + // with information on how the query was executed. This information is for human + // debugging + // and its format can change at any time but it can give some insight into the + // performance + // of each part of the query. 
+ Profile *bool `json:"profile,omitempty"` + // Query The ES|QL query API accepts an ES|QL query string in the query parameter, + // runs it, and returns the results. + Query string `json:"query"` + // Tables Tables to use with the LOOKUP operation. The top level key is the table + // name and the next level key is the column name. + Tables map[string]map[string]types.TableValuesContainer `json:"tables,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Tables: make(map[string]map[string]types.TableValuesContainer, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Query request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/response.go new file mode 100644 index 000000000..e8ac65dc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/esql/query/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/509f88ad09a267354d02bf5f9e960503fa9236b7 + +package query + +// Response holds the response body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/509f88ad09a267354d02bf5f9e960503fa9236b7/specification/esql/query/QueryResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/get_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/get_features.go new file mode 100644 index 000000000..9db729933 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/get_features.go @@ -0,0 +1,361 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. +// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. +// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. +package getfeatures + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetFeatures struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetFeatures type alias for index. +type NewGetFeatures func() *GetFeatures + +// NewGetFeaturesFunc returns a new instance of GetFeatures with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFeaturesFunc(tp elastictransport.Interface) NewGetFeatures { + return func() *GetFeatures { + n := New(tp) + + return n + } +} + +// Get the features. +// Get a list of features that can be included in snapshots using the +// `feature_states` field when creating a snapshot. +// You can use this API to determine which feature states to include when taking +// a snapshot. +// By default, all feature states are included in a snapshot if that snapshot +// includes the global state, or none if it does not. +// +// A feature state includes one or more system indices necessary for a given +// feature to function. +// In order to ensure data integrity, all system indices that comprise a feature +// state are snapshotted and restored together. +// +// The features listed by this API are a combination of built-in features and +// features defined by plugins. 
+// In order for a feature state to be listed in this API and recognized as a +// valid feature state by the create snapshot API, the plugin that defines that +// feature must be installed on the master node. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features +func New(tp elastictransport.Interface) *GetFeatures { + r := &GetFeatures{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetFeatures) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_features") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetFeatures) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "features.get_features") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "features.get_features") + if reader := instrument.RecordRequestBody(ctx, "features.get_features", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "features.get_features") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetFeatures query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getfeatures.Response +func (r GetFeatures) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.get_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if 
err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetFeatures) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.get_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetFeatures query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetFeatures headers map. +func (r *GetFeatures) Header(key, value string) *GetFeatures { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *GetFeatures) MasterTimeout(duration string) *GetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetFeatures) ErrorTrace(errortrace bool) *GetFeatures { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetFeatures) FilterPath(filterpaths ...string) *GetFeatures { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
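A short sketch of calling this endpoint through the typed client. The address is an assumption, and the `Name`/`Description` fields are what `types.Feature` is expected to carry (only `Features []types.Feature` is visible in the `response.go` hunk below), so treat those field names as assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Features.GetFeatures().
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Each entry is a feature state usable in the create snapshot API's
	// `feature_states` field.
	for _, f := range res.Features {
		fmt.Println(f.Name, "-", f.Description)
	}
}
```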
+// API name: human +func (r *GetFeatures) Human(human bool) *GetFeatures { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetFeatures) Pretty(pretty bool) *GetFeatures { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/response.go new file mode 100644 index 000000000..be9d70fb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getfeatures + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getfeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 +type Response struct { + Features []types.Feature `json:"features"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/reset_features.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/reset_features.go new file mode 100644 index 000000000..ade72e67b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/reset_features.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. +// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. +// +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. +package resetfeatures + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResetFeatures struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResetFeatures type alias for index. +type NewResetFeatures func() *ResetFeatures + +// NewResetFeaturesFunc returns a new instance of ResetFeatures with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResetFeaturesFunc(tp elastictransport.Interface) NewResetFeatures { + return func() *ResetFeatures { + n := New(tp) + + return n + } +} + +// Reset the features. +// Clear all of the state information stored in system indices by Elasticsearch +// features, including the security and machine learning indices. +// +// WARNING: Intended for development and testing use only. Do not reset features +// on a production cluster. +// +// Return a cluster to the same state as a new installation by resetting the +// feature state for all Elasticsearch features. +// This deletes all state information stored in system indices. +// +// The response code is HTTP 200 if the state is successfully reset for all +// features. +// It is HTTP 500 if the reset operation failed for any feature. +// +// Note that select features might provide a way to reset particular system +// indices. +// Using this API resets all features, both those that are built-in and +// implemented as plugins. +// +// To list the features that will be affected, use the get features API. +// +// IMPORTANT: The features installed on the node you submit this request to are +// the features that will be reset. Run on the master node if you have any +// doubts about which plugins are installed on individual nodes. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features +func New(tp elastictransport.Interface) *ResetFeatures { + r := &ResetFeatures{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ResetFeatures) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_features") + path.WriteString("/") + path.WriteString("_reset") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ResetFeatures) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "features.reset_features") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "features.reset_features") + if reader := instrument.RecordRequestBody(ctx, "features.reset_features", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "features.reset_features") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResetFeatures query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resetfeatures.Response +func (r ResetFeatures) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.reset_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } 
+ return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ResetFeatures) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "features.reset_features") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ResetFeatures query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ResetFeatures headers map. +func (r *ResetFeatures) Header(key, value string) *ResetFeatures { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *ResetFeatures) MasterTimeout(duration string) *ResetFeatures { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ResetFeatures) ErrorTrace(errortrace bool) *ResetFeatures { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResetFeatures) FilterPath(filterpaths ...string) *ResetFeatures { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
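Given the WARNING in the package docs above, a hedged sketch of a test-only helper built on `IsSuccess` for quick control flow; the helper name and the throwaway-cluster assumption are mine, not part of this diff. Note that `IsSuccess` (shown above) returns `false` without an error on a 404:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// resetTestCluster wipes all feature state. Only ever point this at a
// disposable test cluster; never at production.
func resetTestCluster(ctx context.Context, es *elasticsearch.TypedClient) error {
	ok, err := es.Features.ResetFeatures().IsSuccess(ctx)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("feature reset did not succeed")
	}
	return nil
}

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed throwaway cluster
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := resetTestCluster(context.Background(), es); err != nil {
		log.Fatal(err)
	}
}
```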
+// API name: human +func (r *ResetFeatures) Human(human bool) *ResetFeatures { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ResetFeatures) Pretty(pretty bool) *ResetFeatures { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/response.go new file mode 100644 index 000000000..28b0accfb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resetfeatures + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package resetfeatures +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 +type Response struct { + Features []types.Feature `json:"features"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/global_checkpoints.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/global_checkpoints.go new file mode 100644 index 000000000..7fe855d4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/global_checkpoints.go @@ -0,0 +1,402 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get global checkpoints. +// +// Get the current global checkpoints for an index. +// This API is designed for internal use by the Fleet server project. +package globalcheckpoints + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GlobalCheckpoints struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGlobalCheckpoints type alias for index. +type NewGlobalCheckpoints func(index string) *GlobalCheckpoints + +// NewGlobalCheckpointsFunc returns a new instance of GlobalCheckpoints with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGlobalCheckpointsFunc(tp elastictransport.Interface) NewGlobalCheckpoints { + return func(index string) *GlobalCheckpoints { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get global checkpoints. +// +// Get the current global checkpoints for an index. +// This API is designed for internal use by the Fleet server project. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet +func New(tp elastictransport.Interface) *GlobalCheckpoints { + r := &GlobalCheckpoints{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
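As in the other endpoints, `New` above probes the transport for `elastictransport.Instrumented` and, when instrumentation is enabled, records spans such as "fleet.global_checkpoints" around each call. A sketch of opting in at client construction follows (the `HttpRequest` implementation resumes after it); the `NewOtelInstrumentation` constructor and its exact signature are an assumption based on elastic-transport-go v8.x, so verify against the vendored version:

```go
package main

import (
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumed signature: (tracerProvider, captureSearchBody, clientVersion).
	// A nil provider falls back to the global OpenTelemetry tracer provider.
	instr := elastictransport.NewOtelInstrumentation(nil, false, "9.1.0")

	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses:       []string{"http://localhost:9200"}, // assumed
		Instrumentation: instr,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = es // each request now emits a span named after its endpoint
}
```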
+func (r *GlobalCheckpoints) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("global_checkpoints") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GlobalCheckpoints) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "fleet.global_checkpoints") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.global_checkpoints") + if reader := instrument.RecordRequestBody(ctx, "fleet.global_checkpoints", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.global_checkpoints") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GlobalCheckpoints query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a globalcheckpoints.Response +func (r GlobalCheckpoints) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.global_checkpoints") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok 
:= r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GlobalCheckpoints) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.global_checkpoints") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GlobalCheckpoints query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GlobalCheckpoints headers map. +func (r *GlobalCheckpoints) Header(key, value string) *GlobalCheckpoints { + r.headers.Set(key, value) + + return r +} + +// Index A single index or index alias that resolves to a single index. +// API Name: index +func (r *GlobalCheckpoints) _index(index string) *GlobalCheckpoints { + r.paramSet |= indexMask + r.index = index + + return r +} + +// WaitForAdvance A boolean value which controls whether to wait (until the timeout) for the +// global checkpoints +// to advance past the provided `checkpoints`. +// API name: wait_for_advance +func (r *GlobalCheckpoints) WaitForAdvance(waitforadvance bool) *GlobalCheckpoints { + r.values.Set("wait_for_advance", strconv.FormatBool(waitforadvance)) + + return r +} + +// WaitForIndex A boolean value which controls whether to wait (until the timeout) for the +// target index to exist +// and all primary shards be active. Can only be true when `wait_for_advance` is +// true. +// API name: wait_for_index +func (r *GlobalCheckpoints) WaitForIndex(waitforindex bool) *GlobalCheckpoints { + r.values.Set("wait_for_index", strconv.FormatBool(waitforindex)) + + return r +} + +// Checkpoints A comma separated list of previous global checkpoints. When used in +// combination with `wait_for_advance`, +// the API will only return once the global checkpoints advances past the +// checkpoints. Providing an empty list +// will cause Elasticsearch to immediately return the current global +// checkpoints. 
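Pulling the setters above together (the `Checkpoints` setter itself continues just below), a polling sketch for the Fleet server's typical pattern: block until the global checkpoint advances past the last value seen. The index name and starting checkpoint are hypothetical; the `GlobalCheckpoints`/`TimedOut` response fields are defined in `response.go` below:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}

	// Wait (up to 30s) for the checkpoint to advance past 41 on a
	// hypothetical alias that resolves to a single index.
	res, err := es.Fleet.GlobalCheckpoints("logs-fleet").
		WaitForAdvance(true).
		WaitForIndex(true).
		Checkpoints(41).
		Timeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.GlobalCheckpoints, res.TimedOut)
}
```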
+// API name: checkpoints +func (r *GlobalCheckpoints) Checkpoints(checkpoints ...int64) *GlobalCheckpoints { + tmp := []string{} + for _, item := range checkpoints { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("checkpoints", strings.Join(tmp, ",")) + + return r +} + +// Timeout Period to wait for a global checkpoints to advance past `checkpoints`. +// API name: timeout +func (r *GlobalCheckpoints) Timeout(duration string) *GlobalCheckpoints { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GlobalCheckpoints) ErrorTrace(errortrace bool) *GlobalCheckpoints { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GlobalCheckpoints) FilterPath(filterpaths ...string) *GlobalCheckpoints { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GlobalCheckpoints) Human(human bool) *GlobalCheckpoints { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GlobalCheckpoints) Pretty(pretty bool) *GlobalCheckpoints { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/response.go new file mode 100644 index 000000000..664594595 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package globalcheckpoints + +// Response holds the response body struct for the package globalcheckpoints +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 +type Response struct { + GlobalCheckpoints []int64 `json:"global_checkpoints"` + TimedOut bool `json:"timed_out"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/msearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/msearch.go new file mode 100644 index 000000000..5963b9c24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/msearch.go @@ -0,0 +1,517 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run multiple Fleet searches. +// Run several Fleet searches with a single API request. +// The API follows the same structure as the multi search API. +// However, similar to the Fleet search API, it supports the +// `wait_for_checkpoints` parameter. +package msearch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Msearch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMsearch type alias for index. +type NewMsearch func() *Msearch + +// NewMsearchFunc returns a new instance of Msearch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { + return func() *Msearch { + n := New(tp) + + return n + } +} + +// Run multiple Fleet searches. +// Run several Fleet searches with a single API request. +// The API follows the same structure as the multi search API. +// However, similar to the Fleet search API, it supports the +// `wait_for_checkpoints` parameter. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch +func New(tp elastictransport.Interface) *Msearch { + r := &Msearch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Msearch) Raw(raw io.Reader) *Msearch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Msearch) Request(req *Request) *Msearch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Msearch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Msearch: %w", err) + } + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_msearch") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_msearch") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
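+//
+// Unlike Do, Perform does not decode the response: it hands back the raw
+// *http.Response, so the caller owns (and must close) the body. A sketch,
+// with the `client` wiring assumed as above:
+//
+//	res, err := client.Fleet.Msearch().Raw(body).Perform(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()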
+func (r Msearch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "fleet.msearch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.msearch") + if reader := instrument.RecordRequestBody(ctx, "fleet.msearch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.msearch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Msearch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearch.Response +func (r Msearch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.msearch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Msearch headers map. +func (r *Msearch) Header(key, value string) *Msearch { + r.headers.Set(key, value) + + return r +} + +// Index A single target to search. If the target is an index alias, it must resolve +// to a single index. +// API Name: index +func (r *Msearch) Index(index string) *Msearch { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all value targets only missing or closed indices. This behavior +// applies even if the request targets other open indices. For example, a +// request targeting foo*,bar* returns an error if an index starts with foo but +// no index starts with bar. 
+// API name: allow_no_indices +func (r *Msearch) AllowNoIndices(allownoindices bool) *Msearch { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// CcsMinimizeRoundtrips If true, network roundtrips between the coordinating node and remote clusters +// are minimized for cross-cluster search requests. +// API name: ccs_minimize_roundtrips +func (r *Msearch) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Msearch { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// ExpandWildcards Type of index that wildcard expressions can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. +// API name: expand_wildcards +func (r *Msearch) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Msearch { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *Msearch) IgnoreThrottled(ignorethrottled bool) *Msearch { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *Msearch) IgnoreUnavailable(ignoreunavailable bool) *Msearch { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MaxConcurrentSearches Maximum number of concurrent searches the multi search API can execute. +// API name: max_concurrent_searches +func (r *Msearch) MaxConcurrentSearches(maxconcurrentsearches int) *Msearch { + r.values.Set("max_concurrent_searches", strconv.Itoa(maxconcurrentsearches)) + + return r +} + +// MaxConcurrentShardRequests Maximum number of concurrent shard requests that each sub-search request +// executes per node. +// API name: max_concurrent_shard_requests +func (r *Msearch) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *Msearch { + r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests)) + + return r +} + +// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search +// shards based on query rewriting if the number of shards the search request +// expands to exceeds the threshold. This filter roundtrip can limit the number +// of shards significantly if for instance a shard can not match any documents +// based on its rewrite method i.e., if date filters are mandatory to match but +// the shard bounds and the query are disjoint. +// API name: pre_filter_shard_size +func (r *Msearch) PreFilterShardSize(prefiltershardsize string) *Msearch { + r.values.Set("pre_filter_shard_size", prefiltershardsize) + + return r +} + +// SearchType Indicates whether global term and document frequencies should be used when +// scoring returned documents. +// API name: search_type +func (r *Msearch) SearchType(searchtype searchtype.SearchType) *Msearch { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// RestTotalHitsAsInt If true, hits.total are returned as an integer in the response. Defaults to +// false, which returns an object. 
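+// For example, when enabled the response contains `"total": 42` where it
+// would otherwise contain `"total": {"value": 42, "relation": "eq"}`.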
+// API name: rest_total_hits_as_int
+func (r *Msearch) RestTotalHitsAsInt(resttotalhitsasint bool) *Msearch {
+	r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint))
+
+	return r
+}
+
+// TypedKeys Specifies whether aggregation and suggester names should be prefixed by their
+// respective types in the response.
+// API name: typed_keys
+func (r *Msearch) TypedKeys(typedkeys bool) *Msearch {
+	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))
+
+	return r
+}
+
+// WaitForCheckpoints A comma-separated list of checkpoints. When configured, the search API
+// will only be executed on a shard after the relevant checkpoint has become
+// visible for search.
+// Defaults to an empty list, which causes Elasticsearch to execute the
+// search immediately.
+// API name: wait_for_checkpoints
+func (r *Msearch) WaitForCheckpoints(waitforcheckpoints ...int64) *Msearch {
+	tmp := []string{}
+	for _, item := range waitforcheckpoints {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("wait_for_checkpoints", strings.Join(tmp, ","))
+
+	return r
+}
+
+// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts
+// or shard failures.
+// If false, returns an error with no partial results.
+// Defaults to the configured cluster setting
+// `search.default_allow_partial_results`, which is true by default.
+// API name: allow_partial_search_results
+func (r *Msearch) AllowPartialSearchResults(allowpartialsearchresults bool) *Msearch {
+	r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Msearch) ErrorTrace(errortrace bool) *Msearch {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Msearch) FilterPath(filterpaths ...string) *Msearch {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable
+// for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers.
+// When disabled, the human-readable values will be omitted.
+// This makes sense for responses consumed only by machines.
+// API name: human
+func (r *Msearch) Human(human bool) *Msearch {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted".
+// Use this option for debugging only.
+// API name: pretty
+func (r *Msearch) Pretty(pretty bool) *Msearch {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/request.go
new file mode 100644
index 000000000..a300f2552
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/request.go
@@ -0,0 +1,30 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/fleet/msearch/MultiSearchRequest.ts#L31-L126 +type Request = []types.MsearchRequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/response.go new file mode 100644 index 000000000..7db987d2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch/response.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package msearch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 +type Response struct { + Docs []types.MsearchResponseItem `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + docs_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Docs", err) + } + + switch t { + + case "aggregations", "_clusters", "fields", "hits", "max_score", "num_reduce_phases", "pit_id", "profile", "_scroll_id", "_shards", "suggest", "terminated_early", "timed_out", "took": + o := types.NewMultiSearchItem() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + continue docs_field + + case "error": + o := types.NewErrorResponseBase() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + continue docs_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + s.Docs = append(s.Docs, o) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/postsecret/post_secret.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/postsecret/post_secret.go new file mode 100644 index 000000000..d935d4db9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/postsecret/post_secret.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Creates a secret stored by Fleet. +package postsecret + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostSecret struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostSecret type alias for index. +type NewPostSecret func() *PostSecret + +// NewPostSecretFunc returns a new instance of PostSecret with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostSecretFunc(tp elastictransport.Interface) NewPostSecret { + return func() *PostSecret { + n := New(tp) + + return n + } +} + +// Creates a secret stored by Fleet. +func New(tp elastictransport.Interface) *PostSecret { + r := &PostSecret{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PostSecret) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("secret") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
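+//
+// Note that Do on this endpoint simply delegates to IsSuccess and discards
+// the response body, so callers that need the payload (for example, the ID
+// assigned to the stored secret) can call Perform and decode the body
+// themselves. A sketch, assuming the typed client wires this package up as
+// `client.Fleet.PostSecret()`:
+//
+//	res, err := client.Fleet.PostSecret().Perform(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()
+//	// decode res.Body here if the response payload is needed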
+func (r PostSecret) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "fleet.post_secret") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.post_secret") + if reader := instrument.RecordRequestBody(ctx, "fleet.post_secret", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.post_secret") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostSecret query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postsecret.Response +func (r PostSecret) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PostSecret) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.post_secret") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PostSecret query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PostSecret headers map. +func (r *PostSecret) Header(key, value string) *PostSecret { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/request.go new file mode 100644 index 000000000..2ea620305 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/request.go @@ -0,0 +1,488 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/fleet/search/SearchRequest.ts#L54-L266 +type Request struct { + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + Collapse *types.FieldCollapse `json:"collapse,omitempty"` + // DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field + // names matching these patterns in the hits.fields property of the response. + DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` + // Explain If true, returns detailed information about score computation as part of a + // hit. + Explain *bool `json:"explain,omitempty"` + // Ext Configuration of search extensions defined by Elasticsearch plugins. + Ext map[string]json.RawMessage `json:"ext,omitempty"` + // Fields Array of wildcard (*) patterns. The request returns values for field names + // matching these patterns in the hits.fields property of the response. + Fields []types.FieldAndFormat `json:"fields,omitempty"` + // From Starting document offset. By default, you cannot page through more than + // 10,000 + // hits using the from and size parameters. To page through more hits, use the + // search_after parameter. + From *int `json:"from,omitempty"` + Highlight *types.Highlight `json:"highlight,omitempty"` + // IndicesBoost Boosts the _score of documents from specified indices. + IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are + // not included in search results and results collected by aggregations. + MinScore *types.Float64 `json:"min_score,omitempty"` + // Pit Limits the search to a point in time (PIT). If you provide a PIT, you + // cannot specify an in the request path. + Pit *types.PointInTimeReference `json:"pit,omitempty"` + PostFilter *types.Query `json:"post_filter,omitempty"` + Profile *bool `json:"profile,omitempty"` + // Query Defines the search definition using the Query DSL. + Query *types.Query `json:"query,omitempty"` + Rescore []types.Rescore `json:"rescore,omitempty"` + // RuntimeMappings Defines one or more runtime fields in the search request. These fields take + // precedence over mapped fields with the same name. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
+ ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification + // of each hit. See Optimistic concurrency control. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The number of hits to return. By default, you cannot page through more + // than 10,000 hits using the from and size parameters. To page through more + // hits, use the search_after parameter. + Size *int `json:"size,omitempty"` + Slice *types.SlicedScroll `json:"slice,omitempty"` + Sort []types.SortCombinations `json:"sort,omitempty"` + // Source_ Indicates which source fields are returned for matching documents. These + // fields are returned in the hits._source property of the search response. + Source_ types.SourceConfig `json:"_source,omitempty"` + // Stats Stats groups to associate with the search. Each group maintains a statistics + // aggregation for its associated searches. You can retrieve these stats using + // the indices stats API. + Stats []string `json:"stats,omitempty"` + // StoredFields List of stored fields to return as part of a hit. If no fields are specified, + // no stored fields are included in the response. If this field is specified, + // the _source + // parameter defaults to false. You can pass _source: true to return both source + // fields + // and stored fields in the search response. + StoredFields []string `json:"stored_fields,omitempty"` + Suggest *types.Suggester `json:"suggest,omitempty"` + // TerminateAfter Maximum number of documents to collect for each shard. If a query reaches + // this + // limit, Elasticsearch terminates the query early. Elasticsearch collects + // documents + // before sorting. Defaults to 0, which does not terminate query execution + // early. + TerminateAfter *int64 `json:"terminate_after,omitempty"` + // Timeout Specifies the period of time to wait for a response from each shard. If no + // response + // is received before the timeout expires, the request fails and returns an + // error. + // Defaults to no timeout. + Timeout *string `json:"timeout,omitempty"` + // TrackScores If true, calculate and return document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // TrackTotalHits Number of hits matching the query to count accurately. If true, the exact + // number of hits is returned at the cost of some performance. If false, the + // response does not include the total number of hits matching the query. + // Defaults to 10,000 hits. + TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"` + // Version If true, returns document version as part of a hit. 
+ Version *bool `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.Aggregations, 0), + Ext: make(map[string]json.RawMessage, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Search request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return fmt.Errorf("%s | %w", "DocvalueFields", err) + } + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "ext": + if s.Ext == nil { + s.Ext = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Ext); err != nil { + return fmt.Errorf("%s | %w", "Ext", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "indices_boost": + if err := dec.Decode(&s.IndicesBoost); err != nil { + return fmt.Errorf("%s | %w", "IndicesBoost", err) + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := types.Float64(value) + s.MinScore = &f + case float64: + f := types.Float64(v) + s.MinScore = &f + } + + case "pit": + if err := dec.Decode(&s.Pit); err != nil { + return fmt.Errorf("%s | %w", "Pit", err) + } + + case "post_filter": + if err := dec.Decode(&s.PostFilter); err != nil { + return fmt.Errorf("%s | %w", "PostFilter", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rescore": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewRescore() + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + + s.Rescore = append(s.Rescore, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]types.ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "seq_no_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err) + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "slice": + if err := dec.Decode(&s.Slice); err != nil { + return fmt.Errorf("%s | %w", "Slice", err) + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := types.NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + } + + case "suggest": + if err := dec.Decode(&s.Suggest); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + + case "terminate_after": + var tmp any + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TerminateAfter", err) + } + s.TerminateAfter = &value + case float64: + f := int64(v) + s.TerminateAfter = &f + } + + case "timeout": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timeout = &o + + case "track_scores": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrackScores", err) + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "track_total_hits": + if err := dec.Decode(&s.TrackTotalHits); err != nil { + return fmt.Errorf("%s | %w", "TrackTotalHits", err) + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/response.go new file mode 100644 index 000000000..c356b7635 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/response.go @@ -0,0 +1,789 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/fleet/search/SearchResponse.ts#L33-L50 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + 
} + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + 
if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/search.go new file mode 100644 index 000000000..3909386e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search/search.go @@ -0,0 +1,1085 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a Fleet search. 
+// The purpose of the Fleet search API is to provide an API where the search +// will be run only +// after the provided checkpoint has been processed and is visible for searches +// inside of Elasticsearch. +package search + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Search struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearch type alias for index. +type NewSearch func(index string) *Search + +// NewSearchFunc returns a new instance of Search with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchFunc(tp elastictransport.Interface) NewSearch { + return func(index string) *Search { + n := New(tp) + + n._index(index) + + return n + } +} + +// Run a Fleet search. +// The purpose of the Fleet search API is to provide an API where the search +// will be run only +// after the provided checkpoint has been processed and is visible for searches +// inside of Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search +func New(tp elastictransport.Interface) *Search { + r := &Search{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Search) Raw(raw io.Reader) *Search { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Search) Request(req *Request) *Search { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
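+//
+// The body is serialized from the structured Request unless Raw was set, in
+// which case the raw reader is sent as-is. The returned request is built but
+// not executed; use Perform or Do to run it.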
+func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Search: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
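+//
+// Most callers use Do instead, which wraps Perform, decodes the body into a
+// Response, and surfaces API failures as *types.ElasticsearchError. A minimal
+// sketch of that path; the typed client `client`, its Fleet namespace path,
+// and the "logs-fleet" index name are illustrative assumptions:
+//
+//	res, err := client.Fleet.Search("logs-fleet").
+//		Query(&types.Query{MatchAll: &types.MatchAllQuery{}}).
+//		WaitForCheckpoints(42).
+//		Do(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(res.Took)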
+func (r Search) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "fleet.search") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "fleet.search") + if reader := instrument.RecordRequestBody(ctx, "fleet.search", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "fleet.search") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Search query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a search.Response +func (r Search) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "fleet.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Search headers map. +func (r *Search) Header(key, value string) *Search { + r.headers.Set(key, value) + + return r +} + +// Index A single target to search. If the target is an index alias, it must resolve +// to a single index. 
+// API Name: index +func (r *Search) _index(index string) *Search { + r.paramSet |= indexMask + r.index = index + + return r +} + +// API name: allow_no_indices +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// API name: analyzer +func (r *Search) Analyzer(analyzer string) *Search { + r.values.Set("analyzer", analyzer) + + return r +} + +// API name: analyze_wildcard +func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// API name: batched_reduce_size +func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { + r.values.Set("batched_reduce_size", batchedreducesize) + + return r +} + +// API name: ccs_minimize_roundtrips +func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// API name: default_operator +func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// API name: df +func (r *Search) Df(df string) *Search { + r.values.Set("df", df) + + return r +} + +// API name: expand_wildcards +func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// API name: ignore_throttled +func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// API name: ignore_unavailable +func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// API name: lenient +func (r *Search) Lenient(lenient bool) *Search { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// API name: max_concurrent_shard_requests +func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests int) *Search { + r.values.Set("max_concurrent_shard_requests", strconv.Itoa(maxconcurrentshardrequests)) + + return r +} + +// API name: preference +func (r *Search) Preference(preference string) *Search { + r.values.Set("preference", preference) + + return r +} + +// API name: pre_filter_shard_size +func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { + r.values.Set("pre_filter_shard_size", prefiltershardsize) + + return r +} + +// API name: request_cache +func (r *Search) RequestCache(requestcache bool) *Search { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) + + return r +} + +// API name: routing +func (r *Search) Routing(routing string) *Search { + r.values.Set("routing", routing) + + return r +} + +// API name: scroll +func (r *Search) Scroll(duration string) *Search { + r.values.Set("scroll", duration) + + return r +} + +// API name: search_type +func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// SuggestField Specifies which field to use for suggestions. 
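+// For example, the suggest parameters can be combined; the field name and
+// text below are illustrative:
+//
+//	search.SuggestField("title").
+//		SuggestText("some user input").
+//		SuggestMode(suggestmode.Missing)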
+// API name: suggest_field +func (r *Search) SuggestField(field string) *Search { + r.values.Set("suggest_field", field) + + return r +} + +// API name: suggest_mode +func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { + r.values.Set("suggest_mode", suggestmode.String()) + + return r +} + +// API name: suggest_size +func (r *Search) SuggestSize(suggestsize string) *Search { + r.values.Set("suggest_size", suggestsize) + + return r +} + +// SuggestText The source text for which the suggestions should be returned. +// API name: suggest_text +func (r *Search) SuggestText(suggesttext string) *Search { + r.values.Set("suggest_text", suggesttext) + + return r +} + +// API name: typed_keys +func (r *Search) TypedKeys(typedkeys bool) *Search { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// API name: rest_total_hits_as_int +func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// API name: _source_excludes +func (r *Search) SourceExcludes_(fields ...string) *Search { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// API name: _source_includes +func (r *Search) SourceIncludes_(fields ...string) *Search { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// API name: q +func (r *Search) Q(q string) *Search { + r.values.Set("q", q) + + return r +} + +// WaitForCheckpoints A comma separated list of checkpoints. When configured, the search API will +// only be executed on a shard +// after the relevant checkpoint has become visible for search. Defaults to an +// empty list which will cause +// Elasticsearch to immediately execute the search. +// API name: wait_for_checkpoints +func (r *Search) WaitForCheckpoints(waitforcheckpoints ...int64) *Search { + tmp := []string{} + for _, item := range waitforcheckpoints { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("wait_for_checkpoints", strings.Join(tmp, ",")) + + return r +} + +// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or shard +// failures. +// If false, returns an error with no partial results. +// Defaults to the configured cluster setting +// `search.default_allow_partial_results`, which is true by default. +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Search) ErrorTrace(errortrace bool) *Search { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Search) FilterPath(filterpaths ...string) *Search { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *Search) Human(human bool) *Search {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option only when debugging.
+// API name: pretty
+func (r *Search) Pretty(pretty bool) *Search {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: aggregations
+func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aggregations = aggregations
+	return r
+}
+
+func (r *Search) AddAggregation(key string, value types.AggregationsVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Reuse the existing map when present; allocate one otherwise so the
+	// write below never hits a nil map.
+	tmp := r.req.Aggregations
+	if tmp == nil {
+		tmp = make(map[string]types.Aggregations)
+	}
+
+	tmp[key] = *value.AggregationsCaster()
+
+	r.req.Aggregations = tmp
+	return r
+}
+
+// API name: collapse
+func (r *Search) Collapse(collapse types.FieldCollapseVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Collapse = collapse.FieldCollapseCaster()
+
+	return r
+}
+
+// Array of wildcard (*) patterns. The request returns doc values for field
+// names matching these patterns in the hits.fields property of the response.
+// API name: docvalue_fields
+func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormatVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range docvaluefields {
+		r.req.DocvalueFields = append(r.req.DocvalueFields, *v.FieldAndFormatCaster())
+	}
+	return r
+}
+
+// If true, returns detailed information about score computation as part of a
+// hit.
+// API name: explain
+func (r *Search) Explain(explain bool) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Explain = &explain
+
+	return r
+}
+
+// Configuration of search extensions defined by Elasticsearch plugins.
+// API name: ext
+func (r *Search) Ext(ext map[string]json.RawMessage) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Ext = ext
+	return r
+}
+
+func (r *Search) AddExt(key string, value json.RawMessage) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Reuse the existing map when present; allocate one otherwise so the
+	// write below never hits a nil map.
+	tmp := r.req.Ext
+	if tmp == nil {
+		tmp = make(map[string]json.RawMessage)
+	}
+
+	tmp[key] = value
+
+	r.req.Ext = tmp
+	return r
+}
+
+// Array of wildcard (*) patterns. The request returns values for field names
+// matching these patterns in the hits.fields property of the response.
+// API name: fields
+func (r *Search) Fields(fields ...types.FieldAndFormatVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range fields {
+		r.req.Fields = append(r.req.Fields, *v.FieldAndFormatCaster())
+	}
+	return r
+}
+
+// Starting document offset.
By default, you cannot page through more than +// 10,000 +// hits using the from and size parameters. To page through more hits, use the +// search_after parameter. +// API name: from +func (r *Search) From(from int) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// API name: highlight +func (r *Search) Highlight(highlight types.HighlightVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Highlight = highlight.HighlightCaster() + + return r +} + +// Boosts the _score of documents from specified indices. +// API name: indices_boost +func (r *Search) IndicesBoost(indicesboost []map[string]types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesBoost = indicesboost + + return r +} + +// Minimum _score for matching documents. Documents with a lower _score are +// not included in search results and results collected by aggregations. +// API name: min_score +func (r *Search) MinScore(minscore types.Float64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MinScore = &minscore + + return r +} + +// Limits the search to a point in time (PIT). If you provide a PIT, you +// cannot specify an in the request path. +// API name: pit +func (r *Search) Pit(pit types.PointInTimeReferenceVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pit = pit.PointInTimeReferenceCaster() + + return r +} + +// API name: post_filter +func (r *Search) PostFilter(postfilter types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PostFilter = postfilter.QueryCaster() + + return r +} + +// API name: profile +func (r *Search) Profile(profile bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Profile = &profile + + return r +} + +// Defines the search definition using the Query DSL. +// API name: query +func (r *Search) Query(query types.QueryVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// API name: rescore +func (r *Search) Rescore(rescores ...types.RescoreVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rescore = make([]types.Rescore, len(rescores)) + for i, v := range rescores { + r.req.Rescore[i] = *v.RescoreCaster() + } + + return r +} + +// Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// Retrieve a script evaluation (based on different fields) for each hit. 
+// API name: script_fields
+func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.ScriptFields = scriptfields
+	return r
+}
+
+func (r *Search) AddScriptField(key string, value types.ScriptFieldVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Reuse the existing map when present; allocate one otherwise so the
+	// write below never hits a nil map.
+	tmp := r.req.ScriptFields
+	if tmp == nil {
+		tmp = make(map[string]types.ScriptField)
+	}
+
+	tmp[key] = *value.ScriptFieldCaster()
+
+	r.req.ScriptFields = tmp
+	return r
+}
+
+// API name: search_after
+func (r *Search) SearchAfter(sortresults ...types.FieldValueVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	for _, v := range sortresults {
+		r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster())
+	}
+
+	return r
+}
+
+// If true, returns sequence number and primary term of the last modification
+// of each hit. See Optimistic concurrency control.
+// API name: seq_no_primary_term
+func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.SeqNoPrimaryTerm = &seqnoprimaryterm
+
+	return r
+}
+
+// The number of hits to return. By default, you cannot page through more
+// than 10,000 hits using the from and size parameters. To page through more
+// hits, use the search_after parameter.
+// API name: size
+func (r *Search) Size(size int) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Size = &size
+
+	return r
+}
+
+// API name: slice
+func (r *Search) Slice(slice types.SlicedScrollVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Slice = slice.SlicedScrollCaster()
+
+	return r
+}
+
+// API name: sort
+func (r *Search) Sort(sorts ...types.SortCombinationsVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	for _, v := range sorts {
+		r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster())
+	}
+
+	return r
+}
+
+// Indicates which source fields are returned for matching documents. These
+// fields are returned in the hits._source property of the search response.
+// API name: _source
+func (r *Search) Source_(sourceconfig types.SourceConfigVariant) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Source_ = *sourceconfig.SourceConfigCaster()
+
+	return r
+}
+
+// Stats groups to associate with the search. Each group maintains a statistics
+// aggregation for its associated searches. You can retrieve these stats using
+// the indices stats API.
+// API name: stats
+func (r *Search) Stats(stats ...string) *Search {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Stats = append(r.req.Stats, stats...)
+	return r
+}
+
+// List of stored fields to return as part of a hit. If no fields are specified,
+// no stored fields are included in the response.
If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. +// API name: stored_fields +func (r *Search) StoredFields(fields ...string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.StoredFields = fields + + return r +} + +// API name: suggest +func (r *Search) Suggest(suggest types.SuggesterVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Suggest = suggest.SuggesterCaster() + + return r +} + +// Maximum number of documents to collect for each shard. If a query reaches +// this +// limit, Elasticsearch terminates the query early. Elasticsearch collects +// documents +// before sorting. Defaults to 0, which does not terminate query execution +// early. +// API name: terminate_after +func (r *Search) TerminateAfter(terminateafter int64) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TerminateAfter = &terminateafter + + return r +} + +// Specifies the period of time to wait for a response from each shard. If no +// response +// is received before the timeout expires, the request fails and returns an +// error. +// Defaults to no timeout. +// API name: timeout +func (r *Search) Timeout(timeout string) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = &timeout + + return r +} + +// If true, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Search) TrackScores(trackscores bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackScores = &trackscores + + return r +} + +// Number of hits matching the query to count accurately. If true, the exact +// number of hits is returned at the cost of some performance. If false, the +// response does not include the total number of hits matching the query. +// Defaults to 10,000 hits. +// API name: track_total_hits +func (r *Search) TrackTotalHits(trackhits types.TrackHitsVariant) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TrackTotalHits = *trackhits.TrackHitsCaster() + + return r +} + +// If true, returns document version as part of a hit. +// API name: version +func (r *Search) Version(version bool) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &version + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/explore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/explore.go new file mode 100644 index 000000000..bfe5d2d06 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/explore.go @@ -0,0 +1,454 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Explore graph analytics. +// Extract and summarize information about the documents and terms in an +// Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. +// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. +package explore + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Explore struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExplore type alias for index. +type NewExplore func(index string) *Explore + +// NewExploreFunc returns a new instance of Explore with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExploreFunc(tp elastictransport.Interface) NewExplore { + return func(index string) *Explore { + n := New(tp) + + n._index(index) + + return n + } +} + +// Explore graph analytics. +// Extract and summarize information about the documents and terms in an +// Elasticsearch data stream or index. +// The easiest way to understand the behavior of this API is to use the Graph UI +// to explore connections. +// An initial request to the `_explore` API contains a seed query that +// identifies the documents of interest and specifies the fields that define the +// vertices and connections you want to include in the graph. +// Subsequent requests enable you to spider out from one more vertices of +// interest. +// You can exclude vertices that have already been returned. 
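+//
+// A minimal sketch, assuming a typed client `client`, its Graph namespace
+// path, and a hypothetical "clicklogs" index with a "product" field:
+//
+//	res, err := client.Graph.Explore("clicklogs").
+//		Query(&types.Query{Match: map[string]types.MatchQuery{"product": {Query: "1854873"}}}).
+//		Vertices(&types.VertexDefinition{Field: "product"}).
+//		Do(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(res.Vertices))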
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph +func New(tp elastictransport.Interface) *Explore { + r := &Explore{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Explore) Raw(raw io.Reader) *Explore { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Explore) Request(req *Request) *Explore { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Explore) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Explore: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_graph") + path.WriteString("/") + path.WriteString("explore") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
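+//
+// Most callers use Do instead, which decodes the body into an
+// explore.Response and surfaces API failures as *types.ElasticsearchError.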
+func (r Explore) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "graph.explore") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "graph.explore") + if reader := instrument.RecordRequestBody(ctx, "graph.explore", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "graph.explore") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Explore query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explore.Response +func (r Explore) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "graph.explore") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Explore headers map. +func (r *Explore) Header(key, value string) *Explore { + r.headers.Set(key, value) + + return r +} + +// Index Name of the index. +// API Name: index +func (r *Explore) _index(index string) *Explore { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Routing Custom value used to route operations to a specific shard. +// API name: routing +func (r *Explore) Routing(routing string) *Explore { + r.values.Set("routing", routing) + + return r +} + +// Timeout Specifies the period of time to wait for a response from each shard. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// Defaults to no timeout. 
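+// The value is an Elasticsearch time unit string, for example "10s" or "1m".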
+// API name: timeout
+func (r *Explore) Timeout(duration string) *Explore {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Explore) ErrorTrace(errortrace bool) *Explore {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Explore) FilterPath(filterpaths ...string) *Explore {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *Explore) Human(human bool) *Explore {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option only when debugging.
+// API name: pretty
+func (r *Explore) Pretty(pretty bool) *Explore {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Specifies one or more fields from which you want to extract terms that are
+// associated with the specified vertices.
+// API name: connections
+func (r *Explore) Connections(connections types.HopVariant) *Explore {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Connections = connections.HopCaster()
+
+	return r
+}
+
+// Direct the Graph API how to build the graph.
+// API name: controls
+func (r *Explore) Controls(controls types.ExploreControlsVariant) *Explore {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Controls = controls.ExploreControlsCaster()
+
+	return r
+}
+
+// A seed query that identifies the documents of interest. Can be any valid
+// Elasticsearch query.
+// API name: query
+func (r *Explore) Query(query types.QueryVariant) *Explore {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Query = query.QueryCaster()
+
+	return r
+}
+
+// Specifies one or more fields that contain the terms you want to include in
+// the graph as vertices.
+// API name: vertices
+func (r *Explore) Vertices(vertices ...types.VertexDefinitionVariant) *Explore {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range vertices {
+		r.req.Vertices = append(r.req.Vertices, *v.VertexDefinitionCaster())
+	}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/request.go
new file mode 100644
index 000000000..7956e58c3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/request.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package explore
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package explore
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/explore/GraphExploreRequest.ts#L28-L84
+type Request struct {
+
+	// Connections Specifies one or more fields from which you want to extract terms that are
+	// associated with the specified vertices.
+	Connections *types.Hop `json:"connections,omitempty"`
+	// Controls Direct the Graph API how to build the graph.
+	Controls *types.ExploreControls `json:"controls,omitempty"`
+	// Query A seed query that identifies the documents of interest. Can be any valid
+	// Elasticsearch query.
+	Query *types.Query `json:"query,omitempty"`
+	// Vertices Specifies one or more fields that contain the terms you want to include in
+	// the graph as vertices.
+	Vertices []types.VertexDefinition `json:"vertices,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON loads an arbitrary JSON payload into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Explore request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/response.go
new file mode 100644
index 000000000..74312c452
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore/response.go
@@ -0,0 +1,42 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explore + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package explore +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/explore/GraphExploreResponse.ts#L25-L33 +type Response struct { + Connections []types.Connection `json:"connections"` + Failures []types.ShardFailure `json:"failures"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` + Vertices []types.Vertex `json:"vertices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/delete_lifecycle.go new file mode 100644 index 000000000..18c605951 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/delete_lifecycle.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. +package deletelifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + policyMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + policy string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteLifecycle type alias for index. +type NewDeleteLifecycle func(policy string) *DeleteLifecycle + +// NewDeleteLifecycleFunc returns a new instance of DeleteLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { + return func(policy string) *DeleteLifecycle { + n := New(tp) + + n._policy(policy) + + return n + } +} + +// Delete a lifecycle policy. +// You cannot delete policies that are currently in use. If the policy is being +// used to manage any indices, the request fails and returns an error. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle +func New(tp elastictransport.Interface) *DeleteLifecycle { + r := &DeleteLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == policyMask: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy", r.policy) + } + path.WriteString(r.policy) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.delete_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.delete_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "ilm.delete_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.delete_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletelifecycle.Response +func (r DeleteLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.delete_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
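+//
+// A minimal sketch; the typed client `client`, its Ilm namespace path, and
+// the policy name are illustrative:
+//
+//	ok, err := client.Ilm.DeleteLifecycle("my-policy").IsSuccess(context.Background())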
+func (r DeleteLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.delete_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteLifecycle headers map. +func (r *DeleteLifecycle) Header(key, value string) *DeleteLifecycle { + r.headers.Set(key, value) + + return r +} + +// Policy Identifier for the policy. +// API Name: policy +func (r *DeleteLifecycle) _policy(policy string) *DeleteLifecycle { + r.paramSet |= policyMask + r.policy = policy + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *DeleteLifecycle) MasterTimeout(duration string) *DeleteLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteLifecycle) Timeout(duration string) *DeleteLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteLifecycle) ErrorTrace(errortrace bool) *DeleteLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteLifecycle) FilterPath(filterpaths ...string) *DeleteLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteLifecycle) Human(human bool) *DeleteLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteLifecycle) Pretty(pretty bool) *DeleteLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/response.go new file mode 100644 index 000000000..fa214f79a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletelifecycle + +// Response holds the response body struct for the package deletelifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/explain_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/explain_lifecycle.go new file mode 100644 index 000000000..e5bd9ff71 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/explain_lifecycle.go @@ -0,0 +1,385 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. 
+// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. +package explainlifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExplainLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExplainLifecycle type alias for index. +type NewExplainLifecycle func(index string) *ExplainLifecycle + +// NewExplainLifecycleFunc returns a new instance of ExplainLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExplainLifecycleFunc(tp elastictransport.Interface) NewExplainLifecycle { + return func(index string) *ExplainLifecycle { + n := New(tp) + + n._index(index) + + return n + } +} + +// Explain the lifecycle state. +// Get the current lifecycle status for one or more indices. +// For data streams, the API retrieves the current lifecycle status for the +// stream's backing indices. +// +// The response indicates when the index entered each lifecycle state, provides +// the definition of the running phase, and information about any failures. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle +func New(tp elastictransport.Interface) *ExplainLifecycle { + r := &ExplainLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ExplainLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("explain") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExplainLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.explain_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.explain_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "ilm.explain_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.explain_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExplainLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explainlifecycle.Response +func (r ExplainLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.explain_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ExplainLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.explain_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExplainLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExplainLifecycle headers map. +func (r *ExplainLifecycle) Header(key, value string) *ExplainLifecycle { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases to target. +// Supports wildcards (`*`). +// To target all data streams and indices, use `*` or `_all`. +// API Name: index +func (r *ExplainLifecycle) _index(index string) *ExplainLifecycle { + r.paramSet |= indexMask + r.index = index + + return r +} + +// OnlyErrors Filters the returned indices to only indices that are managed by ILM and are +// in an error state, either due to an encountering an error while executing the +// policy, or attempting to use a policy that does not exist. +// API name: only_errors +func (r *ExplainLifecycle) OnlyErrors(onlyerrors bool) *ExplainLifecycle { + r.values.Set("only_errors", strconv.FormatBool(onlyerrors)) + + return r +} + +// OnlyManaged Filters the returned indices to only indices that are managed by ILM. +// API name: only_managed +func (r *ExplainLifecycle) OnlyManaged(onlymanaged bool) *ExplainLifecycle { + r.values.Set("only_managed", strconv.FormatBool(onlymanaged)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *ExplainLifecycle) MasterTimeout(duration string) *ExplainLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExplainLifecycle) ErrorTrace(errortrace bool) *ExplainLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ExplainLifecycle) FilterPath(filterpaths ...string) *ExplainLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExplainLifecycle) Human(human bool) *ExplainLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ExplainLifecycle) Pretty(pretty bool) *ExplainLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/response.go new file mode 100644 index 000000000..29634eeb8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle/response.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explainlifecycle + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package explainlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 +type Response struct { + Indices map[string]types.LifecycleExplain `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.LifecycleExplain, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + if s.Indices == nil { + s.Indices = make(map[string]types.LifecycleExplain, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + + switch kind["managed"] { + case true: + oo := types.NewLifecycleExplainManaged() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Indices | %w", err) + } + s.Indices[key] = oo + case false: + oo := types.NewLifecycleExplainUnmanaged() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Indices | %w", err) + } + s.Indices[key] = oo + default: + oo := new(types.LifecycleExplain) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(types.LifecycleExplain) | %w", err) + } + s.Indices[key] = oo + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/get_lifecycle.go new file mode 100644 index 000000000..dfb9bcbcc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/get_lifecycle.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get lifecycle policies. 
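The UnmarshalJSON above fans each entry of `indices` out on its `managed` flag, so callers receive either a *types.LifecycleExplainManaged or a *types.LifecycleExplainUnmanaged behind the types.LifecycleExplain union. A minimal sketch of consuming that through the v9 typed client follows; the cluster address and index name are illustrative assumptions, and the get-lifecycle endpoint defined below is reached the same way via es.ILM.GetLifecycle().

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Placeholder address; point this at a real cluster.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.ILM.ExplainLifecycle("my-index").
		OnlyManaged(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	for name, explain := range res.Indices {
		// The decoder above stores pointer values keyed on "managed".
		switch v := explain.(type) {
		case *types.LifecycleExplainManaged:
			fmt.Printf("%s: managed, phase=%v\n", name, v.Phase)
		case *types.LifecycleExplainUnmanaged:
			fmt.Printf("%s: not managed by ILM\n", name)
		}
	}
}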
+package getlifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + policyMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + policy string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetLifecycle type alias for index. +type NewGetLifecycle func() *GetLifecycle + +// NewGetLifecycleFunc returns a new instance of GetLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { + return func() *GetLifecycle { + n := New(tp) + + return n + } +} + +// Get lifecycle policies. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle +func New(tp elastictransport.Interface) *GetLifecycle { + r := &GetLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == policyMask: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy", r.policy) + } + path.WriteString(r.policy) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("policy") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.get_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.get_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "ilm.get_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.get_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getlifecycle.Response +func (r GetLifecycle) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.get_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.get_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetLifecycle headers map. +func (r *GetLifecycle) Header(key, value string) *GetLifecycle { + r.headers.Set(key, value) + + return r +} + +// Policy Identifier for the policy. +// API Name: policy +func (r *GetLifecycle) Policy(policy string) *GetLifecycle { + r.paramSet |= policyMask + r.policy = policy + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetLifecycle) MasterTimeout(duration string) *GetLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *GetLifecycle) Timeout(duration string) *GetLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetLifecycle) ErrorTrace(errortrace bool) *GetLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetLifecycle) FilterPath(filterpaths ...string) *GetLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetLifecycle) Human(human bool) *GetLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetLifecycle) Pretty(pretty bool) *GetLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/response.go new file mode 100644 index 000000000..688269210 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getlifecycle + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26 + +type Response map[string]types.Lifecycle + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/get_status.go new file mode 100644 index 000000000..164328a54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/get_status.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the ILM status. +// +// Get the current index lifecycle management status. 
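Before the generated implementation, a short sketch of how a caller might poll this endpoint. The helper name and package are hypothetical, and the loop is deliberately unbounded for brevity (a real caller would add a deadline). The migration sketch further down reuses it.

package ilmutil // hypothetical helper package, not part of this diff

import (
	"context"
	"time"

	"github.com/elastic/go-elasticsearch/v9"
)

// WaitForILMStopped polls the ILM status until the cluster reports STOPPED.
func WaitForILMStopped(ctx context.Context, es *elasticsearch.TypedClient) error {
	for {
		res, err := es.ILM.GetStatus().Do(ctx)
		if err != nil {
			return err
		}
		// OperationMode is a generated enum struct whose Name is one of
		// "RUNNING", "STOPPING", or "STOPPED".
		if res.OperationMode.Name == "STOPPED" {
			return nil
		}
		time.Sleep(time.Second)
	}
}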
+package getstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetStatus type alias for index. +type NewGetStatus func() *GetStatus + +// NewGetStatusFunc returns a new instance of GetStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { + return func() *GetStatus { + n := New(tp) + + return n + } +} + +// Get the ILM status. +// +// Get the current index lifecycle management status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status +func New(tp elastictransport.Interface) *GetStatus { + r := &GetStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.get_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.get_status") + if reader := instrument.RecordRequestBody(ctx, "ilm.get_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.get_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getstatus.Response +func (r GetStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetStatus headers map. +func (r *GetStatus) Header(key, value string) *GetStatus { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetStatus) ErrorTrace(errortrace bool) *GetStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetStatus) FilterPath(filterpaths ...string) *GetStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetStatus) Human(human bool) *GetStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetStatus) Pretty(pretty bool) *GetStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/response.go new file mode 100644 index 000000000..d106db5ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getstatus + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode" +) + +// Response holds the response body struct for the package getstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 +type Response struct { + OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go new file mode 100644 index 000000000..4e08a1d54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go @@ -0,0 +1,419 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Migrate to data tiers routing. +// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. +// Using node roles enables ILM to automatically move the indices between data +// tiers. +// +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. 
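A hedged sketch of the workflow the comment above prescribes: stop ILM, wait until the status endpoint reports STOPPED (reusing the hypothetical ilmutil.WaitForILMStopped helper sketched after the get-status endpoint), then dry-run the migration. The es client is the assumed typed client from the first sketch, and the "data" node attribute is an illustrative assumption.

ctx := context.Background()

// The migration refuses to run while ILM is operating, so stop it first.
if _, err := es.ILM.Stop().Do(ctx); err != nil {
	log.Fatal(err)
}
if err := ilmutil.WaitForILMStopped(ctx, es); err != nil {
	log.Fatal(err)
}

// With dry_run=true the API only reports what would change.
res, err := es.ILM.MigrateToDataTiers().
	DryRun(true).
	NodeAttribute("data"). // assumed custom routing attribute
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("policies to migrate:", res.MigratedIlmPolicies)
fmt.Println("indices to migrate:", res.MigratedIndices)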
+package migratetodatatiers + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MigrateToDataTiers struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMigrateToDataTiers type alias for index. +type NewMigrateToDataTiers func() *MigrateToDataTiers + +// NewMigrateToDataTiersFunc returns a new instance of MigrateToDataTiers with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMigrateToDataTiersFunc(tp elastictransport.Interface) NewMigrateToDataTiers { + return func() *MigrateToDataTiers { + n := New(tp) + + return n + } +} + +// Migrate to data tiers routing. +// Switch the indices, ILM policies, and legacy, composable, and component +// templates from using custom node attributes and attribute-based allocation +// filters to using data tiers. +// Optionally, delete one legacy index template. +// Using node roles enables ILM to automatically move the indices between data +// tiers. +// +// Migrating away from custom node attributes routing can be manually performed. +// This API provides an automated way of performing three out of the four manual +// steps listed in the migration guide: +// +// 1. Stop setting the custom hot attribute on new indices. +// 1. Remove custom allocation settings from existing ILM policies. +// 1. Replace custom allocation settings from existing indices with the +// corresponding tier preference. +// +// ILM must be stopped before performing the migration. +// Use the stop ILM and get ILM status APIs to wait until the reported operation +// mode is `STOPPED`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers +func New(tp elastictransport.Interface) *MigrateToDataTiers { + r := &MigrateToDataTiers{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MigrateToDataTiers) Raw(raw io.Reader) *MigrateToDataTiers { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MigrateToDataTiers) Request(req *Request) *MigrateToDataTiers { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *MigrateToDataTiers) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MigrateToDataTiers: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("migrate_to_data_tiers") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r MigrateToDataTiers) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.migrate_to_data_tiers") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.migrate_to_data_tiers") + if reader := instrument.RecordRequestBody(ctx, "ilm.migrate_to_data_tiers", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.migrate_to_data_tiers") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MigrateToDataTiers query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a migratetodatatiers.Response +func (r MigrateToDataTiers) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.migrate_to_data_tiers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the MigrateToDataTiers headers map. +func (r *MigrateToDataTiers) Header(key, value string) *MigrateToDataTiers { + r.headers.Set(key, value) + + return r +} + +// DryRun If true, simulates the migration from node attributes based allocation +// filters to data tiers, but does not perform the migration. +// This provides a way to retrieve the indices and ILM policies that need to be +// migrated. +// API name: dry_run +func (r *MigrateToDataTiers) DryRun(dryrun bool) *MigrateToDataTiers { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *MigrateToDataTiers) MasterTimeout(duration string) *MigrateToDataTiers { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MigrateToDataTiers) ErrorTrace(errortrace bool) *MigrateToDataTiers { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MigrateToDataTiers) FilterPath(filterpaths ...string) *MigrateToDataTiers { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MigrateToDataTiers) Human(human bool) *MigrateToDataTiers { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *MigrateToDataTiers) Pretty(pretty bool) *MigrateToDataTiers { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: legacy_template_to_delete +func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete string) *MigrateToDataTiers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LegacyTemplateToDelete = &legacytemplatetodelete + + return r +} + +// API name: node_attribute +func (r *MigrateToDataTiers) NodeAttribute(nodeattribute string) *MigrateToDataTiers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NodeAttribute = &nodeattribute + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/request.go new file mode 100644 index 000000000..22a3311f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package migratetodatatiers + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package migratetodatatiers +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/migrate_to_data_tiers/Request.ts#L23-L69 +type Request struct { + LegacyTemplateToDelete *string `json:"legacy_template_to_delete,omitempty"` + NodeAttribute *string `json:"node_attribute,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Migratetodatatiers request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/response.go new file mode 100644 index 000000000..b474d3b3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers/response.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package migratetodatatiers + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Response holds the response body struct for the package migratetodatatiers +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L51 +type Response struct { + DryRun bool `json:"dry_run"` + // MigratedComponentTemplates The component templates that were updated to not contain custom routing + // settings for the provided data attribute. + MigratedComponentTemplates []string `json:"migrated_component_templates"` + // MigratedComposableTemplates The composable index templates that were updated to not contain custom + // routing settings for the provided data attribute. + MigratedComposableTemplates []string `json:"migrated_composable_templates"` + // MigratedIlmPolicies The ILM policies that were updated. + MigratedIlmPolicies []string `json:"migrated_ilm_policies"` + // MigratedIndices The indices that were migrated to tier preference routing. + MigratedIndices []string `json:"migrated_indices"` + // MigratedLegacyTemplates The legacy index templates that were updated to not contain custom routing + // settings for the provided data attribute. + MigratedLegacyTemplates []string `json:"migrated_legacy_templates"` + // RemovedLegacyTemplate The name of the legacy index template that was deleted. + // This information is missing if no legacy index templates were deleted. 
+ RemovedLegacyTemplate string `json:"removed_legacy_template"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dry_run": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DryRun", err) + } + s.DryRun = value + case bool: + s.DryRun = v + } + + case "migrated_component_templates": + if err := dec.Decode(&s.MigratedComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "MigratedComponentTemplates", err) + } + + case "migrated_composable_templates": + if err := dec.Decode(&s.MigratedComposableTemplates); err != nil { + return fmt.Errorf("%s | %w", "MigratedComposableTemplates", err) + } + + case "migrated_ilm_policies": + if err := dec.Decode(&s.MigratedIlmPolicies); err != nil { + return fmt.Errorf("%s | %w", "MigratedIlmPolicies", err) + } + + case "migrated_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MigratedIndices", err) + } + + s.MigratedIndices = append(s.MigratedIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MigratedIndices); err != nil { + return fmt.Errorf("%s | %w", "MigratedIndices", err) + } + } + + case "migrated_legacy_templates": + if err := dec.Decode(&s.MigratedLegacyTemplates); err != nil { + return fmt.Errorf("%s | %w", "MigratedLegacyTemplates", err) + } + + case "removed_legacy_template": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemovedLegacyTemplate", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemovedLegacyTemplate = o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/move_to_step.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/move_to_step.go new file mode 100644 index 000000000..20b5f5d60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/move_to_step.go @@ -0,0 +1,430 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Move to a lifecycle step. 
+// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. +// +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. +// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. +package movetostep + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MoveToStep struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMoveToStep type alias for index. +type NewMoveToStep func(index string) *MoveToStep + +// NewMoveToStepFunc returns a new instance of MoveToStep with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMoveToStepFunc(tp elastictransport.Interface) NewMoveToStep { + return func(index string) *MoveToStep { + n := New(tp) + + n._index(index) + + return n + } +} + +// Move to a lifecycle step. +// Manually move an index into a specific step in the lifecycle policy and run +// that step. +// +// WARNING: This operation can result in the loss of data. Manually moving an +// index into a specific step runs that step even if it has already been +// performed. This is a potentially destructive action and this should be +// considered an expert level API. +// +// You must specify both the current step and the step to be executed in the +// body of the request. +// The request will fail if the current step does not match the step currently +// running for the index +// This is to prevent the index from being moved from an unexpected step into +// the next step. +// +// When specifying the target (`next_step`) to which the index will be moved, +// either the name or both the action and name fields are optional. +// If only the phase is specified, the index will move to the first step of the +// first action in the target phase. 
+// If the phase and action are specified, the index will move to the first step +// of the specified action in the specified phase. +// Only actions specified in the ILM policy are considered valid. +// An index cannot move to a step that is not part of its policy. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step +func New(tp elastictransport.Interface) *MoveToStep { + r := &MoveToStep{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MoveToStep) Raw(raw io.Reader) *MoveToStep { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MoveToStep) Request(req *Request) *MoveToStep { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MoveToStep) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MoveToStep: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("move") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
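+//
+// A minimal usage sketch, not part of the generated API surface: it assumes
+// an already configured elastictransport.Interface named tp, and the index
+// and step names shown are illustrative only.
+//
+//	body := strings.NewReader(`{"current_step":{"phase":"hot","action":"rollover","name":"check-rollover-ready"},"next_step":{"phase":"warm"}}`)
+//	res, err := NewMoveToStepFunc(tp)("my-index").Raw(body).Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}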
+func (r MoveToStep) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.move_to_step") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.move_to_step") + if reader := instrument.RecordRequestBody(ctx, "ilm.move_to_step", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.move_to_step") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MoveToStep query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a movetostep.Response +func (r MoveToStep) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.move_to_step") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the MoveToStep headers map. +func (r *MoveToStep) Header(key, value string) *MoveToStep { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index whose lifecycle step is to change +// API Name: index +func (r *MoveToStep) _index(index string) *MoveToStep { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MoveToStep) ErrorTrace(errortrace bool) *MoveToStep { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *MoveToStep) FilterPath(filterpaths ...string) *MoveToStep { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MoveToStep) Human(human bool) *MoveToStep { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MoveToStep) Pretty(pretty bool) *MoveToStep { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The step that the index is expected to be in. +// API name: current_step +func (r *MoveToStep) CurrentStep(currentstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CurrentStep = *currentstep.StepKeyCaster() + + return r +} + +// The step that you want to run. +// API name: next_step +func (r *MoveToStep) NextStep(nextstep types.StepKeyVariant) *MoveToStep { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NextStep = *nextstep.StepKeyCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/request.go new file mode 100644 index 000000000..21211d5c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package movetostep + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package movetostep +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L64 +type Request struct { + + // CurrentStep The step that the index is expected to be in. + CurrentStep types.StepKey `json:"current_step"` + // NextStep The step that you want to run. 
+ NextStep types.StepKey `json:"next_step"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Movetostep request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/response.go new file mode 100644 index 000000000..0ddaba975 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package movetostep + +// Response holds the response body struct for the package movetostep +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/put_lifecycle.go new file mode 100644 index 000000000..5d65e3d33 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/put_lifecycle.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a lifecycle policy. +// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. +package putlifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + policyMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + policy string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutLifecycle type alias for index. +type NewPutLifecycle func(policy string) *PutLifecycle + +// NewPutLifecycleFunc returns a new instance of PutLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { + return func(policy string) *PutLifecycle { + n := New(tp) + + n._policy(policy) + + return n + } +} + +// Create or update a lifecycle policy. +// If the specified policy exists, it is replaced and the policy version is +// incremented. +// +// NOTE: Only the latest version of the policy is stored, you cannot revert to +// previous versions. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle +func New(tp elastictransport.Interface) *PutLifecycle { + r := &PutLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutLifecycle) Raw(raw io.Reader) *PutLifecycle { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutLifecycle) Request(req *Request) *PutLifecycle { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
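+//
+// A short sketch, assuming a configured elastictransport.Interface named tp
+// (illustrative, not part of this file): the request can be built without
+// being executed, for example to inspect the final URL and headers.
+//
+//	req, err := NewPutLifecycleFunc(tp)("my-policy").HttpRequest(context.Background())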
+func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutLifecycle: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == policyMask: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "policy", r.policy) + } + path.WriteString(r.policy) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
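+//
+// A minimal sketch with illustrative names (tp is assumed to be a configured
+// elastictransport.Interface; the policy body is supplied here as raw JSON):
+//
+//	policy := strings.NewReader(`{"policy":{"phases":{"hot":{"actions":{"rollover":{"max_age":"7d"}}}}}}`)
+//	res, err := NewPutLifecycleFunc(tp)("my-policy").Raw(policy).Perform(context.Background())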
+func (r PutLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.put_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.put_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "ilm.put_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.put_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putlifecycle.Response +func (r PutLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.put_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutLifecycle headers map. +func (r *PutLifecycle) Header(key, value string) *PutLifecycle { + r.headers.Set(key, value) + + return r +} + +// Policy Identifier for the policy. +// API Name: policy +func (r *PutLifecycle) _policy(policy string) *PutLifecycle { + r.paramSet |= policyMask + r.policy = policy + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. 
If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutLifecycle) ErrorTrace(errortrace bool) *PutLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutLifecycle) FilterPath(filterpaths ...string) *PutLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutLifecycle) Human(human bool) *PutLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutLifecycle) Pretty(pretty bool) *PutLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/request.go new file mode 100644 index 000000000..8872f0260 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putlifecycle + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L66 +type Request struct { + Policy *types.IlmPolicy `json:"policy,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putlifecycle request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/response.go new file mode 100644 index 000000000..28376dbfe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putlifecycle + +// Response holds the response body struct for the package putlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/remove_policy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/remove_policy.go new file mode 100644 index 000000000..d2754b06d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/remove_policy.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. +package removepolicy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RemovePolicy struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRemovePolicy type alias for index. +type NewRemovePolicy func(index string) *RemovePolicy + +// NewRemovePolicyFunc returns a new instance of RemovePolicy with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRemovePolicyFunc(tp elastictransport.Interface) NewRemovePolicy { + return func(index string) *RemovePolicy { + n := New(tp) + + n._index(index) + + return n + } +} + +// Remove policies from an index. +// Remove the assigned lifecycle policies from an index or a data stream's +// backing indices. +// It also stops managing the indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy +func New(tp elastictransport.Interface) *RemovePolicy { + r := &RemovePolicy{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
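+//
+// Sketch (illustrative; tp stands for an already configured
+// elastictransport.Interface):
+//
+//	req, err := NewRemovePolicyFunc(tp)("my-index").HttpRequest(context.Background())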
+func (r *RemovePolicy) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("remove") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RemovePolicy) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.remove_policy") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.remove_policy") + if reader := instrument.RecordRequestBody(ctx, "ilm.remove_policy", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.remove_policy") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RemovePolicy query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a removepolicy.Response +func (r RemovePolicy) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.remove_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r RemovePolicy) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.remove_policy") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RemovePolicy query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RemovePolicy headers map. +func (r *RemovePolicy) Header(key, value string) *RemovePolicy { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index to remove policy on +// API Name: index +func (r *RemovePolicy) _index(index string) *RemovePolicy { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RemovePolicy) ErrorTrace(errortrace bool) *RemovePolicy { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RemovePolicy) FilterPath(filterpaths ...string) *RemovePolicy { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RemovePolicy) Human(human bool) *RemovePolicy { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *RemovePolicy) Pretty(pretty bool) *RemovePolicy { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/response.go new file mode 100644 index 000000000..743327df6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package removepolicy + +// Response holds the response body struct for the package removepolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 +type Response struct { + FailedIndexes []string `json:"failed_indexes"` + HasFailures bool `json:"has_failures"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/response.go new file mode 100644 index 000000000..80c56e0ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package retry + +// Response holds the response body struct for the package retry +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/retry/RetryIlmResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/retry.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/retry.go new file mode 100644 index 000000000..59adf4e1c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry/retry.go @@ -0,0 +1,355 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. +// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. +package retry + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Retry struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRetry type alias for index. +type NewRetry func(index string) *Retry + +// NewRetryFunc returns a new instance of Retry with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRetryFunc(tp elastictransport.Interface) NewRetry { + return func(index string) *Retry { + n := New(tp) + + n._index(index) + + return n + } +} + +// Retry a policy. +// Retry running the lifecycle policy for an index that is in the ERROR step. +// The API sets the policy back to the step where the error occurred and runs +// the step. 
+// Use the explain lifecycle state API to determine whether an index is in the +// ERROR step. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry +func New(tp elastictransport.Interface) *Retry { + r := &Retry{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Retry) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("retry") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
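+//
+// A minimal sketch (illustrative names; tp is assumed to be a configured
+// elastictransport.Interface):
+//
+//	res, err := NewRetryFunc(tp)("my-index").Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}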
+func (r Retry) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.retry") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.retry") + if reader := instrument.RecordRequestBody(ctx, "ilm.retry", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.retry") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Retry query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a retry.Response +func (r Retry) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.retry") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
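+//
+// Sketch (illustrative; tp is assumed to be a configured
+// elastictransport.Interface), useful when only the status outcome matters:
+//
+//	ok, err := NewRetryFunc(tp)("my-index").IsSuccess(context.Background())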
+func (r Retry) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.retry") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Retry query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Retry headers map. +func (r *Retry) Header(key, value string) *Retry { + r.headers.Set(key, value) + + return r +} + +// Index The name of the indices (comma-separated) whose failed lifecycle step is to +// be retry +// API Name: index +func (r *Retry) _index(index string) *Retry { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Retry) ErrorTrace(errortrace bool) *Retry { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Retry) FilterPath(filterpaths ...string) *Retry { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Retry) Human(human bool) *Retry { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Retry) Pretty(pretty bool) *Retry { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/response.go new file mode 100644 index 000000000..cd625de23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package start + +// Response holds the response body struct for the package start +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/start/StartIlmResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/start.go new file mode 100644 index 000000000..ada1c0ae2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start/start.go @@ -0,0 +1,347 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. +// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. +package start + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Start struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStart type alias for index. +type NewStart func() *Start + +// NewStartFunc returns a new instance of Start with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartFunc(tp elastictransport.Interface) NewStart { + return func() *Start { + n := New(tp) + + return n + } +} + +// Start the ILM plugin. +// Start the index lifecycle management plugin if it is currently stopped. +// ILM is started automatically when the cluster is formed. +// Restarting ILM is necessary only when it has been stopped using the stop ILM +// API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start +func New(tp elastictransport.Interface) *Start { + r := &Start{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Start) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
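A usage sketch for the builder above, again assuming the typed client from the first example. Per the package doc, this call is only needed after an explicit stop; starting an already-running ILM still acknowledges:

```go
package ilmexample

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// ensureILMRunning restarts index lifecycle management after maintenance.
func ensureILMRunning(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Ilm.Start().MasterTimeout("30s").Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("ilm start was not acknowledged")
	}
	return nil
}
```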
+func (r Start) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.start") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.start") + if reader := instrument.RecordRequestBody(ctx, "ilm.start", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.start") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Start query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a start.Response +func (r Start) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
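`Perform` only opens a span when the transport satisfies `elastictransport.Instrumented`, as wired in `New` above. Below is a minimal logging sketch covering exactly the calls visible in this hunk (Start/Close, RecordError, RecordPathPart, RecordRequestBody, BeforeRequest/AfterRequest); the upstream `elastictransport.Instrumentation` interface may declare additional methods, so treat this as illustrative rather than a drop-in implementation:

```go
package instrumentsketch

import (
	"context"
	"io"
	"log"
	"net/http"
)

// logInstrumentation mirrors the instrumentation hooks the generated
// code invokes around request building and execution.
type logInstrumentation struct{}

func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
	log.Printf("span start: %s", name)
	return ctx
}

func (logInstrumentation) Close(ctx context.Context) { log.Print("span end") }

func (logInstrumentation) RecordError(ctx context.Context, err error) {
	log.Printf("error: %v", err)
}

func (logInstrumentation) RecordPathPart(ctx context.Context, part, value string) {
	log.Printf("path part %s=%s", part, value)
}

func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
	return nil // returning nil leaves req.Body untouched in Perform
}

func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string) {
	log.Printf("-> %s %s (%s)", req.Method, req.URL.Path, endpoint)
}

func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {
	log.Printf("<- %s %s", system, endpoint)
}
```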
+func (r Start) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Start query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Start headers map. +func (r *Start) Header(key, value string) *Start { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Start) Timeout(duration string) *Start { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Start) ErrorTrace(errortrace bool) *Start { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Start) FilterPath(filterpaths ...string) *Start { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Start) Human(human bool) *Start { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Start) Pretty(pretty bool) *Start { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/response.go new file mode 100644 index 000000000..696249f8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stop + +// Response holds the response body struct for the package stop +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/stop/StopIlmResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/stop.go new file mode 100644 index 000000000..a9e773cd5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop/stop.go @@ -0,0 +1,357 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. +package stop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStop type alias for index. +type NewStop func() *Stop + +// NewStopFunc returns a new instance of Stop with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopFunc(tp elastictransport.Interface) NewStop { + return func() *Stop { + n := New(tp) + + return n + } +} + +// Stop the ILM plugin. +// Halt all lifecycle management operations and stop the index lifecycle +// management plugin. +// This is useful when you are performing maintenance on the cluster and need to +// prevent ILM from performing any actions on your indices. +// +// The API returns as soon as the stop request has been acknowledged, but the +// plugin might continue to run until in-progress operations complete and the +// plugin can be safely stopped. +// Use the get ILM status API to check whether ILM is running. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop +func New(tp elastictransport.Interface) *Stop { + r := &Stop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ilm") + path.WriteString("/") + path.WriteString("stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
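As the package doc notes, stop is acknowledged before in-flight operations finish, so maintenance tooling typically acknowledges and then polls the get ILM status API. A hedged sketch of the first half of that flow, with the polling left out:

```go
package ilmexample

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// stopILMForMaintenance halts lifecycle operations before cluster maintenance.
// The acknowledgement only means the stop request was accepted; the get ILM
// status API should then be polled (not shown) until ILM reports stopped.
func stopILMForMaintenance(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Ilm.Stop().Timeout("30s").Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("ilm stop was not acknowledged")
	}
	return nil
}
```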
+func (r Stop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ilm.stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ilm.stop") + if reader := instrument.RecordRequestBody(ctx, "ilm.stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ilm.stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stop.Response +func (r Stop) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
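`IsSuccess`, defined next, discards the response body and maps 2xx to `true`, 404 to `(false, nil)`, and any other status to an error, which suits quick control flow. A short sketch:

```go
package ilmexample

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// stopQuietly uses IsSuccess for fire-and-forget control flow: 2xx is true,
// 404 is reported as (false, nil), and other statuses surface as errors.
func stopQuietly(ctx context.Context, es *elasticsearch.TypedClient) (bool, error) {
	ok, err := es.Ilm.Stop().IsSuccess(ctx)
	if err != nil {
		return false, fmt.Errorf("ilm.stop: %w", err)
	}
	return ok, nil
}
```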
+func (r Stop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ilm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stop headers map. +func (r *Stop) Header(key, value string) *Stop { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Stop) Timeout(duration string) *Stop { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stop) ErrorTrace(errortrace bool) *Stop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stop) FilterPath(filterpaths ...string) *Stop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stop) Human(human bool) *Stop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stop) Pretty(pretty bool) *Stop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/add_block.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/add_block.go new file mode 100644 index 000000000..c35a40399 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/add_block.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Add an index block. +// +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +package addblock + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 + + blockMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type AddBlock struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + block string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAddBlock type alias for index. +type NewAddBlock func(index, block string) *AddBlock + +// NewAddBlockFunc returns a new instance of AddBlock with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAddBlockFunc(tp elastictransport.Interface) NewAddBlock { + return func(index, block string) *AddBlock { + n := New(tp) + + n._index(index) + + n._block(block) + + return n + } +} + +// Add an index block. +// +// Add an index block to an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block +func New(tp elastictransport.Interface) *AddBlock { + r := &AddBlock{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
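Both required path parts are enforced through `NewAddBlockFunc` above, so callers always supply the index and the block type together. A sketch making an index read-only before a shrink or snapshot; the index name is hypothetical, and common block values include `write`, `read`, `read_only`, and `metadata`:

```go
package indicesexample

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// blockWrites adds a write block so the index can no longer be modified.
func blockWrites(ctx context.Context, es *elasticsearch.TypedClient, index string) error {
	res, err := es.Indices.AddBlock(index, "write").
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged || !res.ShardsAcknowledged {
		return fmt.Errorf("write block on %q not fully acknowledged", index)
	}
	return nil
}
```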
+func (r *AddBlock) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|blockMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_block") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "block", r.block) + } + path.WriteString(r.block) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r AddBlock) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.add_block") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.add_block") + if reader := instrument.RecordRequestBody(ctx, "indices.add_block", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.add_block") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AddBlock query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a addblock.Response +func (r AddBlock) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.add_block") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, 
err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r AddBlock) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.add_block") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AddBlock query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AddBlock headers map. +func (r *AddBlock) Header(key, value string) *AddBlock { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list or wildcard expression of index names used to limit +// the request. +// By default, you must explicitly name the indices you are adding blocks to. +// To allow the adding of blocks to indices with `_all`, `*`, or other wildcard +// expressions, change the `action.destructive_requires_name` setting to +// `false`. +// You can update this setting in the `elasticsearch.yml` file or by using the +// cluster update settings API. +// API Name: index +func (r *AddBlock) _index(index string) *AddBlock { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Block The block type to add to the index. +// API Name: block +func (r *AddBlock) _block(block string) *AddBlock { + r.paramSet |= blockMask + r.block = block + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. 
+// API name: expand_wildcards +func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *AddBlock { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *AddBlock) MasterTimeout(duration string) *AddBlock { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: timeout +func (r *AddBlock) Timeout(duration string) *AddBlock { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AddBlock) ErrorTrace(errortrace bool) *AddBlock { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AddBlock) FilterPath(filterpaths ...string) *AddBlock { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AddBlock) Human(human bool) *AddBlock { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AddBlock) Pretty(pretty bool) *AddBlock { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/response.go new file mode 100644 index 000000000..26b9c8236 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package addblock + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package addblock +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Indices []types.AddIndicesBlockStatus `json:"indices"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/analyze.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/analyze.go new file mode 100644 index 000000000..c2ca2a1a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/analyze.go @@ -0,0 +1,510 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get tokens from text analysis. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. +// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. 
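A usage sketch for the analyze endpoint described above. Each body setter defined later in this file lazily initializes the request, so chaining straight off `Analyze()` is valid; the `Tokens`/`Token` response fields are assumed from the analyze response type, which is outside this hunk:

```go
package analyzeexample

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// printTokens runs the standard analyzer over a sample string.
func printTokens(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.Analyze().
		Analyzer("standard").
		Text("The QUICK brown fox").
		Do(ctx)
	if err != nil {
		return err
	}
	// Tokens and Token are assumed field names; verify against the
	// generated response type before relying on them.
	for _, tok := range res.Tokens {
		fmt.Println(tok.Token)
	}
	return nil
}
```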
+package analyze + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Analyze struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAnalyze type alias for index. +type NewAnalyze func() *Analyze + +// NewAnalyzeFunc returns a new instance of Analyze with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAnalyzeFunc(tp elastictransport.Interface) NewAnalyze { + return func() *Analyze { + n := New(tp) + + return n + } +} + +// Get tokens from text analysis. +// The analyze API performs analysis on a text string and returns the resulting +// tokens. +// +// Generating excessive amount of tokens may cause a node to run out of memory. +// The `index.analyze.max_token_count` setting enables you to limit the number +// of tokens that can be produced. +// If more than this limit of tokens gets generated, an error occurs. +// The `_analyze` endpoint without a specified index will always use `10000` as +// its limit. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze +func New(tp elastictransport.Interface) *Analyze { + r := &Analyze{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Analyze) Raw(raw io.Reader) *Analyze { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Analyze) Request(req *Request) *Analyze { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Analyze) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Analyze: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_analyze") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_analyze") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
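`HttpRequest` above serializes the typed `Request` only when `raw` is unset, so `Raw` acts as an escape hatch for pre-built JSON bodies, for example when replaying captured payloads. A sketch with an illustrative payload:

```go
package analyzeexample

import (
	"context"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// analyzeRaw sends a hand-written JSON body, bypassing the typed builders.
func analyzeRaw(ctx context.Context, es *elasticsearch.TypedClient) error {
	body := `{"analyzer":"standard","text":["one value","another value"]}`
	_, err := es.Indices.Analyze().
		Raw(strings.NewReader(body)).
		Do(ctx)
	return err
}
```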
+func (r Analyze) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.analyze") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.analyze") + if reader := instrument.RecordRequestBody(ctx, "indices.analyze", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.analyze") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Analyze query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a analyze.Response +func (r Analyze) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Analyze headers map. +func (r *Analyze) Header(key, value string) *Analyze { + r.headers.Set(key, value) + + return r +} + +// Index Index used to derive the analyzer. +// If specified, the `analyzer` or field parameter overrides this value. +// If no index is specified or the index does not have a default analyzer, the +// analyze API uses the standard analyzer. +// API Name: index +func (r *Analyze) Index(index string) *Analyze { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Analyze) ErrorTrace(errortrace bool) *Analyze { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Analyze) FilterPath(filterpaths ...string) *Analyze { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Analyze) Human(human bool) *Analyze { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Analyze) Pretty(pretty bool) *Analyze { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The name of the analyzer that should be applied to the provided `text`. +// This could be a built-in analyzer, or an analyzer that’s been configured in +// the index. +// API name: analyzer +func (r *Analyze) Analyzer(analyzer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Analyzer = &analyzer + + return r +} + +// Array of token attributes used to filter the output of the `explain` +// parameter. +// API name: attributes +func (r *Analyze) Attributes(attributes ...string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range attributes { + + r.req.Attributes = append(r.req.Attributes, v) + + } + return r +} + +// Array of character filters used to preprocess characters before the +// tokenizer. +// API name: char_filter +func (r *Analyze) CharFilter(charfilters ...types.CharFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range charfilters { + + r.req.CharFilter = append(r.req.CharFilter, *v.CharFilterCaster()) + + } + return r +} + +// If `true`, the response includes token attributes and additional details. +// API name: explain +func (r *Analyze) Explain(explain bool) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Explain = &explain + + return r +} + +// Field used to derive the analyzer. +// To use this parameter, you must specify an index. +// If specified, the `analyzer` parameter overrides this value. +// API name: field +func (r *Analyze) Field(field string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Field = &field + + return r +} + +// Array of token filters used to apply after the tokenizer. 
+// API name: filter +func (r *Analyze) Filter(filters ...types.TokenFilterVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range filters { + + r.req.Filter = append(r.req.Filter, *v.TokenFilterCaster()) + + } + return r +} + +// Normalizer to use to convert text into a single token. +// API name: normalizer +func (r *Analyze) Normalizer(normalizer string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Normalizer = &normalizer + + return r +} + +// Text to analyze. +// If an array of strings is provided, it is analyzed as a multi-value field. +// API name: text +func (r *Analyze) Text(texttoanalyzes ...string) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Text = texttoanalyzes + + return r +} + +// Tokenizer to use to convert text into tokens. +// API name: tokenizer +func (r *Analyze) Tokenizer(tokenizer types.TokenizerVariant) *Analyze { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Tokenizer = *tokenizer.TokenizerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/request.go new file mode 100644 index 000000000..7f3adb63e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/request.go @@ -0,0 +1,812 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package analyze + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package analyze +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L119 +type Request struct { + + // Analyzer The name of the analyzer that should be applied to the provided `text`. + // This could be a built-in analyzer, or an analyzer that’s been configured in + // the index. + Analyzer *string `json:"analyzer,omitempty"` + // Attributes Array of token attributes used to filter the output of the `explain` + // parameter. + Attributes []string `json:"attributes,omitempty"` + // CharFilter Array of character filters used to preprocess characters before the + // tokenizer. 
+ CharFilter []types.CharFilter `json:"char_filter,omitempty"` + // Explain If `true`, the response includes token attributes and additional details. + Explain *bool `json:"explain,omitempty"` + // Field Field used to derive the analyzer. + // To use this parameter, you must specify an index. + // If specified, the `analyzer` parameter overrides this value. + Field *string `json:"field,omitempty"` + // Filter Array of token filters used to apply after the tokenizer. + Filter []types.TokenFilter `json:"filter,omitempty"` + // Normalizer Normalizer to use to convert text into a single token. + Normalizer *string `json:"normalizer,omitempty"` + // Text Text to analyze. + // If an array of strings is provided, it is analyzed as a multi-value field. + Text []string `json:"text,omitempty"` + // Tokenizer Tokenizer to use to convert text into tokens. + Tokenizer types.Tokenizer `json:"tokenizer,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Analyze request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "attributes": + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "char_filter": + + buf := []json.RawMessage{} + dec.Decode(&buf) + for _, rawMsg := range buf { + + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + kind := make(map[string]string, 0) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "html_strip": + o := types.NewHtmlStripCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "html_strip", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "mapping": + o := types.NewMappingCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "mapping", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "pattern_replace": + o := types.NewPatternReplaceCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_replace", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "icu_normalizer": + o := types.NewIcuNormalizationCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_normalizer", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "kuromoji_iteration_mark": + o := types.NewKuromojiIterationMarkCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) + } + s.CharFilter = append(s.CharFilter, *o) + default: + o := new(any) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter = append(s.CharFilter, *o) + } + } + + case "explain": + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "filter": + + buf := []json.RawMessage{} + dec.Decode(&buf) + for _, rawMsg := range buf { + + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + kind := make(map[string]string, 0) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "apostrophe": + o := types.NewApostropheTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "apostrophe", err) + } + s.Filter = append(s.Filter, *o) + case "arabic_stem": + o := types.NewArabicStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "arabic_stem", err) + } + s.Filter = append(s.Filter, *o) + case "arabic_normalization": + o := types.NewArabicNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "arabic_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "asciifolding": + o := types.NewAsciiFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "asciifolding", err) + } + s.Filter = append(s.Filter, *o) + case "bengali_normalization": + o := types.NewBengaliNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "bengali_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "brazilian_stem": + o := types.NewBrazilianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "brazilian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "cjk_bigram": + o := types.NewCjkBigramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "cjk_bigram", err) + } + s.Filter = append(s.Filter, *o) + case "cjk_width": + o := types.NewCjkWidthTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "cjk_width", err) + } + s.Filter = append(s.Filter, *o) + case "classic": + o := types.NewClassicTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) + } + s.Filter = append(s.Filter, *o) + case "common_grams": + o := types.NewCommonGramsTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "common_grams", err) + } + s.Filter = append(s.Filter, *o) + case "condition": + o := types.NewConditionTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "condition", err) + } + s.Filter = append(s.Filter, *o) + case "czech_stem": + o := types.NewCzechStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "czech_stem", err) + } + s.Filter = append(s.Filter, *o) + case "decimal_digit": + o := types.NewDecimalDigitTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "decimal_digit", err) + } + s.Filter = append(s.Filter, *o) + case "delimited_payload": + o := types.NewDelimitedPayloadTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "delimited_payload", err) + } + s.Filter = append(s.Filter, *o) + case "dutch_stem": + o := types.NewDutchStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "dutch_stem", err) + } + s.Filter = append(s.Filter, *o) + case "edge_ngram": + o := types.NewEdgeNGramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "edge_ngram", err) + } + s.Filter = append(s.Filter, *o) + case "elision": + o := types.NewElisionTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "elision", err) + } + s.Filter = append(s.Filter, *o) + case "fingerprint": + o := types.NewFingerprintTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "fingerprint", err) + } + s.Filter = append(s.Filter, *o) + case "flatten_graph": + o := types.NewFlattenGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "flatten_graph", err) + } + s.Filter = append(s.Filter, *o) + case "french_stem": + o := types.NewFrenchStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "french_stem", err) + } + s.Filter = append(s.Filter, *o) + case "german_normalization": + o := types.NewGermanNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "german_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "german_stem": + o := types.NewGermanStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "german_stem", err) + } + s.Filter = append(s.Filter, *o) + case "hindi_normalization": + o := types.NewHindiNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hindi_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "hunspell": + o := types.NewHunspellTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hunspell", err) + } + s.Filter = append(s.Filter, *o) + case "hyphenation_decompounder": + o := types.NewHyphenationDecompounderTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hyphenation_decompounder", err) + } + s.Filter = append(s.Filter, *o) + case "indic_normalization": + o := types.NewIndicNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "indic_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "keep_types": + o := types.NewKeepTypesTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keep_types", err) + } + s.Filter = append(s.Filter, *o) + case "keep": + o := types.NewKeepWordsTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keep", err) + } + s.Filter = append(s.Filter, *o) + case "keyword_marker": + o := types.NewKeywordMarkerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword_marker", err) + } + s.Filter = append(s.Filter, *o) + case "keyword_repeat": + o := types.NewKeywordRepeatTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword_repeat", err) + } + s.Filter = append(s.Filter, *o) + case "kstem": + o := types.NewKStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kstem", err) + } + s.Filter = append(s.Filter, *o) + case "length": + o := types.NewLengthTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "length", err) + } + s.Filter = append(s.Filter, *o) + case "limit": + o := 
types.NewLimitTokenCountTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "limit", err) + } + s.Filter = append(s.Filter, *o) + case "lowercase": + o := types.NewLowercaseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "lowercase", err) + } + s.Filter = append(s.Filter, *o) + case "min_hash": + o := types.NewMinHashTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "min_hash", err) + } + s.Filter = append(s.Filter, *o) + case "multiplexer": + o := types.NewMultiplexerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "multiplexer", err) + } + s.Filter = append(s.Filter, *o) + case "ngram": + o := types.NewNGramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ngram", err) + } + s.Filter = append(s.Filter, *o) + case "nori_part_of_speech": + o := types.NewNoriPartOfSpeechTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) + } + s.Filter = append(s.Filter, *o) + case "pattern_capture": + o := types.NewPatternCaptureTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_capture", err) + } + s.Filter = append(s.Filter, *o) + case "pattern_replace": + o := types.NewPatternReplaceTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_replace", err) + } + s.Filter = append(s.Filter, *o) + case "persian_normalization": + o := types.NewPersianNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "persian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "persian_stem": + o := types.NewPersianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "persian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "porter_stem": + o := types.NewPorterStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "porter_stem", err) + } + s.Filter = append(s.Filter, *o) + case "predicate_token_filter": + o := types.NewPredicateTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "predicate_token_filter", err) + } + s.Filter = append(s.Filter, *o) + case "remove_duplicates": + o := types.NewRemoveDuplicatesTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "remove_duplicates", err) + } + s.Filter = append(s.Filter, *o) + case "reverse": + o := types.NewReverseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "reverse", err) + } + s.Filter = append(s.Filter, *o) + case "russian_stem": + o := types.NewRussianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "russian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "scandinavian_folding": + o := types.NewScandinavianFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "scandinavian_folding", err) + } + s.Filter = append(s.Filter, *o) + case "scandinavian_normalization": + o := types.NewScandinavianNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "scandinavian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "serbian_normalization": + o := types.NewSerbianNormalizationTokenFilter() + 
if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "serbian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "shingle": + o := types.NewShingleTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "shingle", err) + } + s.Filter = append(s.Filter, *o) + case "snowball": + o := types.NewSnowballTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "snowball", err) + } + s.Filter = append(s.Filter, *o) + case "sorani_normalization": + o := types.NewSoraniNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "sorani_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "stemmer_override": + o := types.NewStemmerOverrideTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stemmer_override", err) + } + s.Filter = append(s.Filter, *o) + case "stemmer": + o := types.NewStemmerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stemmer", err) + } + s.Filter = append(s.Filter, *o) + case "stop": + o := types.NewStopTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stop", err) + } + s.Filter = append(s.Filter, *o) + case "synonym_graph": + o := types.NewSynonymGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "synonym_graph", err) + } + s.Filter = append(s.Filter, *o) + case "synonym": + o := types.NewSynonymTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "synonym", err) + } + s.Filter = append(s.Filter, *o) + case "trim": + o := types.NewTrimTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "trim", err) + } + s.Filter = append(s.Filter, *o) + case "truncate": + o := types.NewTruncateTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "truncate", err) + } + s.Filter = append(s.Filter, *o) + case "unique": + o := types.NewUniqueTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "unique", err) + } + s.Filter = append(s.Filter, *o) + case "uppercase": + o := types.NewUppercaseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "uppercase", err) + } + s.Filter = append(s.Filter, *o) + case "word_delimiter_graph": + o := types.NewWordDelimiterGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) + } + s.Filter = append(s.Filter, *o) + case "word_delimiter": + o := types.NewWordDelimiterTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "word_delimiter", err) + } + s.Filter = append(s.Filter, *o) + case "ja_stop": + o := types.NewJaStopTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ja_stop", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_stemmer": + o := types.NewKuromojiStemmerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_readingform": + o := types.NewKuromojiReadingFormTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_part_of_speech": + o := 
types.NewKuromojiPartOfSpeechTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) + } + s.Filter = append(s.Filter, *o) + case "icu_collation": + o := types.NewIcuCollationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_collation", err) + } + s.Filter = append(s.Filter, *o) + case "icu_folding": + o := types.NewIcuFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_folding", err) + } + s.Filter = append(s.Filter, *o) + case "icu_normalizer": + o := types.NewIcuNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_normalizer", err) + } + s.Filter = append(s.Filter, *o) + case "icu_transform": + o := types.NewIcuTransformTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_transform", err) + } + s.Filter = append(s.Filter, *o) + case "phonetic": + o := types.NewPhoneticTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "phonetic", err) + } + s.Filter = append(s.Filter, *o) + case "dictionary_decompounder": + o := types.NewDictionaryDecompounderTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) + } + s.Filter = append(s.Filter, *o) + default: + o := new(any) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter = append(s.Filter, *o) + } + } + + case "normalizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Normalizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Normalizer = &o + + case "text": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + + s.Text = append(s.Text, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Text); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + } + + case "tokenizer": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + kind := make(map[string]string, 0) + localDec := json.NewDecoder(source) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "char_group": + o := types.NewCharGroupTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "char_group", err) + } + s.Tokenizer = *o + case "classic": + o := types.NewClassicTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) + } + s.Tokenizer = *o + case "edge_ngram": + o := types.NewEdgeNGramTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "edge_ngram", err) + } + s.Tokenizer = *o + case "keyword": + o := types.NewKeywordTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword", err) + } + s.Tokenizer = *o + case "letter": + o := types.NewLetterTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "letter", err) + } + s.Tokenizer = *o + case "lowercase": + o := types.NewLowercaseTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"lowercase", err) + } + s.Tokenizer = *o + case "ngram": + o := types.NewNGramTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ngram", err) + } + s.Tokenizer = *o + case "path_hierarchy": + o := types.NewPathHierarchyTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "path_hierarchy", err) + } + s.Tokenizer = *o + case "pattern": + o := types.NewPatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern", err) + } + s.Tokenizer = *o + case "simple_pattern": + o := types.NewSimplePatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern", err) + } + s.Tokenizer = *o + case "simple_pattern_split": + o := types.NewSimplePatternSplitTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern_split", err) + } + s.Tokenizer = *o + case "standard": + o := types.NewStandardTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "standard", err) + } + s.Tokenizer = *o + case "thai": + o := types.NewThaiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "thai", err) + } + s.Tokenizer = *o + case "uax_url_email": + o := types.NewUaxEmailUrlTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "uax_url_email", err) + } + s.Tokenizer = *o + case "whitespace": + o := types.NewWhitespaceTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "whitespace", err) + } + s.Tokenizer = *o + case "icu_tokenizer": + o := types.NewIcuTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_tokenizer", err) + } + s.Tokenizer = *o + case "kuromoji_tokenizer": + o := types.NewKuromojiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) + } + s.Tokenizer = *o + case "nori_tokenizer": + o := types.NewNoriTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "nori_tokenizer", err) + } + s.Tokenizer = *o + default: + if err := localDec.Decode(&s.Tokenizer); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/response.go new file mode 100644 index 000000000..41def59f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package analyze + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package analyze +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 +type Response struct { + Detail *types.AnalyzeDetail `json:"detail,omitempty"` + Tokens []types.AnalyzeToken `json:"tokens,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go new file mode 100644 index 000000000..8ec061fb5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/cancel_migrate_reindex.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. +package cancelmigratereindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CancelMigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCancelMigrateReindex type alias for index. +type NewCancelMigrateReindex func(index string) *CancelMigrateReindex + +// NewCancelMigrateReindexFunc returns a new instance of CancelMigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
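+//
+// A hedged usage sketch (editor's addition; `tp` stands for any configured
+// elastictransport.Interface, and the target name is illustrative):
+//
+//	cancel := NewCancelMigrateReindexFunc(tp)
+//	res, err := cancel("my-data-stream").Do(context.Background())
+//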
+func NewCancelMigrateReindexFunc(tp elastictransport.Interface) NewCancelMigrateReindex { + return func(index string) *CancelMigrateReindex { + n := New(tp) + + n._index(index) + + return n + } +} + +// Cancel a migration reindex operation. +// +// Cancel a migration reindex attempt for a data stream or index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex +func New(tp elastictransport.Interface) *CancelMigrateReindex { + r := &CancelMigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CancelMigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
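+//
+// Unlike Do below, Perform does not decode or close the response body; both
+// remain the caller's responsibility. Editor's sketch:
+//
+//	res, err := NewCancelMigrateReindexFunc(tp)("my-data-stream").Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}
+//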
+func (r CancelMigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.cancel_migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.cancel_migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.cancel_migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CancelMigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cancelmigratereindex.Response +func (r CancelMigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
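+//
+// Editor's sketch of the quick control flow:
+//
+//	ok, err := NewCancelMigrateReindexFunc(tp)("my-data-stream").IsSuccess(ctx)
+//	if err == nil && ok {
+//		// cancellation acknowledged with a 2xx status
+//	}
+//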
+func (r CancelMigrateReindex) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.cancel_migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CancelMigrateReindex query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CancelMigrateReindex headers map. +func (r *CancelMigrateReindex) Header(key, value string) *CancelMigrateReindex { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name +// API Name: index +func (r *CancelMigrateReindex) _index(index string) *CancelMigrateReindex { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CancelMigrateReindex) ErrorTrace(errortrace bool) *CancelMigrateReindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CancelMigrateReindex) FilterPath(filterpaths ...string) *CancelMigrateReindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CancelMigrateReindex) Human(human bool) *CancelMigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CancelMigrateReindex) Pretty(pretty bool) *CancelMigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/response.go new file mode 100644 index 000000000..7e2afcc58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package cancelmigratereindex + +// Response holds the response body struct for the package cancelmigratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/cancel_migrate_reindex/MigrateCancelReindexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/clear_cache.go new file mode 100644 index 000000000..d79cfb2a0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/clear_cache.go @@ -0,0 +1,435 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the cache. +// Clear the cache of one or more indices. +// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. 
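+//
+// A hedged usage sketch (editor's addition; builder method names are taken
+// from this file):
+//
+//	res, err := clearcache.New(tp).
+//		Index("my-index").
+//		Fielddata(true).
+//		Fields("title", "body").
+//		Do(context.Background())
+//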
+package clearcache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCache struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCache type alias for index. +type NewClearCache func() *ClearCache + +// NewClearCacheFunc returns a new instance of ClearCache with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { + return func() *ClearCache { + n := New(tp) + + return n + } +} + +// Clear the cache. +// Clear the cache of one or more indices. +// For data streams, the API clears the caches of the stream's backing indices. +// +// By default, the clear cache API clears all caches. +// To clear only specific caches, use the `fielddata`, `query`, or `request` +// parameters. +// To clear the cache only of specific fields, use the `fields` parameter. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache +func New(tp elastictransport.Interface) *ClearCache { + r := &ClearCache{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ClearCache) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_cache") + path.WriteString("/") + path.WriteString("clear") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_cache") + path.WriteString("/") + path.WriteString("clear") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
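+//
+// To inspect the request without sending it, the HttpRequest builder above
+// can be used on its own (editor's sketch):
+//
+//	req, _ := clearcache.New(tp).Index("my-index").HttpRequest(ctx)
+//	// req.Method is POST and req.URL.Path is "/my-index/_cache/clear"
+//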
+func (r ClearCache) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.clear_cache") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.clear_cache") + if reader := instrument.RecordRequestBody(ctx, "indices.clear_cache", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.clear_cache") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCache query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcache.Response +func (r ClearCache) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
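+// Note (editor's annotation): as the status handling below shows, a 404
+// returns (false, nil) rather than an error, while any other non-2xx status
+// is surfaced as an error.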
+func (r ClearCache) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearCache query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearCache headers map. +func (r *ClearCache) Header(key, value string) *ClearCache { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *ClearCache) Index(index string) *ClearCache { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *ClearCache) AllowNoIndices(allownoindices bool) *ClearCache { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *ClearCache) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ClearCache { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Fielddata If `true`, clears the fields cache. +// Use the `fields` parameter to clear the cache of specific fields only. +// API name: fielddata +func (r *ClearCache) Fielddata(fielddata bool) *ClearCache { + r.values.Set("fielddata", strconv.FormatBool(fielddata)) + + return r +} + +// Fields Comma-separated list of field names used to limit the `fielddata` parameter. +// API name: fields +func (r *ClearCache) Fields(fields ...string) *ClearCache { + r.values.Set("fields", strings.Join(fields, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Query If `true`, clears the query cache. +// API name: query +func (r *ClearCache) Query(query bool) *ClearCache { + r.values.Set("query", strconv.FormatBool(query)) + + return r +} + +// Request If `true`, clears the request cache. 
+// API name: request +func (r *ClearCache) Request(request bool) *ClearCache { + r.values.Set("request", strconv.FormatBool(request)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCache) ErrorTrace(errortrace bool) *ClearCache { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearCache) FilterPath(filterpaths ...string) *ClearCache { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCache) Human(human bool) *ClearCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearCache) Pretty(pretty bool) *ClearCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/response.go new file mode 100644 index 000000000..497e62d6d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcache + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearcache +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L25 +type Response struct { + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/clone.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/clone.go new file mode 100644 index 000000000..ec2c25a25 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/clone.go @@ -0,0 +1,607 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. +// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. +// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. +// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. 
+// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. +// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. +// +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. +package clone + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 + + targetMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Clone struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + target string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClone type alias for index. +type NewClone func(index, target string) *Clone + +// NewCloneFunc returns a new instance of Clone with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCloneFunc(tp elastictransport.Interface) NewClone { + return func(index, target string) *Clone { + n := New(tp) + + n._index(index) + + n._target(target) + + return n + } +} + +// Clone an index. +// Clone an existing index into a new index. +// Each original primary shard is cloned into a new primary shard in the new +// index. +// +// IMPORTANT: Elasticsearch does not apply index templates to the resulting +// index. +// The API also does not copy index metadata from the original index. +// Index metadata includes aliases, index lifecycle management phase +// definitions, and cross-cluster replication (CCR) follower information. 
+// For example, if you clone a CCR follower index, the resulting clone will not +// be a follower index. +// +// The clone API copies most index settings from the source index to the +// resulting index, with the exception of `index.number_of_replicas` and +// `index.auto_expand_replicas`. +// To set the number of replicas in the resulting index, configure these +// settings in the clone request. +// +// Cloning works as follows: +// +// * First, it creates a new target index with the same definition as the source +// index. +// * Then it hard-links segments from the source index into the target index. If +// the file system does not support hard-linking, all segments are copied into +// the new index, which is a much more time consuming process. +// * Finally, it recovers the target index as though it were a closed index +// which had just been re-opened. +// +// IMPORTANT: Indices can only be cloned if they meet the following +// requirements: +// +// * The index must be marked as read-only and have a cluster health status of +// green. +// * The target index must not exist. +// * The source index must have the same number of primary shards as the target +// index. +// * The node handling the clone process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// The current write index on a data stream cannot be cloned. +// In order to clone the current write index, the data stream must first be +// rolled over so that a new write index is created and then the previous write +// index can be cloned. +// +// NOTE: Mappings cannot be specified in the `_clone` request. The mappings of +// the source index will be used for the target index. +// +// **Monitor the cloning process** +// +// The cloning process can be monitored with the cat recovery API or the cluster +// health API can be used to wait until all primary shards have been allocated +// by setting the `wait_for_status` parameter to `yellow`. +// +// The `_clone` API returns as soon as the target index has been added to the +// cluster state, before any shards have been allocated. +// At this point, all shards are in the state unassigned. +// If, for any reason, the target index can't be allocated, its primary shard +// will remain unassigned until it can be allocated on that node. +// +// Once the primary shard is allocated, it moves to state initializing, and the +// clone process begins. +// When the clone operation completes, the shard will become active. +// At that point, Elasticsearch will try to allocate any replicas and may decide +// to relocate the primary shard to another node. +// +// **Wait for active shards** +// +// Because the clone operation creates a new index to clone the shards to, the +// wait for active shards setting on index creation applies to the clone index +// action as well. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone +func New(tp elastictransport.Interface) *Clone { + r := &Clone{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
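+//
+// A minimal usage sketch (editorial addition, not generated): assumes a
+// *elasticsearch.TypedClient named `es` and placeholder index names.
+//
+//	res, err := es.Indices.Clone("my-index", "my-index-clone").
+//		Raw(strings.NewReader(`{"settings":{"index.number_of_replicas":0}}`)).
+//		Do(context.Background())
+//	// res is a *clone.Response when err is nil.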
+func (r *Clone) Raw(raw io.Reader) *Clone { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Clone) Request(req *Request) *Clone { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Clone: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|targetMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_clone") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.target) + } + path.WriteString(r.target) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
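+//
+// Unlike Do, Perform leaves status handling and body decoding to the caller.
+// A short editorial sketch (assumes an initialised *Clone builder `req`):
+//
+//	res, err := req.Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close()
+//		// inspect res.StatusCode and decode res.Body as needed
+//	}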
+func (r Clone) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.clone") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.clone") + if reader := instrument.RecordRequestBody(ctx, "indices.clone", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.clone") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Clone query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clone.Response +func (r Clone) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.clone") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Clone headers map. +func (r *Clone) Header(key, value string) *Clone { + r.headers.Set(key, value) + + return r +} + +// Index Name of the source index to clone. +// API Name: index +func (r *Clone) _index(index string) *Clone { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Target Name of the target index to create. +// API Name: target +func (r *Clone) _target(target string) *Clone { + r.paramSet |= targetMask + r.target = target + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout +func (r *Clone) MasterTimeout(duration string) *Clone { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Clone) Timeout(duration string) *Clone { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Clone) WaitForActiveShards(waitforactiveshards string) *Clone { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Clone) ErrorTrace(errortrace bool) *Clone { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Clone) FilterPath(filterpaths ...string) *Clone { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Clone) Human(human bool) *Clone { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Clone) Pretty(pretty bool) *Clone { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Aliases for the resulting index. +// API name: aliases +func (r *Clone) Aliases(aliases map[string]types.Alias) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aliases = aliases + return r +} + +func (r *Clone) AddAlias(key string, value types.AliasVariant) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + + r.req.Aliases = tmp + return r +} + +// Configuration options for the target index. 
+// API name: settings +func (r *Clone) Settings(settings map[string]json.RawMessage) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Settings = settings + return r +} + +func (r *Clone) AddSetting(key string, value json.RawMessage) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + + r.req.Settings = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/request.go new file mode 100644 index 000000000..901e0774c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clone + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package clone +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/clone/IndicesCloneRequest.ts#L27-L127 +type Request struct { + + // Aliases Aliases for the resulting index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. + Settings map[string]json.RawMessage `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aliases: make(map[string]types.Alias, 0), + Settings: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Clone request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/response.go new file mode 100644 index 000000000..257f3a5b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clone + +// Response holds the response body struct for the package clone +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/close.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/close.go new file mode 100644 index 000000000..d053fb22c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/close.go @@ -0,0 +1,465 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. 
+// An error is thrown if the request explicitly refers to a missing index. +// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the` action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +package close + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Close struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClose type alias for index. +type NewClose func(index string) *Close + +// NewCloseFunc returns a new instance of Close with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCloseFunc(tp elastictransport.Interface) NewClose { + return func(index string) *Close { + n := New(tp) + + n._index(index) + + return n + } +} + +// Close an index. +// A closed index is blocked for read or write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// Closed indices do not have to maintain internal data structures for indexing +// or searching documents, which results in a smaller overhead on the cluster. +// +// When opening or closing an index, the master node is responsible for +// restarting the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened and closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behaviour can be turned off using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the` action.destructive_requires_name` setting to `false`. This +// setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. 
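+//
+// A minimal usage sketch (editorial addition): assumes a
+// *elasticsearch.TypedClient named `es`.
+//
+//	res, err := es.Indices.Close("my-index").
+//		IgnoreUnavailable(true).
+//		Do(context.Background())
+//	// On success, res.Acknowledged reports whether the close was applied
+//	// and res.Indices holds the per-index result.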
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close +func New(tp elastictransport.Interface) *Close { + r := &Close{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Close) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_close") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Close) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.close") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.close") + if reader := instrument.RecordRequestBody(ctx, "indices.close", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.close") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Close query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a close.Response +func (r Close) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.close") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Close) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.close") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Close query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Close headers map. +func (r *Close) Header(key, value string) *Close { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list or wildcard expression of index names used to limit the +// request. +// API Name: index +func (r *Close) _index(index string) *Close { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Close) AllowNoIndices(allownoindices bool) *Close { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Close) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Close { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
+// API name: ignore_unavailable +func (r *Close) IgnoreUnavailable(ignoreunavailable bool) *Close { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Close) MasterTimeout(duration string) *Close { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Close) Timeout(duration string) *Close { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Close) WaitForActiveShards(waitforactiveshards string) *Close { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Close) ErrorTrace(errortrace bool) *Close { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Close) FilterPath(filterpaths ...string) *Close { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Close) Human(human bool) *Close { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Close) Pretty(pretty bool) *Close { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/response.go new file mode 100644 index 000000000..f544787fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/close/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package close + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package close +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/close/CloseIndexResponse.ts#L24-L30 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Indices map[string]types.CloseIndexResult `json:"indices"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.CloseIndexResult, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/create.go new file mode 100644 index 000000000..42b6f046b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/create.go @@ -0,0 +1,525 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** +// +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. +// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, `while shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. 
+// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. +// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. +package create + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Create struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreate type alias for index. +type NewCreate func(index string) *Create + +// NewCreateFunc returns a new instance of Create with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFunc(tp elastictransport.Interface) NewCreate { + return func(index string) *Create { + n := New(tp) + + n._index(index) + + return n + } +} + +// Create an index. +// You can use the create index API to add a new index to an Elasticsearch +// cluster. +// When creating an index, you can specify the following: +// +// * Settings for the index. +// * Mappings for fields in the index. +// * Index aliases +// +// **Wait for active shards** +// +// By default, index creation will only return a response to the client when the +// primary copies of each shard have been started, or the request times out. +// The index creation response will indicate what happened. +// For example, `acknowledged` indicates whether the index was successfully +// created in the cluster, `while shards_acknowledged` indicates whether the +// requisite number of shard copies were started for each shard in the index +// before timing out. +// Note that it is still possible for either `acknowledged` or +// `shards_acknowledged` to be `false`, but for the index creation to be +// successful. +// These values simply indicate whether the operation completed before the +// timeout. +// If `acknowledged` is false, the request timed out before the cluster state +// was updated with the newly created index, but it probably will be created +// sometime soon. 
+// If `shards_acknowledged` is false, then the request timed out before the +// requisite number of shards were started (by default just the primaries), even +// if the cluster state was successfully updated to reflect the newly created +// index (that is to say, `acknowledged` is `true`). +// +// You can change the default of only waiting for the primary shards to start +// through the index setting `index.write.wait_for_active_shards`. +// Note that changing this setting will also affect the `wait_for_active_shards` +// value on all subsequent write operations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create +func New(tp elastictransport.Interface) *Create { + r := &Create{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Create) Raw(raw io.Reader) *Create { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Create) Request(req *Request) *Create { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Create: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
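+//
+// The body set via Request or Raw is serialised by HttpRequest and sent here.
+// An editorial sketch tying this to the wait-for-active-shards behaviour
+// described above (assumes a *elasticsearch.TypedClient named `es`):
+//
+//	res, err := es.Indices.Create("my-index").
+//		WaitForActiveShards("all").
+//		Do(context.Background())
+//	// res.Acknowledged and res.ShardsAcknowledged distinguish a created
+//	// index from a timed-out wait.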
+func (r Create) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.create") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create") + if reader := instrument.RecordRequestBody(ctx, "indices.create", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Create query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a create.Response +func (r Create) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Create headers map. +func (r *Create) Header(key, value string) *Create { + r.headers.Set(key, value) + + return r +} + +// Index Name of the index you wish to create. 
+// Index names must meet the following criteria:
+//
+// * Lowercase only
+// * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space
+// character), `,`, or `#`
+// * Indices prior to 7.0 could contain a colon (`:`), but that has been
+// deprecated and will not be supported in later versions
+// * Cannot start with `-`, `_`, or `+`
+// * Cannot be `.` or `..`
+// * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte
+// characters will reach the limit faster)
+// * Names starting with `.` are deprecated, except for hidden indices and
+// internal indices managed by plugins
+// API Name: index
+func (r *Create) _index(index string) *Create {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout
+func (r *Create) MasterTimeout(duration string) *Create {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout Period to wait for a response.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: timeout
+func (r *Create) Timeout(duration string) *Create {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// WaitForActiveShards The number of shard copies that must be active before proceeding with the
+// operation.
+// Set to `all` or any positive integer up to the total number of shards in the
+// index (`number_of_replicas+1`).
+// API name: wait_for_active_shards
+func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create {
+	r.values.Set("wait_for_active_shards", waitforactiveshards)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Create) ErrorTrace(errortrace bool) *Create {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Create) FilterPath(filterpaths ...string) *Create {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Create) Human(human bool) *Create {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Create) Pretty(pretty bool) *Create {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Aliases for the index.
+// API name: aliases +func (r *Create) Aliases(aliases map[string]types.Alias) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aliases = aliases + return r +} + +func (r *Create) AddAlias(key string, value types.AliasVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + + r.req.Aliases = tmp + return r +} + +// Mapping for fields in the index. If specified, this mapping can include: +// - Field names +// - Field data types +// - Mapping parameters +// API name: mappings +func (r *Create) Mappings(mappings types.TypeMappingVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Mappings = mappings.TypeMappingCaster() + + return r +} + +// Configuration options for the index. +// API name: settings +func (r *Create) Settings(settings types.IndexSettingsVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.IndexSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/request.go new file mode 100644 index 000000000..411349084 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/request.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create/IndicesCreateRequest.ts#L28-L115 +type Request struct { + + // Aliases Aliases for the index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Mappings Mapping for fields in the index. If specified, this mapping can include: + // - Field names + // - Field data types + // - Mapping parameters + Mappings *types.TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. 
+ Settings *types.IndexSettings `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aliases: make(map[string]types.Alias, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Create request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/response.go new file mode 100644 index 000000000..48d706a45 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/create/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +// Response holds the response body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create/IndicesCreateResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/create_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/create_data_stream.go new file mode 100644 index 000000000..4e62206c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/create_data_stream.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a data stream. +// +// You must have a matching index template with data stream enabled. +package createdatastream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateDataStream type alias for index. +type NewCreateDataStream func(name string) *CreateDataStream + +// NewCreateDataStreamFunc returns a new instance of CreateDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateDataStreamFunc(tp elastictransport.Interface) NewCreateDataStream { + return func(name string) *CreateDataStream { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create a data stream. +// +// You must have a matching index template with data stream enabled. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream +func New(tp elastictransport.Interface) *CreateDataStream { + r := &CreateDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
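+//
+// A minimal editorial sketch (assumes a *elasticsearch.TypedClient named `es`
+// and a matching index template with data streams enabled; the stream name is
+// a placeholder):
+//
+//	res, err := es.Indices.CreateDataStream("logs-myapp-default").
+//		Do(context.Background())
+//	// res.Acknowledged reports whether the stream was created.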
+func (r CreateDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.create_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.create_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createdatastream.Response +func (r CreateDataStream) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r CreateDataStream) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CreateDataStream query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CreateDataStream headers map. +func (r *CreateDataStream) Header(key, value string) *CreateDataStream { + r.headers.Set(key, value) + + return r +} + +// Name Name of the data stream, which must meet the following criteria: +// Lowercase only; +// Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a +// space character; +// Cannot start with `-`, `_`, `+`, or `.ds-`; +// Cannot be `.` or `..`; +// Cannot be longer than 255 bytes. Multi-byte characters count towards this +// limit faster. +// API Name: name +func (r *CreateDataStream) _name(name string) *CreateDataStream { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *CreateDataStream) MasterTimeout(duration string) *CreateDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *CreateDataStream) Timeout(duration string) *CreateDataStream { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateDataStream) ErrorTrace(errortrace bool) *CreateDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateDataStream) FilterPath(filterpaths ...string) *CreateDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateDataStream) Human(human bool) *CreateDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
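
Editor's note (not part of the vendored diff): the CreateDataStream builder above is easiest to follow end to end with a usage sketch. Below is a minimal, illustrative program; the cluster address and data stream name are placeholders, the es.Indices entry point is assumed from the typed client's usual layout, and a matching index template with data streams enabled must already exist, as the package comment states.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// NewTypedClient wires up the elastictransport that the generated
	// builders (indices/createdatastream, etc.) perform requests through.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatalf("client setup failed: %v", err)
	}

	// Issues PUT /_data_stream/logs-myapp-default with a 30s master timeout.
	res, err := es.Indices.CreateDataStream("logs-myapp-default").
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatalf("create data stream failed: %v", err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
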
+// API name: pretty +func (r *CreateDataStream) Pretty(pretty bool) *CreateDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/response.go new file mode 100644 index 000000000..0e748b935 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createdatastream + +// Response holds the response body struct for the package createdatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/create_from.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/create_from.go new file mode 100644 index 000000000..0989b2162 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/create_from.go @@ -0,0 +1,421 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an index from a source index. 
+// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. +package createfrom + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + sourceMask = iota + 1 + + destMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateFrom struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + source string + dest string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateFrom type alias for index. +type NewCreateFrom func(source, dest string) *CreateFrom + +// NewCreateFromFunc returns a new instance of CreateFrom with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFromFunc(tp elastictransport.Interface) NewCreateFrom { + return func(source, dest string) *CreateFrom { + n := New(tp) + + n._source(source) + + n._dest(dest) + + return n + } +} + +// Create an index from a source index. +// +// Copy the mappings and settings from the source index to a destination index +// while allowing request settings and mappings to override the source values. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from +func New(tp elastictransport.Interface) *CreateFrom { + r := &CreateFrom{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CreateFrom) Raw(raw io.Reader) *CreateFrom { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateFrom) Request(req *Request) *CreateFrom { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *CreateFrom) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateFrom: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == sourceMask|destMask: + path.WriteString("/") + path.WriteString("_create_from") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "source", r.source) + } + path.WriteString(r.source) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "dest", r.dest) + } + path.WriteString(r.dest) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CreateFrom) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.create_from") + if reader := instrument.RecordRequestBody(ctx, "indices.create_from", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.create_from") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateFrom query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createfrom.Response +func (r CreateFrom) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.create_from") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateFrom headers map. +func (r *CreateFrom) Header(key, value string) *CreateFrom { + r.headers.Set(key, value) + + return r +} + +// Source The source index or data stream name +// API Name: source +func (r *CreateFrom) _source(source string) *CreateFrom { + r.paramSet |= sourceMask + r.source = source + + return r +} + +// Dest The destination index or data stream name +// API Name: dest +func (r *CreateFrom) _dest(dest string) *CreateFrom { + r.paramSet |= destMask + r.dest = dest + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
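
Editor's note (not part of the vendored diff): a minimal sketch of the CreateFrom builder defined above, which copies mappings and settings from a source index into a new destination index. The address and index names are placeholders, and the es.Indices.CreateFrom entry point is assumed from the typed client's usual layout; RemoveIndexBlocks is the optional body field shown in this file.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatalf("client setup failed: %v", err)
	}

	// Issues PUT /_create_from/src-index/dest-index, dropping any index
	// blocks carried over from the source; index names are placeholders.
	res, err := es.Indices.CreateFrom("src-index", "dest-index").
		RemoveIndexBlocks(true).
		Do(context.Background())
	if err != nil {
		log.Fatalf("create-from failed: %v", err)
	}
	fmt.Printf("created %s (acknowledged=%v)\n", res.Index, res.Acknowledged)
}
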
+// API name: error_trace +func (r *CreateFrom) ErrorTrace(errortrace bool) *CreateFrom { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateFrom) FilterPath(filterpaths ...string) *CreateFrom { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateFrom) Human(human bool) *CreateFrom { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateFrom) Pretty(pretty bool) *CreateFrom { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Mappings overrides to be applied to the destination index (optional) +// API name: mappings_override +func (r *CreateFrom) MappingsOverride(mappingsoverride types.TypeMappingVariant) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MappingsOverride = mappingsoverride.TypeMappingCaster() + + return r +} + +// If index blocks should be removed when creating destination index (optional) +// API name: remove_index_blocks +func (r *CreateFrom) RemoveIndexBlocks(removeindexblocks bool) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RemoveIndexBlocks = &removeindexblocks + + return r +} + +// Settings overrides to be applied to the destination index (optional) +// API name: settings_override +func (r *CreateFrom) SettingsOverride(settingsoverride types.IndexSettingsVariant) *CreateFrom { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SettingsOverride = settingsoverride.IndexSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/request.go new file mode 100644 index 000000000..736ee7eb0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createfrom + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create_from/MigrateCreateFromRequest.ts#L25-L44 +type Request = types.CreateFrom + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewCreateFrom() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/response.go new file mode 100644 index 000000000..35f084332 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createfrom + +// Response holds the response body struct for the package createfrom +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create_from/MigrateCreateFromResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/data_streams_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/data_streams_stats.go new file mode 100644 index 000000000..322edbb1c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/data_streams_stats.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data stream stats. +// +// Get statistics for one or more data streams. +package datastreamsstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DataStreamsStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDataStreamsStats type alias for index. +type NewDataStreamsStats func() *DataStreamsStats + +// NewDataStreamsStatsFunc returns a new instance of DataStreamsStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDataStreamsStatsFunc(tp elastictransport.Interface) NewDataStreamsStats { + return func() *DataStreamsStats { + n := New(tp) + + return n + } +} + +// Get data stream stats. +// +// Get statistics for one or more data streams. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 +func New(tp elastictransport.Interface) *DataStreamsStats { + r := &DataStreamsStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DataStreamsStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DataStreamsStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.data_streams_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.data_streams_stats") + if reader := instrument.RecordRequestBody(ctx, "indices.data_streams_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.data_streams_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DataStreamsStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a datastreamsstats.Response +func (r DataStreamsStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.data_streams_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DataStreamsStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.data_streams_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DataStreamsStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DataStreamsStats headers map. +func (r *DataStreamsStats) Header(key, value string) *DataStreamsStats { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams used to limit the request. +// Wildcard expressions (`*`) are supported. +// To target all data streams in a cluster, omit this parameter or use `*`. +// API Name: name +func (r *DataStreamsStats) Name(name string) *DataStreamsStats { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *DataStreamsStats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DataStreamsStats { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DataStreamsStats) ErrorTrace(errortrace bool) *DataStreamsStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DataStreamsStats) FilterPath(filterpaths ...string) *DataStreamsStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DataStreamsStats) Human(human bool) *DataStreamsStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DataStreamsStats) Pretty(pretty bool) *DataStreamsStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/response.go new file mode 100644 index 000000000..2471193fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package datastreamsstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package datastreamsstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 +type Response struct { + + // BackingIndices Total number of backing indices for the selected data streams. + BackingIndices int `json:"backing_indices"` + // DataStreamCount Total number of selected data streams. + DataStreamCount int `json:"data_stream_count"` + // DataStreams Contains statistics for the selected data streams. + DataStreams []types.DataStreamsStatsItem `json:"data_streams"` + // Shards_ Contains information about shards that attempted to execute the request. + Shards_ types.ShardStatistics `json:"_shards"` + // TotalStoreSizeBytes Total size, in bytes, of all shards for the selected data streams. + TotalStoreSizeBytes int64 `json:"total_store_size_bytes"` + // TotalStoreSizes Total size of all shards for the selected data streams. 
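
Editor's note (not part of the vendored diff): a minimal sketch of the DataStreamsStats builder and the Response struct defined here. The address and name pattern are placeholders, and the es.Indices.DataStreamsStats entry point is assumed from the typed client's usual layout; omitting Name targets every data stream in the cluster.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatalf("client setup failed: %v", err)
	}

	// Issues GET /_data_stream/logs-*/_stats; "logs-*" is a placeholder
	// wildcard pattern limiting the request to matching data streams.
	res, err := es.Indices.DataStreamsStats().
		Name("logs-*").
		Do(context.Background())
	if err != nil {
		log.Fatalf("data stream stats failed: %v", err)
	}
	fmt.Printf("%d data streams, %d backing indices, %d bytes stored\n",
		res.DataStreamCount, res.BackingIndices, res.TotalStoreSizeBytes)
}
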
+ // This property is included only if the `human` query parameter is `true` + TotalStoreSizes types.ByteSize `json:"total_store_sizes,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/delete.go new file mode 100644 index 000000000..2fdfcc409 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/delete.go @@ -0,0 +1,415 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete indices. +// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. +// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(index string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(index string) *Delete { + n := New(tp) + + n._index(index) + + return n + } +} + +// Delete indices. +// Deleting an index deletes its documents, shards, and metadata. +// It does not delete related Kibana components, such as data views, +// visualizations, or dashboards. +// +// You cannot delete the current write index of a data stream. 
+// To delete the index, you must roll over the data stream so a new write index +// is created. +// You can then use the delete index API to delete the previous write index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete") + if reader := instrument.RecordRequestBody(ctx, "indices.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of indices to delete. +// You cannot specify index aliases. +// By default, this parameter does not support wildcards (`*`) or `_all`. +// To use wildcards or `_all`, set the `action.destructive_requires_name` +// cluster setting to `false`. +// API Name: index +func (r *Delete) _index(index string) *Delete { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Delete) AllowNoIndices(allownoindices bool) *Delete { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Delete) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Delete { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Delete) IgnoreUnavailable(ignoreunavailable bool) *Delete { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
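
Editor's note (not part of the vendored diff): a minimal sketch of the Delete builder above. The address and index name are placeholders, and the es.Indices.Delete entry point is assumed from the typed client's usual layout; IgnoreUnavailable(true) keeps the call from failing when the target index is already gone.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatalf("client setup failed: %v", err)
	}

	// Issues DELETE /my-old-index; the index name is a placeholder.
	// Remember: the current write index of a data stream cannot be
	// deleted this way; roll the stream over first.
	res, err := es.Indices.Delete("my-old-index").
		IgnoreUnavailable(true).
		Do(context.Background())
	if err != nil {
		log.Fatalf("delete index failed: %v", err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
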
+// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/response.go new file mode 100644 index 000000000..e6d95ed19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete/IndicesDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/delete_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/delete_alias.go new file mode 100644 index 000000000..d09974d02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/delete_alias.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an alias. +// Removes a data stream or index from an alias. +package deletealias + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteAlias type alias for index. +type NewDeleteAlias func(index, name string) *DeleteAlias + +// NewDeleteAliasFunc returns a new instance of DeleteAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteAliasFunc(tp elastictransport.Interface) NewDeleteAlias { + return func(index, name string) *DeleteAlias { + n := New(tp) + + n._index(index) + + n._name(name) + + return n + } +} + +// Delete an alias. +// Removes a data stream or index from an alias. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias +func New(tp elastictransport.Interface) *DeleteAlias { + r := &DeleteAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_aliases") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_alias") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletealias.Response +func (r DeleteAlias) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
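
The boolean flow the comment above describes looks roughly like this in caller code (a sketch, reusing the client and ctx assumed earlier):

    // IsSuccess discards the body and maps any 2xx status to true.
    ok, err := client.Indices.DeleteAlias("logs-2024", "logs-current").IsSuccess(ctx)
    if err != nil {
        log.Fatal(err) // transport errors and non-2xx/non-404 statuses land here
    }
    if !ok {
        fmt.Println("alias or index not found (HTTP 404)")
    }
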
+func (r DeleteAlias) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteAlias query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteAlias headers map. +func (r *DeleteAlias) Header(key, value string) *DeleteAlias { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). +// API Name: index +func (r *DeleteAlias) _index(index string) *DeleteAlias { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Name Comma-separated list of aliases to remove. +// Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. +// API Name: name +func (r *DeleteAlias) _name(name string) *DeleteAlias { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteAlias) MasterTimeout(duration string) *DeleteAlias { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeleteAlias) Timeout(duration string) *DeleteAlias { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteAlias) ErrorTrace(errortrace bool) *DeleteAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteAlias) FilterPath(filterpaths ...string) *DeleteAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteAlias) Human(human bool) *DeleteAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteAlias) Pretty(pretty bool) *DeleteAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/response.go new file mode 100644 index 000000000..ce78339b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletealias + +// Response holds the response body struct for the package deletealias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` + Errors *bool `json:"errors,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go new file mode 100644 index 000000000..6d11e5c41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go @@ -0,0 +1,379 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete data stream lifecycles. 
+// Removes the data stream lifecycle from a data stream, rendering it not +// managed by the data stream lifecycle. +package deletedatalifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDataLifecycle type alias for index. +type NewDeleteDataLifecycle func(name string) *DeleteDataLifecycle + +// NewDeleteDataLifecycleFunc returns a new instance of DeleteDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDataLifecycleFunc(tp elastictransport.Interface) NewDeleteDataLifecycle { + return func(name string) *DeleteDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete data stream lifecycles. +// Removes the data stream lifecycle from a data stream, rendering it not +// managed by the data stream lifecycle. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle +func New(tp elastictransport.Interface) *DeleteDataLifecycle { + r := &DeleteDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
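
Perform, as documented above, hands back the raw *http.Response with no decoding; a minimal sketch, reusing the client and ctx assumed earlier (the data stream name is illustrative):

    raw, err := client.Indices.DeleteDataLifecycle("my-data-stream").Perform(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer raw.Body.Close() // Perform does no decoding; the caller owns the body
    fmt.Println("status:", raw.StatusCode)
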
+func (r DeleteDataLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_data_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_data_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteDataLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedatalifecycle.Response +func (r DeleteDataLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r DeleteDataLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteDataLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteDataLifecycle headers map. +func (r *DeleteDataLifecycle) Header(key, value string) *DeleteDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of data streams of which the data stream lifecycle +// will be deleted; use `*` to get all data streams +// API Name: name +func (r *DeleteDataLifecycle) _name(name string) *DeleteDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices +// (default: open) +// API name: expand_wildcards +func (r *DeleteDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *DeleteDataLifecycle) MasterTimeout(duration string) *DeleteDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Explicit timestamp for the document +// API name: timeout +func (r *DeleteDataLifecycle) Timeout(duration string) *DeleteDataLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteDataLifecycle) ErrorTrace(errortrace bool) *DeleteDataLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteDataLifecycle) FilterPath(filterpaths ...string) *DeleteDataLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DeleteDataLifecycle) Human(human bool) *DeleteDataLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteDataLifecycle) Pretty(pretty bool) *DeleteDataLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/response.go new file mode 100644 index 000000000..1e88fe3e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedatalifecycle + +// Response holds the response body struct for the package deletedatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/delete_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/delete_data_stream.go new file mode 100644 index 000000000..4734d03df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/delete_data_stream.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete data streams. +// Deletes one or more data streams and their backing indices. +package deletedatastream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDataStream type alias for index. +type NewDeleteDataStream func(name string) *DeleteDataStream + +// NewDeleteDataStreamFunc returns a new instance of DeleteDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDataStreamFunc(tp elastictransport.Interface) NewDeleteDataStream { + return func(name string) *DeleteDataStream { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete data streams. +// Deletes one or more data streams and their backing indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream +func New(tp elastictransport.Interface) *DeleteDataStream { + r := &DeleteDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedatastream.Response +func (r DeleteDataStream) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
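
One detail of the IsSuccess variant documented above is worth a sketch: per the implementation that follows, HTTP 404 comes back as (false, nil) rather than an error (client and ctx as assumed earlier; the name is illustrative):

    ok, err := client.Indices.DeleteDataStream("metrics-old").IsSuccess(ctx)
    switch {
    case err != nil:
        log.Fatal(err) // unexpected statuses (e.g. 5xx) are returned as errors
    case !ok:
        fmt.Println("no such data stream (HTTP 404)") // 404 is (false, nil), not an error
    default:
        fmt.Println("data stream deleted")
    }
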
+func (r DeleteDataStream) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteDataStream query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteDataStream headers map. +func (r *DeleteDataStream) Header(key, value string) *DeleteDataStream { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams to delete. Wildcard (`*`) expressions +// are supported. +// API Name: name +func (r *DeleteDataStream) _name(name string) *DeleteDataStream { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *DeleteDataStream) MasterTimeout(duration string) *DeleteDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. Supports +// comma-separated values,such as `open,hidden`. +// API name: expand_wildcards +func (r *DeleteDataStream) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteDataStream { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteDataStream) ErrorTrace(errortrace bool) *DeleteDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteDataStream) FilterPath(filterpaths ...string) *DeleteDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteDataStream) Human(human bool) *DeleteDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteDataStream) Pretty(pretty bool) *DeleteDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/response.go new file mode 100644 index 000000000..ae38a1b57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedatastream + +// Response holds the response body struct for the package deletedatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/delete_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/delete_data_stream_options.go new file mode 100644 index 000000000..edb2b5755 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/delete_data_stream_options.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete data stream options. +// Removes the data stream options from a data stream. +package deletedatastreamoptions + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDataStreamOptions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDataStreamOptions type alias for index. +type NewDeleteDataStreamOptions func(name string) *DeleteDataStreamOptions + +// NewDeleteDataStreamOptionsFunc returns a new instance of DeleteDataStreamOptions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDataStreamOptionsFunc(tp elastictransport.Interface) NewDeleteDataStreamOptions { + return func(name string) *DeleteDataStreamOptions { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete data stream options. +// Removes the data stream options from a data stream. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +func New(tp elastictransport.Interface) *DeleteDataStreamOptions { + r := &DeleteDataStreamOptions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDataStreamOptions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_options") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteDataStreamOptions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_data_stream_options") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_data_stream_options") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_data_stream_options", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_data_stream_options") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteDataStreamOptions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedatastreamoptions.Response +func (r DeleteDataStreamOptions) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
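
Non-2xx responses from the Do method above surface as *types.ElasticsearchError, which implements error; a hedged sketch of inspecting one, assuming the typedapi/types import and the standard errors package alongside the client from the earlier sketches:

    _, err := client.Indices.DeleteDataStreamOptions("my-data-stream").Do(ctx)
    if err != nil {
        var esErr *types.ElasticsearchError
        if errors.As(err, &esErr) {
            // Status is back-filled from the HTTP status code when the body omits it.
            fmt.Println("elasticsearch error, status:", esErr.Status)
        } else {
            log.Fatal(err) // transport-level failure
        }
    }
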
+func (r DeleteDataStreamOptions) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteDataStreamOptions query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteDataStreamOptions headers map. +func (r *DeleteDataStreamOptions) Header(key, value string) *DeleteDataStreamOptions { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of data streams of which the data stream options will +// be deleted; use `*` to get all data streams +// API Name: name +func (r *DeleteDataStreamOptions) _name(name string) *DeleteDataStreamOptions { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices +// (default: open) +// API name: expand_wildcards +func (r *DeleteDataStreamOptions) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteDataStreamOptions { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *DeleteDataStreamOptions) MasterTimeout(duration string) *DeleteDataStreamOptions { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Explicit timestamp for the document +// API name: timeout +func (r *DeleteDataStreamOptions) Timeout(duration string) *DeleteDataStreamOptions { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteDataStreamOptions) ErrorTrace(errortrace bool) *DeleteDataStreamOptions { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteDataStreamOptions) FilterPath(filterpaths ...string) *DeleteDataStreamOptions { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DeleteDataStreamOptions) Human(human bool) *DeleteDataStreamOptions { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteDataStreamOptions) Pretty(pretty bool) *DeleteDataStreamOptions { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/response.go new file mode 100644 index 000000000..5e60d8a9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedatastreamoptions + +// Response holds the response body struct for the package deletedatastreamoptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_data_stream_options/IndicesDeleteDataStreamOptionsResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/delete_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/delete_index_template.go new file mode 100644 index 000000000..eb351ba39 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/delete_index_template.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delete an index template.
+// The provided <index-template> may contain multiple template names separated
+// by a comma. If multiple template
+// names are specified then there is no wildcard support and the provided names
+// should match completely with
+// existing templates.
+package deleteindextemplate
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DeleteIndexTemplate struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDeleteIndexTemplate type alias for index.
+type NewDeleteIndexTemplate func(name string) *DeleteIndexTemplate
+
+// NewDeleteIndexTemplateFunc returns a new instance of DeleteIndexTemplate with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDeleteIndexTemplateFunc(tp elastictransport.Interface) NewDeleteIndexTemplate {
+	return func(name string) *DeleteIndexTemplate {
+		n := New(tp)
+
+		n._name(name)
+
+		return n
+	}
+}
+
+// Delete an index template.
+// The provided <index-template> may contain multiple template names separated
+// by a comma. If multiple template
+// names are specified then there is no wildcard support and the provided names
+// should match completely with
+// existing templates.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template
+func New(tp elastictransport.Interface) *DeleteIndexTemplate {
+	r := &DeleteIndexTemplate{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
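
A short sketch of chaining a query parameter onto this endpoint (client and ctx as assumed earlier; the template name and timeout are illustrative):

    res, err := client.Indices.DeleteIndexTemplate("my-template").
        MasterTimeout("30s"). // becomes ?master_timeout=30s via r.values
        Do(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("acknowledged:", res.Acknowledged)
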
+func (r *DeleteIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteIndexTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_index_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_index_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_index_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteIndexTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteindextemplate.Response +func (r DeleteIndexTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteIndexTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteIndexTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteIndexTemplate headers map. +func (r *DeleteIndexTemplate) Header(key, value string) *DeleteIndexTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of index template names used to limit the request. +// Wildcard (*) expressions are supported. +// API Name: name +func (r *DeleteIndexTemplate) _name(name string) *DeleteIndexTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *DeleteIndexTemplate) MasterTimeout(duration string) *DeleteIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteIndexTemplate) Timeout(duration string) *DeleteIndexTemplate { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteIndexTemplate) ErrorTrace(errortrace bool) *DeleteIndexTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteIndexTemplate) FilterPath(filterpaths ...string) *DeleteIndexTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteIndexTemplate) Human(human bool) *DeleteIndexTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteIndexTemplate) Pretty(pretty bool) *DeleteIndexTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/response.go new file mode 100644 index 000000000..de743cb3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteindextemplate + +// Response holds the response body struct for the package deleteindextemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/delete_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/delete_template.go new file mode 100644 index 000000000..b39b8af81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/delete_template.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delete a legacy index template.
+// IMPORTANT: This documentation is about legacy index templates, which are
+// deprecated and will be replaced by the composable templates introduced in
+// Elasticsearch 7.8.
+package deletetemplate
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DeleteTemplate struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDeleteTemplate is the constructor function type registered in the library's API index.
+type NewDeleteTemplate func(name string) *DeleteTemplate
+
+// NewDeleteTemplateFunc returns a new instance of DeleteTemplate with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewDeleteTemplateFunc(tp elastictransport.Interface) NewDeleteTemplate {
+	return func(name string) *DeleteTemplate {
+		n := New(tp)
+
+		n._name(name)
+
+		return n
+	}
+}
+
+// Delete a legacy index template.
+// IMPORTANT: This documentation is about legacy index templates, which are
+// deprecated and will be replaced by the composable templates introduced in
+// Elasticsearch 7.8.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template
+func New(tp elastictransport.Interface) *DeleteTemplate {
+	r := &DeleteTemplate{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
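For orientation, this builder is rarely constructed by hand; the typed client wires it up and exposes it as es.Indices.DeleteTemplate (and its composable-template sibling above, es.Indices.DeleteIndexTemplate, works the same way). A minimal sketch of the round trip, assuming a locally reachable cluster and the NewTypedClient constructor from the root go-elasticsearch/v9 package; the template name `legacy-logs` is illustrative:

	package main

	import (
		"context"
		"log"

		elasticsearch "github.com/elastic/go-elasticsearch/v9"
	)

	func main() {
		// The typed client satisfies elastictransport.Interface, so the
		// builder's Perform runs through its transport.
		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
			Addresses: []string{"http://localhost:9200"}, // assumed local cluster
		})
		if err != nil {
			log.Fatal(err)
		}

		// Issues DELETE /_template/legacy-logs and decodes {"acknowledged": true}.
		res, err := es.Indices.DeleteTemplate("legacy-logs").
			MasterTimeout("30s").
			Do(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("acknowledged=%v", res.Acknowledged)
	}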
+func (r *DeleteTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.delete_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.delete_template") + if reader := instrument.RecordRequestBody(ctx, "indices.delete_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.delete_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletetemplate.Response +func (r DeleteTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.delete_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteTemplate headers map. +func (r *DeleteTemplate) Header(key, value string) *DeleteTemplate { + r.headers.Set(key, value) + + return r +} + +// Name The name of the legacy index template to delete. +// Wildcard (`*`) expressions are supported. +// API Name: name +func (r *DeleteTemplate) _name(name string) *DeleteTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteTemplate) MasterTimeout(duration string) *DeleteTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeleteTemplate) Timeout(duration string) *DeleteTemplate { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteTemplate) ErrorTrace(errortrace bool) *DeleteTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteTemplate) FilterPath(filterpaths ...string) *DeleteTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteTemplate) Human(human bool) *DeleteTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteTemplate) Pretty(pretty bool) *DeleteTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/response.go new file mode 100644 index 000000000..522dffc81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletetemplate + +// Response holds the response body struct for the package deletetemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/disk_usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/disk_usage.go new file mode 100644 index 000000000..d3b4a197d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/disk_usage.go @@ -0,0 +1,427 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Analyze the index disk usage. +// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. +// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. +package diskusage + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DiskUsage struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDiskUsage type alias for index. +type NewDiskUsage func(index string) *DiskUsage + +// NewDiskUsageFunc returns a new instance of DiskUsage with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDiskUsageFunc(tp elastictransport.Interface) NewDiskUsage { + return func(index string) *DiskUsage { + n := New(tp) + + n._index(index) + + return n + } +} + +// Analyze the index disk usage. +// Analyze the disk usage of each field of an index or data stream. +// This API might not support indices created in previous Elasticsearch +// versions. +// The result of a small index can be inaccurate as some parts of an index might +// not be analyzed by the API. +// +// NOTE: The total size of fields of the analyzed shards of the index in the +// response is usually smaller than the index `store_size` value because some +// small metadata files are ignored and some parts of data files might not be +// scanned by the API. +// Since stored fields are stored together in a compressed format, the sizes of +// stored fields are also estimates and can be inaccurate. +// The stored size of the `_id` field is likely underestimated while the +// `_source` field is overestimated. 
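Worth noting before the constructor: because the analysis is deliberately expensive, the endpoint refuses to run unless run_expensive_tasks=true is sent (see RunExpensiveTasks below). A sketch, assuming the typed client `es` from the earlier example and an illustrative index name:

	// POST /my-index/_disk_usage?run_expensive_tasks=true
	raw, err := es.Indices.DiskUsage("my-index").
		RunExpensiveTasks(true). // mandatory opt-in; the API errors without it
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Response is a json.RawMessage; unmarshal it to inspect per-field sizes.
	var report map[string]any
	if err := json.Unmarshal(raw, &report); err != nil {
		log.Fatal(err)
	}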
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage +func New(tp elastictransport.Interface) *DiskUsage { + r := &DiskUsage{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DiskUsage) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_disk_usage") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DiskUsage) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.disk_usage") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.disk_usage") + if reader := instrument.RecordRequestBody(ctx, "indices.disk_usage", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.disk_usage") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DiskUsage query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a diskusage.Response +func (r DiskUsage) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.disk_usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DiskUsage) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.disk_usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DiskUsage query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DiskUsage headers map. +func (r *DiskUsage) Header(key, value string) *DiskUsage { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// It’s recommended to execute this API with a single index (or the latest +// backing index of a data stream) as the API consumes resources significantly. +// API Name: index +func (r *DiskUsage) _index(index string) *DiskUsage { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *DiskUsage) AllowNoIndices(allownoindices bool) *DiskUsage { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. 
+// API name: expand_wildcards +func (r *DiskUsage) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DiskUsage { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Flush If `true`, the API performs a flush before analysis. +// If `false`, the response may not include uncommitted data. +// API name: flush +func (r *DiskUsage) Flush(flush bool) *DiskUsage { + r.values.Set("flush", strconv.FormatBool(flush)) + + return r +} + +// IgnoreUnavailable If `true`, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *DiskUsage) IgnoreUnavailable(ignoreunavailable bool) *DiskUsage { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// RunExpensiveTasks Analyzing field disk usage is resource-intensive. +// To use the API, this parameter must be set to `true`. +// API name: run_expensive_tasks +func (r *DiskUsage) RunExpensiveTasks(runexpensivetasks bool) *DiskUsage { + r.values.Set("run_expensive_tasks", strconv.FormatBool(runexpensivetasks)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DiskUsage) ErrorTrace(errortrace bool) *DiskUsage { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DiskUsage) FilterPath(filterpaths ...string) *DiskUsage { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DiskUsage) Human(human bool) *DiskUsage { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DiskUsage) Pretty(pretty bool) *DiskUsage { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/response.go new file mode 100644 index 000000000..57fc46d24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package diskusage + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package diskusage +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/downsample.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/downsample.go new file mode 100644 index 000000000..6f79b1887 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/downsample.go @@ -0,0 +1,411 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Downsample an index. +// Aggregate a time series (TSDS) index and store pre-computed statistical +// summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric +// field grouped by a configured time interval. +// For example, a TSDS index that contains metrics sampled every 10 seconds can +// be downsampled to an hourly index. +// All documents within an hour interval are summarized and stored as a single +// document in the downsample index. +// +// NOTE: Only indices in a time series data stream are supported. +// Neither field nor document level security can be defined on the source index. +// The source index must be read only (`index.blocks.write: true`). +package downsample + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 + + targetindexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Downsample struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	index       string
+	targetindex string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDownsample is the constructor function type registered in the library's API index.
+type NewDownsample func(index, targetindex string) *Downsample
+
+// NewDownsampleFunc returns a new instance of Downsample with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewDownsampleFunc(tp elastictransport.Interface) NewDownsample {
+	return func(index, targetindex string) *Downsample {
+		n := New(tp)
+
+		n._index(index)
+
+		n._targetindex(targetindex)
+
+		return n
+	}
+}
+
+// Downsample an index.
+// Aggregate a time series (TSDS) index and store pre-computed statistical
+// summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric
+// field grouped by a configured time interval.
+// For example, a TSDS index that contains metrics sampled every 10 seconds can
+// be downsampled to an hourly index.
+// All documents within an hour interval are summarized and stored as a single
+// document in the downsample index.
+//
+// NOTE: Only indices in a time series data stream are supported.
+// Neither field nor document level security can be defined on the source index.
+// The source index must be read only (`index.blocks.write: true`).
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample
+func New(tp elastictransport.Interface) *Downsample {
+	r := &Downsample{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *Downsample) Raw(raw io.Reader) *Downsample {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *Downsample) Request(req *Request) *Downsample {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
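The request body carries the downsampling interval and can be populated either with a typed Request (an alias of types.DownsampleConfig, per request.go later in this diff) or with the FixedInterval helper defined further down in this file. A sketch, assuming the typed client `es` from the earlier examples and illustrative TSDS index names; the source index must already be read-only:

	// POST /metrics-000001/_downsample/metrics-000001-1h
	// with the body {"fixed_interval": "1h"}.
	res, err := es.Indices.Downsample("metrics-000001", "metrics-000001-1h").
		FixedInterval("1h").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downsample result: %s", res) // Response is a json.RawMessage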
+func (r *Downsample) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Downsample: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|targetindexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_downsample") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "targetindex", r.targetindex) + } + path.WriteString(r.targetindex) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
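As the serialization branch at the top of HttpRequest shows, an explicit Raw reader suppresses marshalling of the typed request entirely. A sketch of sending a prebuilt payload, assuming the same client `es`; this is mainly useful when the body is produced elsewhere and re-encoding it through types.DownsampleConfig would be wasted work:

	// Raw takes precedence over Request/FixedInterval; the body is sent as-is.
	res, err := es.Indices.Downsample("metrics-000001", "metrics-000001-1h").
		Raw(strings.NewReader(`{"fixed_interval":"1h"}`)).
		Do(context.Background())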
+func (r Downsample) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.downsample") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.downsample") + if reader := instrument.RecordRequestBody(ctx, "indices.downsample", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.downsample") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Downsample query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a downsample.Response +func (r Downsample) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.downsample") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Downsample headers map. +func (r *Downsample) Header(key, value string) *Downsample { + r.headers.Set(key, value) + + return r +} + +// Index Name of the time series index to downsample. +// API Name: index +func (r *Downsample) _index(index string) *Downsample { + r.paramSet |= indexMask + r.index = index + + return r +} + +// TargetIndex Name of the index to create. +// API Name: targetindex +func (r *Downsample) _targetindex(targetindex string) *Downsample { + r.paramSet |= targetindexMask + r.targetindex = targetindex + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *Downsample) ErrorTrace(errortrace bool) *Downsample {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Downsample) FilterPath(filterpaths ...string) *Downsample {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, Elasticsearch will return statistics in a format
+// suitable for humans: for example `"exists_time": "1h"` instead of
+// `"exists_time_in_millis": 3600000`. When disabled, the human-readable
+// values are omitted, which makes sense for responses consumed only by
+// machines.
+// API name: human
+func (r *Downsample) Human(human bool) *Downsample {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted".
+// Use this option for debugging only.
+// API name: pretty
+func (r *Downsample) Pretty(pretty bool) *Downsample {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The interval at which to aggregate the original time series index.
+// API name: fixed_interval
+func (r *Downsample) FixedInterval(durationlarge string) *Downsample {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.FixedInterval = durationlarge
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/request.go
new file mode 100644
index 000000000..edc46d716
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/request.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package downsample + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package downsample +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/downsample/Request.ts#L24-L58 +type Request = types.DownsampleConfig + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewDownsampleConfig() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/response.go new file mode 100644 index 000000000..bc2425e6b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package downsample + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package downsample +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/downsample/Response.ts#L22-L25 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/exists/exists.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/exists/exists.go new file mode 100644 index 000000000..c3537bae5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/exists/exists.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check indices. +// Check if one or more indices, index aliases, or data streams exist. +package exists + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Exists struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExists type alias for index. +type NewExists func(index string) *Exists + +// NewExistsFunc returns a new instance of Exists with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsFunc(tp elastictransport.Interface) NewExists { + return func(index string) *Exists { + n := New(tp) + + n._index(index) + + return n + } +} + +// Check indices. +// Check if one or more indices, index aliases, or data streams exist. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists +func New(tp elastictransport.Interface) *Exists { + r := &Exists{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Exists) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
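Since existence is conveyed entirely by the status code of the HEAD request, Do simply delegates to IsSuccess and yields a bool: 2xx maps to true, 404 to false, and any other status to an error. A sketch, assuming the client `es` from the earlier examples:

	exists, err := es.Indices.Exists("my-index").Do(context.Background())
	if err != nil {
		log.Fatal(err) // transport failure, or a non-2xx status other than 404
	}
	if !exists {
		log.Println("my-index is absent (HEAD returned 404)")
	}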
+func (r Exists) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.exists") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists") + if reader := instrument.RecordRequestBody(ctx, "indices.exists", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Exists query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a exists.Response +func (r Exists) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Exists) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Exists query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Exists headers map. +func (r *Exists) Header(key, value string) *Exists { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases. Supports +// wildcards (`*`). +// API Name: index +func (r *Exists) _index(index string) *Exists { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Exists) AllowNoIndices(allownoindices bool) *Exists { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. 
+// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Exists) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Exists { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// FlatSettings If `true`, returns settings in flat format. +// API name: flat_settings +func (r *Exists) FlatSettings(flatsettings bool) *Exists { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Exists) IgnoreUnavailable(ignoreunavailable bool) *Exists { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeDefaults If `true`, return all default settings in the response. +// API name: include_defaults +func (r *Exists) IncludeDefaults(includedefaults bool) *Exists { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. +// API name: local +func (r *Exists) Local(local bool) *Exists { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Exists) ErrorTrace(errortrace bool) *Exists { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Exists) FilterPath(filterpaths ...string) *Exists { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Exists) Human(human bool) *Exists { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Exists) Pretty(pretty bool) *Exists { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsalias/exists_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsalias/exists_alias.go new file mode 100644 index 000000000..6f69e0d6f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsalias/exists_alias.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check aliases. +// +// Check if one or more data stream or index aliases exist. +package existsalias + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExistsAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExistsAlias type alias for index. +type NewExistsAlias func(name string) *ExistsAlias + +// NewExistsAliasFunc returns a new instance of ExistsAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsAliasFunc(tp elastictransport.Interface) NewExistsAlias { + return func(name string) *ExistsAlias { + n := New(tp) + + n._name(name) + + return n + } +} + +// Check aliases. +// +// Check if one or more data stream or index aliases exist. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias +func New(tp elastictransport.Interface) *ExistsAlias { + r := &ExistsAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
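Editor's note (a sketch, not part of the vendored diff): HttpRequest, next, chooses the URL from the paramSet bitmask declared above. _name sets nameMask (1) and the optional Index setter adds indexMask (2), so nameMask alone selects HEAD /_alias/{name} while indexMask|nameMask selects HEAD /{index}/_alias/{name}. Worked through in isolation:

	paramSet := 0
	paramSet |= nameMask                   // 0b01 -> HEAD /_alias/{name}
	paramSet |= indexMask                  // 0b11 -> HEAD /{index}/_alias/{name}
	both := paramSet == indexMask|nameMask // true: | binds tighter than ==
	_ = both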
+func (r *ExistsAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodHead + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExistsAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.exists_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists_alias") + if reader := instrument.RecordRequestBody(ctx, "indices.exists_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExistsAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a existsalias.Response +func (r ExistsAlias) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
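Editor's note (a sketch, not part of the vendored diff): IsSuccess, next, folds the HEAD response into a boolean: any 2xx status yields true, 404 yields false, and any other status is returned as an error (and reported to the instrumentation when one is configured). A minimal caller, assuming a typed client es created with elasticsearch.NewTypedClient; the alias and index names are placeholders:

	ctx := context.Background()
	ok, err := es.Indices.ExistsAlias("logs-alias").Index("logs-*").Do(ctx) // Do delegates to IsSuccess
	if err != nil {
		log.Fatal(err) // transport failure, or a status that is neither 2xx nor 404
	}
	log.Printf("alias exists: %v", ok)

The plain index check from earlier in this diff follows the same pattern: es.Indices.Exists("my-index").Do(ctx).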
+func (r ExistsAlias) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExistsAlias query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExistsAlias headers map. +func (r *ExistsAlias) Header(key, value string) *ExistsAlias { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of aliases to check. Supports wildcards (`*`). +// API Name: name +func (r *ExistsAlias) _name(name string) *ExistsAlias { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *ExistsAlias) Index(index string) *ExistsAlias { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *ExistsAlias) AllowNoIndices(allownoindices bool) *ExistsAlias { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *ExistsAlias) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ExistsAlias { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, requests that include a missing data stream or index in the +// target indices or data streams return an error. +// API name: ignore_unavailable +func (r *ExistsAlias) IgnoreUnavailable(ignoreunavailable bool) *ExistsAlias { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExistsAlias) MasterTimeout(duration string) *ExistsAlias { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *ExistsAlias) ErrorTrace(errortrace bool) *ExistsAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExistsAlias) FilterPath(filterpaths ...string) *ExistsAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExistsAlias) Human(human bool) *ExistsAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ExistsAlias) Pretty(pretty bool) *ExistsAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsindextemplate/exists_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsindextemplate/exists_index_template.go new file mode 100644 index 000000000..c5e1eb85f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsindextemplate/exists_index_template.go @@ -0,0 +1,322 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check index templates. +// +// Check whether index templates exist. +package existsindextemplate + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExistsIndexTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExistsIndexTemplate type alias for index. 
+type NewExistsIndexTemplate func(name string) *ExistsIndexTemplate + +// NewExistsIndexTemplateFunc returns a new instance of ExistsIndexTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsIndexTemplateFunc(tp elastictransport.Interface) NewExistsIndexTemplate { + return func(name string) *ExistsIndexTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Check index templates. +// +// Check whether index templates exist. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template +func New(tp elastictransport.Interface) *ExistsIndexTemplate { + r := &ExistsIndexTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExistsIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
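Editor's note (a sketch, not part of the vendored diff): Perform, next, is where the elastictransport.Instrumentation hooks fire: Start and Close bracket the span, RecordPathPart and RecordRequestBody capture request details, BeforeRequest and AfterRequest wrap the round trip, and RecordError reports failures. Below is a logging implementation whose method signatures are inferred from the call sites visible in this diff; the authoritative interface is defined in elastic-transport-go:

	package example

	import (
		"context"
		"io"
		"log"
		"net/http"
	)

	// logInstrumentation is a hypothetical Instrumentation that only logs.
	type logInstrumentation struct{}

	// Start opens a span for the named endpoint and returns the context to use.
	func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
		log.Printf("span start: %s", name)
		return ctx
	}

	// Close ends the span opened by Start.
	func (logInstrumentation) Close(ctx context.Context) {
		log.Print("span end")
	}

	// RecordError reports a transport or decoding failure.
	func (logInstrumentation) RecordError(ctx context.Context, err error) {
		log.Printf("request error: %v", err)
	}

	// RecordPathPart captures a path parameter such as an index or alias name.
	func (logInstrumentation) RecordPathPart(ctx context.Context, part, value string) {
		log.Printf("path %s=%s", part, value)
	}

	// RecordRequestBody may return a replacement body; returning nil keeps the
	// original, which is the case the generated Perform methods check for.
	func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
		return nil
	}

	// BeforeRequest and AfterRequest bracket the actual round trip.
	func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string)         {}
	func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {}

Recent go-elasticsearch versions accept such an implementation through the Instrumentation field of the client Config, after which every endpoint in this diff reports through these hooks.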
+func (r ExistsIndexTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.exists_index_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.exists_index_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists_index_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExistsIndexTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a existsindextemplate.Response +func (r ExistsIndexTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ExistsIndexTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExistsIndexTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExistsIndexTemplate headers map. +func (r *ExistsIndexTemplate) Header(key, value string) *ExistsIndexTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of index template names used to limit the request. +// Wildcard (*) expressions are supported. +// API Name: name +func (r *ExistsIndexTemplate) _name(name string) *ExistsIndexTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Local If true, the request retrieves information from the local node only. Defaults +// to false, which means information is retrieved from the master node. +// API name: local +func (r *ExistsIndexTemplate) Local(local bool) *ExistsIndexTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// FlatSettings If true, returns settings in flat format. 
+// API name: flat_settings +func (r *ExistsIndexTemplate) FlatSettings(flatsettings bool) *ExistsIndexTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *ExistsIndexTemplate) MasterTimeout(duration string) *ExistsIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExistsIndexTemplate) ErrorTrace(errortrace bool) *ExistsIndexTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExistsIndexTemplate) FilterPath(filterpaths ...string) *ExistsIndexTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExistsIndexTemplate) Human(human bool) *ExistsIndexTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ExistsIndexTemplate) Pretty(pretty bool) *ExistsIndexTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existstemplate/exists_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existstemplate/exists_template.go new file mode 100644 index 000000000..a2058a782 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/existstemplate/exists_template.go @@ -0,0 +1,333 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check existence of index templates. +// Get information about whether index templates exist. 
+// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +package existstemplate + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExistsTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExistsTemplate type alias for index. +type NewExistsTemplate func(name string) *ExistsTemplate + +// NewExistsTemplateFunc returns a new instance of ExistsTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExistsTemplateFunc(tp elastictransport.Interface) NewExistsTemplate { + return func(name string) *ExistsTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Check existence of index templates. +// Get information about whether index templates exist. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template +func New(tp elastictransport.Interface) *ExistsTemplate { + r := &ExistsTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ExistsTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodHead + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExistsTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.exists_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.exists_template") + if reader := instrument.RecordRequestBody(ctx, "indices.exists_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.exists_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExistsTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a existstemplate.Response +func (r ExistsTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
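Editor's note (a sketch, not part of the vendored diff): a caller for this legacy-template check; es and the template name are placeholders. Per the package comment above, new code should prefer the composable-template endpoint, ExistsIndexTemplate:

	exists, err := es.Indices.ExistsTemplate("legacy-logs-template").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("legacy template present: %v", exists)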
+func (r ExistsTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.exists_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExistsTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExistsTemplate headers map. +func (r *ExistsTemplate) Header(key, value string) *ExistsTemplate { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of index template names used to limit the request. +// Wildcard (`*`) expressions are supported. +// API Name: name +func (r *ExistsTemplate) _name(name string) *ExistsTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// FlatSettings Indicates whether to use a flat format for the response. +// API name: flat_settings +func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// Local Indicates whether to get information from the local node only. +// API name: local +func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *ExistsTemplate) MasterTimeout(duration string) *ExistsTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExistsTemplate) ErrorTrace(errortrace bool) *ExistsTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExistsTemplate) FilterPath(filterpaths ...string) *ExistsTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExistsTemplate) Human(human bool) *ExistsTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ExistsTemplate) Pretty(pretty bool) *ExistsTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go new file mode 100644 index 000000000..dd92ff8b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the status for a data stream lifecycle. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. +package explaindatalifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExplainDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExplainDataLifecycle type alias for index. +type NewExplainDataLifecycle func(index string) *ExplainDataLifecycle + +// NewExplainDataLifecycleFunc returns a new instance of ExplainDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExplainDataLifecycleFunc(tp elastictransport.Interface) NewExplainDataLifecycle { + return func(index string) *ExplainDataLifecycle { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get the status for a data stream lifecycle. +// Get information about an index or data stream's current data stream lifecycle +// status, such as time since index creation, time since rollover, the lifecycle +// configuration managing the index, or any errors encountered during lifecycle +// execution. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle +func New(tp elastictransport.Interface) *ExplainDataLifecycle { + r := &ExplainDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExplainDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("explain") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ExplainDataLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.explain_data_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.explain_data_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "indices.explain_data_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.explain_data_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExplainDataLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explaindatalifecycle.Response +func (r ExplainDataLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.explain_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
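Editor's note (a sketch, not part of the vendored diff): unlike the HEAD-style endpoints earlier in this diff, the Do defined just above decodes the JSON body into a typed Response (see response.go later in this diff) and decodes failures into types.ElasticsearchError. A caller, with es and the target name as placeholders:

	res, err := es.Indices.ExplainDataLifecycle("my-data-stream").IncludeDefaults(true).Do(context.Background())
	if err != nil {
		log.Fatal(err) // includes decoded ElasticsearchError responses
	}
	for index, explain := range res.Indices {
		log.Printf("%s: %+v", index, explain)
	}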
+func (r ExplainDataLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.explain_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExplainDataLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExplainDataLifecycle headers map. +func (r *ExplainDataLifecycle) Header(key, value string) *ExplainDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index to explain +// API Name: index +func (r *ExplainDataLifecycle) _index(index string) *ExplainDataLifecycle { + r.paramSet |= indexMask + r.index = index + + return r +} + +// IncludeDefaults indicates if the API should return the default values the system uses for the +// index's lifecycle +// API name: include_defaults +func (r *ExplainDataLifecycle) IncludeDefaults(includedefaults bool) *ExplainDataLifecycle { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *ExplainDataLifecycle) MasterTimeout(duration string) *ExplainDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExplainDataLifecycle) ErrorTrace(errortrace bool) *ExplainDataLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExplainDataLifecycle) FilterPath(filterpaths ...string) *ExplainDataLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExplainDataLifecycle) Human(human bool) *ExplainDataLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ExplainDataLifecycle) Pretty(pretty bool) *ExplainDataLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/response.go new file mode 100644 index 000000000..7a81b933f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explaindatalifecycle + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package explaindatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 +type Response struct { + Indices map[string]types.DataStreamLifecycleExplain `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.DataStreamLifecycleExplain, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/field_usage_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/field_usage_stats.go new file mode 100644 index 000000000..a32dd29f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/field_usage_stats.go @@ -0,0 +1,408 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. +package fieldusagestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FieldUsageStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFieldUsageStats type alias for index. +type NewFieldUsageStats func(index string) *FieldUsageStats + +// NewFieldUsageStatsFunc returns a new instance of FieldUsageStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFieldUsageStatsFunc(tp elastictransport.Interface) NewFieldUsageStats { + return func(index string) *FieldUsageStats { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get field usage stats. +// Get field usage information for each shard and field of an index. +// Field usage statistics are automatically captured when queries are running on +// a cluster. +// A shard-level search request that accesses a given field, even if multiple +// times during that request, is counted as a single use. +// +// The response body reports the per-shard usage count of the data structures +// that back the fields in the index. +// A given request will increment each count by a maximum value of 1, even if +// the request accesses the same field multiple times. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats +func New(tp elastictransport.Interface) *FieldUsageStats { + r := &FieldUsageStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
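Editor's note (a sketch, not part of the vendored diff): a caller for the field usage stats endpoint; es and the names are placeholders. The response carries the usual _shards header plus per-index usage in FieldsUsageBody (see response.go later in this diff):

	res, err := es.Indices.FieldUsageStats("my-index").Fields("user.id", "message").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("shards reporting: %d", res.Shards_.Total)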
+func (r *FieldUsageStats) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == indexMask:
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.index)
+		}
+		path.WriteString(r.index)
+		path.WriteString("/")
+		path.WriteString("_field_usage_stats")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r FieldUsageStats) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "indices.field_usage_stats")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "indices.field_usage_stats")
+		if reader := instrument.RecordRequestBody(ctx, "indices.field_usage_stats", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "indices.field_usage_stats")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the FieldUsageStats query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a fieldusagestats.Response
+func (r FieldUsageStats) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.field_usage_stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r FieldUsageStats) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.field_usage_stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the FieldUsageStats query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the FieldUsageStats headers map.
+func (r *FieldUsageStats) Header(key, value string) *FieldUsageStats {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index Comma-separated list or wildcard expression of index names used to limit the
+// request.
+// API Name: index
+func (r *FieldUsageStats) _index(index string) *FieldUsageStats {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index
+// alias, or `_all` value targets only missing or closed indices.
+// This behavior applies even if the request targets other open indices.
+// For example, a request targeting `foo*,bar*` returns an error if an index
+// starts with `foo` but no index starts with `bar`.
+// API name: allow_no_indices
+func (r *FieldUsageStats) AllowNoIndices(allownoindices bool) *FieldUsageStats {
+	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))
+
+	return r
+}
+
+// ExpandWildcards Type of index that wildcard patterns can match.
+// If the request can target data streams, this argument determines whether
+// wildcard expressions match hidden data streams.
+// Supports comma-separated values, such as `open,hidden`.
+// API name: expand_wildcards
+func (r *FieldUsageStats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *FieldUsageStats {
+	tmp := []string{}
+	for _, item := range expandwildcards {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("expand_wildcards", strings.Join(tmp, ","))
+
+	return r
+}
+
+// IgnoreUnavailable If `true`, missing or closed indices are not included in the response.
+// API name: ignore_unavailable
+func (r *FieldUsageStats) IgnoreUnavailable(ignoreunavailable bool) *FieldUsageStats {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// Fields Comma-separated list or wildcard expressions of fields to include in the
+// statistics.
+// API name: fields +func (r *FieldUsageStats) Fields(fields ...string) *FieldUsageStats { + r.values.Set("fields", strings.Join(fields, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FieldUsageStats) ErrorTrace(errortrace bool) *FieldUsageStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *FieldUsageStats) FilterPath(filterpaths ...string) *FieldUsageStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *FieldUsageStats) Human(human bool) *FieldUsageStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FieldUsageStats) Pretty(pretty bool) *FieldUsageStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/response.go new file mode 100644 index 000000000..a64f8ca2c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package fieldusagestats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package fieldusagestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L31 +type Response struct { + FieldsUsageBody map[string]types.UsageStatsIndex `json:"-"` + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldsUsageBody: make(map[string]types.UsageStatsIndex, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/flush.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/flush.go new file mode 100644 index 000000000..8132c1b59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/flush.go @@ -0,0 +1,449 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. 
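Aside: before the implementation, a hedged usage sketch. With the same hypothetical `es` client as in the earlier example, a manual flush of one index might look like this (the index name is illustrative; `WaitIfOngoing` and the `Shards_` response field are defined below in this package).

```
// Flush one index; block (rather than fail) if a flush is already running.
res, err := es.Indices.Flush().
	Index("my-index").
	WaitIfOngoing(true).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
if res.Shards_ != nil {
	fmt.Printf("flush _shards: %+v\n", *res.Shards_)
}
```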
+package flush + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Flush struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFlush type alias for index. +type NewFlush func() *Flush + +// NewFlushFunc returns a new instance of Flush with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFlushFunc(tp elastictransport.Interface) NewFlush { + return func() *Flush { + n := New(tp) + + return n + } +} + +// Flush data streams or indices. +// Flushing a data stream or index is the process of making sure that any data +// that is currently only stored in the transaction log is also permanently +// stored in the Lucene index. +// When restarting, Elasticsearch replays any unflushed operations from the +// transaction log into the Lucene index to bring it back into the state that it +// was in before the restart. +// Elasticsearch automatically triggers flushes as needed, using heuristics that +// trade off the size of the unflushed transaction log against the cost of +// performing each flush. +// +// After each operation has been flushed it is permanently stored in the Lucene +// index. +// This may mean that there is no need to maintain an additional copy of it in +// the transaction log. +// The transaction log is made up of multiple files, called generations, and +// Elasticsearch will delete any generation files when they are no longer +// needed, freeing up disk space. +// +// It is also possible to trigger a flush on one or more indices using the flush +// API, although it is rare for users to need to call this API directly. +// If you call the flush API after indexing some documents then a successful +// response indicates that Elasticsearch has flushed all the documents that were +// indexed before the flush API was called. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush +func New(tp elastictransport.Interface) *Flush { + r := &Flush{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Flush) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_flush") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_flush") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Flush) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.flush") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.flush") + if reader := instrument.RecordRequestBody(ctx, "indices.flush", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.flush") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Flush query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a flush.Response +func (r Flush) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.flush") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Flush) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.flush") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Flush query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Flush headers map. +func (r *Flush) Header(key, value string) *Flush { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases to flush. +// Supports wildcards (`*`). +// To flush all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Flush) Index(index string) *Flush { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Flush) AllowNoIndices(allownoindices bool) *Flush { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Flush) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Flush { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Force If `true`, the request forces a flush even if there are no changes to commit +// to the index. +// API name: force +func (r *Flush) Force(force bool) *Flush { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
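Note the error path in `Do` above: the decoded `*types.ElasticsearchError` is returned as the `error` value itself, with `Status` backfilled from the HTTP status code when the body omits it. Callers can therefore inspect it with `errors.As`. A sketch, reusing the assumed `es` client, an illustrative index name, and imports of the standard `errors` package plus the vendored `typedapi/types` package:

```
_, err := es.Indices.Flush().
	Index("no-such-index").
	IgnoreUnavailable(false).
	Do(context.Background())

var esErr *types.ElasticsearchError
if errors.As(err, &esErr) {
	// Status carries the HTTP status of the failed call.
	fmt.Println("flush failed with status:", esErr.Status)
}
```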
+// API name: ignore_unavailable +func (r *Flush) IgnoreUnavailable(ignoreunavailable bool) *Flush { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// WaitIfOngoing If `true`, the flush operation blocks until execution when another flush +// operation is running. +// If `false`, Elasticsearch returns an error if you request a flush when +// another flush operation is running. +// API name: wait_if_ongoing +func (r *Flush) WaitIfOngoing(waitifongoing bool) *Flush { + r.values.Set("wait_if_ongoing", strconv.FormatBool(waitifongoing)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Flush) ErrorTrace(errortrace bool) *Flush { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Flush) FilterPath(filterpaths ...string) *Flush { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Flush) Human(human bool) *Flush { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Flush) Pretty(pretty bool) *Flush { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/response.go new file mode 100644 index 000000000..62e66ae38 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package flush + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package flush +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/flush/IndicesFlushResponse.ts#L22-L25 +type Response struct { + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/forcemerge.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/forcemerge.go new file mode 100644 index 000000000..4aa61d27e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/forcemerge.go @@ -0,0 +1,571 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Force a merge. +// Perform the force merge operation on the shards of one or more indices. +// For data streams, the API forces a merge on the shards of the stream's +// backing indices. +// +// Merging reduces the number of segments in each shard by merging some of them +// together and also frees up the space used by deleted documents. +// Merging normally happens automatically, but sometimes it is useful to trigger +// a merge manually. +// +// WARNING: We recommend force merging only a read-only index (meaning the index +// is no longer receiving writes). +// When documents are updated or deleted, the old version is not immediately +// removed but instead soft-deleted and marked with a "tombstone". +// These soft-deleted documents are automatically cleaned up during regular +// segment merges. +// But force merge can cause very large (greater than 5 GB) segments to be +// produced, which are not eligible for regular merges. +// So the number of soft-deleted documents can then grow rapidly, resulting in +// higher disk usage and worse search performance. +// If you regularly force merge an index receiving writes, this can also make +// snapshots more expensive, since the new documents can't be backed up +// incrementally. +// +// **Blocks during a force merge** +// +// Calls to this API block until the merge is complete (unless request contains +// `wait_for_completion=false`). +// If the client connection is lost before completion then the force merge +// process will continue in the background. 
+// Any new requests to force merge the same indices will also block until the
+// ongoing force merge is complete.
+//
+// **Running force merge asynchronously**
+//
+// If the request contains `wait_for_completion=false`, Elasticsearch performs
+// some preflight checks, launches the request, and returns a task you can use
+// to get the status of the task.
+// However, you can not cancel this task as the force merge task is not
+// cancelable.
+// Elasticsearch creates a record of this task as a document at
+// `_tasks/<task_id>`.
+// When you are done with a task, you should delete the task document so
+// Elasticsearch can reclaim the space.
+//
+// **Force merging multiple indices**
+//
+// You can force merge multiple indices with a single request by targeting:
+//
+// * One or more data streams that contain multiple backing indices
+// * Multiple indices
+// * One or more aliases
+// * All data streams and indices in a cluster
+//
+// Each targeted shard is force-merged separately using the force_merge
+// threadpool.
+// By default each node only has a single `force_merge` thread which means that
+// the shards on that node are force-merged one at a time.
+// If you expand the `force_merge` threadpool on a node then it will force merge
+// its shards in parallel
+//
+// Force merge makes the storage for the shard being merged temporarily
+// increase, as it may require free space up to triple its size in case
+// `max_num_segments parameter` is set to `1`, to rewrite all segments into a
+// new one.
+//
+// **Data streams and time-based indices**
+//
+// Force-merging is useful for managing a data stream's older backing indices
+// and other time-based indices, particularly after a rollover.
+// In these cases, each index only receives indexing traffic for a certain
+// period of time.
+// Once an index receive no more writes, its shards can be force-merged to a
+// single segment.
+// This can be a good idea because single-segment shards can sometimes use
+// simpler and more efficient data structures to perform searches.
+// For example:
+//
+// ```
+// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+// ```
+package forcemerge
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard"
+)
+
+const (
+	indexMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Forcemerge struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	index string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewForcemerge type alias for index.
+type NewForcemerge func() *Forcemerge
+
+// NewForcemergeFunc returns a new instance of Forcemerge with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewForcemergeFunc(tp elastictransport.Interface) NewForcemerge {
+	return func() *Forcemerge {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Force a merge.
+// Perform the force merge operation on the shards of one or more indices.
+// For data streams, the API forces a merge on the shards of the stream's
+// backing indices.
+//
+// Merging reduces the number of segments in each shard by merging some of them
+// together and also frees up the space used by deleted documents.
+// Merging normally happens automatically, but sometimes it is useful to trigger
+// a merge manually.
+//
+// WARNING: We recommend force merging only a read-only index (meaning the index
+// is no longer receiving writes).
+// When documents are updated or deleted, the old version is not immediately
+// removed but instead soft-deleted and marked with a "tombstone".
+// These soft-deleted documents are automatically cleaned up during regular
+// segment merges.
+// But force merge can cause very large (greater than 5 GB) segments to be
+// produced, which are not eligible for regular merges.
+// So the number of soft-deleted documents can then grow rapidly, resulting in
+// higher disk usage and worse search performance.
+// If you regularly force merge an index receiving writes, this can also make
+// snapshots more expensive, since the new documents can't be backed up
+// incrementally.
+//
+// **Blocks during a force merge**
+//
+// Calls to this API block until the merge is complete (unless request contains
+// `wait_for_completion=false`).
+// If the client connection is lost before completion then the force merge
+// process will continue in the background.
+// Any new requests to force merge the same indices will also block until the
+// ongoing force merge is complete.
+//
+// **Running force merge asynchronously**
+//
+// If the request contains `wait_for_completion=false`, Elasticsearch performs
+// some preflight checks, launches the request, and returns a task you can use
+// to get the status of the task.
+// However, you can not cancel this task as the force merge task is not
+// cancelable.
+// Elasticsearch creates a record of this task as a document at
+// `_tasks/<task_id>`.
+// When you are done with a task, you should delete the task document so
+// Elasticsearch can reclaim the space.
+//
+// **Force merging multiple indices**
+//
+// You can force merge multiple indices with a single request by targeting:
+//
+// * One or more data streams that contain multiple backing indices
+// * Multiple indices
+// * One or more aliases
+// * All data streams and indices in a cluster
+//
+// Each targeted shard is force-merged separately using the force_merge
+// threadpool.
+// By default each node only has a single `force_merge` thread which means that
+// the shards on that node are force-merged one at a time.
+// If you expand the `force_merge` threadpool on a node then it will force merge
+// its shards in parallel
+//
+// Force merge makes the storage for the shard being merged temporarily
+// increase, as it may require free space up to triple its size in case
+// `max_num_segments parameter` is set to `1`, to rewrite all segments into a
+// new one.
+//
+// **Data streams and time-based indices**
+//
+// Force-merging is useful for managing a data stream's older backing indices
+// and other time-based indices, particularly after a rollover.
+// In these cases, each index only receives indexing traffic for a certain
+// period of time.
+// Once an index receive no more writes, its shards can be force-merged to a
+// single segment.
+// This can be a good idea because single-segment shards can sometimes use
+// simpler and more efficient data structures to perform searches.
+// For example: +// +// ``` +// POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +// ``` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge +func New(tp elastictransport.Interface) *Forcemerge { + r := &Forcemerge{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Forcemerge) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_forcemerge") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_forcemerge") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Forcemerge) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.forcemerge") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.forcemerge") + if reader := instrument.RecordRequestBody(ctx, "indices.forcemerge", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.forcemerge") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Forcemerge query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a forcemerge.Response +func (r Forcemerge) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.forcemerge") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Forcemerge) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.forcemerge") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Forcemerge query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Forcemerge headers map. +func (r *Forcemerge) Header(key, value string) *Forcemerge { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of index names; use `_all` or empty string to perform +// the operation on all indices +// API Name: index +func (r *Forcemerge) Index(index string) *Forcemerge { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. (This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *Forcemerge) AllowNoIndices(allownoindices bool) *Forcemerge { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. +// API name: expand_wildcards +func (r *Forcemerge) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Forcemerge { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Flush Specify whether the index should be flushed after performing the operation +// (default: true) +// API name: flush +func (r *Forcemerge) Flush(flush bool) *Forcemerge { + r.values.Set("flush", strconv.FormatBool(flush)) + + return r +} + +// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable +// (missing or closed) +// API name: ignore_unavailable +func (r *Forcemerge) IgnoreUnavailable(ignoreunavailable bool) *Forcemerge { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MaxNumSegments The number of segments the index should be merged into (default: dynamic) +// API name: max_num_segments +func (r *Forcemerge) MaxNumSegments(maxnumsegments string) *Forcemerge { + r.values.Set("max_num_segments", maxnumsegments) + + return r +} + +// OnlyExpungeDeletes Specify whether the operation should only expunge deleted documents +// API name: only_expunge_deletes +func (r *Forcemerge) OnlyExpungeDeletes(onlyexpungedeletes bool) *Forcemerge { + r.values.Set("only_expunge_deletes", strconv.FormatBool(onlyexpungedeletes)) + + return r +} + +// WaitForCompletion Should the request wait until the force merge is completed. 
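`WaitForCompletion(false)` is what switches the call into the asynchronous, task-based mode described in the package comment; the returned `Task` id (see response.go below) can then be polled. A hedged sketch with the assumed `es` client and an illustrative index name:

```
// Kick off an asynchronous force merge down to a single segment.
res, err := es.Indices.Forcemerge().
	Index("my-old-index").
	MaxNumSegments("1").
	WaitForCompletion(false).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
if res.Task != nil {
	// Poll GET /_tasks/<task_id>; delete the task document when finished.
	fmt.Println("force merge running as task:", *res.Task)
}
```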
+// API name: wait_for_completion +func (r *Forcemerge) WaitForCompletion(waitforcompletion bool) *Forcemerge { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Forcemerge) ErrorTrace(errortrace bool) *Forcemerge { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Forcemerge) FilterPath(filterpaths ...string) *Forcemerge { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Forcemerge) Human(human bool) *Forcemerge { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Forcemerge) Pretty(pretty bool) *Forcemerge { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/response.go new file mode 100644 index 000000000..0633e0c59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

+package forcemerge
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package forcemerge
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L25
+type Response struct {
+	Shards_ *types.ShardStatistics `json:"_shards,omitempty"`
+	// Task task contains a task id returned when wait_for_completion=false,
+	// you can use the task_id to get the status of the task at _tasks/<task_id>
+	Task *string `json:"task,omitempty"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/get.go
new file mode 100644
index 000000000..9ddc3782b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/get.go
@@ -0,0 +1,433 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get index information.
+// Get information about one or more indices. For data streams, the API returns
+// information about the
+// stream’s backing indices.
+package get
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature"
+)
+
+const (
+	indexMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Get struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	index string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGet type alias for index.
+type NewGet func(index string) *Get
+
+// NewGetFunc returns a new instance of Get with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(index string) *Get { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get index information. +// Get information about one or more indices. For data streams, the API returns +// information about the +// stream’s backing indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get") + if reader := instrument.RecordRequestBody(ctx, "indices.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
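Concretely, `IsSuccess` collapses the call into a boolean: a 2xx status maps to `(true, nil)`, a 404 to `(false, nil)`, and anything else to `(false, err)`. With the assumed `es` client, that gives a cheap existence-style probe:

```
found, err := es.Indices.Get("maybe-missing-index").IsSuccess(context.Background())
if err != nil {
	log.Fatal(err) // non-404 failure
}
fmt.Println("index present:", found)
```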
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and index aliases used to +// limit the request. +// Wildcard expressions (*) are supported. +// API Name: index +func (r *Get) _index(index string) *Get { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all value targets only +// missing or closed indices. This behavior applies even if the request targets +// other open indices. For example, +// a request targeting foo*,bar* returns an error if an index starts with foo +// but no index starts with bar. +// API name: allow_no_indices +func (r *Get) AllowNoIndices(allownoindices bool) *Get { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard expressions can match. If the request can target +// data streams, this argument +// determines whether wildcard expressions match hidden data streams. Supports +// comma-separated values, +// such as open,hidden. +// API name: expand_wildcards +func (r *Get) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Get { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// FlatSettings If true, returns settings in flat format. +// API name: flat_settings +func (r *Get) FlatSettings(flatsettings bool) *Get { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IgnoreUnavailable If false, requests that target a missing index return an error. +// API name: ignore_unavailable +func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeDefaults If true, return all default settings in the response. +// API name: include_defaults +func (r *Get) IncludeDefaults(includedefaults bool) *Get { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// Local If true, the request retrieves information from the local node only. Defaults +// to false, which means information is retrieved from the master node. +// API name: local +func (r *Get) Local(local bool) *Get { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. 
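Pulling the query parameters together, a hedged sketch of a full `Get` call (assumed `es` client; `expandwildcard.Open`, `feature.Mappings`, and `feature.Settings` are values from the enums packages imported above):

```
// Fetch only mappings and settings for open indices matching a pattern.
res, err := es.Indices.Get("logs-*").
	ExpandWildcards(expandwildcard.Open).
	Features(feature.Mappings, feature.Settings).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
for name := range res { // Response is map[string]types.IndexState
	fmt.Println("matched index:", name)
}
```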
If no response is
+// received before the timeout expires, the request fails and returns an error.
+// API name: master_timeout
+func (r *Get) MasterTimeout(duration string) *Get {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Features Return only information on specified index features
+// API name: features
+func (r *Get) Features(features ...feature.Feature) *Get {
+	tmp := []string{}
+	for _, item := range features {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("features", strings.Join(tmp, ","))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Get) ErrorTrace(errortrace bool) *Get {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Get) FilterPath(filterpaths ...string) *Get {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Get) Human(human bool) *Get {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Get) Pretty(pretty bool) *Get {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/response.go
new file mode 100644
index 000000000..45b9170d0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/get/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get/IndicesGetResponse.ts#L24-L27 + +type Response map[string]types.IndexState + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/get_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/get_alias.go new file mode 100644 index 000000000..62fc7b91e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/get_alias.go @@ -0,0 +1,437 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get aliases. +// Retrieves information for one or more data stream or index aliases. +package getalias + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetAlias type alias for index. +type NewGetAlias func() *GetAlias + +// NewGetAliasFunc returns a new instance of GetAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetAliasFunc(tp elastictransport.Interface) NewGetAlias { + return func() *GetAlias { + n := New(tp) + + return n + } +} + +// Get aliases. +// Retrieves information for one or more data stream or index aliases. 
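A usage sketch, assuming the same `es` client and assuming that this package's `Response` (defined in its response.go) follows the map-keyed-by-index convention of the `get` package above:

```
res, err := es.Indices.GetAlias().
	Index("my-index").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
for index := range res { // assumed: one entry per index with its aliases
	fmt.Println("aliases found on:", index)
}
```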
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias +func New(tp elastictransport.Interface) *GetAlias { + r := &GetAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_alias") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_alias") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_alias") + if reader := instrument.RecordRequestBody(ctx, "indices.get_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getalias.Response +func (r GetAlias) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
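+//
+// Editor's sketch (assuming a transport `tp`):
+//
+//    ok, err := getalias.New(tp).Name("current").IsSuccess(context.Background())
+//    // ok is true on a 2xx status, false with a nil error on 404,
+//    // and false with a non-nil error for any other failure.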
+func (r GetAlias) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetAlias query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetAlias headers map. +func (r *GetAlias) Header(key, value string) *GetAlias { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of aliases to retrieve. +// Supports wildcards (`*`). +// To retrieve all aliases, omit this parameter or use `*` or `_all`. +// API Name: name +func (r *GetAlias) Name(name string) *GetAlias { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *GetAlias) Index(index string) *GetAlias { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *GetAlias) AllowNoIndices(allownoindices bool) *GetAlias { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetAlias) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetAlias { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *GetAlias) IgnoreUnavailable(ignoreunavailable bool) *GetAlias { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetAlias) MasterTimeout(duration string) *GetAlias { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetAlias) ErrorTrace(errortrace bool) *GetAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetAlias) FilterPath(filterpaths ...string) *GetAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetAlias) Human(human bool) *GetAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetAlias) Pretty(pretty bool) *GetAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/response.go new file mode 100644 index 000000000..67e8cd1b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getalias + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getalias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_alias/IndicesGetAliasResponse.ts#L28-L37 + +type Response map[string]types.IndexAliases + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/get_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/get_data_lifecycle.go new file mode 100644 index 000000000..543ad7141 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/get_data_lifecycle.go @@ -0,0 +1,381 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data stream lifecycles. +// +// Get the data stream lifecycle configuration of one or more data streams. +package getdatalifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataLifecycle type alias for index. +type NewGetDataLifecycle func(name string) *GetDataLifecycle + +// NewGetDataLifecycleFunc returns a new instance of GetDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataLifecycleFunc(tp elastictransport.Interface) NewGetDataLifecycle { + return func(name string) *GetDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Get data stream lifecycles. +// +// Get the data stream lifecycle configuration of one or more data streams. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle +func New(tp elastictransport.Interface) *GetDataLifecycle { + r := &GetDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
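+//
+// Editor's sketch (assuming a transport `tp`): the constructor returned by
+// NewGetDataLifecycleFunc makes the data stream name mandatory, so
+//
+//    req, err := getdatalifecycle.NewGetDataLifecycleFunc(tp)("my-stream").HttpRequest(context.Background())
+//    // err permitting, req targets GET /_data_stream/my-stream/_lifecycle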
+func (r *GetDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetDataLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatalifecycle.Response +func (r GetDataLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + 
errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetDataLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataLifecycle headers map. +func (r *GetDataLifecycle) Header(key, value string) *GetDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams to limit the request. +// Supports wildcards (`*`). +// To target all data streams, omit this parameter or use `*` or `_all`. +// API Name: name +func (r *GetDataLifecycle) _name(name string) *GetDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IncludeDefaults If `true`, return all default settings in the response. +// API name: include_defaults +func (r *GetDataLifecycle) IncludeDefaults(includedefaults bool) *GetDataLifecycle { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetDataLifecycle) MasterTimeout(duration string) *GetDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetDataLifecycle) ErrorTrace(errortrace bool) *GetDataLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataLifecycle) FilterPath(filterpaths ...string) *GetDataLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataLifecycle) Human(human bool) *GetDataLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataLifecycle) Pretty(pretty bool) *GetDataLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/response.go new file mode 100644 index 000000000..d3895a2d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatalifecycle + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 +type Response struct { + DataStreams []types.DataStreamWithLifecycle `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go new file mode 100644 index 000000000..defb984b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/get_data_lifecycle_stats.go @@ -0,0 +1,325 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. +package getdatalifecyclestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataLifecycleStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataLifecycleStats type alias for index. +type NewGetDataLifecycleStats func() *GetDataLifecycleStats + +// NewGetDataLifecycleStatsFunc returns a new instance of GetDataLifecycleStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataLifecycleStatsFunc(tp elastictransport.Interface) NewGetDataLifecycleStats { + return func() *GetDataLifecycleStats { + n := New(tp) + + return n + } +} + +// Get data stream lifecycle stats. +// Get statistics about the data streams that are managed by a data stream +// lifecycle. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats +func New(tp elastictransport.Interface) *GetDataLifecycleStats { + r := &GetDataLifecycleStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataLifecycleStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetDataLifecycleStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_lifecycle_stats") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_lifecycle_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_lifecycle_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatalifecyclestats.Response +func (r GetDataLifecycleStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetDataLifecycleStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_lifecycle_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataLifecycleStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataLifecycleStats headers map. +func (r *GetDataLifecycleStats) Header(key, value string) *GetDataLifecycleStats { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataLifecycleStats) ErrorTrace(errortrace bool) *GetDataLifecycleStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataLifecycleStats) FilterPath(filterpaths ...string) *GetDataLifecycleStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataLifecycleStats) Human(human bool) *GetDataLifecycleStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataLifecycleStats) Pretty(pretty bool) *GetDataLifecycleStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/response.go new file mode 100644 index 000000000..44bf69c6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatalifecyclestats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatalifecyclestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L24-L44 +type Response struct { + + // DataStreamCount The count of data streams currently being managed by the data stream + // lifecycle. + DataStreamCount int `json:"data_stream_count"` + // DataStreams Information about the data streams that are managed by the data stream + // lifecycle. + DataStreams []types.DataStreamStats `json:"data_streams"` + // LastRunDurationInMillis The duration of the last data stream lifecycle execution. + LastRunDurationInMillis *int64 `json:"last_run_duration_in_millis,omitempty"` + // TimeBetweenStartsInMillis The time that passed between the start of the last two data stream lifecycle + // executions. + // This value should amount approximately to + // `data_streams.lifecycle.poll_interval`. + TimeBetweenStartsInMillis *int64 `json:"time_between_starts_in_millis,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/get_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/get_data_stream.go new file mode 100644 index 000000000..f4bbc9d70 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/get_data_stream.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data streams. +// +// Get information about one or more data streams. +package getdatastream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataStream type alias for index. +type NewGetDataStream func() *GetDataStream + +// NewGetDataStreamFunc returns a new instance of GetDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataStreamFunc(tp elastictransport.Interface) NewGetDataStream { + return func() *GetDataStream { + n := New(tp) + + return n + } +} + +// Get data streams. +// +// Get information about one or more data streams. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream +func New(tp elastictransport.Interface) *GetDataStream { + r := &GetDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
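+//
+// Editor's illustration (not generated code), assuming a transport `tp`;
+// Name is optional here, so omitting it returns every data stream:
+//
+//    res, err := getdatastream.New(tp).Name("logs-*").Do(context.Background())
+//    // on success, res.DataStreams holds a []types.DataStream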
+func (r *GetDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_data_stream") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatastream.Response +func (r GetDataStream) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, 
nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetDataStream) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataStream query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataStream headers map. +func (r *GetDataStream) Header(key, value string) *GetDataStream { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data stream names used to limit the request. +// Wildcard (`*`) expressions are supported. If omitted, all data streams are +// returned. +// API Name: name +func (r *GetDataStream) Name(name string) *GetDataStream { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetDataStream) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetDataStream { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *GetDataStream) IncludeDefaults(includedefaults bool) *GetDataStream { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetDataStream) MasterTimeout(duration string) *GetDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Verbose Whether the maximum timestamp for each data stream should be calculated and +// returned. +// API name: verbose +func (r *GetDataStream) Verbose(verbose bool) *GetDataStream { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetDataStream) ErrorTrace(errortrace bool) *GetDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataStream) FilterPath(filterpaths ...string) *GetDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataStream) Human(human bool) *GetDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataStream) Pretty(pretty bool) *GetDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/response.go new file mode 100644 index 000000000..3ea8c900f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatastream + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 +type Response struct { + DataStreams []types.DataStream `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/get_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/get_data_stream_options.go new file mode 100644 index 000000000..a9a9403ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/get_data_stream_options.go @@ -0,0 +1,373 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data stream options. +// +// Get the data stream options configuration of one or more data streams. +package getdatastreamoptions + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataStreamOptions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataStreamOptions type alias for index. +type NewGetDataStreamOptions func(name string) *GetDataStreamOptions + +// NewGetDataStreamOptionsFunc returns a new instance of GetDataStreamOptions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataStreamOptionsFunc(tp elastictransport.Interface) NewGetDataStreamOptions { + return func(name string) *GetDataStreamOptions { + n := New(tp) + + n._name(name) + + return n + } +} + +// Get data stream options. 
+// +// Get the data stream options configuration of one or more data streams. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +func New(tp elastictransport.Interface) *GetDataStreamOptions { + r := &GetDataStreamOptions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataStreamOptions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_options") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
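+//
+// Editor's sketch (assuming a transport `tp`); the name argument is required
+// by the constructor returned from NewGetDataStreamOptionsFunc:
+//
+//    res, err := getdatastreamoptions.NewGetDataStreamOptionsFunc(tp)("my-stream").Perform(context.Background())
+//    // res is the raw *http.Response for GET /_data_stream/my-stream/_options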
+func (r GetDataStreamOptions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_stream_options") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_stream_options") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_stream_options", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream_options") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataStreamOptions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatastreamoptions.Response +func (r GetDataStreamOptions) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
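+//
+// Editor's sketch (assuming a transport `tp`):
+//
+//    ok, err := getdatastreamoptions.NewGetDataStreamOptionsFunc(tp)("opts-stream").IsSuccess(context.Background())
+//    // ok reports a 2xx answer; a 404 yields (false, nil).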
+func (r GetDataStreamOptions) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataStreamOptions query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataStreamOptions headers map. +func (r *GetDataStreamOptions) Header(key, value string) *GetDataStreamOptions { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams to limit the request. +// Supports wildcards (`*`). +// To target all data streams, omit this parameter or use `*` or `_all`. +// API Name: name +func (r *GetDataStreamOptions) _name(name string) *GetDataStreamOptions { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetDataStreamOptions) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetDataStreamOptions { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetDataStreamOptions) MasterTimeout(duration string) *GetDataStreamOptions { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataStreamOptions) ErrorTrace(errortrace bool) *GetDataStreamOptions { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataStreamOptions) FilterPath(filterpaths ...string) *GetDataStreamOptions { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataStreamOptions) Human(human bool) *GetDataStreamOptions { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataStreamOptions) Pretty(pretty bool) *GetDataStreamOptions { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/response.go new file mode 100644 index 000000000..e6deb4965 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatastreamoptions + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatastreamoptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_stream_options/IndicesGetDataStreamOptionsResponse.ts#L23-L25 +type Response struct { + DataStreams []types.DataStreamWithOptions `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/get_data_stream_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/get_data_stream_settings.go new file mode 100644 index 000000000..7b1201cc3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/get_data_stream_settings.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data stream settings. +// +// Get setting information for one or more data streams. +package getdatastreamsettings + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataStreamSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataStreamSettings type alias for index. +type NewGetDataStreamSettings func(name string) *GetDataStreamSettings + +// NewGetDataStreamSettingsFunc returns a new instance of GetDataStreamSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataStreamSettingsFunc(tp elastictransport.Interface) NewGetDataStreamSettings { + return func(name string) *GetDataStreamSettings { + n := New(tp) + + n._name(name) + + return n + } +} + +// Get data stream settings. +// +// Get setting information for one or more data streams. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings +func New(tp elastictransport.Interface) *GetDataStreamSettings { + r := &GetDataStreamSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataStreamSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
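Because HttpRequest above builds the request without sending it, tests can assert on the wire format directly. A sketch against a bare elastic-transport client; the URL is a placeholder that is never dialed here, and the expected path follows from the switch in HttpRequest:

package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // placeholder address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	req, err := getdatastreamsettings.NewGetDataStreamSettingsFunc(tp)("logs-*").
		MasterTimeout("30s").
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Expect: GET /_data_stream/logs-*/_settings master_timeout=30s
	fmt.Println(req.Method, req.URL.Path, req.URL.RawQuery)
}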
+func (r GetDataStreamSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_data_stream_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_data_stream_settings") + if reader := instrument.RecordRequestBody(ctx, "indices.get_data_stream_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_data_stream_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataStreamSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatastreamsettings.Response +func (r GetDataStreamSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetDataStreamSettings) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_data_stream_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataStreamSettings query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataStreamSettings headers map. +func (r *GetDataStreamSettings) Header(key, value string) *GetDataStreamSettings { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of data streams or data stream patterns. Supports +// wildcards (`*`). +// API Name: name +func (r *GetDataStreamSettings) _name(name string) *GetDataStreamSettings { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *GetDataStreamSettings) MasterTimeout(duration string) *GetDataStreamSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataStreamSettings) ErrorTrace(errortrace bool) *GetDataStreamSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataStreamSettings) FilterPath(filterpaths ...string) *GetDataStreamSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataStreamSettings) Human(human bool) *GetDataStreamSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetDataStreamSettings) Pretty(pretty bool) *GetDataStreamSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/response.go new file mode 100644 index 000000000..13b7aafd7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatastreamsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatastreamsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_stream_settings/IndicesGetDataStreamSettingsResponse.ts#L22-L27 +type Response struct { + DataStreams []types.DataStreamSettings `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/get_field_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/get_field_mapping.go new file mode 100644 index 000000000..7d65b3332 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/get_field_mapping.go @@ -0,0 +1,436 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get mapping definitions. 
+// Retrieves mapping definitions for one or more fields. +// For data streams, the API retrieves field mappings for the stream’s backing +// indices. +// +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. +package getfieldmapping + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + fieldsMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetFieldMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + fields string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetFieldMapping type alias for index. +type NewGetFieldMapping func(fields string) *GetFieldMapping + +// NewGetFieldMappingFunc returns a new instance of GetFieldMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFieldMappingFunc(tp elastictransport.Interface) NewGetFieldMapping { + return func(fields string) *GetFieldMapping { + n := New(tp) + + n._fields(fields) + + return n + } +} + +// Get mapping definitions. +// Retrieves mapping definitions for one or more fields. +// For data streams, the API retrieves field mappings for the stream’s backing +// indices. +// +// This API is useful if you don't need a complete mapping or if an index +// mapping contains a large number of fields. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping +func New(tp elastictransport.Interface) *GetFieldMapping { + r := &GetFieldMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetFieldMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == fieldsMask: + path.WriteString("/") + path.WriteString("_mapping") + path.WriteString("/") + path.WriteString("field") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "fields", r.fields) + } + path.WriteString(r.fields) + + method = http.MethodGet + case r.paramSet == indexMask|fieldsMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mapping") + path.WriteString("/") + path.WriteString("field") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "fields", r.fields) + } + path.WriteString(r.fields) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
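The paramSet bitmask drives the switch above: fieldsMask alone produces GET /_mapping/field/{fields}, while indexMask|fieldsMask scopes it to GET /{index}/_mapping/field/{fields}. A sketch with placeholder field and index names, where tp is any elastictransport.Interface:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping"
)

func showFieldMappingPaths(tp elastictransport.Interface) {
	build := getfieldmapping.NewGetFieldMappingFunc(tp)

	// fieldsMask only -> GET /_mapping/field/message
	global, err := build("message").HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// indexMask|fieldsMask -> GET /logs-2025/_mapping/field/message
	scoped, err := build("message").Index("logs-2025").HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(global.URL.Path, scoped.URL.Path)
}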
+func (r GetFieldMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_field_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_field_mapping") + if reader := instrument.RecordRequestBody(ctx, "indices.get_field_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_field_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetFieldMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getfieldmapping.Response +func (r GetFieldMapping) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_field_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
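Note that getfieldmapping.Response, declared in response.go later in this diff, is a map keyed by index name, so Do above returns a map value rather than a struct pointer. An iteration sketch; the Mappings field on types.TypeFieldMappings is an assumption from the documented JSON shape, and es, ctx, and the names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func listFieldMappings(ctx context.Context, es *elasticsearch.TypedClient) {
	res, err := es.Indices.GetFieldMapping("message").Index("logs-*").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for index, tfm := range res {
		// Mappings is assumed to be map[string]types.FieldMapping.
		for field := range tfm.Mappings {
			fmt.Println(index, field)
		}
	}
}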
+func (r GetFieldMapping) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_field_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetFieldMapping query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetFieldMapping headers map. +func (r *GetFieldMapping) Header(key, value string) *GetFieldMapping { + r.headers.Set(key, value) + + return r +} + +// Fields Comma-separated list or wildcard expression of fields used to limit returned +// information. +// Supports wildcards (`*`). +// API Name: fields +func (r *GetFieldMapping) _fields(fields string) *GetFieldMapping { + r.paramSet |= fieldsMask + r.fields = fields + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *GetFieldMapping) Index(index string) *GetFieldMapping { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *GetFieldMapping) AllowNoIndices(allownoindices bool) *GetFieldMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetFieldMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetFieldMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *GetFieldMapping) IgnoreUnavailable(ignoreunavailable bool) *GetFieldMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeDefaults If `true`, return all default settings in the response. +// API name: include_defaults +func (r *GetFieldMapping) IncludeDefaults(includedefaults bool) *GetFieldMapping { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetFieldMapping) ErrorTrace(errortrace bool) *GetFieldMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetFieldMapping) FilterPath(filterpaths ...string) *GetFieldMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetFieldMapping) Human(human bool) *GetFieldMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetFieldMapping) Pretty(pretty bool) *GetFieldMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/response.go new file mode 100644 index 000000000..01625e14b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getfieldmapping + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getfieldmapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L27 + +type Response map[string]types.TypeFieldMappings + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/get_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/get_index_template.go new file mode 100644 index 000000000..6df2b30d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/get_index_template.go @@ -0,0 +1,382 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index templates. +// Get information about one or more index templates. +package getindextemplate + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetIndexTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetIndexTemplate type alias for index. +type NewGetIndexTemplate func() *GetIndexTemplate + +// NewGetIndexTemplateFunc returns a new instance of GetIndexTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetIndexTemplateFunc(tp elastictransport.Interface) NewGetIndexTemplate { + return func() *GetIndexTemplate { + n := New(tp) + + return n + } +} + +// Get index templates. +// Get information about one or more index templates. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template +func New(tp elastictransport.Interface) *GetIndexTemplate { + r := &GetIndexTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_index_template") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
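Name is optional for this endpoint: with paramSet == 0 the switch above emits GET /_index_template for all templates, and setting a name narrows the request to GET /_index_template/{name}. A sketch with placeholder patterns, assuming the typed client exposes Indices.GetIndexTemplate:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func listTemplates(ctx context.Context, es *elasticsearch.TypedClient) {
	// paramSet == 0: list every index template on the cluster.
	all, err := es.Indices.GetIndexTemplate().Do(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// nameMask: narrow to a wildcard pattern, settings flattened for readability.
	some, err := es.Indices.GetIndexTemplate().Name("logs-*").FlatSettings(true).Do(ctx)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(len(all.IndexTemplates), len(some.IndexTemplates))
}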
+func (r GetIndexTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_index_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.get_index_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_index_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetIndexTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getindextemplate.Response +func (r GetIndexTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
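Since IsSuccess below drains the response body and maps a 404 to (false, nil), it doubles as a cheap existence probe; any other non-2xx status surfaces as an error. A sketch with a hypothetical template name:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func templateExists(ctx context.Context, es *elasticsearch.TypedClient) bool {
	// "logs-template" is a hypothetical name.
	exists, err := es.Indices.GetIndexTemplate().Name("logs-template").IsSuccess(ctx)
	if err != nil {
		// Transport failure, or a non-2xx status other than 404.
		log.Fatal(err)
	}
	if !exists {
		fmt.Println("template missing: 404 is reported as (false, nil)")
	}
	return exists
}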
+func (r GetIndexTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetIndexTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetIndexTemplate headers map. +func (r *GetIndexTemplate) Header(key, value string) *GetIndexTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of index template names used to limit the request. +// Wildcard (*) expressions are supported. +// API Name: name +func (r *GetIndexTemplate) Name(name string) *GetIndexTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Local If true, the request retrieves information from the local node only. Defaults +// to false, which means information is retrieved from the master node. +// API name: local +func (r *GetIndexTemplate) Local(local bool) *GetIndexTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// FlatSettings If true, returns settings in flat format. +// API name: flat_settings +func (r *GetIndexTemplate) FlatSettings(flatsettings bool) *GetIndexTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetIndexTemplate) MasterTimeout(duration string) *GetIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *GetIndexTemplate) IncludeDefaults(includedefaults bool) *GetIndexTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetIndexTemplate) ErrorTrace(errortrace bool) *GetIndexTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetIndexTemplate) FilterPath(filterpaths ...string) *GetIndexTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetIndexTemplate) Human(human bool) *GetIndexTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetIndexTemplate) Pretty(pretty bool) *GetIndexTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/response.go new file mode 100644 index 000000000..4f8f633d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getindextemplate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getindextemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 +type Response struct { + IndexTemplates []types.IndexTemplateItem `json:"index_templates"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/get_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/get_mapping.go new file mode 100644 index 000000000..cf013cfdc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/get_mapping.go @@ -0,0 +1,406 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get mapping definitions. +// For data streams, the API retrieves mappings for the stream’s backing +// indices. +package getmapping + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetMapping type alias for index. +type NewGetMapping func() *GetMapping + +// NewGetMappingFunc returns a new instance of GetMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetMappingFunc(tp elastictransport.Interface) NewGetMapping { + return func() *GetMapping { + n := New(tp) + + return n + } +} + +// Get mapping definitions. +// For data streams, the API retrieves mappings for the stream’s backing +// indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping +func New(tp elastictransport.Interface) *GetMapping { + r := &GetMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
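Do, defined further below, returns the map-typed getmapping.Response from response.go at the end of this file's diff, keyed by concrete index names even when the request used a wildcard. A consumption sketch; the Mappings and Properties fields on types.IndexMappingRecord are assumptions based on the usual TypeMapping shape:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func summarizeMappings(ctx context.Context, es *elasticsearch.TypedClient) {
	res, err := es.Indices.GetMapping().Index("logs-*").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for index, record := range res {
		// record.Mappings is assumed to be a types.TypeMapping whose
		// Properties map holds the top-level field definitions.
		fmt.Printf("%s: %d top-level fields\n", index, len(record.Mappings.Properties))
	}
}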
+func (r *GetMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_mapping") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mapping") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_mapping") + if reader := instrument.RecordRequestBody(ctx, "indices.get_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmapping.Response +func (r GetMapping) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetMapping) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetMapping query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetMapping headers map. +func (r *GetMapping) Header(key, value string) *GetMapping { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *GetMapping) Index(index string) *GetMapping { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *GetMapping) AllowNoIndices(allownoindices bool) *GetMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *GetMapping) IgnoreUnavailable(ignoreunavailable bool) *GetMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. 
+// API name: local +func (r *GetMapping) Local(local bool) *GetMapping { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetMapping) MasterTimeout(duration string) *GetMapping { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetMapping) ErrorTrace(errortrace bool) *GetMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetMapping) FilterPath(filterpaths ...string) *GetMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetMapping) Human(human bool) *GetMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetMapping) Pretty(pretty bool) *GetMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/response.go new file mode 100644 index 000000000..5d915e679 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmapping + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getmapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L27 + +type Response map[string]types.IndexMappingRecord + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go new file mode 100644 index 000000000..ae4f90f6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/get_migrate_reindex_status.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the migration reindexing status. +// +// Get the status of a migration reindex attempt for a data stream or index. +package getmigratereindexstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetMigrateReindexStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetMigrateReindexStatus type alias for index. +type NewGetMigrateReindexStatus func(index string) *GetMigrateReindexStatus + +// NewGetMigrateReindexStatusFunc returns a new instance of GetMigrateReindexStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetMigrateReindexStatusFunc(tp elastictransport.Interface) NewGetMigrateReindexStatus { + return func(index string) *GetMigrateReindexStatus { + n := New(tp) + + n._index(index) + + return n + } +} + +// Get the migration reindexing status. 
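A usage note on the vendored getmapping package above: callers normally reach this builder through the v9 typed client rather than constructing it against a transport directly. A minimal sketch, assuming a cluster at localhost:9200; the index name is a placeholder, and only the builder methods and the Response map type are taken from the generated code in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// NewTypedClient wires up an elastictransport implementation, which is
	// what the generated New(tp) constructors in this diff receive.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Do returns getmapping.Response, i.e. map[string]types.IndexMappingRecord,
	// keyed by the concrete index name (wildcards passed to Index() expand
	// server-side).
	res, err := es.Indices.GetMapping().Index("logs-2025").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for index, record := range res {
		fmt.Printf("%s: %+v\n", index, record)
	}
}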
+// +// Get the status of a migration reindex attempt for a data stream or index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration +func New(tp elastictransport.Interface) *GetMigrateReindexStatus { + r := &GetMigrateReindexStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetMigrateReindexStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
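One practical detail of the HttpRequest builder just above: the Accept header (and Content-Type, when a request body is present) defaults to application/vnd.elasticsearch+json;compatible-with=9 only when the caller has not already set it, so the generated Header setter can override the media type per request. A sketch, with es and ctx as in the earlier sketch, and the endpoint accessor name assumed to follow the typed client's usual generated naming:

// Caller-supplied headers win over the compatible-with=9 defaults because the
// builder only fills in Accept/Content-Type when they are empty.
status, err := es.Indices.GetMigrateReindexStatus("my-data-stream").
	Header("Accept", "application/json").
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("complete:", status.Complete)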
+func (r GetMigrateReindexStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_migrate_reindex_status") + if reader := instrument.RecordRequestBody(ctx, "indices.get_migrate_reindex_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_migrate_reindex_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmigratereindexstatus.Response +func (r GetMigrateReindexStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetMigrateReindexStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_migrate_reindex_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetMigrateReindexStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetMigrateReindexStatus headers map. +func (r *GetMigrateReindexStatus) Header(key, value string) *GetMigrateReindexStatus { + r.headers.Set(key, value) + + return r +} + +// Index The index or data stream name. +// API Name: index +func (r *GetMigrateReindexStatus) _index(index string) *GetMigrateReindexStatus { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetMigrateReindexStatus) ErrorTrace(errortrace bool) *GetMigrateReindexStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetMigrateReindexStatus) FilterPath(filterpaths ...string) *GetMigrateReindexStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetMigrateReindexStatus) Human(human bool) *GetMigrateReindexStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetMigrateReindexStatus) Pretty(pretty bool) *GetMigrateReindexStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/response.go new file mode 100644 index 000000000..eeb35b539 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus/response.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmigratereindexstatus + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getmigratereindexstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L23-L36 +type Response struct { + Complete bool `json:"complete"` + Errors []types.StatusError `json:"errors"` + Exception *string `json:"exception,omitempty"` + InProgress []types.StatusInProgress `json:"in_progress"` + Pending int `json:"pending"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeMillis int64 `json:"start_time_millis"` + Successes int `json:"successes"` + TotalIndicesInDataStream int `json:"total_indices_in_data_stream"` + TotalIndicesRequiringUpgrade int `json:"total_indices_requiring_upgrade"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "complete": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Complete", err) + } + s.Complete = value + case bool: + s.Complete = v + } + + case "errors": + if err := dec.Decode(&s.Errors); err != nil { + return fmt.Errorf("%s | %w", "Errors", err) + } + + case "exception": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Exception", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Exception = &o + + case "in_progress": + if err := dec.Decode(&s.InProgress); err != nil { + return fmt.Errorf("%s | %w", "InProgress", err) + } + + case "pending": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pending", err) + } + s.Pending = value + case float64: + f := int(v) + s.Pending = f + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + + case "successes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return 
fmt.Errorf("%s | %w", "Successes", err) + } + s.Successes = value + case float64: + f := int(v) + s.Successes = f + } + + case "total_indices_in_data_stream": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesInDataStream", err) + } + s.TotalIndicesInDataStream = value + case float64: + f := int(v) + s.TotalIndicesInDataStream = f + } + + case "total_indices_requiring_upgrade": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndicesRequiringUpgrade", err) + } + s.TotalIndicesRequiringUpgrade = value + case float64: + f := int(v) + s.TotalIndicesRequiringUpgrade = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/get_settings.go new file mode 100644 index 000000000..6f3605669 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/get_settings.go @@ -0,0 +1,465 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index settings. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. +package getsettings + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSettings type alias for index. +type NewGetSettings func() *GetSettings + +// NewGetSettingsFunc returns a new instance of GetSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { + return func() *GetSettings { + n := New(tp) + + return n + } +} + +// Get index settings. +// Get setting information for one or more indices. +// For data streams, it returns setting information for the stream's backing +// indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings +func New(tp elastictransport.Interface) *GetSettings { + r := &GetSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodGet + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_settings") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_settings") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_settings") + if reader := instrument.RecordRequestBody(ctx, "indices.get_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsettings.Response +func (r GetSettings) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
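On the error path of the Do implementation above: any non-2xx response is decoded into *types.ElasticsearchError, with Status backfilled from the HTTP status code when the body does not carry one, and that value is returned as the error. Callers can therefore branch on it with errors.As. A sketch (imports: errors, fmt, and the vendored typedapi/types package), with es and ctx as in the earlier sketches and a deliberately missing index:

_, err := es.Indices.GetSettings().Index("no-such-index").Do(ctx)
var esErr *types.ElasticsearchError
if errors.As(err, &esErr) {
	// Status comes from the error body, or from the HTTP response when the
	// body omitted it (see the Do method above).
	fmt.Println("elasticsearch error, status:", esErr.Status)
}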
+func (r GetSettings) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSettings query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSettings headers map. +func (r *GetSettings) Header(key, value string) *GetSettings { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (`*`). To target all data streams and +// indices, omit this parameter or use `*` or `_all`. +// API Name: index +func (r *GetSettings) Index(index string) *GetSettings { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Name Comma-separated list or wildcard expression of settings to retrieve. +// API Name: name +func (r *GetSettings) Name(name string) *GetSettings { + r.paramSet |= nameMask + r.name = name + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. This +// behavior applies even if the request targets other open indices. For +// example, a request targeting `foo*,bar*` returns an error if an index +// starts with foo but no index starts with `bar`. +// API name: allow_no_indices +func (r *GetSettings) AllowNoIndices(allownoindices bool) *GetSettings { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *GetSettings) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetSettings { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// FlatSettings If `true`, returns settings in flat format. +// API name: flat_settings +func (r *GetSettings) FlatSettings(flatsettings bool) *GetSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *GetSettings) IgnoreUnavailable(ignoreunavailable bool) *GetSettings { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IncludeDefaults If `true`, return all default settings in the response. 
+// API name: include_defaults +func (r *GetSettings) IncludeDefaults(includedefaults bool) *GetSettings { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. If +// `false`, information is retrieved from the master node. +// API name: local +func (r *GetSettings) Local(local bool) *GetSettings { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/response.go new file mode 100644 index 000000000..a17159e15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L27 + +type Response map[string]types.IndexState + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/get_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/get_template.go new file mode 100644 index 000000000..d9841bc58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/get_template.go @@ -0,0 +1,384 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get legacy index templates. +// Get information about one or more index templates. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +package gettemplate + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTemplate type alias for index. +type NewGetTemplate func() *GetTemplate + +// NewGetTemplateFunc returns a new instance of GetTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetTemplateFunc(tp elastictransport.Interface) NewGetTemplate { + return func() *GetTemplate { + n := New(tp) + + return n + } +} + +// Get legacy index templates. +// Get information about one or more index templates. 
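A usage sketch for the getsettings package now that both the builder and its Response type are in view: the response is a map[string]types.IndexState keyed by concrete index name. Index, Name, and FlatSettings below are the generated setters from this diff; the patterns themselves are placeholders, and es/ctx are as in the earlier sketches.

settings, err := es.Indices.GetSettings().
	Index("logs-*").
	Name("index.number_of_shards").
	FlatSettings(true).
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
for index := range settings {
	fmt.Println("settings returned for:", index)
}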
+// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template +func New(tp elastictransport.Interface) *GetTemplate { + r := &GetTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_template") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.get_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.get_template") + if reader := instrument.RecordRequestBody(ctx, "indices.get_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.get_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettemplate.Response +func (r GetTemplate) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.get_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTemplate headers map. +func (r *GetTemplate) Header(key, value string) *GetTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of index template names used to limit the request. +// Wildcard (`*`) expressions are supported. +// To return all index templates, omit this parameter or use a value of `_all` +// or `*`. +// API Name: name +func (r *GetTemplate) Name(name string) *GetTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// FlatSettings If `true`, returns settings in flat format. +// API name: flat_settings +func (r *GetTemplate) FlatSettings(flatsettings bool) *GetTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// Local If `true`, the request retrieves information from the local node only. +// API name: local +func (r *GetTemplate) Local(local bool) *GetTemplate { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetTemplate) MasterTimeout(duration string) *GetTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTemplate) ErrorTrace(errortrace bool) *GetTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTemplate) FilterPath(filterpaths ...string) *GetTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetTemplate) Human(human bool) *GetTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetTemplate) Pretty(pretty bool) *GetTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/response.go new file mode 100644 index 000000000..b36dfd212 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettemplate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L26 + +type Response map[string]types.TemplateMapping + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/migrate_reindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/migrate_reindex.go new file mode 100644 index 000000000..708fdf5d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/migrate_reindex.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. 
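And for the gettemplate package, a matching sketch. As the doc comments above stress, this reads legacy index templates, which are deprecated in favor of the composable templates introduced in Elasticsearch 7.8, so prefer the composable-template APIs in new code. The template name is a placeholder; es/ctx as before.

// Response is map[string]types.TemplateMapping keyed by template name.
legacy, err := es.Indices.GetTemplate().Name("template_1").Do(ctx)
if err != nil {
	log.Fatal(err)
}
for name, tmpl := range legacy {
	fmt.Printf("%s: order=%v patterns=%v\n", name, tmpl.Order, tmpl.IndexPatterns)
}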
+// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +package migratereindex + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MigrateReindex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMigrateReindex type alias for index. +type NewMigrateReindex func() *MigrateReindex + +// NewMigrateReindexFunc returns a new instance of MigrateReindex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMigrateReindexFunc(tp elastictransport.Interface) NewMigrateReindex { + return func() *MigrateReindex { + n := New(tp) + + return n + } +} + +// Reindex legacy backing indices. +// +// Reindex all legacy backing indices for a data stream. +// This operation occurs in a persistent task. +// The persistent task ID is returned immediately and the reindexing work is +// completed in that task. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex +func New(tp elastictransport.Interface) *MigrateReindex { + r := &MigrateReindex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *MigrateReindex) Raw(raw io.Reader) *MigrateReindex { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MigrateReindex) Request(req *Request) *MigrateReindex { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *MigrateReindex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MigrateReindex: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("reindex") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r MigrateReindex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_reindex") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_reindex", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_reindex") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MigrateReindex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a migratereindex.Response +func (r MigrateReindex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_reindex") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the MigrateReindex headers map. +func (r *MigrateReindex) Header(key, value string) *MigrateReindex { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MigrateReindex) ErrorTrace(errortrace bool) *MigrateReindex { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MigrateReindex) FilterPath(filterpaths ...string) *MigrateReindex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MigrateReindex) Human(human bool) *MigrateReindex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *MigrateReindex) Pretty(pretty bool) *MigrateReindex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Reindex mode. Currently only 'upgrade' is supported. +// API name: mode +func (r *MigrateReindex) Mode(mode modeenum.ModeEnum) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Mode = mode + return r +} + +// The source index or data stream (only data streams are currently supported). +// API name: source +func (r *MigrateReindex) Source(source types.SourceIndexVariant) *MigrateReindex { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.SourceIndexCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/request.go new file mode 100644 index 000000000..5246cec84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package migratereindex + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L23-L37 +type Request = types.MigrateReindex + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewMigrateReindex() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/response.go new file mode 100644 index 000000000..14ca145e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package migratereindex + +// Response holds the response body struct for the package migratereindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_reindex/MigrateReindexResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/migrate_to_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/migrate_to_data_stream.go new file mode 100644 index 000000000..0b1e2c5dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/migrate_to_data_stream.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Convert an index alias to a data stream. +// Converts an index alias to a data stream. +// You must have a matching index template that is data stream enabled. +// The alias must meet the following criteria: +// The alias must have a write index; +// All indices for the alias must have a `@timestamp` field mapping of a `date` +// or `date_nanos` field type; +// The alias must not have any filters; +// The alias must not use custom routing. +// If successful, the request removes the alias and creates a data stream with +// the same name. +// The indices for the alias become hidden backing indices for the stream. +// The write index for the alias becomes the write index for the stream. +package migratetodatastream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MigrateToDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMigrateToDataStream type alias for index. +type NewMigrateToDataStream func(name string) *MigrateToDataStream + +// NewMigrateToDataStreamFunc returns a new instance of MigrateToDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMigrateToDataStreamFunc(tp elastictransport.Interface) NewMigrateToDataStream { + return func(name string) *MigrateToDataStream { + n := New(tp) + + n._name(name) + + return n + } +} + +// Convert an index alias to a data stream. 
+// Converts an index alias to a data stream. +// You must have a matching index template that is data stream enabled. +// The alias must meet the following criteria: +// The alias must have a write index; +// All indices for the alias must have a `@timestamp` field mapping of a `date` +// or `date_nanos` field type; +// The alias must not have any filters; +// The alias must not use custom routing. +// If successful, the request removes the alias and creates a data stream with +// the same name. +// The indices for the alias become hidden backing indices for the stream. +// The write index for the alias becomes the write index for the stream. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream +func New(tp elastictransport.Interface) *MigrateToDataStream { + r := &MigrateToDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MigrateToDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_migrate") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
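// For orientation, a minimal end-to-end sketch of driving this builder through
// the v9 typed client; the address, alias name, and client wiring below are
// illustrative assumptions, not part of this patch:
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		"github.com/elastic/go-elasticsearch/v9"
//	)
//
//	func main() {
//		es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
//			Addresses: []string{"http://localhost:9200"},
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Issues POST /_data_stream/_migrate/my-logs-alias.
//		res, err := es.Indices.MigrateToDataStream("my-logs-alias").Do(context.Background())
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(res.Acknowledged) // true once the alias is a data stream
//	}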
+func (r MigrateToDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.migrate_to_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.migrate_to_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.migrate_to_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.migrate_to_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the MigrateToDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a migratetodatastream.Response +func (r MigrateToDataStream) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_to_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
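// On non-2xx responses, Do returns the decoded *types.ElasticsearchError as
// the error value, so callers can branch on it with errors.As; a sketch that
// assumes a typed client `es`, a ctx, and imports of "errors", "log", and the
// typedapi types package:
//
//	res, err := es.Indices.MigrateToDataStream("my-logs-alias").Do(ctx)
//	if err != nil {
//		var esErr *types.ElasticsearchError
//		if errors.As(err, &esErr) {
//			log.Printf("elasticsearch status %d: %s", esErr.Status, esErr.ErrorCause.Type)
//		}
//		return err
//	}
//	_ = res.Acknowledged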
+func (r MigrateToDataStream) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.migrate_to_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the MigrateToDataStream query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the MigrateToDataStream headers map. +func (r *MigrateToDataStream) Header(key, value string) *MigrateToDataStream { + r.headers.Set(key, value) + + return r +} + +// Name Name of the index alias to convert to a data stream. +// API Name: name +func (r *MigrateToDataStream) _name(name string) *MigrateToDataStream { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *MigrateToDataStream) MasterTimeout(duration string) *MigrateToDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *MigrateToDataStream) Timeout(duration string) *MigrateToDataStream { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *MigrateToDataStream) ErrorTrace(errortrace bool) *MigrateToDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *MigrateToDataStream) FilterPath(filterpaths ...string) *MigrateToDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *MigrateToDataStream) Human(human bool) *MigrateToDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *MigrateToDataStream) Pretty(pretty bool) *MigrateToDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/response.go new file mode 100644 index 000000000..90b91fa9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package migratetodatastream + +// Response holds the response body struct for the package migratetodatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/modify_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/modify_data_stream.go new file mode 100644 index 000000000..700e566e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/modify_data_stream.go @@ -0,0 +1,354 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update data streams. 
+// Performs one or more data stream modification actions in a single atomic +// operation. +package modifydatastream + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ModifyDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewModifyDataStream type alias for index. +type NewModifyDataStream func() *ModifyDataStream + +// NewModifyDataStreamFunc returns a new instance of ModifyDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewModifyDataStreamFunc(tp elastictransport.Interface) NewModifyDataStream { + return func() *ModifyDataStream { + n := New(tp) + + return n + } +} + +// Update data streams. +// Performs one or more data stream modification actions in a single atomic +// operation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream +func New(tp elastictransport.Interface) *ModifyDataStream { + r := &ModifyDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ModifyDataStream) Raw(raw io.Reader) *ModifyDataStream { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ModifyDataStream) Request(req *Request) *ModifyDataStream { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
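// A sketch of the two body styles this builder accepts; Raw takes precedence
// over Request when both are set. The action and field names follow the
// elasticsearch-specification, and the `es` client, stream, and backing-index
// names are assumptions:
//
//	req := modifydatastream.NewRequest()
//	req.Actions = []types.IndicesModifyAction{
//		{AddBackingIndex: &types.IndexAndDataStreamAction{
//			DataStream: "my-stream",
//			Index:      ".ds-my-stream-000001",
//		}},
//	}
//	res, err := es.Indices.ModifyDataStream().Request(req).Do(ctx)
//
//	// Equivalent raw JSON form (import "strings"):
//	body := `{"actions":[{"add_backing_index":{"data_stream":"my-stream","index":".ds-my-stream-000001"}}]}`
//	res, err = es.Indices.ModifyDataStream().Raw(strings.NewReader(body)).Do(ctx)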
+func (r *ModifyDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ModifyDataStream: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_modify") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ModifyDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.modify_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.modify_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.modify_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.modify_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ModifyDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a modifydatastream.Response +func (r ModifyDataStream) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.modify_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ModifyDataStream headers map. +func (r *ModifyDataStream) Header(key, value string) *ModifyDataStream { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ModifyDataStream) ErrorTrace(errortrace bool) *ModifyDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ModifyDataStream) FilterPath(filterpaths ...string) *ModifyDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ModifyDataStream) Human(human bool) *ModifyDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ModifyDataStream) Pretty(pretty bool) *ModifyDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Actions to perform. +// API name: actions +func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyActionVariant) *ModifyDataStream { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + + r.req.Actions = append(r.req.Actions, *v.IndicesModifyActionCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/request.go new file mode 100644 index 000000000..6381ba01f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package modifydatastream + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package modifydatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L45 +type Request struct { + + // Actions Actions to perform. + Actions []types.IndicesModifyAction `json:"actions"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Modifydatastream request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/response.go new file mode 100644 index 000000000..56024f92a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package modifydatastream + +// Response holds the response body struct for the package modifydatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/open.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/open.go new file mode 100644 index 000000000..da91f089c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/open.go @@ -0,0 +1,486 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Open a closed index. +// For data streams, the API opens any closed backing indices. +// +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. 
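// A short sketch of opening an index with the query options defined further
// below; the `es` typed client, ctx, and index name are assumptions:
//
//	res, err := es.Indices.Open("my-index").
//		IgnoreUnavailable(true).
//		WaitForActiveShards("all").
//		Timeout("30s").
//		Do(ctx)
//	if err != nil {
//		return err
//	}
//	fmt.Println(res.Acknowledged, res.ShardsAcknowledged)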
+package open + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Open struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOpen type alias for index. +type NewOpen func(index string) *Open + +// NewOpenFunc returns a new instance of Open with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewOpenFunc(tp elastictransport.Interface) NewOpen { + return func(index string) *Open { + n := New(tp) + + n._index(index) + + return n + } +} + +// Open a closed index. +// For data streams, the API opens any closed backing indices. +// +// A closed index is blocked for read/write operations and does not allow all +// operations that opened indices allow. +// It is not possible to index documents or to search for documents in a closed +// index. +// This allows closed indices to not have to maintain internal data structures +// for indexing or searching documents, resulting in a smaller overhead on the +// cluster. +// +// When opening or closing an index, the master is responsible for restarting +// the index shards to reflect the new state of the index. +// The shards will then go through the normal recovery process. +// The data of opened or closed indices is automatically replicated by the +// cluster to ensure that enough shard copies are safely kept around at all +// times. +// +// You can open and close multiple indices. +// An error is thrown if the request explicitly refers to a missing index. +// This behavior can be turned off by using the `ignore_unavailable=true` +// parameter. +// +// By default, you must explicitly name the indices you are opening or closing. +// To open or close indices with `_all`, `*`, or other wildcard expressions, +// change the `action.destructive_requires_name` setting to `false`. +// This setting can also be changed with the cluster update settings API. +// +// Closed indices consume a significant amount of disk-space which can cause +// problems in managed environments. +// Closing indices can be turned off with the cluster settings API by setting +// `cluster.indices.close.enable` to `false`. +// +// Because opening or closing an index allocates its shards, the +// `wait_for_active_shards` setting on index creation applies to the `_open` and +// `_close` index actions as well. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open +func New(tp elastictransport.Interface) *Open { + r := &Open{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
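// The NewOpenFunc indirection above is how the client's API index binds each
// endpoint to a shared transport; it can also be used directly. A sketch,
// where `tp` is any elastictransport.Interface value (an assumption here):
//
//	newOpen := open.NewOpenFunc(tp)
//	res, err := newOpen("my-index").Do(context.Background())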
+func (r *Open) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_open") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Open) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.open") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.open") + if reader := instrument.RecordRequestBody(ctx, "indices.open", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.open") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Open query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a open.Response +func (r Open) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.open") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Open) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.open") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Open query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Open headers map. +func (r *Open) Header(key, value string) *Open { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// By default, you must explicitly name the indices you using to limit the +// request. +// To limit a request using `_all`, `*`, or other wildcard expressions, change +// the `action.destructive_requires_name` setting to false. +// You can update this setting in the `elasticsearch.yml` file or using the +// cluster update settings API. +// API Name: index +func (r *Open) _index(index string) *Open { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Open) AllowNoIndices(allownoindices bool) *Open { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Open) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Open { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Open) IgnoreUnavailable(ignoreunavailable bool) *Open { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Open) MasterTimeout(duration string) *Open { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Open) Timeout(duration string) *Open { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Open) WaitForActiveShards(waitforactiveshards string) *Open { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Open) ErrorTrace(errortrace bool) *Open { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Open) FilterPath(filterpaths ...string) *Open { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Open) Human(human bool) *Open { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Open) Pretty(pretty bool) *Open { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/response.go new file mode 100644 index 000000000..516ccd9f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/open/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
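// The error_trace, filter_path, human, and pretty setters recur on every
// builder in this patch; each only writes a query parameter into r.values, so
// they chain freely with endpoint-specific options. A sketch with an assumed
// `es` client:
//
//	res, err := es.Indices.Open("my-index").FilterPath("acknowledged").Pretty(true).Do(ctx)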
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package open + +// Response holds the response body struct for the package open +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/open/IndicesOpenResponse.ts#L20-L25 +type Response struct { + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/promote_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/promote_data_stream.go new file mode 100644 index 000000000..e5872aa31 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/promote_data_stream.go @@ -0,0 +1,389 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. +package promotedatastream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
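// Following up on the comment above: ErrBuildPath surfaces when a required
// path parameter was never set, which with the exported constructors mainly
// guards zero-value misuse. A sketch:
//
//	var p promotedatastream.PromoteDataStream // zero value, name never set
//	_, err := p.HttpRequest(context.Background())
//	// err == ErrBuildPath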
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PromoteDataStream struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPromoteDataStream type alias for index. +type NewPromoteDataStream func(name string) *PromoteDataStream + +// NewPromoteDataStreamFunc returns a new instance of PromoteDataStream with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPromoteDataStreamFunc(tp elastictransport.Interface) NewPromoteDataStream { + return func(name string) *PromoteDataStream { + n := New(tp) + + n._name(name) + + return n + } +} + +// Promote a data stream. +// Promote a data stream from a replicated data stream managed by cross-cluster +// replication (CCR) to a regular data stream. +// +// With CCR auto following, a data stream from a remote cluster can be +// replicated to the local cluster. +// These data streams can't be rolled over in the local cluster. +// These replicated data streams roll over only if the upstream data stream +// rolls over. +// In the event that the remote cluster is no longer available, the data stream +// in the local cluster can be promoted to a regular data stream, which allows +// these data streams to be rolled over in the local cluster. +// +// NOTE: When promoting a data stream, ensure the local cluster has a data +// stream enabled index template that matches the data stream. +// If this is missing, the data stream will not be able to roll over until a +// matching index template is created. +// This will affect the lifecycle management of the data stream and interfere +// with the data stream size and retention. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream +func New(tp elastictransport.Interface) *PromoteDataStream { + r := &PromoteDataStream{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
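// Unlike the acknowledged-style endpoints above, this package's Response is a
// json.RawMessage (see response.go further below), so Do hands back the raw
// body. A decoding sketch, with `es`, ctx, and the stream name assumed:
//
//	raw, err := es.Indices.PromoteDataStream("logs-remote").Do(ctx)
//	if err != nil {
//		return err
//	}
//	var out struct {
//		Acknowledged bool `json:"acknowledged"`
//	}
//	if err := json.Unmarshal(raw, &out); err != nil {
//		return err
//	}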
+func (r *PromoteDataStream) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString("_promote") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PromoteDataStream) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.promote_data_stream") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.promote_data_stream") + if reader := instrument.RecordRequestBody(ctx, "indices.promote_data_stream", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.promote_data_stream") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PromoteDataStream query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a promotedatastream.Response +func (r PromoteDataStream) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.promote_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + 
errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PromoteDataStream) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.promote_data_stream") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PromoteDataStream query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PromoteDataStream headers map. +func (r *PromoteDataStream) Header(key, value string) *PromoteDataStream { + r.headers.Set(key, value) + + return r +} + +// Name The name of the data stream +// API Name: name +func (r *PromoteDataStream) _name(name string) *PromoteDataStream { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *PromoteDataStream) MasterTimeout(duration string) *PromoteDataStream { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PromoteDataStream) ErrorTrace(errortrace bool) *PromoteDataStream { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PromoteDataStream) FilterPath(filterpaths ...string) *PromoteDataStream { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
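A hedged usage sketch for the promote endpoint above. It assumes an already-configured *elasticsearch.TypedClient named es and that the typed client exposes this builder as es.Indices.PromoteDataStream (client construction is not part of this diff); the data stream name is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumption: default config pointing at a local cluster.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Promote the replicated data stream "logs-remote" (hypothetical name) so it
	// can be rolled over locally; IsSuccess reduces the call to a boolean check.
	ok, err := es.Indices.PromoteDataStream("logs-remote").
		MasterTimeout("30s").
		IsSuccess(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("promoted:", ok)
}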
+// API name: human +func (r *PromoteDataStream) Human(human bool) *PromoteDataStream { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PromoteDataStream) Pretty(pretty bool) *PromoteDataStream { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/response.go new file mode 100644 index 000000000..834e5a102 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package promotedatastream + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package promotedatastream +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/put_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/put_alias.go new file mode 100644 index 000000000..b1e36ea28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/put_alias.go @@ -0,0 +1,495 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update an alias. +// Adds a data stream or index to an alias. +package putalias + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAlias type alias for index. +type NewPutAlias func(index, name string) *PutAlias + +// NewPutAliasFunc returns a new instance of PutAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAliasFunc(tp elastictransport.Interface) NewPutAlias { + return func(index, name string) *PutAlias { + n := New(tp) + + n._index(index) + + n._name(name) + + return n + } +} + +// Create or update an alias. +// Adds a data stream or index to an alias. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias +func New(tp elastictransport.Interface) *PutAlias { + r := &PutAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAlias) Raw(raw io.Reader) *PutAlias { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAlias) Request(req *Request) *PutAlias { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAlias: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_alias") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + case r.paramSet == indexMask|nameMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_aliases") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
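A note on the path construction just shown: paramSet is a bitmask assembled from iota + 1 constants (indexMask = 1, nameMask = 2), and both cases in the switch test the identical condition indexMask|nameMask, so only the first branch (the /_alias/ URL variant) is reachable; the /_aliases/ variant is dead code in this generated revision. The bitmask pattern itself, as a self-contained sketch:

package main

import (
	"fmt"
	"strings"
)

const (
	indexMask = iota + 1 // 1 (bit 0), set when _index(...) is called
	nameMask             // 2 (bit 1), set when _name(...) is called
)

// buildPath mirrors the generated switch: the URL variant is chosen by
// comparing the accumulated bitmask against the exact combination of
// path parameters that variant requires.
func buildPath(paramSet int, index, name string) string {
	var path strings.Builder
	switch {
	case paramSet == indexMask|nameMask: // both path params were set
		path.WriteString("/")
		path.WriteString(index)
		path.WriteString("/_alias/")
		path.WriteString(name)
	}
	return path.String()
}

func main() {
	paramSet := 0
	paramSet |= indexMask
	paramSet |= nameMask
	fmt.Println(buildPath(paramSet, "my-index", "my-alias")) // /my-index/_alias/my-alias
}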
+func (r PutAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_alias") + if reader := instrument.RecordRequestBody(ctx, "indices.put_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putalias.Response +func (r PutAlias) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAlias headers map. +func (r *PutAlias) Header(key, value string) *PutAlias { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams or indices to add. +// Supports wildcards (`*`). +// Wildcard patterns that match both data streams and indices return an error. +// API Name: index +func (r *PutAlias) _index(index string) *PutAlias { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Name Alias to update. +// If the alias doesn’t exist, the request creates it. +// Index alias names support date math. +// API Name: name +func (r *PutAlias) _name(name string) *PutAlias { + r.paramSet |= nameMask + r.name = name + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutAlias) MasterTimeout(duration string) *PutAlias { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutAlias) Timeout(duration string) *PutAlias { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAlias) ErrorTrace(errortrace bool) *PutAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAlias) FilterPath(filterpaths ...string) *PutAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAlias) Human(human bool) *PutAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAlias) Pretty(pretty bool) *PutAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Query used to limit documents the alias can access. +// API name: filter +func (r *PutAlias) Filter(filter types.QueryVariant) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// Value used to route indexing operations to a specific shard. +// If specified, this overwrites the `routing` value for indexing operations. +// Data stream aliases don’t support this parameter. +// API name: index_routing +func (r *PutAlias) IndexRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexRouting = &routing + + return r +} + +// If `true`, sets the write index or data stream for the alias. +// If an alias points to multiple indices or data streams and `is_write_index` +// isn’t set, the alias rejects write requests. +// If an index alias points to one index and `is_write_index` isn’t set, the +// index automatically acts as the write index. +// Data stream aliases don’t automatically set a write data stream, even if the +// alias points to one data stream. +// API name: is_write_index +func (r *PutAlias) IsWriteIndex(iswriteindex bool) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IsWriteIndex = &iswriteindex + + return r +} + +// Value used to route indexing and search operations to a specific shard. +// Data stream aliases don’t support this parameter. 
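Putting the PutAlias builder together: a minimal sketch assuming a configured *elasticsearch.TypedClient named es that exposes es.Indices.PutAlias (client setup is outside this diff); index, alias, and routing values are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// "my-index" and "my-alias" are illustrative names.
	res, err := es.Indices.PutAlias("my-index", "my-alias").
		IsWriteIndex(true). // make this index the alias's write target
		Routing("1").       // shard routing; not supported by data stream aliases
		Timeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}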
+// API name: routing +func (r *PutAlias) Routing(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Routing = &routing + + return r +} + +// Value used to route search operations to a specific shard. +// If specified, this overwrites the `routing` value for search operations. +// Data stream aliases don’t support this parameter. +// API name: search_routing +func (r *PutAlias) SearchRouting(routing string) *PutAlias { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SearchRouting = &routing + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/request.go new file mode 100644 index 000000000..a9c4ff430 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/request.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putalias + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putalias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L103 +type Request struct { + + // Filter Query used to limit documents the alias can access. + Filter *types.Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + // Data stream aliases don’t support this parameter. + IndexRouting *string `json:"index_routing,omitempty"` + // IsWriteIndex If `true`, sets the write index or data stream for the alias. + // If an alias points to multiple indices or data streams and `is_write_index` + // isn’t set, the alias rejects write requests. + // If an index alias points to one index and `is_write_index` isn’t set, the + // index automatically acts as the write index. + // Data stream aliases don’t automatically set a write data stream, even if the + // alias points to one data stream. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + // Data stream aliases don’t support this parameter. 
+ Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. + // Data stream aliases don’t support this parameter. + SearchRouting *string `json:"search_routing,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putalias request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "index_routing": + if err := dec.Decode(&s.IndexRouting); err != nil { + return fmt.Errorf("%s | %w", "IndexRouting", err) + } + + case "is_write_index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsWriteIndex", err) + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "search_routing": + if err := dec.Decode(&s.SearchRouting); err != nil { + return fmt.Errorf("%s | %w", "SearchRouting", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/response.go new file mode 100644 index 000000000..4ca706408 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putalias + +// Response holds the response body struct for the package putalias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/put_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/put_data_lifecycle.go new file mode 100644 index 000000000..1923e58d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/put_data_lifecycle.go @@ -0,0 +1,441 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update data stream lifecycles. +// Update the data stream lifecycle of the specified data streams. +package putdatalifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutDataLifecycle type alias for index. +type NewPutDataLifecycle func(name string) *PutDataLifecycle + +// NewPutDataLifecycleFunc returns a new instance of PutDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDataLifecycleFunc(tp elastictransport.Interface) NewPutDataLifecycle { + return func(name string) *PutDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Update data stream lifecycles. +// Update the data stream lifecycle of the specified data streams. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle +func New(tp elastictransport.Interface) *PutDataLifecycle { + r := &PutDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDataLifecycle) Raw(raw io.Reader) *PutDataLifecycle { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDataLifecycle) Request(req *Request) *PutDataLifecycle { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDataLifecycle: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutDataLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_data_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutDataLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdatalifecycle.Response +func (r PutDataLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutDataLifecycle headers map. +func (r *PutDataLifecycle) Header(key, value string) *PutDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams used to limit the request. +// Supports wildcards (`*`). +// To target all data streams use `*` or `_all`. +// API Name: name +func (r *PutDataLifecycle) _name(name string) *PutDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. 
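An end-to-end sketch for this endpoint, assuming a configured *elasticsearch.TypedClient named es exposing es.Indices.PutDataLifecycle; the target pattern and retention value are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Retain documents for at least 30 days on every data stream matching "logs-*".
	req, err := putdatalifecycle.NewRequest().FromJSON(`{"data_retention": "30d"}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Indices.PutDataLifecycle("logs-*").
		Request(req).
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}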
+// API name: expand_wildcards +func (r *PutDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *PutDataLifecycle) MasterTimeout(duration string) *PutDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutDataLifecycle) Timeout(duration string) *PutDataLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutDataLifecycle) ErrorTrace(errortrace bool) *PutDataLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutDataLifecycle) FilterPath(filterpaths ...string) *PutDataLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutDataLifecycle) Human(human bool) *PutDataLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutDataLifecycle) Pretty(pretty bool) *PutDataLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If defined, every document added to this data stream will be stored at least +// for this time frame. +// Any time after this duration the document could be deleted. +// When empty, every document in this data stream will be stored indefinitely. +// API name: data_retention +func (r *PutDataLifecycle) DataRetention(duration types.DurationVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataRetention = *duration.DurationCaster() + + return r +} + +// The downsampling configuration to execute for the managed backing index after +// rollover. +// API name: downsampling +func (r *PutDataLifecycle) Downsampling(downsampling types.DataStreamLifecycleDownsamplingVariant) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Downsampling = downsampling.DataStreamLifecycleDownsamplingCaster() + + return r +} + +// If defined, it turns data stream lifecycle on/off (`true`/`false`) for this +// data stream. 
A data stream lifecycle +// that's disabled (enabled: `false`) will have no effect on the data stream. +// API name: enabled +func (r *PutDataLifecycle) Enabled(enabled bool) *PutDataLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Enabled = &enabled + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/request.go new file mode 100644 index 000000000..5c97ac5fd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/request.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatalifecycle + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L93 +type Request struct { + + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention types.Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. + Downsampling *types.DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. 
+ Enabled *bool `json:"enabled,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdatalifecycle request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return fmt.Errorf("%s | %w", "Downsampling", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/response.go new file mode 100644 index 000000000..655e866ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatalifecycle + +// Response holds the response body struct for the package putdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
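The UnmarshalJSON implementation above accepts enabled either as a JSON boolean or as a string-encoded boolean. A small sketch exercising Request.FromJSON with that leniency (values illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle"
)

func main() {
	// "enabled" arrives as the string "true"; the custom UnmarshalJSON falls
	// back to strconv.ParseBool, so it still decodes into the *bool field.
	req, err := putdatalifecycle.NewRequest().FromJSON(`{
		"data_retention": "7d",
		"enabled": "true"
	}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*req.Enabled, req.DataRetention) // true 7d
}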
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/put_data_stream_options.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/put_data_stream_options.go new file mode 100644 index 000000000..5221ac417 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/put_data_stream_options.go @@ -0,0 +1,410 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update data stream options. +// Update the data stream options of the specified data streams. +package putdatastreamoptions + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDataStreamOptions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutDataStreamOptions type alias for index. +type NewPutDataStreamOptions func(name string) *PutDataStreamOptions + +// NewPutDataStreamOptionsFunc returns a new instance of PutDataStreamOptions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDataStreamOptionsFunc(tp elastictransport.Interface) NewPutDataStreamOptions { + return func(name string) *PutDataStreamOptions { + n := New(tp) + + n._name(name) + + return n + } +} + +// Update data stream options. +// Update the data stream options of the specified data streams. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html +func New(tp elastictransport.Interface) *PutDataStreamOptions { + r := &PutDataStreamOptions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDataStreamOptions) Raw(raw io.Reader) *PutDataStreamOptions { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDataStreamOptions) Request(req *Request) *PutDataStreamOptions { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutDataStreamOptions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDataStreamOptions: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_options") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutDataStreamOptions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_data_stream_options") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_stream_options") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_stream_options", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_stream_options") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutDataStreamOptions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdatastreamoptions.Response +func (r PutDataStreamOptions) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_stream_options") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutDataStreamOptions headers map. +func (r *PutDataStreamOptions) Header(key, value string) *PutDataStreamOptions { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams used to limit the request. +// Supports wildcards (`*`). +// To target all data streams use `*` or `_all`. +// API Name: name +func (r *PutDataStreamOptions) _name(name string) *PutDataStreamOptions { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. 
+// API name: expand_wildcards +func (r *PutDataStreamOptions) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutDataStreamOptions { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *PutDataStreamOptions) MasterTimeout(duration string) *PutDataStreamOptions { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutDataStreamOptions) Timeout(duration string) *PutDataStreamOptions { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutDataStreamOptions) ErrorTrace(errortrace bool) *PutDataStreamOptions { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutDataStreamOptions) FilterPath(filterpaths ...string) *PutDataStreamOptions { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutDataStreamOptions) Human(human bool) *PutDataStreamOptions { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutDataStreamOptions) Pretty(pretty bool) *PutDataStreamOptions { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If defined, it will update the failure store configuration of every data +// stream resolved by the name expression. +// API name: failure_store +func (r *PutDataStreamOptions) FailureStore(failurestore types.DataStreamFailureStoreVariant) *PutDataStreamOptions { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FailureStore = failurestore.DataStreamFailureStoreCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/request.go new file mode 100644 index 000000000..24bb0663f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/request.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatastreamoptions + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putdatastreamoptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_options/IndicesPutDataStreamOptionsRequest.ts#L25-L79 +type Request struct { + + // FailureStore If defined, it will update the failure store configuration of every data + // stream resolved by the name expression. + FailureStore *types.DataStreamFailureStore `json:"failure_store,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdatastreamoptions request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/response.go new file mode 100644 index 000000000..033a4e99b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
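Reviewer note (not part of the vendored diff): the new put-data-stream-options endpoint above can be driven either through its fluent setters or by loading a raw JSON body with `FromJSON`, as defined in the generated `Request` just shown. A minimal sketch, assuming the typed client exposes the endpoint as `es.Indices.PutDataStreamOptions` like the other generated APIs, with an illustrative `logs-*` pattern and localhost address:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions"
)

func main() {
	// Placeholder endpoint; real deployments will differ.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build the body from JSON; failure_store is the only body field on
	// the generated Request struct. The payload shape is illustrative.
	req, err := putdatastreamoptions.NewRequest().
		FromJSON(`{"failure_store": {"enabled": true}}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Indices.PutDataStreamOptions("logs-*").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```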
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatastreamoptions + +// Response holds the response body struct for the package putdatastreamoptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_options/IndicesPutDataStreamOptionsResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/put_data_stream_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/put_data_stream_settings.go new file mode 100644 index 000000000..a6ccfaa1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/put_data_stream_settings.go @@ -0,0 +1,1130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update data stream settings. +// +// This API can be used to override settings on specific data streams. These +// overrides will take precedence over what +// is specified in the template that the data stream matches. To prevent your +// data stream from getting into an invalid state, +// only certain settings are allowed. If possible, the setting change is applied +// to all +// backing indices. Otherwise, it will be applied when the data stream is next +// rolled over. +package putdatastreamsettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDataStreamSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutDataStreamSettings type alias for index. +type NewPutDataStreamSettings func(name string) *PutDataStreamSettings + +// NewPutDataStreamSettingsFunc returns a new instance of PutDataStreamSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDataStreamSettingsFunc(tp elastictransport.Interface) NewPutDataStreamSettings { + return func(name string) *PutDataStreamSettings { + n := New(tp) + + n._name(name) + + return n + } +} + +// Update data stream settings. +// +// This API can be used to override settings on specific data streams. These +// overrides will take precedence over what +// is specified in the template that the data stream matches. To prevent your +// data stream from getting into an invalid state, +// only certain settings are allowed. If possible, the setting change is applied +// to all +// backing indices. Otherwise, it will be applied when the data stream is next +// rolled over. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings +func New(tp elastictransport.Interface) *PutDataStreamSettings { + r := &PutDataStreamSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDataStreamSettings) Raw(raw io.Reader) *PutDataStreamSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDataStreamSettings) Request(req *Request) *PutDataStreamSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutDataStreamSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDataStreamSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutDataStreamSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_data_stream_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_data_stream_settings") + if reader := instrument.RecordRequestBody(ctx, "indices.put_data_stream_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_data_stream_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutDataStreamSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdatastreamsettings.Response +func (r PutDataStreamSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_data_stream_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutDataStreamSettings headers map. +func (r *PutDataStreamSettings) Header(key, value string) *PutDataStreamSettings { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of data streams or data stream patterns. +// API Name: name +func (r *PutDataStreamSettings) _name(name string) *PutDataStreamSettings { + r.paramSet |= nameMask + r.name = name + + return r +} + +// DryRun If `true`, the request does not actually change the settings on any data +// streams or indices. Instead, it +// simulates changing the settings and reports back to the user what would have +// happened had these settings +// actually been applied. +// API name: dry_run +func (r *PutDataStreamSettings) DryRun(dryrun bool) *PutDataStreamSettings { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *PutDataStreamSettings) MasterTimeout(duration string) *PutDataStreamSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// +// timeout expires, the request fails and returns an error. +// +// API name: timeout +func (r *PutDataStreamSettings) Timeout(duration string) *PutDataStreamSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutDataStreamSettings) ErrorTrace(errortrace bool) *PutDataStreamSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutDataStreamSettings) FilterPath(filterpaths ...string) *PutDataStreamSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
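Reviewer note: as the `Do` implementation above shows, a non-2xx response is decoded into a `types.ElasticsearchError` and returned as the error value, so callers can distinguish API rejections from transport failures. A hedged sketch under that assumption; the helper name and its handling are illustrative, not part of this diff:

```go
// Assumed imports: "context", "errors", "fmt", "log",
// "github.com/elastic/go-elasticsearch/v9",
// "github.com/elastic/go-elasticsearch/v9/typedapi/types".
func previewReplicaChange(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.PutDataStreamSettings("logs-*").
		NumberOfReplicas("2").
		DryRun(true). // simulate only; nothing is applied
		Do(ctx)
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// Status is backfilled from the HTTP status code when the
			// error body did not carry one (see Do above).
			return fmt.Errorf("rejected with status %d: %w", esErr.Status, esErr)
		}
		return err // transport-level failure
	}
	log.Printf("dry run evaluated %d data stream(s)", len(res.DataStreams))
	return nil
}
```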
+// API name: human +func (r *PutDataStreamSettings) Human(human bool) *PutDataStreamSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutDataStreamSettings) Pretty(pretty bool) *PutDataStreamSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: analysis +func (r *PutDataStreamSettings) Analysis(analysis types.IndexSettingsAnalysisVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Analysis = analysis.IndexSettingsAnalysisCaster() + + return r +} + +// Settings to define analyzers, tokenizers, token filters and character +// filters. +// Refer to the linked documentation for step-by-step examples of updating +// analyzers on existing indices. +// API name: analyze +func (r *PutDataStreamSettings) Analyze(analyze types.SettingsAnalyzeVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Analyze = analyze.SettingsAnalyzeCaster() + + return r +} + +// API name: auto_expand_replicas +func (r *PutDataStreamSettings) AutoExpandReplicas(autoexpandreplicas any) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AutoExpandReplicas = autoexpandreplicas + + return r +} + +// API name: blocks +func (r *PutDataStreamSettings) Blocks(blocks types.IndexSettingBlocksVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Blocks = blocks.IndexSettingBlocksCaster() + + return r +} + +// API name: check_on_startup +func (r *PutDataStreamSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CheckOnStartup = &checkonstartup + return r +} + +// API name: codec +func (r *PutDataStreamSettings) Codec(codec string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Codec = &codec + + return r +} + +// API name: creation_date +func (r *PutDataStreamSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillisVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDate = *stringifiedepochtimeunitmillis.StringifiedEpochTimeUnitMillisCaster() + + return r +} + +// API name: creation_date_string +func (r *PutDataStreamSettings) CreationDateString(datetime types.DateTimeVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CreationDateString = *datetime.DateTimeCaster() + + return r +} + +// API name: default_pipeline +func (r *PutDataStreamSettings) DefaultPipeline(pipelinename string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DefaultPipeline = &pipelinename + + return r +} + +// API name: final_pipeline +func (r *PutDataStreamSettings) 
FinalPipeline(pipelinename string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FinalPipeline = &pipelinename + + return r +} + +// API name: format +func (r *PutDataStreamSettings) Format(format string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Format = &format + + return r +} + +// API name: gc_deletes +func (r *PutDataStreamSettings) GcDeletes(duration types.DurationVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.GcDeletes = *duration.DurationCaster() + + return r +} + +// API name: hidden +func (r *PutDataStreamSettings) Hidden(hidden string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Hidden = &hidden + + return r +} + +// API name: highlight +func (r *PutDataStreamSettings) Highlight(highlight types.SettingsHighlightVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Highlight = highlight.SettingsHighlightCaster() + + return r +} + +// API name: index +func (r *PutDataStreamSettings) Index(index types.IndexSettingsVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Index = index.IndexSettingsCaster() + + return r +} + +// API name: IndexSettings +func (r *PutDataStreamSettings) IndexSettings(indexsettings map[string]json.RawMessage) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexSettings = indexsettings + return r +} + +func (r *PutDataStreamSettings) AddIndexSetting(key string, value json.RawMessage) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + + tmp[key] = value + + r.req.IndexSettings = tmp + return r +} + +// Configure indexing back pressure limits. 
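Reviewer note on the `IndexSettings`/`AddIndexSetting` pair just above: values are free-form `json.RawMessage`, so flat settings can be attached without a typed field. Note that `AddIndexSetting` as generated assigns through `tmp`, which is only initialized on the non-nil branch, so this sketch uses the map-valued `IndexSettings` setter instead. The setting keys and values are illustrative only (`es` and `ctx` assumed in scope):

```go
// Assumed import: "encoding/json".
res, err := es.Indices.PutDataStreamSettings("logs-*").
	IndexSettings(map[string]json.RawMessage{
		"index.refresh_interval": json.RawMessage(`"30s"`), // example key/value
	}).
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
_ = res // res.DataStreams carries the per-stream outcome
```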
+// API name: indexing_pressure +func (r *PutDataStreamSettings) IndexingPressure(indexingpressure types.IndicesIndexingPressureVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexingPressure = indexingpressure.IndicesIndexingPressureCaster() + + return r +} + +// API name: indexing.slowlog +func (r *PutDataStreamSettings) IndexingSlowlog(indexingslowlog types.IndexingSlowlogSettingsVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexingSlowlog = indexingslowlog.IndexingSlowlogSettingsCaster() + + return r +} + +// API name: lifecycle +func (r *PutDataStreamSettings) Lifecycle(lifecycle types.IndexSettingsLifecycleVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Lifecycle = lifecycle.IndexSettingsLifecycleCaster() + + return r +} + +// API name: load_fixed_bitset_filters_eagerly +func (r *PutDataStreamSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly + + return r +} + +// Enable or disable dynamic mapping for an index. +// API name: mapping +func (r *PutDataStreamSettings) Mapping(mapping types.MappingLimitSettingsVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Mapping = mapping.MappingLimitSettingsCaster() + + return r +} + +// API name: max_docvalue_fields_search +func (r *PutDataStreamSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch + + return r +} + +// API name: max_inner_result_window +func (r *PutDataStreamSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxInnerResultWindow = &maxinnerresultwindow + + return r +} + +// API name: max_ngram_diff +func (r *PutDataStreamSettings) MaxNgramDiff(maxngramdiff int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxNgramDiff = &maxngramdiff + + return r +} + +// API name: max_refresh_listeners +func (r *PutDataStreamSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRefreshListeners = &maxrefreshlisteners + + return r +} + +// API name: max_regex_length +func (r *PutDataStreamSettings) MaxRegexLength(maxregexlength int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRegexLength = &maxregexlength + + return r +} + +// API name: max_rescore_window +func (r *PutDataStreamSettings) MaxRescoreWindow(maxrescorewindow int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == 
nil { + r.req = NewRequest() + } + + r.req.MaxRescoreWindow = &maxrescorewindow + + return r +} + +// API name: max_result_window +func (r *PutDataStreamSettings) MaxResultWindow(maxresultwindow int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxResultWindow = &maxresultwindow + + return r +} + +// API name: max_script_fields +func (r *PutDataStreamSettings) MaxScriptFields(maxscriptfields int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxScriptFields = &maxscriptfields + + return r +} + +// API name: max_shingle_diff +func (r *PutDataStreamSettings) MaxShingleDiff(maxshinglediff int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxShingleDiff = &maxshinglediff + + return r +} + +// API name: max_slices_per_scroll +func (r *PutDataStreamSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxSlicesPerScroll = &maxslicesperscroll + + return r +} + +// API name: max_terms_count +func (r *PutDataStreamSettings) MaxTermsCount(maxtermscount int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxTermsCount = &maxtermscount + + return r +} + +// API name: merge +func (r *PutDataStreamSettings) Merge(merge types.MergeVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Merge = merge.MergeCaster() + + return r +} + +// API name: mode +func (r *PutDataStreamSettings) Mode(mode string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Mode = &mode + + return r +} + +// API name: number_of_replicas +func (r *PutDataStreamSettings) NumberOfReplicas(numberofreplicas string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfReplicas = &numberofreplicas + + return r +} + +// API name: number_of_routing_shards +func (r *PutDataStreamSettings) NumberOfRoutingShards(numberofroutingshards int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfRoutingShards = &numberofroutingshards + + return r +} + +// API name: number_of_shards +func (r *PutDataStreamSettings) NumberOfShards(numberofshards string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfShards = &numberofshards + + return r +} + +// API name: priority +func (r *PutDataStreamSettings) Priority(priority string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Priority = &priority + + return r +} + +// API name: provided_name +func (r *PutDataStreamSettings) ProvidedName(name string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ProvidedName = &name + 
+ return r +} + +// API name: queries +func (r *PutDataStreamSettings) Queries(queries types.QueriesVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Queries = queries.QueriesCaster() + + return r +} + +// API name: query_string +func (r *PutDataStreamSettings) QueryString(querystring types.SettingsQueryStringVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryString = querystring.SettingsQueryStringCaster() + + return r +} + +// API name: refresh_interval +func (r *PutDataStreamSettings) RefreshInterval(duration types.DurationVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshInterval = *duration.DurationCaster() + + return r +} + +// API name: routing +func (r *PutDataStreamSettings) Routing(routing types.IndexRoutingVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Routing = routing.IndexRoutingCaster() + + return r +} + +// API name: routing_partition_size +func (r *PutDataStreamSettings) RoutingPartitionSize(stringifiedinteger types.StringifiedintegerVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RoutingPartitionSize = *stringifiedinteger.StringifiedintegerCaster() + + return r +} + +// API name: routing_path +func (r *PutDataStreamSettings) RoutingPath(routingpaths ...string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoutingPath = make([]string, len(routingpaths)) + r.req.RoutingPath = routingpaths + + return r +} + +// API name: search +func (r *PutDataStreamSettings) Search(search types.SettingsSearchVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Search = search.SettingsSearchCaster() + + return r +} + +// API name: settings +func (r *PutDataStreamSettings) Settings(settings types.IndexSettingsVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.IndexSettingsCaster() + + return r +} + +// Configure custom similarity settings to customize how search results are +// scored. 
+// API name: similarity +func (r *PutDataStreamSettings) Similarity(similarity map[string]types.SettingsSimilarity) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Similarity = similarity + return r +} + +func (r *PutDataStreamSettings) AddSimilarity(key string, value types.SettingsSimilarityVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.SettingsSimilarity + if r.req.Similarity == nil { + r.req.Similarity = make(map[string]types.SettingsSimilarity) + } else { + tmp = r.req.Similarity + } + + tmp[key] = *value.SettingsSimilarityCaster() + + r.req.Similarity = tmp + return r +} + +// API name: soft_deletes +func (r *PutDataStreamSettings) SoftDeletes(softdeletes types.SoftDeletesVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SoftDeletes = softdeletes.SoftDeletesCaster() + + return r +} + +// API name: sort +func (r *PutDataStreamSettings) Sort(sort types.IndexSegmentSortVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sort = sort.IndexSegmentSortCaster() + + return r +} + +// The store module allows you to control how index data is stored and accessed +// on disk. +// API name: store +func (r *PutDataStreamSettings) Store(store types.StorageVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Store = store.StorageCaster() + + return r +} + +// API name: time_series +func (r *PutDataStreamSettings) TimeSeries(timeseries types.IndexSettingsTimeSeriesVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TimeSeries = timeseries.IndexSettingsTimeSeriesCaster() + + return r +} + +// API name: top_metrics_max_size +func (r *PutDataStreamSettings) TopMetricsMaxSize(topmetricsmaxsize int) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TopMetricsMaxSize = &topmetricsmaxsize + + return r +} + +// API name: translog +func (r *PutDataStreamSettings) Translog(translog types.TranslogVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Translog = translog.TranslogCaster() + + return r +} + +// API name: uuid +func (r *PutDataStreamSettings) Uuid(uuid string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Uuid = &uuid + + return r +} + +// API name: verified_before_close +func (r *PutDataStreamSettings) VerifiedBeforeClose(verifiedbeforeclose string) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.VerifiedBeforeClose = &verifiedbeforeclose + + return r +} + +// API name: version +func (r *PutDataStreamSettings) Version(version types.IndexVersioningVariant) *PutDataStreamSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = 
version.IndexVersioningCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/request.go new file mode 100644 index 000000000..34c7d039a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatastreamsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putdatastreamsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_settings/IndicesPutDataStreamSettingsRequest.ts#L25-L77 +type Request = types.IndexSettings + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewIndexSettings() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/response.go new file mode 100644 index 000000000..4ad58bcd6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
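Reviewer note on the `Request = types.IndexSettings` alias above: a body can be assembled as a plain typed struct and handed to `Request()`, writing the same fields the fluent setters do. A minimal sketch (`es` and `ctx` assumed in scope; the replica count is illustrative):

```go
// Assumed import:
// "github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings".
req := putdatastreamsettings.NewRequest() // alias of types.IndexSettings
replicas := "2"
req.NumberOfReplicas = &replicas // same field NumberOfReplicas("2") would set

res, err := es.Indices.PutDataStreamSettings("logs-*").
	Request(req).
	MasterTimeout("30s"). // query-string parameter, not part of the body
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
_ = res
```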
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatastreamsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putdatastreamsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L23-L28 +type Response struct { + DataStreams []types.UpdatedDataStreamSettings `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/put_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/put_index_template.go new file mode 100644 index 000000000..857b4f4ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/put_index_template.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update an index template. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. +// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. +// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. 
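Reviewer note illustrating the precedence rule described in the package comment above: both templates below match an index such as `logs-app-1`, so the higher-priority one is applied, and registering another overlapping template at an identical priority would be rejected. Names and priorities are illustrative (`es` and `ctx` assumed in scope):

```go
// Generic template for everything under logs-*.
if _, err := es.Indices.PutIndexTemplate("logs-generic").
	IndexPatterns("logs-*").
	Priority(100).
	Do(ctx); err != nil {
	log.Fatal(err)
}

// More specific template; priority 200 beats logs-generic for logs-app-*.
if _, err := es.Indices.PutIndexTemplate("logs-app").
	IndexPatterns("logs-app-*").
	Priority(200).
	Do(ctx); err != nil {
	log.Fatal(err)
}
```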
+// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. +// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. +package putindextemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutIndexTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutIndexTemplate type alias for index. +type NewPutIndexTemplate func(name string) *PutIndexTemplate + +// NewPutIndexTemplateFunc returns a new instance of PutIndexTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutIndexTemplateFunc(tp elastictransport.Interface) NewPutIndexTemplate { + return func(name string) *PutIndexTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update an index template. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// +// Elasticsearch applies templates to new indices based on an wildcard pattern +// that matches the index name. +// Index templates are applied during data stream or index creation. +// For data streams, these settings and mappings are applied when the stream's +// backing indices are created. +// Settings and mappings specified in a create index API request override any +// settings or mappings specified in an index template. +// Changes to index templates do not affect existing indices, including the +// existing backing indices of a data stream. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Multiple matching templates** +// +// If multiple index templates match the name of a new index or data stream, the +// template with the highest priority is used. 
+// +// Multiple templates with overlapping index patterns at the same priority are +// not allowed and an error will be thrown when attempting to create a template +// matching an existing index template at identical priorities. +// +// **Composing aliases, mappings, and settings** +// +// When multiple component templates are specified in the `composed_of` field +// for an index template, they are merged in the order specified, meaning that +// later component templates override earlier component templates. +// Any mappings, settings, or aliases from the parent index template are merged +// in next. +// Finally, any configuration on the index request itself is merged. +// Mapping definitions are merged recursively, which means that later mapping +// components can introduce new field mappings and update the mapping +// configuration. +// If a field mapping is already contained in an earlier component, its +// definition will be completely overwritten by the later one. +// This recursive merging strategy applies not only to field mappings, but also +// root options like `dynamic_templates` and `meta`. +// If an earlier component contains a `dynamic_templates` block, then by default +// new `dynamic_templates` entries are appended onto the end. +// If an entry already exists with the same key, then it is overwritten by the +// new definition. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template +func New(tp elastictransport.Interface) *PutIndexTemplate { + r := &PutIndexTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutIndexTemplate) Raw(raw io.Reader) *PutIndexTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutIndexTemplate) Request(req *Request) *PutIndexTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutIndexTemplate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutIndexTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_index_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.put_index_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_index_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutIndexTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putindextemplate.Response +func (r PutIndexTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + 
response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutIndexTemplate headers map. +func (r *PutIndexTemplate) Header(key, value string) *PutIndexTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Index or template name +// API Name: name +func (r *PutIndexTemplate) _name(name string) *PutIndexTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create If `true`, this request cannot replace or update existing index templates. +// API name: create +func (r *PutIndexTemplate) Create(create bool) *PutIndexTemplate { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutIndexTemplate) MasterTimeout(duration string) *PutIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// Cause User defined reason for creating/updating the index template +// API name: cause +func (r *PutIndexTemplate) Cause(cause string) *PutIndexTemplate { + r.values.Set("cause", cause) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutIndexTemplate) ErrorTrace(errortrace bool) *PutIndexTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutIndexTemplate) FilterPath(filterpaths ...string) *PutIndexTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutIndexTemplate) Human(human bool) *PutIndexTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *PutIndexTemplate) Pretty(pretty bool) *PutIndexTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// This setting overrides the value of the `action.auto_create_index` cluster +// setting. +// If set to `true` in a template, then indices can be automatically created +// using that template even if auto-creation of indices is disabled via +// `actions.auto_create_index`. +// If set to `false`, then indices or data streams matching the template must +// always be explicitly created, and may never be automatically created. +// API name: allow_auto_create +func (r *PutIndexTemplate) AllowAutoCreate(allowautocreate bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowAutoCreate = &allowautocreate + + return r +} + +// An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. +// API name: composed_of +func (r *PutIndexTemplate) ComposedOf(composedofs ...string) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + + } + return r +} + +// If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. +// API name: data_stream +func (r *PutIndexTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataStream = datastream.DataStreamVisibilityCaster() + + return r +} + +// Marks this index template as deprecated. When creating or updating a +// non-deprecated index template +// that uses deprecated components, Elasticsearch will emit a deprecation +// warning. +// API name: deprecated +func (r *PutIndexTemplate) Deprecated(deprecated bool) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Deprecated = &deprecated + + return r +} + +// The configuration option ignore_missing_component_templates can be used when +// an index template +// references a component template that might not exist +// API name: ignore_missing_component_templates +func (r *PutIndexTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + + } + return r +} + +// Name of the index template to create. +// API name: index_patterns +func (r *PutIndexTemplate) IndexPatterns(indices ...string) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexPatterns = indices + + return r +} + +// Optional user metadata about the index template. +// It may have any contents. +// It is not automatically generated or used by Elasticsearch. 
+// This user-defined object is stored in the cluster state, so keeping it short +// is preferable +// To unset the metadata, replace the template without specifying it. +// API name: _meta +func (r *PutIndexTemplate) Meta_(metadata types.MetadataVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. +// API name: priority +func (r *PutIndexTemplate) Priority(priority int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Priority = &priority + + return r +} + +// Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. +// API name: template +func (r *PutIndexTemplate) Template(template types.IndexTemplateMappingVariant) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Template = template.IndexTemplateMappingCaster() + + return r +} + +// Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. +// External systems can use these version numbers to simplify template +// management. +// To unset a version, replace the template without specifying one. +// API name: version +func (r *PutIndexTemplate) Version(versionnumber int64) *PutIndexTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &versionnumber + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/request.go new file mode 100644 index 000000000..3970482cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/request.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putindextemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putindextemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L157 +type Request struct { + + // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster + // setting. + // If set to `true` in a template, then indices can be automatically created + // using that template even if auto-creation of indices is disabled via + // `actions.auto_create_index`. + // If set to `false`, then indices or data streams matching the template must + // always be explicitly created, and may never be automatically created. + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // ComposedOf An ordered list of component template names. + // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. + ComposedOf []string `json:"composed_of,omitempty"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` + // Deprecated Marks this index template as deprecated. When creating or updating a + // non-deprecated index template + // that uses deprecated components, Elasticsearch will emit a deprecation + // warning. + Deprecated *bool `json:"deprecated,omitempty"` + // IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when + // an index template + // references a component template that might not exist + IgnoreMissingComponentTemplates []string `json:"ignore_missing_component_templates,omitempty"` + // IndexPatterns Name of the index template to create. + IndexPatterns []string `json:"index_patterns,omitempty"` + // Meta_ Optional user metadata about the index template. + // It may have any contents. + // It is not automatically generated or used by Elasticsearch. + // This user-defined object is stored in the cluster state, so keeping it short + // is preferable + // To unset the metadata, replace the template without specifying it. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). + // This number is not automatically generated by Elasticsearch. + Priority *int64 `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *types.IndexTemplateMapping `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. + // External systems can use these version numbers to simplify template + // management. 
+ // To unset a version, replace the template without specifying one. + Version *int64 `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putindextemplate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_auto_create": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowAutoCreate", err) + } + s.AllowAutoCreate = &value + case bool: + s.AllowAutoCreate = &v + } + + case "composed_of": + if err := dec.Decode(&s.ComposedOf); err != nil { + return fmt.Errorf("%s | %w", "ComposedOf", err) + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "ignore_missing_component_templates": + if err := dec.Decode(&s.IgnoreMissingComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + + case "index_patterns": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + s.IndexPatterns = append(s.IndexPatterns, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "priority": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int64(v) + s.Priority = &f + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/response.go new file mode 100644 index 000000000..a5d290ec3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putindextemplate + +// Response holds the response body struct for the package putindextemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/put_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/put_mapping.go new file mode 100644 index 000000000..b50128ff9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/put_mapping.go @@ -0,0 +1,620 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update field mappings. +// Add new fields to an existing data stream or index. +// You can use the update mapping API to: +// +// - Add a new field to an existing index +// - Update mappings for multiple indices in a single request +// - Add new properties to an object field +// - Enable multi-fields for an existing field +// - Update supported mapping parameters +// - Change a field's mapping using reindexing +// - Rename a field using a field alias +// +// Learn how to use the update mapping API with practical examples in the +// [Update mapping API +// examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) +// guide. 
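
As a usage illustration for the typed update-mapping API vendored here, a minimal sketch follows. It is hedged: the client configuration, index name, and field name are assumptions, and `types.NewKeywordProperty` is assumed to satisfy `types.PropertyVariant` as the signature of the generated `AddProperty` builder requires; only `AddProperty`, `Do`, and the `Acknowledged` response field are taken directly from the code in this diff.

```go
// Hypothetical usage sketch for the typed put-mapping API vendored below.
// The cluster address and the "app-logs" index are illustrative assumptions,
// not part of this diff.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("client: %v", err)
	}

	// Add a new keyword field to an existing index. AddProperty and Do are
	// the generated builder methods; NewKeywordProperty is assumed to
	// satisfy types.PropertyVariant, as AddProperty's parameter requires.
	res, err := es.Indices.PutMapping("app-logs").
		AddProperty("trace_id", types.NewKeywordProperty()).
		Do(context.Background())
	if err != nil {
		log.Fatalf("put mapping: %v", err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```

The same pattern applies to the putindextemplate builders above: construct the request through the typed client, chain the generated setters (which lazily initialize the request body), and finish with `Do`.
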
+package putmapping + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutMapping type alias for index. +type NewPutMapping func(index string) *PutMapping + +// NewPutMappingFunc returns a new instance of PutMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutMappingFunc(tp elastictransport.Interface) NewPutMapping { + return func(index string) *PutMapping { + n := New(tp) + + n._index(index) + + return n + } +} + +// Update field mappings. +// Add new fields to an existing data stream or index. +// You can use the update mapping API to: +// +// - Add a new field to an existing index +// - Update mappings for multiple indices in a single request +// - Add new properties to an object field +// - Enable multi-fields for an existing field +// - Update supported mapping parameters +// - Change a field's mapping using reindexing +// - Rename a field using a field alias +// +// Learn how to use the update mapping API with practical examples in the +// [Update mapping API +// examples](https://www.elastic.co/docs//manage-data/data-store/mapping/update-mappings-examples) +// guide. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping +func New(tp elastictransport.Interface) *PutMapping { + r := &PutMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutMapping) Raw(raw io.Reader) *PutMapping { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutMapping) Request(req *Request) *PutMapping { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutMapping: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_mapping") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_mapping") + if reader := instrument.RecordRequestBody(ctx, "indices.put_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putmapping.Response +func (r PutMapping) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { 
+ if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutMapping headers map. +func (r *PutMapping) Header(key, value string) *PutMapping { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +// API Name: index +func (r *PutMapping) _index(index string) *PutMapping { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *PutMapping) AllowNoIndices(allownoindices bool) *PutMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *PutMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *PutMapping) IgnoreUnavailable(ignoreunavailable bool) *PutMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutMapping) MasterTimeout(duration string) *PutMapping { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutMapping) Timeout(duration string) *PutMapping { + r.values.Set("timeout", duration) + + return r +} + +// WriteIndexOnly If `true`, the mappings are applied only to the current write index for the +// target. 
+// API name: write_index_only +func (r *PutMapping) WriteIndexOnly(writeindexonly bool) *PutMapping { + r.values.Set("write_index_only", strconv.FormatBool(writeindexonly)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutMapping) ErrorTrace(errortrace bool) *PutMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutMapping) FilterPath(filterpaths ...string) *PutMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutMapping) Human(human bool) *PutMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutMapping) Pretty(pretty bool) *PutMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Controls whether dynamic date detection is enabled. +// API name: date_detection +func (r *PutMapping) DateDetection(datedetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DateDetection = &datedetection + + return r +} + +// Controls whether new fields are added dynamically. +// API name: dynamic +func (r *PutMapping) Dynamic(dynamic dynamicmapping.DynamicMapping) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Dynamic = &dynamic + return r +} + +// If date detection is enabled then new string fields are checked +// against 'dynamic_date_formats' and if the value matches then +// a new date field is added instead of string. +// API name: dynamic_date_formats +func (r *PutMapping) DynamicDateFormats(dynamicdateformats ...string) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range dynamicdateformats { + + r.req.DynamicDateFormats = append(r.req.DynamicDateFormats, v) + + } + return r +} + +// Specify dynamic templates for the mapping. +// API name: dynamic_templates +func (r *PutMapping) DynamicTemplates(dynamictemplates []map[string]types.DynamicTemplate) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DynamicTemplates = dynamictemplates + + return r +} + +// Control whether field names are enabled for the index. 
+// API name: _field_names +func (r *PutMapping) FieldNames_(fieldnames_ types.FieldNamesFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FieldNames_ = fieldnames_.FieldNamesFieldCaster() + + return r +} + +// A mapping type can have custom meta data associated with it. These are +// not used at all by Elasticsearch, but can be used to store +// application-specific metadata. +// API name: _meta +func (r *PutMapping) Meta_(metadata types.MetadataVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// Automatically map strings into numeric data types for all fields. +// API name: numeric_detection +func (r *PutMapping) NumericDetection(numericdetection bool) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumericDetection = &numericdetection + + return r +} + +// Mapping for a field. For new fields, this mapping can include: +// +// - Field name +// - Field data type +// - Mapping parameters +// API name: properties +func (r *PutMapping) Properties(properties map[string]types.Property) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Properties = properties + return r +} + +func (r *PutMapping) AddProperty(key string, value types.PropertyVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Property + if r.req.Properties == nil { + r.req.Properties = make(map[string]types.Property) + } else { + tmp = r.req.Properties + } + + tmp[key] = *value.PropertyCaster() + + r.req.Properties = tmp + return r +} + +// Enable making a routing value required on indexed documents. +// API name: _routing +func (r *PutMapping) Routing_(routing_ types.RoutingFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Routing_ = routing_.RoutingFieldCaster() + + return r +} + +// Mapping of runtime fields for the index. +// API name: runtime +func (r *PutMapping) Runtime(runtimefields types.RuntimeFieldsVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Runtime = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// Control whether the _source field is enabled on the index. +// API name: _source +func (r *PutMapping) Source_(source_ types.SourceFieldVariant) *PutMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source_ = source_.SourceFieldCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/request.go new file mode 100644 index 000000000..bd8d329fd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/request.go @@ -0,0 +1,515 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putmapping + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" +) + +// Request holds the request body struct for the package putmapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L41-L164 +type Request struct { + + // DateDetection Controls whether dynamic date detection is enabled. + DateDetection *bool `json:"date_detection,omitempty"` + // Dynamic Controls whether new fields are added dynamically. + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + // DynamicDateFormats If date detection is enabled then new string fields are checked + // against 'dynamic_date_formats' and if the value matches then + // a new date field is added instead of string. + DynamicDateFormats []string `json:"dynamic_date_formats,omitempty"` + // DynamicTemplates Specify dynamic templates for the mapping. + DynamicTemplates []map[string]types.DynamicTemplate `json:"dynamic_templates,omitempty"` + // FieldNames_ Control whether field names are enabled for the index. + FieldNames_ *types.FieldNamesField `json:"_field_names,omitempty"` + // Meta_ A mapping type can have custom meta data associated with it. These are + // not used at all by Elasticsearch, but can be used to store + // application-specific metadata. + Meta_ types.Metadata `json:"_meta,omitempty"` + // NumericDetection Automatically map strings into numeric data types for all fields. + NumericDetection *bool `json:"numeric_detection,omitempty"` + // Properties Mapping for a field. For new fields, this mapping can include: + // + // - Field name + // - Field data type + // - Mapping parameters + Properties map[string]types.Property `json:"properties,omitempty"` + // Routing_ Enable making a routing value required on indexed documents. + Routing_ *types.RoutingField `json:"_routing,omitempty"` + // Runtime Mapping of runtime fields for the index. + Runtime types.RuntimeFields `json:"runtime,omitempty"` + // Source_ Control whether the _source field is enabled on the index. 
+ Source_ *types.SourceField `json:"_source,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Properties: make(map[string]types.Property, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putmapping request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "date_detection": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DateDetection", err) + } + s.DateDetection = &value + case bool: + s.DateDetection = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "dynamic_date_formats": + if err := dec.Decode(&s.DynamicDateFormats); err != nil { + return fmt.Errorf("%s | %w", "DynamicDateFormats", err) + } + + case "dynamic_templates": + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return fmt.Errorf("%s | %w", "DynamicTemplates", err) + } + + case "_field_names": + if err := dec.Decode(&s.FieldNames_); err != nil { + return fmt.Errorf("%s | %w", "FieldNames_", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "numeric_detection": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumericDetection", err) + } + s.NumericDetection = &value + case bool: + s.NumericDetection = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]types.Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := types.NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := types.NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := types.NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := types.NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := types.NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := types.NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := 
types.NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := types.NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := types.NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := types.NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := types.NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := types.NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := types.NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := types.NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := types.NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := types.NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := types.NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := types.NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := types.NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := types.NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := types.NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := types.NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := types.NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := types.NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := types.NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := types.NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "counted_keyword": + oo := types.NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := types.NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := types.NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := types.NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := types.NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := types.NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := types.NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := types.NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := types.NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := types.NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := types.NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := types.NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := types.NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := types.NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := types.NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := types.NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := types.NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := types.NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := types.NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := types.NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "double_range": + oo := types.NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := types.NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := types.NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := types.NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := types.NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := types.NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(types.Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(types.Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return fmt.Errorf("%s | %w", "Routing_", err) + } + + case "runtime": + if err := dec.Decode(&s.Runtime); err != nil { + return fmt.Errorf("%s | %w", "Runtime", err) + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/response.go new file mode 100644 index 000000000..3db956b61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putmapping + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putmapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/put_settings.go new file mode 100644 index 000000000..f891a68a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/put_settings.go @@ -0,0 +1,1327 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update index settings. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index settings documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. +// +// For performance optimization during bulk indexing, you can disable the +// refresh interval. +// Refer to [disable refresh +// interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) +// for an example. +// There are multiple valid ways to represent index settings in the request +// body. You can specify only the setting, for example: +// +// ``` +// +// { +// "number_of_replicas": 1 +// } +// +// ``` +// +// Or you can use an `index` setting object: +// ``` +// +// { +// "index": { +// "number_of_replicas": 1 +// } +// } +// +// ``` +// +// Or you can use dot annotation: +// ``` +// +// { +// "index.number_of_replicas": 1 +// } +// +// ``` +// +// Or you can embed any of the aforementioned options in a `settings` object. +// For example: +// +// ``` +// +// { +// "settings": { +// "index": { +// "number_of_replicas": 1 +// } +// } +// } +// +// ``` +// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. +// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. +// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. 
+// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. +// Refer to [updating analyzers on existing +// indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) +// for step-by-step examples. +package putsettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutSettings type alias for index. +type NewPutSettings func() *PutSettings + +// NewPutSettingsFunc returns a new instance of PutSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutSettingsFunc(tp elastictransport.Interface) NewPutSettings { + return func() *PutSettings { + n := New(tp) + + return n + } +} + +// Update index settings. +// Changes dynamic index settings in real time. +// For data streams, index setting changes are applied to all backing indices by +// default. +// +// To revert a setting to the default value, use a null value. +// The list of per-index settings that can be updated dynamically on live +// indices can be found in index settings documentation. +// To preserve existing settings from being updated, set the `preserve_existing` +// parameter to `true`. +// +// For performance optimization during bulk indexing, you can disable the +// refresh interval. +// Refer to [disable refresh +// interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) +// for an example. +// There are multiple valid ways to represent index settings in the request +// body. You can specify only the setting, for example: +// +// ``` +// +// { +// "number_of_replicas": 1 +// } +// +// ``` +// +// Or you can use an `index` setting object: +// ``` +// +// { +// "index": { +// "number_of_replicas": 1 +// } +// } +// +// ``` +// +// Or you can use dot annotation: +// ``` +// +// { +// "index.number_of_replicas": 1 +// } +// +// ``` +// +// Or you can embed any of the aforementioned options in a `settings` object. +// For example: +// +// ``` +// +// { +// "settings": { +// "index": { +// "number_of_replicas": 1 +// } +// } +// } +// +// ``` +// +// NOTE: You can only define new analyzers on closed indices. +// To add an analyzer, you must close the index, define the analyzer, and reopen +// the index. 
+// You cannot close the write index of a data stream. +// To update the analyzer for a data stream's write index and future backing +// indices, update the analyzer in the index template used by the stream. +// Then roll over the data stream to apply the new analyzer to the stream's +// write index and future backing indices. +// This affects searches and any new data added to the stream after the +// rollover. +// However, it does not affect the data stream's backing indices or their +// existing data. +// To change the analyzer for existing backing indices, you must create a new +// data stream and reindex your data into it. +// Refer to [updating analyzers on existing +// indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) +// for step-by-step examples. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings +func New(tp elastictransport.Interface) *PutSettings { + r := &PutSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSettings) Raw(raw io.Reader) *PutSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSettings) Request(req *Request) *PutSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodPut + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
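
Relating the equivalent body representations listed in the package documentation to the builder methods, here is a hedged sketch that sends the "only the setting" JSON form through the generated `Raw`, `Indices`, and `Do` methods. The cluster address, index name, and the `Acknowledged` field of the response are assumptions; the three builder methods are the ones defined in this file.

```go
// Hypothetical sketch: sending one of the documented JSON bodies through
// the generated PutSettings builder. Raw, Indices, and Do are methods from
// this file; the client setup and index name are illustrative assumptions.
package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("client: %v", err)
	}

	// The "only the setting" body form from the package documentation above;
	// Raw bypasses the typed request struct and takes precedence over it.
	body := strings.NewReader(`{ "number_of_replicas": 1 }`)

	res, err := es.Indices.PutSettings().
		Indices("app-logs").
		Raw(body).
		Do(context.Background())
	if err != nil {
		log.Fatalf("put settings: %v", err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```
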
+func (r PutSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_settings") + if reader := instrument.RecordRequestBody(ctx, "indices.put_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsettings.Response +func (r PutSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutSettings headers map. +func (r *PutSettings) Header(key, value string) *PutSettings { + r.headers.Set(key, value) + + return r +} + +// Indices Comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (`*`). To target all data streams and +// indices, omit this parameter or use `*` or `_all`. +// API Name: index +func (r *PutSettings) Indices(index string) *PutSettings { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. This +// behavior applies even if the request targets other open indices. 
For
+// example, a request targeting `foo*,bar*` returns an error if an index
+// starts with `foo` but no index starts with `bar`.
+// API name: allow_no_indices
+func (r *PutSettings) AllowNoIndices(allownoindices bool) *PutSettings {
+	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))
+
+	return r
+}
+
+// ExpandWildcards Type of index that wildcard patterns can match. If the request can target
+// data streams, this argument determines whether wildcard expressions match
+// hidden data streams. Supports comma-separated values, such as
+// `open,hidden`.
+// API name: expand_wildcards
+func (r *PutSettings) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutSettings {
+	tmp := []string{}
+	for _, item := range expandwildcards {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("expand_wildcards", strings.Join(tmp, ","))
+
+	return r
+}
+
+// FlatSettings If `true`, returns settings in flat format.
+// API name: flat_settings
+func (r *PutSettings) FlatSettings(flatsettings bool) *PutSettings {
+	r.values.Set("flat_settings", strconv.FormatBool(flatsettings))
+
+	return r
+}
+
+// IgnoreUnavailable If `false`, the request returns an error if it targets a
+// missing or closed index.
+// API name: ignore_unavailable
+func (r *PutSettings) IgnoreUnavailable(ignoreunavailable bool) *PutSettings {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node. If no response is
+// received before the timeout expires, the request fails and returns an
+// error.
+// API name: master_timeout
+func (r *PutSettings) MasterTimeout(duration string) *PutSettings {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// PreserveExisting If `true`, existing index settings remain unchanged.
+// API name: preserve_existing
+func (r *PutSettings) PreserveExisting(preserveexisting bool) *PutSettings {
+	r.values.Set("preserve_existing", strconv.FormatBool(preserveexisting))
+
+	return r
+}
+
+// Reopen Whether to close and reopen the index to apply non-dynamic settings.
+// If set to `true` the indices to which the settings are being applied
+// will be closed temporarily and then reopened in order to apply the changes.
+// API name: reopen
+func (r *PutSettings) Reopen(reopen bool) *PutSettings {
+	r.values.Set("reopen", strconv.FormatBool(reopen))
+
+	return r
+}
+
+// Timeout Period to wait for a response. If no response is received before the
+// timeout expires, the request fails and returns an error.
+// API name: timeout
+func (r *PutSettings) Timeout(duration string) *PutSettings {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutSettings) ErrorTrace(errortrace bool) *PutSettings {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutSettings) FilterPath(filterpaths ...string) *PutSettings {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutSettings) Human(human bool) *PutSettings {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted".
+// Use this option for debugging only.
+// API name: pretty
+func (r *PutSettings) Pretty(pretty bool) *PutSettings {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: analysis
+func (r *PutSettings) Analysis(analysis types.IndexSettingsAnalysisVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Analysis = analysis.IndexSettingsAnalysisCaster()
+
+	return r
+}
+
+// Settings to define analyzers, tokenizers, token filters and character
+// filters.
+// Refer to the linked documentation for step-by-step examples of updating
+// analyzers on existing indices.
+// API name: analyze
+func (r *PutSettings) Analyze(analyze types.SettingsAnalyzeVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Analyze = analyze.SettingsAnalyzeCaster()
+
+	return r
+}
+
+// API name: auto_expand_replicas
+func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas any) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.AutoExpandReplicas = autoexpandreplicas
+
+	return r
+}
+
+// API name: blocks
+func (r *PutSettings) Blocks(blocks types.IndexSettingBlocksVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Blocks = blocks.IndexSettingBlocksCaster()
+
+	return r
+}
+
+// API name: check_on_startup
+func (r *PutSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.CheckOnStartup = &checkonstartup
+	return r
+}
+
+// API name: codec
+func (r *PutSettings) Codec(codec string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Codec = &codec
+
+	return r
+}
+
+// API name: creation_date
+func (r *PutSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillisVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.CreationDate = *stringifiedepochtimeunitmillis.StringifiedEpochTimeUnitMillisCaster()
+
+	return r
+}
+
+// API name: creation_date_string
+func (r *PutSettings) CreationDateString(datetime types.DateTimeVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.CreationDateString = *datetime.DateTimeCaster()
+
+	return r
+}
+
+// API name: default_pipeline
+func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.DefaultPipeline = &pipelinename
+
+	return r
+}
+
+// API name: final_pipeline
+func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.FinalPipeline = &pipelinename
+
+	return r
+}
+
+// API name: format
+func (r *PutSettings) Format(format string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Format = &format
+
+	return r
+}
+
+// API name: gc_deletes
+func (r *PutSettings) GcDeletes(duration types.DurationVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.GcDeletes = *duration.DurationCaster()
+
+	return r
+}
+
+// API name: hidden
+func (r *PutSettings) Hidden(hidden string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Hidden = &hidden
+
+	return r
+}
+
+// API name: highlight
+func (r *PutSettings) Highlight(highlight types.SettingsHighlightVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Highlight = highlight.SettingsHighlightCaster()
+
+	return r
+}
+
+// API name: index
+func (r *PutSettings) Index(index types.IndexSettingsVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Index = index.IndexSettingsCaster()
+
+	return r
+}
+
+// API name: IndexSettings
+func (r *PutSettings) IndexSettings(indexsettings map[string]json.RawMessage) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.IndexSettings = indexsettings
+	return r
+}
+
+func (r *PutSettings) AddIndexSetting(key string, value json.RawMessage) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]json.RawMessage
+	if r.req.IndexSettings == nil {
+		// Allocate tmp here too; writing into a nil map would panic.
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.IndexSettings
+	}
+
+	tmp[key] = value
+
+	r.req.IndexSettings = tmp
+	return r
+}
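The IndexSettings/AddIndexSetting helpers above carry settings that have no dedicated typed setter. A hedged sketch of that path; the client construction, index name, and chosen setting key are illustrative assumptions:

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumption
	})
	if err != nil {
		log.Fatal(err)
	}

	// json.RawMessage lets you pass any setting the server understands,
	// even ones without a dedicated typed setter.
	res, err := es.Indices.PutSettings().
		Indices("my-index"). // hypothetical index name
		AddIndexSetting("index.blocks.write", json.RawMessage(`true`)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```

+
+// Configure indexing back pressure limits.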
+// API name: indexing_pressure +func (r *PutSettings) IndexingPressure(indexingpressure types.IndicesIndexingPressureVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexingPressure = indexingpressure.IndicesIndexingPressureCaster() + + return r +} + +// API name: indexing.slowlog +func (r *PutSettings) IndexingSlowlog(indexingslowlog types.IndexingSlowlogSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexingSlowlog = indexingslowlog.IndexingSlowlogSettingsCaster() + + return r +} + +// API name: lifecycle +func (r *PutSettings) Lifecycle(lifecycle types.IndexSettingsLifecycleVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Lifecycle = lifecycle.IndexSettingsLifecycleCaster() + + return r +} + +// API name: load_fixed_bitset_filters_eagerly +func (r *PutSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly + + return r +} + +// Enable or disable dynamic mapping for an index. +// API name: mapping +func (r *PutSettings) Mapping(mapping types.MappingLimitSettingsVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Mapping = mapping.MappingLimitSettingsCaster() + + return r +} + +// API name: max_docvalue_fields_search +func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch + + return r +} + +// API name: max_inner_result_window +func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxInnerResultWindow = &maxinnerresultwindow + + return r +} + +// API name: max_ngram_diff +func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxNgramDiff = &maxngramdiff + + return r +} + +// API name: max_refresh_listeners +func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRefreshListeners = &maxrefreshlisteners + + return r +} + +// API name: max_regex_length +func (r *PutSettings) MaxRegexLength(maxregexlength int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRegexLength = &maxregexlength + + return r +} + +// API name: max_rescore_window +func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxRescoreWindow = &maxrescorewindow + + return r +} + +// API name: max_result_window +func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { + // Initialize 
the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxResultWindow = &maxresultwindow + + return r +} + +// API name: max_script_fields +func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxScriptFields = &maxscriptfields + + return r +} + +// API name: max_shingle_diff +func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxShingleDiff = &maxshinglediff + + return r +} + +// API name: max_slices_per_scroll +func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxSlicesPerScroll = &maxslicesperscroll + + return r +} + +// API name: max_terms_count +func (r *PutSettings) MaxTermsCount(maxtermscount int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxTermsCount = &maxtermscount + + return r +} + +// API name: merge +func (r *PutSettings) Merge(merge types.MergeVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Merge = merge.MergeCaster() + + return r +} + +// API name: mode +func (r *PutSettings) Mode(mode string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Mode = &mode + + return r +} + +// API name: number_of_replicas +func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfReplicas = &numberofreplicas + + return r +} + +// API name: number_of_routing_shards +func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfRoutingShards = &numberofroutingshards + + return r +} + +// API name: number_of_shards +func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfShards = &numberofshards + + return r +} + +// API name: priority +func (r *PutSettings) Priority(priority string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Priority = &priority + + return r +} + +// API name: provided_name +func (r *PutSettings) ProvidedName(name string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ProvidedName = &name + + return r +} + +// API name: queries +func (r *PutSettings) Queries(queries types.QueriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Queries = queries.QueriesCaster() + + return r +} + +// API name: query_string +func (r *PutSettings) QueryString(querystring types.SettingsQueryStringVariant) *PutSettings { + // Initialize the request if it is not already 
initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.QueryString = querystring.SettingsQueryStringCaster()
+
+	return r
+}
+
+// API name: refresh_interval
+func (r *PutSettings) RefreshInterval(duration types.DurationVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.RefreshInterval = *duration.DurationCaster()
+
+	return r
+}
+
+// API name: routing
+func (r *PutSettings) Routing(routing types.IndexRoutingVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Routing = routing.IndexRoutingCaster()
+
+	return r
+}
+
+// API name: routing_partition_size
+func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.StringifiedintegerVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.RoutingPartitionSize = *stringifiedinteger.StringifiedintegerCaster()
+
+	return r
+}
+
+// API name: routing_path
+func (r *PutSettings) RoutingPath(routingpaths ...string) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	// Assign the variadic arguments directly; pre-allocating a slice of the
+	// same length only to overwrite it was redundant.
+	r.req.RoutingPath = routingpaths
+
+	return r
+}
+
+// API name: search
+func (r *PutSettings) Search(search types.SettingsSearchVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Search = search.SettingsSearchCaster()
+
+	return r
+}
+
+// API name: settings
+func (r *PutSettings) Settings(settings types.IndexSettingsVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Settings = settings.IndexSettingsCaster()
+
+	return r
+}
+
+// Configure custom similarity settings to customize how search results are
+// scored.
+// API name: similarity
+func (r *PutSettings) Similarity(similarity map[string]types.SettingsSimilarity) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Similarity = similarity
+	return r
+}
+
+func (r *PutSettings) AddSimilarity(key string, value types.SettingsSimilarityVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.SettingsSimilarity
+	if r.req.Similarity == nil {
+		// Allocate tmp here too; writing into a nil map would panic.
+		tmp = make(map[string]types.SettingsSimilarity)
+	} else {
+		tmp = r.req.Similarity
+	}
+
+	tmp[key] = *value.SettingsSimilarityCaster()
+
+	r.req.Similarity = tmp
+	return r
+}
+
+// API name: soft_deletes
+func (r *PutSettings) SoftDeletes(softdeletes types.SoftDeletesVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.SoftDeletes = softdeletes.SoftDeletesCaster()
+
+	return r
+}
+
+// API name: sort
+func (r *PutSettings) Sort(sort types.IndexSegmentSortVariant) *PutSettings {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Sort = sort.IndexSegmentSortCaster()
+
+	return r
+}
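When a deeply nested settings payload is easier to write as literal JSON than through the typed setters above, the Raw method shown earlier can carry a hand-built body. A sketch under the same local-cluster assumption; the index name and body are illustrative:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumption
	})
	if err != nil {
		log.Fatal(err)
	}

	// Raw bypasses the typed request entirely; the reader's bytes are sent
	// as-is as the request body.
	body := `{"index":{"refresh_interval":"30s"}}`
	res, err := es.Indices.PutSettings().
		Indices("my-index"). // hypothetical index name
		Raw(strings.NewReader(body)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```

+
+// The store module allows you to control how index data is stored and accessed
+// on disk.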
+// API name: store +func (r *PutSettings) Store(store types.StorageVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Store = store.StorageCaster() + + return r +} + +// API name: time_series +func (r *PutSettings) TimeSeries(timeseries types.IndexSettingsTimeSeriesVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TimeSeries = timeseries.IndexSettingsTimeSeriesCaster() + + return r +} + +// API name: top_metrics_max_size +func (r *PutSettings) TopMetricsMaxSize(topmetricsmaxsize int) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TopMetricsMaxSize = &topmetricsmaxsize + + return r +} + +// API name: translog +func (r *PutSettings) Translog(translog types.TranslogVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Translog = translog.TranslogCaster() + + return r +} + +// API name: uuid +func (r *PutSettings) Uuid(uuid string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Uuid = &uuid + + return r +} + +// API name: verified_before_close +func (r *PutSettings) VerifiedBeforeClose(verifiedbeforeclose string) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.VerifiedBeforeClose = &verifiedbeforeclose + + return r +} + +// API name: version +func (r *PutSettings) Version(version types.IndexVersioningVariant) *PutSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = version.IndexVersioningCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/request.go new file mode 100644 index 000000000..7a04a6eac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L165 +type Request = types.IndexSettings + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewIndexSettings() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/response.go new file mode 100644 index 000000000..05c1dc478 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsettings + +// Response holds the response body struct for the package putsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/put_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/put_template.go new file mode 100644 index 000000000..36de0cf09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/put_template.go @@ -0,0 +1,545 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a legacy index template. +// Index templates define settings, mappings, and aliases that can be applied +// automatically to new indices. +// Elasticsearch applies templates to new indices based on an index pattern that +// matches the index name. +// +// IMPORTANT: This documentation is about legacy index templates, which are +// deprecated and will be replaced by the composable templates introduced in +// Elasticsearch 7.8. +// +// Composable templates always take precedence over legacy templates. +// If no composable template matches a new index, matching legacy templates are +// applied according to their order. +// +// Index templates are only applied during index creation. +// Changes to index templates do not affect existing indices. +// Settings and mappings specified in create index API requests override any +// settings or mappings specified in an index template. +// +// You can use C-style `/* *\/` block comments in index templates. +// You can include comments anywhere in the request body, except before the +// opening curly bracket. +// +// **Indices matching multiple templates** +// +// Multiple index templates can potentially match an index, in this case, both +// the settings and mappings are merged into the final configuration of the +// index. +// The order of the merging can be controlled using the order parameter, with +// lower order being applied first, and higher orders overriding them. +// NOTE: Multiple matching templates with the same order value will result in a +// non-deterministic merging order. +package puttemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTemplate type alias for index. +type NewPutTemplate func(name string) *PutTemplate + +// NewPutTemplateFunc returns a new instance of PutTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTemplateFunc(tp elastictransport.Interface) NewPutTemplate { + return func(name string) *PutTemplate { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update a legacy index template. 
+// Index templates define settings, mappings, and aliases that can be applied
+// automatically to new indices.
+// Elasticsearch applies templates to new indices based on an index pattern that
+// matches the index name.
+//
+// IMPORTANT: This documentation is about legacy index templates, which are
+// deprecated and will be replaced by the composable templates introduced in
+// Elasticsearch 7.8.
+//
+// Composable templates always take precedence over legacy templates.
+// If no composable template matches a new index, matching legacy templates are
+// applied according to their order.
+//
+// Index templates are only applied during index creation.
+// Changes to index templates do not affect existing indices.
+// Settings and mappings specified in create index API requests override any
+// settings or mappings specified in an index template.
+//
+// You can use C-style `/* *\/` block comments in index templates.
+// You can include comments anywhere in the request body, except before the
+// opening curly bracket.
+//
+// **Indices matching multiple templates**
+//
+// Multiple index templates can potentially match an index, in this case, both
+// the settings and mappings are merged into the final configuration of the
+// index.
+// The order of the merging can be controlled using the order parameter, with
+// lower order being applied first, and higher orders overriding them.
+// NOTE: Multiple matching templates with the same order value will result in a
+// non-deterministic merging order.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template
+func New(tp elastictransport.Interface) *PutTemplate {
+	r := &PutTemplate{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed as the body of the
+// http.Request. If specified, Raw takes precedence over the Request method.
+func (r *PutTemplate) Raw(raw io.Reader) *PutTemplate {
+	r.raw = raw
+
+	return r
+}
+
+// Request sets the request property with the appropriate payload.
+func (r *PutTemplate) Request(req *Request) *PutTemplate {
+	r.req = req
+
+	return r
+}
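A hedged sketch of creating a legacy template through the v9 typed client; the template name, pattern, and order value are illustrative. Note that the builder is obtained with the template name, matching NewPutTemplateFunc above:

```go
package main

import (
	"context"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumption
	})
	if err != nil {
		log.Fatal(err)
	}

	// Legacy templates match new indices by pattern; a lower order is
	// merged first and can be overridden by higher-order templates.
	res, err := es.Indices.PutTemplate("logs-template"). // hypothetical name
		IndexPatterns("logs-*").
		Order(1).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```

+
+// HttpRequest returns the http.Request object built from the
+// given parameters.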
+func (r *PutTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutTemplate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_template") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.put_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.put_template") + if reader := instrument.RecordRequestBody(ctx, "indices.put_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.put_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttemplate.Response +func (r PutTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.put_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err 
!= nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the PutTemplate headers map.
+func (r *PutTemplate) Header(key, value string) *PutTemplate {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Name The name of the template
+// API Name: name
+func (r *PutTemplate) _name(name string) *PutTemplate {
+	r.paramSet |= nameMask
+	r.name = name
+
+	return r
+}
+
+// Create If true, this request cannot replace or update existing index templates.
+// API name: create
+func (r *PutTemplate) Create(create bool) *PutTemplate {
+	r.values.Set("create", strconv.FormatBool(create))
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node. If no response is
+// received before the timeout expires, the request fails and returns an error.
+// API name: master_timeout
+func (r *PutTemplate) MasterTimeout(duration string) *PutTemplate {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Cause User defined reason for creating/updating the index template
+// API name: cause
+func (r *PutTemplate) Cause(cause string) *PutTemplate {
+	r.values.Set("cause", cause)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutTemplate) ErrorTrace(errortrace bool) *PutTemplate {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutTemplate) FilterPath(filterpaths ...string) *PutTemplate {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutTemplate) Human(human bool) *PutTemplate {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted".
+// Use this option for debugging only.
+// API name: pretty
+func (r *PutTemplate) Pretty(pretty bool) *PutTemplate {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Aliases for the index.
+// API name: aliases
+func (r *PutTemplate) Aliases(aliases map[string]types.Alias) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aliases = aliases
+	return r
+}
+
+func (r *PutTemplate) AddAlias(key string, value types.AliasVariant) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.Alias
+	if r.req.Aliases == nil {
+		// Allocate tmp here too; writing into a nil map would panic.
+		tmp = make(map[string]types.Alias)
+	} else {
+		tmp = r.req.Aliases
+	}
+
+	tmp[key] = *value.AliasCaster()
+
+	r.req.Aliases = tmp
+	return r
+}
+
+// Array of wildcard expressions used to match the names
+// of indices during creation.
+// API name: index_patterns
+func (r *PutTemplate) IndexPatterns(indexpatterns ...string) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	// Assign the variadic arguments directly; pre-allocating a slice of the
+	// same length only to overwrite it was redundant.
+	r.req.IndexPatterns = indexpatterns
+
+	return r
+}
+
+// Mapping for fields in the index.
+// API name: mappings
+func (r *PutTemplate) Mappings(mappings types.TypeMappingVariant) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Mappings = mappings.TypeMappingCaster()
+
+	return r
+}
+
+// Order in which Elasticsearch applies this template if index
+// matches multiple templates.
+//
+// Templates with lower 'order' values are merged first. Templates with higher
+// 'order' values are merged later, overriding templates with lower values.
+// API name: order
+func (r *PutTemplate) Order(order int) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Order = &order
+
+	return r
+}
+
+// Configuration options for the index.
+// API name: settings
+func (r *PutTemplate) Settings(settings types.IndexSettingsVariant) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Settings = settings.IndexSettingsCaster()
+
+	return r
+}
+
+// Version number used to manage index templates externally. This number
+// is not automatically generated by Elasticsearch.
+// To unset a version, replace the template without specifying one.
+// API name: version
+func (r *PutTemplate) Version(versionnumber int64) *PutTemplate {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Version = &versionnumber
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/request.go
new file mode 100644
index 000000000..d476017f5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/request.go
@@ -0,0 +1,153 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package puttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L124 +type Request struct { + + // Aliases Aliases for the index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // IndexPatterns Array of wildcard expressions used to match the names + // of indices during creation. + IndexPatterns []string `json:"index_patterns,omitempty"` + // Mappings Mapping for fields in the index. + Mappings *types.TypeMapping `json:"mappings,omitempty"` + // Order Order in which Elasticsearch applies this template if index + // matches multiple templates. + // + // Templates with lower 'order' values are merged first. Templates with higher + // 'order' values are merged later, overriding templates with lower values. + Order *int `json:"order,omitempty"` + // Settings Configuration options for the index. + Settings *types.IndexSettings `json:"settings,omitempty"` + // Version Version number used to manage index templates externally. This number + // is not automatically generated by Elasticsearch. + // To unset a version, replace the template without specifying one. 
+ Version *int64 `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aliases: make(map[string]types.Alias, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puttemplate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]types.Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "index_patterns": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + s.IndexPatterns = append(s.IndexPatterns, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "order": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = &value + case float64: + f := int(v) + s.Order = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/response.go new file mode 100644 index 000000000..a57613007 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
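The FromJSON helper above makes it possible to keep a legacy template as a JSON document and load it into the typed Request. A sketch; the template body and names are illustrative assumptions:

```go
package main

import (
	"context"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumption
	})
	if err != nil {
		log.Fatal(err)
	}

	// FromJSON turns a literal template body into a typed Request, which is
	// handy when the template is maintained as a JSON document.
	req, err := puttemplate.NewRequest().FromJSON(`{
	  "index_patterns": ["metrics-*"],
	  "order": 2,
	  "settings": {"number_of_shards": "1"}
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Indices.PutTemplate("metrics-template"). // hypothetical name
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged: %v", res.Acknowledged)
}
```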
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttemplate + +// Response holds the response body struct for the package puttemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/recovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/recovery.go new file mode 100644 index 000000000..b27b11252 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/recovery.go @@ -0,0 +1,470 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more +// indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. +// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. +// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. 
+// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. +package recovery + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Recovery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRecovery type alias for index. +type NewRecovery func() *Recovery + +// NewRecoveryFunc returns a new instance of Recovery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRecoveryFunc(tp elastictransport.Interface) NewRecovery { + return func() *Recovery { + n := New(tp) + + return n + } +} + +// Get index recovery information. +// Get information about ongoing and completed shard recoveries for one or more +// indices. +// For data streams, the API returns information for the stream's backing +// indices. +// +// All recoveries, whether ongoing or complete, are kept in the cluster state +// and may be reported on at any time. +// +// Shard recovery is the process of initializing a shard copy, such as restoring +// a primary shard from a snapshot or creating a replica shard from a primary +// shard. +// When a shard recovery completes, the recovered shard is available for search +// and indexing. +// +// Recovery automatically occurs during the following processes: +// +// * When creating an index for the first time. +// * When a node rejoins the cluster and starts up any missing primary shard +// copies using the data that it holds in its data path. +// * Creation of new replica shard copies from the primary. +// * Relocation of a shard copy to a different node in the same cluster. +// * A snapshot restore operation. +// * A clone, shrink, or split operation. +// +// You can determine the cause of a shard recovery using the recovery or cat +// recovery APIs. +// +// The index recovery API reports information about completed recoveries only +// for shard copies that currently exist in the cluster. +// It only reports the last recovery for each shard copy and does not report +// historical information about earlier recoveries, nor does it report +// information about the recoveries of shard copies that no longer exist. +// This means that if a shard copy completes a recovery and then Elasticsearch +// relocates it onto a different node then the information about the original +// recovery will not be shown in the recovery API. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery +func New(tp elastictransport.Interface) *Recovery { + r := &Recovery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Recovery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_recovery") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_recovery") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Recovery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.recovery") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.recovery") + if reader := instrument.RecordRequestBody(ctx, "indices.recovery", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.recovery") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Recovery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a recovery.Response +func (r Recovery) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.recovery") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
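Do, defined above, decodes a successful body straight into the map-valued recovery.Response from response.go. A minimal usage sketch, not part of the vendored file, assuming the module's typed client entry point (elasticsearch.NewTypedClient) and a placeholder cluster address:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Placeholder address; point this at a reachable cluster.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatalf("creating typed client: %v", err)
	}

	// Index and ActiveOnly map onto the path and query parameters documented
	// above; Do sends GET /my-index/_recovery?active_only=true and decodes.
	res, err := es.Indices.Recovery().
		Index("my-index").
		ActiveOnly(true).
		Do(context.Background())
	if err != nil {
		log.Fatalf("indices.recovery: %v", err)
	}

	// recovery.Response is a map keyed by index name.
	for index, status := range res {
		fmt.Printf("%s: %d shard recoveries reported\n", index, len(status.Shards))
	}
}

The builder methods mirror the REST parameters one-to-one, so each setter call below corresponds directly to a documented path or query parameter.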
+func (r Recovery) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.recovery") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Recovery query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Recovery headers map. +func (r *Recovery) Header(key, value string) *Recovery { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Recovery) Index(index string) *Recovery { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ActiveOnly If `true`, the response only includes ongoing shard recoveries. +// API name: active_only +func (r *Recovery) ActiveOnly(activeonly bool) *Recovery { + r.values.Set("active_only", strconv.FormatBool(activeonly)) + + return r +} + +// Detailed If `true`, the response includes detailed information about shard recoveries. +// API name: detailed +func (r *Recovery) Detailed(detailed bool) *Recovery { + r.values.Set("detailed", strconv.FormatBool(detailed)) + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Recovery) AllowNoIndices(allownoindices bool) *Recovery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Recovery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Recovery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Recovery) IgnoreUnavailable(ignoreunavailable bool) *Recovery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Recovery) ErrorTrace(errortrace bool) *Recovery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Recovery) FilterPath(filterpaths ...string) *Recovery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Recovery) Human(human bool) *Recovery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Recovery) Pretty(pretty bool) *Recovery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/response.go new file mode 100644 index 000000000..d49f433f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package recovery + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package recovery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L27 + +type Response map[string]types.RecoveryStatus + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/refresh.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/refresh.go new file mode 100644 index 000000000..fed0f96f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/refresh.go @@ -0,0 +1,432 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Refresh an index. +// A refresh makes recent operations performed on one or more indices available +// for search. +// For data streams, the API runs the refresh operation on the stream’s backing +// indices. +// +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. +// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. +package refresh + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Refresh struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRefresh type alias for index. +type NewRefresh func() *Refresh + +// NewRefreshFunc returns a new instance of Refresh with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRefreshFunc(tp elastictransport.Interface) NewRefresh { + return func() *Refresh { + n := New(tp) + + return n + } +} + +// Refresh an index. +// A refresh makes recent operations performed on one or more indices available +// for search. +// For data streams, the API runs the refresh operation on the stream’s backing +// indices. 
+// +// By default, Elasticsearch periodically refreshes indices every second, but +// only on indices that have received one search request or more in the last 30 +// seconds. +// You can change this default interval with the `index.refresh_interval` +// setting. +// +// Refresh requests are synchronous and do not return a response until the +// refresh operation completes. +// +// Refreshes are resource-intensive. +// To ensure good cluster performance, it's recommended to wait for +// Elasticsearch's periodic refresh rather than performing an explicit refresh +// when possible. +// +// If your application workflow indexes documents and then runs a search to +// retrieve the indexed document, it's recommended to use the index API's +// `refresh=wait_for` query parameter option. +// This option ensures the indexing operation waits for a periodic refresh +// before running the search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh +func New(tp elastictransport.Interface) *Refresh { + r := &Refresh{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Refresh) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_refresh") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_refresh") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Refresh) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.refresh") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.refresh") + if reader := instrument.RecordRequestBody(ctx, "indices.refresh", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.refresh") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Refresh query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a refresh.Response +func (r Refresh) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.refresh") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
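The Refresh builder follows the same Do/IsSuccess shape. A hedged sketch that exercises both paths, assuming es is a typed client constructed as in the recovery sketch above:

// Assumes the imports from the recovery sketch; es is the same typed client.
func refreshIndex(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.Refresh().Index("my-index").Do(ctx)
	if err != nil {
		return fmt.Errorf("indices.refresh: %w", err)
	}
	if res.Shards_ != nil {
		// Shards_ is the decoded _shards statistics block from response.go.
		fmt.Printf("refresh: %v successful, %v failed\n",
			res.Shards_.Successful, res.Shards_.Failed)
	}

	// IsSuccess trades the decoded body for a quick boolean, as documented.
	ok, err := es.Indices.Refresh().Index("my-index").IsSuccess(ctx)
	if err != nil {
		return err
	}
	fmt.Println("refresh succeeded:", ok)
	return nil
}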
+func (r Refresh) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.refresh") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Refresh query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Refresh headers map. +func (r *Refresh) Header(key, value string) *Refresh { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Refresh) Index(index string) *Refresh { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Refresh) AllowNoIndices(allownoindices bool) *Refresh { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Refresh) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Refresh { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Refresh) IgnoreUnavailable(ignoreunavailable bool) *Refresh { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Refresh) ErrorTrace(errortrace bool) *Refresh { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Refresh) FilterPath(filterpaths ...string) *Refresh { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Refresh) Human(human bool) *Refresh { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Refresh) Pretty(pretty bool) *Refresh { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/response.go new file mode 100644 index 000000000..a02fbcc96 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package refresh + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package refresh +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L25 +type Response struct { + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go new file mode 100644 index 000000000..312bb29c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go @@ -0,0 +1,428 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. +// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. +// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. +package reloadsearchanalyzers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ReloadSearchAnalyzers struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewReloadSearchAnalyzers type alias for index. +type NewReloadSearchAnalyzers func(index string) *ReloadSearchAnalyzers + +// NewReloadSearchAnalyzersFunc returns a new instance of ReloadSearchAnalyzers with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewReloadSearchAnalyzersFunc(tp elastictransport.Interface) NewReloadSearchAnalyzers { + return func(index string) *ReloadSearchAnalyzers { + n := New(tp) + + n._index(index) + + return n + } +} + +// Reload search analyzers. +// Reload an index's search analyzers and their resources. +// For data streams, the API reloads search analyzers and resources for the +// stream's backing indices. +// +// IMPORTANT: After reloading the search analyzers you should clear the request +// cache to make sure it doesn't contain responses derived from the previous +// versions of the analyzer. +// +// You can use the reload search analyzers API to pick up changes to synonym +// files used in the `synonym_graph` or `synonym` token filter of a search +// analyzer. 
+// To be eligible, the token filter must have an `updateable` flag of `true` and +// only be used in search analyzers. +// +// NOTE: This API does not perform a reload for each shard of an index. +// Instead, it performs a reload for each node containing index shards. +// As a result, the total shard count returned by the API can differ from the +// number of index shards. +// Because reloading affects every node with an index shard, it is important to +// update the synonym file on every data node in the cluster--including nodes +// that don't contain a shard replica--before using this API. +// This ensures the synonym file is updated everywhere in the cluster in case +// shards are relocated in the future. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers +func New(tp elastictransport.Interface) *ReloadSearchAnalyzers { + r := &ReloadSearchAnalyzers{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ReloadSearchAnalyzers) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_reload_search_analyzers") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ReloadSearchAnalyzers) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.reload_search_analyzers") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.reload_search_analyzers") + if reader := instrument.RecordRequestBody(ctx, "indices.reload_search_analyzers", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.reload_search_analyzers") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ReloadSearchAnalyzers query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a reloadsearchanalyzers.Response +func (r ReloadSearchAnalyzers) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.reload_search_analyzers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
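Since NewReloadSearchAnalyzersFunc requires the index up front, a call site supplies the index name when the builder is created. A short sketch, with the caveat that the ReloadDetails field names are read off the generated types package rather than this file:

// Assumes es is an *elasticsearch.TypedClient as in the recovery sketch;
// "my-index" is a placeholder.
func reloadAnalyzers(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.ReloadSearchAnalyzers("my-index").Do(ctx)
	if err != nil {
		return fmt.Errorf("indices.reload_search_analyzers: %w", err)
	}
	for _, d := range res.ReloadDetails {
		// Field names assumed from the generated types package.
		fmt.Printf("index %s: %d analyzers reloaded on %d nodes\n",
			d.Index, len(d.ReloadedAnalyzers), len(d.ReloadedNodeIds))
	}
	return nil
}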
+func (r ReloadSearchAnalyzers) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.reload_search_analyzers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ReloadSearchAnalyzers query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ReloadSearchAnalyzers headers map. +func (r *ReloadSearchAnalyzers) Header(key, value string) *ReloadSearchAnalyzers { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of index names to reload analyzers for +// API Name: index +func (r *ReloadSearchAnalyzers) _index(index string) *ReloadSearchAnalyzers { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. (This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *ReloadSearchAnalyzers) AllowNoIndices(allownoindices bool) *ReloadSearchAnalyzers { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. +// API name: expand_wildcards +func (r *ReloadSearchAnalyzers) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ReloadSearchAnalyzers { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable +// (missing or closed) +// API name: ignore_unavailable +func (r *ReloadSearchAnalyzers) IgnoreUnavailable(ignoreunavailable bool) *ReloadSearchAnalyzers { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Resource Changed resource to reload analyzers from if applicable +// API name: resource +func (r *ReloadSearchAnalyzers) Resource(resource string) *ReloadSearchAnalyzers { + r.values.Set("resource", resource) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ReloadSearchAnalyzers) ErrorTrace(errortrace bool) *ReloadSearchAnalyzers { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ReloadSearchAnalyzers) FilterPath(filterpaths ...string) *ReloadSearchAnalyzers { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ReloadSearchAnalyzers) Human(human bool) *ReloadSearchAnalyzers { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ReloadSearchAnalyzers) Pretty(pretty bool) *ReloadSearchAnalyzers { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/response.go new file mode 100644 index 000000000..2ad5cc181 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reloadsearchanalyzers + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package reloadsearchanalyzers +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L25 +type Response struct { + ReloadDetails []types.ReloadDetails `json:"reload_details"` + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/remove_block.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/remove_block.go new file mode 100644 index 000000000..6a73449fd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/remove_block.go @@ -0,0 +1,439 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Remove an index block. +// +// Remove an index block from an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +package removeblock + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 + + blockMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RemoveBlock struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + block string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRemoveBlock type alias for index. +type NewRemoveBlock func(index, block string) *RemoveBlock + +// NewRemoveBlockFunc returns a new instance of RemoveBlock with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRemoveBlockFunc(tp elastictransport.Interface) NewRemoveBlock { + return func(index, block string) *RemoveBlock { + n := New(tp) + + n._index(index) + + n._block(block) + + return n + } +} + +// Remove an index block. +// +// Remove an index block from an index. +// Index blocks limit the operations allowed on an index by blocking specific +// operation types. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block +func New(tp elastictransport.Interface) *RemoveBlock { + r := &RemoveBlock{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *RemoveBlock) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|blockMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_block") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "block", r.block) + } + path.WriteString(r.block) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r RemoveBlock) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.remove_block") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.remove_block") + if reader := instrument.RecordRequestBody(ctx, "indices.remove_block", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.remove_block") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RemoveBlock query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a removeblock.Response +func (r RemoveBlock) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.remove_block") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r RemoveBlock) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.remove_block") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RemoveBlock query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RemoveBlock headers map. +func (r *RemoveBlock) Header(key, value string) *RemoveBlock { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list or wildcard expression of index names used to limit +// the request. +// By default, you must explicitly name the indices you are removing blocks +// from. +// To allow the removal of blocks from indices with `_all`, `*`, or other +// wildcard expressions, change the `action.destructive_requires_name` setting +// to `false`. +// You can update this setting in the `elasticsearch.yml` file or by using the +// cluster update settings API. +// API Name: index +func (r *RemoveBlock) _index(index string) *RemoveBlock { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Block The block type to remove from the index. +// API Name: block +func (r *RemoveBlock) _block(block string) *RemoveBlock { + r.paramSet |= blockMask + r.block = block + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. +// API name: allow_no_indices +func (r *RemoveBlock) AllowNoIndices(allownoindices bool) *RemoveBlock { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards The type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// It supports comma-separated values, such as `open,hidden`. 
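Both path parameters are mandatory for this endpoint, so the builder takes the index and the block type together. A hedged sketch of the whole flow, with "my-index" and the "write" block as placeholders:

// Assumes es is an *elasticsearch.TypedClient; "my-index" and the "write"
// block type are placeholders (valid block types include read, write,
// read_only, and metadata).
func removeWriteBlock(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.RemoveBlock("my-index", "write").Do(ctx)
	if err != nil {
		return fmt.Errorf("indices.remove_block: %w", err)
	}
	fmt.Printf("acknowledged=%t, %d indices updated\n",
		res.Acknowledged, len(res.Indices))
	return nil
}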
+// API name: expand_wildcards +func (r *RemoveBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *RemoveBlock { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *RemoveBlock) IgnoreUnavailable(ignoreunavailable bool) *RemoveBlock { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: master_timeout +func (r *RemoveBlock) MasterTimeout(duration string) *RemoveBlock { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// It can also be set to `-1` to indicate that the request should never timeout. +// API name: timeout +func (r *RemoveBlock) Timeout(duration string) *RemoveBlock { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RemoveBlock) ErrorTrace(errortrace bool) *RemoveBlock { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RemoveBlock) FilterPath(filterpaths ...string) *RemoveBlock { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RemoveBlock) Human(human bool) *RemoveBlock { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RemoveBlock) Pretty(pretty bool) *RemoveBlock { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/response.go new file mode 100644 index 000000000..abcebf9cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package removeblock + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package removeblock +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/remove_block/IndicesRemoveBlockResponse.ts#L23-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Indices []types.RemoveIndicesBlockStatus `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/resolve_cluster.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/resolve_cluster.go new file mode 100644 index 000000000..b53b35932 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/resolve_cluster.go @@ -0,0 +1,597 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Resolve the cluster. +// +// Resolve the specified index expressions to return information about each +// cluster, including the local "querying" cluster, if included. +// If no index expression is provided, the API will return information about all +// the remote clusters that are configured on the querying cluster. +// +// This endpoint is useful before doing a cross-cluster search in order to +// determine which remote clusters should be included in a search. +// +// You use the same index expression with this endpoint as you would for +// cross-cluster search. +// Index and cluster exclusions are also supported with this endpoint. 
+//
+// For each cluster in the index expression, information is returned about:
+//
+// * Whether the querying ("local") cluster is currently connected to each
+// remote cluster specified in the index expression. Note that this endpoint
+// actively attempts to contact the remote clusters, unlike the `remote/info`
+// endpoint.
+// * Whether each remote cluster is configured with `skip_unavailable` as `true`
+// or `false`.
+// * Whether there are any indices, aliases, or data streams on that cluster
+// that match the index expression.
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
+//
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. If it causes a
+// problem, you can
+// instead include an index expression like `*:*` to bypass the issue.
+//
+// ## Advantages of using this endpoint before a cross-cluster search
+//
+// You may want to exclude a cluster or index from a search when:
+//
+// * A remote cluster is not currently connected and is configured with
+// `skip_unavailable=false`. Running a cross-cluster search under those
+// conditions will cause the entire search to fail.
+// * A cluster has no matching indices, aliases or data streams for the index
+// expression (or your user does not have permissions to search them). For
+// example, suppose your index expression is `logs*,remote1:logs*` and the
+// remote1 cluster has no indices, aliases or data streams that match `logs*`.
+// In that case, that cluster will return no results from that cluster if you
+// include it in a cross-cluster search.
+// * The index expression (combined with any query parameters you specify) will
+// likely cause an exception to be thrown when you do the search. In these
+// cases, the "error" field in the `_resolve/cluster` response will be present.
+// (This is also where security/permission errors will be shown.)
+// * A remote cluster is an older version that does not support the feature you
+// want to use in your search.
+//
+// ## Test availability of remote clusters
+//
+// The `remote/info` endpoint is commonly used to test whether the "local"
+// cluster (the cluster being queried) is connected to its remote clusters, but
+// it does not necessarily reflect whether the remote cluster is available or
+// not.
+// The remote cluster may be available, while the local cluster is not currently
+// connected to it.
+//
+// You can use the `_resolve/cluster` API to attempt to reconnect to remote
+// clusters.
+// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+// The `connected` field in the response will indicate whether it was
+// successful.
+// If a connection was (re-)established, this will also cause the `remote/info`
+// endpoint to now indicate a connected status.
+package resolvecluster
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type ResolveCluster struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewResolveCluster type alias for index.
+type NewResolveCluster func() *ResolveCluster
+
+// NewResolveClusterFunc returns a new instance of ResolveCluster with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewResolveClusterFunc(tp elastictransport.Interface) NewResolveCluster {
+	return func() *ResolveCluster {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Resolve the cluster.
+//
+// Resolve the specified index expressions to return information about each
+// cluster, including the local "querying" cluster, if included.
+// If no index expression is provided, the API will return information about all
+// the remote clusters that are configured on the querying cluster.
+//
+// This endpoint is useful before doing a cross-cluster search in order to
+// determine which remote clusters should be included in a search.
+//
+// You use the same index expression with this endpoint as you would for
+// cross-cluster search.
+// Index and cluster exclusions are also supported with this endpoint.
+//
+// For each cluster in the index expression, information is returned about:
+//
+// * Whether the querying ("local") cluster is currently connected to each
+// remote cluster specified in the index expression. Note that this endpoint
+// actively attempts to contact the remote clusters, unlike the `remote/info`
+// endpoint.
+// * Whether each remote cluster is configured with `skip_unavailable` as `true`
+// or `false`.
+// * Whether there are any indices, aliases, or data streams on that cluster
+// that match the index expression.
+// * Whether the search is likely to have errors returned when you do the
+// cross-cluster search (including any authorization errors if you do not have
+// permission to query the index).
+// * Cluster version information, including the Elasticsearch server version.
+//
+// For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns
+// information about the local cluster and all remotely configured clusters that
+// start with the alias `cluster*`.
+// Each cluster returns information about whether it has any indices, aliases or
+// data streams that match `my-index-*`.
+//
+// ## Note on backwards compatibility
+// The ability to query without an index expression was added in version 8.18,
+// so when
+// querying remote clusters older than that, the local cluster will send the
+// index
+// expression `dummy*` to those remote clusters. Thus, if errors occur, you
+// may see a reference
+// to that index expression even though you didn't request it. 
If it causes a +// problem, you can +// instead include an index expression like `*:*` to bypass the issue. +// +// ## Advantages of using this endpoint before a cross-cluster search +// +// You may want to exclude a cluster or index from a search when: +// +// * A remote cluster is not currently connected and is configured with +// `skip_unavailable=false`. Running a cross-cluster search under those +// conditions will cause the entire search to fail. +// * A cluster has no matching indices, aliases or data streams for the index +// expression (or your user does not have permissions to search them). For +// example, suppose your index expression is `logs*,remote1:logs*` and the +// remote1 cluster has no indices, aliases or data streams that match `logs*`. +// In that case, that cluster will return no results from that cluster if you +// include it in a cross-cluster search. +// * The index expression (combined with any query parameters you specify) will +// likely cause an exception to be thrown when you do the search. In these +// cases, the "error" field in the `_resolve/cluster` response will be present. +// (This is also where security/permission errors will be shown.) +// * A remote cluster is an older version that does not support the feature you +// want to use in your search. +// +// ## Test availability of remote clusters +// +// The `remote/info` endpoint is commonly used to test whether the "local" +// cluster (the cluster being queried) is connected to its remote clusters, but +// it does not necessarily reflect whether the remote cluster is available or +// not. +// The remote cluster may be available, while the local cluster is not currently +// connected to it. +// +// You can use the `_resolve/cluster` API to attempt to reconnect to remote +// clusters. +// For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +// The `connected` field in the response will indicate whether it was +// successful. +// If a connection was (re-)established, this will also cause the `remote/info` +// endpoint to now indicate a connected status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster +func New(tp elastictransport.Interface) *ResolveCluster { + r := &ResolveCluster{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
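A usage sketch for the endpoint documented above (editorial, not part of the generated file): checking which clusters to include ahead of a cross-cluster search. This assumes the v9 typed client exposes the builder as `Indices.ResolveCluster()` with the index expression optional, mirroring the constructor above; the address and patterns are placeholders:

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response is a map keyed by cluster alias; per the API docs the
	// querying cluster appears under "(local)".
	res, err := es.Indices.ResolveCluster().Name("my-index-*,cluster*:my-index-*").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for alias, info := range res {
		log.Printf("%s: %+v", alias, info)
	}
}
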
+func (r *ResolveCluster) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("cluster") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("cluster") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ResolveCluster) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.resolve_cluster") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.resolve_cluster") + if reader := instrument.RecordRequestBody(ctx, "indices.resolve_cluster", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.resolve_cluster") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResolveCluster query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resolvecluster.Response +func (r ResolveCluster) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.resolve_cluster") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r ResolveCluster) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.resolve_cluster")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ResolveCluster query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the ResolveCluster headers map.
+func (r *ResolveCluster) Header(key, value string) *ResolveCluster {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Name A comma-separated list of names or index patterns for the indices, aliases,
+// and data streams to resolve.
+// Resources on remote clusters can be specified using the `<cluster>:<name>`
+// syntax.
+// Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+// If no index expression is specified, information about all remote clusters
+// configured on the local cluster
+// is returned without doing any index matching
+// API Name: name
+func (r *ResolveCluster) Name(name string) *ResolveCluster {
+	r.paramSet |= nameMask
+	r.name = name
+
+	return r
+}
+
+// AllowNoIndices If false, the request returns an error if any wildcard expression, index
+// alias, or `_all` value targets only missing
+// or closed indices. This behavior applies even if the request targets other
+// open indices. For example, a request
+// targeting `foo*,bar*` returns an error if an index starts with `foo` but no
+// index starts with `bar`.
+// NOTE: This option is only supported when specifying an index expression. You
+// will get an error if you specify index
+// options to the `_resolve/cluster` API endpoint that takes no index
+// expression.
+// API name: allow_no_indices
+func (r *ResolveCluster) AllowNoIndices(allownoindices bool) *ResolveCluster {
+	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))
+
+	return r
+}
+
+// ExpandWildcards Type of index that wildcard patterns can match.
+// If the request can target data streams, this argument determines whether
+// wildcard expressions match hidden data streams.
+// Supports comma-separated values, such as `open,hidden`. 
+// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. +// API name: expand_wildcards +func (r *ResolveCluster) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ResolveCluster { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded, or aliased indices are ignored when frozen. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. +// API name: ignore_throttled +func (r *ResolveCluster) IgnoreThrottled(ignorethrottled bool) *ResolveCluster { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If false, the request returns an error if it targets a missing or closed +// index. +// NOTE: This option is only supported when specifying an index expression. You +// will get an error if you specify index +// options to the `_resolve/cluster` API endpoint that takes no index +// expression. +// API name: ignore_unavailable +func (r *ResolveCluster) IgnoreUnavailable(ignoreunavailable bool) *ResolveCluster { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Timeout The maximum time to wait for remote clusters to respond. +// If a remote cluster does not respond within this timeout period, the API +// response +// will show the cluster as not connected and include an error message that the +// request timed out. +// +// The default timeout is unset and the query can take +// as long as the networking layer is configured to wait for remote clusters +// that are +// not responding (typically 30 seconds). +// API name: timeout +func (r *ResolveCluster) Timeout(duration string) *ResolveCluster { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ResolveCluster) ErrorTrace(errortrace bool) *ResolveCluster { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResolveCluster) FilterPath(filterpaths ...string) *ResolveCluster { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResolveCluster) Human(human bool) *ResolveCluster { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ResolveCluster) Pretty(pretty bool) *ResolveCluster { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/response.go new file mode 100644 index 000000000..6001c48f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resolvecluster + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package resolvecluster +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27 + +type Response map[string]types.ResolveClusterInfo + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/resolve_index.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/resolve_index.go new file mode 100644 index 000000000..821f7e3f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/resolve_index.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. +// Multiple patterns and remote clusters are supported. 
+package resolveindex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResolveIndex struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResolveIndex type alias for index. +type NewResolveIndex func(name string) *ResolveIndex + +// NewResolveIndexFunc returns a new instance of ResolveIndex with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResolveIndexFunc(tp elastictransport.Interface) NewResolveIndex { + return func(name string) *ResolveIndex { + n := New(tp) + + n._name(name) + + return n + } +} + +// Resolve indices. +// Resolve the names and/or index patterns for indices, aliases, and data +// streams. +// Multiple patterns and remote clusters are supported. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index +func New(tp elastictransport.Interface) *ResolveIndex { + r := &ResolveIndex{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ResolveIndex) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_resolve") + path.WriteString("/") + path.WriteString("index") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ResolveIndex) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.resolve_index") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.resolve_index") + if reader := instrument.RecordRequestBody(ctx, "indices.resolve_index", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.resolve_index") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResolveIndex query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resolveindex.Response +func (r ResolveIndex) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.resolve_index") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
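The IsSuccess variant just documented trades the decoded body for a boolean: any 2xx status yields true, a 404 yields false without an error, and any other status produces an error. A short sketch reusing the hypothetical typed client `es` and imports from the example above:

// Quick control flow: the response body is discarded, not decoded.
ok, err := es.Indices.ResolveIndex("logs-*").IsSuccess(context.Background())
if err != nil {
	log.Fatal(err)
}
if !ok {
	log.Println("resolve returned 404")
}
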
+func (r ResolveIndex) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "indices.resolve_index")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ResolveIndex query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the ResolveIndex headers map.
+func (r *ResolveIndex) Header(key, value string) *ResolveIndex {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Name Comma-separated name(s) or index pattern(s) of the indices, aliases, and data
+// streams to resolve.
+// Resources on remote clusters can be specified using the `<cluster>:<name>`
+// syntax.
+// API Name: name
+func (r *ResolveIndex) _name(name string) *ResolveIndex {
+	r.paramSet |= nameMask
+	r.name = name
+
+	return r
+}
+
+// ExpandWildcards Type of index that wildcard patterns can match.
+// If the request can target data streams, this argument determines whether
+// wildcard expressions match hidden data streams.
+// Supports comma-separated values, such as `open,hidden`.
+// API name: expand_wildcards
+func (r *ResolveIndex) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ResolveIndex {
+	tmp := []string{}
+	for _, item := range expandwildcards {
+		tmp = append(tmp, item.String())
+	}
+	r.values.Set("expand_wildcards", strings.Join(tmp, ","))
+
+	return r
+}
+
+// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed
+// index.
+// API name: ignore_unavailable
+func (r *ResolveIndex) IgnoreUnavailable(ignoreunavailable bool) *ResolveIndex {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index
+// alias, or `_all` value targets only missing or closed indices.
+// This behavior applies even if the request targets other open indices.
+// For example, a request targeting `foo*,bar*` returns an error if an index
+// starts with `foo` but no index starts with `bar`.
+// API name: allow_no_indices
+func (r *ResolveIndex) AllowNoIndices(allownoindices bool) *ResolveIndex {
+	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *ResolveIndex) ErrorTrace(errortrace bool) *ResolveIndex {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch. 
+// API name: filter_path +func (r *ResolveIndex) FilterPath(filterpaths ...string) *ResolveIndex { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResolveIndex) Human(human bool) *ResolveIndex { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ResolveIndex) Pretty(pretty bool) *ResolveIndex { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/response.go new file mode 100644 index 000000000..04c8a02e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resolveindex + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package resolveindex +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 +type Response struct { + Aliases []types.ResolveIndexAliasItem `json:"aliases"` + DataStreams []types.ResolveIndexDataStreamsItem `json:"data_streams"` + Indices []types.ResolveIndexItem `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/request.go new file mode 100644 index 000000000..341da3ab2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/request.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package rollover
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package rollover
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L153
+type Request struct {
+
+	// Aliases Aliases for the target index.
+	// Data streams do not support this parameter.
+	Aliases map[string]types.Alias `json:"aliases,omitempty"`
+	// Conditions Conditions for the rollover.
+	// If specified, Elasticsearch only performs the rollover if the current index
+	// satisfies these conditions.
+	// If this parameter is not specified, Elasticsearch performs the rollover
+	// unconditionally.
+	// If conditions are specified, at least one of them must be a `max_*`
+	// condition.
+	// The index will rollover if any `max_*` condition is satisfied and all `min_*`
+	// conditions are satisfied.
+	Conditions *types.RolloverConditions `json:"conditions,omitempty"`
+	// Mappings Mapping for fields in the index.
+	// If specified, this mapping can include field names, field data types, and
+	// mapping parameters.
+	Mappings *types.TypeMapping `json:"mappings,omitempty"`
+	// Settings Configuration options for the index.
+	// Data streams do not support this parameter.
+	Settings map[string]json.RawMessage `json:"settings,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{
+		Aliases:  make(map[string]types.Alias, 0),
+		Settings: make(map[string]json.RawMessage, 0),
+	}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Rollover request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/response.go
new file mode 100644
index 000000000..c14870472
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/response.go
@@ -0,0 +1,42 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rollover + +// Response holds the response body struct for the package rollover +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Conditions map[string]bool `json:"conditions"` + DryRun bool `json:"dry_run"` + NewIndex string `json:"new_index"` + OldIndex string `json:"old_index"` + RolledOver bool `json:"rolled_over"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Conditions: make(map[string]bool, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/rollover.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/rollover.go new file mode 100644 index 000000000..4d29fac83 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover/rollover.go @@ -0,0 +1,643 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Roll over to a new index. +// TIP: It is recommended to use the index lifecycle rollover action to automate +// rollovers. +// +// The rollover API creates a new index for a data stream or index alias. +// The API behavior depends on the rollover target. +// +// **Roll over a data stream** +// +// If you roll over a data stream, the API creates a new write index for the +// stream. +// The stream's previous write index becomes a regular backing index. +// A rollover also increments the data stream's generation. +// +// **Roll over an index alias with a write index** +// +// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a +// write index to manage time series data. 
+// Data streams replace this functionality, require less maintenance, and
+// automatically integrate with data tiers.
+//
+// If an index alias points to multiple indices, one of the indices must be a
+// write index.
+// The rollover API creates a new write index for the alias with
+// `is_write_index` set to `true`.
+// The API also sets `is_write_index` to `false` for the previous write index.
+//
+// **Roll over an index alias with one index**
+//
+// If you roll over an index alias that points to only one index, the API
+// creates a new index for the alias and removes the original index from the
+// alias.
+//
+// NOTE: A rollover creates a new index and is subject to the
+// `wait_for_active_shards` setting.
+//
+// **Increment index names for an alias**
+//
+// When you roll over an index alias, you can specify a name for the new index.
+// If you don't specify a name and the current index ends with `-` and a number,
+// such as `my-index-000001` or `my-index-3`, the new index name increments that
+// number.
+// For example, if you roll over an alias with a current index of
+// `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+// This number is always six characters and zero-padded, regardless of the
+// previous index's name.
+//
+// If you use an index alias for time series data, you can use date math in the
+// index name to track the rollover date.
+// For example, you can create an alias that points to an index named
+// `<my-index-{now/d}-000001>`.
+// If you create the index on May 6, 2099, the index's name is
+// `my-index-2099.05.06-000001`.
+// If you roll over the alias on May 7, 2099, the new index's name is
+// `my-index-2099.05.07-000002`.
+package rollover

+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	aliasMask = iota + 1
+
+	newindexMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Rollover struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	alias    string
+	newindex string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewRollover type alias for index.
+type NewRollover func(alias string) *Rollover
+
+// NewRolloverFunc returns a new instance of Rollover with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewRolloverFunc(tp elastictransport.Interface) NewRollover {
+	return func(alias string) *Rollover {
+		n := New(tp)
+
+		n._alias(alias)
+
+		return n
+	}
+}
+
+// Roll over to a new index.
+// TIP: It is recommended to use the index lifecycle rollover action to automate
+// rollovers.
+//
+// The rollover API creates a new index for a data stream or index alias.
+// The API behavior depends on the rollover target.
+//
+// **Roll over a data stream**
+//
+// If you roll over a data stream, the API creates a new write index for the
+// stream.
+// The stream's previous write index becomes a regular backing index.
+// A rollover also increments the data stream's generation. 
+//
+// **Roll over an index alias with a write index**
+//
+// TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a
+// write index to manage time series data.
+// Data streams replace this functionality, require less maintenance, and
+// automatically integrate with data tiers.
+//
+// If an index alias points to multiple indices, one of the indices must be a
+// write index.
+// The rollover API creates a new write index for the alias with
+// `is_write_index` set to `true`.
+// The API also sets `is_write_index` to `false` for the previous write index.
+//
+// **Roll over an index alias with one index**
+//
+// If you roll over an index alias that points to only one index, the API
+// creates a new index for the alias and removes the original index from the
+// alias.
+//
+// NOTE: A rollover creates a new index and is subject to the
+// `wait_for_active_shards` setting.
+//
+// **Increment index names for an alias**
+//
+// When you roll over an index alias, you can specify a name for the new index.
+// If you don't specify a name and the current index ends with `-` and a number,
+// such as `my-index-000001` or `my-index-3`, the new index name increments that
+// number.
+// For example, if you roll over an alias with a current index of
+// `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+// This number is always six characters and zero-padded, regardless of the
+// previous index's name.
+//
+// If you use an index alias for time series data, you can use date math in the
+// index name to track the rollover date.
+// For example, you can create an alias that points to an index named
+// `<my-index-{now/d}-000001>`.
+// If you create the index on May 6, 2099, the index's name is
+// `my-index-2099.05.06-000001`.
+// If you roll over the alias on May 7, 2099, the new index's name is
+// `my-index-2099.05.07-000002`.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover
+func New(tp elastictransport.Interface) *Rollover {
+	r := &Rollover{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *Rollover) Raw(raw io.Reader) *Rollover {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *Rollover) Request(req *Request) *Rollover {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters. 
+func (r *Rollover) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Rollover: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == aliasMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "alias", r.alias) + } + path.WriteString(r.alias) + path.WriteString("/") + path.WriteString("_rollover") + + method = http.MethodPost + case r.paramSet == aliasMask|newindexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "alias", r.alias) + } + path.WriteString(r.alias) + path.WriteString("/") + path.WriteString("_rollover") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "newindex", r.newindex) + } + path.WriteString(r.newindex) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
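As HttpRequest above shows, a Raw reader takes precedence over the typed Request: deferred mutations run first, and the request struct is marshalled into the buffer only when no raw payload was supplied. A hedged sketch of the Raw escape hatch, reusing the hypothetical typed client `es` from the earlier examples (add "strings" to the imports; the alias is illustrative):

payload := strings.NewReader(`{"conditions":{"max_age":"7d"}}`)
res, err := es.Indices.Rollover("my-alias").Raw(payload).Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("rolled_over=%v new_index=%s", res.RolledOver, res.NewIndex)
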
+func (r Rollover) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.rollover") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.rollover") + if reader := instrument.RecordRequestBody(ctx, "indices.rollover", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.rollover") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Rollover query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a rollover.Response +func (r Rollover) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.rollover") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Rollover headers map. +func (r *Rollover) Header(key, value string) *Rollover { + r.headers.Set(key, value) + + return r +} + +// Alias Name of the data stream or index alias to roll over. +// API Name: alias +func (r *Rollover) _alias(alias string) *Rollover { + r.paramSet |= aliasMask + r.alias = alias + + return r +} + +// NewIndex Name of the index to create. +// Supports date math. +// Data streams do not support this parameter. +// API Name: newindex +func (r *Rollover) NewIndex(newindex string) *Rollover { + r.paramSet |= newindexMask + r.newindex = newindex + + return r +} + +// DryRun If `true`, checks whether the current index satisfies the specified +// conditions but does not perform a rollover. 
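A dry-run sketch combining this flag with a typed request body (editorial; `FromJSON` is the generated helper from request.go above, while the typed client `es` and the alias are illustrative assumptions). The import path for the request package is github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover:

// Roll over only if the write index is at least 7 days old or holds 1M docs.
req, err := rollover.NewRequest().FromJSON(`{"conditions":{"max_age":"7d","max_docs":1000000}}`)
if err != nil {
	log.Fatal(err)
}
res, err := es.Indices.Rollover("my-alias").Request(req).DryRun(true).Do(context.Background())
if err != nil {
	log.Fatal(err)
}
// Conditions reports whether each condition was met; with dry_run the
// rollover itself is not performed.
log.Printf("conditions=%v rolled_over=%v", res.Conditions, res.RolledOver)
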
+// API name: dry_run +func (r *Rollover) DryRun(dryrun bool) *Rollover { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Rollover) MasterTimeout(duration string) *Rollover { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Rollover) Timeout(duration string) *Rollover { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Rollover) WaitForActiveShards(waitforactiveshards string) *Rollover { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Lazy If set to true, the rollover action will only mark a data stream to signal +// that it needs to be rolled over at the next write. +// Only allowed on data streams. +// API name: lazy +func (r *Rollover) Lazy(lazy bool) *Rollover { + r.values.Set("lazy", strconv.FormatBool(lazy)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Rollover) ErrorTrace(errortrace bool) *Rollover { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Rollover) FilterPath(filterpaths ...string) *Rollover { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Rollover) Human(human bool) *Rollover { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Rollover) Pretty(pretty bool) *Rollover { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Aliases for the target index. +// Data streams do not support this parameter. 
+// API name: aliases
+func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aliases = aliases
+	return r
+}
+
+func (r *Rollover) AddAlias(key string, value types.AliasVariant) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.Alias
+	if r.req.Aliases == nil {
+		// Allocate into tmp so the write below cannot hit a nil map.
+		tmp = make(map[string]types.Alias)
+	} else {
+		tmp = r.req.Aliases
+	}
+
+	tmp[key] = *value.AliasCaster()
+
+	r.req.Aliases = tmp
+	return r
+}
+
+// Conditions for the rollover.
+// If specified, Elasticsearch only performs the rollover if the current index
+// satisfies these conditions.
+// If this parameter is not specified, Elasticsearch performs the rollover
+// unconditionally.
+// If conditions are specified, at least one of them must be a `max_*`
+// condition.
+// The index will rollover if any `max_*` condition is satisfied and all `min_*`
+// conditions are satisfied.
+// API name: conditions
+func (r *Rollover) Conditions(conditions types.RolloverConditionsVariant) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Conditions = conditions.RolloverConditionsCaster()
+
+	return r
+}
+
+// Mapping for fields in the index.
+// If specified, this mapping can include field names, field data types, and
+// mapping parameters.
+// API name: mappings
+func (r *Rollover) Mappings(mappings types.TypeMappingVariant) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Mappings = mappings.TypeMappingCaster()
+
+	return r
+}
+
+// Configuration options for the index.
+// Data streams do not support this parameter.
+// API name: settings
+func (r *Rollover) Settings(settings map[string]json.RawMessage) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Settings = settings
+	return r
+}
+
+func (r *Rollover) AddSetting(key string, value json.RawMessage) *Rollover {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]json.RawMessage
+	if r.req.Settings == nil {
+		// Allocate into tmp so the write below cannot hit a nil map.
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.Settings
+	}
+
+	tmp[key] = value
+
+	r.req.Settings = tmp
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/response.go
new file mode 100644
index 000000000..19427b478
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/response.go
@@ -0,0 +1,41 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package segments + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package segments +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 +type Response struct { + Indices map[string]types.IndexSegment `json:"indices"` + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.IndexSegment, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/segments.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/segments.go new file mode 100644 index 000000000..82d17637c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments/segments.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index segments. +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing +// indices. +package segments + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
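A short sketch of calling the Segments endpoint through the typed client; the address and index pattern are hypothetical, and the loop relies only on the Indices map declared in the response.go vendored above.

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Low-level Lucene segment information for a hypothetical index pattern.
	res, err := es.Indices.Segments().Index("logs-*").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for name := range res.Indices {
		fmt.Println("segments reported for", name)
	}
}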
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Segments struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSegments type alias for index. +type NewSegments func() *Segments + +// NewSegmentsFunc returns a new instance of Segments with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSegmentsFunc(tp elastictransport.Interface) NewSegments { + return func() *Segments { + n := New(tp) + + return n + } +} + +// Get index segments. +// Get low-level information about the Lucene segments in index shards. +// For data streams, the API returns information about the stream's backing +// indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments +func New(tp elastictransport.Interface) *Segments { + r := &Segments{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Segments) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_segments") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_segments") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Segments) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.segments") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.segments") + if reader := instrument.RecordRequestBody(ctx, "indices.segments", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.segments") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Segments query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a segments.Response +func (r Segments) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.segments") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
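The Do implementations above decode any non-2xx body into *types.ElasticsearchError and return it as the error value, which lets callers separate server-side API errors from transport failures wrapped by Perform. A small helper sketch, assuming only what the vendored code shows (NewElasticsearchError returns a pointer and Status carries the HTTP code); the package name is hypothetical.

package esutil // hypothetical helper package

import (
	"errors"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// IsNotFound reports whether err is a server-side Elasticsearch error with
// HTTP status 404, as produced by the generated Do methods; anything else
// is a client-side or transport failure.
func IsNotFound(err error) bool {
	var esErr *types.ElasticsearchError
	return errors.As(err, &esErr) && esErr.Status == 404
}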
+func (r Segments) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.segments") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Segments query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Segments headers map. +func (r *Segments) Header(key, value string) *Segments { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *Segments) Index(index string) *Segments { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// API name: allow_no_indices +func (r *Segments) AllowNoIndices(allownoindices bool) *Segments { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *Segments) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Segments { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *Segments) IgnoreUnavailable(ignoreunavailable bool) *Segments { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Segments) ErrorTrace(errortrace bool) *Segments { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Segments) FilterPath(filterpaths ...string) *Segments { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Segments) Human(human bool) *Segments { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Segments) Pretty(pretty bool) *Segments { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/response.go new file mode 100644 index 000000000..251a94cfc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package shardstores + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package shardstores +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 +type Response struct { + Indices map[string]types.IndicesShardStores `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.IndicesShardStores, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/shard_stores.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/shard_stores.go new file mode 100644 index 000000000..10429bc86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores/shard_stores.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's +// backing indices. +// +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. +// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. +package shardstores + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ShardStores struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewShardStores type alias for index. +type NewShardStores func() *ShardStores + +// NewShardStoresFunc returns a new instance of ShardStores with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewShardStoresFunc(tp elastictransport.Interface) NewShardStores { + return func() *ShardStores { + n := New(tp) + + return n + } +} + +// Get index shard stores. +// Get store information about replica shards in one or more indices. +// For data streams, the API retrieves store information for the stream's +// backing indices. +// +// The index shard stores API returns the following information: +// +// * The node on which each replica shard exists. +// * The allocation ID for each replica shard. +// * A unique ID for each replica shard. +// * Any errors encountered while opening the shard index or from an earlier +// failure. +// +// By default, the API returns store information only for primary shards that +// are unassigned or have one or more unassigned replica shards. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores +func New(tp elastictransport.Interface) *ShardStores { + r := &ShardStores{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ShardStores) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_shard_stores") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_shard_stores") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ShardStores) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.shard_stores") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.shard_stores") + if reader := instrument.RecordRequestBody(ctx, "indices.shard_stores", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.shard_stores") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ShardStores query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a shardstores.Response +func (r ShardStores) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.shard_stores") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
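A usage sketch for the ShardStores builder above; the address and index name are hypothetical, and shardstorestatus.All is an assumed enum value from the imported enums package (the endpoint defaults to reporting only shards with unassigned copies, so `all` widens the report).

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Indices.ShardStores().
		Index("logs-2024"). // hypothetical index name
		Status(shardstorestatus.All).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("indices with store info:", len(res.Indices))
}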
+func (r ShardStores) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.shard_stores") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ShardStores query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ShardStores headers map. +func (r *ShardStores) Header(key, value string) *ShardStores { + r.headers.Set(key, value) + + return r +} + +// Index List of data streams, indices, and aliases used to limit the request. +// API Name: index +func (r *ShardStores) Index(index string) *ShardStores { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all +// value targets only missing or closed indices. This behavior applies even if +// the request +// targets other open indices. +// API name: allow_no_indices +func (r *ShardStores) AllowNoIndices(allownoindices bool) *ShardStores { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, +// this argument determines whether wildcard expressions match hidden data +// streams. +// API name: expand_wildcards +func (r *ShardStores) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ShardStores { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// API name: ignore_unavailable +func (r *ShardStores) IgnoreUnavailable(ignoreunavailable bool) *ShardStores { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Status List of shard health statuses used to limit the request. +// API name: status +func (r *ShardStores) Status(statuses ...shardstorestatus.ShardStoreStatus) *ShardStores { + tmp := []string{} + for _, item := range statuses { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("status", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ShardStores) ErrorTrace(errortrace bool) *ShardStores { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ShardStores) FilterPath(filterpaths ...string) *ShardStores { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ShardStores) Human(human bool) *ShardStores { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ShardStores) Pretty(pretty bool) *ShardStores { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/request.go new file mode 100644 index 000000000..9d33f126f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/request.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package shrink + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package shrink +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L113 +type Request struct { + + // Aliases The key is the alias name. + // Index alias names support date math. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. 
+ Settings map[string]json.RawMessage `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aliases: make(map[string]types.Alias, 0), + Settings: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Shrink request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/response.go new file mode 100644 index 000000000..6212420c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package shrink + +// Response holds the response body struct for the package shrink +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Index string `json:"index"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/shrink.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/shrink.go new file mode 100644 index 000000000..f42e4ae5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink/shrink.go @@ -0,0 +1,578 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
+// See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Shrink an index.
+// Shrink an index into a new index with fewer primary shards.
+//
+// Before you can shrink an index:
+//
+// * The index must be read-only.
+// * A copy of every shard in the index must reside on the same node.
+// * The index must have a green health status.
+//
+// To make shard allocation easier, we recommend you also remove the index's
+// replica shards.
+// You can later re-add replica shards as part of the shrink operation.
+//
+// The requested number of primary shards in the target index must be a factor
+// of the number of shards in the source index.
+// For example an index with 8 primary shards can be shrunk into 4, 2 or 1
+// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or
+// 1.
+// If the number of shards in the index is a prime number it can only be shrunk
+// into a single primary shard
+//
+// Before shrinking, a (primary or replica) copy of every shard in the index
+//
+// must be present on the same node.
+//
+// The current write index on a data stream cannot be shrunk. In order to shrink
+// the current write index, the data stream must first be rolled over so that a
+// new write index is created and then the previous write index can be shrunk.
+//
+// A shrink operation:
+//
+// * Creates a new target index with the same definition as the source index,
+// but with a smaller number of primary shards.
+// * Hard-links segments from the source index into the target index. If the
+// file system does not support hard-linking, then all segments are copied into
+// the new index, which is a much more time consuming process. Also if using
+// multiple data paths, shards on different data paths require a full copy of
+// segment files if they are not on the same disk since hardlinks do not work
+// across disks.
+// * Recovers the target index as though it were a closed index which had just
+// been re-opened. Recovers shards to the
+// `.routing.allocation.initial_recovery._id` index setting.
+//
+// IMPORTANT: Indices can only be shrunk if they satisfy the following
+// requirements:
+//
+// * The target index must not exist.
+// * The source index must have more primary shards than the target index.
+// * The number of primary shards in the target index must be a factor of the
+// number of primary shards in the source index. The source index must have more
+// primary shards than the target index.
+// * The index must not contain more than 2,147,483,519 documents in total
+// across all shards that will be shrunk into a single shard on the target index
+// as this is the maximum number of docs that can fit into a single shard.
+// * The node handling the shrink process must have sufficient free disk space
+// to accommodate a second copy of the existing index.
+package shrink
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	indexMask = iota + 1
+
+	targetMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Shrink struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + target string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewShrink type alias for index. +type NewShrink func(index, target string) *Shrink + +// NewShrinkFunc returns a new instance of Shrink with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewShrinkFunc(tp elastictransport.Interface) NewShrink { + return func(index, target string) *Shrink { + n := New(tp) + + n._index(index) + + n._target(target) + + return n + } +} + +// Shrink an index. +// Shrink an index into a new index with fewer primary shards. +// +// Before you can shrink an index: +// +// * The index must be read-only. +// * A copy of every shard in the index must reside on the same node. +// * The index must have a green health status. +// +// To make shard allocation easier, we recommend you also remove the index's +// replica shards. +// You can later re-add replica shards as part of the shrink operation. +// +// The requested number of primary shards in the target index must be a factor +// of the number of shards in the source index. +// For example an index with 8 primary shards can be shrunk into 4, 2 or 1 +// primary shards or an index with 15 primary shards can be shrunk into 5, 3 or +// 1. +// If the number of shards in the index is a prime number it can only be shrunk +// into a single primary shard +// +// Before shrinking, a (primary or replica) copy of every shard in the index +// +// must be present on the same node. +// +// The current write index on a data stream cannot be shrunk. In order to shrink +// the current write index, the data stream must first be rolled over so that a +// new write index is created and then the previous write index can be shrunk. +// +// A shrink operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a smaller number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system does not support hard-linking, then all segments are copied into +// the new index, which is a much more time consuming process. Also if using +// multiple data paths, shards on different data paths require a full copy of +// segment files if they are not on the same disk since hardlinks do not work +// across disks. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. Recovers shards to the +// `.routing.allocation.initial_recovery._id` index setting. +// +// IMPORTANT: Indices can only be shrunk if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have more primary shards than the target index. +// * The number of primary shards in the target index must be a factor of the +// number of primary shards in the source index. The source index must have more +// primary shards than the target index. +// * The index must not contain more than 2,147,483,519 documents in total +// across all shards that will be shrunk into a single shard on the target index +// as this is the maximum number of docs that can fit into a single shard. 
+// * The node handling the shrink process must have sufficient free disk space +// to accommodate a second copy of the existing index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink +func New(tp elastictransport.Interface) *Shrink { + r := &Shrink{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Shrink) Raw(raw io.Reader) *Shrink { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Shrink) Request(req *Request) *Shrink { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Shrink) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Shrink: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|targetMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_shrink") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.target) + } + path.WriteString(r.target) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
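A sketch of driving the Shrink builder with a target shard count that respects the factor rule documented above (2 divides 8); the client address and index names are hypothetical, and AddSetting is the setter vendored further below.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // hypothetical address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Shrink a hypothetical 8-primary-shard index into 2 shards; 2 is a
	// factor of 8, as the shrink requirements above demand.
	res, err := es.Indices.Shrink("logs-2024", "logs-2024-shrunk").
		AddSetting("index.number_of_shards", json.RawMessage(`2`)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Acknowledged, res.Index)
}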
+func (r Shrink) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.shrink") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.shrink") + if reader := instrument.RecordRequestBody(ctx, "indices.shrink", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.shrink") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Shrink query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a shrink.Response +func (r Shrink) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.shrink") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Shrink headers map. +func (r *Shrink) Header(key, value string) *Shrink { + r.headers.Set(key, value) + + return r +} + +// Index Name of the source index to shrink. +// API Name: index +func (r *Shrink) _index(index string) *Shrink { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Target Name of the target index to create. +// API Name: target +func (r *Shrink) _target(target string) *Shrink { + r.paramSet |= targetMask + r.target = target + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout
+func (r *Shrink) MasterTimeout(duration string) *Shrink {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout Period to wait for a response.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: timeout
+func (r *Shrink) Timeout(duration string) *Shrink {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// WaitForActiveShards The number of shard copies that must be active before proceeding with the
+// operation.
+// Set to `all` or any positive integer up to the total number of shards in the
+// index (`number_of_replicas+1`).
+// API name: wait_for_active_shards
+func (r *Shrink) WaitForActiveShards(waitforactiveshards string) *Shrink {
+	r.values.Set("wait_for_active_shards", waitforactiveshards)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Shrink) ErrorTrace(errortrace bool) *Shrink {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Shrink) FilterPath(filterpaths ...string) *Shrink {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Shrink) Human(human bool) *Shrink {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Shrink) Pretty(pretty bool) *Shrink {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The key is the alias name.
+// Index alias names support date math.
+// API name: aliases
+func (r *Shrink) Aliases(aliases map[string]types.Alias) *Shrink {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Aliases = aliases
+	return r
+}
+
+func (r *Shrink) AddAlias(key string, value types.AliasVariant) *Shrink {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.Alias
+	if r.req.Aliases == nil {
+		// Allocate into tmp so the write below cannot hit a nil map.
+		tmp = make(map[string]types.Alias)
+	} else {
+		tmp = r.req.Aliases
+	}
+
+	tmp[key] = *value.AliasCaster()
+
+	r.req.Aliases = tmp
+	return r
+}
+
+// Configuration options for the target index.
+// API name: settings
+func (r *Shrink) Settings(settings map[string]json.RawMessage) *Shrink {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Settings = settings
+	return r
+}
+
+func (r *Shrink) AddSetting(key string, value json.RawMessage) *Shrink {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]json.RawMessage
+	if r.req.Settings == nil {
+		// Allocate into tmp so the write below cannot hit a nil map.
+		tmp = make(map[string]json.RawMessage)
+	} else {
+		tmp = r.req.Settings
+	}
+
+	tmp[key] = value
+
+	r.req.Settings = tmp
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/response.go
new file mode 100644
index 000000000..4387ecf87
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/response.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package simulateindextemplate
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package simulateindextemplate
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30
+type Response struct {
+	Overlapping []types.Overlapping `json:"overlapping,omitempty"`
+	Template    types.Template      `json:"template"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/simulate_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/simulate_index_template.go
new file mode 100644
index 000000000..2d549345c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate/simulate_index_template.go
@@ -0,0 +1,389 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Simulate an index.
+// Get the index configuration that would be applied to the specified index from
+// an existing index template.
+package simulateindextemplate
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SimulateIndexTemplate struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSimulateIndexTemplate type alias for index.
+type NewSimulateIndexTemplate func(name string) *SimulateIndexTemplate
+
+// NewSimulateIndexTemplateFunc returns a new instance of SimulateIndexTemplate with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewSimulateIndexTemplateFunc(tp elastictransport.Interface) NewSimulateIndexTemplate {
+	return func(name string) *SimulateIndexTemplate {
+		n := New(tp)
+
+		n._name(name)
+
+		return n
+	}
+}
+
+// Simulate an index.
+// Get the index configuration that would be applied to the specified index from
+// an existing index template.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template
+func New(tp elastictransport.Interface) *SimulateIndexTemplate {
+	r := &SimulateIndexTemplate{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
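+//
+// Editor's note (illustrative, assumes a typed client `es`): the request can be
+// built without being sent, for example to verify the target URL:
+//
+//	httpReq, err := es.Indices.SimulateIndexTemplate("my-index").HttpRequest(context.Background())
+//	// on success, httpReq.URL.Path is "/_index_template/_simulate_index/my-index"
+//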
+func (r *SimulateIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString("_simulate_index") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SimulateIndexTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.simulate_index_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.simulate_index_template") + if reader := instrument.RecordRequestBody(ctx, "indices.simulate_index_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.simulate_index_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SimulateIndexTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a simulateindextemplate.Response +func (r SimulateIndexTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.simulate_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SimulateIndexTemplate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.simulate_index_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SimulateIndexTemplate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SimulateIndexTemplate headers map. +func (r *SimulateIndexTemplate) Header(key, value string) *SimulateIndexTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Name of the index to simulate +// API Name: name +func (r *SimulateIndexTemplate) _name(name string) *SimulateIndexTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create Whether the index template we optionally defined in the body should only be +// dry-run added if new or can also replace an existing one +// API name: create +func (r *SimulateIndexTemplate) Create(create bool) *SimulateIndexTemplate { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// Cause User defined reason for dry-run creating the new template for simulation +// purposes +// API name: cause +func (r *SimulateIndexTemplate) Cause(cause string) *SimulateIndexTemplate { + r.values.Set("cause", cause) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *SimulateIndexTemplate) MasterTimeout(duration string) *SimulateIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. 
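+//
+// Editor's sketch (hypothetical index name, assumes a typed client `es`):
+//
+//	res, err := es.Indices.SimulateIndexTemplate("my-index").
+//		IncludeDefaults(true).
+//		Do(context.Background())
+//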
+// API name: include_defaults +func (r *SimulateIndexTemplate) IncludeDefaults(includedefaults bool) *SimulateIndexTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SimulateIndexTemplate) ErrorTrace(errortrace bool) *SimulateIndexTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SimulateIndexTemplate) FilterPath(filterpaths ...string) *SimulateIndexTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SimulateIndexTemplate) Human(human bool) *SimulateIndexTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SimulateIndexTemplate) Pretty(pretty bool) *SimulateIndexTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/request.go new file mode 100644 index 000000000..1ca1f7152 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/request.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package simulatetemplate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package simulatetemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L135 +type Request struct { + + // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster + // setting. + // If set to `true` in a template, then indices can be automatically created + // using that template even if auto-creation of indices is disabled via + // `actions.auto_create_index`. + // If set to `false`, then indices or data streams matching the template must + // always be explicitly created, and may never be automatically created. + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // ComposedOf An ordered list of component template names. + // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. + ComposedOf []string `json:"composed_of,omitempty"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` + // Deprecated Marks this index template as deprecated. When creating or updating a + // non-deprecated index template + // that uses deprecated components, Elasticsearch will emit a deprecation + // warning. + Deprecated *bool `json:"deprecated,omitempty"` + // IgnoreMissingComponentTemplates The configuration option ignore_missing_component_templates can be used when + // an index template + // references a component template that might not exist + IgnoreMissingComponentTemplates []string `json:"ignore_missing_component_templates,omitempty"` + // IndexPatterns Array of wildcard (`*`) expressions used to match the names of data streams + // and indices during creation. + IndexPatterns []string `json:"index_patterns,omitempty"` + // Meta_ Optional user metadata about the index template. + // May have any contents. + // This map is not automatically generated by Elasticsearch. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). + // This number is not automatically generated by Elasticsearch. + Priority *int64 `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *types.IndexTemplateMapping `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. 
+ Version *int64 `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Simulatetemplate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_auto_create": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowAutoCreate", err) + } + s.AllowAutoCreate = &value + case bool: + s.AllowAutoCreate = &v + } + + case "composed_of": + if err := dec.Decode(&s.ComposedOf); err != nil { + return fmt.Errorf("%s | %w", "ComposedOf", err) + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "ignore_missing_component_templates": + if err := dec.Decode(&s.IgnoreMissingComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + + case "index_patterns": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + s.IndexPatterns = append(s.IndexPatterns, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "priority": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int64(v) + s.Priority = &f + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/response.go new file mode 100644 index 000000000..01f00855d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package simulatetemplate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package simulatetemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 +type Response struct { + Overlapping []types.Overlapping `json:"overlapping,omitempty"` + Template types.Template `json:"template"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/simulate_template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/simulate_template.go new file mode 100644 index 000000000..62f1b15ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate/simulate_template.go @@ -0,0 +1,567 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Simulate an index template. +// Get the index configuration that would be applied by a particular index +// template. +package simulatetemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
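+//
+// Editor's note (illustrative): callers of endpoints with required path
+// parameters can match this sentinel with errors.Is, for example:
+//
+//	if _, err := r.HttpRequest(context.Background()); errors.Is(err, ErrBuildPath) {
+//		// a required path parameter was never set
+//	}
+//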
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SimulateTemplate struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSimulateTemplate type alias for index.
+type NewSimulateTemplate func() *SimulateTemplate
+
+// NewSimulateTemplateFunc returns a new instance of SimulateTemplate with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewSimulateTemplateFunc(tp elastictransport.Interface) NewSimulateTemplate {
+	return func() *SimulateTemplate {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Simulate an index template.
+// Get the index configuration that would be applied by a particular index
+// template.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template
+func New(tp elastictransport.Interface) *SimulateTemplate {
+	r := &SimulateTemplate{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *SimulateTemplate) Raw(raw io.Reader) *SimulateTemplate {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *SimulateTemplate) Request(req *Request) *SimulateTemplate {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
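+//
+// Editor's sketch (hypothetical pattern, assumes a typed client `es`): a body
+// can be attached via Request before the call is built or executed:
+//
+//	req := NewRequest()
+//	req.IndexPatterns = []string{"logs-*"}
+//	res, err := es.Indices.SimulateTemplate().Request(req).Do(context.Background())
+//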
+func (r *SimulateTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SimulateTemplate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_index_template") + path.WriteString("/") + path.WriteString("_simulate") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
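+//
+// Editor's note (illustrative, assumes a typed client `es`): Perform is the
+// escape hatch when the decoded Response from Do is not wanted; the caller then
+// owns the body:
+//
+//	resp, err := es.Indices.SimulateTemplate().Perform(context.Background())
+//	if err == nil {
+//		defer resp.Body.Close()
+//		// read resp.Body directly
+//	}
+//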
+func (r SimulateTemplate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.simulate_template") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.simulate_template") + if reader := instrument.RecordRequestBody(ctx, "indices.simulate_template", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.simulate_template") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SimulateTemplate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a simulatetemplate.Response +func (r SimulateTemplate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.simulate_template") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SimulateTemplate headers map. +func (r *SimulateTemplate) Header(key, value string) *SimulateTemplate { + r.headers.Set(key, value) + + return r +} + +// Name Name of the index template to simulate. To test a template configuration +// before you add it to the cluster, omit +// this parameter and specify the template configuration in the request body. +// API Name: name +func (r *SimulateTemplate) Name(name string) *SimulateTemplate { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create If true, the template passed in the body is only used if no existing +// templates match the same index patterns. If false, the simulation uses the +// template with the highest priority. 
Note that the template is not permanently +// added or updated in either case; it is only used for the simulation. +// API name: create +func (r *SimulateTemplate) Create(create bool) *SimulateTemplate { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// Cause User defined reason for dry-run creating the new template for simulation +// purposes +// API name: cause +func (r *SimulateTemplate) Cause(cause string) *SimulateTemplate { + r.values.Set("cause", cause) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *SimulateTemplate) MasterTimeout(duration string) *SimulateTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *SimulateTemplate) IncludeDefaults(includedefaults bool) *SimulateTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SimulateTemplate) ErrorTrace(errortrace bool) *SimulateTemplate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SimulateTemplate) FilterPath(filterpaths ...string) *SimulateTemplate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SimulateTemplate) Human(human bool) *SimulateTemplate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SimulateTemplate) Pretty(pretty bool) *SimulateTemplate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// This setting overrides the value of the `action.auto_create_index` cluster +// setting. +// If set to `true` in a template, then indices can be automatically created +// using that template even if auto-creation of indices is disabled via +// `actions.auto_create_index`. +// If set to `false`, then indices or data streams matching the template must +// always be explicitly created, and may never be automatically created. +// API name: allow_auto_create +func (r *SimulateTemplate) AllowAutoCreate(allowautocreate bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowAutoCreate = &allowautocreate + + return r +} + +// An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. 
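+//
+// For illustration (editor's sketch, hypothetical component template names;
+// assumes a typed client `es`):
+//
+//	res, err := es.Indices.SimulateTemplate().
+//		ComposedOf("base-settings", "logs-mappings").
+//		Do(context.Background())
+//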
+// API name: composed_of +func (r *SimulateTemplate) ComposedOf(composedofs ...string) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range composedofs { + + r.req.ComposedOf = append(r.req.ComposedOf, v) + + } + return r +} + +// If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. +// API name: data_stream +func (r *SimulateTemplate) DataStream(datastream types.DataStreamVisibilityVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataStream = datastream.DataStreamVisibilityCaster() + + return r +} + +// Marks this index template as deprecated. When creating or updating a +// non-deprecated index template +// that uses deprecated components, Elasticsearch will emit a deprecation +// warning. +// API name: deprecated +func (r *SimulateTemplate) Deprecated(deprecated bool) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Deprecated = &deprecated + + return r +} + +// The configuration option ignore_missing_component_templates can be used when +// an index template +// references a component template that might not exist +// API name: ignore_missing_component_templates +func (r *SimulateTemplate) IgnoreMissingComponentTemplates(ignoremissingcomponenttemplates ...string) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoremissingcomponenttemplates { + + r.req.IgnoreMissingComponentTemplates = append(r.req.IgnoreMissingComponentTemplates, v) + + } + return r +} + +// Array of wildcard (`*`) expressions used to match the names of data streams +// and indices during creation. +// API name: index_patterns +func (r *SimulateTemplate) IndexPatterns(indices ...string) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexPatterns = indices + + return r +} + +// Optional user metadata about the index template. +// May have any contents. +// This map is not automatically generated by Elasticsearch. +// API name: _meta +func (r *SimulateTemplate) Meta_(metadata types.MetadataVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. +// API name: priority +func (r *SimulateTemplate) Priority(priority int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Priority = &priority + + return r +} + +// Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. 
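+//
+// Editor's sketch (hypothetical; types.NewIndexTemplateMapping is assumed to be
+// the generated constructor for this variant):
+//
+//	res, err := es.Indices.SimulateTemplate().
+//		Template(types.NewIndexTemplateMapping()).
+//		Do(context.Background())
+//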
+// API name: template +func (r *SimulateTemplate) Template(template types.IndexTemplateMappingVariant) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Template = template.IndexTemplateMappingCaster() + + return r +} + +// Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. +// API name: version +func (r *SimulateTemplate) Version(versionnumber int64) *SimulateTemplate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &versionnumber + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/request.go new file mode 100644 index 000000000..8a8d5da6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package split + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package split +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/split/IndicesSplitRequest.ts#L27-L113 +type Request struct { + + // Aliases Aliases for the resulting index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. + Settings map[string]json.RawMessage `json:"settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aliases: make(map[string]types.Alias, 0), + Settings: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Split request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/response.go new file mode 100644 index 000000000..2ff6d7afd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package split
+
+// Response holds the response body struct for the package split
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/split/IndicesSplitResponse.ts#L22-L28
+type Response struct {
+	Acknowledged       bool   `json:"acknowledged"`
+	Index              string `json:"index"`
+	ShardsAcknowledged bool   `json:"shards_acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/split.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/split.go
new file mode 100644
index 000000000..48f25c746
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/split/split.go
@@ -0,0 +1,561 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Split an index.
+// Split an index into a new index with more primary shards.
+// Before you can split an index:
+//
+// * The index must be read-only.
+// * The cluster health status must be green.
+//
+// You can make an index read-only with the following request using the add
+// index block API:
+//
+// ```
+// PUT /my_source_index/_block/write
+// ```
+//
+// The current write index on a data stream cannot be split.
+// In order to split the current write index, the data stream must first be
+// rolled over so that a new write index is created and then the previous write
+// index can be split.
+//
+// The number of times the index can be split (and the number of shards that
+// each original shard can be split into) is determined by the
+// `index.number_of_routing_shards` setting.
+// The number of routing shards specifies the hashing space that is used
+// internally to distribute documents across shards with consistent hashing.
+// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x
+// 2 x 3) could be split by a factor of 2 or 3.
+//
+// A split operation:
+//
+// * Creates a new target index with the same definition as the source index,
+// but with a larger number of primary shards.
+// * Hard-links segments from the source index into the target index. If the
+// file system doesn't support hard-linking, all segments are copied into the
+// new index, which is a much more time consuming process.
+// * Hashes all documents again, after low level files are created, to delete
+// documents that belong to a different shard.
+// * Recovers the target index as though it were a closed index which had just
+// been re-opened.
+//
+// IMPORTANT: Indices can only be split if they satisfy the following
+// requirements:
+//
+// * The target index must not exist.
+// * The source index must have fewer primary shards than the target index.
+// * The number of primary shards in the target index must be a multiple of the
+// number of primary shards in the source index.
+// * The node handling the split process must have sufficient free disk space to
+// accommodate a second copy of the existing index.
+package split
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	indexMask = iota + 1
+
+	targetMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Split struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	index  string
+	target string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSplit type alias for index.
+type NewSplit func(index, target string) *Split
+
+// NewSplitFunc returns a new instance of Split with the provided transport.
+// Used in the index of the library; this allows retrieving every API in one place.
+func NewSplitFunc(tp elastictransport.Interface) NewSplit {
+	return func(index, target string) *Split {
+		n := New(tp)
+
+		n._index(index)
+
+		n._target(target)
+
+		return n
+	}
+}
+
+// Split an index.
+// Split an index into a new index with more primary shards.
+// Before you can split an index:
+//
+// * The index must be read-only.
+// * The cluster health status must be green.
+//
+// You can make an index read-only with the following request using the add
+// index block API:
+//
+// ```
+// PUT /my_source_index/_block/write
+// ```
+//
+// The current write index on a data stream cannot be split.
+// In order to split the current write index, the data stream must first be
+// rolled over so that a new write index is created and then the previous write
+// index can be split.
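+//
+// Editor's sketch (hypothetical index names, assumes a typed client `es`): after
+// blocking writes on the source index, a split is a single call:
+//
+//	res, err := es.Indices.Split("my_source_index", "my_target_index").
+//		WaitForActiveShards("all").
+//		Do(context.Background())
+//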
+// +// The number of times the index can be split (and the number of shards that +// each original shard can be split into) is determined by the +// `index.number_of_routing_shards` setting. +// The number of routing shards specifies the hashing space that is used +// internally to distribute documents across shards with consistent hashing. +// For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x +// 2 x 3) could be split by a factor of 2 or 3. +// +// A split operation: +// +// * Creates a new target index with the same definition as the source index, +// but with a larger number of primary shards. +// * Hard-links segments from the source index into the target index. If the +// file system doesn't support hard-linking, all segments are copied into the +// new index, which is a much more time consuming process. +// * Hashes all documents again, after low level files are created, to delete +// documents that belong to a different shard. +// * Recovers the target index as though it were a closed index which had just +// been re-opened. +// +// IMPORTANT: Indices can only be split if they satisfy the following +// requirements: +// +// * The target index must not exist. +// * The source index must have fewer primary shards than the target index. +// * The number of primary shards in the target index must be a multiple of the +// number of primary shards in the source index. +// * The node handling the split process must have sufficient free disk space to +// accommodate a second copy of the existing index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split +func New(tp elastictransport.Interface) *Split { + r := &Split{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Split) Raw(raw io.Reader) *Split { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Split) Request(req *Request) *Split { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Split) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Split: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask|targetMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_split") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "target", r.target) + } + path.WriteString(r.target) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Split) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.split") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.split") + if reader := instrument.RecordRequestBody(ctx, "indices.split", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.split") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Split query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a split.Response +func (r Split) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.split") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Split headers map. +func (r *Split) Header(key, value string) *Split { + r.headers.Set(key, value) + + return r +} + +// Index Name of the source index to split. +// API Name: index +func (r *Split) _index(index string) *Split { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Target Name of the target index to create. +// API Name: target +func (r *Split) _target(target string) *Split { + r.paramSet |= targetMask + r.target = target + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
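+//
+// Editor's sketch (illustrative duration strings, assumes a typed client `es`):
+//
+//	res, err := es.Indices.Split("src", "dst").
+//		MasterTimeout("30s").
+//		Timeout("1m").
+//		Do(context.Background())
+//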
+// API name: master_timeout +func (r *Split) MasterTimeout(duration string) *Split { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Split) Timeout(duration string) *Split { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Split) WaitForActiveShards(waitforactiveshards string) *Split { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Split) ErrorTrace(errortrace bool) *Split { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Split) FilterPath(filterpaths ...string) *Split { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Split) Human(human bool) *Split { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Split) Pretty(pretty bool) *Split { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Aliases for the resulting index. +// API name: aliases +func (r *Split) Aliases(aliases map[string]types.Alias) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aliases = aliases + return r +} + +func (r *Split) AddAlias(key string, value types.AliasVariant) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Alias + if r.req.Aliases == nil { + r.req.Aliases = make(map[string]types.Alias) + } else { + tmp = r.req.Aliases + } + + tmp[key] = *value.AliasCaster() + + r.req.Aliases = tmp + return r +} + +// Configuration options for the target index. 
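+//
+// For illustration (editor's sketch): the target primary shard count is usually
+// supplied here, e.g. splitting one primary into two:
+//
+//	res, err := es.Indices.Split("src", "dst").
+//		AddSetting("index.number_of_shards", json.RawMessage(`2`)).
+//		Do(context.Background())
+//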
+// API name: settings +func (r *Split) Settings(settings map[string]json.RawMessage) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Settings = settings + return r +} + +func (r *Split) AddSetting(key string, value json.RawMessage) *Split { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Settings == nil { + r.req.Settings = make(map[string]json.RawMessage) + } else { + tmp = r.req.Settings + } + + tmp[key] = value + + r.req.Settings = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/response.go new file mode 100644 index 000000000..23f39d2eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 +type Response struct { + All_ types.IndicesStats `json:"_all"` + Indices map[string]types.IndicesStats `json:"indices,omitempty"` + Shards_ types.ShardStatistics `json:"_shards"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.IndicesStats, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/stats.go new file mode 100644 index 000000000..1b4888cdd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats/stats.go @@ -0,0 +1,506 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
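Stepping back to the Split builder just completed: its body setters (`Aliases`/`AddAlias`, `Settings`/`AddSetting`) lazily initialize `r.req`, so they compose with the query-parameter setters in any order. A sketch of a complete call, continuing the setup from the first sketch; the index names and setting value are illustrative:

```go
import (
	"context"
	"encoding/json"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func splitWithBody(ctx context.Context, es *elasticsearch.TypedClient) error {
	_, err := es.Indices.Split("logs", "logs-split").
		MasterTimeout("30s").
		WaitForActiveShards("all").
		// A zero-value types.Alias is a plain alias: no filter,
		// routing, or write-index flag.
		Aliases(map[string]types.Alias{"logs-latest": {}}).
		// Settings values are raw JSON, so numbers stay numbers.
		AddSetting("index.number_of_replicas", json.RawMessage(`1`)).
		Do(ctx)
	return err
}
```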
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing +// indices. +// +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level" +) + +const ( + metricMask = iota + 1 + + indexMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + metric string + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get index statistics. +// For data streams, the API retrieves statistics for the stream's backing +// indices. +// +// By default, the returned statistics are index-level with `primaries` and +// `total` aggregations. +// `primaries` are the values for only the primary shards. +// `total` are the accumulated values for both primary and replica shards. +// +// To get shard-level statistics, set the `level` parameter to `shards`. +// +// NOTE: When moving to another node, the shard-level statistics for a shard are +// cleared. +// Although the shard is no longer part of the node, that node retains any +// node-level statistics to which the shard contributed. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == metricMask: + path.WriteString("/") + path.WriteString("_stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == indexMask|metricMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
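Because `HttpRequest` is exported, the path-selection switch above can be exercised without a round trip, which is handy for checking which of the four `/_stats` routes a given combination of setters produces. A sketch, continuing the setup from the first sketch and using the `Index`/`Metric` setters defined further below; the values are illustrative:

```go
req, err := es.Indices.Stats().
	Index("logs-*").
	Metric("docs,store").
	HttpRequest(context.Background())
if err != nil {
	log.Fatal(err)
}
// paramSet has both indexMask and metricMask set, so the switch takes
// the "/{index}/_stats/{metric}" branch. The URL has no host yet; the
// transport fills that in at Perform time.
fmt.Println(req.Method, req.URL.Path) // GET /logs-*/_stats/docs,store
```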
+func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.stats") + if reader := instrument.RecordRequestBody(ctx, "indices.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
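As with the other typed endpoints, `Do` decodes 2xx bodies into the `Response` defined in response.go above: an `_all` aggregate plus a per-index map. A sketch that reads a few stats fields; the field shapes (`Primaries`, `Total`, `Docs.Count`) follow the generated types package and should be treated as assumptions from the spec:

```go
// Continues the setup from the first sketch.
func printDocCounts(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Indices.Stats().Index("logs-*").Do(ctx)
	if err != nil {
		return err
	}
	// Most sub-structs are pointers, so nil-check before dereferencing.
	if p := res.All_.Primaries; p != nil && p.Docs != nil {
		fmt.Printf("primary docs: %d\n", p.Docs.Count)
	}
	for name, s := range res.Indices {
		if s.Total != nil && s.Total.Docs != nil {
			fmt.Printf("%s: %d docs\n", name, s.Total.Docs.Count)
		}
	}
	return nil
}
```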
+func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// Metric Limit the information returned the specific metrics. +// API Name: metric +func (r *Stats) Metric(metric string) *Stats { + r.paramSet |= metricMask + r.metric = metric + + return r +} + +// Index A comma-separated list of index names; use `_all` or empty string to perform +// the operation on all indices +// API Name: index +func (r *Stats) Index(index string) *Stats { + r.paramSet |= indexMask + r.index = index + + return r +} + +// CompletionFields Comma-separated list or wildcard expressions of fields to include in +// fielddata and suggest statistics. +// API name: completion_fields +func (r *Stats) CompletionFields(fields ...string) *Stats { + r.values.Set("completion_fields", strings.Join(fields, ",")) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument +// determines whether wildcard expressions match hidden data streams. Supports +// comma-separated values, +// such as `open,hidden`. +// API name: expand_wildcards +func (r *Stats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Stats { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// FielddataFields Comma-separated list or wildcard expressions of fields to include in +// fielddata statistics. +// API name: fielddata_fields +func (r *Stats) FielddataFields(fields ...string) *Stats { + r.values.Set("fielddata_fields", strings.Join(fields, ",")) + + return r +} + +// Fields Comma-separated list or wildcard expressions of fields to include in the +// statistics. +// API name: fields +func (r *Stats) Fields(fields ...string) *Stats { + r.values.Set("fields", strings.Join(fields, ",")) + + return r +} + +// ForbidClosedIndices If true, statistics are not collected from closed indices. +// API name: forbid_closed_indices +func (r *Stats) ForbidClosedIndices(forbidclosedindices bool) *Stats { + r.values.Set("forbid_closed_indices", strconv.FormatBool(forbidclosedindices)) + + return r +} + +// Groups Comma-separated list of search groups to include in the search statistics. 
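`IsSuccess` gives the same request a boolean flavor: it drains and closes the body, maps any 2xx status to `true`, treats 404 as a clean `false, nil`, and reports every other status as an error (recording it on the active instrumentation). That makes it a cheap existence probe, as in this fragment continuing the earlier setup:

```go
ok, err := es.Indices.Stats().Index("logs-*").IsSuccess(ctx)
if err != nil {
	return err // non-2xx, non-404 status, or a transport failure
}
if !ok {
	// 404: the target index does not exist.
}
```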
+// API name: groups +func (r *Stats) Groups(groups ...string) *Stats { + tmp := []string{} + for _, item := range groups { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("groups", strings.Join(tmp, ",")) + + return r +} + +// IncludeSegmentFileSizes If true, the call reports the aggregated disk usage of each one of the Lucene +// index files (only applies if segment stats are requested). +// API name: include_segment_file_sizes +func (r *Stats) IncludeSegmentFileSizes(includesegmentfilesizes bool) *Stats { + r.values.Set("include_segment_file_sizes", strconv.FormatBool(includesegmentfilesizes)) + + return r +} + +// IncludeUnloadedSegments If true, the response includes information from segments that are not loaded +// into memory. +// API name: include_unloaded_segments +func (r *Stats) IncludeUnloadedSegments(includeunloadedsegments bool) *Stats { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) + + return r +} + +// Level Indicates whether statistics are aggregated at the cluster, index, or shard +// level. +// API name: level +func (r *Stats) Level(level level.Level) *Stats { + r.values.Set("level", level.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/request.go new file mode 100644 index 000000000..1e38c601b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatealiases + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatealiases +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L59 +type Request struct { + + // Actions Actions to perform. + Actions []types.IndicesAction `json:"actions,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatealiases request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/response.go new file mode 100644 index 000000000..9445b95f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatealiases + +// Response holds the response body struct for the package updatealiases +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/update_aliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/update_aliases.go new file mode 100644 index 000000000..21f8a820e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases/update_aliases.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update an alias. +// Adds a data stream or index to an alias. +package updatealiases + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateAliases struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateAliases type alias for index. +type NewUpdateAliases func() *UpdateAliases + +// NewUpdateAliasesFunc returns a new instance of UpdateAliases with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateAliasesFunc(tp elastictransport.Interface) NewUpdateAliases { + return func() *UpdateAliases { + n := New(tp) + + return n + } +} + +// Create or update an alias. +// Adds a data stream or index to an alias. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases +func New(tp elastictransport.Interface) *UpdateAliases { + r := &UpdateAliases{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *UpdateAliases) Raw(raw io.Reader) *UpdateAliases { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateAliases) Request(req *Request) *UpdateAliases { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateAliases) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateAliases: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_aliases") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
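Two equivalent ways to feed `UpdateAliases` a body follow from the methods above: `Request` with a struct (optionally hydrated via the generated `FromJSON` helper from request.go) or `Raw` with a pre-encoded payload, which takes precedence. A sketch of an atomic alias swap using `FromJSON` and the `Do` defined just below; the index and alias names are illustrative:

```go
import (
	"context"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases"
)

func swapAlias(ctx context.Context, es *elasticsearch.TypedClient) error {
	// Both actions apply atomically on the server side.
	req, err := updatealiases.NewRequest().FromJSON(`{
	  "actions": [
	    { "remove": { "index": "logs-2024.04", "alias": "logs" } },
	    { "add":    { "index": "logs-2024.05", "alias": "logs" } }
	  ]
	}`)
	if err != nil {
		return err
	}
	_, err = es.Indices.UpdateAliases().Request(req).Do(ctx)
	return err
}
```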
+func (r UpdateAliases) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.update_aliases") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.update_aliases") + if reader := instrument.RecordRequestBody(ctx, "indices.update_aliases", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.update_aliases") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateAliases query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatealiases.Response +func (r UpdateAliases) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.update_aliases") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateAliases headers map. +func (r *UpdateAliases) Header(key, value string) *UpdateAliases { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateAliases) MasterTimeout(duration string) *UpdateAliases { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: timeout +func (r *UpdateAliases) Timeout(duration string) *UpdateAliases { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateAliases) ErrorTrace(errortrace bool) *UpdateAliases { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateAliases) FilterPath(filterpaths ...string) *UpdateAliases { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateAliases) Human(human bool) *UpdateAliases { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateAliases) Pretty(pretty bool) *UpdateAliases { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Actions to perform. +// API name: actions +func (r *UpdateAliases) Actions(actions ...types.IndicesActionVariant) *UpdateAliases { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range actions { + + r.req.Actions = append(r.req.Actions, *v.IndicesActionCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/request.go new file mode 100644 index 000000000..4bd5012e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
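The typed alternative to the JSON payload above is the `Actions` setter, which accepts `types.IndicesActionVariant` values and appends the result of their `IndicesActionCaster()`. Assuming, as elsewhere in the generated types package, that `*types.IndicesAction` satisfies its own variant interface, the same swap looks like this (names again illustrative):

```go
oldIdx, newIdx, alias := "logs-2024.04", "logs-2024.05", "logs"
_, err := es.Indices.UpdateAliases().
	Actions(
		&types.IndicesAction{Remove: &types.RemoveAction{Index: &oldIdx, Alias: &alias}},
		&types.IndicesAction{Add: &types.AddAction{Index: &newIdx, Alias: &alias}},
	).
	Do(ctx)
```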
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validatequery + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package validatequery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L122 +type Request struct { + + // Query Query in the Lucene query string syntax. + Query *types.Query `json:"query,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Validatequery request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/response.go new file mode 100644 index 000000000..e8348c42c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validatequery + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package validatequery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 +type Response struct { + Error *string `json:"error,omitempty"` + Explanations []types.IndicesValidationExplanation `json:"explanations,omitempty"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` + Valid bool `json:"valid"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/validate_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/validate_query.go new file mode 100644 index 000000000..eec10eede --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery/validate_query.go @@ -0,0 +1,497 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Validate a query. +// Validates a query without running it. +package validatequery + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ValidateQuery struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewValidateQuery type alias for index. +type NewValidateQuery func() *ValidateQuery + +// NewValidateQueryFunc returns a new instance of ValidateQuery with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewValidateQueryFunc(tp elastictransport.Interface) NewValidateQuery { + return func() *ValidateQuery { + n := New(tp) + + return n + } +} + +// Validate a query. +// Validates a query without running it. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query +func New(tp elastictransport.Interface) *ValidateQuery { + r := &ValidateQuery{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ValidateQuery) Raw(raw io.Reader) *ValidateQuery { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ValidateQuery) Request(req *Request) *ValidateQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ValidateQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ValidateQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_validate") + path.WriteString("/") + path.WriteString("query") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_validate") + path.WriteString("/") + path.WriteString("query") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ValidateQuery) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "indices.validate_query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "indices.validate_query") + if reader := instrument.RecordRequestBody(ctx, "indices.validate_query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "indices.validate_query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ValidateQuery query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a validatequery.Response +func (r ValidateQuery) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "indices.validate_query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ValidateQuery headers map. +func (r *ValidateQuery) Header(key, value string) *ValidateQuery { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. +// API Name: index +func (r *ValidateQuery) Index(index string) *ValidateQuery { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
+// API name: allow_no_indices +func (r *ValidateQuery) AllowNoIndices(allownoindices bool) *ValidateQuery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// AllShards If `true`, the validation is executed on all shards instead of one random +// shard per index. +// API name: all_shards +func (r *ValidateQuery) AllShards(allshards bool) *ValidateQuery { + r.values.Set("all_shards", strconv.FormatBool(allshards)) + + return r +} + +// Analyzer Analyzer to use for the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. +// API name: analyzer +func (r *ValidateQuery) Analyzer(analyzer string) *ValidateQuery { + r.values.Set("analyzer", analyzer) + + return r +} + +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// API name: analyze_wildcard +func (r *ValidateQuery) AnalyzeWildcard(analyzewildcard bool) *ValidateQuery { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) + + return r +} + +// DefaultOperator The default operator for query string query: `AND` or `OR`. +// API name: default_operator +func (r *ValidateQuery) DefaultOperator(defaultoperator operator.Operator) *ValidateQuery { + r.values.Set("default_operator", defaultoperator.String()) + + return r +} + +// Df Field to use as default where no field prefix is given in the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. +// API name: df +func (r *ValidateQuery) Df(df string) *ValidateQuery { + r.values.Set("df", df) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// API name: expand_wildcards +func (r *ValidateQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ValidateQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// Explain If `true`, the response returns detailed information if an error has +// occurred. +// API name: explain +func (r *ValidateQuery) Explain(explain bool) *ValidateQuery { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. +// API name: ignore_unavailable +func (r *ValidateQuery) IgnoreUnavailable(ignoreunavailable bool) *ValidateQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. +// API name: lenient +func (r *ValidateQuery) Lenient(lenient bool) *ValidateQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) + + return r +} + +// Rewrite If `true`, returns a more detailed explanation showing the actual Lucene +// query that will be executed. +// API name: rewrite +func (r *ValidateQuery) Rewrite(rewrite bool) *ValidateQuery { + r.values.Set("rewrite", strconv.FormatBool(rewrite)) + + return r +} + +// Q Query in the Lucene query string syntax. 
+// API name: q +func (r *ValidateQuery) Q(q string) *ValidateQuery { + r.values.Set("q", q) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ValidateQuery) ErrorTrace(errortrace bool) *ValidateQuery { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ValidateQuery) FilterPath(filterpaths ...string) *ValidateQuery { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ValidateQuery) Human(human bool) *ValidateQuery { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ValidateQuery) Pretty(pretty bool) *ValidateQuery { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Query in the Lucene query string syntax. +// API name: query +func (r *ValidateQuery) Query(query types.QueryVariant) *ValidateQuery { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/chat_completion_unified.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/chat_completion_unified.go new file mode 100644 index 000000000..5b78fa2c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/chat_completion_unified.go @@ -0,0 +1,558 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform chat completion inference +// +// The chat completion inference API enables real-time responses for chat +// completion tasks by delivering answers incrementally, reducing response times +// during computation. 
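Before moving on to the inference endpoints, a sketch of the ValidateQuery surface defined above: the `q` query-string parameter plus `Explain` covers the common lint-my-query case, and on failure the generated `Response` carries per-index explanations. This fragment continues the earlier setup; the query and index pattern are illustrative:

```go
res, err := es.Indices.ValidateQuery().
	Index("logs-*").
	Q("message:error AND status:[500 TO 599]").
	Explain(true).
	Do(ctx)
if err != nil {
	return err
}
if !res.Valid {
	// Explanations come from response.go above; Error is a *string.
	for _, ex := range res.Explanations {
		if ex.Error != nil {
			fmt.Printf("%s: %s\n", ex.Index, *ex.Error)
		}
	}
}
```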
+// It only works with the `chat_completion` task type for `openai` and `elastic` +// inference services. +// +// NOTE: The `chat_completion` task type is only available within the _stream +// API and only supports streaming. +// The Chat completion inference API and the Stream inference API differ in +// their response structure and capabilities. +// The Chat completion inference API provides more comprehensive customization +// options through more fields and function calling support. +// If you use the `openai`, `hugging_face` or the `elastic` service, use the +// Chat completion inference API. +package chatcompletionunified + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ChatCompletionUnified struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewChatCompletionUnified type alias for index. +type NewChatCompletionUnified func(inferenceid string) *ChatCompletionUnified + +// NewChatCompletionUnifiedFunc returns a new instance of ChatCompletionUnified with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewChatCompletionUnifiedFunc(tp elastictransport.Interface) NewChatCompletionUnified { + return func(inferenceid string) *ChatCompletionUnified { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform chat completion inference +// +// The chat completion inference API enables real-time responses for chat +// completion tasks by delivering answers incrementally, reducing response times +// during computation. +// It only works with the `chat_completion` task type for `openai` and `elastic` +// inference services. +// +// NOTE: The `chat_completion` task type is only available within the _stream +// API and only supports streaming. +// The Chat completion inference API and the Stream inference API differ in +// their response structure and capabilities. +// The Chat completion inference API provides more comprehensive customization +// options through more fields and function calling support. +// If you use the `openai`, `hugging_face` or the `elastic` service, use the +// Chat completion inference API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference +func New(tp elastictransport.Interface) *ChatCompletionUnified { + r := &ChatCompletionUnified{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *ChatCompletionUnified) Raw(raw io.Reader) *ChatCompletionUnified { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ChatCompletionUnified) Request(req *Request) *ChatCompletionUnified { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ChatCompletionUnified) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ChatCompletionUnified: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("chat_completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ChatCompletionUnified) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.chat_completion_unified") + if reader := instrument.RecordRequestBody(ctx, "inference.chat_completion_unified", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.chat_completion_unified") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ChatCompletionUnified query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a chatcompletionunified.Response +func (r ChatCompletionUnified) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.chat_completion_unified") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ChatCompletionUnified headers map. +func (r *ChatCompletionUnified) Header(key, value string) *ChatCompletionUnified { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *ChatCompletionUnified) _inferenceid(inferenceid string) *ChatCompletionUnified { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. 
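+// The duration uses Elasticsearch time-unit syntax, for example:
+//
+//	r.Timeout("30s") // adds ?timeout=30s to the query string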
+// API name: timeout +func (r *ChatCompletionUnified) Timeout(duration string) *ChatCompletionUnified { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ChatCompletionUnified) ErrorTrace(errortrace bool) *ChatCompletionUnified { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ChatCompletionUnified) FilterPath(filterpaths ...string) *ChatCompletionUnified { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ChatCompletionUnified) Human(human bool) *ChatCompletionUnified { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ChatCompletionUnified) Pretty(pretty bool) *ChatCompletionUnified { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The upper bound limit for the number of tokens that can be generated for a +// completion request. +// API name: max_completion_tokens +func (r *ChatCompletionUnified) MaxCompletionTokens(maxcompletiontokens int64) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxCompletionTokens = &maxcompletiontokens + + return r +} + +// A list of objects representing the conversation. +// Requests should generally only add new messages from the user (role `user`). +// The other message roles (`assistant`, `system`, or `tool`) should generally +// only be copied from the response to a previous completion request, such that +// the messages array is built up throughout a conversation. +// API name: messages +func (r *ChatCompletionUnified) Messages(messages ...types.MessageVariant) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range messages { + + r.req.Messages = append(r.req.Messages, *v.MessageCaster()) + + } + return r +} + +// The ID of the model to use. +// API name: model +func (r *ChatCompletionUnified) Model(model string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Model = &model + + return r +} + +// A sequence of strings to control when the model should stop generating +// additional tokens. +// API name: stop +func (r *ChatCompletionUnified) Stop(stops ...string) *ChatCompletionUnified { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range stops { + + r.req.Stop = append(r.req.Stop, v) + + } + return r +} + +// The sampling temperature to use. 
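+// Higher values produce more random output and lower values more
+// deterministic output (the usual convention for OpenAI-style samplers), for
+// example:
+//
+//	r.Temperature(0.2)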
+// API name: temperature
+func (r *ChatCompletionUnified) Temperature(temperature float32) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Temperature = &temperature
+
+	return r
+}
+
+// Controls which tool is called by the model.
+// String representation: One of `auto`, `none`, or `required`. `auto` allows
+// the model to choose between calling tools and generating a message. `none`
+// causes the model to not call any tools. `required` forces the model to call
+// one or more tools.
+// Example (object representation):
+// ```
+//
+//	{
+//	  "tool_choice": {
+//	    "type": "function",
+//	    "function": {
+//	      "name": "get_current_weather"
+//	    }
+//	  }
+//	}
+//
+// ```
+// API name: tool_choice
+func (r *ChatCompletionUnified) ToolChoice(completiontooltype types.CompletionToolTypeVariant) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ToolChoice = *completiontooltype.CompletionToolTypeCaster()
+
+	return r
+}
+
+// A list of tools that the model can call.
+// Example:
+// ```
+//
+//	{
+//	  "tools": [
+//	    {
+//	      "type": "function",
+//	      "function": {
+//	        "name": "get_price_of_item",
+//	        "description": "Get the current price of an item",
+//	        "parameters": {
+//	          "type": "object",
+//	          "properties": {
+//	            "item": {
+//	              "id": "12345"
+//	            },
+//	            "unit": {
+//	              "type": "currency"
+//	            }
+//	          }
+//	        }
+//	      }
+//	    }
+//	  ]
+//	}
+//
+// ```
+// API name: tools
+func (r *ChatCompletionUnified) Tools(tools ...types.CompletionToolVariant) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range tools {
+
+		r.req.Tools = append(r.req.Tools, *v.CompletionToolCaster())
+
+	}
+	return r
+}
+
+// Nucleus sampling, an alternative to sampling with temperature.
+// API name: top_p
+func (r *ChatCompletionUnified) TopP(topp float32) *ChatCompletionUnified {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.TopP = &topp
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/request.go
new file mode 100644
index 000000000..2516a4cb9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/request.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package chatcompletionunified + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/chat_completion_unified/UnifiedRequest.ts#L24-L61 +type Request = types.RequestChatCompletion + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewRequestChatCompletion() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/response.go new file mode 100644 index 000000000..d1a04b791 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package chatcompletionunified + +// Response holds the response body struct for the package chatcompletionunified +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/chat_completion_unified/UnifiedResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/completion.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/completion.go new file mode 100644 index 000000000..86dec8342 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/completion.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform completion inference on the service +package completion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Completion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCompletion type alias for index. +type NewCompletion func(inferenceid string) *Completion + +// NewCompletionFunc returns a new instance of Completion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCompletionFunc(tp elastictransport.Interface) NewCompletion { + return func(inferenceid string) *Completion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform completion inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *Completion { + r := &Completion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Completion) Raw(raw io.Reader) *Completion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Completion) Request(req *Request) *Completion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
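+//
+// This is useful for inspecting or customizing the request before sending it
+// yourself (a sketch; "my-endpoint" is a hypothetical inference id):
+//
+//	httpReq, err := client.Inference.Completion("my-endpoint").Request(req).HttpRequest(ctx)
+//	// httpReq.URL.Path is "/_inference/completion/my-endpoint"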
+func (r *Completion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Completion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
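+//
+// Most callers will prefer Do, which decodes the body into a typed Response;
+// Perform returns the raw *http.Response and leaves closing the body to the
+// caller. For example (hypothetical inference id):
+//
+//	res, err := client.Inference.Completion("my-endpoint").Input("Tell me a joke").Do(ctx)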
+func (r Completion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.completion") + if reader := instrument.RecordRequestBody(ctx, "inference.completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Completion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a completion.Response +func (r Completion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Completion headers map. +func (r *Completion) Header(key, value string) *Completion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *Completion) _inferenceid(inferenceid string) *Completion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Completion) Timeout(duration string) *Completion { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
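+// For example:
+//
+//	r.ErrorTrace(true) // adds ?error_trace=true to the query string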
+// API name: error_trace +func (r *Completion) ErrorTrace(errortrace bool) *Completion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Completion) FilterPath(filterpaths ...string) *Completion { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Completion) Human(human bool) *Completion { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Completion) Pretty(pretty bool) *Completion { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. +// API name: input +func (r *Completion) Input(inputs ...string) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *Completion) TaskSettings(tasksettings json.RawMessage) *Completion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/request.go new file mode 100644 index 000000000..316a39efb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/request.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package completion + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package completion +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/completion/CompletionRequest.ts#L25-L63 +type Request struct { + + // Input Inference input. + // Either a string or an array of strings. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Completion request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/response.go new file mode 100644 index 000000000..9a5e35ec3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package completion + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package completion +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/completion/CompletionResponse.ts#L22-L25 + +type Response []types.CompletionResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/delete.go new file mode 100644 index 000000000..952813ed9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/delete.go @@ -0,0 +1,389 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an inference endpoint +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + tasktypeMask = iota + 1 + + inferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + tasktype string + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(inferenceid string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
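+//
+// A sketch of the eventual call through a typed client (the client variable
+// and inference id are hypothetical):
+//
+//	res, err := client.Inference.Delete("my-endpoint").Do(ctx)
+//	// res.Acknowledged reports whether the endpoint was removed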
+func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(inferenceid string) *Delete { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Delete an inference endpoint +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodDelete + case r.paramSet == tasktypeMask|inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.delete") + if reader := instrument.RecordRequestBody(ctx, "inference.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
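+//
+// For example (hypothetical inference id):
+//
+//	ok, err := client.Inference.Delete("my-endpoint").IsSuccess(ctx)
+//	// ok is false with a nil error when Elasticsearch answers 404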
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type +// API Name: tasktype +func (r *Delete) TaskType(tasktype string) *Delete { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// InferenceId The inference identifier. +// API Name: inferenceid +func (r *Delete) _inferenceid(inferenceid string) *Delete { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// DryRun When true, the endpoint is not deleted and a list of ingest processors which +// reference this endpoint is returned. +// API name: dry_run +func (r *Delete) DryRun(dryrun bool) *Delete { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// Force When true, the inference endpoint is forcefully deleted even if it is still +// being used by ingest processors or semantic text fields. +// API name: force +func (r *Delete) Force(force bool) *Delete { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
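+// For example:
+//
+//	r.Pretty(true) // adds ?pretty=true to the query string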
+// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/response.go new file mode 100644 index 000000000..c9a3d8d34 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/delete/DeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` + Pipelines []string `json:"pipelines"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/get.go new file mode 100644 index 000000000..1aa1f30ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/get.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get an inference endpoint +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + tasktypeMask = iota + 1 + + inferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + tasktype string + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func() *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func() *Get { + n := New(tp) + + return n + } +} + +// Get an inference endpoint +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
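+//
+// Depending on which path parameters were set, the request resolves to
+// GET /_inference, GET /_inference/{inferenceid}, or
+// GET /_inference/{tasktype}/{inferenceid}.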
+func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_inference") + + method = http.MethodGet + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodGet + case r.paramSet == tasktypeMask|inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.get") + if reader := instrument.RecordRequestBody(ctx, "inference.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
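+//
+// A sketch of probing whether an endpoint exists (hypothetical id):
+//
+//	exists, err := client.Inference.Get().InferenceId("my-endpoint").IsSuccess(ctx)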
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type +// API Name: tasktype +func (r *Get) TaskType(tasktype string) *Get { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *Get) InferenceId(inferenceid string) *Get { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/response.go new file mode 100644 index 000000000..8bb06126d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/get/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/get/GetResponse.ts#L22-L26 +type Response struct { + Endpoints []types.InferenceEndpointInfo `json:"endpoints"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/inference.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/inference.go new file mode 100644 index 000000000..3ecdcaa66 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/inference.go @@ -0,0 +1,506 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform inference on the service. +// +// This API enables you to use machine learning models to perform specific tasks +// on data that you provide as an input. +// It returns a response with the results of the tasks. +// The inference endpoint you use can perform one specific task that has been +// defined when the endpoint was created with the create inference API. +// +// For details about using this API with a service, such as Amazon Bedrock, +// Anthropic, or HuggingFace, refer to the service-specific documentation. +// +// > info +// > The inference APIs enable you to use certain services, such as built-in +// machine learning models (ELSER, E5), models uploaded through Eland, Cohere, +// OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or +// Hugging Face. For built-in models and models uploaded through Eland, the +// inference APIs offer an alternative way to use and manage trained models. 
+// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +package inference + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + tasktypeMask = iota + 1 + + inferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Inference struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInference type alias for index. +type NewInference func(inferenceid string) *Inference + +// NewInferenceFunc returns a new instance of Inference with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInferenceFunc(tp elastictransport.Interface) NewInference { + return func(inferenceid string) *Inference { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform inference on the service. +// +// This API enables you to use machine learning models to perform specific tasks +// on data that you provide as an input. +// It returns a response with the results of the tasks. +// The inference endpoint you use can perform one specific task that has been +// defined when the endpoint was created with the create inference API. +// +// For details about using this API with a service, such as Amazon Bedrock, +// Anthropic, or HuggingFace, refer to the service-specific documentation. +// +// > info +// > The inference APIs enable you to use certain services, such as built-in +// machine learning models (ELSER, E5), models uploaded through Eland, Cohere, +// OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or +// Hugging Face. For built-in models and models uploaded through Eland, the +// inference APIs offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *Inference { + r := &Inference{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Inference) Raw(raw io.Reader) *Inference { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
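+//
+// A sketch, assuming the generated Request type for this package exposes an
+// Input field as the sibling inference packages do (the client variable and
+// endpoint id are hypothetical):
+//
+//	req := NewRequest()
+//	req.Input = []string{"The quick brown fox"}
+//	res, err := client.Inference.Inference("my-endpoint").Request(req).Do(ctx)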
+func (r *Inference) Request(req *Request) *Inference { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Inference) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Inference: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + case r.paramSet == tasktypeMask|inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Inference) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.inference") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.inference") + if reader := instrument.RecordRequestBody(ctx, "inference.inference", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.inference") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Inference query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a inference.Response +func (r Inference) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.inference") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Inference headers map. +func (r *Inference) Header(key, value string) *Inference { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of inference task that the model performs. +// API Name: tasktype +func (r *Inference) TaskType(tasktype string) *Inference { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *Inference) _inferenceid(inferenceid string) *Inference { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout The amount of time to wait for the inference request to complete. 
+// API name: timeout +func (r *Inference) Timeout(duration string) *Inference { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Inference) ErrorTrace(errortrace bool) *Inference { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Inference) FilterPath(filterpaths ...string) *Inference { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Inference) Human(human bool) *Inference { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Inference) Pretty(pretty bool) *Inference { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// > info +// > Inference endpoints for the `completion` task type currently only support a +// single string as input. +// API name: input +func (r *Inference) Input(inputs ...string) *Inference { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Specifies the input data type for the text embedding model. The `input_type` +// parameter only applies to Inference Endpoints with the `text_embedding` task +// type. Possible values include: +// * `SEARCH` +// * `INGEST` +// * `CLASSIFICATION` +// * `CLUSTERING` +// Not all services support all values. Unsupported values will trigger a +// validation exception. +// Accepted values depend on the configured inference service, refer to the +// relevant service-specific documentation for more info. +// +// > info +// > The `input_type` parameter specified on the root level of the request body +// will take precedence over the `input_type` parameter specified in +// `task_settings`. +// API name: input_type +func (r *Inference) InputType(inputtype string) *Inference { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.InputType = &inputtype + + return r +} + +// The query input, which is required only for the `rerank` task. +// It is not required for other tasks. +// API name: query +func (r *Inference) Query(query string) *Inference { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = &query + + return r +} + +// Task settings for the individual inference request. +// These settings are specific to the task type you specified and override the +// task settings specified when initializing the service. 
+// API name: task_settings +func (r *Inference) TaskSettings(tasksettings json.RawMessage) *Inference { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/request.go new file mode 100644 index 000000000..927c8e5c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/request.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package inference + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package inference +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/inference/InferenceRequest.ts#L26-L104 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // > info + // > Inference endpoints for the `completion` task type currently only support a + // single string as input. + Input []string `json:"input"` + // InputType Specifies the input data type for the text embedding model. The `input_type` + // parameter only applies to Inference Endpoints with the `text_embedding` task + // type. Possible values include: + // * `SEARCH` + // * `INGEST` + // * `CLASSIFICATION` + // * `CLUSTERING` + // Not all services support all values. Unsupported values will trigger a + // validation exception. + // Accepted values depend on the configured inference service, refer to the + // relevant service-specific documentation for more info. + // + // > info + // > The `input_type` parameter specified on the root level of the request body + // will take precedence over the `input_type` parameter specified in + // `task_settings`. + InputType *string `json:"input_type,omitempty"` + // Query The query input, which is required only for the `rerank` task. + // It is not required for other tasks. + Query *string `json:"query,omitempty"` + // TaskSettings Task settings for the individual inference request. + // These settings are specific to the task type you specified and override the + // task settings specified when initializing the service. 
+ TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Inference request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "input_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InputType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InputType = &o + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/response.go new file mode 100644 index 000000000..1cd85f8c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
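For orientation: the fluent setters above (Input, InputType, Query, TaskSettings) together with this Request type are the whole call surface of the inference endpoint. A minimal usage sketch follows; it is not part of the vendored files, and the package name, endpoint ID, and inputs are illustrative assumptions. Only the constructors and setters shown in this diff are used.

package examples // hypothetical package, not part of the vendored tree

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference"
)

// rerank issues POST /_inference/rerank/{inferenceid} using the builder above.
func rerank(tp elastictransport.Interface) {
	res, err := inference.NewInferenceFunc(tp)("my-rerank-endpoint"). // hypothetical endpoint ID
		TaskType("rerank").                           // optional; adds the task type path segment
		Query("which passage answers the question?"). // required only for the rerank task
		Input("first passage", "second passage").
		Do(context.Background())
	if err != nil {
		// Non-2xx responses surface as *types.ElasticsearchError.
		log.Fatalf("inference failed: %v", err)
	}
	fmt.Printf("got %d ranked documents\n", len(res.Rerank))
}

When the body is already serialized, Raw(io.Reader) can stand in for the typed setters; per the builder's contract, Raw takes precedence over Request.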
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package inference
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package inference
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/inference/InferenceResponse.ts#L22-L25
+type Response struct {
+	AdditionalInferenceResultProperty map[string]json.RawMessage      `json:"-"`
+	Completion                        []types.CompletionResult        `json:"completion,omitempty"`
+	Rerank                            []types.RankedDocument          `json:"rerank,omitempty"`
+	SparseEmbedding                   []types.SparseEmbeddingResult   `json:"sparse_embedding,omitempty"`
+	TextEmbedding                     []types.TextEmbeddingResult     `json:"text_embedding,omitempty"`
+	TextEmbeddingBits                 []types.TextEmbeddingByteResult `json:"text_embedding_bits,omitempty"`
+	TextEmbeddingBytes                []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{
+		AdditionalInferenceResultProperty: make(map[string]json.RawMessage, 0),
+	}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/put.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/put.go
new file mode 100644
index 000000000..2d81f4e08
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/put.go
@@ -0,0 +1,515 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create an inference endpoint.
+//
+// IMPORTANT: The inference APIs enable you to use certain services, such as
+// built-in machine learning models (ELSER, E5), models uploaded through Eland,
+// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI,
+// Anthropic, Watsonx.ai, or Hugging Face.
+// For built-in models and models uploaded through Eland, the inference APIs
+// offer an alternative way to use and manage trained models.
+// However, if you do not plan to use the inference APIs to use these models or
+// if you want to use non-NLP models, use the machine learning trained model
+// APIs.
+//
+// The following integrations are available through the inference API. You can
+// find the available task types next to the integration name:
+// * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`,
+// `text_embedding`)
+// * Amazon Bedrock (`completion`, `text_embedding`)
+// * Amazon SageMaker (`chat_completion`, `completion`, `rerank`,
+// `sparse_embedding`, `text_embedding`)
+// * Anthropic (`completion`)
+// * Azure AI Studio (`completion`, `text_embedding`)
+// * Azure OpenAI (`completion`, `text_embedding`)
+// * Cohere (`completion`, `rerank`, `text_embedding`)
+// * DeepSeek (`completion`, `chat_completion`)
+// * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this
+// service is for built-in models and models uploaded through Eland)
+// * ELSER (`sparse_embedding`)
+// * Google AI Studio (`completion`, `text_embedding`)
+// * Google Vertex AI (`rerank`, `text_embedding`)
+// * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+// * Mistral (`chat_completion`, `completion`, `text_embedding`)
+// * OpenAI (`chat_completion`, `completion`, `text_embedding`)
+// * VoyageAI (`text_embedding`, `rerank`)
+// * Watsonx inference integration (`text_embedding`)
+// * JinaAI (`text_embedding`, `rerank`)
+package put
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	tasktypeMask = iota + 1
+
+	inferenceidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Put struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype    string
+	inferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPut type alias for index.
+type NewPut func(inferenceid string) *Put
+
+// NewPutFunc returns a new instance of Put with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewPutFunc(tp elastictransport.Interface) NewPut {
+	return func(inferenceid string) *Put {
+		n := New(tp)
+
+		n._inferenceid(inferenceid)
+
+		return n
+	}
+}
+
+// Create an inference endpoint.
+//
+// IMPORTANT: The inference APIs enable you to use certain services, such as
+// built-in machine learning models (ELSER, E5), models uploaded through Eland,
+// Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI,
+// Anthropic, Watsonx.ai, or Hugging Face.
+// For built-in models and models uploaded through Eland, the inference APIs
+// offer an alternative way to use and manage trained models.
+// However, if you do not plan to use the inference APIs to use these models or
+// if you want to use non-NLP models, use the machine learning trained model
+// APIs.
+//
+// The following integrations are available through the inference API. You can
+// find the available task types next to the integration name:
+// * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`,
+// `text_embedding`)
+// * Amazon Bedrock (`completion`, `text_embedding`)
+// * Amazon SageMaker (`chat_completion`, `completion`, `rerank`,
+// `sparse_embedding`, `text_embedding`)
+// * Anthropic (`completion`)
+// * Azure AI Studio (`completion`, `text_embedding`)
+// * Azure OpenAI (`completion`, `text_embedding`)
+// * Cohere (`completion`, `rerank`, `text_embedding`)
+// * DeepSeek (`completion`, `chat_completion`)
+// * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this
+// service is for built-in models and models uploaded through Eland)
+// * ELSER (`sparse_embedding`)
+// * Google AI Studio (`completion`, `text_embedding`)
+// * Google Vertex AI (`rerank`, `text_embedding`)
+// * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+// * Mistral (`chat_completion`, `completion`, `text_embedding`)
+// * OpenAI (`chat_completion`, `completion`, `text_embedding`)
+// * VoyageAI (`text_embedding`, `rerank`)
+// * Watsonx inference integration (`text_embedding`)
+// * JinaAI (`text_embedding`, `rerank`)
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put
+func New(tp elastictransport.Interface) *Put {
+	r := &Put{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *Put) Raw(raw io.Reader) *Put {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *Put) Request(req *Request) *Put {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Put) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Put: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPut + case r.paramSet == tasktypeMask|inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Put) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put") + if reader := instrument.RecordRequestBody(ctx, "inference.put", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Put query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a put.Response +func (r Put) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Put headers map. +func (r *Put) Header(key, value string) *Put { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type. Refer to the integration list in the API description for the +// available task types. +// API Name: tasktype +func (r *Put) TaskType(tasktype string) *Put { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *Put) _inferenceid(inferenceid string) *Put { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout +func (r *Put) Timeout(duration string) *Put { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Put) ErrorTrace(errortrace bool) *Put { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Put) FilterPath(filterpaths ...string) *Put { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Put) Human(human bool) *Put { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Put) Pretty(pretty bool) *Put { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Chunking configuration object +// API name: chunking_settings +func (r *Put) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type +// API name: service +func (r *Put) Service(service string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Service = service + + return r +} + +// Settings specific to the service +// API name: service_settings +func (r *Put) ServiceSettings(servicesettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = servicesettings + + return r +} + +// Task settings specific to the service and task type +// API name: task_settings +func (r *Put) TaskSettings(tasksettings json.RawMessage) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/request.go new file mode 100644 index 000000000..6515b5b4c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put/PutRequest.ts#L26-L88 +type Request = types.InferenceEndpoint + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewInferenceEndpoint() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/response.go new file mode 100644 index 000000000..d32644bf9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/put/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
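Because the put package's Request is a type alias for types.InferenceEndpoint, the fluent Service/ServiceSettings/TaskSettings setters fill that shared struct directly. A sketch of creating an endpoint with the builder from put.go follows; it is not part of the vendored files, and the endpoint ID, service name, model, and API key are placeholders, not values from this diff.

package examples // hypothetical package, not part of the vendored tree

import (
	"context"
	"encoding/json"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/put"
)

// createEndpoint issues PUT /_inference/{tasktype}/{inferenceid}.
func createEndpoint(tp elastictransport.Interface) {
	res, err := put.NewPutFunc(tp)("my-embedding-endpoint"). // hypothetical endpoint ID
		TaskType("text_embedding").
		Service("openai"). // fills types.InferenceEndpoint.Service
		ServiceSettings(json.RawMessage(`{"model_id":"text-embedding-3-small","api_key":"<api-key>"}`)).
		Do(context.Background())
	if err != nil {
		log.Fatalf("endpoint creation failed: %v", err)
	}
	log.Printf("created %s using service %s", res.InferenceId, res.Service)
}

ServiceSettings is an opaque json.RawMessage on this generic endpoint, so the keys above are illustrative; the service-specific variants further down validate the same settings against typed structs instead.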
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put/PutResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/put_alibabacloud.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/put_alibabacloud.go new file mode 100644 index 000000000..25e538377 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/put_alibabacloud.go @@ -0,0 +1,445 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an AlibabaCloud AI Search inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `alibabacloud-ai-search` service. +package putalibabacloud + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype" +) + +const ( + tasktypeMask = iota + 1 + + alibabacloudinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAlibabacloud struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + alibabacloudinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAlibabacloud type alias for index. +type NewPutAlibabacloud func(tasktype, alibabacloudinferenceid string) *PutAlibabacloud + +// NewPutAlibabacloudFunc returns a new instance of PutAlibabacloud with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAlibabacloudFunc(tp elastictransport.Interface) NewPutAlibabacloud { + return func(tasktype, alibabacloudinferenceid string) *PutAlibabacloud { + n := New(tp) + + n._tasktype(tasktype) + + n._alibabacloudinferenceid(alibabacloudinferenceid) + + return n + } +} + +// Create an AlibabaCloud AI Search inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `alibabacloud-ai-search` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud +func New(tp elastictransport.Interface) *PutAlibabacloud { + r := &PutAlibabacloud{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAlibabacloud) Raw(raw io.Reader) *PutAlibabacloud { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAlibabacloud) Request(req *Request) *PutAlibabacloud { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutAlibabacloud) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAlibabacloud: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|alibabacloudinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "alibabacloudinferenceid", r.alibabacloudinferenceid) + } + path.WriteString(r.alibabacloudinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutAlibabacloud) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_alibabacloud") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_alibabacloud") + if reader := instrument.RecordRequestBody(ctx, "inference.put_alibabacloud", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_alibabacloud") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAlibabacloud query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putalibabacloud.Response +func (r PutAlibabacloud) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_alibabacloud") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAlibabacloud headers map. +func (r *PutAlibabacloud) Header(key, value string) *PutAlibabacloud { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutAlibabacloud) _tasktype(tasktype string) *PutAlibabacloud { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AlibabacloudInferenceId The unique identifier of the inference endpoint. 
+// API Name: alibabacloudinferenceid +func (r *PutAlibabacloud) _alibabacloudinferenceid(alibabacloudinferenceid string) *PutAlibabacloud { + r.paramSet |= alibabacloudinferenceidMask + r.alibabacloudinferenceid = alibabacloudinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutAlibabacloud) Timeout(duration string) *PutAlibabacloud { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAlibabacloud) ErrorTrace(errortrace bool) *PutAlibabacloud { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAlibabacloud) FilterPath(filterpaths ...string) *PutAlibabacloud { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAlibabacloud) Human(human bool) *PutAlibabacloud { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAlibabacloud) Pretty(pretty bool) *PutAlibabacloud { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutAlibabacloud) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAlibabacloud { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `alibabacloud-ai-search`. +// API name: service +func (r *PutAlibabacloud) Service(service alibabacloudservicetype.AlibabaCloudServiceType) *PutAlibabacloud { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `alibabacloud-ai-search` service. +// API name: service_settings +func (r *PutAlibabacloud) ServiceSettings(servicesettings types.AlibabaCloudServiceSettingsVariant) *PutAlibabacloud { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.AlibabaCloudServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutAlibabacloud) TaskSettings(tasksettings types.AlibabaCloudTaskSettingsVariant) *PutAlibabacloud { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.AlibabaCloudTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/request.go new file mode 100644 index 000000000..3e6d1b3cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putalibabacloud + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype" +) + +// Request holds the request body struct for the package putalibabacloud +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L31-L85 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `alibabacloud-ai-search`. + Service alibabacloudservicetype.AlibabaCloudServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `alibabacloud-ai-search` service. + ServiceSettings types.AlibabaCloudServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.AlibabaCloudTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putalibabacloud request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/response.go new file mode 100644 index 000000000..22687dfda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putalibabacloud + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypealibabacloudai" +) + +// Response holds the response body struct for the package putalibabacloud +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_alibabacloud/PutAlibabaCloudResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypealibabacloudai.TaskTypeAlibabaCloudAI `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/put_amazonbedrock.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/put_amazonbedrock.go new file mode 100644 index 000000000..71703ac2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/put_amazonbedrock.go @@ -0,0 +1,461 @@ +// Licensed to Elasticsearch 
B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Amazon Bedrock inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazonbedrock` service. +// +// >info +// > You need to provide the access and secret keys only once, during the +// inference model creation. The get inference API does not retrieve your access +// or secret keys. After creating the inference model, you cannot change the +// associated key pairs. If you want to use a different access and secret key +// pair, delete the inference model and recreate it with the same name and the +// updated keys. +package putamazonbedrock + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype" +) + +const ( + tasktypeMask = iota + 1 + + amazonbedrockinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAmazonbedrock struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + amazonbedrockinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAmazonbedrock type alias for index. +type NewPutAmazonbedrock func(tasktype, amazonbedrockinferenceid string) *PutAmazonbedrock + +// NewPutAmazonbedrockFunc returns a new instance of PutAmazonbedrock with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAmazonbedrockFunc(tp elastictransport.Interface) NewPutAmazonbedrock { + return func(tasktype, amazonbedrockinferenceid string) *PutAmazonbedrock { + n := New(tp) + + n._tasktype(tasktype) + + n._amazonbedrockinferenceid(amazonbedrockinferenceid) + + return n + } +} + +// Create an Amazon Bedrock inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazonbedrock` service. +// +// >info +// > You need to provide the access and secret keys only once, during the +// inference model creation. The get inference API does not retrieve your access +// or secret keys. 
After creating the inference model, you cannot change the +// associated key pairs. If you want to use a different access and secret key +// pair, delete the inference model and recreate it with the same name and the +// updated keys. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock +func New(tp elastictransport.Interface) *PutAmazonbedrock { + r := &PutAmazonbedrock{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAmazonbedrock) Raw(raw io.Reader) *PutAmazonbedrock { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAmazonbedrock) Request(req *Request) *PutAmazonbedrock { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutAmazonbedrock) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAmazonbedrock: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|amazonbedrockinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "amazonbedrockinferenceid", r.amazonbedrockinferenceid) + } + path.WriteString(r.amazonbedrockinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
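+//
+// Most callers use Do, which decodes the body into a typed Response; Perform
+// is for when the raw *http.Response is needed. A minimal sketch, assuming an
+// elastictransport.Interface value named tp and placeholder path parameters:
+//
+//	res, err := NewPutAmazonbedrockFunc(tp)("completion", "my-bedrock-endpoint").Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}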
+func (r PutAmazonbedrock) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_amazonbedrock") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_amazonbedrock") + if reader := instrument.RecordRequestBody(ctx, "inference.put_amazonbedrock", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_amazonbedrock") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAmazonbedrock query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putamazonbedrock.Response +func (r PutAmazonbedrock) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_amazonbedrock") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAmazonbedrock headers map. +func (r *PutAmazonbedrock) Header(key, value string) *PutAmazonbedrock { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutAmazonbedrock) _tasktype(tasktype string) *PutAmazonbedrock { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AmazonbedrockInferenceId The unique identifier of the inference endpoint. 
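+// The identifier becomes the final path segment of the generated request,
+// i.e. PUT /_inference/{tasktype}/{amazonbedrockinferenceid}.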
+// API Name: amazonbedrockinferenceid +func (r *PutAmazonbedrock) _amazonbedrockinferenceid(amazonbedrockinferenceid string) *PutAmazonbedrock { + r.paramSet |= amazonbedrockinferenceidMask + r.amazonbedrockinferenceid = amazonbedrockinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutAmazonbedrock) Timeout(duration string) *PutAmazonbedrock { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAmazonbedrock) ErrorTrace(errortrace bool) *PutAmazonbedrock { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAmazonbedrock) FilterPath(filterpaths ...string) *PutAmazonbedrock { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAmazonbedrock) Human(human bool) *PutAmazonbedrock { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAmazonbedrock) Pretty(pretty bool) *PutAmazonbedrock { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutAmazonbedrock) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAmazonbedrock { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `amazonbedrock`. +// API name: service +func (r *PutAmazonbedrock) Service(service amazonbedrockservicetype.AmazonBedrockServiceType) *PutAmazonbedrock { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `amazonbedrock` service. +// API name: service_settings +func (r *PutAmazonbedrock) ServiceSettings(servicesettings types.AmazonBedrockServiceSettingsVariant) *PutAmazonbedrock { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.AmazonBedrockServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
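+// Like the ChunkingSettings, Service, and ServiceSettings setters above, this
+// initializes the underlying request on first use, so the builder methods can
+// be chained in any order before Perform or Do is called.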
+// API name: task_settings +func (r *PutAmazonbedrock) TaskSettings(tasksettings types.AmazonBedrockTaskSettingsVariant) *PutAmazonbedrock { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.AmazonBedrockTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/request.go new file mode 100644 index 000000000..ca09707a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putamazonbedrock + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype" +) + +// Request holds the request body struct for the package putamazonbedrock +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L31-L88 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `amazonbedrock`. + Service amazonbedrockservicetype.AmazonBedrockServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `amazonbedrock` service. + ServiceSettings types.AmazonBedrockServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.AmazonBedrockTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putamazonbedrock request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/response.go new file mode 100644 index 000000000..5b35cd0a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putamazonbedrock + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonbedrock" +) + +// Response holds the response body struct for the package putamazonbedrock +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_amazonbedrock/PutAmazonBedrockResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeamazonbedrock.TaskTypeAmazonBedrock `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/put_amazonsagemaker.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/put_amazonsagemaker.go new file mode 100644 index 000000000..262094745 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/put_amazonsagemaker.go @@ -0,0 +1,447 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Amazon SageMaker inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazon_sagemaker` service. +package putamazonsagemaker + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype" +) + +const ( + tasktypeMask = iota + 1 + + amazonsagemakerinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
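+// Callers can detect the condition with errors.Is; a sketch, assuming a
+// transport value named tp:
+//
+//	if _, err := New(tp).HttpRequest(context.Background()); errors.Is(err, ErrBuildPath) {
+//		// the task type or inference id path parameter was never set
+//	}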
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAmazonsagemaker struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + amazonsagemakerinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAmazonsagemaker type alias for index. +type NewPutAmazonsagemaker func(tasktype, amazonsagemakerinferenceid string) *PutAmazonsagemaker + +// NewPutAmazonsagemakerFunc returns a new instance of PutAmazonsagemaker with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAmazonsagemakerFunc(tp elastictransport.Interface) NewPutAmazonsagemaker { + return func(tasktype, amazonsagemakerinferenceid string) *PutAmazonsagemaker { + n := New(tp) + + n._tasktype(tasktype) + + n._amazonsagemakerinferenceid(amazonsagemakerinferenceid) + + return n + } +} + +// Create an Amazon SageMaker inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `amazon_sagemaker` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker +func New(tp elastictransport.Interface) *PutAmazonsagemaker { + r := &PutAmazonsagemaker{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAmazonsagemaker) Raw(raw io.Reader) *PutAmazonsagemaker { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAmazonsagemaker) Request(req *Request) *PutAmazonsagemaker { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutAmazonsagemaker) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAmazonsagemaker: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|amazonsagemakerinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "amazonsagemakerinferenceid", r.amazonsagemakerinferenceid) + } + path.WriteString(r.amazonsagemakerinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
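+//
+// A sketch with placeholder path values ("text_embedding" and the endpoint
+// name are illustrative; tp is an assumed elastictransport.Interface):
+//
+//	res, err := NewPutAmazonsagemakerFunc(tp)("text_embedding", "my-sagemaker-endpoint").Perform(context.Background())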
+func (r PutAmazonsagemaker) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_amazonsagemaker") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_amazonsagemaker") + if reader := instrument.RecordRequestBody(ctx, "inference.put_amazonsagemaker", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_amazonsagemaker") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAmazonsagemaker query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putamazonsagemaker.Response +func (r PutAmazonsagemaker) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_amazonsagemaker") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAmazonsagemaker headers map. +func (r *PutAmazonsagemaker) Header(key, value string) *PutAmazonsagemaker { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutAmazonsagemaker) _tasktype(tasktype string) *PutAmazonsagemaker { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AmazonsagemakerInferenceId The unique identifier of the inference endpoint. 
+// API Name: amazonsagemakerinferenceid +func (r *PutAmazonsagemaker) _amazonsagemakerinferenceid(amazonsagemakerinferenceid string) *PutAmazonsagemaker { + r.paramSet |= amazonsagemakerinferenceidMask + r.amazonsagemakerinferenceid = amazonsagemakerinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutAmazonsagemaker) Timeout(duration string) *PutAmazonsagemaker { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAmazonsagemaker) ErrorTrace(errortrace bool) *PutAmazonsagemaker { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAmazonsagemaker) FilterPath(filterpaths ...string) *PutAmazonsagemaker { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAmazonsagemaker) Human(human bool) *PutAmazonsagemaker { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAmazonsagemaker) Pretty(pretty bool) *PutAmazonsagemaker { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutAmazonsagemaker) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAmazonsagemaker { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `amazon_sagemaker`. +// API name: service +func (r *PutAmazonsagemaker) Service(service amazonsagemakerservicetype.AmazonSageMakerServiceType) *PutAmazonsagemaker { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. +// These settings are specific to the `amazon_sagemaker` service and +// `service_settings.api` you specified. +// API name: service_settings +func (r *PutAmazonsagemaker) ServiceSettings(servicesettings types.AmazonSageMakerServiceSettingsVariant) *PutAmazonsagemaker { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.AmazonSageMakerServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type and `service_settings.api` you +// specified. 
+// API name: task_settings +func (r *PutAmazonsagemaker) TaskSettings(tasksettings types.AmazonSageMakerTaskSettingsVariant) *PutAmazonsagemaker { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.AmazonSageMakerTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/request.go new file mode 100644 index 000000000..6711f0ac8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/request.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putamazonsagemaker + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype" +) + +// Request holds the request body struct for the package putamazonsagemaker +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_amazonsagemaker/PutAmazonSageMakerRequest.ts#L31-L86 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `amazon_sagemaker`. + Service amazonsagemakerservicetype.AmazonSageMakerServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. + // These settings are specific to the `amazon_sagemaker` service and + // `service_settings.api` you specified. + ServiceSettings types.AmazonSageMakerServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type and `service_settings.api` you + // specified. 
+ TaskSettings *types.AmazonSageMakerTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putamazonsagemaker request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/response.go new file mode 100644 index 000000000..99b3d414f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putamazonsagemaker + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonsagemaker" +) + +// Response holds the response body struct for the package putamazonsagemaker +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_amazonsagemaker/PutAmazonSageMakerResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeamazonsagemaker.TaskTypeAmazonSageMaker `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/put_anthropic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/put_anthropic.go new file mode 100644 index 000000000..714d658df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/put_anthropic.go @@ -0,0 +1,446 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Anthropic inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `anthropic` service. +package putanthropic + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype" +) + +const ( + tasktypeMask = iota + 1 + + anthropicinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAnthropic struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + anthropicinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAnthropic type alias for index. +type NewPutAnthropic func(tasktype, anthropicinferenceid string) *PutAnthropic + +// NewPutAnthropicFunc returns a new instance of PutAnthropic with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAnthropicFunc(tp elastictransport.Interface) NewPutAnthropic { + return func(tasktype, anthropicinferenceid string) *PutAnthropic { + n := New(tp) + + n._tasktype(tasktype) + + n._anthropicinferenceid(anthropicinferenceid) + + return n + } +} + +// Create an Anthropic inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `anthropic` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic +func New(tp elastictransport.Interface) *PutAnthropic { + r := &PutAnthropic{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAnthropic) Raw(raw io.Reader) *PutAnthropic { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAnthropic) Request(req *Request) *PutAnthropic { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
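+// It does not execute the request, which makes it useful for inspecting the
+// final URL, headers, and body, or for sending it through a custom client. A
+// sketch, assuming a transport value named tp:
+//
+//	req, err := NewPutAnthropicFunc(tp)("completion", "my-anthropic-endpoint").HttpRequest(context.Background())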
+func (r *PutAnthropic) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAnthropic: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|anthropicinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "anthropicinferenceid", r.anthropicinferenceid) + } + path.WriteString(r.anthropicinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutAnthropic) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_anthropic") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_anthropic") + if reader := instrument.RecordRequestBody(ctx, "inference.put_anthropic", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_anthropic") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAnthropic query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putanthropic.Response +func (r PutAnthropic) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_anthropic") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAnthropic headers map. +func (r *PutAnthropic) Header(key, value string) *PutAnthropic { + r.headers.Set(key, value) + + return r +} + +// TaskType The task type. +// The only valid task type for the model to perform is `completion`. +// API Name: tasktype +func (r *PutAnthropic) _tasktype(tasktype string) *PutAnthropic { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AnthropicInferenceId The unique identifier of the inference endpoint. 
+// API Name: anthropicinferenceid
+func (r *PutAnthropic) _anthropicinferenceid(anthropicinferenceid string) *PutAnthropic {
+	r.paramSet |= anthropicinferenceidMask
+	r.anthropicinferenceid = anthropicinferenceid
+
+	return r
+}
+
+// Timeout Specifies the amount of time to wait for the inference endpoint to be
+// created.
+// API name: timeout
+func (r *PutAnthropic) Timeout(duration string) *PutAnthropic {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutAnthropic) ErrorTrace(errortrace bool) *PutAnthropic {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutAnthropic) FilterPath(filterpaths ...string) *PutAnthropic {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *PutAnthropic) Human(human bool) *PutAnthropic {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *PutAnthropic) Pretty(pretty bool) *PutAnthropic {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutAnthropic) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAnthropic {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `anthropic`.
+// API name: service
+func (r *PutAnthropic) Service(service anthropicservicetype.AnthropicServiceType) *PutAnthropic {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model. These settings are specific to
+// the `anthropic` service.
+// API name: service_settings
+func (r *PutAnthropic) ServiceSettings(servicesettings types.AnthropicServiceSettingsVariant) *PutAnthropic {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.AnthropicServiceSettingsCaster()
+
+	return r
+}
+
+// Settings to configure the inference task.
+// These settings are specific to the task type you specified.
+// API name: task_settings
+func (r *PutAnthropic) TaskSettings(tasksettings types.AnthropicTaskSettingsVariant) *PutAnthropic {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.TaskSettings = tasksettings.AnthropicTaskSettingsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/request.go
new file mode 100644
index 000000000..a2f9b0dac
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/request.go
@@ -0,0 +1,66 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putanthropic
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype"
+)
+
+// Request holds the request body struct for the package putanthropic
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_anthropic/PutAnthropicRequest.ts#L31-L86
+type Request struct {
+
+	// ChunkingSettings The chunking configuration object.
+	ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"`
+	// Service The type of service supported for the specified task type. In this case,
+	// `anthropic`.
+	Service anthropicservicetype.AnthropicServiceType `json:"service"`
+	// ServiceSettings Settings used to install the inference model. These settings are specific to
+	// the `anthropic` service.
+	ServiceSettings types.AnthropicServiceSettings `json:"service_settings"`
+	// TaskSettings Settings to configure the inference task.
+	// These settings are specific to the task type you specified.
+ TaskSettings *types.AnthropicTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putanthropic request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/response.go new file mode 100644 index 000000000..58169eb05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putanthropic + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic" +) + +// Response holds the response body struct for the package putanthropic +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_anthropic/PutAnthropicResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeanthropic.TaskTypeAnthropic `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/put_azureaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/put_azureaistudio.go new file mode 100644 index 000000000..45e36cecc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/put_azureaistudio.go @@ -0,0 +1,445 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Azure AI studio inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `azureaistudio` service. +package putazureaistudio + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype" +) + +const ( + tasktypeMask = iota + 1 + + azureaistudioinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAzureaistudio struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + azureaistudioinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAzureaistudio type alias for index. +type NewPutAzureaistudio func(tasktype, azureaistudioinferenceid string) *PutAzureaistudio + +// NewPutAzureaistudioFunc returns a new instance of PutAzureaistudio with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAzureaistudioFunc(tp elastictransport.Interface) NewPutAzureaistudio { + return func(tasktype, azureaistudioinferenceid string) *PutAzureaistudio { + n := New(tp) + + n._tasktype(tasktype) + + n._azureaistudioinferenceid(azureaistudioinferenceid) + + return n + } +} + +// Create an Azure AI studio inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `azureaistudio` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio +func New(tp elastictransport.Interface) *PutAzureaistudio { + r := &PutAzureaistudio{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *PutAzureaistudio) Raw(raw io.Reader) *PutAzureaistudio { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAzureaistudio) Request(req *Request) *PutAzureaistudio { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutAzureaistudio) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAzureaistudio: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|azureaistudioinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "azureaistudioinferenceid", r.azureaistudioinferenceid) + } + path.WriteString(r.azureaistudioinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
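+//
+// A hedged sketch of calling Perform directly, in which case the caller owns
+// the response body; req is a previously built *Request, and the task type
+// and endpoint id are illustrative only:
+//
+//	res, err := NewPutAzureaistudioFunc(tp)("completion", "azureaistudio-completion-demo").
+//		Request(req).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}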
+func (r PutAzureaistudio) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_azureaistudio") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_azureaistudio") + if reader := instrument.RecordRequestBody(ctx, "inference.put_azureaistudio", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_azureaistudio") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAzureaistudio query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putazureaistudio.Response +func (r PutAzureaistudio) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_azureaistudio") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAzureaistudio headers map. +func (r *PutAzureaistudio) Header(key, value string) *PutAzureaistudio { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutAzureaistudio) _tasktype(tasktype string) *PutAzureaistudio { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AzureaistudioInferenceId The unique identifier of the inference endpoint. 
+// API Name: azureaistudioinferenceid +func (r *PutAzureaistudio) _azureaistudioinferenceid(azureaistudioinferenceid string) *PutAzureaistudio { + r.paramSet |= azureaistudioinferenceidMask + r.azureaistudioinferenceid = azureaistudioinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutAzureaistudio) Timeout(duration string) *PutAzureaistudio { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAzureaistudio) ErrorTrace(errortrace bool) *PutAzureaistudio { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAzureaistudio) FilterPath(filterpaths ...string) *PutAzureaistudio { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAzureaistudio) Human(human bool) *PutAzureaistudio { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAzureaistudio) Pretty(pretty bool) *PutAzureaistudio { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutAzureaistudio) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAzureaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `azureaistudio`. +// API name: service +func (r *PutAzureaistudio) Service(service azureaistudioservicetype.AzureAiStudioServiceType) *PutAzureaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `openai` service. +// API name: service_settings +func (r *PutAzureaistudio) ServiceSettings(servicesettings types.AzureAiStudioServiceSettingsVariant) *PutAzureaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.AzureAiStudioServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutAzureaistudio) TaskSettings(tasksettings types.AzureAiStudioTaskSettingsVariant) *PutAzureaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.AzureAiStudioTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/request.go new file mode 100644 index 000000000..ce483c04b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putazureaistudio + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype" +) + +// Request holds the request body struct for the package putazureaistudio +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L31-L85 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `azureaistudio`. + Service azureaistudioservicetype.AzureAiStudioServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `openai` service. + ServiceSettings types.AzureAiStudioServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.AzureAiStudioTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putazureaistudio request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/response.go new file mode 100644 index 000000000..f23f1a2ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putazureaistudio + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureaistudio" +) + +// Response holds the response body struct for the package putazureaistudio +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_azureaistudio/PutAzureAiStudioResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeazureaistudio.TaskTypeAzureAIStudio `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/put_azureopenai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/put_azureopenai.go new file mode 100644 index 000000000..ee6fdbe47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/put_azureopenai.go @@ -0,0 +1,471 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Azure OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `azureopenai` service. +// +// The list of chat completion models that you can choose from in your Azure +// OpenAI deployment include: +// +// * [GPT-4 and GPT-4 Turbo +// models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) +// * +// [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) +// +// The list of embeddings models that you can choose from in your deployment can +// be found in the [Azure models +// documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). +package putazureopenai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype" +) + +const ( + tasktypeMask = iota + 1 + + azureopenaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutAzureopenai struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + azureopenaiinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutAzureopenai type alias for index. +type NewPutAzureopenai func(tasktype, azureopenaiinferenceid string) *PutAzureopenai + +// NewPutAzureopenaiFunc returns a new instance of PutAzureopenai with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutAzureopenaiFunc(tp elastictransport.Interface) NewPutAzureopenai { + return func(tasktype, azureopenaiinferenceid string) *PutAzureopenai { + n := New(tp) + + n._tasktype(tasktype) + + n._azureopenaiinferenceid(azureopenaiinferenceid) + + return n + } +} + +// Create an Azure OpenAI inference endpoint. 
+// +// Create an inference endpoint to perform an inference task with the +// `azureopenai` service. +// +// The list of chat completion models that you can choose from in your Azure +// OpenAI deployment include: +// +// * [GPT-4 and GPT-4 Turbo +// models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) +// * +// [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) +// +// The list of embeddings models that you can choose from in your deployment can +// be found in the [Azure models +// documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai +func New(tp elastictransport.Interface) *PutAzureopenai { + r := &PutAzureopenai{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutAzureopenai) Raw(raw io.Reader) *PutAzureopenai { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutAzureopenai) Request(req *Request) *PutAzureopenai { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
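+//
+// Sketch: building the PUT request for inspection without sending it; the
+// endpoint id and timeout are placeholder values and tp is an assumed
+// transport:
+//
+//	req, err := NewPutAzureopenaiFunc(tp)("completion", "azure-openai-demo").
+//		Timeout("30s").
+//		HttpRequest(context.Background())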
+func (r *PutAzureopenai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutAzureopenai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|azureopenaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "azureopenaiinferenceid", r.azureopenaiinferenceid) + } + path.WriteString(r.azureopenaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
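+//
+// A usage sketch that also sets an opaque id header for request tracing;
+// req, ctx, and all identifiers below are illustrative assumptions:
+//
+//	res, err := NewPutAzureopenaiFunc(tp)("text_embedding", "azure-openai-embeddings").
+//		Request(req).
+//		Header("X-Opaque-Id", "putazureopenai-example").
+//		Perform(ctx)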
+func (r PutAzureopenai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_azureopenai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_azureopenai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_azureopenai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_azureopenai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutAzureopenai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putazureopenai.Response +func (r PutAzureopenai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_azureopenai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutAzureopenai headers map. +func (r *PutAzureopenai) Header(key, value string) *PutAzureopenai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// NOTE: The `chat_completion` task type only supports streaming and only +// through the _stream API. +// API Name: tasktype +func (r *PutAzureopenai) _tasktype(tasktype string) *PutAzureopenai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// AzureopenaiInferenceId The unique identifier of the inference endpoint. 
+// API Name: azureopenaiinferenceid +func (r *PutAzureopenai) _azureopenaiinferenceid(azureopenaiinferenceid string) *PutAzureopenai { + r.paramSet |= azureopenaiinferenceidMask + r.azureopenaiinferenceid = azureopenaiinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutAzureopenai) Timeout(duration string) *PutAzureopenai { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutAzureopenai) ErrorTrace(errortrace bool) *PutAzureopenai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutAzureopenai) FilterPath(filterpaths ...string) *PutAzureopenai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutAzureopenai) Human(human bool) *PutAzureopenai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutAzureopenai) Pretty(pretty bool) *PutAzureopenai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutAzureopenai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutAzureopenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `azureopenai`. +// API name: service +func (r *PutAzureopenai) Service(service azureopenaiservicetype.AzureOpenAIServiceType) *PutAzureopenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `azureopenai` service. +// API name: service_settings +func (r *PutAzureopenai) ServiceSettings(servicesettings types.AzureOpenAIServiceSettingsVariant) *PutAzureopenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.AzureOpenAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutAzureopenai) TaskSettings(tasksettings types.AzureOpenAITaskSettingsVariant) *PutAzureopenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.AzureOpenAITaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/request.go new file mode 100644 index 000000000..6881ad088 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putazureopenai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype" +) + +// Request holds the request body struct for the package putazureopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts#L31-L93 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `azureopenai`. + Service azureopenaiservicetype.AzureOpenAIServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `azureopenai` service. + ServiceSettings types.AzureOpenAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.AzureOpenAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putazureopenai request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/response.go new file mode 100644 index 000000000..c2ba42c35 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putazureopenai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureopenai" +) + +// Response holds the response body struct for the package putazureopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_azureopenai/PutAzureOpenAiResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeazureopenai.TaskTypeAzureOpenAI `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/put_cohere.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/put_cohere.go new file mode 100644 index 000000000..b8e23b21d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/put_cohere.go @@ -0,0 +1,445 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a Cohere inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `cohere` +// service. +package putcohere + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype" +) + +const ( + tasktypeMask = iota + 1 + + cohereinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutCohere struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + cohereinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutCohere type alias for index. +type NewPutCohere func(tasktype, cohereinferenceid string) *PutCohere + +// NewPutCohereFunc returns a new instance of PutCohere with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutCohereFunc(tp elastictransport.Interface) NewPutCohere { + return func(tasktype, cohereinferenceid string) *PutCohere { + n := New(tp) + + n._tasktype(tasktype) + + n._cohereinferenceid(cohereinferenceid) + + return n + } +} + +// Create a Cohere inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `cohere` +// service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere +func New(tp elastictransport.Interface) *PutCohere { + r := &PutCohere{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutCohere) Raw(raw io.Reader) *PutCohere { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
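+//
+// Sketch pairing FromJSON (defined in request.go) with Request and Do; the
+// payload is a hypothetical fragment, not a complete cohere configuration:
+//
+//	req, jsonErr := NewRequest().FromJSON(`{"service": "cohere", "service_settings": {"model_id": "embed-english-v3.0"}}`)
+//	if jsonErr == nil {
+//		res, err := NewPutCohereFunc(tp)("text_embedding", "cohere-embeddings").Request(req).Do(ctx)
+//		// inspect res.InferenceId / err here
+//	}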
+func (r *PutCohere) Request(req *Request) *PutCohere { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutCohere) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutCohere: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|cohereinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "cohereinferenceid", r.cohereinferenceid) + } + path.WriteString(r.cohereinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
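+//
+// Do (defined below) wraps Perform and decodes the body into a typed
+// putcohere.Response or a types.ElasticsearchError; reach for Perform
+// directly only when the raw *http.Response is needed, as in this sketch
+// with placeholder values:
+//
+//	res, err := NewPutCohereFunc(tp)("rerank", "cohere-rerank-demo").Raw(body).Perform(ctx)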
+func (r PutCohere) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_cohere") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_cohere") + if reader := instrument.RecordRequestBody(ctx, "inference.put_cohere", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_cohere") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutCohere query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putcohere.Response +func (r PutCohere) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_cohere") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutCohere headers map. +func (r *PutCohere) Header(key, value string) *PutCohere { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutCohere) _tasktype(tasktype string) *PutCohere { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// CohereInferenceId The unique identifier of the inference endpoint. +// API Name: cohereinferenceid +func (r *PutCohere) _cohereinferenceid(cohereinferenceid string) *PutCohere { + r.paramSet |= cohereinferenceidMask + r.cohereinferenceid = cohereinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout +func (r *PutCohere) Timeout(duration string) *PutCohere { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutCohere) ErrorTrace(errortrace bool) *PutCohere { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutCohere) FilterPath(filterpaths ...string) *PutCohere { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutCohere) Human(human bool) *PutCohere { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutCohere) Pretty(pretty bool) *PutCohere { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutCohere) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutCohere { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `cohere`. +// API name: service +func (r *PutCohere) Service(service cohereservicetype.CohereServiceType) *PutCohere { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. +// These settings are specific to the `cohere` service. +// API name: service_settings +func (r *PutCohere) ServiceSettings(servicesettings types.CohereServiceSettingsVariant) *PutCohere { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.CohereServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. +// API name: task_settings +func (r *PutCohere) TaskSettings(tasksettings types.CohereTaskSettingsVariant) *PutCohere { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.CohereTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/request.go new file mode 100644 index 000000000..e3ce0d134 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcohere + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype" +) + +// Request holds the request body struct for the package putcohere +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_cohere/PutCohereRequest.ts#L31-L86 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `cohere`. + Service cohereservicetype.CohereServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. + // These settings are specific to the `cohere` service. + ServiceSettings types.CohereServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. + TaskSettings *types.CohereTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putcohere request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/response.go new file mode 100644 index 000000000..da9e40bda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcohere + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecohere" +) + +// Response holds the response body struct for the package putcohere +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_cohere/PutCohereResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypecohere.TaskTypeCohere `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/put_custom.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/put_custom.go new file mode 100644 index 000000000..dc5bf71c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/put_custom.go @@ -0,0 +1,540 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a custom inference endpoint. +// +// The custom service gives more control over how to interact with external +// inference services that aren't explicitly supported through dedicated +// integrations. +// The custom service gives you the ability to define the headers, url, query +// parameters, request body, and secrets. 
+// The custom service supports the template replacement functionality, which +// enables you to define a template that can be replaced with the value +// associated with that key. +// Templates are portions of a string that start with `${` and end with `}`. +// The parameters `secret_parameters` and `task_settings` are checked for keys +// for template replacement. Template replacement is supported in the `request`, +// `headers`, `url`, and `query_parameters`. +// If the definition (key) is not found for a template, an error message is +// returned. +// In case of an endpoint definition like the following: +// ``` +// PUT _inference/text_embedding/test-text-embedding +// +// { +// "service": "custom", +// "service_settings": { +// "secret_parameters": { +// "api_key": "" +// }, +// "url": "...endpoints.huggingface.cloud/v1/embeddings", +// "headers": { +// "Authorization": "Bearer ${api_key}", +// "Content-Type": "application/json" +// }, +// "request": "{\"input\": ${input}}", +// "response": { +// "json_parser": { +// "text_embeddings":"$.data[*].embedding[*]" +// } +// } +// } +// } +// +// ``` +// To replace `${api_key}` the `secret_parameters` and `task_settings` are +// checked for a key named `api_key`. +// +// > info +// > Templates should not be surrounded by quotes. +// +// Pre-defined templates: +// * `${input}` refers to the array of input strings that comes from the `input` +// field of the subsequent inference requests. +// * `${input_type}` refers to the input type translation values. +// * `${query}` refers to the query field used specifically for reranking tasks. +// * `${top_n}` refers to the `top_n` field available when performing rerank +// requests. +// * `${return_documents}` refers to the `return_documents` field available when +// performing rerank requests. +package putcustom + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype" +) + +const ( + tasktypeMask = iota + 1 + + custominferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutCustom struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + custominferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutCustom type alias for index. +type NewPutCustom func(tasktype, custominferenceid string) *PutCustom + +// NewPutCustomFunc returns a new instance of PutCustom with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutCustomFunc(tp elastictransport.Interface) NewPutCustom { + return func(tasktype, custominferenceid string) *PutCustom { + n := New(tp) + + n._tasktype(tasktype) + + n._custominferenceid(custominferenceid) + + return n + } +} + +// Create a custom inference endpoint. +// +// The custom service gives more control over how to interact with external +// inference services that aren't explicitly supported through dedicated +// integrations. 
+// The custom service gives you the ability to define the headers, url, query
+// parameters, request body, and secrets.
+// The custom service supports the template replacement functionality, which
+// enables you to define a template that is replaced with the value
+// associated with the corresponding key.
+// Templates are portions of a string that start with `${` and end with `}`.
+// The parameters `secret_parameters` and `task_settings` are checked for keys
+// for template replacement. Template replacement is supported in the `request`,
+// `headers`, `url`, and `query_parameters`.
+// If the definition (key) is not found for a template, an error message is
+// returned.
+// Given an endpoint definition like the following:
+// ```
+// PUT _inference/text_embedding/test-text-embedding
+//
+//	{
+//	  "service": "custom",
+//	  "service_settings": {
+//	    "secret_parameters": {
+//	      "api_key": ""
+//	    },
+//	    "url": "...endpoints.huggingface.cloud/v1/embeddings",
+//	    "headers": {
+//	      "Authorization": "Bearer ${api_key}",
+//	      "Content-Type": "application/json"
+//	    },
+//	    "request": "{\"input\": ${input}}",
+//	    "response": {
+//	      "json_parser": {
+//	        "text_embeddings":"$.data[*].embedding[*]"
+//	      }
+//	    }
+//	  }
+//	}
+//
+// ```
+// To replace `${api_key}`, the `secret_parameters` and `task_settings` are
+// checked for a key named `api_key`.
+//
+// > info
+// > Templates should not be surrounded by quotes.
+//
+// Pre-defined templates:
+// * `${input}` refers to the array of input strings that comes from the `input`
+// field of the subsequent inference requests.
+// * `${input_type}` refers to the input type translation values.
+// * `${query}` refers to the query field used specifically for reranking tasks.
+// * `${top_n}` refers to the `top_n` field available when performing rerank
+// requests.
+// * `${return_documents}` refers to the `return_documents` field available when
+// performing rerank requests.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom
+func New(tp elastictransport.Interface) *PutCustom {
+	r := &PutCustom{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutCustom) Raw(raw io.Reader) *PutCustom {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutCustom) Request(req *Request) *PutCustom {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
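+//
+// HttpRequest can be useful for inspecting a call before it is sent. A minimal
+// sketch; the transport `tp`, the endpoint ID, and the request body here are
+// illustrative assumptions, not defaults of this API:
+//
+//	req, _ := NewRequest().FromJSON(`{"service": "custom", "service_settings": {}}`)
+//	httpReq, err := NewPutCustomFunc(tp)("text_embedding", "my-custom-endpoint").
+//		Request(req).
+//		HttpRequest(context.Background())
+//	// On success, httpReq.URL.Path is "/_inference/text_embedding/my-custom-endpoint".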
+func (r *PutCustom) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutCustom: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|custominferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "custominferenceid", r.custominferenceid) + } + path.WriteString(r.custominferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
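+//
+// Most callers go through Do, which also decodes the response body into a
+// Response value; Perform is the lower-level variant for when the raw
+// *http.Response is needed. A minimal sketch; the transport `tp`, the IDs, and
+// the prepared request `req` are illustrative assumptions:
+//
+//	res, err := NewPutCustomFunc(tp)("text_embedding", "my-custom-endpoint").
+//		Request(req).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}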
+func (r PutCustom) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			ctx = instrument.Start(providedCtx, "inference.put_custom")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "inference.put_custom")
+		if reader := instrument.RecordRequestBody(ctx, "inference.put_custom", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "inference.put_custom")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the PutCustom query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response, and returns a putcustom.Response.
+func (r PutCustom) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "inference.put_custom")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the PutCustom headers map.
+func (r *PutCustom) Header(key, value string) *PutCustom {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// TaskType The type of the inference task that the model will perform.
+// API Name: tasktype
+func (r *PutCustom) _tasktype(tasktype string) *PutCustom {
+	r.paramSet |= tasktypeMask
+	r.tasktype = tasktype
+
+	return r
+}
+
+// CustomInferenceId The unique identifier of the inference endpoint.
+// API Name: custominferenceid
+func (r *PutCustom) _custominferenceid(custominferenceid string) *PutCustom {
+	r.paramSet |= custominferenceidMask
+	r.custominferenceid = custominferenceid
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutCustom) ErrorTrace(errortrace bool) *PutCustom {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutCustom) FilterPath(filterpaths ...string) *PutCustom {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics will be returned in a format suitable for
+// humans. For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutCustom) Human(human bool) *PutCustom {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PutCustom) Pretty(pretty bool) *PutCustom {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutCustom) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutCustom {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `custom`.
+// API name: service
+func (r *PutCustom) Service(service customservicetype.CustomServiceType) *PutCustom {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model.
+// These settings are specific to the `custom` service.
+// API name: service_settings
+func (r *PutCustom) ServiceSettings(servicesettings types.CustomServiceSettingsVariant) *PutCustom {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.CustomServiceSettingsCaster()
+
+	return r
+}
+
+// Settings to configure the inference task.
+// These settings are specific to the task type you specified.
+// API name: task_settings
+func (r *PutCustom) TaskSettings(tasksettings types.CustomTaskSettingsVariant) *PutCustom {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.TaskSettings = tasksettings.CustomTaskSettingsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/request.go
new file mode 100644
index 000000000..68c388769
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/request.go
@@ -0,0 +1,66 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putcustom
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype"
+)
+
+// Request holds the request body struct for the package putcustom
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_custom/PutCustomRequest.ts#L30-L117
+type Request struct {
+
+	// ChunkingSettings The chunking configuration object.
+	ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"`
+	// Service The type of service supported for the specified task type. In this case,
+	// `custom`.
+	Service customservicetype.CustomServiceType `json:"service"`
+	// ServiceSettings Settings used to install the inference model.
+	// These settings are specific to the `custom` service.
+	ServiceSettings types.CustomServiceSettings `json:"service_settings"`
+	// TaskSettings Settings to configure the inference task.
+	// These settings are specific to the task type you specified.
+	TaskSettings *types.CustomTaskSettings `json:"task_settings,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows loading an arbitrary JSON payload into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Putcustom request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/response.go
new file mode 100644
index 000000000..f8145ec08
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom/response.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
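+
+// Note: in the Response type below, ServiceSettings and TaskSettings are kept
+// as raw JSON so callers can interpret them per service. A minimal sketch of
+// reading them; the variable `putCustom` and the "url" key are illustrative
+// assumptions:
+//
+//	resp, err := putCustom.Do(context.Background())
+//	if err == nil {
+//		var svc map[string]any
+//		_ = json.Unmarshal(resp.ServiceSettings, &svc)
+//		_ = svc["url"] // e.g. the configured upstream URL
+//	}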
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcustom + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecustom" +) + +// Response holds the response body struct for the package putcustom +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_custom/PutCustomResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypecustom.TaskTypeCustom `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/put_deepseek.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/put_deepseek.go new file mode 100644 index 000000000..21e19534b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/put_deepseek.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a DeepSeek inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `deepseek` +// service. +package putdeepseek + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype" +) + +const ( + tasktypeMask = iota + 1 + + deepseekinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutDeepseek struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype            string
+	deepseekinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutDeepseek type alias for index.
+type NewPutDeepseek func(tasktype, deepseekinferenceid string) *PutDeepseek
+
+// NewPutDeepseekFunc returns a new instance of PutDeepseek with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewPutDeepseekFunc(tp elastictransport.Interface) NewPutDeepseek {
+	return func(tasktype, deepseekinferenceid string) *PutDeepseek {
+		n := New(tp)
+
+		n._tasktype(tasktype)
+
+		n._deepseekinferenceid(deepseekinferenceid)
+
+		return n
+	}
+}
+
+// Create a DeepSeek inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `deepseek`
+// service.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek
+func New(tp elastictransport.Interface) *PutDeepseek {
+	r := &PutDeepseek{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutDeepseek) Raw(raw io.Reader) *PutDeepseek {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutDeepseek) Request(req *Request) *PutDeepseek {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *PutDeepseek) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDeepseek: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|deepseekinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "deepseekinferenceid", r.deepseekinferenceid) + } + path.WriteString(r.deepseekinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
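+//
+// A hedged end-to-end sketch; the transport `tp`, the endpoint ID, and the
+// service settings shown are illustrative assumptions, not defaults of this
+// API:
+//
+//	req, _ := NewRequest().FromJSON(`{
+//	  "service": "deepseek",
+//	  "service_settings": {"api_key": "<key>", "model_id": "deepseek-chat"}
+//	}`)
+//	res, err := NewPutDeepseekFunc(tp)("chat_completion", "my-deepseek-endpoint").
+//		Request(req).
+//		Perform(context.Background())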
+func (r PutDeepseek) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			ctx = instrument.Start(providedCtx, "inference.put_deepseek")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "inference.put_deepseek")
+		if reader := instrument.RecordRequestBody(ctx, "inference.put_deepseek", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "inference.put_deepseek")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the PutDeepseek query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response, and returns a putdeepseek.Response.
+func (r PutDeepseek) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "inference.put_deepseek")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the PutDeepseek headers map.
+func (r *PutDeepseek) Header(key, value string) *PutDeepseek {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// TaskType The type of the inference task that the model will perform.
+// API Name: tasktype
+func (r *PutDeepseek) _tasktype(tasktype string) *PutDeepseek {
+	r.paramSet |= tasktypeMask
+	r.tasktype = tasktype
+
+	return r
+}
+
+// DeepseekInferenceId The unique identifier of the inference endpoint.
+// API Name: deepseekinferenceid
+func (r *PutDeepseek) _deepseekinferenceid(deepseekinferenceid string) *PutDeepseek {
+	r.paramSet |= deepseekinferenceidMask
+	r.deepseekinferenceid = deepseekinferenceid
+
+	return r
+}
+
+// Timeout Specifies the amount of time to wait for the inference endpoint to be
+// created.
+// API name: timeout
+func (r *PutDeepseek) Timeout(duration string) *PutDeepseek {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutDeepseek) ErrorTrace(errortrace bool) *PutDeepseek {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutDeepseek) FilterPath(filterpaths ...string) *PutDeepseek {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics will be returned in a format suitable for
+// humans. For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutDeepseek) Human(human bool) *PutDeepseek {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PutDeepseek) Pretty(pretty bool) *PutDeepseek {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutDeepseek) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutDeepseek {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `deepseek`.
+// API name: service
+func (r *PutDeepseek) Service(service deepseekservicetype.DeepSeekServiceType) *PutDeepseek {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model.
+// These settings are specific to the `deepseek` service.
+// API name: service_settings
+func (r *PutDeepseek) ServiceSettings(servicesettings types.DeepSeekServiceSettingsVariant) *PutDeepseek {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.DeepSeekServiceSettingsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/request.go
new file mode 100644
index 000000000..e3ee4448d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/request.go
@@ -0,0 +1,63 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putdeepseek
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype"
+)
+
+// Request holds the request body struct for the package putdeepseek
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_deepseek/PutDeepSeekRequest.ts#L30-L80
+type Request struct {
+
+	// ChunkingSettings The chunking configuration object.
+	ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"`
+	// Service The type of service supported for the specified task type. In this case,
+	// `deepseek`.
+	Service deepseekservicetype.DeepSeekServiceType `json:"service"`
+	// ServiceSettings Settings used to install the inference model.
+	// These settings are specific to the `deepseek` service.
+	ServiceSettings types.DeepSeekServiceSettings `json:"service_settings"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows loading an arbitrary JSON payload into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Putdeepseek request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/response.go
new file mode 100644
index 000000000..7eb0c8a8c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek/response.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putdeepseek
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypedeepseek"
+)
+
+// Response holds the response body struct for the package putdeepseek
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_deepseek/PutDeepSeekResponse.ts#L22-L25
+type Response struct {
+
+	// ChunkingSettings Chunking configuration object
+	ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"`
+	// InferenceId The inference Id
+	InferenceId string `json:"inference_id"`
+	// Service The service type
+	Service string `json:"service"`
+	// ServiceSettings Settings specific to the service
+	ServiceSettings json.RawMessage `json:"service_settings"`
+	// TaskSettings Task settings specific to the service and task type
+	TaskSettings json.RawMessage `json:"task_settings,omitempty"`
+	// TaskType The task type
+	TaskType tasktypedeepseek.TaskTypeDeepSeek `json:"task_type"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/put_elasticsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/put_elasticsearch.go
new file mode 100644
index 000000000..03f4706ae
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/put_elasticsearch.go
@@ -0,0 +1,494 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create an Elasticsearch inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `elasticsearch` service.
+//
+// > info
+// > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference
+// endpoints; you only need to create the endpoints using the API if you want to
+// customize the settings.
+//
+// If you use the ELSER or the E5 model through the `elasticsearch` service, the
+// API request will automatically download and deploy the model if it isn't
+// downloaded yet.
+//
+// > info
+// > You might see a 502 bad gateway error in the response when using the Kibana
+// Console. This error usually just reflects a timeout, while the model
+// downloads in the background. You can check the download progress in the
+// Machine Learning UI. If using the Python client, you can set the timeout
+// parameter to a higher value.
+//
+// After creating the endpoint, wait for the model deployment to complete before
+// using it.
+// To verify the deployment status, use the get trained model statistics API.
+// Look for `"state": "fully_allocated"` in the response and ensure that the
+// `"allocation_count"` matches the `"target_allocation_count"`.
+// Avoid creating multiple endpoints for the same model unless required, as each
+// endpoint consumes significant resources.
+package putelasticsearch
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype"
+)
+
+const (
+	tasktypeMask = iota + 1
+
+	elasticsearchinferenceidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutElasticsearch struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype                 string
+	elasticsearchinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutElasticsearch type alias for index.
+type NewPutElasticsearch func(tasktype, elasticsearchinferenceid string) *PutElasticsearch
+
+// NewPutElasticsearchFunc returns a new instance of PutElasticsearch with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewPutElasticsearchFunc(tp elastictransport.Interface) NewPutElasticsearch {
+	return func(tasktype, elasticsearchinferenceid string) *PutElasticsearch {
+		n := New(tp)
+
+		n._tasktype(tasktype)
+
+		n._elasticsearchinferenceid(elasticsearchinferenceid)
+
+		return n
+	}
+}
+
+// Create an Elasticsearch inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the
+// `elasticsearch` service.
+//
+// > info
+// > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference
+// endpoints; you only need to create the endpoints using the API if you want to
+// customize the settings.
+//
+// If you use the ELSER or the E5 model through the `elasticsearch` service, the
+// API request will automatically download and deploy the model if it isn't
+// downloaded yet.
+//
+// > info
+// > You might see a 502 bad gateway error in the response when using the Kibana
+// Console. This error usually just reflects a timeout, while the model
+// downloads in the background. You can check the download progress in the
+// Machine Learning UI. If using the Python client, you can set the timeout
+// parameter to a higher value.
+//
+// After creating the endpoint, wait for the model deployment to complete before
+// using it.
+// To verify the deployment status, use the get trained model statistics API.
+// Look for `"state": "fully_allocated"` in the response and ensure that the
+// `"allocation_count"` matches the `"target_allocation_count"`.
+// Avoid creating multiple endpoints for the same model unless required, as each
+// endpoint consumes significant resources.
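+//
+// A minimal usage sketch for a text embedding endpoint backed by E5; the
+// transport `tp`, the endpoint ID, and the settings shown are illustrative
+// assumptions, not defaults of this API:
+//
+//	req, _ := NewRequest().FromJSON(`{
+//	  "service": "elasticsearch",
+//	  "service_settings": {
+//	    "model_id": ".multilingual-e5-small",
+//	    "num_allocations": 1,
+//	    "num_threads": 1
+//	  }
+//	}`)
+//	res, err := NewPutElasticsearchFunc(tp)("text_embedding", "my-e5-endpoint").
+//		Request(req).
+//		Do(context.Background())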
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch
+func New(tp elastictransport.Interface) *PutElasticsearch {
+	r := &PutElasticsearch{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutElasticsearch) Raw(raw io.Reader) *PutElasticsearch {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutElasticsearch) Request(req *Request) *PutElasticsearch {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *PutElasticsearch) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for PutElasticsearch: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == tasktypeMask|elasticsearchinferenceidMask:
+		path.WriteString("/")
+		path.WriteString("_inference")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "tasktype", r.tasktype)
+		}
+		path.WriteString(r.tasktype)
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "elasticsearchinferenceid", r.elasticsearchinferenceid)
+		}
+		path.WriteString(r.elasticsearchinferenceid)
+
+		method = http.MethodPut
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r PutElasticsearch) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			ctx = instrument.Start(providedCtx, "inference.put_elasticsearch")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "inference.put_elasticsearch")
+		if reader := instrument.RecordRequestBody(ctx, "inference.put_elasticsearch", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "inference.put_elasticsearch")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the PutElasticsearch query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response, and returns a putelasticsearch.Response.
+func (r PutElasticsearch) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "inference.put_elasticsearch")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the PutElasticsearch headers map.
+func (r *PutElasticsearch) Header(key, value string) *PutElasticsearch {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// TaskType The type of the inference task that the model will perform.
+// API Name: tasktype
+func (r *PutElasticsearch) _tasktype(tasktype string) *PutElasticsearch {
+	r.paramSet |= tasktypeMask
+	r.tasktype = tasktype
+
+	return r
+}
+
+// ElasticsearchInferenceId The unique identifier of the inference endpoint.
+// It must not match the `model_id`.
+// API Name: elasticsearchinferenceid
+func (r *PutElasticsearch) _elasticsearchinferenceid(elasticsearchinferenceid string) *PutElasticsearch {
+	r.paramSet |= elasticsearchinferenceidMask
+	r.elasticsearchinferenceid = elasticsearchinferenceid
+
+	return r
+}
+
+// Timeout Specifies the amount of time to wait for the inference endpoint to be
+// created.
+// API name: timeout
+func (r *PutElasticsearch) Timeout(duration string) *PutElasticsearch {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutElasticsearch) ErrorTrace(errortrace bool) *PutElasticsearch {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutElasticsearch) FilterPath(filterpaths ...string) *PutElasticsearch {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics will be returned in a format suitable for
+// humans. For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutElasticsearch) Human(human bool) *PutElasticsearch {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PutElasticsearch) Pretty(pretty bool) *PutElasticsearch {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutElasticsearch) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutElasticsearch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `elasticsearch`.
+// API name: service
+func (r *PutElasticsearch) Service(service elasticsearchservicetype.ElasticsearchServiceType) *PutElasticsearch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model. These settings are specific to
+// the `elasticsearch` service.
+// API name: service_settings
+func (r *PutElasticsearch) ServiceSettings(servicesettings types.ElasticsearchServiceSettingsVariant) *PutElasticsearch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.ElasticsearchServiceSettingsCaster()
+
+	return r
+}
+
+// Settings to configure the inference task.
+// These settings are specific to the task type you specified.
+// API name: task_settings +func (r *PutElasticsearch) TaskSettings(tasksettings types.ElasticsearchTaskSettingsVariant) *PutElasticsearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.ElasticsearchTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/request.go new file mode 100644 index 000000000..78c5e400b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putelasticsearch + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype" +) + +// Request holds the request body struct for the package putelasticsearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_elasticsearch/PutElasticsearchRequest.ts#L31-L99 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `elasticsearch`. + Service elasticsearchservicetype.ElasticsearchServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `elasticsearch` service. + ServiceSettings types.ElasticsearchServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+	TaskSettings *types.ElasticsearchTaskSettings `json:"task_settings,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows loading an arbitrary JSON payload into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Putelasticsearch request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/response.go
new file mode 100644
index 000000000..d06c255b2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch/response.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putelasticsearch
+
+import (
+	"encoding/json"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelasticsearch"
+)
+
+// Response holds the response body struct for the package putelasticsearch
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_elasticsearch/PutElasticsearchResponse.ts#L22-L25
+type Response struct {
+
+	// ChunkingSettings Chunking configuration object
+	ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"`
+	// InferenceId The inference Id
+	InferenceId string `json:"inference_id"`
+	// Service The service type
+	Service string `json:"service"`
+	// ServiceSettings Settings specific to the service
+	ServiceSettings json.RawMessage `json:"service_settings"`
+	// TaskSettings Task settings specific to the service and task type
+	TaskSettings json.RawMessage `json:"task_settings,omitempty"`
+	// TaskType The task type
+	TaskType tasktypeelasticsearch.TaskTypeElasticsearch `json:"task_type"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/put_elser.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/put_elser.go
new file mode 100644
index 000000000..8c9c0ffef
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/put_elser.go
@@ -0,0 +1,479 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create an ELSER inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `elser`
+// service.
+// You can also deploy ELSER by using the Elasticsearch inference integration.
+//
+// > info
+// > Your Elasticsearch deployment contains a preconfigured ELSER inference
+// endpoint; you only need to create the endpoint using the API if you want to
+// customize the settings.
+//
+// The API request will automatically download and deploy the ELSER model if it
+// isn't already downloaded.
+//
+// > info
+// > You might see a 502 bad gateway error in the response when using the Kibana
+// Console. This error usually just reflects a timeout, while the model
+// downloads in the background. You can check the download progress in the
+// Machine Learning UI. If using the Python client, you can set the timeout
+// parameter to a higher value.
+//
+// After creating the endpoint, wait for the model deployment to complete before
+// using it.
+// To verify the deployment status, use the get trained model statistics API.
+// Look for `"state": "fully_allocated"` in the response and ensure that the
+// `"allocation_count"` matches the `"target_allocation_count"`.
+// Avoid creating multiple endpoints for the same model unless required, as each
+// endpoint consumes significant resources.
+package putelser
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype"
+)
+
+const (
+	tasktypeMask = iota + 1
+
+	elserinferenceidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutElser struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype         string
+	elserinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutElser type alias for index.
+type NewPutElser func(tasktype, elserinferenceid string) *PutElser
+
+// NewPutElserFunc returns a new instance of PutElser with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
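+//
+// A minimal usage sketch, assuming the package's generated NewRequest/FromJSON
+// helpers; the transport `tp`, the endpoint ID, and the allocation settings
+// are illustrative assumptions:
+//
+//	req, _ := NewRequest().FromJSON(`{
+//	  "service": "elser",
+//	  "service_settings": {"num_allocations": 1, "num_threads": 1}
+//	}`)
+//	res, err := NewPutElserFunc(tp)("sparse_embedding", "my-elser-endpoint").
+//		Request(req).
+//		Do(context.Background())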
+func NewPutElserFunc(tp elastictransport.Interface) NewPutElser { + return func(tasktype, elserinferenceid string) *PutElser { + n := New(tp) + + n._tasktype(tasktype) + + n._elserinferenceid(elserinferenceid) + + return n + } +} + +// Create an ELSER inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `elser` +// service. +// You can also deploy ELSER by using the Elasticsearch inference integration. +// +// > info +// > Your Elasticsearch deployment contains a preconfigured ELSER inference +// endpoint, you only need to create the enpoint using the API if you want to +// customize the settings. +// +// The API request will automatically download and deploy the ELSER model if it +// isn't already downloaded. +// +// > info +// > You might see a 502 bad gateway error in the response when using the Kibana +// Console. This error usually just reflects a timeout, while the model +// downloads in the background. You can check the download progress in the +// Machine Learning UI. If using the Python client, you can set the timeout +// parameter to a higher value. +// +// After creating the endpoint, wait for the model deployment to complete before +// using it. +// To verify the deployment status, use the get trained model statistics API. +// Look for `"state": "fully_allocated"` in the response and ensure that the +// `"allocation_count"` matches the `"target_allocation_count"`. +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser +func New(tp elastictransport.Interface) *PutElser { + r := &PutElser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutElser) Raw(raw io.Reader) *PutElser { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutElser) Request(req *Request) *PutElser { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
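+//
+// The request is only built here, not executed, so it can be inspected or
+// logged before use. Only the URL path and query are assembled at this
+// point; the host of the target node is filled in by the transport when
+// the request is performed.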
+func (r *PutElser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutElser: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|elserinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "elserinferenceid", r.elserinferenceid) + } + path.WriteString(r.elserinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
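+//
+// When calling Perform directly, the caller owns the returned response and
+// must close its body; the higher-level Do method does this automatically.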
+func (r PutElser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_elser") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_elser") + if reader := instrument.RecordRequestBody(ctx, "inference.put_elser", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_elser") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutElser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putelser.Response +func (r PutElser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_elser") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutElser headers map. +func (r *PutElser) Header(key, value string) *PutElser { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutElser) _tasktype(tasktype string) *PutElser { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// ElserInferenceId The unique identifier of the inference endpoint. +// API Name: elserinferenceid +func (r *PutElser) _elserinferenceid(elserinferenceid string) *PutElser { + r.paramSet |= elserinferenceidMask + r.elserinferenceid = elserinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout +func (r *PutElser) Timeout(duration string) *PutElser { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutElser) ErrorTrace(errortrace bool) *PutElser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutElser) FilterPath(filterpaths ...string) *PutElser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutElser) Human(human bool) *PutElser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutElser) Pretty(pretty bool) *PutElser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutElser) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutElser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `elser`. +// API name: service +func (r *PutElser) Service(service elserservicetype.ElserServiceType) *PutElser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `elser` service. +// API name: service_settings +func (r *PutElser) ServiceSettings(servicesettings types.ElserServiceSettingsVariant) *PutElser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.ElserServiceSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/request.go new file mode 100644 index 000000000..846ded6a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/request.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putelser + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype" +) + +// Request holds the request body struct for the package putelser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_elser/PutElserRequest.ts#L30-L94 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `elser`. + Service elserservicetype.ElserServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `elser` service. + ServiceSettings types.ElserServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putelser request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/response.go new file mode 100644 index 000000000..7c790901f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putelser + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelser" +) + +// Response holds the response body struct for the package putelser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_elser/PutElserResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeelser.TaskTypeELSER `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/put_googleaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/put_googleaistudio.go new file mode 100644 index 000000000..0ff855457 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/put_googleaistudio.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an Google AI Studio inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `googleaistudio` service. +package putgoogleaistudio + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype" +) + +const ( + tasktypeMask = iota + 1 + + googleaistudioinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
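+//
+// Concretely, this error occurs when the task type and inference id path
+// parameters are not both set, since no `_inference/{tasktype}/{id}` path
+// can be built without them.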
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutGoogleaistudio struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + googleaistudioinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutGoogleaistudio type alias for index. +type NewPutGoogleaistudio func(tasktype, googleaistudioinferenceid string) *PutGoogleaistudio + +// NewPutGoogleaistudioFunc returns a new instance of PutGoogleaistudio with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutGoogleaistudioFunc(tp elastictransport.Interface) NewPutGoogleaistudio { + return func(tasktype, googleaistudioinferenceid string) *PutGoogleaistudio { + n := New(tp) + + n._tasktype(tasktype) + + n._googleaistudioinferenceid(googleaistudioinferenceid) + + return n + } +} + +// Create an Google AI Studio inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `googleaistudio` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio +func New(tp elastictransport.Interface) *PutGoogleaistudio { + r := &PutGoogleaistudio{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutGoogleaistudio) Raw(raw io.Reader) *PutGoogleaistudio { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutGoogleaistudio) Request(req *Request) *PutGoogleaistudio { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
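+//
+// The Accept header defaults to the Elasticsearch 9 compatibility media
+// type, and Content-Type does too whenever a request body is present,
+// unless the caller has already set these headers explicitly.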
+func (r *PutGoogleaistudio) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutGoogleaistudio: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|googleaistudioinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "googleaistudioinferenceid", r.googleaistudioinferenceid) + } + path.WriteString(r.googleaistudioinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
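+//
+// Perform returns the response unprocessed; Do builds on it, decoding
+// successful responses into a Response and error responses into a
+// types.ElasticsearchError.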
+func (r PutGoogleaistudio) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_googleaistudio") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_googleaistudio") + if reader := instrument.RecordRequestBody(ctx, "inference.put_googleaistudio", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_googleaistudio") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutGoogleaistudio query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putgoogleaistudio.Response +func (r PutGoogleaistudio) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_googleaistudio") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutGoogleaistudio headers map. +func (r *PutGoogleaistudio) Header(key, value string) *PutGoogleaistudio { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutGoogleaistudio) _tasktype(tasktype string) *PutGoogleaistudio { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// GoogleaistudioInferenceId The unique identifier of the inference endpoint. 
+// API Name: googleaistudioinferenceid +func (r *PutGoogleaistudio) _googleaistudioinferenceid(googleaistudioinferenceid string) *PutGoogleaistudio { + r.paramSet |= googleaistudioinferenceidMask + r.googleaistudioinferenceid = googleaistudioinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutGoogleaistudio) Timeout(duration string) *PutGoogleaistudio { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutGoogleaistudio) ErrorTrace(errortrace bool) *PutGoogleaistudio { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutGoogleaistudio) FilterPath(filterpaths ...string) *PutGoogleaistudio { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutGoogleaistudio) Human(human bool) *PutGoogleaistudio { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutGoogleaistudio) Pretty(pretty bool) *PutGoogleaistudio { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutGoogleaistudio) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutGoogleaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `googleaistudio`. +// API name: service +func (r *PutGoogleaistudio) Service(service googleaiservicetype.GoogleAiServiceType) *PutGoogleaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `googleaistudio` service. 
+// API name: service_settings +func (r *PutGoogleaistudio) ServiceSettings(servicesettings types.GoogleAiStudioServiceSettingsVariant) *PutGoogleaistudio { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.GoogleAiStudioServiceSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/request.go new file mode 100644 index 000000000..4fde8ff45 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/request.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgoogleaistudio + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype" +) + +// Request holds the request body struct for the package putgoogleaistudio +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L30-L79 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `googleaistudio`. + Service googleaiservicetype.GoogleAiServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `googleaistudio` service. 
+ ServiceSettings types.GoogleAiStudioServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putgoogleaistudio request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/response.go new file mode 100644 index 000000000..f5b5af773 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgoogleaistudio + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegoogleaistudio" +) + +// Response holds the response body struct for the package putgoogleaistudio +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_googleaistudio/PutGoogleAiStudioResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypegoogleaistudio.TaskTypeGoogleAIStudio `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/put_googlevertexai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/put_googlevertexai.go new file mode 100644 index 000000000..7ee85bb46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/put_googlevertexai.go @@ -0,0 +1,445 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a Google Vertex AI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `googlevertexai` service. +package putgooglevertexai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype" +) + +const ( + tasktypeMask = iota + 1 + + googlevertexaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
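+//
+// Because the path-parameter setters are unexported, they are only invoked
+// through the constructor returned by NewPutGooglevertexaiFunc; a value
+// created with New alone has no path parameters and will hit this error.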
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutGooglevertexai struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + googlevertexaiinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutGooglevertexai type alias for index. +type NewPutGooglevertexai func(tasktype, googlevertexaiinferenceid string) *PutGooglevertexai + +// NewPutGooglevertexaiFunc returns a new instance of PutGooglevertexai with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutGooglevertexaiFunc(tp elastictransport.Interface) NewPutGooglevertexai { + return func(tasktype, googlevertexaiinferenceid string) *PutGooglevertexai { + n := New(tp) + + n._tasktype(tasktype) + + n._googlevertexaiinferenceid(googlevertexaiinferenceid) + + return n + } +} + +// Create a Google Vertex AI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `googlevertexai` service. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai +func New(tp elastictransport.Interface) *PutGooglevertexai { + r := &PutGooglevertexai{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutGooglevertexai) Raw(raw io.Reader) *PutGooglevertexai { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutGooglevertexai) Request(req *Request) *PutGooglevertexai { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
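+//
+// A body supplied via Raw takes precedence; otherwise a non-nil Request
+// set via the Request method is marshalled to JSON and used as the
+// request body.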
+func (r *PutGooglevertexai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutGooglevertexai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|googlevertexaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "googlevertexaiinferenceid", r.googlevertexaiinferenceid) + } + path.WriteString(r.googlevertexaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
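+//
+// If the transport implements elastictransport.Instrumented, a span named
+// "inference.put_googlevertexai" is recorded around the call, including
+// request and response hooks and any error encountered.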
+func (r PutGooglevertexai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_googlevertexai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_googlevertexai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_googlevertexai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_googlevertexai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutGooglevertexai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putgooglevertexai.Response +func (r PutGooglevertexai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_googlevertexai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutGooglevertexai headers map. +func (r *PutGooglevertexai) Header(key, value string) *PutGooglevertexai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutGooglevertexai) _tasktype(tasktype string) *PutGooglevertexai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// GooglevertexaiInferenceId The unique identifier of the inference endpoint. 
+// API Name: googlevertexaiinferenceid +func (r *PutGooglevertexai) _googlevertexaiinferenceid(googlevertexaiinferenceid string) *PutGooglevertexai { + r.paramSet |= googlevertexaiinferenceidMask + r.googlevertexaiinferenceid = googlevertexaiinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutGooglevertexai) Timeout(duration string) *PutGooglevertexai { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutGooglevertexai) ErrorTrace(errortrace bool) *PutGooglevertexai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutGooglevertexai) FilterPath(filterpaths ...string) *PutGooglevertexai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutGooglevertexai) Human(human bool) *PutGooglevertexai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutGooglevertexai) Pretty(pretty bool) *PutGooglevertexai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutGooglevertexai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutGooglevertexai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `googlevertexai`. +// API name: service +func (r *PutGooglevertexai) Service(service googlevertexaiservicetype.GoogleVertexAIServiceType) *PutGooglevertexai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `googlevertexai` service. +// API name: service_settings +func (r *PutGooglevertexai) ServiceSettings(servicesettings types.GoogleVertexAIServiceSettingsVariant) *PutGooglevertexai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.GoogleVertexAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutGooglevertexai) TaskSettings(tasksettings types.GoogleVertexAITaskSettingsVariant) *PutGooglevertexai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.GoogleVertexAITaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/request.go new file mode 100644 index 000000000..b3193fd14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgooglevertexai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype" +) + +// Request holds the request body struct for the package putgooglevertexai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L31-L85 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `googlevertexai`. + Service googlevertexaiservicetype.GoogleVertexAIServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `googlevertexai` service. + ServiceSettings types.GoogleVertexAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.GoogleVertexAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putgooglevertexai request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/response.go new file mode 100644 index 000000000..ea6556c5a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgooglevertexai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegooglevertexai" +) + +// Response holds the response body struct for the package putgooglevertexai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_googlevertexai/PutGoogleVertexAiResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypegooglevertexai.TaskTypeGoogleVertexAI `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/put_hugging_face.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/put_hugging_face.go new file mode 100644 index 000000000..159785067 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/put_hugging_face.go @@ -0,0 +1,537 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a Hugging Face inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `hugging_face` service. +// Supported tasks include: `text_embedding`, `completion`, and +// `chat_completion`. +// +// To configure the endpoint, first visit the Hugging Face Inference Endpoints +// page and create a new endpoint. +// Select a model that supports the task you intend to use. +// +// For Elastic's `text_embedding` task: +// The selected model must support the `Sentence Embeddings` task. On the new +// endpoint creation page, select the `Sentence Embeddings` task under the +// `Advanced Configuration` section. +// After the endpoint has initialized, copy the generated endpoint URL. 
+// Recommended models for `text_embedding` task: +// +// * `all-MiniLM-L6-v2` +// * `all-MiniLM-L12-v2` +// * `all-mpnet-base-v2` +// * `e5-base-v2` +// * `e5-small-v2` +// * `multilingual-e5-base` +// * `multilingual-e5-small` +// +// For Elastic's `chat_completion` and `completion` tasks: +// The selected model must support the `Text Generation` task and expose OpenAI +// API. HuggingFace supports both serverless and dedicated endpoints for `Text +// Generation`. When creating dedicated endpoint select the `Text Generation` +// task. +// After the endpoint is initialized (for dedicated) or ready (for serverless), +// ensure it supports the OpenAI API and includes `/v1/chat/completions` part in +// URL. Then, copy the full endpoint URL for use. +// Recommended models for `chat_completion` and `completion` tasks: +// +// * `Mistral-7B-Instruct-v0.2` +// * `QwQ-32B` +// * `Phi-3-mini-128k-instruct` +// +// For Elastic's `rerank` task: +// The selected model must support the `sentence-ranking` task and expose OpenAI +// API. +// HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` +// so far. +// After the endpoint is initialized, copy the full endpoint URL for use. +// Tested models for `rerank` task: +// +// * `bge-reranker-base` +// * `jina-reranker-v1-turbo-en-GGUF` +package puthuggingface + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype" +) + +const ( + tasktypeMask = iota + 1 + + huggingfaceinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutHuggingFace struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + huggingfaceinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutHuggingFace type alias for index. +type NewPutHuggingFace func(tasktype, huggingfaceinferenceid string) *PutHuggingFace + +// NewPutHuggingFaceFunc returns a new instance of PutHuggingFace with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutHuggingFaceFunc(tp elastictransport.Interface) NewPutHuggingFace { + return func(tasktype, huggingfaceinferenceid string) *PutHuggingFace { + n := New(tp) + + n._tasktype(tasktype) + + n._huggingfaceinferenceid(huggingfaceinferenceid) + + return n + } +} + +// Create a Hugging Face inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `hugging_face` service. +// Supported tasks include: `text_embedding`, `completion`, and +// `chat_completion`. +// +// To configure the endpoint, first visit the Hugging Face Inference Endpoints +// page and create a new endpoint. +// Select a model that supports the task you intend to use. +// +// For Elastic's `text_embedding` task: +// The selected model must support the `Sentence Embeddings` task. On the new +// endpoint creation page, select the `Sentence Embeddings` task under the +// `Advanced Configuration` section. 
+// After the endpoint has initialized, copy the generated endpoint URL.
+// Recommended models for `text_embedding` task:
+//
+// * `all-MiniLM-L6-v2`
+// * `all-MiniLM-L12-v2`
+// * `all-mpnet-base-v2`
+// * `e5-base-v2`
+// * `e5-small-v2`
+// * `multilingual-e5-base`
+// * `multilingual-e5-small`
+//
+// For Elastic's `chat_completion` and `completion` tasks:
+// The selected model must support the `Text Generation` task and expose the
+// OpenAI API. HuggingFace supports both serverless and dedicated endpoints for
+// `Text Generation`. When creating a dedicated endpoint, select the `Text
+// Generation` task.
+// After the endpoint is initialized (for dedicated) or ready (for serverless),
+// ensure it supports the OpenAI API and that the URL includes the
+// `/v1/chat/completions` path. Then copy the full endpoint URL for use.
+// Recommended models for `chat_completion` and `completion` tasks:
+//
+// * `Mistral-7B-Instruct-v0.2`
+// * `QwQ-32B`
+// * `Phi-3-mini-128k-instruct`
+//
+// For Elastic's `rerank` task:
+// The selected model must support the `sentence-ranking` task and expose the
+// OpenAI API.
+// HuggingFace supports only dedicated (not serverless) endpoints for `Rerank`
+// so far.
+// After the endpoint is initialized, copy the full endpoint URL for use.
+// Tested models for `rerank` task:
+//
+// * `bge-reranker-base`
+// * `jina-reranker-v1-turbo-en-GGUF`
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face
+func New(tp elastictransport.Interface) *PutHuggingFace {
+	r := &PutHuggingFace{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutHuggingFace) Raw(raw io.Reader) *PutHuggingFace {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutHuggingFace) Request(req *Request) *PutHuggingFace {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
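+//
+// A hedged sketch of building the request without executing it; the transport
+// value `tp` and the endpoint id are illustrative assumptions, not part of
+// this generated file:
+//
+//	put := NewPutHuggingFaceFunc(tp)("text_embedding", "my-hf-endpoint")
+//	httpReq, err := put.HttpRequest(context.Background())
+//	// httpReq can be inspected or dispatched manually from here.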
+func (r *PutHuggingFace) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutHuggingFace: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|huggingfaceinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "huggingfaceinferenceid", r.huggingfaceinferenceid) + } + path.WriteString(r.huggingfaceinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
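+//
+// Perform returns the raw response and the caller owns the body; a hedged
+// sketch (assumes `put` was built as in the HttpRequest example above):
+//
+//	res, err := put.Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}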
+func (r PutHuggingFace) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_hugging_face") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_hugging_face") + if reader := instrument.RecordRequestBody(ctx, "inference.put_hugging_face", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_hugging_face") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutHuggingFace query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puthuggingface.Response +func (r PutHuggingFace) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_hugging_face") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutHuggingFace headers map. +func (r *PutHuggingFace) Header(key, value string) *PutHuggingFace { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutHuggingFace) _tasktype(tasktype string) *PutHuggingFace { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// HuggingfaceInferenceId The unique identifier of the inference endpoint. 
+// API Name: huggingfaceinferenceid
+func (r *PutHuggingFace) _huggingfaceinferenceid(huggingfaceinferenceid string) *PutHuggingFace {
+	r.paramSet |= huggingfaceinferenceidMask
+	r.huggingfaceinferenceid = huggingfaceinferenceid
+
+	return r
+}
+
+// Timeout Specifies the amount of time to wait for the inference endpoint to be
+// created.
+// API name: timeout
+func (r *PutHuggingFace) Timeout(duration string) *PutHuggingFace {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of
+// errors when they occur.
+// API name: error_trace
+func (r *PutHuggingFace) ErrorTrace(errortrace bool) *PutHuggingFace {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutHuggingFace) FilterPath(filterpaths ...string) *PutHuggingFace {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans. For example, `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the
+// human-readable values are omitted. This makes sense for responses that are
+// consumed only by machines.
+// API name: human
+func (r *PutHuggingFace) Human(human bool) *PutHuggingFace {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option only for debugging.
+// API name: pretty
+func (r *PutHuggingFace) Pretty(pretty bool) *PutHuggingFace {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutHuggingFace) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutHuggingFace {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `hugging_face`.
+// API name: service
+func (r *PutHuggingFace) Service(service huggingfaceservicetype.HuggingFaceServiceType) *PutHuggingFace {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model. These settings are specific to
+// the `hugging_face` service.
+// API name: service_settings
+func (r *PutHuggingFace) ServiceSettings(servicesettings types.HuggingFaceServiceSettingsVariant) *PutHuggingFace {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.HuggingFaceServiceSettingsCaster()
+
+	return r
+}
+
+// Settings to configure the inference task.
+// These settings are specific to the task type you specified.
+// API name: task_settings +func (r *PutHuggingFace) TaskSettings(tasksettings types.HuggingFaceTaskSettingsVariant) *PutHuggingFace { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.HuggingFaceTaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/request.go new file mode 100644 index 000000000..b5113fdb8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puthuggingface + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype" +) + +// Request holds the request body struct for the package puthuggingface +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_hugging_face/PutHuggingFaceRequest.ts#L31-L121 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `hugging_face`. + Service huggingfaceservicetype.HuggingFaceServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `hugging_face` service. + ServiceSettings types.HuggingFaceServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.HuggingFaceTaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puthuggingface request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/response.go new file mode 100644 index 000000000..55fd638f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puthuggingface + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypehuggingface" +) + +// Response holds the response body struct for the package puthuggingface +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_hugging_face/PutHuggingFaceResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypehuggingface.TaskTypeHuggingFace `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/put_jinaai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/put_jinaai.go new file mode 100644 index 000000000..1fb16a160 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/put_jinaai.go @@ -0,0 +1,453 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create a JinaAI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `jinaai`
+// service.
+//
+// To review the available `rerank` models, refer to the JinaAI documentation.
+// To review the available `text_embedding` models, refer to the JinaAI
+// documentation.
+package putjinaai
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype"
+)
+
+const (
+	tasktypeMask = iota + 1
+
+	jinaaiinferenceidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutJinaai struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype          string
+	jinaaiinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutJinaai type alias for index.
+type NewPutJinaai func(tasktype, jinaaiinferenceid string) *PutJinaai
+
+// NewPutJinaaiFunc returns a new instance of PutJinaai with the provided transport.
+// Used in the index of the library; this allows every API to be retrieved from one place.
+func NewPutJinaaiFunc(tp elastictransport.Interface) NewPutJinaai {
+	return func(tasktype, jinaaiinferenceid string) *PutJinaai {
+		n := New(tp)
+
+		n._tasktype(tasktype)
+
+		n._jinaaiinferenceid(jinaaiinferenceid)
+
+		return n
+	}
+}
+
+// Create a JinaAI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `jinaai`
+// service.
+//
+// To review the available `rerank` models, refer to the JinaAI documentation.
+// To review the available `text_embedding` models, refer to the JinaAI
+// documentation.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai
+func New(tp elastictransport.Interface) *PutJinaai {
+	r := &PutJinaai{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutJinaai) Raw(raw io.Reader) *PutJinaai {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutJinaai) Request(req *Request) *PutJinaai {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *PutJinaai) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for PutJinaai: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == tasktypeMask|jinaaiinferenceidMask:
+		path.WriteString("/")
+		path.WriteString("_inference")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "tasktype", r.tasktype)
+		}
+		path.WriteString(r.tasktype)
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "jinaaiinferenceid", r.jinaaiinferenceid)
+		}
+		path.WriteString(r.jinaaiinferenceid)
+
+		method = http.MethodPut
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
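+//
+// Most callers will prefer the typed Do helper over Perform; a hedged sketch
+// (the transport value `tp` and the endpoint id are illustrative assumptions):
+//
+//	res, err := NewPutJinaaiFunc(tp)("text_embedding", "my-jinaai-endpoint").
+//		Request(NewRequest()).
+//		Do(context.Background())
+//	_, _ = res, err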
+func (r PutJinaai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_jinaai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_jinaai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_jinaai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_jinaai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutJinaai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putjinaai.Response +func (r PutJinaai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_jinaai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutJinaai headers map. +func (r *PutJinaai) Header(key, value string) *PutJinaai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutJinaai) _tasktype(tasktype string) *PutJinaai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// JinaaiInferenceId The unique identifier of the inference endpoint. +// API Name: jinaaiinferenceid +func (r *PutJinaai) _jinaaiinferenceid(jinaaiinferenceid string) *PutJinaai { + r.paramSet |= jinaaiinferenceidMask + r.jinaaiinferenceid = jinaaiinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout
+func (r *PutJinaai) Timeout(duration string) *PutJinaai {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of
+// errors when they occur.
+// API name: error_trace
+func (r *PutJinaai) ErrorTrace(errortrace bool) *PutJinaai {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutJinaai) FilterPath(filterpaths ...string) *PutJinaai {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans. For example, `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the
+// human-readable values are omitted. This makes sense for responses that are
+// consumed only by machines.
+// API name: human
+func (r *PutJinaai) Human(human bool) *PutJinaai {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option only for debugging.
+// API name: pretty
+func (r *PutJinaai) Pretty(pretty bool) *PutJinaai {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutJinaai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutJinaai {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `jinaai`.
+// API name: service
+func (r *PutJinaai) Service(service jinaaiservicetype.JinaAIServiceType) *PutJinaai {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model. These settings are specific to
+// the `jinaai` service.
+// API name: service_settings
+func (r *PutJinaai) ServiceSettings(servicesettings types.JinaAIServiceSettingsVariant) *PutJinaai {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.JinaAIServiceSettingsCaster()
+
+	return r
+}
+
+// Settings to configure the inference task.
+// These settings are specific to the task type you specified.
+// API name: task_settings
+func (r *PutJinaai) TaskSettings(tasksettings types.JinaAITaskSettingsVariant) *PutJinaai {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.TaskSettings = tasksettings.JinaAITaskSettingsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/request.go
new file mode 100644
index 000000000..3011bb9a8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/request.go
@@ -0,0 +1,66 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjinaai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype" +) + +// Request holds the request body struct for the package putjinaai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_jinaai/PutJinaAiRequest.ts#L31-L88 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `jinaai`. + Service jinaaiservicetype.JinaAIServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `jinaai` service. + ServiceSettings types.JinaAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. + TaskSettings *types.JinaAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putjinaai request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/response.go new file mode 100644 index 000000000..446562c48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjinaai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypejinaai" +) + +// Response holds the response body struct for the package putjinaai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_jinaai/PutJinaAiResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypejinaai.TaskTypeJinaAi `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/put_mistral.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/put_mistral.go new file mode 100644 index 000000000..a23411319 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/put_mistral.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a Mistral inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `mistral` +// service. 
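+//
+// A hedged end-to-end sketch; the transport value `tp`, the endpoint id, and
+// the JSON field names are illustrative assumptions, not part of this
+// generated file:
+//
+//	req, _ := NewRequest().FromJSON(`{"service": "mistral", "service_settings": {"api_key": "...", "model": "mistral-embed"}}`)
+//	res, err := NewPutMistralFunc(tp)("text_embedding", "my-mistral-endpoint").
+//		Request(req).
+//		Do(context.Background())
+//	_, _ = res, err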
+package putmistral
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype"
+)
+
+const (
+	tasktypeMask = iota + 1
+
+	mistralinferenceidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutMistral struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype           string
+	mistralinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutMistral type alias for index.
+type NewPutMistral func(tasktype, mistralinferenceid string) *PutMistral
+
+// NewPutMistralFunc returns a new instance of PutMistral with the provided transport.
+// Used in the index of the library; this allows every API to be retrieved from one place.
+func NewPutMistralFunc(tp elastictransport.Interface) NewPutMistral {
+	return func(tasktype, mistralinferenceid string) *PutMistral {
+		n := New(tp)
+
+		n._tasktype(tasktype)
+
+		n._mistralinferenceid(mistralinferenceid)
+
+		return n
+	}
+}
+
+// Create a Mistral inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `mistral`
+// service.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral
+func New(tp elastictransport.Interface) *PutMistral {
+	r := &PutMistral{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutMistral) Raw(raw io.Reader) *PutMistral {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutMistral) Request(req *Request) *PutMistral {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
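+//
+// A hedged sketch of supplying a raw JSON payload, which takes precedence over
+// a typed Request (the body, transport value `tp`, and endpoint id are
+// illustrative assumptions):
+//
+//	put := NewPutMistralFunc(tp)("text_embedding", "my-mistral-endpoint").
+//		Raw(strings.NewReader(`{"service": "mistral"}`))
+//	httpReq, err := put.HttpRequest(context.Background())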
+func (r *PutMistral) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutMistral: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|mistralinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "mistralinferenceid", r.mistralinferenceid) + } + path.WriteString(r.mistralinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutMistral) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_mistral") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_mistral") + if reader := instrument.RecordRequestBody(ctx, "inference.put_mistral", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_mistral") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutMistral query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putmistral.Response +func (r PutMistral) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_mistral") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutMistral headers map. +func (r *PutMistral) Header(key, value string) *PutMistral { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutMistral) _tasktype(tasktype string) *PutMistral { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// MistralInferenceId The unique identifier of the inference endpoint. +// API Name: mistralinferenceid +func (r *PutMistral) _mistralinferenceid(mistralinferenceid string) *PutMistral { + r.paramSet |= mistralinferenceidMask + r.mistralinferenceid = mistralinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout
+func (r *PutMistral) Timeout(duration string) *PutMistral {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true`, Elasticsearch will include the full stack trace of
+// errors when they occur.
+// API name: error_trace
+func (r *PutMistral) ErrorTrace(errortrace bool) *PutMistral {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation that reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutMistral) FilterPath(filterpaths ...string) *PutMistral {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true`, statistics are returned in a format suitable for
+// humans. For example, `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled, the
+// human-readable values are omitted. This makes sense for responses that are
+// consumed only by machines.
+// API name: human
+func (r *PutMistral) Human(human bool) *PutMistral {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true`, the returned JSON will be "pretty-formatted". Use
+// this option only for debugging.
+// API name: pretty
+func (r *PutMistral) Pretty(pretty bool) *PutMistral {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The chunking configuration object.
+// API name: chunking_settings
+func (r *PutMistral) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutMistral {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster()
+
+	return r
+}
+
+// The type of service supported for the specified task type. In this case,
+// `mistral`.
+// API name: service
+func (r *PutMistral) Service(service mistralservicetype.MistralServiceType) *PutMistral {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Service = service
+	return r
+}
+
+// Settings used to install the inference model. These settings are specific to
+// the `mistral` service.
+// API name: service_settings
+func (r *PutMistral) ServiceSettings(servicesettings types.MistralServiceSettingsVariant) *PutMistral {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.ServiceSettings = *servicesettings.MistralServiceSettingsCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/request.go
new file mode 100644
index 000000000..3db59c384
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/request.go
@@ -0,0 +1,63 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putmistral + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype" +) + +// Request holds the request body struct for the package putmistral +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_mistral/PutMistralRequest.ts#L30-L79 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `mistral`. + Service mistralservicetype.MistralServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `mistral` service. + ServiceSettings types.MistralServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putmistral request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/response.go new file mode 100644 index 000000000..2e686c6d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putmistral + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypemistral" +) + +// Response holds the response body struct for the package putmistral +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_mistral/PutMistralResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypemistral.TaskTypeMistral `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/put_openai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/put_openai.go new file mode 100644 index 000000000..56a2fc00e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/put_openai.go @@ -0,0 +1,447 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an OpenAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `openai` +// service or `openai` compatible APIs. +package putopenai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype" +) + +const ( + tasktypeMask = iota + 1 + + openaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutOpenai struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	tasktype          string
+	openaiinferenceid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutOpenai type alias for index.
+type NewPutOpenai func(tasktype, openaiinferenceid string) *PutOpenai
+
+// NewPutOpenaiFunc returns a new instance of PutOpenai with the provided transport.
+// Used in the index of the library; this allows every API to be retrieved from one place.
+func NewPutOpenaiFunc(tp elastictransport.Interface) NewPutOpenai {
+	return func(tasktype, openaiinferenceid string) *PutOpenai {
+		n := New(tp)
+
+		n._tasktype(tasktype)
+
+		n._openaiinferenceid(openaiinferenceid)
+
+		return n
+	}
+}
+
+// Create an OpenAI inference endpoint.
+//
+// Create an inference endpoint to perform an inference task with the `openai`
+// service or `openai` compatible APIs.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai
+func New(tp elastictransport.Interface) *PutOpenai {
+	r := &PutOpenai{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutOpenai) Raw(raw io.Reader) *PutOpenai {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *PutOpenai) Request(req *Request) *PutOpenai {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
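+//
+// Headers and query parameters set on the builder are carried onto the built
+// request; a hedged sketch (the transport value `tp`, endpoint id, and header
+// value are illustrative assumptions):
+//
+//	httpReq, err := NewPutOpenaiFunc(tp)("chat_completion", "my-openai-endpoint").
+//		Header("X-Opaque-Id", "example").
+//		HttpRequest(context.Background())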
+func (r *PutOpenai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutOpenai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|openaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "openaiinferenceid", r.openaiinferenceid) + } + path.WriteString(r.openaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutOpenai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_openai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_openai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_openai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutOpenai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putopenai.Response +func (r PutOpenai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_openai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutOpenai headers map. +func (r *PutOpenai) Header(key, value string) *PutOpenai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// NOTE: The `chat_completion` task type only supports streaming and only +// through the _stream API. +// API Name: tasktype +func (r *PutOpenai) _tasktype(tasktype string) *PutOpenai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// OpenaiInferenceId The unique identifier of the inference endpoint. 
+// API Name: openaiinferenceid +func (r *PutOpenai) _openaiinferenceid(openaiinferenceid string) *PutOpenai { + r.paramSet |= openaiinferenceidMask + r.openaiinferenceid = openaiinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. +// API name: timeout +func (r *PutOpenai) Timeout(duration string) *PutOpenai { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutOpenai) ErrorTrace(errortrace bool) *PutOpenai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutOpenai) FilterPath(filterpaths ...string) *PutOpenai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutOpenai) Human(human bool) *PutOpenai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutOpenai) Pretty(pretty bool) *PutOpenai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutOpenai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `openai`. +// API name: service +func (r *PutOpenai) Service(service openaiservicetype.OpenAIServiceType) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `openai` service. +// API name: service_settings +func (r *PutOpenai) ServiceSettings(servicesettings types.OpenAIServiceSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.OpenAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutOpenai) TaskSettings(tasksettings types.OpenAITaskSettingsVariant) *PutOpenai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.OpenAITaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/request.go new file mode 100644 index 000000000..33f64befa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putopenai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype" +) + +// Request holds the request body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_openai/PutOpenAiRequest.ts#L31-L86 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `openai`. + Service openaiservicetype.OpenAIServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `openai` service. + ServiceSettings types.OpenAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.OpenAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putopenai request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/response.go new file mode 100644 index 000000000..6618b3ca6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putopenai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeopenai" +) + +// Response holds the response body struct for the package putopenai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_openai/PutOpenAiResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypeopenai.TaskTypeOpenAI `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/put_voyageai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/put_voyageai.go new file mode 100644 index 000000000..cd3d13394 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/put_voyageai.go @@ -0,0 +1,451 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
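Reviewer note: with `putopenai` now complete (builder, request, response), here is a hedged end-to-end sketch. It assumes the typed client wires the builder as `es.Inference.PutOpenai`, consistent with the `NewPutOpenaiFunc` alias above; the endpoint name and the `service_settings` field names (`api_key`, `model_id`) follow the Elasticsearch inference REST API but are illustrative here:

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build the body with the generated FromJSON helper instead of the
	// typed ServiceSettings struct; field names mirror the REST API.
	req, err := putopenai.NewRequest().FromJSON(`{
	  "service": "openai",
	  "service_settings": {"api_key": "<redacted>", "model_id": "text-embedding-3-small"}
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Inference.PutOpenai("text_embedding", "my-openai-endpoint").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %s (service=%s)", res.InferenceId, res.Service)
}
```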
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a VoyageAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `voyageai` +// service. +// +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. +package putvoyageai + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype" +) + +const ( + tasktypeMask = iota + 1 + + voyageaiinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutVoyageai struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + voyageaiinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutVoyageai type alias for index. +type NewPutVoyageai func(tasktype, voyageaiinferenceid string) *PutVoyageai + +// NewPutVoyageaiFunc returns a new instance of PutVoyageai with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutVoyageaiFunc(tp elastictransport.Interface) NewPutVoyageai { + return func(tasktype, voyageaiinferenceid string) *PutVoyageai { + n := New(tp) + + n._tasktype(tasktype) + + n._voyageaiinferenceid(voyageaiinferenceid) + + return n + } +} + +// Create a VoyageAI inference endpoint. +// +// Create an inference endpoint to perform an inference task with the `voyageai` +// service. +// +// Avoid creating multiple endpoints for the same model unless required, as each +// endpoint consumes significant resources. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai +func New(tp elastictransport.Interface) *PutVoyageai { + r := &PutVoyageai{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutVoyageai) Raw(raw io.Reader) *PutVoyageai { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutVoyageai) Request(req *Request) *PutVoyageai { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutVoyageai) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutVoyageai: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|voyageaiinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "voyageaiinferenceid", r.voyageaiinferenceid) + } + path.WriteString(r.voyageaiinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutVoyageai) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_voyageai") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_voyageai") + if reader := instrument.RecordRequestBody(ctx, "inference.put_voyageai", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_voyageai") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutVoyageai query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putvoyageai.Response +func (r PutVoyageai) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_voyageai") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutVoyageai headers map. +func (r *PutVoyageai) Header(key, value string) *PutVoyageai { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutVoyageai) _tasktype(tasktype string) *PutVoyageai { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// VoyageaiInferenceId The unique identifier of the inference endpoint. +// API Name: voyageaiinferenceid +func (r *PutVoyageai) _voyageaiinferenceid(voyageaiinferenceid string) *PutVoyageai { + r.paramSet |= voyageaiinferenceidMask + r.voyageaiinferenceid = voyageaiinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout +func (r *PutVoyageai) Timeout(duration string) *PutVoyageai { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutVoyageai) ErrorTrace(errortrace bool) *PutVoyageai { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutVoyageai) FilterPath(filterpaths ...string) *PutVoyageai { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutVoyageai) Human(human bool) *PutVoyageai { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutVoyageai) Pretty(pretty bool) *PutVoyageai { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The chunking configuration object. +// API name: chunking_settings +func (r *PutVoyageai) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *PutVoyageai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The type of service supported for the specified task type. In this case, +// `voyageai`. +// API name: service +func (r *PutVoyageai) Service(service voyageaiservicetype.VoyageAIServiceType) *PutVoyageai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `voyageai` service. +// API name: service_settings +func (r *PutVoyageai) ServiceSettings(servicesettings types.VoyageAIServiceSettingsVariant) *PutVoyageai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.VoyageAIServiceSettingsCaster() + + return r +} + +// Settings to configure the inference task. +// These settings are specific to the task type you specified. 
+// API name: task_settings +func (r *PutVoyageai) TaskSettings(tasksettings types.VoyageAITaskSettingsVariant) *PutVoyageai { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings.VoyageAITaskSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/request.go new file mode 100644 index 000000000..d5a15cf54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putvoyageai + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype" +) + +// Request holds the request body struct for the package putvoyageai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_voyageai/PutVoyageAIRequest.ts#L31-L87 +type Request struct { + + // ChunkingSettings The chunking configuration object. + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The type of service supported for the specified task type. In this case, + // `voyageai`. + Service voyageaiservicetype.VoyageAIServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `voyageai` service. + ServiceSettings types.VoyageAIServiceSettings `json:"service_settings"` + // TaskSettings Settings to configure the inference task. + // These settings are specific to the task type you specified. 
+ TaskSettings *types.VoyageAITaskSettings `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putvoyageai request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/response.go new file mode 100644 index 000000000..2fb1b1c75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putvoyageai + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypevoyageai" +) + +// Response holds the response body struct for the package putvoyageai +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_voyageai/PutVoyageAIResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypevoyageai.TaskTypeVoyageAI `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/put_watsonx.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/put_watsonx.go new file mode 100644 index 000000000..a9e2bb06c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/put_watsonx.go @@ -0,0 +1,426 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
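Reviewer note: `putvoyageai` follows the same pattern, but the `Raw` escape hatch is worth showing since it takes precedence over `Request` and skips marshalling entirely. A sketch assuming the same `es.Inference.PutVoyageai` wiring; the endpoint id and `model_id` value are illustrative:

```go
package main

import (
	"context"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Raw takes precedence over Request, so this body is sent verbatim.
	res, err := es.Inference.PutVoyageai("rerank", "my-voyageai-endpoint").
		Raw(strings.NewReader(`{"service":"voyageai","service_settings":{"model_id":"rerank-2"}}`)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("task type:", res.TaskType)
}
```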
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. +package putwatsonx + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype" +) + +const ( + tasktypeMask = iota + 1 + + watsonxinferenceidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutWatsonx struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + tasktype string + watsonxinferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutWatsonx type alias for index. +type NewPutWatsonx func(tasktype, watsonxinferenceid string) *PutWatsonx + +// NewPutWatsonxFunc returns a new instance of PutWatsonx with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutWatsonxFunc(tp elastictransport.Interface) NewPutWatsonx { + return func(tasktype, watsonxinferenceid string) *PutWatsonx { + n := New(tp) + + n._tasktype(tasktype) + + n._watsonxinferenceid(watsonxinferenceid) + + return n + } +} + +// Create a Watsonx inference endpoint. +// +// Create an inference endpoint to perform an inference task with the +// `watsonxai` service. +// You need an IBM Cloud Databases for Elasticsearch deployment to use the +// `watsonxai` inference service. +// You can provision one through the IBM catalog, the Cloud Databases CLI +// plug-in, the Cloud Databases API, or Terraform. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx +func New(tp elastictransport.Interface) *PutWatsonx { + r := &PutWatsonx{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutWatsonx) Raw(raw io.Reader) *PutWatsonx { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutWatsonx) Request(req *Request) *PutWatsonx { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutWatsonx) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutWatsonx: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == tasktypeMask|watsonxinferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watsonxinferenceid", r.watsonxinferenceid) + } + path.WriteString(r.watsonxinferenceid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutWatsonx) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.put_watsonx") + if reader := instrument.RecordRequestBody(ctx, "inference.put_watsonx", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.put_watsonx") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutWatsonx query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putwatsonx.Response +func (r PutWatsonx) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.put_watsonx") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutWatsonx headers map. +func (r *PutWatsonx) Header(key, value string) *PutWatsonx { + r.headers.Set(key, value) + + return r +} + +// TaskType The type of the inference task that the model will perform. +// API Name: tasktype +func (r *PutWatsonx) _tasktype(tasktype string) *PutWatsonx { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// WatsonxInferenceId The unique identifier of the inference endpoint. +// API Name: watsonxinferenceid +func (r *PutWatsonx) _watsonxinferenceid(watsonxinferenceid string) *PutWatsonx { + r.paramSet |= watsonxinferenceidMask + r.watsonxinferenceid = watsonxinferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference endpoint to be +// created. 
+// API name: timeout +func (r *PutWatsonx) Timeout(duration string) *PutWatsonx { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutWatsonx) ErrorTrace(errortrace bool) *PutWatsonx { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutWatsonx) FilterPath(filterpaths ...string) *PutWatsonx { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutWatsonx) Human(human bool) *PutWatsonx { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutWatsonx) Pretty(pretty bool) *PutWatsonx { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The type of service supported for the specified task type. In this case, +// `watsonxai`. +// API name: service +func (r *PutWatsonx) Service(service watsonxservicetype.WatsonxServiceType) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Service = service + return r +} + +// Settings used to install the inference model. These settings are specific to +// the `watsonxai` service. +// API name: service_settings +func (r *PutWatsonx) ServiceSettings(servicesettings types.WatsonxServiceSettingsVariant) *PutWatsonx { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = *servicesettings.WatsonxServiceSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/request.go new file mode 100644 index 000000000..09216f568 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putwatsonx + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype" +) + +// Request holds the request body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_watsonx/PutWatsonxRequest.ts#L29-L75 +type Request struct { + + // Service The type of service supported for the specified task type. In this case, + // `watsonxai`. + Service watsonxservicetype.WatsonxServiceType `json:"service"` + // ServiceSettings Settings used to install the inference model. These settings are specific to + // the `watsonxai` service. + ServiceSettings types.WatsonxServiceSettings `json:"service_settings"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putwatsonx request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/response.go new file mode 100644 index 000000000..d3183c4a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
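Reviewer note: the URL assembly in these builders is easy to verify offline, because `HttpRequest` is public and both path parameters are folded into `paramSet` before the path is written. A sketch using only constructors from this diff; the transport setup and request body fields are assumptions for illustration:

```go
package main

import (
	"context"
	"log"
	"net/url"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx"
)

func main() {
	u, _ := url.Parse("http://localhost:9200")
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// NewPutWatsonxFunc sets both required path parameters up front, so
	// HttpRequest can build the PUT URL without hitting ErrBuildPath.
	body := `{"service":"watsonxai","service_settings":{"api_key":"<redacted>"}}`
	req, err := putwatsonx.NewPutWatsonxFunc(tp)("text_embedding", "my-watsonx-endpoint").
		Raw(strings.NewReader(body)).
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.Method, req.URL.Path) // PUT /_inference/text_embedding/my-watsonx-endpoint
}
```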
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putwatsonx + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypewatsonx" +) + +// Response holds the response body struct for the package putwatsonx +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/put_watsonx/PutWatsonxResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktypewatsonx.TaskTypeWatsonx `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/request.go new file mode 100644 index 000000000..611e6f209 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/request.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rerank + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package rerank +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/rerank/RerankRequest.ts#L25-L72 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // > info + // > Inference endpoints for the `completion` task type currently only support a + // single string as input. + Input []string `json:"input"` + // Query Query input. + Query string `json:"query"` + // TaskSettings Task settings for the individual inference request. + // These settings are specific to the task type you specified and override the + // task settings specified when initializing the service. 
+ TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Rerank request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/rerank.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/rerank.go new file mode 100644 index 000000000..13406fcd3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/rerank.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform reranking inference on the service +package rerank + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
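Reviewer note: the custom `UnmarshalJSON` on `rerank.Request` above accepts `input` either as a bare string or as an array, normalizing both into the `[]string` field. A small sketch exercising both shapes through the generated `FromJSON` helper; the query and document texts are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank"
)

func main() {
	// A bare string is appended as a single-element slice...
	single, err := rerank.NewRequest().FromJSON(`{"query":"capital of France","input":"Paris is the capital of France."}`)
	if err != nil {
		log.Fatal(err)
	}

	// ...while an array decodes directly into Input.
	many, err := rerank.NewRequest().FromJSON(`{"query":"capital of France","input":["Paris is a city.","Berlin is a city."]}`)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(len(single.Input), len(many.Input)) // 1 2
}
```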
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Rerank struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRerank type alias for index. +type NewRerank func(inferenceid string) *Rerank + +// NewRerankFunc returns a new instance of Rerank with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRerankFunc(tp elastictransport.Interface) NewRerank { + return func(inferenceid string) *Rerank { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform reranking inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *Rerank { + r := &Rerank{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Rerank) Raw(raw io.Reader) *Rerank { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Rerank) Request(req *Request) *Rerank { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Rerank) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Rerank: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("rerank") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Rerank) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.rerank") + if reader := instrument.RecordRequestBody(ctx, "inference.rerank", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.rerank") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Rerank query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a rerank.Response +func (r Rerank) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.rerank") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Rerank headers map. +func (r *Rerank) Header(key, value string) *Rerank { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *Rerank) _inferenceid(inferenceid string) *Rerank { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout The amount of time to wait for the inference request to complete. +// API name: timeout +func (r *Rerank) Timeout(duration string) *Rerank { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
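Editorial aside, not part of the vendored diff: a usage sketch for the rerank client in this file. tp can be any elastictransport.Interface, and "my-rerank-endpoint" is a placeholder for an existing inference endpoint:

package main

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank"
)

func rerankDocs(ctx context.Context, tp elastictransport.Interface) error {
	res, err := rerank.NewRerankFunc(tp)("my-rerank-endpoint").
		Query("capital of France").
		Input(
			"Berlin is the capital of Germany.",
			"Paris is the capital of France.",
		).
		Do(ctx)
	if err != nil {
		return err
	}
	// res is a rerank.Response, i.e. []types.RankedDocument,
	// ordered by relevance to the query.
	for _, doc := range res {
		fmt.Printf("%+v\n", doc)
	}
	return nil
}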
+// API name: error_trace +func (r *Rerank) ErrorTrace(errortrace bool) *Rerank { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Rerank) FilterPath(filterpaths ...string) *Rerank { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Rerank) Human(human bool) *Rerank { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Rerank) Pretty(pretty bool) *Rerank { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// > info +// > Inference endpoints for the `completion` task type currently only support a +// single string as input. +// API name: input +func (r *Rerank) Input(inputs ...string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Query input. +// API name: query +func (r *Rerank) Query(query string) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// Task settings for the individual inference request. +// These settings are specific to the task type you specified and override the +// task settings specified when initializing the service. +// API name: task_settings +func (r *Rerank) TaskSettings(tasksettings json.RawMessage) *Rerank { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/response.go new file mode 100644 index 000000000..343af38ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
+// See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package rerank
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package rerank
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/rerank/RerankResponse.ts#L22-L25
+
+type Response []types.RankedDocument
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/request.go
new file mode 100644
index 000000000..43e31ded0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/request.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package sparseembedding
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Request holds the request body struct for the package sparseembedding
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/sparse_embedding/SparseEmbeddingRequest.ts#L25-L63
+type Request struct {
+
+	// Input Inference input.
+	// Either a string or an array of strings.
+ Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Sparseembedding request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/response.go new file mode 100644 index 000000000..f91e0d548 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package sparseembedding + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package sparseembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/sparse_embedding/SparseEmbeddingResponse.ts#L22-L25 + +type Response []types.SparseEmbeddingResult + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/sparse_embedding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/sparse_embedding.go new file mode 100644 index 000000000..490ba880c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding/sparse_embedding.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform sparse embedding inference on the service +package sparseembedding + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SparseEmbedding struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSparseEmbedding type alias for index. +type NewSparseEmbedding func(inferenceid string) *SparseEmbedding + +// NewSparseEmbeddingFunc returns a new instance of SparseEmbedding with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
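Editorial aside, not part of the vendored diff: a small illustration of ErrBuildPath above. New does not record the inference id in paramSet, so HttpRequest (defined further down in this file) cannot assemble a path and fails fast:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding"
)

func buildPathCheck(tp elastictransport.Interface) {
	// Constructed via New rather than NewSparseEmbeddingFunc, so no
	// inference id is ever set and the path switch matches nothing.
	_, err := sparseembedding.New(tp).
		Input("the quick brown fox").
		HttpRequest(context.Background())
	fmt.Println(errors.Is(err, sparseembedding.ErrBuildPath)) // true
}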
+func NewSparseEmbeddingFunc(tp elastictransport.Interface) NewSparseEmbedding { + return func(inferenceid string) *SparseEmbedding { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform sparse embedding inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *SparseEmbedding { + r := &SparseEmbedding{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SparseEmbedding) Raw(raw io.Reader) *SparseEmbedding { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SparseEmbedding) Request(req *Request) *SparseEmbedding { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SparseEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SparseEmbedding: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("sparse_embedding") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SparseEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.sparse_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.sparse_embedding", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.sparse_embedding") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SparseEmbedding query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a sparseembedding.Response +func (r SparseEmbedding) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.sparse_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SparseEmbedding headers map. +func (r *SparseEmbedding) Header(key, value string) *SparseEmbedding { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *SparseEmbedding) _inferenceid(inferenceid string) *SparseEmbedding { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *SparseEmbedding) Timeout(duration string) *SparseEmbedding { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *SparseEmbedding) ErrorTrace(errortrace bool) *SparseEmbedding { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SparseEmbedding) FilterPath(filterpaths ...string) *SparseEmbedding { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SparseEmbedding) Human(human bool) *SparseEmbedding { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SparseEmbedding) Pretty(pretty bool) *SparseEmbedding { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. +// API name: input +func (r *SparseEmbedding) Input(inputs ...string) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *SparseEmbedding) TaskSettings(tasksettings json.RawMessage) *SparseEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/request.go new file mode 100644 index 000000000..c87085e34 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/request.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
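Editorial aside, not part of the vendored diff: a usage sketch for the sparse embedding client completed above, with a placeholder ELSER-style endpoint name:

package main

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding"
)

func embedSparse(ctx context.Context, tp elastictransport.Interface) error {
	res, err := sparseembedding.NewSparseEmbeddingFunc(tp)("my-elser-endpoint").
		Input("the quick brown fox jumps over the lazy dog").
		Do(ctx)
	if err != nil {
		return err
	}
	// res is a sparseembedding.Response, i.e. []types.SparseEmbeddingResult,
	// one result per input string.
	for _, emb := range res {
		fmt.Printf("%+v\n", emb)
	}
	return nil
}

When the payload is already serialized, Raw can replace the typed builders, since Raw takes precedence over Request.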
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package streamcompletion + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/stream_completion/StreamInferenceRequest.ts#L25-L71 +type Request struct { + + // Input The text on which you want to perform the inference task. + // It can be a single string or an array. + // + // NOTE: Inference endpoints for the completion task type currently only support + // a single string as input. + Input []string `json:"input"` + // TaskSettings Optional task settings + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Streamcompletion request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + s.Input = append(s.Input, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/response.go new file mode 100644 index 000000000..dbadc515d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package streamcompletion + +// Response holds the response body struct for the package streamcompletion +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/stream_completion/StreamInferenceResponse.ts#L22-L25 + +type Response = []byte + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/stream_completion.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/stream_completion.go new file mode 100644 index 000000000..50d34da1c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion/stream_completion.go @@ -0,0 +1,430 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. +package streamcompletion + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StreamCompletion struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStreamCompletion type alias for index. +type NewStreamCompletion func(inferenceid string) *StreamCompletion + +// NewStreamCompletionFunc returns a new instance of StreamCompletion with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStreamCompletionFunc(tp elastictransport.Interface) NewStreamCompletion { + return func(inferenceid string) *StreamCompletion { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform streaming inference. +// Get real-time responses for completion tasks by delivering answers +// incrementally, reducing response times during computation. +// This API works only with the completion task type. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. For built-in models and models uploaded through +// Eland, the inference APIs offer an alternative way to use and manage trained +// models. However, if you do not plan to use the inference APIs to use these +// models or if you want to use non-NLP models, use the machine learning trained +// model APIs. +// +// This API requires the `monitor_inference` cluster privilege (the built-in +// `inference_admin` and `inference_user` roles grant this privilege). You must +// use a client that supports streaming. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference +func New(tp elastictransport.Interface) *StreamCompletion { + r := &StreamCompletion{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StreamCompletion) Raw(raw io.Reader) *StreamCompletion { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StreamCompletion) Request(req *Request) *StreamCompletion { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *StreamCompletion) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StreamCompletion: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("completion") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_stream") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StreamCompletion) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.stream_completion") + if reader := instrument.RecordRequestBody(ctx, "inference.stream_completion", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.stream_completion") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StreamCompletion query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a streamcompletion.Response +func (r StreamCompletion) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.stream_completion") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + response, err = io.ReadAll(res.Body) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StreamCompletion headers map. +func (r *StreamCompletion) Header(key, value string) *StreamCompletion { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier for the inference endpoint. +// API Name: inferenceid +func (r *StreamCompletion) _inferenceid(inferenceid string) *StreamCompletion { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout The amount of time to wait for the inference request to complete. 
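Editorial aside, not part of the vendored diff: Do above buffers the whole event stream into the []byte Response, so a caller that wants output as it arrives can call Perform directly and scan the body. The endpoint name is a placeholder; the "data:" framing follows from the text/event-stream Accept header set in HttpRequest:

package main

import (
	"bufio"
	"context"
	"fmt"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion"
)

func streamChat(ctx context.Context, tp elastictransport.Interface) error {
	res, err := streamcompletion.NewStreamCompletionFunc(tp)("my-completion-endpoint").
		Input("Why is the sky blue?").
		Perform(ctx) // POST /_inference/completion/my-completion-endpoint/_stream
	if err != nil {
		return err
	}
	defer res.Body.Close()

	scanner := bufio.NewScanner(res.Body)
	for scanner.Scan() {
		line := scanner.Text()
		// Server-sent events arrive as "data: {...}" frames.
		if strings.HasPrefix(line, "data:") {
			fmt.Println(strings.TrimSpace(strings.TrimPrefix(line, "data:")))
		}
	}
	return scanner.Err()
}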
+// API name: timeout +func (r *StreamCompletion) Timeout(duration string) *StreamCompletion { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StreamCompletion) ErrorTrace(errortrace bool) *StreamCompletion { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StreamCompletion) FilterPath(filterpaths ...string) *StreamCompletion { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StreamCompletion) Human(human bool) *StreamCompletion { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StreamCompletion) Pretty(pretty bool) *StreamCompletion { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The text on which you want to perform the inference task. +// It can be a single string or an array. +// +// NOTE: Inference endpoints for the completion task type currently only support +// a single string as input. +// API name: input +func (r *StreamCompletion) Input(inputs ...string) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *StreamCompletion) TaskSettings(tasksettings json.RawMessage) *StreamCompletion { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/request.go new file mode 100644 index 000000000..b02940b0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/request.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
+// See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package textembedding
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Request holds the request body struct for the package textembedding
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/text_embedding/TextEmbeddingRequest.ts#L25-L63
+type Request struct {
+
+	// Input Inference input.
+	// Either a string or an array of strings.
+	Input []string `json:"input"`
+	// TaskSettings Optional task settings
+	TaskSettings json.RawMessage `json:"task_settings,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Textembedding request: %w", err)
+	}
+
+	return &req, nil
+}
+
+func (s *Request) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "input":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Input", err)
+				}
+
+				s.Input = append(s.Input, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Input); err != nil {
+					return fmt.Errorf("%s | %w", "Input", err)
+				}
+			}
+
+		case "task_settings":
+			if err := dec.Decode(&s.TaskSettings); err != nil {
+				return fmt.Errorf("%s | %w", "TaskSettings", err)
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/response.go
new file mode 100644
index 000000000..a0e7fd47c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/response.go
@@ -0,0 +1,45 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package textembedding + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package textembedding +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/text_embedding/TextEmbeddingResponse.ts#L22-L25 +type Response struct { + AdditionalTextEmbeddingInferenceResultProperty map[string]json.RawMessage `json:"-"` + TextEmbedding []types.TextEmbeddingResult `json:"text_embedding,omitempty"` + TextEmbeddingBits []types.TextEmbeddingByteResult `json:"text_embedding_bits,omitempty"` + TextEmbeddingBytes []types.TextEmbeddingByteResult `json:"text_embedding_bytes,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + AdditionalTextEmbeddingInferenceResultProperty: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/text_embedding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/text_embedding.go new file mode 100644 index 000000000..0e6d68ace --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding/text_embedding.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Perform text embedding inference on the service +package textembedding + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type TextEmbedding struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTextEmbedding type alias for index. +type NewTextEmbedding func(inferenceid string) *TextEmbedding + +// NewTextEmbeddingFunc returns a new instance of TextEmbedding with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewTextEmbeddingFunc(tp elastictransport.Interface) NewTextEmbedding { + return func(inferenceid string) *TextEmbedding { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Perform text embedding inference on the service +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference +func New(tp elastictransport.Interface) *TextEmbedding { + r := &TextEmbedding{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *TextEmbedding) Raw(raw io.Reader) *TextEmbedding { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *TextEmbedding) Request(req *Request) *TextEmbedding { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *TextEmbedding) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for TextEmbedding: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString("text_embedding") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r TextEmbedding) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.text_embedding") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.text_embedding") + if reader := instrument.RecordRequestBody(ctx, "inference.text_embedding", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.text_embedding") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the TextEmbedding query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a textembedding.Response +func (r TextEmbedding) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.text_embedding") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the TextEmbedding headers map. +func (r *TextEmbedding) Header(key, value string) *TextEmbedding { + r.headers.Set(key, value) + + return r +} + +// InferenceId The inference Id +// API Name: inferenceid +func (r *TextEmbedding) _inferenceid(inferenceid string) *TextEmbedding { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// Timeout Specifies the amount of time to wait for the inference request to complete. +// API name: timeout +func (r *TextEmbedding) Timeout(duration string) *TextEmbedding { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *TextEmbedding) ErrorTrace(errortrace bool) *TextEmbedding { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *TextEmbedding) FilterPath(filterpaths ...string) *TextEmbedding { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *TextEmbedding) Human(human bool) *TextEmbedding { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *TextEmbedding) Pretty(pretty bool) *TextEmbedding { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Inference input. +// Either a string or an array of strings. +// API name: input +func (r *TextEmbedding) Input(inputs ...string) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Input = make([]string, len(inputs)) + r.req.Input = inputs + + return r +} + +// Optional task settings +// API name: task_settings +func (r *TextEmbedding) TaskSettings(tasksettings json.RawMessage) *TextEmbedding { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/request.go new file mode 100644 index 000000000..7b6640636 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
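
// Editor's note (annotation, not part of the vendored file): unlike the
// inference call packages, update does not define its own body struct; the
// Request declared below is a type alias for types.InferenceEndpoint, so any
// value that can populate an InferenceEndpoint can be handed to
// Update.Request as-is.
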
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package update + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/update/UpdateInferenceRequest.ts#L25-L61 +type Request = types.InferenceEndpoint + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewInferenceEndpoint() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/response.go new file mode 100644 index 000000000..48ca89ee7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/response.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package update + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype" +) + +// Response holds the response body struct for the package update +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/update/UpdateInferenceResponse.ts#L22-L25 +type Response struct { + + // ChunkingSettings Chunking configuration object + ChunkingSettings *types.InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/update.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/update.go new file mode 100644 index 000000000..17c4752ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/inference/update/update.go @@ -0,0 +1,469 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. +// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +package update + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + inferenceidMask = iota + 1 + + tasktypeMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Update struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + inferenceid string + tasktype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdate type alias for index. +type NewUpdate func(inferenceid string) *Update + +// NewUpdateFunc returns a new instance of Update with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { + return func(inferenceid string) *Update { + n := New(tp) + + n._inferenceid(inferenceid) + + return n + } +} + +// Update an inference endpoint. +// +// Modify `task_settings`, secrets (within `service_settings`), or +// `num_allocations` for an inference endpoint, depending on the specific +// endpoint service and `task_type`. +// +// IMPORTANT: The inference APIs enable you to use certain services, such as +// built-in machine learning models (ELSER, E5), models uploaded through Eland, +// Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, +// Watsonx.ai, or Hugging Face. 
+// For built-in models and models uploaded through Eland, the inference APIs +// offer an alternative way to use and manage trained models. +// However, if you do not plan to use the inference APIs to use these models or +// if you want to use non-NLP models, use the machine learning trained model +// APIs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update +func New(tp elastictransport.Interface) *Update { + r := &Update{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Update) Raw(raw io.Reader) *Update { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Update) Request(req *Request) *Update { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Update: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + case r.paramSet == tasktypeMask|inferenceidMask: + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "tasktype", r.tasktype) + } + path.WriteString(r.tasktype) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "inferenceid", r.inferenceid) + } + path.WriteString(r.inferenceid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + 
return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Update) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "inference.update") + if reader := instrument.RecordRequestBody(ctx, "inference.update", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "inference.update") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Update query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a update.Response +func (r Update) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "inference.update") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Update headers map. +func (r *Update) Header(key, value string) *Update { + r.headers.Set(key, value) + + return r +} + +// InferenceId The unique identifier of the inference endpoint. +// API Name: inferenceid +func (r *Update) _inferenceid(inferenceid string) *Update { + r.paramSet |= inferenceidMask + r.inferenceid = inferenceid + + return r +} + +// TaskType The type of inference task that the model performs. +// API Name: tasktype +func (r *Update) TaskType(tasktype string) *Update { + r.paramSet |= tasktypeMask + r.tasktype = tasktype + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
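
// Editor's note (annotation, not part of the vendored file): the paramSet
// bitmask drives the route selection in HttpRequest above. With only the
// endpoint id set the builder issues PUT /_inference/{inferenceid}/_update;
// once TaskType is also called it issues
// PUT /_inference/{tasktype}/{inferenceid}/_update. A minimal sketch,
// assuming a transport tp and hypothetical names:
//
//	upd := NewUpdateFunc(tp)("my-e5").TaskType("text_embedding")
//	res, err := upd.
//		ServiceSettings(json.RawMessage(`{"num_allocations": 2}`)).
//		Do(context.Background())
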
+// API name: error_trace +func (r *Update) ErrorTrace(errortrace bool) *Update { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Update) FilterPath(filterpaths ...string) *Update { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Update) Human(human bool) *Update { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Update) Pretty(pretty bool) *Update { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Chunking configuration object +// API name: chunking_settings +func (r *Update) ChunkingSettings(chunkingsettings types.InferenceChunkingSettingsVariant) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingSettings = chunkingsettings.InferenceChunkingSettingsCaster() + + return r +} + +// The service type +// API name: service +func (r *Update) Service(service string) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Service = service + + return r +} + +// Settings specific to the service +// API name: service_settings +func (r *Update) ServiceSettings(servicesettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ServiceSettings = servicesettings + + return r +} + +// Task settings specific to the service and task type +// API name: task_settings +func (r *Update) TaskSettings(tasksettings json.RawMessage) *Update { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TaskSettings = tasksettings + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go new file mode 100644 index 000000000..519fbe264 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/delete_geoip_database.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. +package deletegeoipdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteGeoipDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteGeoipDatabase type alias for index. +type NewDeleteGeoipDatabase func(id string) *DeleteGeoipDatabase + +// NewDeleteGeoipDatabaseFunc returns a new instance of DeleteGeoipDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteGeoipDatabaseFunc(tp elastictransport.Interface) NewDeleteGeoipDatabase { + return func(id string) *DeleteGeoipDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete GeoIP database configurations. +// +// Delete one or more IP geolocation database configurations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database +func New(tp elastictransport.Interface) *DeleteGeoipDatabase { + r := &DeleteGeoipDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
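
// Editor's note (annotation, not part of the vendored file): the builder
// that follows issues DELETE /_ingest/geoip/database/{id}, and the id
// segment may name several configurations at once because the API accepts a
// comma-separated list. A sketch, assuming a transport tp and hypothetical
// configuration ids:
//
//	del := NewDeleteGeoipDatabaseFunc(tp)("db-one,db-two")
//	res, err := del.MasterTimeout("30s").Do(context.Background())
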
+func (r *DeleteGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_geoip_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_geoip_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteGeoipDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletegeoipdatabase.Response +func (r DeleteGeoipDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + 
return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteGeoipDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteGeoipDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteGeoipDatabase headers map. +func (r *DeleteGeoipDatabase) Header(key, value string) *DeleteGeoipDatabase { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of geoip database configurations to delete +// API Name: id +func (r *DeleteGeoipDatabase) _id(id string) *DeleteGeoipDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeleteGeoipDatabase) MasterTimeout(duration string) *DeleteGeoipDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteGeoipDatabase) Timeout(duration string) *DeleteGeoipDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteGeoipDatabase) ErrorTrace(errortrace bool) *DeleteGeoipDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteGeoipDatabase) FilterPath(filterpaths ...string) *DeleteGeoipDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
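
// Editor's note (annotation, not part of the vendored file): IsSuccess above
// drains and closes the response body itself, returns true for any 2xx
// status, and maps 404 to a clean (false, nil) instead of an error, which
// makes it handy for idempotent cleanup. A sketch, assuming a transport tp
// and a hypothetical id:
//
//	ok, err := NewDeleteGeoipDatabaseFunc(tp)("stale-db").IsSuccess(context.Background())
//	if err != nil {
//		// transport failure or a non-404 error status
//	}
//	_ = ok // false with a nil error means the configuration was already gone
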
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteGeoipDatabase) Human(human bool) *DeleteGeoipDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteGeoipDatabase) Pretty(pretty bool) *DeleteGeoipDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/response.go new file mode 100644 index 000000000..37e517cc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletegeoipdatabase + +// Response holds the response body struct for the package deletegeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/delete_geoip_database/DeleteGeoipDatabaseResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go new file mode 100644 index 000000000..0a20a5bd7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/delete_ip_location_database.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete IP geolocation database configurations. +package deleteiplocationdatabase + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteIpLocationDatabase type alias for index. +type NewDeleteIpLocationDatabase func(id string) *DeleteIpLocationDatabase + +// NewDeleteIpLocationDatabaseFunc returns a new instance of DeleteIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteIpLocationDatabaseFunc(tp elastictransport.Interface) NewDeleteIpLocationDatabase { + return func(id string) *DeleteIpLocationDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete IP geolocation database configurations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database +func New(tp elastictransport.Interface) *DeleteIpLocationDatabase { + r := &DeleteIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
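
// Editor's note (annotation, not part of the vendored file): this endpoint
// mirrors the GeoIP variant, but the builder that follows targets
// DELETE /_ingest/ip_location/database/{id}. Both timeouts take
// Elasticsearch duration strings, and per the parameter docs below "-1"
// means the request should never time out. A sketch, assuming a transport tp
// and a hypothetical id:
//
//	del := NewDeleteIpLocationDatabaseFunc(tp)("my-ip-db")
//	res, err := del.MasterTimeout("-1").Timeout("30s").Do(context.Background())
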
+func (r *DeleteIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteiplocationdatabase.Response +func (r DeleteIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteIpLocationDatabase query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteIpLocationDatabase headers map. +func (r *DeleteIpLocationDatabase) Header(key, value string) *DeleteIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of IP location database configurations. +// API Name: id +func (r *DeleteIpLocationDatabase) _id(id string) *DeleteIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. +// API name: master_timeout +func (r *DeleteIpLocationDatabase) MasterTimeout(duration string) *DeleteIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. +// API name: timeout +func (r *DeleteIpLocationDatabase) Timeout(duration string) *DeleteIpLocationDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteIpLocationDatabase) ErrorTrace(errortrace bool) *DeleteIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
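
// Editor's note (annotation, not part of the vendored file): filter_path is
// evaluated by Elasticsearch before the response is sent, so it shrinks the
// JSON this client has to decode; for this endpoint the only top-level field
// is acknowledged. A sketch, assuming a transport tp and a hypothetical id:
//
//	res, err := NewDeleteIpLocationDatabaseFunc(tp)("my-ip-db").
//		FilterPath("acknowledged").
//		Do(context.Background())
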
+// API name: filter_path +func (r *DeleteIpLocationDatabase) FilterPath(filterpaths ...string) *DeleteIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteIpLocationDatabase) Human(human bool) *DeleteIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteIpLocationDatabase) Pretty(pretty bool) *DeleteIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/response.go new file mode 100644 index 000000000..e6ca56734 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteiplocationdatabase + +// Response holds the response body struct for the package deleteiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/delete_ip_location_database/DeleteIpLocationDatabaseResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/delete_pipeline.go new file mode 100644 index 000000000..bed879ec8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/delete_pipeline.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete pipelines. +// Delete one or more ingest pipelines. +package deletepipeline + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeletePipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeletePipeline type alias for index. +type NewDeletePipeline func(id string) *DeletePipeline + +// NewDeletePipelineFunc returns a new instance of DeletePipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { + return func(id string) *DeletePipeline { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete pipelines. +// Delete one or more ingest pipelines. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline +func New(tp elastictransport.Interface) *DeletePipeline { + r := &DeletePipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
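
// Editor's note (annotation, not part of the vendored file): the pipeline id
// accepts wildcard expressions, so one call can remove many pipelines, and
// per the parameter docs below a bare `*` deletes every ingest pipeline in
// the cluster; guard that value outside of test code. A sketch, assuming a
// transport tp and a hypothetical wildcard:
//
//	res, err := NewDeletePipelineFunc(tp)("logs-*").Do(context.Background())
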
+func (r *DeletePipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeletePipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.delete_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.delete_pipeline") + if reader := instrument.RecordRequestBody(ctx, "ingest.delete_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.delete_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeletePipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletepipeline.Response +func (r DeletePipeline) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeletePipeline) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.delete_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeletePipeline query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeletePipeline headers map. +func (r *DeletePipeline) Header(key, value string) *DeletePipeline { + r.headers.Set(key, value) + + return r +} + +// Id Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +// To delete all ingest pipelines in a cluster, use a value of `*`. +// API Name: id +func (r *DeletePipeline) _id(id string) *DeletePipeline { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *DeletePipeline) MasterTimeout(duration string) *DeletePipeline { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *DeletePipeline) Timeout(duration string) *DeletePipeline { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeletePipeline) ErrorTrace(errortrace bool) *DeletePipeline { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeletePipeline) FilterPath(filterpaths ...string) *DeletePipeline { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *DeletePipeline) Human(human bool) *DeletePipeline {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *DeletePipeline) Pretty(pretty bool) *DeletePipeline {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/response.go
new file mode 100644
index 000000000..8dfe33106
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package deletepipeline
+
+// Response holds the response body struct for the package deletepipeline
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/geo_ip_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/geo_ip_stats.go
new file mode 100644
index 000000000..21785b138
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/geo_ip_stats.go
@@ -0,0 +1,327 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get GeoIP statistics.
+// Get download statistics for GeoIP2 databases that are used with the GeoIP
+// processor.
+package geoipstats
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GeoIpStats struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGeoIpStats type alias for index.
+type NewGeoIpStats func() *GeoIpStats
+
+// NewGeoIpStatsFunc returns a new instance of GeoIpStats with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGeoIpStatsFunc(tp elastictransport.Interface) NewGeoIpStats {
+	return func() *GeoIpStats {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get GeoIP statistics.
+// Get download statistics for GeoIP2 databases that are used with the GeoIP
+// processor.
+//
+// https://www.elastic.co/docs/reference/enrich-processor/geoip-processor
+func New(tp elastictransport.Interface) *GeoIpStats {
+	r := &GeoIpStats{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GeoIpStats) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("geoip")
+		path.WriteString("/")
+		path.WriteString("stats")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GeoIpStats) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ingest.geo_ip_stats")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ingest.geo_ip_stats")
+		if reader := instrument.RecordRequestBody(ctx, "ingest.geo_ip_stats", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ingest.geo_ip_stats")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GeoIpStats query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a geoipstats.Response
+func (r GeoIpStats) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.geo_ip_stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
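A hedged usage sketch for this no-payload endpoint (not part of the patch): imports and client construction as in the DeletePipeline example above, and the es.Ingest.GeoIpStats wiring is assumed. Field access relies only on the geoipstats Response struct added in this diff.

	// Sketch only; es is a *elasticsearch.TypedClient as constructed earlier.
	func printGeoIpStats(ctx context.Context, es *elasticsearch.TypedClient) error {
		stats, err := es.Ingest.GeoIpStats().Do(ctx)
		if err != nil {
			return err
		}
		// Stats aggregates downloads cluster-wide; Nodes is keyed by node ID.
		fmt.Printf("download stats: %+v\n", stats.Stats)
		for node, dbs := range stats.Nodes {
			fmt.Printf("node %s: %+v\n", node, dbs)
		}
		return nil
	}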
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GeoIpStats) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.geo_ip_stats")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GeoIpStats query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GeoIpStats headers map.
+func (r *GeoIpStats) Header(key, value string) *GeoIpStats {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GeoIpStats) ErrorTrace(errortrace bool) *GeoIpStats {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GeoIpStats) FilterPath(filterpaths ...string) *GeoIpStats {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GeoIpStats) Human(human bool) *GeoIpStats {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GeoIpStats) Pretty(pretty bool) *GeoIpStats {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/response.go
new file mode 100644
index 000000000..394271500
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats/response.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package geoipstats
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package geoipstats
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31
+type Response struct {
+
+	// Nodes Downloaded GeoIP2 databases for each node.
+	Nodes map[string]types.GeoIpNodeDatabases `json:"nodes"`
+	// Stats Download statistics for all GeoIP2 databases.
+	Stats types.GeoIpDownloadStatistics `json:"stats"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{
+		Nodes: make(map[string]types.GeoIpNodeDatabases, 0),
+	}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/get_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/get_geoip_database.go
new file mode 100644
index 000000000..ecce081d5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/get_geoip_database.go
@@ -0,0 +1,359 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get GeoIP database configurations.
+//
+// Get information about one or more IP geolocation database configurations.
+package getgeoipdatabase
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetGeoipDatabase struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetGeoipDatabase type alias for index.
+type NewGetGeoipDatabase func() *GetGeoipDatabase
+
+// NewGetGeoipDatabaseFunc returns a new instance of GetGeoipDatabase with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetGeoipDatabaseFunc(tp elastictransport.Interface) NewGetGeoipDatabase {
+	return func() *GetGeoipDatabase {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get GeoIP database configurations.
+//
+// Get information about one or more IP geolocation database configurations.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database
+func New(tp elastictransport.Interface) *GetGeoipDatabase {
+	r := &GetGeoipDatabase{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("geoip")
+		path.WriteString("/")
+		path.WriteString("database")
+
+		method = http.MethodGet
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("geoip")
+		path.WriteString("/")
+		path.WriteString("database")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ingest.get_geoip_database")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ingest.get_geoip_database")
+		if reader := instrument.RecordRequestBody(ctx, "ingest.get_geoip_database", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ingest.get_geoip_database")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetGeoipDatabase query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getgeoipdatabase.Response
+func (r GetGeoipDatabase) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_geoip_database")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
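Another hedged sketch (not part of the patch), assuming the same typed client as the earlier examples; the Databases field comes from the getgeoipdatabase Response added below in this diff.

	// Sketch only; Id is optional here and `*` asks for every configuration.
	func listGeoipConfigs(ctx context.Context, es *elasticsearch.TypedClient) error {
		res, err := es.Ingest.GetGeoipDatabase().Id("*").Do(ctx)
		if err != nil {
			return err
		}
		for _, db := range res.Databases {
			fmt.Printf("geoip database config: %+v\n", db)
		}
		return nil
	}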
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetGeoipDatabase) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_geoip_database")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetGeoipDatabase query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetGeoipDatabase headers map.
+func (r *GetGeoipDatabase) Header(key, value string) *GetGeoipDatabase {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id A comma-separated list of database configuration IDs to retrieve.
+// Wildcard (`*`) expressions are supported.
+// To get all database configurations, omit this parameter or use `*`.
+// API Name: id
+func (r *GetGeoipDatabase) Id(id string) *GetGeoipDatabase {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetGeoipDatabase) ErrorTrace(errortrace bool) *GetGeoipDatabase {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetGeoipDatabase) FilterPath(filterpaths ...string) *GetGeoipDatabase {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetGeoipDatabase) Human(human bool) *GetGeoipDatabase {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetGeoipDatabase) Pretty(pretty bool) *GetGeoipDatabase {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/response.go
new file mode 100644
index 000000000..b890e0ecf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase/response.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getgeoipdatabase
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getgeoipdatabase
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L25-L27
+type Response struct {
+	Databases []types.GeoipDatabaseConfigurationMetadata `json:"databases"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go
new file mode 100644
index 000000000..eaf2e79cc
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/get_ip_location_database.go
@@ -0,0 +1,355 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get IP geolocation database configurations.
+package getiplocationdatabase
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetIpLocationDatabase struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetIpLocationDatabase type alias for index.
+type NewGetIpLocationDatabase func() *GetIpLocationDatabase
+
+// NewGetIpLocationDatabaseFunc returns a new instance of GetIpLocationDatabase with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetIpLocationDatabaseFunc(tp elastictransport.Interface) NewGetIpLocationDatabase {
+	return func() *GetIpLocationDatabase {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get IP geolocation database configurations.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database
+func New(tp elastictransport.Interface) *GetIpLocationDatabase {
+	r := &GetIpLocationDatabase{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("ip_location")
+		path.WriteString("/")
+		path.WriteString("database")
+
+		method = http.MethodGet
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("ip_location")
+		path.WriteString("/")
+		path.WriteString("database")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ingest.get_ip_location_database")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ingest.get_ip_location_database")
+		if reader := instrument.RecordRequestBody(ctx, "ingest.get_ip_location_database", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ingest.get_ip_location_database")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getiplocationdatabase.Response
+func (r GetIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
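A short sketch (not part of the patch) of the IsSuccess fast path defined just below, which discards the body and reports only whether the request returned a 2xx status; client wiring assumed as in the earlier examples.

	// Sketch only; returns false without error on a 404.
	func ipLocationConfigsReachable(ctx context.Context, es *elasticsearch.TypedClient) (bool, error) {
		return es.Ingest.GetIpLocationDatabase().IsSuccess(ctx)
	}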
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetIpLocationDatabase) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_ip_location_database")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetIpLocationDatabase query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetIpLocationDatabase headers map.
+func (r *GetIpLocationDatabase) Header(key, value string) *GetIpLocationDatabase {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Comma-separated list of database configuration IDs to retrieve.
+// Wildcard (`*`) expressions are supported.
+// To get all database configurations, omit this parameter or use `*`.
+// API Name: id
+func (r *GetIpLocationDatabase) Id(id string) *GetIpLocationDatabase {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetIpLocationDatabase) ErrorTrace(errortrace bool) *GetIpLocationDatabase {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetIpLocationDatabase) FilterPath(filterpaths ...string) *GetIpLocationDatabase {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetIpLocationDatabase) Human(human bool) *GetIpLocationDatabase {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetIpLocationDatabase) Pretty(pretty bool) *GetIpLocationDatabase {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/response.go
new file mode 100644
index 000000000..bf3e6ea9b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase/response.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getiplocationdatabase
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getiplocationdatabase
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L24-L26
+type Response struct {
+	Databases []types.IpLocationDatabaseConfigurationMetadata `json:"databases"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/get_pipeline.go
new file mode 100644
index 000000000..67e4ef49f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/get_pipeline.go
@@ -0,0 +1,375 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

+// Get pipelines.
+//
+// Get information about one or more ingest pipelines.
+// This API returns a local reference of the pipeline.
+package getpipeline
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetPipeline struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetPipeline type alias for index.
+type NewGetPipeline func() *GetPipeline
+
+// NewGetPipelineFunc returns a new instance of GetPipeline with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline {
+	return func() *GetPipeline {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get pipelines.
+//
+// Get information about one or more ingest pipelines.
+// This API returns a local reference of the pipeline.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline
+func New(tp elastictransport.Interface) *GetPipeline {
+	r := &GetPipeline{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetPipeline) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("pipeline")
+
+		method = http.MethodGet
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("pipeline")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetPipeline) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ingest.get_pipeline")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ingest.get_pipeline")
+		if reader := instrument.RecordRequestBody(ctx, "ingest.get_pipeline", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ingest.get_pipeline")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetPipeline query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getpipeline.Response
+func (r GetPipeline) Do(providedCtx context.Context) (Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_pipeline")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
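Worth noting: unlike the other endpoints in this patch, GetPipeline's Do returns the Response map directly rather than a pointer, since the body is keyed by pipeline ID. A hedged sketch (not part of the patch), with the usual client-wiring assumptions:

	// Sketch only; the getpipeline Response is a map of pipeline ID to definition.
	func listPipelines(ctx context.Context, es *elasticsearch.TypedClient) error {
		pipelines, err := es.Ingest.GetPipeline().Id("logs-*").Do(ctx)
		if err != nil {
			return err
		}
		for name := range pipelines {
			fmt.Println("pipeline:", name)
		}
		return nil
	}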
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetPipeline) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.get_pipeline")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetPipeline query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetPipeline headers map.
+func (r *GetPipeline) Header(key, value string) *GetPipeline {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Comma-separated list of pipeline IDs to retrieve.
+// Wildcard (`*`) expressions are supported.
+// To get all ingest pipelines, omit this parameter or use `*`.
+// API Name: id
+func (r *GetPipeline) Id(id string) *GetPipeline {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout
+func (r *GetPipeline) MasterTimeout(duration string) *GetPipeline {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Summary Return pipelines without their definitions (default: false)
+// API name: summary
+func (r *GetPipeline) Summary(summary bool) *GetPipeline {
+	r.values.Set("summary", strconv.FormatBool(summary))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetPipeline) ErrorTrace(errortrace bool) *GetPipeline {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetPipeline) FilterPath(filterpaths ...string) *GetPipeline {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetPipeline) Human(human bool) *GetPipeline {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetPipeline) Pretty(pretty bool) *GetPipeline {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/response.go
new file mode 100644
index 000000000..66179dcf6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getpipeline
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getpipeline
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L26
+
+type Response map[string]types.IngestPipeline
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := make(Response, 0)
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/processor_grok.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/processor_grok.go
new file mode 100644
index 000000000..59a945efe
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/processor_grok.go
@@ -0,0 +1,333 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Run a grok processor.
+// Extract structured fields out of a single text field within a document.
+// You must choose which field to extract matched fields from, as well as the
+// grok pattern you expect will match.
+// A grok pattern is like a regular expression that supports aliased expressions
+// that can be reused.
+package processorgrok
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type ProcessorGrok struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewProcessorGrok type alias for index.
+type NewProcessorGrok func() *ProcessorGrok
+
+// NewProcessorGrokFunc returns a new instance of ProcessorGrok with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewProcessorGrokFunc(tp elastictransport.Interface) NewProcessorGrok {
+	return func() *ProcessorGrok {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Run a grok processor.
+// Extract structured fields out of a single text field within a document.
+// You must choose which field to extract matched fields from, as well as the
+// grok pattern you expect will match.
+// A grok pattern is like a regular expression that supports aliased expressions
+// that can be reused.
+//
+// https://www.elastic.co/docs/reference/enrich-processor/grok-processor
+func New(tp elastictransport.Interface) *ProcessorGrok {
+	r := &ProcessorGrok{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *ProcessorGrok) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("processor")
+		path.WriteString("/")
+		path.WriteString("grok")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r ProcessorGrok) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ingest.processor_grok")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ingest.processor_grok")
+		if reader := instrument.RecordRequestBody(ctx, "ingest.processor_grok", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ingest.processor_grok")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the ProcessorGrok query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a processorgrok.Response
+func (r ProcessorGrok) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.processor_grok")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
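A hedged sketch (not part of the patch) for this endpoint, which returns the built-in grok patterns; the pattern name used below is a well-known built-in, but treat it as illustrative.

	// Sketch only; Patterns maps grok pattern names to their definitions.
	func printGrokPattern(ctx context.Context, es *elasticsearch.TypedClient) error {
		res, err := es.Ingest.ProcessorGrok().Do(ctx)
		if err != nil {
			return err
		}
		fmt.Println(res.Patterns["COMBINEDAPACHELOG"])
		return nil
	}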
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r ProcessorGrok) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ingest.processor_grok")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ProcessorGrok query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the ProcessorGrok headers map.
+func (r *ProcessorGrok) Header(key, value string) *ProcessorGrok {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *ProcessorGrok) ErrorTrace(errortrace bool) *ProcessorGrok {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *ProcessorGrok) FilterPath(filterpaths ...string) *ProcessorGrok {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *ProcessorGrok) Human(human bool) *ProcessorGrok {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *ProcessorGrok) Pretty(pretty bool) *ProcessorGrok {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/response.go
new file mode 100644
index 000000000..733dab732
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok/response.go
@@ -0,0 +1,36 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package processorgrok + +// Response holds the response body struct for the package processorgrok +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 +type Response struct { + Patterns map[string]string `json:"patterns"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Patterns: make(map[string]string, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/put_geoip_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/put_geoip_database.go new file mode 100644 index 000000000..031d025f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/put_geoip_database.go @@ -0,0 +1,413 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. +package putgeoipdatabase + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutGeoipDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutGeoipDatabase type alias for index. 
+type NewPutGeoipDatabase func(id string) *PutGeoipDatabase + +// NewPutGeoipDatabaseFunc returns a new instance of PutGeoipDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutGeoipDatabaseFunc(tp elastictransport.Interface) NewPutGeoipDatabase { + return func(id string) *PutGeoipDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a GeoIP database configuration. +// +// Refer to the create or update IP geolocation database configuration API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database +func New(tp elastictransport.Interface) *PutGeoipDatabase { + r := &PutGeoipDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutGeoipDatabase) Raw(raw io.Reader) *PutGeoipDatabase { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutGeoipDatabase) Request(req *Request) *PutGeoipDatabase { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutGeoipDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutGeoipDatabase: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("geoip") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
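HttpRequest above also fixes the body-selection order: the deferred mutations run first, and a non-nil Request is marshalled into the buffer only when no Raw reader was given, so Raw wins. Because HttpRequest builds the request without sending it, it doubles as a way to inspect what would go over the wire. A short sketch, with tp any configured elastictransport.Interface and imports (context, fmt, log, strings) as in the sketch above:

r := putgeoipdatabase.NewPutGeoipDatabaseFunc(tp)("my-geoip-db").
	Raw(strings.NewReader(`{"name":"GeoLite2-City","maxmind":{"account_id":"123456"}}`))
req, err := r.HttpRequest(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(req.Method, req.URL.Path)
// PUT /_ingest/geoip/database/my-geoip-db
fmt.Println(req.Header.Get("Content-Type"))
// application/vnd.elasticsearch+json;compatible-with=9, the v9 compatibility media type set above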
+func (r PutGeoipDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_geoip_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_geoip_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_geoip_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_geoip_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutGeoipDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putgeoipdatabase.Response +func (r PutGeoipDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_geoip_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutGeoipDatabase headers map. +func (r *PutGeoipDatabase) Header(key, value string) *PutGeoipDatabase { + r.headers.Set(key, value) + + return r +} + +// Id ID of the database configuration to create or update. +// API Name: id +func (r *PutGeoipDatabase) _id(id string) *PutGeoipDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *PutGeoipDatabase) MasterTimeout(duration string) *PutGeoipDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. 
If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutGeoipDatabase) Timeout(duration string) *PutGeoipDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutGeoipDatabase) ErrorTrace(errortrace bool) *PutGeoipDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutGeoipDatabase) FilterPath(filterpaths ...string) *PutGeoipDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutGeoipDatabase) Human(human bool) *PutGeoipDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutGeoipDatabase) Pretty(pretty bool) *PutGeoipDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The configuration necessary to identify which IP geolocation provider to use +// to download the database, as well as any provider-specific configuration +// necessary for such downloading. +// At present, the only supported provider is maxmind, and the maxmind provider +// requires that an account_id (string) is configured. +// API name: maxmind +func (r *PutGeoipDatabase) Maxmind(maxmind types.MaxmindVariant) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Maxmind = *maxmind.MaxmindCaster() + + return r +} + +// The provider-assigned name of the IP geolocation database to download. +// API name: name +func (r *PutGeoipDatabase) Name(name string) *PutGeoipDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/request.go new file mode 100644 index 000000000..8f1e8aea9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/request.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
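With the Name and Maxmind setters above, the same configuration can be built fluently instead of through Raw. A sketch; it assumes the generated *types.Maxmind implements types.MaxmindVariant via its MaxmindCaster method and that the account field is named AccountId, both of which live outside this hunk:

// imports: context, fmt, log, plus putgeoipdatabase and typedapi/types
res, err := putgeoipdatabase.NewPutGeoipDatabaseFunc(tp)("my-geoip-db").
	Name("GeoLite2-City").
	Maxmind(&types.Maxmind{AccountId: "123456"}). // assumed field name, check types.Maxmind
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Acknowledged) // always true on success, per response.go below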
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgeoipdatabase + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putgeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_geoip_database/PutGeoipDatabaseRequest.ts#L25-L66 +type Request struct { + + // Maxmind The configuration necessary to identify which IP geolocation provider to use + // to download the database, as well as any provider-specific configuration + // necessary for such downloading. + // At present, the only supported provider is maxmind, and the maxmind provider + // requires that an account_id (string) is configured. + Maxmind types.Maxmind `json:"maxmind"` + // Name The provider-assigned name of the IP geolocation database to download. + Name string `json:"name"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putgeoipdatabase request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "maxmind": + if err := dec.Decode(&s.Maxmind); err != nil { + return fmt.Errorf("%s | %w", "Maxmind", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/response.go new file mode 100644 index 000000000..9eae23993 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putgeoipdatabase + +// Response holds the response body struct for the package putgeoipdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_geoip_database/PutGeoipDatabaseResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go new file mode 100644 index 000000000..3ed807ec9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/put_ip_location_database.go @@ -0,0 +1,438 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update an IP geolocation database configuration. +package putiplocationdatabase + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutIpLocationDatabase struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutIpLocationDatabase type alias for index. +type NewPutIpLocationDatabase func(id string) *PutIpLocationDatabase + +// NewPutIpLocationDatabaseFunc returns a new instance of PutIpLocationDatabase with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPutIpLocationDatabaseFunc(tp elastictransport.Interface) NewPutIpLocationDatabase { + return func(id string) *PutIpLocationDatabase { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update an IP geolocation database configuration. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database +func New(tp elastictransport.Interface) *PutIpLocationDatabase { + r := &PutIpLocationDatabase{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutIpLocationDatabase) Raw(raw io.Reader) *PutIpLocationDatabase { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutIpLocationDatabase) Request(req *Request) *PutIpLocationDatabase { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutIpLocationDatabase) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutIpLocationDatabase: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("ip_location") + path.WriteString("/") + path.WriteString("database") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
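Each Perform in these packages (ingest.put_geoip_database above, and the same shape repeats below) reaches its hooks through a type assertion to elastictransport.Instrumentation, so any transport can opt in by exposing one. A no-op implementation reconstructed from the call sites in this diff; the method set is inferred, and the upstream interface is assumed to also declare AfterResponse, which this hunk never calls, so verify against elastic-transport-go before relying on it:

package myinstr

import (
	"context"
	"io"
	"net/http"
)

// NoopInstrumentation covers the hooks the generated Perform methods invoke:
// span start/close, error recording, path and body capture, and the
// before/after request callbacks.
type NoopInstrumentation struct{}

func (NoopInstrumentation) Start(ctx context.Context, name string) context.Context { return ctx }
func (NoopInstrumentation) Close(ctx context.Context)                              {}
func (NoopInstrumentation) RecordError(ctx context.Context, err error)             {}
func (NoopInstrumentation) RecordPathPart(ctx context.Context, part, value string) {}

// Returning nil leaves req.Body untouched, per the
// "if reader := instrument.RecordRequestBody(...); reader != nil" guard in Perform.
func (NoopInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.Reader {
	return nil
}

func (NoopInstrumentation) BeforeRequest(req *http.Request, endpoint string)        {}
func (NoopInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {}

// AfterResponse is not exercised in this hunk; included on the assumption that
// the upstream interface requires it.
func (NoopInstrumentation) AfterResponse(ctx context.Context, res *http.Response) {}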
+func (r PutIpLocationDatabase) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_ip_location_database") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_ip_location_database", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_ip_location_database") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutIpLocationDatabase query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putiplocationdatabase.Response +func (r PutIpLocationDatabase) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_ip_location_database") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutIpLocationDatabase headers map. +func (r *PutIpLocationDatabase) Header(key, value string) *PutIpLocationDatabase { + r.headers.Set(key, value) + + return r +} + +// Id The database configuration identifier. +// API Name: id +func (r *PutIpLocationDatabase) _id(id string) *PutIpLocationDatabase { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// A value of `-1` indicates that the request should never time out. 
+// API name: master_timeout +func (r *PutIpLocationDatabase) MasterTimeout(duration string) *PutIpLocationDatabase { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response indicates that it was not completely +// acknowledged. +// A value of `-1` indicates that the request should never time out. +// API name: timeout +func (r *PutIpLocationDatabase) Timeout(duration string) *PutIpLocationDatabase { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutIpLocationDatabase) ErrorTrace(errortrace bool) *PutIpLocationDatabase { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutIpLocationDatabase) FilterPath(filterpaths ...string) *PutIpLocationDatabase { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutIpLocationDatabase) Human(human bool) *PutIpLocationDatabase { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutIpLocationDatabase) Pretty(pretty bool) *PutIpLocationDatabase { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: AdditionalDatabaseConfigurationProperty +// AdditionalDatabaseConfigurationProperty is a single key dictionnary. +// It will replace the current value on each call. +func (r *PutIpLocationDatabase) AdditionalDatabaseConfigurationProperty(key string, value json.RawMessage) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + tmp := make(map[string]json.RawMessage) + + tmp[key] = value + + r.req.AdditionalDatabaseConfigurationProperty = tmp + return r +} + +// API name: ipinfo +func (r *PutIpLocationDatabase) Ipinfo(ipinfo types.IpinfoVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ipinfo = ipinfo.IpinfoCaster() + + return r +} + +// API name: maxmind +func (r *PutIpLocationDatabase) Maxmind(maxmind types.MaxmindVariant) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Maxmind = maxmind.MaxmindCaster() + + return r +} + +// The provider-assigned name of the IP geolocation database to download. 
+// API name: name +func (r *PutIpLocationDatabase) Name(name string) *PutIpLocationDatabase { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/request.go new file mode 100644 index 000000000..9f31c8341 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putiplocationdatabase + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_ip_location_database/PutIpLocationDatabaseRequest.ts#L25-L62 +type Request = types.DatabaseConfiguration + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewDatabaseConfiguration() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/response.go new file mode 100644 index 000000000..735e6632a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
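Since this package's Request (just above) is a bare alias for types.DatabaseConfiguration, the quickest way to exercise the endpoint is a Raw JSON body, which sidesteps the variant plumbing entirely. A sketch; the empty ipinfo object reflects that the ipinfo provider is configured server-side, an assumption about the API rather than about this client code:

// imports: context, log, strings, plus putiplocationdatabase
res, err := putiplocationdatabase.NewPutIpLocationDatabaseFunc(tp)("my-ipinfo-asn").
	Raw(strings.NewReader(`{"name": "asn", "ipinfo": {}}`)).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
_ = res.Acknowledged // true on success, per response.go below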
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putiplocationdatabase + +// Response holds the response body struct for the package putiplocationdatabase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_ip_location_database/PutIpLocationDatabaseResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/put_pipeline.go new file mode 100644 index 000000000..0dea3ea0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/put_pipeline.go @@ -0,0 +1,480 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a pipeline. +// Changes made using this API take effect immediately. +package putpipeline + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutPipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutPipeline type alias for index. +type NewPutPipeline func(id string) *PutPipeline + +// NewPutPipelineFunc returns a new instance of PutPipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { + return func(id string) *PutPipeline { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a pipeline. +// Changes made using this API take effect immediately. 
+// +// https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines +func New(tp elastictransport.Interface) *PutPipeline { + r := &PutPipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutPipeline) Raw(raw io.Reader) *PutPipeline { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutPipeline) Request(req *Request) *PutPipeline { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutPipeline: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutPipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.put_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.put_pipeline") + if reader := instrument.RecordRequestBody(ctx, "ingest.put_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.put_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutPipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putpipeline.Response +func (r PutPipeline) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.put_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutPipeline headers map. +func (r *PutPipeline) Header(key, value string) *PutPipeline { + r.headers.Set(key, value) + + return r +} + +// Id ID of the ingest pipeline to create or update. +// API Name: id +func (r *PutPipeline) _id(id string) *PutPipeline { + r.paramSet |= idMask + r.id = id + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *PutPipeline) MasterTimeout(duration string) *PutPipeline { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. 
If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutPipeline) Timeout(duration string) *PutPipeline { + r.values.Set("timeout", duration) + + return r +} + +// IfVersion Required version for optimistic concurrency control for pipeline updates +// API name: if_version +func (r *PutPipeline) IfVersion(versionnumber string) *PutPipeline { + r.values.Set("if_version", versionnumber) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutPipeline) ErrorTrace(errortrace bool) *PutPipeline { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutPipeline) FilterPath(filterpaths ...string) *PutPipeline { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutPipeline) Human(human bool) *PutPipeline { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Marks this ingest pipeline as deprecated. +// When a deprecated ingest pipeline is referenced as the default or final +// pipeline when creating or updating a non-deprecated index template, +// Elasticsearch will emit a deprecation warning. +// API name: deprecated +func (r *PutPipeline) Deprecated(deprecated bool) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Deprecated = &deprecated + + return r +} + +// Description of the ingest pipeline. +// API name: description +func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// Optional metadata about the ingest pipeline. May have any contents. This map +// is not automatically generated by Elasticsearch. +// API name: _meta +func (r *PutPipeline) Meta_(metadata types.MetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// Processors to run immediately after a processor failure. Each processor +// supports a processor-level `on_failure` value. If a processor without an +// `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as +// a fallback. The processors in this parameter run sequentially in the order +// specified. Elasticsearch will not attempt to run the pipeline's remaining +// processors. 
+// API name: on_failure +func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range onfailures { + + r.req.OnFailure = append(r.req.OnFailure, *v.ProcessorContainerCaster()) + + } + return r +} + +// Processors used to perform transformations on documents before indexing. +// Processors run sequentially in the order specified. +// API name: processors +func (r *PutPipeline) Processors(processors ...types.ProcessorContainerVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range processors { + + r.req.Processors = append(r.req.Processors, *v.ProcessorContainerCaster()) + + } + return r +} + +// Version number used by external systems to track ingest pipelines. This +// parameter is intended for external systems only. Elasticsearch does not use +// or validate pipeline version numbers. +// API name: version +func (r *PutPipeline) Version(versionnumber int64) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &versionnumber + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/request.go new file mode 100644 index 000000000..d2eea1fcf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/request.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putpipeline + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putpipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L90 +type Request struct { + + // Deprecated Marks this ingest pipeline as deprecated. + // When a deprecated ingest pipeline is referenced as the default or final + // pipeline when creating or updating a non-deprecated index template, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` + // Description Description of the ingest pipeline. + Description *string `json:"description,omitempty"` + // Meta_ Optional metadata about the ingest pipeline. 
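The body setters above (Deprecated through Version) mirror the Request struct that follows; when a pipeline definition already exists as JSON, the Raw reader shown earlier is simpler and, as HttpRequest documents, takes precedence over a Request value. A sketch that creates a trivial pipeline:

// imports: context, fmt, log, strings, plus putpipeline
body := `{
  "description": "Tag incoming documents",
  "processors": [ { "set": { "field": "env", "value": "prod" } } ]
}`
res, err := putpipeline.NewPutPipelineFunc(tp)("tag-env").
	Raw(strings.NewReader(body)).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Acknowledged) // PUT /_ingest/pipeline/tag-env was accepted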
May have any contents. This map + // is not automatically generated by Elasticsearch. + Meta_ types.Metadata `json:"_meta,omitempty"` + // OnFailure Processors to run immediately after a processor failure. Each processor + // supports a processor-level `on_failure` value. If a processor without an + // `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as + // a fallback. The processors in this parameter run sequentially in the order + // specified. Elasticsearch will not attempt to run the pipeline's remaining + // processors. + OnFailure []types.ProcessorContainer `json:"on_failure,omitempty"` + // Processors Processors used to perform transformations on documents before indexing. + // Processors run sequentially in the order specified. + Processors []types.ProcessorContainer `json:"processors,omitempty"` + // Version Version number used by external systems to track ingest pipelines. This + // parameter is intended for external systems only. Elasticsearch does not use + // or validate pipeline version numbers. + Version *int64 `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putpipeline request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return fmt.Errorf("%s | %w", "Processors", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/response.go new file mode 100644 index 000000000..51ca75ed9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
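Worth noting what the hand-rolled UnmarshalJSON above buys: deprecated is accepted as either a JSON bool or a string (coerced with strconv.ParseBool), and description survives both quoted and unquoted raw forms. Through FromJSON this makes the struct tolerant of loosely typed pipeline definitions; a sketch:

// imports: fmt, log, plus putpipeline
req, err := putpipeline.NewRequest().FromJSON(`{
  "description": "route logs",
  "deprecated": "true",
  "processors": [ { "lowercase": { "field": "level" } } ],
  "version": 3
}`)
if err != nil {
	log.Fatal(err)
}
fmt.Println(*req.Deprecated) // true: the string form was coerced by UnmarshalJSON
// The result can then be sent with NewPutPipelineFunc(tp)("route-logs").Request(req).Do(ctx).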
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putpipeline + +// Response holds the response body struct for the package putpipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/request.go new file mode 100644 index 000000000..4845aaffb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/request.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package simulate + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package simulate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L72 +type Request struct { + + // Docs Sample documents to test in the pipeline. + Docs []types.Document `json:"docs"` + // Pipeline The pipeline to test. + // If you don't specify the `pipeline` request path parameter, this parameter is + // required. + // If you specify both this and the request path parameter, the API only uses + // the request path parameter. 
+ Pipeline *types.IngestPipeline `json:"pipeline,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Simulate request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/response.go new file mode 100644 index 000000000..6b3914bac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package simulate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package simulate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 +type Response struct { + Docs []types.SimulateDocumentResult `json:"docs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/simulate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/simulate.go new file mode 100644 index 000000000..9f4c23227 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate/simulate.go @@ -0,0 +1,418 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
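+
+// A minimal usage sketch (illustrative, not part of the generated sources):
+// the putpipeline and simulate Request types above both expose FromJSON, so a
+// raw JSON body can be loaded without populating the structs by hand. The
+// payload shown here is an assumed example:
+//
+//	req, err := simulate.NewRequest().FromJSON(
+//		`{"pipeline":{"processors":[]},"docs":[{"_source":{"message":"hello"}}]}`)
+//	if err != nil {
+//		// the body could not be deserialised into a simulate.Request
+//	}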
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. +package simulate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Simulate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSimulate type alias for index. +type NewSimulate func() *Simulate + +// NewSimulateFunc returns a new instance of Simulate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSimulateFunc(tp elastictransport.Interface) NewSimulate { + return func() *Simulate { + n := New(tp) + + return n + } +} + +// Simulate a pipeline. +// +// Run an ingest pipeline against a set of provided documents. +// You can either specify an existing pipeline to use with the provided +// documents or supply a pipeline definition in the body of the request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate +func New(tp elastictransport.Interface) *Simulate { + r := &Simulate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Simulate) Raw(raw io.Reader) *Simulate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Simulate) Request(req *Request) *Simulate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Simulate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Simulate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ingest") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_simulate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
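+//
+// Perform returns the raw *http.Response; most callers use Do, below, which
+// decodes the body into a Response. A minimal sketch, assuming a configured
+// *elasticsearch.TypedClient named es (client setup omitted; the JSON body is
+// an assumed example):
+//
+//	res, err := es.Ingest.Simulate().
+//		Raw(strings.NewReader(`{"pipeline":{"processors":[]},"docs":[{"_source":{"message":"hello"}}]}`)).
+//		Do(context.Background())
+//	// on success, res.Docs holds one result per input document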
+func (r Simulate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ingest.simulate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ingest.simulate") + if reader := instrument.RecordRequestBody(ctx, "ingest.simulate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ingest.simulate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Simulate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a simulate.Response +func (r Simulate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ingest.simulate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Simulate headers map. +func (r *Simulate) Header(key, value string) *Simulate { + r.headers.Set(key, value) + + return r +} + +// Id The pipeline to test. +// If you don't specify a `pipeline` in the request body, this parameter is +// required. +// API Name: id +func (r *Simulate) Id(id string) *Simulate { + r.paramSet |= idMask + r.id = id + + return r +} + +// Verbose If `true`, the response includes output data for each processor in the +// executed pipeline. +// API name: verbose +func (r *Simulate) Verbose(verbose bool) *Simulate { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
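+// For example, es.Ingest.Simulate().ErrorTrace(true) adds `error_trace=true`
+// to the query string (es being a typed client; the name is illustrative).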
+// API name: error_trace +func (r *Simulate) ErrorTrace(errortrace bool) *Simulate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Simulate) FilterPath(filterpaths ...string) *Simulate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Simulate) Human(human bool) *Simulate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Simulate) Pretty(pretty bool) *Simulate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Sample documents to test in the pipeline. +// API name: docs +func (r *Simulate) Docs(docs ...types.DocumentVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range docs { + + r.req.Docs = append(r.req.Docs, *v.DocumentCaster()) + + } + return r +} + +// The pipeline to test. +// If you don't specify the `pipeline` request path parameter, this parameter is +// required. +// If you specify both this and the request path parameter, the API only uses +// the request path parameter. +// API name: pipeline +func (r *Simulate) Pipeline(pipeline types.IngestPipelineVariant) *Simulate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pipeline = pipeline.IngestPipelineCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/delete.go new file mode 100644 index 000000000..cb1a2c062 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/delete.go @@ -0,0 +1,346 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete the license. 
+// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func() *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func() *Delete { + n := New(tp) + + return n + } +} + +// Delete the license. +// +// When the license expires, your subscription level reverts to Basic. +// +// If the operator privileges feature is enabled, only operator users can use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.delete") + if reader := instrument.RecordRequestBody(ctx, "license.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
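+//
+// A minimal sketch, assuming a configured *elasticsearch.TypedClient named es
+// (the name is illustrative):
+//
+//	ok, err := es.License.Delete().IsSuccess(context.Background())
+//	if err == nil && ok {
+//		// the license was removed and the subscription reverts to Basic
+//	}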
+func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/response.go new file mode 100644 index 000000000..4b4d81778 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/delete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/delete/DeleteLicenseResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/get.go new file mode 100644 index 000000000..555387fd9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/get.go @@ -0,0 +1,357 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get license information. +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func() *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func() *Get { + n := New(tp) + + return n + } +} + +// Get license information. +// +// Get information about your Elastic license including its type, its status, +// when it was issued, and when it expires. +// +// >info +// > If the master node is generating a new cluster state, the get license API +// may return a `404 Not Found` response. +// > If you receive an unexpected 404 response after cluster startup, wait a +// short period and retry the request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
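+//
+// Perform returns the raw *http.Response; callers that want the decoded
+// license typically use Do instead. A minimal sketch, assuming a configured
+// *elasticsearch.TypedClient named es (field names follow
+// types.LicenseInformation as generated):
+//
+//	res, err := es.License.Get().Do(context.Background())
+//	if err == nil {
+//		fmt.Println(res.License.Type, res.License.Status)
+//	}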
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.get") + if reader := instrument.RecordRequestBody(ctx, "license.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
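+//
+// Note that a 404 yields (false, nil) rather than an error. Per the package
+// documentation above, a transient 404 while the master node is generating a
+// new cluster state can simply be retried after a short wait:
+//
+//	ok, err := es.License.Get().IsSuccess(ctx) // ok == false, err == nil on 404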
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// AcceptEnterprise If `true`, this parameter returns enterprise for Enterprise license types. If +// `false`, this parameter returns platinum for both platinum and enterprise +// license types. This behavior is maintained for backwards compatibility. +// This parameter is deprecated and will always be set to true in 8.x. +// API name: accept_enterprise +func (r *Get) AcceptEnterprise(acceptenterprise bool) *Get { + r.values.Set("accept_enterprise", strconv.FormatBool(acceptenterprise)) + + return r +} + +// Local Specifies whether to retrieve local information. The default value is +// `false`, which means the information is retrieved from the master node. +// API name: local +func (r *Get) Local(local bool) *Get { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/response.go new file mode 100644 index 000000000..a36319a5d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/get/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/get/GetLicenseResponse.ts#L22-L24 +type Response struct { + License types.LicenseInformation `json:"license"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/get_basic_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/get_basic_status.go new file mode 100644 index 000000000..6efb84542 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/get_basic_status.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the basic license status. +package getbasicstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetBasicStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetBasicStatus type alias for index. +type NewGetBasicStatus func() *GetBasicStatus + +// NewGetBasicStatusFunc returns a new instance of GetBasicStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetBasicStatusFunc(tp elastictransport.Interface) NewGetBasicStatus { + return func() *GetBasicStatus { + n := New(tp) + + return n + } +} + +// Get the basic license status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status +func New(tp elastictransport.Interface) *GetBasicStatus { + r := &GetBasicStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetBasicStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + path.WriteString("/") + path.WriteString("basic_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
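+//
+// Do, below, decodes the body into the Response defined in response.go. For
+// example, assuming a configured *elasticsearch.TypedClient named es
+// (illustrative):
+//
+//	res, err := es.License.GetBasicStatus().Do(context.Background())
+//	if err == nil && res.EligibleToStartBasic {
+//		// a basic license may be started
+//	}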
+func (r GetBasicStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.get_basic_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.get_basic_status") + if reader := instrument.RecordRequestBody(ctx, "license.get_basic_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get_basic_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetBasicStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getbasicstatus.Response +func (r GetBasicStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_basic_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetBasicStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_basic_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetBasicStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetBasicStatus headers map. +func (r *GetBasicStatus) Header(key, value string) *GetBasicStatus { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetBasicStatus) ErrorTrace(errortrace bool) *GetBasicStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetBasicStatus) FilterPath(filterpaths ...string) *GetBasicStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetBasicStatus) Human(human bool) *GetBasicStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetBasicStatus) Pretty(pretty bool) *GetBasicStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/response.go new file mode 100644 index 000000000..93a1c0f6a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getbasicstatus + +// Response holds the response body struct for the package getbasicstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 +type Response struct { + EligibleToStartBasic bool `json:"eligible_to_start_basic"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/get_trial_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/get_trial_status.go new file mode 100644 index 000000000..3ee6f96d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/get_trial_status.go @@ -0,0 +1,321 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the trial status. +package gettrialstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTrialStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTrialStatus type alias for index. +type NewGetTrialStatus func() *GetTrialStatus + +// NewGetTrialStatusFunc returns a new instance of GetTrialStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetTrialStatusFunc(tp elastictransport.Interface) NewGetTrialStatus { + return func() *GetTrialStatus { + n := New(tp) + + return n + } +} + +// Get the trial status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status +func New(tp elastictransport.Interface) *GetTrialStatus { + r := &GetTrialStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetTrialStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + path.WriteString("/") + path.WriteString("trial_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
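+//
+// As with the other license calls, Do decodes the body for you; a minimal
+// sketch, assuming a configured *elasticsearch.TypedClient named es
+// (illustrative):
+//
+//	res, err := es.License.GetTrialStatus().Do(context.Background())
+//	if err == nil && res.EligibleToStartTrial {
+//		// a trial license may be started
+//	}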
+func (r GetTrialStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.get_trial_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.get_trial_status") + if reader := instrument.RecordRequestBody(ctx, "license.get_trial_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.get_trial_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTrialStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettrialstatus.Response +func (r GetTrialStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_trial_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetTrialStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.get_trial_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTrialStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTrialStatus headers map. +func (r *GetTrialStatus) Header(key, value string) *GetTrialStatus { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTrialStatus) ErrorTrace(errortrace bool) *GetTrialStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTrialStatus) FilterPath(filterpaths ...string) *GetTrialStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetTrialStatus) Human(human bool) *GetTrialStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetTrialStatus) Pretty(pretty bool) *GetTrialStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/response.go new file mode 100644 index 000000000..84c000dba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettrialstatus + +// Response holds the response body struct for the package gettrialstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 +type Response struct { + EligibleToStartTrial bool `json:"eligible_to_start_trial"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/post.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/post.go new file mode 100644 index 000000000..25320acc0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/post.go @@ -0,0 +1,413 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. +package post + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Post struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPost type alias for index. +type NewPost func() *Post + +// NewPostFunc returns a new instance of Post with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostFunc(tp elastictransport.Interface) NewPost { + return func() *Post { + n := New(tp) + + return n + } +} + +// Update the license. +// +// You can update your license at runtime without shutting down your nodes. +// License updates take effect immediately. +// If the license you are installing does not support all of the features that +// were available with your previous license, however, you are notified in the +// response. +// You must then re-submit the API request with the acknowledge parameter set to +// true. +// +// NOTE: If Elasticsearch security features are enabled and you are installing a +// gold or higher license, you must enable TLS on the transport networking layer +// before you install the license. +// If the operator privileges feature is enabled, only operator users can use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post +func New(tp elastictransport.Interface) *Post { + r := &Post{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Post) Raw(raw io.Reader) *Post { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Post) Request(req *Request) *Post { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
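+//
+// A hedged sketch, assuming tp is a configured transport and lic is a
+// *types.License built elsewhere (both hypothetical; this assumes
+// *types.License satisfies types.LicenseVariant, as the generated types do):
+//
+//	req, err := post.New(tp).License(lic).HttpRequest(ctx)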
+func (r *Post) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Post: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Post) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.post") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.post") + if reader := instrument.RecordRequestBody(ctx, "license.post", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Post query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a post.Response +func (r Post) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + 
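+			// Decoding a 2xx body failed: record the error on the
+			// active instrumentation span, if any, then surface it.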
if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Post headers map. +func (r *Post) Header(key, value string) *Post { + r.headers.Set(key, value) + + return r +} + +// Acknowledge Specifies whether you acknowledge the license changes. +// API name: acknowledge +func (r *Post) Acknowledge(acknowledge bool) *Post { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// API name: master_timeout +func (r *Post) MasterTimeout(duration string) *Post { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *Post) Timeout(duration string) *Post { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Post) ErrorTrace(errortrace bool) *Post { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Post) FilterPath(filterpaths ...string) *Post { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Post) Human(human bool) *Post { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Post) Pretty(pretty bool) *Post { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: license +func (r *Post) License(license types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.License = license.LicenseCaster() + + return r +} + +// A sequence of one or more JSON documents containing the license information. 
+// API name: licenses +func (r *Post) Licenses(licenses ...types.LicenseVariant) *Post { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range licenses { + + r.req.Licenses = append(r.req.Licenses, *v.LicenseCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/request.go new file mode 100644 index 000000000..490ce033d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package post + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package post +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/post/PostLicenseRequest.ts#L24-L70 +type Request struct { + License *types.License `json:"license,omitempty"` + // Licenses A sequence of one or more JSON documents containing the license information. + Licenses []types.License `json:"licenses,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Post request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/response.go new file mode 100644 index 000000000..b18cc0437 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/post/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package post + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus" +) + +// Response holds the response body struct for the package post +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/post/PostLicenseResponse.ts#L23-L29 +type Response struct { + Acknowledge *types.Acknowledgement `json:"acknowledge,omitempty"` + Acknowledged bool `json:"acknowledged"` + LicenseStatus licensestatus.LicenseStatus `json:"license_status"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/post_start_basic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/post_start_basic.go new file mode 100644 index 000000000..c2e0e1dc0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/post_start_basic.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start a basic license. +// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. 
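+//
+// A hypothetical usage sketch (tp is a configured elastictransport.Interface,
+// not part of this package):
+//
+//	res, err := poststartbasic.New(tp).Acknowledge(true).Do(ctx)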
+package poststartbasic + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostStartBasic struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostStartBasic type alias for index. +type NewPostStartBasic func() *PostStartBasic + +// NewPostStartBasicFunc returns a new instance of PostStartBasic with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostStartBasicFunc(tp elastictransport.Interface) NewPostStartBasic { + return func() *PostStartBasic { + n := New(tp) + + return n + } +} + +// Start a basic license. +// +// Start an indefinite basic license, which gives access to all the basic +// features. +// +// NOTE: In order to start a basic license, you must not currently have a basic +// license. +// +// If the basic license does not support all of the features that are available +// with your current license, however, you are notified in the response. +// You must then re-submit the API request with the `acknowledge` parameter set +// to `true`. +// +// To check the status of your basic license, use the get basic license API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic +func New(tp elastictransport.Interface) *PostStartBasic { + r := &PostStartBasic{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PostStartBasic) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + path.WriteString("/") + path.WriteString("start_basic") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
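+//
+// Note that when Perform is reached via Do or IsSuccess, spanStarted is
+// already true, so no second instrumentation span is opened for the same call.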
+func (r PostStartBasic) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.post_start_basic") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.post_start_basic") + if reader := instrument.RecordRequestBody(ctx, "license.post_start_basic", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post_start_basic") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostStartBasic query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a poststartbasic.Response +func (r PostStartBasic) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_basic") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r PostStartBasic) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_basic") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PostStartBasic query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PostStartBasic headers map. +func (r *PostStartBasic) Header(key, value string) *PostStartBasic { + r.headers.Set(key, value) + + return r +} + +// Acknowledge whether the user has acknowledged acknowledge messages (default: false) +// API name: acknowledge +func (r *PostStartBasic) Acknowledge(acknowledge bool) *PostStartBasic { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartBasic) MasterTimeout(duration string) *PostStartBasic { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PostStartBasic) Timeout(duration string) *PostStartBasic { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostStartBasic) ErrorTrace(errortrace bool) *PostStartBasic { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostStartBasic) FilterPath(filterpaths ...string) *PostStartBasic { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostStartBasic) Human(human bool) *PostStartBasic { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *PostStartBasic) Pretty(pretty bool) *PostStartBasic { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/response.go new file mode 100644 index 000000000..1fbdf732f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic/response.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package poststartbasic + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype" +) + +// Response holds the response body struct for the package poststartbasic +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 +type Response struct { + Acknowledge map[string][]string `json:"acknowledge,omitempty"` + Acknowledged bool `json:"acknowledged"` + BasicWasStarted bool `json:"basic_was_started"` + ErrorMessage *string `json:"error_message,omitempty"` + Type *licensetype.LicenseType `json:"type,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Acknowledge: make(map[string][]string, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "acknowledge": + if s.Acknowledge == nil { + s.Acknowledge = make(map[string][]string, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := new(string) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Acknowledge", err) + } + s.Acknowledge[key] = append(s.Acknowledge[key], *o) + default: + o := []string{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Acknowledge", err) + } + s.Acknowledge[key] = o + } + } + + case "acknowledged": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Acknowledged", err) + } + 
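+				// The server may encode this flag as the string "true" or
+				// "false"; store the parsed boolean either way.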
s.Acknowledged = value + case bool: + s.Acknowledged = v + } + + case "basic_was_started": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BasicWasStarted", err) + } + s.BasicWasStarted = value + case bool: + s.BasicWasStarted = v + } + + case "error_message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ErrorMessage", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ErrorMessage = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/post_start_trial.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/post_start_trial.go new file mode 100644 index 000000000..7240ae798 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/post_start_trial.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. +// +// To check the status of your trial, use the get trial status API. +package poststarttrial + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostStartTrial struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostStartTrial type alias for index. +type NewPostStartTrial func() *PostStartTrial + +// NewPostStartTrialFunc returns a new instance of PostStartTrial with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostStartTrialFunc(tp elastictransport.Interface) NewPostStartTrial { + return func() *PostStartTrial { + n := New(tp) + + return n + } +} + +// Start a trial. +// Start a 30-day trial, which gives access to all subscription features. +// +// NOTE: You are allowed to start a trial only if your cluster has not already +// activated a trial for the current major product version. +// For example, if you have already activated a trial for v8.0, you cannot start +// a new trial until v9.0. You can, however, request an extended trial at +// https://www.elastic.co/trialextension. +// +// To check the status of your trial, use the get trial status API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial +func New(tp elastictransport.Interface) *PostStartTrial { + r := &PostStartTrial{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PostStartTrial) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_license") + path.WriteString("/") + path.WriteString("start_trial") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PostStartTrial) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "license.post_start_trial") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "license.post_start_trial") + if reader := instrument.RecordRequestBody(ctx, "license.post_start_trial", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "license.post_start_trial") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostStartTrial query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a poststarttrial.Response +func (r PostStartTrial) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_trial") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
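+//
+// For example (hypothetical tp, as in the other packages in this change):
+//
+//	ok, err := poststarttrial.New(tp).Acknowledge(true).IsSuccess(ctx)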
+func (r PostStartTrial) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "license.post_start_trial") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PostStartTrial query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PostStartTrial headers map. +func (r *PostStartTrial) Header(key, value string) *PostStartTrial { + r.headers.Set(key, value) + + return r +} + +// Acknowledge whether the user has acknowledged acknowledge messages (default: false) +// API name: acknowledge +func (r *PostStartTrial) Acknowledge(acknowledge bool) *PostStartTrial { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) + + return r +} + +// Type The type of trial license to generate (default: "trial") +// API name: type +func (r *PostStartTrial) Type(type_ string) *PostStartTrial { + r.values.Set("type", type_) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *PostStartTrial) MasterTimeout(duration string) *PostStartTrial { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostStartTrial) ErrorTrace(errortrace bool) *PostStartTrial { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostStartTrial) FilterPath(filterpaths ...string) *PostStartTrial { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostStartTrial) Human(human bool) *PostStartTrial { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *PostStartTrial) Pretty(pretty bool) *PostStartTrial { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/response.go new file mode 100644 index 000000000..52064635b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package poststarttrial + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype" +) + +// Response holds the response body struct for the package poststarttrial +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 +type Response struct { + Acknowledged bool `json:"acknowledged"` + ErrorMessage *string `json:"error_message,omitempty"` + TrialWasStarted bool `json:"trial_was_started"` + Type *licensetype.LicenseType `json:"type,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/deletepipeline/delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/deletepipeline/delete_pipeline.go new file mode 100644 index 000000000..3a0673140 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/deletepipeline/delete_pipeline.go @@ -0,0 +1,299 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. +// If the request succeeds, you receive an empty response with an appropriate +// status code. +package deletepipeline + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeletePipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeletePipeline type alias for index. +type NewDeletePipeline func(id string) *DeletePipeline + +// NewDeletePipelineFunc returns a new instance of DeletePipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { + return func(id string) *DeletePipeline { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete a Logstash pipeline. +// Delete a pipeline that is used for Logstash Central Management. +// If the request succeeds, you receive an empty response with an appropriate +// status code. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline +func New(tp elastictransport.Interface) *DeletePipeline { + r := &DeletePipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeletePipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
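+//
+// The pipeline ID is required, so a sketch goes through the exported
+// constructor (tp and the pipeline ID are hypothetical):
+//
+//	res, err := deletepipeline.NewDeletePipelineFunc(tp)("my-pipeline").Perform(ctx)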
+func (r DeletePipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "logstash.delete_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.delete_pipeline") + if reader := instrument.RecordRequestBody(ctx, "logstash.delete_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "logstash.delete_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeletePipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletepipeline.Response +func (r DeletePipeline) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeletePipeline) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.delete_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeletePipeline query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeletePipeline headers map. +func (r *DeletePipeline) Header(key, value string) *DeletePipeline { + r.headers.Set(key, value) + + return r +} + +// Id An identifier for the pipeline. +// API Name: id +func (r *DeletePipeline) _id(id string) *DeletePipeline { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeletePipeline) ErrorTrace(errortrace bool) *DeletePipeline { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DeletePipeline) FilterPath(filterpaths ...string) *DeletePipeline { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeletePipeline) Human(human bool) *DeletePipeline { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeletePipeline) Pretty(pretty bool) *DeletePipeline { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/get_pipeline.go new file mode 100644 index 000000000..40c248f54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/get_pipeline.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. +package getpipeline + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetPipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetPipeline type alias for index. +type NewGetPipeline func() *GetPipeline + +// NewGetPipelineFunc returns a new instance of GetPipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { + return func() *GetPipeline { + n := New(tp) + + return n + } +} + +// Get Logstash pipelines. +// Get pipelines that are used for Logstash Central Management. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline +func New(tp elastictransport.Interface) *GetPipeline { + r := &GetPipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetPipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "logstash.get_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.get_pipeline") + if reader := instrument.RecordRequestBody(ctx, "logstash.get_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "logstash.get_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetPipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getpipeline.Response +func (r GetPipeline) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.get_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
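A usage sketch for this endpoint; unlike the delete endpoint, Do decodes a typed body, the map type declared in this package's response.go further down in the diff. Client setup and ids are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	// Omitting Id() lists every pipeline; Id("a,b") narrows to specific ones,
	// matching the two paramSet branches in HttpRequest above.
	pipelines, err := es.Logstash.GetPipeline().Id("my-pipeline").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Response is a map of pipeline id -> types.LogstashPipeline.
	for id, p := range pipelines {
		fmt.Printf("%s last modified by %s\n", id, p.Username)
	}
}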
+func (r GetPipeline) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "logstash.get_pipeline") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetPipeline query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetPipeline headers map. +func (r *GetPipeline) Header(key, value string) *GetPipeline { + r.headers.Set(key, value) + + return r +} + +// Id A comma-separated list of pipeline identifiers. +// API Name: id +func (r *GetPipeline) Id(id string) *GetPipeline { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetPipeline) ErrorTrace(errortrace bool) *GetPipeline { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetPipeline) FilterPath(filterpaths ...string) *GetPipeline { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetPipeline) Human(human bool) *GetPipeline { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetPipeline) Pretty(pretty bool) *GetPipeline { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/response.go new file mode 100644 index 000000000..757529ea5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getpipeline + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getpipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27 + +type Response map[string]types.LogstashPipeline + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/put_pipeline.go new file mode 100644 index 000000000..4efe5377d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/put_pipeline.go @@ -0,0 +1,392 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. +package putpipeline + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutPipeline struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutPipeline type alias for index. 
+type NewPutPipeline func(id string) *PutPipeline + +// NewPutPipelineFunc returns a new instance of PutPipeline with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { + return func(id string) *PutPipeline { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a Logstash pipeline. +// +// Create a pipeline that is used for Logstash Central Management. +// If the specified pipeline exists, it is replaced. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline +func New(tp elastictransport.Interface) *PutPipeline { + r := &PutPipeline{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutPipeline) Raw(raw io.Reader) *PutPipeline { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutPipeline) Request(req *Request) *PutPipeline { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutPipeline: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
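A sketch of creating a pipeline with this builder, using the fluent body setters defined further down in this file; the setters lazily allocate the request body (types.LogstashPipeline), and HttpRequest serialises it as JSON as shown below. Since the endpoint returns an empty body, the example inspects the raw status from Perform. All names and the pipeline definition are illustrative.

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	// Description/Pipeline/Username are among the generated setters below;
	// the full request also supports last_modified, pipeline_metadata, and
	// pipeline_settings.
	res, err := es.Logstash.PutPipeline("my-pipeline").
		Description("sample pipeline").
		Pipeline("input { stdin {} } output { stdout {} }").
		Username("elastic").
		Perform(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Printf("put pipeline status: %d", res.StatusCode)
}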
+func (r PutPipeline) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "logstash.put_pipeline") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "logstash.put_pipeline") + if reader := instrument.RecordRequestBody(ctx, "logstash.put_pipeline", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "logstash.put_pipeline") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutPipeline query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Header set a key, value pair in the PutPipeline headers map. +func (r *PutPipeline) Header(key, value string) *PutPipeline { + r.headers.Set(key, value) + + return r +} + +// Id An identifier for the pipeline. +// API Name: id +func (r *PutPipeline) _id(id string) *PutPipeline { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutPipeline) ErrorTrace(errortrace bool) *PutPipeline { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutPipeline) FilterPath(filterpaths ...string) *PutPipeline { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutPipeline) Human(human bool) *PutPipeline { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutPipeline) Pretty(pretty bool) *PutPipeline { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A description of the pipeline. +// This description is not used by Elasticsearch or Logstash. +// API name: description +func (r *PutPipeline) Description(description string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = description + + return r +} + +// The date the pipeline was last updated. +// It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. 
+// API name: last_modified +func (r *PutPipeline) LastModified(datetime types.DateTimeVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LastModified = *datetime.DateTimeCaster() + + return r +} + +// The configuration for the pipeline. +// API name: pipeline +func (r *PutPipeline) Pipeline(pipeline string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pipeline = pipeline + + return r +} + +// Optional metadata about the pipeline, which can have any contents. +// This metadata is not generated or used by Elasticsearch or Logstash. +// API name: pipeline_metadata +func (r *PutPipeline) PipelineMetadata(pipelinemetadata types.PipelineMetadataVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PipelineMetadata = *pipelinemetadata.PipelineMetadataCaster() + + return r +} + +// Settings for the pipeline. +// It supports only flat keys in dot notation. +// API name: pipeline_settings +func (r *PutPipeline) PipelineSettings(pipelinesettings types.PipelineSettingsVariant) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PipelineSettings = *pipelinesettings.PipelineSettingsCaster() + + return r +} + +// The user who last updated the pipeline. +// API name: username +func (r *PutPipeline) Username(username string) *PutPipeline { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/request.go new file mode 100644 index 000000000..eeb6b7e0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putpipeline + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putpipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L51 +type Request = types.LogstashPipeline + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewLogstashPipeline() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/deprecations.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/deprecations.go new file mode 100644 index 000000000..bbddc112c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/deprecations.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get deprecation information. +// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. +package deprecations + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Deprecations struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeprecations type alias for index. +type NewDeprecations func() *Deprecations + +// NewDeprecationsFunc returns a new instance of Deprecations with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeprecationsFunc(tp elastictransport.Interface) NewDeprecations { + return func() *Deprecations { + n := New(tp) + + return n + } +} + +// Get deprecation information. 
+// Get information about different cluster, node, and index level settings that +// use deprecated features that will be removed or changed in the next major +// version. +// +// TIP: This APIs is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations +func New(tp elastictransport.Interface) *Deprecations { + r := &Deprecations{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Deprecations) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("deprecations") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("deprecations") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Deprecations) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "migration.deprecations") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "migration.deprecations") + if reader := instrument.RecordRequestBody(ctx, "migration.deprecations", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "migration.deprecations") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Deprecations query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deprecations.Response +func (r Deprecations) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.deprecations") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
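A sketch of a cluster-wide deprecation check; field names follow the Response struct declared in this package's response.go below, and the types.Deprecation fields used here (Level, Message) are assumptions based on the deprecation API. Chain .Index("my-index-*") to scope the call to specific data streams or indices.

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	resp, err := es.Migration.Deprecations().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Cluster-level warnings are a flat list; index warnings are keyed by index.
	for _, d := range resp.ClusterSettings {
		fmt.Printf("[%v] %s\n", d.Level, d.Message)
	}
	for index, ds := range resp.IndexSettings {
		fmt.Printf("%s: %d deprecation(s)\n", index, len(ds))
	}
}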
+func (r Deprecations) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.deprecations") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Deprecations query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Deprecations headers map. +func (r *Deprecations) Header(key, value string) *Deprecations { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separate list of data streams or indices to check. Wildcard (*) +// expressions are supported. +// API Name: index +func (r *Deprecations) Index(index string) *Deprecations { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Deprecations) ErrorTrace(errortrace bool) *Deprecations { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Deprecations) FilterPath(filterpaths ...string) *Deprecations { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Deprecations) Human(human bool) *Deprecations { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Deprecations) Pretty(pretty bool) *Deprecations { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/response.go new file mode 100644 index 000000000..5d602de53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations/response.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deprecations + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package deprecations +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L54 +type Response struct { + + // ClusterSettings Cluster-level deprecation warnings. + ClusterSettings []types.Deprecation `json:"cluster_settings"` + DataStreams map[string][]types.Deprecation `json:"data_streams"` + // IlmPolicies ILM policy warnings are sectioned off per policy. + IlmPolicies map[string][]types.Deprecation `json:"ilm_policies"` + // IndexSettings Index warnings are sectioned off per index and can be filtered using an + // index-pattern in the query. + // This section includes warnings for the backing indices of data streams + // specified in the request path. + IndexSettings map[string][]types.Deprecation `json:"index_settings"` + // MlSettings Machine learning-related deprecation warnings. + MlSettings []types.Deprecation `json:"ml_settings"` + // NodeSettings Node-level deprecation warnings. + // Since only a subset of your nodes might incorporate these settings, it is + // important to read the details section for more information about which nodes + // are affected. + NodeSettings []types.Deprecation `json:"node_settings"` + // Templates Template warnings are sectioned off per template and include deprecations for + // both component templates and + // index templates. + Templates map[string][]types.Deprecation `json:"templates"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + DataStreams: make(map[string][]types.Deprecation, 0), + IlmPolicies: make(map[string][]types.Deprecation, 0), + IndexSettings: make(map[string][]types.Deprecation, 0), + Templates: make(map[string][]types.Deprecation, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go new file mode 100644 index 000000000..10b2d6b1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get feature migration information. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. +package getfeatureupgradestatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetFeatureUpgradeStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetFeatureUpgradeStatus type alias for index. +type NewGetFeatureUpgradeStatus func() *GetFeatureUpgradeStatus + +// NewGetFeatureUpgradeStatusFunc returns a new instance of GetFeatureUpgradeStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFeatureUpgradeStatusFunc(tp elastictransport.Interface) NewGetFeatureUpgradeStatus { + return func() *GetFeatureUpgradeStatus { + n := New(tp) + + return n + } +} + +// Get feature migration information. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// Check which features need to be migrated and the status of any migrations +// that are in progress. +// +// TIP: This API is designed for indirect use by the Upgrade Assistant. +// You are strongly recommended to use the Upgrade Assistant. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status +func New(tp elastictransport.Interface) *GetFeatureUpgradeStatus { + r := &GetFeatureUpgradeStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
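Every endpoint in this diff repeats the same hook pattern: if the transport implements elastictransport.Instrumented, New() captures its Instrumentation, and Perform/Do/IsSuccess call Start/RecordError/RecordPathPart/RecordRequestBody/BeforeRequest/AfterRequest around the round trip. A sketch of a custom logging instrumentation wired through the client config follows; the method set and the Config.Instrumentation field are inferred from these call sites, so treat the signatures as assumptions rather than the library's documented API.

package main

import (
	"context"
	"io"
	"log"
	"net/http"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// logInstr logs the lifecycle hooks that the generated endpoints invoke.
// The method set mirrors the elastictransport.Instrumentation call sites
// visible in this diff.
type logInstr struct{}

func (logInstr) Start(ctx context.Context, name string) context.Context {
	log.Printf("span start: %s", name)
	return ctx
}
func (logInstr) Close(ctx context.Context)                  {}
func (logInstr) RecordError(ctx context.Context, err error) { log.Printf("error: %v", err) }
func (logInstr) RecordPathPart(ctx context.Context, part, value string) {
	log.Printf("path %s=%s", part, value)
}
func (logInstr) RecordRequestBody(ctx context.Context, endpoint string, query io.Reader) io.ReadCloser {
	return nil // nil leaves the original request body untouched, per the generated callers
}
func (logInstr) BeforeRequest(req *http.Request, endpoint string) {}
func (logInstr) AfterRequest(req *http.Request, system, endpoint string) {
	log.Printf("%s %s done", system, endpoint)
}

func main() {
	// Config.Instrumentation is assumed here; an OpenTelemetry-based
	// instrumentation also ships with the client as an alternative.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses:       []string{"http://localhost:9200"}, // illustrative
		Instrumentation: logInstr{},
	})
	if err != nil {
		log.Fatal(err)
	}
	_, _ = es.Migration.GetFeatureUpgradeStatus().Do(context.Background())
}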
+func (r *GetFeatureUpgradeStatus) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_migration")
+		path.WriteString("/")
+		path.WriteString("system_features")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetFeatureUpgradeStatus) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "migration.get_feature_upgrade_status")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "migration.get_feature_upgrade_status")
+		if reader := instrument.RecordRequestBody(ctx, "migration.get_feature_upgrade_status", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "migration.get_feature_upgrade_status")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetFeatureUpgradeStatus query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getfeatureupgradestatus.Response
+func (r GetFeatureUpgradeStatus) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "migration.get_feature_upgrade_status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetFeatureUpgradeStatus) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "migration.get_feature_upgrade_status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetFeatureUpgradeStatus query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetFeatureUpgradeStatus headers map.
+func (r *GetFeatureUpgradeStatus) Header(key, value string) *GetFeatureUpgradeStatus {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetFeatureUpgradeStatus) ErrorTrace(errortrace bool) *GetFeatureUpgradeStatus {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetFeatureUpgradeStatus) FilterPath(filterpaths ...string) *GetFeatureUpgradeStatus {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetFeatureUpgradeStatus) Human(human bool) *GetFeatureUpgradeStatus {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty +func (r *GetFeatureUpgradeStatus) Pretty(pretty bool) *GetFeatureUpgradeStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/response.go new file mode 100644 index 000000000..9e35d1b25 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getfeatureupgradestatus + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus" +) + +// Response holds the response body struct for the package getfeatureupgradestatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 +type Response struct { + Features []types.GetMigrationFeature `json:"features"` + MigrationStatus migrationstatus.MigrationStatus `json:"migration_status"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go new file mode 100644 index 000000000..4db8f8f44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go @@ -0,0 +1,339 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
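With the status response type above in hand, checking whether system features still need migration is a short loop. The GetMigrationFeature field names used here (FeatureName, MigrationStatus) are assumptions about the types package; client setup is illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	status, err := es.Migration.GetFeatureUpgradeStatus().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Overall status plus the per-feature breakdown from Response.Features.
	fmt.Println("overall:", status.MigrationStatus)
	for _, f := range status.Features {
		fmt.Println("feature:", f.FeatureName, "->", f.MigrationStatus)
	}
}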
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. We +// strongly recommend you use the Upgrade Assistant. +package postfeatureupgrade + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostFeatureUpgrade struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostFeatureUpgrade type alias for index. +type NewPostFeatureUpgrade func() *PostFeatureUpgrade + +// NewPostFeatureUpgradeFunc returns a new instance of PostFeatureUpgrade with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostFeatureUpgradeFunc(tp elastictransport.Interface) NewPostFeatureUpgrade { + return func() *PostFeatureUpgrade { + n := New(tp) + + return n + } +} + +// Start the feature migration. +// Version upgrades sometimes require changes to how features store +// configuration information and data in system indices. +// This API starts the automatic migration process. +// +// Some functionality might be temporarily unavailable during the migration +// process. +// +// TIP: The API is designed for indirect use by the Upgrade Assistant. We +// strongly recommend you use the Upgrade Assistant. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status +func New(tp elastictransport.Interface) *PostFeatureUpgrade { + r := &PostFeatureUpgrade{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
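A sketch of kicking off the migration this endpoint exposes, paired with the status endpoint above for polling; the response struct lives in this package's response.go (not shown in this hunk), so the example only checks the error. Client setup is illustrative, and the generated request plumbing continues below.

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}

	// Starts the system-feature migration; poll GetFeatureUpgradeStatus
	// afterwards to watch progress.
	if _, err := es.Migration.PostFeatureUpgrade().Do(context.Background()); err != nil {
		log.Fatal(err)
	}
	log.Println("feature migration started")
}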
+func (r *PostFeatureUpgrade) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_migration") + path.WriteString("/") + path.WriteString("system_features") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PostFeatureUpgrade) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "migration.post_feature_upgrade") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "migration.post_feature_upgrade") + if reader := instrument.RecordRequestBody(ctx, "migration.post_feature_upgrade", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "migration.post_feature_upgrade") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostFeatureUpgrade query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postfeatureupgrade.Response +func (r PostFeatureUpgrade) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.post_feature_upgrade") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PostFeatureUpgrade) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "migration.post_feature_upgrade") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PostFeatureUpgrade query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PostFeatureUpgrade headers map. +func (r *PostFeatureUpgrade) Header(key, value string) *PostFeatureUpgrade { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostFeatureUpgrade) ErrorTrace(errortrace bool) *PostFeatureUpgrade { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostFeatureUpgrade) FilterPath(filterpaths ...string) *PostFeatureUpgrade { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostFeatureUpgrade) Human(human bool) *PostFeatureUpgrade { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *PostFeatureUpgrade) Pretty(pretty bool) *PostFeatureUpgrade { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/response.go new file mode 100644 index 000000000..7b5dd3375 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postfeatureupgrade + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package postfeatureupgrade +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L26 +type Response struct { + Accepted bool `json:"accepted"` + Features []types.PostMigrationFeature `json:"features,omitempty"` + Reason *string `json:"reason,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go new file mode 100644 index 000000000..6278c22ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear trained model deployment cache. +// +// Cache will be cleared on all nodes where the trained model is assigned. +// A trained model deployment may have an inference cache enabled. +// As requests are handled by each allocated node, their responses may be cached +// on that individual node. +// Calling this API clears the caches without restarting the deployment. +package cleartrainedmodeldeploymentcache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearTrainedModelDeploymentCache struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearTrainedModelDeploymentCache type alias for index. +type NewClearTrainedModelDeploymentCache func(modelid string) *ClearTrainedModelDeploymentCache + +// NewClearTrainedModelDeploymentCacheFunc returns a new instance of ClearTrainedModelDeploymentCache with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearTrainedModelDeploymentCacheFunc(tp elastictransport.Interface) NewClearTrainedModelDeploymentCache { + return func(modelid string) *ClearTrainedModelDeploymentCache { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Clear trained model deployment cache. +// +// Cache will be cleared on all nodes where the trained model is assigned. +// A trained model deployment may have an inference cache enabled. +// As requests are handled by each allocated node, their responses may be cached +// on that individual node. +// Calling this API clears the caches without restarting the deployment. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache +func New(tp elastictransport.Interface) *ClearTrainedModelDeploymentCache { + r := &ClearTrainedModelDeploymentCache{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
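Reusing the assumed `es` typed client and `ctx` from the migration sketch above, a short illustrative fragment for this endpoint; the model ID is hypothetical, and the `Ml` namespace accessor is assumed from the typed client index.

// Clear the inference cache on every node the deployment is allocated to.
res, err := es.Ml.ClearTrainedModelDeploymentCache("my-elser-model").Do(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("cleared:", res.Cleared)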
+func (r *ClearTrainedModelDeploymentCache) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("_clear") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearTrainedModelDeploymentCache) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.clear_trained_model_deployment_cache") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.clear_trained_model_deployment_cache") + if reader := instrument.RecordRequestBody(ctx, "ml.clear_trained_model_deployment_cache", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.clear_trained_model_deployment_cache") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearTrainedModelDeploymentCache query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cleartrainedmodeldeploymentcache.Response +func (r ClearTrainedModelDeploymentCache) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.clear_trained_model_deployment_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ClearTrainedModelDeploymentCache) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.clear_trained_model_deployment_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearTrainedModelDeploymentCache query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearTrainedModelDeploymentCache headers map. +func (r *ClearTrainedModelDeploymentCache) Header(key, value string) *ClearTrainedModelDeploymentCache { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *ClearTrainedModelDeploymentCache) _modelid(modelid string) *ClearTrainedModelDeploymentCache { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearTrainedModelDeploymentCache) ErrorTrace(errortrace bool) *ClearTrainedModelDeploymentCache { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearTrainedModelDeploymentCache) FilterPath(filterpaths ...string) *ClearTrainedModelDeploymentCache { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearTrainedModelDeploymentCache) Human(human bool) *ClearTrainedModelDeploymentCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearTrainedModelDeploymentCache) Pretty(pretty bool) *ClearTrainedModelDeploymentCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/response.go new file mode 100644 index 000000000..858034385 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package cleartrainedmodeldeploymentcache + +// Response holds the response body struct for the package cleartrainedmodeldeploymentcache +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 +type Response struct { + Cleared bool `json:"cleared"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/close_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/close_job.go new file mode 100644 index 000000000..c2f9560a0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/close_job.go @@ -0,0 +1,437 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Close anomaly detection jobs. +// +// A job can be opened and closed multiple times throughout its lifecycle. A +// closed job cannot receive data or perform analysis operations, but you can +// still explore and navigate results. +// When you close a job, it runs housekeeping tasks such as pruning the model +// history, flushing buffers, calculating final results and persisting the model +// snapshots. Depending upon the size of the job, it could take several minutes +// to close and the equivalent time to re-open. After it is closed, the job has +// a minimal overhead on the cluster except for maintaining its meta data. +// Therefore it is a best practice to close jobs that are no longer required to +// process data. +// If you close an anomaly detection job whose datafeed is running, the request +// first tries to stop the datafeed. This behavior is equivalent to calling stop +// datafeed API with the same timeout and force parameters as the close job +// request. +// When a datafeed that has a specified end date stops, it automatically closes +// its associated job. +package closejob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CloseJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCloseJob type alias for index. +type NewCloseJob func(jobid string) *CloseJob + +// NewCloseJobFunc returns a new instance of CloseJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCloseJobFunc(tp elastictransport.Interface) NewCloseJob { + return func(jobid string) *CloseJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Close anomaly detection jobs. +// +// A job can be opened and closed multiple times throughout its lifecycle. A +// closed job cannot receive data or perform analysis operations, but you can +// still explore and navigate results. +// When you close a job, it runs housekeeping tasks such as pruning the model +// history, flushing buffers, calculating final results and persisting the model +// snapshots. Depending upon the size of the job, it could take several minutes +// to close and the equivalent time to re-open. 
After it is closed, the job has +// a minimal overhead on the cluster except for maintaining its meta data. +// Therefore it is a best practice to close jobs that are no longer required to +// process data. +// If you close an anomaly detection job whose datafeed is running, the request +// first tries to stop the datafeed. This behavior is equivalent to calling stop +// datafeed API with the same timeout and force parameters as the close job +// request. +// When a datafeed that has a specified end date stops, it automatically closes +// its associated job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job +func New(tp elastictransport.Interface) *CloseJob { + r := &CloseJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CloseJob) Raw(raw io.Reader) *CloseJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CloseJob) Request(req *Request) *CloseJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CloseJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CloseJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_close") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
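A hedged usage fragment for this builder, again assuming the `es` typed client and `ctx` from earlier; the job ID is hypothetical. `Force` is one of the body setters defined near the end of this file, and the same body can be supplied as raw JSON through the `Request` and `FromJSON` helpers in this package's request.go, whose custom UnmarshalJSON also coerces `"force": "true"` from a string to a boolean.

// Fluent setters populate the request body.
res, err := es.Ml.CloseJob("my-anomaly-job").Force(true).Do(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("closed:", res.Closed)

// Equivalent, loading the body from raw JSON instead of fluent setters.
req, err := closejob.NewRequest().FromJSON(`{"force": "true", "timeout": "30s"}`)
if err != nil {
	log.Fatal(err)
}
res, err = es.Ml.CloseJob("my-anomaly-job").Request(req).Do(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("closed:", res.Closed)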
+func (r CloseJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.close_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.close_job") + if reader := instrument.RecordRequestBody(ctx, "ml.close_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.close_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CloseJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a closejob.Response +func (r CloseJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.close_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CloseJob headers map. +func (r *CloseJob) Header(key, value string) *CloseJob { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. It can be a job identifier, a group +// name, or a wildcard expression. You can close multiple anomaly detection jobs +// in a single API request by using a group name, a comma-separated list of +// jobs, or a wildcard expression. You can close all jobs by using `_all` or by +// specifying `*` as the job identifier. +// API Name: jobid +func (r *CloseJob) _jobid(jobid string) *CloseJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace
+func (r *CloseJob) ErrorTrace(errortrace bool) *CloseJob {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *CloseJob) FilterPath(filterpaths ...string) *CloseJob {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *CloseJob) Human(human bool) *CloseJob {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *CloseJob) Pretty(pretty bool) *CloseJob {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Refer to the description for the `allow_no_match` query parameter.
+// API name: allow_no_match
+func (r *CloseJob) AllowNoMatch(allownomatch bool) *CloseJob {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.AllowNoMatch = &allownomatch
+
+	return r
+}
+
+// Refer to the description for the `force` query parameter.
+// API name: force
+func (r *CloseJob) Force(force bool) *CloseJob {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Force = &force
+
+	return r
+}
+
+// Refer to the description for the `timeout` query parameter.
+// API name: timeout
+func (r *CloseJob) Timeout(duration types.DurationVariant) *CloseJob {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Timeout = *duration.DurationCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/request.go
new file mode 100644
index 000000000..c2a5e0fdb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/request.go
@@ -0,0 +1,116 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package closejob
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package closejob
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/close_job/MlCloseJobRequest.ts#L24-L85
+type Request struct {
+
+	// AllowNoMatch Refer to the description for the `allow_no_match` query parameter.
+	AllowNoMatch *bool `json:"allow_no_match,omitempty"`
+	// Force Refer to the description for the `force` query parameter.
+	Force *bool `json:"force,omitempty"`
+	// Timeout Refer to the description for the `timeout` query parameter.
+	Timeout types.Duration `json:"timeout,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Closejob request: %w", err)
+	}
+
+	return &req, nil
+}
+
+func (s *Request) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_no_match":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowNoMatch", err)
+				}
+				s.AllowNoMatch = &value
+			case bool:
+				s.AllowNoMatch = &v
+			}
+
+		case "force":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Force", err)
+				}
+				s.Force = &value
+			case bool:
+				s.Force = &v
+			}
+
+		case "timeout":
+			if err := dec.Decode(&s.Timeout); err != nil {
+				return fmt.Errorf("%s | %w", "Timeout", err)
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/response.go
new file mode 100644
index 000000000..0131eb782
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob/response.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package closejob + +// Response holds the response body struct for the package closejob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 +type Response struct { + Closed bool `json:"closed"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/delete_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/delete_calendar.go new file mode 100644 index 000000000..c54635ae7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/delete_calendar.go @@ -0,0 +1,348 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a calendar. +// +// Remove all scheduled events from a calendar, then delete it. +package deletecalendar + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteCalendar struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + calendarid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteCalendar type alias for index. +type NewDeleteCalendar func(calendarid string) *DeleteCalendar + +// NewDeleteCalendarFunc returns a new instance of DeleteCalendar with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteCalendarFunc(tp elastictransport.Interface) NewDeleteCalendar { + return func(calendarid string) *DeleteCalendar { + n := New(tp) + + n._calendarid(calendarid) + + return n + } +} + +// Delete a calendar. +// +// Remove all scheduled events from a calendar, then delete it. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar +func New(tp elastictransport.Interface) *DeleteCalendar { + r := &DeleteCalendar{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteCalendar) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteCalendar) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_calendar") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_calendar", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteCalendar query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletecalendar.Response +func (r DeleteCalendar) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
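Because delete-calendar carries no request payload, the IsSuccess helper declared just below is a compact alternative to Do when only the outcome matters; as the code shows, an HTTP 404 comes back as `false` with a nil error rather than as a failure. A sketch with the assumed `es` client and a hypothetical calendar ID:

ok, err := es.Ml.DeleteCalendar("planned-outages").IsSuccess(ctx)
if err != nil {
	log.Fatal(err)
}
// ok is false, with a nil error, when the calendar did not exist.
fmt.Println("deleted:", ok)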
+func (r DeleteCalendar) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteCalendar query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteCalendar headers map. +func (r *DeleteCalendar) Header(key, value string) *DeleteCalendar { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *DeleteCalendar) _calendarid(calendarid string) *DeleteCalendar { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteCalendar) ErrorTrace(errortrace bool) *DeleteCalendar { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteCalendar) FilterPath(filterpaths ...string) *DeleteCalendar { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteCalendar) Human(human bool) *DeleteCalendar { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteCalendar) Pretty(pretty bool) *DeleteCalendar { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/response.go new file mode 100644 index 000000000..5bff6de55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletecalendar + +// Response holds the response body struct for the package deletecalendar +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/delete_calendar_event.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/delete_calendar_event.go new file mode 100644 index 000000000..644d5c9b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/delete_calendar_event.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete events from a calendar. +package deletecalendarevent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 + + eventidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteCalendarEvent struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + calendarid string + eventid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteCalendarEvent type alias for index. 
+type NewDeleteCalendarEvent func(calendarid, eventid string) *DeleteCalendarEvent + +// NewDeleteCalendarEventFunc returns a new instance of DeleteCalendarEvent with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteCalendarEventFunc(tp elastictransport.Interface) NewDeleteCalendarEvent { + return func(calendarid, eventid string) *DeleteCalendarEvent { + n := New(tp) + + n._calendarid(calendarid) + + n._eventid(eventid) + + return n + } +} + +// Delete events from a calendar. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event +func New(tp elastictransport.Interface) *DeleteCalendarEvent { + r := &DeleteCalendarEvent{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteCalendarEvent) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask|eventidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + path.WriteString("/") + path.WriteString("events") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "eventid", r.eventid) + } + path.WriteString(r.eventid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteCalendarEvent) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_calendar_event") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar_event") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_calendar_event", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar_event") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteCalendarEvent query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletecalendarevent.Response +func (r DeleteCalendarEvent) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
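The Perform/Do pairs in these generated builders only start spans when the transport itself carries an elastictransport.Instrumentation. A minimal wiring sketch; elastictransport.New, Config.URLs, Config.Instrumentation, and NewOtelInstrumentation are assumed from elastic-transport-go v8.7 rather than shown in this diff, and the cluster address is illustrative:

```go
package main

import (
	"context"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent"
	"go.opentelemetry.io/otel"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative cluster address

	// Assumption: Config.Instrumentation and NewOtelInstrumentation exist as in
	// elastic-transport-go v8.7; the version string is only span metadata.
	tp, err := elastictransport.New(elastictransport.Config{
		URLs:            []*url.URL{u},
		Instrumentation: elastictransport.NewOtelInstrumentation(otel.GetTracerProvider(), false, "9.1.0"),
	})
	if err != nil {
		panic(err)
	}

	// New(tp) picks the instrumentation off the transport, so this call is
	// traced as an "ml.delete_calendar_event" span.
	_, _ = deletecalendarevent.NewDeleteCalendarEventFunc(tp)("planned-outages", "event-42").
		Do(context.Background())
}
```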
+func (r DeleteCalendarEvent) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteCalendarEvent query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteCalendarEvent headers map. +func (r *DeleteCalendarEvent) Header(key, value string) *DeleteCalendarEvent { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *DeleteCalendarEvent) _calendarid(calendarid string) *DeleteCalendarEvent { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// EventId Identifier for the scheduled event. +// You can obtain this identifier by using the get calendar events API. +// API Name: eventid +func (r *DeleteCalendarEvent) _eventid(eventid string) *DeleteCalendarEvent { + r.paramSet |= eventidMask + r.eventid = eventid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteCalendarEvent) ErrorTrace(errortrace bool) *DeleteCalendarEvent { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteCalendarEvent) FilterPath(filterpaths ...string) *DeleteCalendarEvent { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteCalendarEvent) Human(human bool) *DeleteCalendarEvent { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteCalendarEvent) Pretty(pretty bool) *DeleteCalendarEvent { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/response.go new file mode 100644 index 000000000..e5c8f3bc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletecalendarevent + +// Response holds the response body struct for the package deletecalendarevent +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/delete_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/delete_calendar_job.go new file mode 100644 index 000000000..bb769af4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/delete_calendar_job.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete anomaly jobs from a calendar. +package deletecalendarjob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 + + jobidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
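That completes the deletecalendarevent package. A usage sketch of its exported constructor; the transport setup (elastictransport.New with Config.URLs) and the localhost address are assumptions, while the constructor, Do, and the response shape come straight from the generated code above:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative cluster address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	res, err := deletecalendarevent.NewDeleteCalendarEventFunc(tp)("planned-outages", "event-42").
		Do(context.Background())
	if err != nil {
		// Do decodes non-2xx bodies into *types.ElasticsearchError.
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			log.Fatalf("delete rejected with status %d", esErr.Status)
		}
		log.Fatal(err)
	}
	fmt.Println(res.Acknowledged) // always true on success
}
```

Because the error branch returns the decoded Elasticsearch body as the error value, errors.As is enough to recover the HTTP status.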
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteCalendarJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + calendarid string + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteCalendarJob type alias for index. +type NewDeleteCalendarJob func(calendarid, jobid string) *DeleteCalendarJob + +// NewDeleteCalendarJobFunc returns a new instance of DeleteCalendarJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteCalendarJobFunc(tp elastictransport.Interface) NewDeleteCalendarJob { + return func(calendarid, jobid string) *DeleteCalendarJob { + n := New(tp) + + n._calendarid(calendarid) + + n._jobid(jobid) + + return n + } +} + +// Delete anomaly jobs from a calendar. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job +func New(tp elastictransport.Interface) *DeleteCalendarJob { + r := &DeleteCalendarJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteCalendarJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask|jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + path.WriteString("/") + path.WriteString("jobs") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteCalendarJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_calendar_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_calendar_job") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_calendar_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_calendar_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteCalendarJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletecalendarjob.Response +func (r DeleteCalendarJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
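A matching sketch for DeleteCalendarJob, under the same assumed transport setup; per the _jobid documentation further down, the second argument may be a single job, a group name, or a comma-separated list:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative cluster address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Detach two jobs from the calendar in one call.
	res, err := deletecalendarjob.NewDeleteCalendarJobFunc(tp)("planned-outages", "job-1,job-2").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.CalendarId, res.JobIds)
}
```

Unlike the acknowledged-only endpoints, this response echoes the calendar; its custom UnmarshalJSON (in the response.go that follows) accepts job_ids as either a bare string or an array and normalizes both into []string.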
+func (r DeleteCalendarJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteCalendarJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteCalendarJob headers map. +func (r *DeleteCalendarJob) Header(key, value string) *DeleteCalendarJob { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *DeleteCalendarJob) _calendarid(calendarid string) *DeleteCalendarJob { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// JobId An identifier for the anomaly detection jobs. It can be a job identifier, a +// group name, or a +// comma-separated list of jobs or groups. +// API Name: jobid +func (r *DeleteCalendarJob) _jobid(jobid string) *DeleteCalendarJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteCalendarJob) ErrorTrace(errortrace bool) *DeleteCalendarJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteCalendarJob) FilterPath(filterpaths ...string) *DeleteCalendarJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteCalendarJob) Human(human bool) *DeleteCalendarJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteCalendarJob) Pretty(pretty bool) *DeleteCalendarJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/response.go new file mode 100644 index 000000000..b44c4683a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob/response.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletecalendarjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Response holds the response body struct for the package deletecalendarjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 +type Response struct { + + // CalendarId A string that uniquely identifies a calendar. + CalendarId string `json:"calendar_id"` + // Description A description of the calendar. + Description *string `json:"description,omitempty"` + // JobIds A list of anomaly detection job identifiers or group names. + JobIds []string `json:"job_ids"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return fmt.Errorf("%s | %w", "CalendarId", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/delete_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/delete_datafeed.go new file mode 100644 index 000000000..3f5a5e390 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/delete_datafeed.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a datafeed. +package deletedatafeed + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDatafeed type alias for index. +type NewDeleteDatafeed func(datafeedid string) *DeleteDatafeed + +// NewDeleteDatafeedFunc returns a new instance of DeleteDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDatafeedFunc(tp elastictransport.Interface) NewDeleteDatafeed { + return func(datafeedid string) *DeleteDatafeed { + n := New(tp) + + n._datafeedid(datafeedid) + + return n + } +} + +// Delete a datafeed. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed +func New(tp elastictransport.Interface) *DeleteDatafeed { + r := &DeleteDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == datafeedidMask:
+		path.WriteString("/")
+		path.WriteString("_ml")
+		path.WriteString("/")
+		path.WriteString("datafeeds")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid)
+		}
+		path.WriteString(r.datafeedid)
+
+		method = http.MethodDelete
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r DeleteDatafeed) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "ml.delete_datafeed")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "ml.delete_datafeed")
+		if reader := instrument.RecordRequestBody(ctx, "ml.delete_datafeed", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "ml.delete_datafeed")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the DeleteDatafeed query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a deletedatafeed.Response
+func (r DeleteDatafeed) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ml.delete_datafeed")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r DeleteDatafeed) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "ml.delete_datafeed")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteDatafeed query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteDatafeed headers map.
+func (r *DeleteDatafeed) Header(key, value string) *DeleteDatafeed {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// DatafeedId A numerical character string that uniquely identifies the datafeed. This
+// identifier can contain lowercase alphanumeric characters (a-z and 0-9),
+// hyphens, and underscores. It must start and end with alphanumeric
+// characters.
+// API Name: datafeedid
+func (r *DeleteDatafeed) _datafeedid(datafeedid string) *DeleteDatafeed {
+	r.paramSet |= datafeedidMask
+	r.datafeedid = datafeedid
+
+	return r
+}
+
+// Force Use to forcefully delete a started datafeed; this method is quicker than
+// stopping and deleting the datafeed.
+// API name: force
+func (r *DeleteDatafeed) Force(force bool) *DeleteDatafeed {
+	r.values.Set("force", strconv.FormatBool(force))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteDatafeed) ErrorTrace(errortrace bool) *DeleteDatafeed {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *DeleteDatafeed) FilterPath(filterpaths ...string) *DeleteDatafeed {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
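For DeleteDatafeed, the IsSuccess variant above collapses the reply into a bool, treating 404 as (false, nil) and any other non-2xx status as an error. A sketch under the same assumed transport setup:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative cluster address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Force(true) removes a started datafeed without stopping it first.
	ok, err := deletedatafeed.NewDeleteDatafeedFunc(tp)("datafeed-weblogs").
		Force(true).
		IsSuccess(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", ok) // false with a nil error means the datafeed was not found
}
```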
+// API name: human +func (r *DeleteDatafeed) Human(human bool) *DeleteDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteDatafeed) Pretty(pretty bool) *DeleteDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/response.go new file mode 100644 index 000000000..38549eaac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedatafeed + +// Response holds the response body struct for the package deletedatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go new file mode 100644 index 000000000..45bf3ab85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a data frame analytics job. +package deletedataframeanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteDataFrameAnalytics type alias for index. +type NewDeleteDataFrameAnalytics func(id string) *DeleteDataFrameAnalytics + +// NewDeleteDataFrameAnalyticsFunc returns a new instance of DeleteDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDataFrameAnalyticsFunc(tp elastictransport.Interface) NewDeleteDataFrameAnalytics { + return func(id string) *DeleteDataFrameAnalytics { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete a data frame analytics job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics +func New(tp elastictransport.Interface) *DeleteDataFrameAnalytics { + r := &DeleteDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedataframeanalytics.Response +func (r DeleteDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r DeleteDataFrameAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteDataFrameAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteDataFrameAnalytics headers map. +func (r *DeleteDataFrameAnalytics) Header(key, value string) *DeleteDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. +// API Name: id +func (r *DeleteDataFrameAnalytics) _id(id string) *DeleteDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// Force If `true`, it deletes a job that is not stopped; this method is quicker than +// stopping and deleting the job. +// API name: force +func (r *DeleteDataFrameAnalytics) Force(force bool) *DeleteDataFrameAnalytics { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// Timeout The time to wait for the job to be deleted. +// API name: timeout +func (r *DeleteDataFrameAnalytics) Timeout(duration string) *DeleteDataFrameAnalytics { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteDataFrameAnalytics) ErrorTrace(errortrace bool) *DeleteDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteDataFrameAnalytics) FilterPath(filterpaths ...string) *DeleteDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteDataFrameAnalytics) Human(human bool) *DeleteDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
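DeleteDataFrameAnalytics adds a Timeout option that, unlike the body-level timeout of the delete-expired-data endpoint below, is passed as a plain query-string duration. A sketch under the same assumed transport setup:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative cluster address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Wait up to one minute for the job to be deleted.
	res, err := deletedataframeanalytics.NewDeleteDataFrameAnalyticsFunc(tp)("weblog-outliers").
		Timeout("1m").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Acknowledged)
}
```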
+// API name: pretty +func (r *DeleteDataFrameAnalytics) Pretty(pretty bool) *DeleteDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/response.go new file mode 100644 index 000000000..369ac1679 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletedataframeanalytics + +// Response holds the response body struct for the package deletedataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/delete_expired_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/delete_expired_data.go new file mode 100644 index 000000000..2804070c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/delete_expired_data.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete expired ML data. +// +// Delete all job results, model snapshots and forecast data that have exceeded +// their retention days period. Machine learning state documents that are not +// associated with any job are also deleted. +// You can limit the request to a single or set of anomaly detection jobs by +// using a job identifier, a group name, a comma-separated list of jobs, or a +// wildcard expression. You can delete expired data for all anomaly detection +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. +package deleteexpireddata + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteExpiredData struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteExpiredData type alias for index. +type NewDeleteExpiredData func() *DeleteExpiredData + +// NewDeleteExpiredDataFunc returns a new instance of DeleteExpiredData with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteExpiredDataFunc(tp elastictransport.Interface) NewDeleteExpiredData { + return func() *DeleteExpiredData { + n := New(tp) + + return n + } +} + +// Delete expired ML data. +// +// Delete all job results, model snapshots and forecast data that have exceeded +// their retention days period. Machine learning state documents that are not +// associated with any job are also deleted. +// You can limit the request to a single or set of anomaly detection jobs by +// using a job identifier, a group name, a comma-separated list of jobs, or a +// wildcard expression. You can delete expired data for all anomaly detection +// jobs by using `_all`, by specifying `*` as the ``, or by omitting the +// ``. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data +func New(tp elastictransport.Interface) *DeleteExpiredData { + r := &DeleteExpiredData{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *DeleteExpiredData) Raw(raw io.Reader) *DeleteExpiredData { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *DeleteExpiredData) Request(req *Request) *DeleteExpiredData { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteExpiredData) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for DeleteExpiredData: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("_delete_expired_data") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodDelete + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("_delete_expired_data") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteExpiredData) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_expired_data") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_expired_data") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_expired_data", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_expired_data") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteExpiredData query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteexpireddata.Response +func (r DeleteExpiredData) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_expired_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the DeleteExpiredData headers map. +func (r *DeleteExpiredData) Header(key, value string) *DeleteExpiredData { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for an anomaly detection job. It can be a job identifier, a +// group name, or a wildcard expression. +// API Name: jobid +func (r *DeleteExpiredData) JobId(jobid string) *DeleteExpiredData { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *DeleteExpiredData) ErrorTrace(errortrace bool) *DeleteExpiredData { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteExpiredData) FilterPath(filterpaths ...string) *DeleteExpiredData { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteExpiredData) Human(human bool) *DeleteExpiredData { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteExpiredData) Pretty(pretty bool) *DeleteExpiredData { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The desired requests per second for the deletion processes. The default +// behavior is no throttling. +// API name: requests_per_second +func (r *DeleteExpiredData) RequestsPerSecond(requestspersecond float32) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RequestsPerSecond = &requestspersecond + + return r +} + +// How long can the underlying delete processes run until they are canceled. +// API name: timeout +func (r *DeleteExpiredData) Timeout(duration types.DurationVariant) *DeleteExpiredData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/request.go new file mode 100644 index 000000000..10d95c497 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/request.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
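
With the query and body setters in place, a complete call reads as a single chain. A hedged sketch, reusing the `es` client from the earlier example; the job id and throttle value are illustrative.

// RequestsPerSecond lazily initialises the JSON body defined in request.go below.
res, err := deleteexpireddata.New(es).
	JobId("total-requests").
	RequestsPerSecond(1000).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("deleted:", res.Deleted)

The request body type that RequestsPerSecond populates is the file continuing below.
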
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteexpireddata + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package deleteexpireddata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L85 +type Request struct { + + // RequestsPerSecond The desired requests per second for the deletion processes. The default + // behavior is no throttling. + RequestsPerSecond *float32 `json:"requests_per_second,omitempty"` + // Timeout How long can the underlying delete processes run until they are canceled. + Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Deleteexpireddata request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "requests_per_second": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "RequestsPerSecond", err) + } + f := float32(value) + s.RequestsPerSecond = &f + case float64: + f := float32(v) + s.RequestsPerSecond = &f + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/response.go new file mode 100644 index 000000000..3ddbdcba9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
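
The custom UnmarshalJSON above is why requests_per_second may arrive either as a JSON number or as a numeric string. A small sketch of FromJSON, which routes through that decoder; the values are illustrative.

// Both `"1000"` and 1000 decode to the same request here.
req, err := deleteexpireddata.NewRequest().FromJSON(`{"requests_per_second": "1000", "timeout": "8h"}`)
if err != nil {
	log.Fatal(err)
}
fmt.Println(*req.RequestsPerSecond) // 1000, parsed from the quoted string
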
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteexpireddata + +// Response holds the response body struct for the package deleteexpireddata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 +type Response struct { + Deleted bool `json:"deleted"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/delete_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/delete_filter.go new file mode 100644 index 000000000..9be18b2e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/delete_filter.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a filter. +// +// If an anomaly detection job references the filter, you cannot delete the +// filter. You must update or delete the job before you can delete the filter. +package deletefilter + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + filteridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteFilter struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + filterid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteFilter type alias for index. +type NewDeleteFilter func(filterid string) *DeleteFilter + +// NewDeleteFilterFunc returns a new instance of DeleteFilter with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFilterFunc(tp elastictransport.Interface) NewDeleteFilter { + return func(filterid string) *DeleteFilter { + n := New(tp) + + n._filterid(filterid) + + return n + } +} + +// Delete a filter. +// +// If an anomaly detection job references the filter, you cannot delete the +// filter. You must update or delete the job before you can delete the filter. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter +func New(tp elastictransport.Interface) *DeleteFilter { + r := &DeleteFilter{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteFilter) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == filteridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "filterid", r.filterid) + } + path.WriteString(r.filterid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
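
Unlike DeleteExpiredData, the filter id here is a required path parameter, so the generated closure takes it up front. A minimal sketch reusing the `es` client from above; the filter name is illustrative, and per the package comment the call fails while an anomaly detection job still references the filter.

res, err := deletefilter.NewDeleteFilterFunc(es)("safe_domains").Do(context.Background())
if err != nil {
	log.Fatal(err) // e.g. the filter is still referenced by a job
}
fmt.Println("acknowledged:", res.Acknowledged)

Perform and Do, defined next, follow the same shape as in deleteexpireddata.
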
+func (r DeleteFilter) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_filter") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_filter") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_filter", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_filter") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteFilter query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletefilter.Response +func (r DeleteFilter) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
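
Perform and Do only emit telemetry when the transport supplies an elastictransport.Instrumentation, as the type assertions above show. Below is a sketch of a logging implementation; the method signatures are inferred from those call sites rather than taken from the interface definition, so treat them as assumptions.

// Hypothetical logging instrumentation; signatures inferred from the calls
// made by Perform and Do above.
type logInstrumentation struct{}

func (logInstrumentation) Start(ctx context.Context, name string) context.Context {
	log.Printf("span start: %s", name) // e.g. "ml.delete_filter"
	return ctx
}
func (logInstrumentation) Close(ctx context.Context)                  {}
func (logInstrumentation) RecordError(ctx context.Context, err error) { log.Printf("error: %v", err) }
func (logInstrumentation) RecordPathPart(ctx context.Context, part, value string) {}
func (logInstrumentation) BeforeRequest(req *http.Request, endpoint string) {
	log.Printf("%s %s (%s)", req.Method, req.URL.Path, endpoint)
}
func (logInstrumentation) AfterRequest(req *http.Request, system, endpoint string) {}
func (logInstrumentation) RecordRequestBody(ctx context.Context, endpoint string, body io.Reader) io.ReadCloser {
	return nil // returning nil leaves the original request body in place
}

// New() consults the Instrumented interface on the transport, so a wrapper
// like this is enough to switch the hooks on.
type instrumentedTransport struct{ elastictransport.Interface }

func (t instrumentedTransport) InstrumentationEnabled() elastictransport.Instrumentation {
	return logInstrumentation{}
}

IsSuccess, documented just above, follows.
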
+func (r DeleteFilter) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteFilter query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteFilter headers map. +func (r *DeleteFilter) Header(key, value string) *DeleteFilter { + r.headers.Set(key, value) + + return r +} + +// FilterId A string that uniquely identifies a filter. +// API Name: filterid +func (r *DeleteFilter) _filterid(filterid string) *DeleteFilter { + r.paramSet |= filteridMask + r.filterid = filterid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteFilter) ErrorTrace(errortrace bool) *DeleteFilter { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteFilter) FilterPath(filterpaths ...string) *DeleteFilter { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteFilter) Human(human bool) *DeleteFilter { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteFilter) Pretty(pretty bool) *DeleteFilter { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/response.go new file mode 100644 index 000000000..ee1b09d2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
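
IsSuccess condenses the exchange to a boolean, as the implementation above shows: a 2xx yields (true, nil), a 404 yields (false, nil), and any other status surfaces as an error. A short sketch reusing `es`; the filter name is illustrative.

ok, err := deletefilter.NewDeleteFilterFunc(es)("safe_domains").IsSuccess(context.Background())
if err != nil {
	log.Fatal(err)
}
if !ok {
	log.Println("filter was already gone") // the 404 case
}

The response struct for DeleteFilter continues below.
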
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletefilter + +// Response holds the response body struct for the package deletefilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/delete_forecast.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/delete_forecast.go new file mode 100644 index 000000000..e87c11dff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/delete_forecast.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete forecasts from a job. +// +// By default, forecasts are retained for 14 days. You can specify a +// different retention period with the `expires_in` parameter in the forecast +// jobs API. The delete forecast API enables you to delete one or more +// forecasts before they expire. +package deleteforecast + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + forecastidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteForecast struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + forecastid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteForecast type alias for index. +type NewDeleteForecast func(jobid string) *DeleteForecast + +// NewDeleteForecastFunc returns a new instance of DeleteForecast with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteForecastFunc(tp elastictransport.Interface) NewDeleteForecast { + return func(jobid string) *DeleteForecast { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Delete forecasts from a job. +// +// By default, forecasts are retained for 14 days. You can specify a +// different retention period with the `expires_in` parameter in the forecast +// jobs API. The delete forecast API enables you to delete one or more +// forecasts before they expire. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast +func New(tp elastictransport.Interface) *DeleteForecast { + r := &DeleteForecast{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteForecast) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_forecast") + + method = http.MethodDelete + case r.paramSet == jobidMask|forecastidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_forecast") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "forecastid", r.forecastid) + } + path.WriteString(r.forecastid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the 
http.Request through the provided transport and returns an http.Response. +func (r DeleteForecast) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_forecast") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_forecast") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_forecast", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_forecast") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteForecast query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteforecast.Response +func (r DeleteForecast) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_forecast") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
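
As Do shows above, a non-2xx response comes back as a decoded *types.ElasticsearchError carrying the HTTP status. A hedged sketch of inspecting it, reusing `es` with imports elided; the job id is illustrative.

res, err := deleteforecast.NewDeleteForecastFunc(es)("total-requests").Do(context.Background())
if err != nil {
	var esErr *types.ElasticsearchError
	if errors.As(err, &esErr) {
		log.Fatalf("elasticsearch replied %d: %v", esErr.Status, esErr)
	}
	log.Fatal(err) // transport-level failure
}
fmt.Println("acknowledged:", res.Acknowledged)

IsSuccess, documented just above, follows.
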
+func (r DeleteForecast) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_forecast") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteForecast query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteForecast headers map. +func (r *DeleteForecast) Header(key, value string) *DeleteForecast { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *DeleteForecast) _jobid(jobid string) *DeleteForecast { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ForecastId A comma-separated list of forecast identifiers. If you do not specify +// this optional parameter or if you specify `_all` or `*` the API deletes +// all forecasts from the job. +// API Name: forecastid +func (r *DeleteForecast) ForecastId(forecastid string) *DeleteForecast { + r.paramSet |= forecastidMask + r.forecastid = forecastid + + return r +} + +// AllowNoForecasts Specifies whether an error occurs when there are no forecasts. In +// particular, if this parameter is set to `false` and there are no +// forecasts associated with the job, attempts to delete all forecasts +// return an error. +// API name: allow_no_forecasts +func (r *DeleteForecast) AllowNoForecasts(allownoforecasts bool) *DeleteForecast { + r.values.Set("allow_no_forecasts", strconv.FormatBool(allownoforecasts)) + + return r +} + +// Timeout Specifies the period of time to wait for the completion of the delete +// operation. When this period of time elapses, the API fails and returns an +// error. +// API name: timeout +func (r *DeleteForecast) Timeout(duration string) *DeleteForecast { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteForecast) ErrorTrace(errortrace bool) *DeleteForecast { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteForecast) FilterPath(filterpaths ...string) *DeleteForecast { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DeleteForecast) Human(human bool) *DeleteForecast { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteForecast) Pretty(pretty bool) *DeleteForecast { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/response.go new file mode 100644 index 000000000..cb3fbf726 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteforecast + +// Response holds the response body struct for the package deleteforecast +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/delete_job.go new file mode 100644 index 000000000..62f75e28d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/delete_job.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
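
Putting the DeleteForecast setters together: per the ForecastId docs above, `_all` (or `*`) matches every forecast, and AllowNoForecasts(true) keeps the call from failing when none exist. A sketch reusing `es`; the identifiers are illustrative.

res, err := deleteforecast.NewDeleteForecastFunc(es)("total-requests").
	ForecastId("_all").
	AllowNoForecasts(true). // tolerate a job with no forecasts
	Timeout("30s").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("acknowledged:", res.Acknowledged)

delete_job.go, whose header opens just above, continues below.
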
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an anomaly detection job. +// +// All job configuration, model state and results are deleted. +// It is not currently possible to delete multiple jobs using wildcards or a +// comma separated list. If you delete a job that has a datafeed, the request +// first tries to delete the datafeed. This behavior is equivalent to calling +// the delete datafeed API with the same timeout and force parameters as the +// delete job request. +package deletejob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteJob type alias for index. +type NewDeleteJob func(jobid string) *DeleteJob + +// NewDeleteJobFunc returns a new instance of DeleteJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { + return func(jobid string) *DeleteJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Delete an anomaly detection job. +// +// All job configuration, model state and results are deleted. +// It is not currently possible to delete multiple jobs using wildcards or a +// comma separated list. If you delete a job that has a datafeed, the request +// first tries to delete the datafeed. This behavior is equivalent to calling +// the delete datafeed API with the same timeout and force parameters as the +// delete job request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job +func New(tp elastictransport.Interface) *DeleteJob { + r := &DeleteJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
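
The package comment above notes that deleting a job also removes its datafeed, using the same timeout and force parameters. A sketch using the force and annotation options defined further down in this file, reusing `es`; the job id is illustrative.

res, err := deletejob.NewDeleteJobFunc(es)("total-requests").
	Force(true).                 // skip closing an opened job first
	DeleteUserAnnotations(true). // drop user-added annotations as well
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("acknowledged:", res.Acknowledged)

HttpRequest for DeleteJob follows.
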
+func (r *DeleteJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_job") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletejob.Response +func (r DeleteJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if 
err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteJob headers map. +func (r *DeleteJob) Header(key, value string) *DeleteJob { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *DeleteJob) _jobid(jobid string) *DeleteJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// Force Use to forcefully delete an opened job; this method is quicker than +// closing and deleting the job. +// API name: force +func (r *DeleteJob) Force(force bool) *DeleteJob { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// DeleteUserAnnotations Specifies whether annotations that have been added by the +// user should be deleted along with any auto-generated annotations when the job +// is +// reset. +// API name: delete_user_annotations +func (r *DeleteJob) DeleteUserAnnotations(deleteuserannotations bool) *DeleteJob { + r.values.Set("delete_user_annotations", strconv.FormatBool(deleteuserannotations)) + + return r +} + +// WaitForCompletion Specifies whether the request should return immediately or wait until the +// job deletion completes. +// API name: wait_for_completion +func (r *DeleteJob) WaitForCompletion(waitforcompletion bool) *DeleteJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteJob) ErrorTrace(errortrace bool) *DeleteJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DeleteJob) FilterPath(filterpaths ...string) *DeleteJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteJob) Human(human bool) *DeleteJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteJob) Pretty(pretty bool) *DeleteJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/response.go new file mode 100644 index 000000000..0891a0e4f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletejob + +// Response holds the response body struct for the package deletejob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go new file mode 100644 index 000000000..e6feb6ac6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a model snapshot. +// +// You cannot delete the active model snapshot. To delete that snapshot, first +// revert to a different one. To identify the active model snapshot, refer to +// the `model_snapshot_id` in the results from the get jobs API. +package deletemodelsnapshot + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteModelSnapshot struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteModelSnapshot type alias for index. +type NewDeleteModelSnapshot func(jobid, snapshotid string) *DeleteModelSnapshot + +// NewDeleteModelSnapshotFunc returns a new instance of DeleteModelSnapshot with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteModelSnapshotFunc(tp elastictransport.Interface) NewDeleteModelSnapshot { + return func(jobid, snapshotid string) *DeleteModelSnapshot { + n := New(tp) + + n._jobid(jobid) + + n._snapshotid(snapshotid) + + return n + } +} + +// Delete a model snapshot. +// +// You cannot delete the active model snapshot. To delete that snapshot, first +// revert to a different one. To identify the active model snapshot, refer to +// the `model_snapshot_id` in the results from the get jobs API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot +func New(tp elastictransport.Interface) *DeleteModelSnapshot { + r := &DeleteModelSnapshot{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteModelSnapshot) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_model_snapshot") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_model_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_model_snapshot", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_model_snapshot") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteModelSnapshot query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletemodelsnapshot.Response +func (r DeleteModelSnapshot) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteModelSnapshot) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteModelSnapshot query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteModelSnapshot headers map. +func (r *DeleteModelSnapshot) Header(key, value string) *DeleteModelSnapshot { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *DeleteModelSnapshot) _jobid(jobid string) *DeleteModelSnapshot { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId Identifier for the model snapshot. +// API Name: snapshotid +func (r *DeleteModelSnapshot) _snapshotid(snapshotid string) *DeleteModelSnapshot { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteModelSnapshot) ErrorTrace(errortrace bool) *DeleteModelSnapshot { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteModelSnapshot) FilterPath(filterpaths ...string) *DeleteModelSnapshot { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteModelSnapshot) Human(human bool) *DeleteModelSnapshot { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteModelSnapshot) Pretty(pretty bool) *DeleteModelSnapshot { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/response.go new file mode 100644 index 000000000..fce16ebca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletemodelsnapshot + +// Response holds the response body struct for the package deletemodelsnapshot +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/delete_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/delete_trained_model.go new file mode 100644 index 000000000..260fb321a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/delete_trained_model.go @@ -0,0 +1,368 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an unreferenced trained model. +// +// The request deletes a trained inference model that is not referenced by an +// ingest pipeline. +package deletetrainedmodel + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteTrainedModel struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteTrainedModel type alias for index. +type NewDeleteTrainedModel func(modelid string) *DeleteTrainedModel + +// NewDeleteTrainedModelFunc returns a new instance of DeleteTrainedModel with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteTrainedModelFunc(tp elastictransport.Interface) NewDeleteTrainedModel { + return func(modelid string) *DeleteTrainedModel { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Delete an unreferenced trained model. +// +// The request deletes a trained inference model that is not referenced by an +// ingest pipeline. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model +func New(tp elastictransport.Interface) *DeleteTrainedModel { + r := &DeleteTrainedModel{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
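The endpoint above follows the same builder pattern as DeleteModelSnapshot earlier in this diff: a path-parameter constructor, fluent query-parameter setters, and Do/Perform/IsSuccess terminals. Below is a minimal, hypothetical usage sketch, not part of the diff. It uses only the constructors this diff introduces; the transport setup via elastictransport.New is an assumption about the companion elastic-transport-go package, and the node address and "my-model" id are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel"
)

func main() {
	// Build a bare transport. A real application would usually go through the
	// high-level client, but any elastictransport.Interface drives the
	// generated endpoint directly (assumed node address, sketch only).
	node, _ := url.Parse("http://localhost:9200")
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{node}})
	if err != nil {
		log.Fatal(err)
	}

	// NewDeleteTrainedModelFunc injects the required model_id path parameter;
	// Force and Timeout map to the query parameters defined in this file.
	res, err := deletetrainedmodel.NewDeleteTrainedModelFunc(tp)("my-model").
		Force(false).
		Timeout("30s").
		Do(context.Background())
	if err != nil {
		// Per the generated Do, non-2xx responses come back as
		// *types.ElasticsearchError.
		log.Fatal(err)
	}
	fmt.Println(res.Acknowledged) // always true on a successful response
}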
+func (r *DeleteTrainedModel) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteTrainedModel) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_trained_model") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_trained_model") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_trained_model", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_trained_model") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteTrainedModel query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletetrainedmodel.Response +func (r DeleteTrainedModel) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + 
errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteTrainedModel) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteTrainedModel query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteTrainedModel headers map. +func (r *DeleteTrainedModel) Header(key, value string) *DeleteTrainedModel { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *DeleteTrainedModel) _modelid(modelid string) *DeleteTrainedModel { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// Force Forcefully deletes a trained model that is referenced by ingest pipelines or +// has a started deployment. +// API name: force +func (r *DeleteTrainedModel) Force(force bool) *DeleteTrainedModel { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteTrainedModel) Timeout(duration string) *DeleteTrainedModel { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteTrainedModel) ErrorTrace(errortrace bool) *DeleteTrainedModel { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteTrainedModel) FilterPath(filterpaths ...string) *DeleteTrainedModel { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteTrainedModel) Human(human bool) *DeleteTrainedModel { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteTrainedModel) Pretty(pretty bool) *DeleteTrainedModel { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/response.go new file mode 100644 index 000000000..792e853b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletetrainedmodel + +// Response holds the response body struct for the package deletetrainedmodel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go new file mode 100644 index 000000000..fd5bb703b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a trained model alias. +// +// This API deletes an existing model alias that refers to a trained model. If +// the model alias is missing or refers to a model other than the one identified +// by the `model_id`, this API returns an error. +package deletetrainedmodelalias + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelaliasMask = iota + 1 + + modelidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteTrainedModelAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelalias string + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteTrainedModelAlias type alias for index. +type NewDeleteTrainedModelAlias func(modelid, modelalias string) *DeleteTrainedModelAlias + +// NewDeleteTrainedModelAliasFunc returns a new instance of DeleteTrainedModelAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteTrainedModelAliasFunc(tp elastictransport.Interface) NewDeleteTrainedModelAlias { + return func(modelid, modelalias string) *DeleteTrainedModelAlias { + n := New(tp) + + n._modelalias(modelalias) + + n._modelid(modelid) + + return n + } +} + +// Delete a trained model alias. +// +// This API deletes an existing model alias that refers to a trained model. If +// the model alias is missing or refers to a model other than the one identified +// by the `model_id`, this API returns an error. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias +func New(tp elastictransport.Interface) *DeleteTrainedModelAlias { + r := &DeleteTrainedModelAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
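For quick control flow without decoding a response body, the generated IsSuccess terminal can stand in for Do. A sketch under the same assumptions as the previous example; note that the generated constructor takes the model id first and the alias second, and that IsSuccess reports a 404 as (false, nil) rather than as an error.

package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias"
)

// deleteAlias reports whether the alias was removed. tp is any
// elastictransport.Interface, e.g. the transport built in the earlier sketch;
// "my-model" and "my-alias" are placeholders.
func deleteAlias(ctx context.Context, tp elastictransport.Interface) (bool, error) {
	return deletetrainedmodelalias.
		NewDeleteTrainedModelAliasFunc(tp)("my-model", "my-alias").
		IsSuccess(ctx)
	// (true, nil) for 2xx, (false, nil) for 404, error for anything else.
}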
+func (r *DeleteTrainedModelAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask|modelaliasMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("model_aliases") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelalias", r.modelalias) + } + path.WriteString(r.modelalias) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteTrainedModelAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.delete_trained_model_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.delete_trained_model_alias") + if reader := instrument.RecordRequestBody(ctx, "ml.delete_trained_model_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.delete_trained_model_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteTrainedModelAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletetrainedmodelalias.Response +func (r DeleteTrainedModelAlias) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := 
NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteTrainedModelAlias) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.delete_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteTrainedModelAlias query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteTrainedModelAlias headers map. +func (r *DeleteTrainedModelAlias) Header(key, value string) *DeleteTrainedModelAlias { + r.headers.Set(key, value) + + return r +} + +// ModelAlias The model alias to delete. +// API Name: modelalias +func (r *DeleteTrainedModelAlias) _modelalias(modelalias string) *DeleteTrainedModelAlias { + r.paramSet |= modelaliasMask + r.modelalias = modelalias + + return r +} + +// ModelId The trained model ID to which the model alias refers. +// API Name: modelid +func (r *DeleteTrainedModelAlias) _modelid(modelid string) *DeleteTrainedModelAlias { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteTrainedModelAlias) ErrorTrace(errortrace bool) *DeleteTrainedModelAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DeleteTrainedModelAlias) FilterPath(filterpaths ...string) *DeleteTrainedModelAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteTrainedModelAlias) Human(human bool) *DeleteTrainedModelAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteTrainedModelAlias) Pretty(pretty bool) *DeleteTrainedModelAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/response.go new file mode 100644 index 000000000..49ddefb69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletetrainedmodelalias + +// Response holds the response body struct for the package deletetrainedmodelalias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/estimate_model_memory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/estimate_model_memory.go new file mode 100644 index 000000000..88735c511 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/estimate_model_memory.go @@ -0,0 +1,431 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Estimate job model memory usage. +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality +// estimates for the fields it references. +package estimatemodelmemory + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EstimateModelMemory struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEstimateModelMemory type alias for index. +type NewEstimateModelMemory func() *EstimateModelMemory + +// NewEstimateModelMemoryFunc returns a new instance of EstimateModelMemory with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEstimateModelMemoryFunc(tp elastictransport.Interface) NewEstimateModelMemory { + return func() *EstimateModelMemory { + n := New(tp) + + return n + } +} + +// Estimate job model memory usage. +// +// Make an estimation of the memory usage for an anomaly detection job model. +// The estimate is based on analysis configuration details for the job and +// cardinality +// estimates for the fields it references. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory +func New(tp elastictransport.Interface) *EstimateModelMemory { + r := &EstimateModelMemory{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *EstimateModelMemory) Raw(raw io.Reader) *EstimateModelMemory { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *EstimateModelMemory) Request(req *Request) *EstimateModelMemory { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *EstimateModelMemory) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for EstimateModelMemory: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString("_estimate_model_memory") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
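Because Raw takes precedence over the typed Request, a caller can feed this endpoint a hand-written payload. A hedged sketch follows: the JSON shape (analysis_config, detectors, influencers) is an assumption based on the public estimate-model-memory API and is not defined in this diff, while overall_cardinality matches the Request struct added below.

package example

import (
	"context"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory"
)

// estimate returns the model memory estimate for a hypothetical job config.
func estimate(ctx context.Context, tp elastictransport.Interface) (string, error) {
	// Raw bypasses the typed Request entirely; field names here are
	// placeholders following the ML estimate-model-memory API docs.
	body := strings.NewReader(`{
	  "analysis_config": {
	    "bucket_span": "15m",
	    "detectors": [{"function": "count", "by_field_name": "status"}],
	    "influencers": ["status"]
	  },
	  "overall_cardinality": {"status": 10}
	}`)

	res, err := estimatemodelmemory.NewEstimateModelMemoryFunc(tp)().
		Raw(body).
		Do(ctx)
	if err != nil {
		return "", err
	}
	return res.ModelMemoryEstimate, nil // a byte-size string, e.g. "21mb"
}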
+func (r EstimateModelMemory) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.estimate_model_memory") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.estimate_model_memory") + if reader := instrument.RecordRequestBody(ctx, "ml.estimate_model_memory", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.estimate_model_memory") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EstimateModelMemory query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a estimatemodelmemory.Response +func (r EstimateModelMemory) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.estimate_model_memory") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the EstimateModelMemory headers map. +func (r *EstimateModelMemory) Header(key, value string) *EstimateModelMemory { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EstimateModelMemory) ErrorTrace(errortrace bool) *EstimateModelMemory { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *EstimateModelMemory) FilterPath(filterpaths ...string) *EstimateModelMemory { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *EstimateModelMemory) Human(human bool) *EstimateModelMemory { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *EstimateModelMemory) Pretty(pretty bool) *EstimateModelMemory { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// For a list of the properties that you can specify in the +// `analysis_config` component of the body of this API. +// API name: analysis_config +func (r *EstimateModelMemory) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() + + return r +} + +// Estimates of the highest cardinality in a single bucket that is observed +// for influencer fields over the time period that the job analyzes data. +// To produce a good answer, values must be provided for all influencer +// fields. Providing values for fields that are not listed as `influencers` +// has no effect on the estimation. +// API name: max_bucket_cardinality +func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[string]int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MaxBucketCardinality = maxbucketcardinality + return r +} + +func (r *EstimateModelMemory) AddMaxBucketCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.MaxBucketCardinality == nil { + r.req.MaxBucketCardinality = make(map[string]int64) + } else { + tmp = r.req.MaxBucketCardinality + } + + tmp[key] = value + + r.req.MaxBucketCardinality = tmp + return r +} + +// Estimates of the cardinality that is observed for fields over the whole +// time period that the job analyzes data. To produce a good answer, values +// must be provided for fields referenced in the `by_field_name`, +// `over_field_name` and `partition_field_name` of any detectors. Providing +// values for other fields has no effect on the estimation. It can be +// omitted from the request if no detectors have a `by_field_name`, +// `over_field_name` or `partition_field_name`. 
+// API name: overall_cardinality +func (r *EstimateModelMemory) OverallCardinality(overallcardinality map[string]int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.OverallCardinality = overallcardinality + return r +} + +func (r *EstimateModelMemory) AddOverallCardinality(key string, value int64) *EstimateModelMemory { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]int64 + if r.req.OverallCardinality == nil { + r.req.OverallCardinality = make(map[string]int64) + } else { + tmp = r.req.OverallCardinality + } + + tmp[key] = value + + r.req.OverallCardinality = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/request.go new file mode 100644 index 000000000..4a38f3d6b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/request.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package estimatemodelmemory + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package estimatemodelmemory +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L71 +type Request struct { + + // AnalysisConfig For a list of the properties that you can specify in the + // `analysis_config` component of the body of this API. + AnalysisConfig *types.AnalysisConfig `json:"analysis_config,omitempty"` + // MaxBucketCardinality Estimates of the highest cardinality in a single bucket that is observed + // for influencer fields over the time period that the job analyzes data. + // To produce a good answer, values must be provided for all influencer + // fields. Providing values for fields that are not listed as `influencers` + // has no effect on the estimation. + MaxBucketCardinality map[string]int64 `json:"max_bucket_cardinality,omitempty"` + // OverallCardinality Estimates of the cardinality that is observed for fields over the whole + // time period that the job analyzes data. To produce a good answer, values + // must be provided for fields referenced in the `by_field_name`, + // `over_field_name` and `partition_field_name` of any detectors. 
Providing + // values for other fields has no effect on the estimation. It can be + // omitted from the request if no detectors have a `by_field_name`, + // `over_field_name` or `partition_field_name`. + OverallCardinality map[string]int64 `json:"overall_cardinality,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + MaxBucketCardinality: make(map[string]int64, 0), + OverallCardinality: make(map[string]int64, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Estimatemodelmemory request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/response.go new file mode 100644 index 000000000..e4a803d37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package estimatemodelmemory + +// Response holds the response body struct for the package estimatemodelmemory +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 +type Response struct { + ModelMemoryEstimate string `json:"model_memory_estimate"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/evaluate_data_frame.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/evaluate_data_frame.go new file mode 100644 index 000000000..f4a321b4f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/evaluate_data_frame.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Evaluate data frame analytics. +// +// The API packages together commonly used evaluation metrics for various types +// of machine learning features. This has been designed for use on indexes +// created by data frame analytics. Evaluation requires both a ground truth +// field and an analytics result field to be present. +package evaluatedataframe + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EvaluateDataFrame struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEvaluateDataFrame type alias for index. +type NewEvaluateDataFrame func() *EvaluateDataFrame + +// NewEvaluateDataFrameFunc returns a new instance of EvaluateDataFrame with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEvaluateDataFrameFunc(tp elastictransport.Interface) NewEvaluateDataFrame { + return func() *EvaluateDataFrame { + n := New(tp) + + return n + } +} + +// Evaluate data frame analytics. +// +// The API packages together commonly used evaluation metrics for various types +// of machine learning features. This has been designed for use on indexes +// created by data frame analytics. Evaluation requires both a ground truth +// field and an analytics result field to be present. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame +func New(tp elastictransport.Interface) *EvaluateDataFrame { + r := &EvaluateDataFrame{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *EvaluateDataFrame) Raw(raw io.Reader) *EvaluateDataFrame { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *EvaluateDataFrame) Request(req *Request) *EvaluateDataFrame { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
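Alternatively, the typed Request can be loaded from JSON through the FromJSON helper added in request.go below. A sketch with placeholder index and field names; the outlier_detection block follows the public evaluate-data-frame API and is an assumption, not something this diff defines.

package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe"
)

// evaluate runs an outlier-detection evaluation against a hypothetical
// data frame analytics destination index.
func evaluate(ctx context.Context, tp elastictransport.Interface) (*evaluatedataframe.Response, error) {
	// FromJSON deserialises an arbitrary payload into the typed Request.
	// Index and the two field names are placeholders.
	req, err := evaluatedataframe.NewRequest().FromJSON(`{
	  "index": "my-analytics-dest",
	  "evaluation": {
	    "outlier_detection": {
	      "actual_field": "is_outlier",
	      "predicted_probability_field": "ml.outlier_score"
	    }
	  }
	}`)
	if err != nil {
		return nil, err
	}

	return evaluatedataframe.NewEvaluateDataFrameFunc(tp)().
		Request(req).
		Do(ctx)
	// On success, exactly one of Response.Classification,
	// Response.OutlierDetection or Response.Regression is populated,
	// matching the evaluation type requested.
}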
+func (r *EvaluateDataFrame) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for EvaluateDataFrame: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("_evaluate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r EvaluateDataFrame) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.evaluate_data_frame") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.evaluate_data_frame") + if reader := instrument.RecordRequestBody(ctx, "ml.evaluate_data_frame", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.evaluate_data_frame") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EvaluateDataFrame query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a evaluatedataframe.Response +func (r EvaluateDataFrame) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.evaluate_data_frame") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the EvaluateDataFrame headers map. +func (r *EvaluateDataFrame) Header(key, value string) *EvaluateDataFrame { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EvaluateDataFrame) ErrorTrace(errortrace bool) *EvaluateDataFrame { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *EvaluateDataFrame) FilterPath(filterpaths ...string) *EvaluateDataFrame { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *EvaluateDataFrame) Human(human bool) *EvaluateDataFrame { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *EvaluateDataFrame) Pretty(pretty bool) *EvaluateDataFrame { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Defines the type of evaluation you want to perform. +// API name: evaluation +func (r *EvaluateDataFrame) Evaluation(evaluation types.DataframeEvaluationContainerVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Evaluation = *evaluation.DataframeEvaluationContainerCaster() + + return r +} + +// Defines the `index` in which the evaluation will be performed. +// API name: index +func (r *EvaluateDataFrame) Index(indexname string) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Index = indexname + + return r +} + +// A query clause that retrieves a subset of data from the source index. 
+// API name: query +func (r *EvaluateDataFrame) Query(query types.QueryVariant) *EvaluateDataFrame { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/request.go new file mode 100644 index 000000000..b27112f1f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/request.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package evaluatedataframe + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package evaluatedataframe +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L61 +type Request struct { + + // Evaluation Defines the type of evaluation you want to perform. + Evaluation types.DataframeEvaluationContainer `json:"evaluation"` + // Index Defines the `index` in which the evaluation will be performed. + Index string `json:"index"` + // Query A query clause that retrieves a subset of data from the source index. 
+ Query *types.Query `json:"query,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Evaluatedataframe request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "evaluation": + if err := dec.Decode(&s.Evaluation); err != nil { + return fmt.Errorf("%s | %w", "Evaluation", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/response.go new file mode 100644 index 000000000..436f1117f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package evaluatedataframe + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package evaluatedataframe +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L44 +type Response struct { + + // Classification Evaluation results for a classification analysis. + // It outputs a prediction that identifies to which of the classes each document + // belongs. + Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` + // OutlierDetection Evaluation results for an outlier detection analysis. + // It outputs the probability that each document is an outlier. + OutlierDetection *types.DataframeOutlierDetectionSummary `json:"outlier_detection,omitempty"` + // Regression Evaluation results for a regression analysis which outputs a prediction of + // values. 
+ Regression *types.DataframeRegressionSummary `json:"regression,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go new file mode 100644 index 000000000..dc51955b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go @@ -0,0 +1,511 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Explain data frame analytics config. +// +// This API provides explanations for a data frame analytics config that either +// exists already or one that has not been created yet. The following +// explanations are provided: +// * which fields are included or not in the analysis and why, +// * how much memory is estimated to be required. The estimate can be used when +// deciding the appropriate value for model_memory_limit setting later on. +// If you have object fields or fields that are excluded via source filtering, +// they are not included in the explanation. +package explaindataframeanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExplainDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExplainDataFrameAnalytics type alias for index. +type NewExplainDataFrameAnalytics func() *ExplainDataFrameAnalytics + +// NewExplainDataFrameAnalyticsFunc returns a new instance of ExplainDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewExplainDataFrameAnalyticsFunc(tp elastictransport.Interface) NewExplainDataFrameAnalytics { + return func() *ExplainDataFrameAnalytics { + n := New(tp) + + return n + } +} + +// Explain data frame analytics config. +// +// This API provides explanations for a data frame analytics config that either +// exists already or one that has not been created yet. The following +// explanations are provided: +// * which fields are included or not in the analysis and why, +// * how much memory is estimated to be required. The estimate can be used when +// deciding the appropriate value for model_memory_limit setting later on. +// If you have object fields or fields that are excluded via source filtering, +// they are not included in the explanation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics +func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { + r := &ExplainDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ExplainDataFrameAnalytics) Raw(raw io.Reader) *ExplainDataFrameAnalytics { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ExplainDataFrameAnalytics) Request(req *Request) *ExplainDataFrameAnalytics { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
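HttpRequest builds the outgoing request without executing it, which is useful for inspection or custom transports; Perform and Do call it internally. A short sketch, assuming the typed client exposes the endpoint as es.Ml.ExplainDataFrameAnalytics() (the job id and address are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build, but do not send, the explain request for an existing job; with
	// an id set the path is /_ml/data_frame/analytics/{id}/_explain.
	req, err := es.Ml.ExplainDataFrameAnalytics().Id("my-job").HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path)
}
```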
+func (r *ExplainDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ExplainDataFrameAnalytics: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString("_explain") + + method = http.MethodPost + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_explain") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ExplainDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx = instrument.Start(providedCtx, "ml.explain_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.explain_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.explain_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.explain_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExplainDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explaindataframeanalytics.Response +func (r ExplainDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.explain_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ExplainDataFrameAnalytics headers map. +func (r *ExplainDataFrameAnalytics) Header(key, value string) *ExplainDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and +// underscores. It must start and end with alphanumeric characters. +// API Name: id +func (r *ExplainDataFrameAnalytics) Id(id string) *ExplainDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur.
+// API name: error_trace +func (r *ExplainDataFrameAnalytics) ErrorTrace(errortrace bool) *ExplainDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExplainDataFrameAnalytics) FilterPath(filterpaths ...string) *ExplainDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExplainDataFrameAnalytics) Human(human bool) *ExplainDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ExplainDataFrameAnalytics) Pretty(pretty bool) *ExplainDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. +// API name: allow_lazy_start +func (r *ExplainDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// The analysis configuration, which contains the information necessary to +// perform one of the following types of analysis: classification, outlier +// detection, or regression. +// API name: analysis +func (r *ExplainDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Analysis = analysis.DataframeAnalysisContainerCaster() + + return r +} + +// Specify includes and/or excludes patterns to select which fields will be +// included in the analysis. The patterns specified in excludes are applied +// last, therefore excludes takes precedence. In other words, if the same +// field is specified in both includes and excludes, then the field will not +// be included in the analysis. +// API name: analyzed_fields +func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() + + return r +} + +// A description of the job. +// API name: description +func (r *ExplainDataFrameAnalytics) Description(description string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The destination configuration, consisting of index and optionally +// results_field (ml by default). 
+// API name: dest +func (r *ExplainDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = dest.DataframeAnalyticsDestinationCaster() + + return r +} + +// The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. +// API name: max_num_threads +func (r *ExplainDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +// create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. +// API name: model_memory_limit +func (r *ExplainDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelMemoryLimit = &modelmemorylimit + + return r +} + +// The configuration of how to source the analysis data. It requires an +// index. Optionally, query and _source may be specified. +// API name: source +func (r *ExplainDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *ExplainDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = source.DataframeAnalyticsSourceCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/request.go new file mode 100644 index 000000000..b88423cd6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/request.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explaindataframeanalytics + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package explaindataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L120 +type Request struct { + + // AllowLazyStart Specifies whether this job can start when there is insufficient machine + // learning node capacity for it to be immediately assigned to a node. + AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` + // Analysis The analysis configuration, which contains the information necessary to + // perform one of the following types of analysis: classification, outlier + // detection, or regression. + Analysis *types.DataframeAnalysisContainer `json:"analysis,omitempty"` + // AnalyzedFields Specify includes and/or excludes patterns to select which fields will be + // included in the analysis. The patterns specified in excludes are applied + // last, therefore excludes takes precedence. In other words, if the same + // field is specified in both includes and excludes, then the field will not + // be included in the analysis. + AnalyzedFields *types.DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // Dest The destination configuration, consisting of index and optionally + // results_field (ml by default). + Dest *types.DataframeAnalyticsDestination `json:"dest,omitempty"` + // MaxNumThreads The maximum number of threads to be used by the analysis. Using more + // threads may decrease the time necessary to complete the analysis at the + // cost of using more CPU. Note that the process may use additional threads + // for operational functionality other than the analysis itself. + MaxNumThreads *int `json:"max_num_threads,omitempty"` + // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for + // analytical processing. If your `elasticsearch.yml` file contains an + // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + // create data frame analytics jobs that have `model_memory_limit` values + // greater than that setting. + ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + // Source The configuration of how to source the analysis data. It requires an + // index. Optionally, query and _source may be specified. 
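Because the Request struct mirrors the wire format one-to-one, a body can also be loaded from JSON with FromJSON, defined further down in this file. A hedged sketch with illustrative values (source.index is required by the API; analysis carries exactly one of classification, outlier_detection, or regression):

```go
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics"
)

func main() {
	// Keys mirror the struct's JSON tags; values are illustrative.
	req, err := explaindataframeanalytics.NewRequest().FromJSON(`{
	  "source":   { "index": ["my-source-index"] },
	  "analysis": { "outlier_detection": {} }
	}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed explain request: %+v\n", req)
}
```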
+ Source *types.DataframeAnalyticsSource `json:"source,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Explaindataframeanalytics request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/response.go new file mode 100644 index 000000000..a697c42c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package explaindataframeanalytics + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package explaindataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 +type Response struct { + + // FieldSelection An array of objects that explain selection for each field, sorted by the + // field names. + FieldSelection []types.DataframeAnalyticsFieldSelection `json:"field_selection"` + // MemoryEstimation An array of objects that explain selection for each field, sorted by the + // field names. + MemoryEstimation types.DataframeAnalyticsMemoryEstimation `json:"memory_estimation"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/flush_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/flush_job.go new file mode 100644 index 000000000..bf7380883 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/flush_job.go @@ -0,0 +1,441 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Force buffered data to be processed. +// The flush jobs API is only applicable when sending data for analysis using +// the post data API. Depending on the content of the buffer, then it might +// additionally calculate new results. Both flush and close operations are +// similar, however the flush is more efficient if you are expecting to send +// more data for analysis. When flushing, the job remains open and is available +// to continue analyzing data. A close operation additionally prunes and +// persists the model state to disk and the job must be opened again before +// analyzing further data. +package flushjob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FlushJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFlushJob type alias for index. +type NewFlushJob func(jobid string) *FlushJob + +// NewFlushJobFunc returns a new instance of FlushJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFlushJobFunc(tp elastictransport.Interface) NewFlushJob { + return func(jobid string) *FlushJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Force buffered data to be processed. +// The flush jobs API is only applicable when sending data for analysis using +// the post data API. Depending on the content of the buffer, then it might +// additionally calculate new results. Both flush and close operations are +// similar, however the flush is more efficient if you are expecting to send +// more data for analysis. When flushing, the job remains open and is available +// to continue analyzing data. A close operation additionally prunes and +// persists the model state to disk and the job must be opened again before +// analyzing further data. 
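In practice a flush is a single call against an open job. A minimal sketch, assuming the typed client exposes the endpoint as es.Ml.FlushJob(jobID) (job name and address are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Flush buffered data for an open job and calculate interim results for
	// the most recent buckets ("my-job" is illustrative).
	res, err := es.Ml.FlushJob("my-job").CalcInterim(true).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("flushed:", res.Flushed)
}
```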
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job +func New(tp elastictransport.Interface) *FlushJob { + r := &FlushJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *FlushJob) Raw(raw io.Reader) *FlushJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *FlushJob) Request(req *Request) *FlushJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *FlushJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for FlushJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_flush") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r FlushJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx = instrument.Start(providedCtx, "ml.flush_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.flush_job") + if reader := instrument.RecordRequestBody(ctx, "ml.flush_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.flush_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the FlushJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a flushjob.Response +func (r FlushJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.flush_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the FlushJob headers map. +func (r *FlushJob) Header(key, value string) *FlushJob { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *FlushJob) _jobid(jobid string) *FlushJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *FlushJob) ErrorTrace(errortrace bool) *FlushJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch.
+// API name: filter_path +func (r *FlushJob) FilterPath(filterpaths ...string) *FlushJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *FlushJob) Human(human bool) *FlushJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *FlushJob) Pretty(pretty bool) *FlushJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `advance_time` query parameter. +// API name: advance_time +func (r *FlushJob) AdvanceTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdvanceTime = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `calc_interim` query parameter. +// API name: calc_interim +func (r *FlushJob) CalcInterim(calcinterim bool) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CalcInterim = &calcinterim + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *FlushJob) End(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `skip_time` query parameter. +// API name: skip_time +func (r *FlushJob) SkipTime(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SkipTime = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `start` query parameter. +// API name: start +func (r *FlushJob) Start(datetime types.DateTimeVariant) *FlushJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/request.go new file mode 100644 index 000000000..124e96d31 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/request.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package flushjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package flushjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L108 +type Request struct { + + // AdvanceTime Refer to the description for the `advance_time` query parameter. + AdvanceTime types.DateTime `json:"advance_time,omitempty"` + // CalcInterim Refer to the description for the `calc_interim` query parameter. + CalcInterim *bool `json:"calc_interim,omitempty"` + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + // SkipTime Refer to the description for the `skip_time` query parameter. + SkipTime types.DateTime `json:"skip_time,omitempty"` + // Start Refer to the description for the `start` query parameter. + Start types.DateTime `json:"start,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Flushjob request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "advance_time": + if err := dec.Decode(&s.AdvanceTime); err != nil { + return fmt.Errorf("%s | %w", "AdvanceTime", err) + } + + case "calc_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CalcInterim", err) + } + s.CalcInterim = &value + case bool: + s.CalcInterim = &v + } + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "skip_time": + if err := dec.Decode(&s.SkipTime); err != nil { + return fmt.Errorf("%s | %w", "SkipTime", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/response.go new file mode 100644 index 000000000..1d2bb7a14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package flushjob + +// Response holds the response body struct for the package flushjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 +type Response struct { + Flushed bool `json:"flushed"` + // LastFinalizedBucketEnd Provides the timestamp (in milliseconds since the epoch) of the end of + // the last bucket that was processed. + LastFinalizedBucketEnd *int64 `json:"last_finalized_bucket_end,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/forecast.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/forecast.go new file mode 100644 index 000000000..53c11fcc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/forecast.go @@ -0,0 +1,410 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Predict future behavior of a time series. +// +// Forecasts are not supported for jobs that perform population analysis; an +// error occurs if you try to create a forecast for a job that has an +// `over_field_name` in its configuration. Forecasts predict future behavior +// based on historical data.
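A hedged usage sketch for the forecast endpoint added below, assuming the typed client exposes it as es.Ml.Forecast(jobID) and that the response carries the usual acknowledged/forecast_id pair (all values illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Forecast the next three days for an open job; the body fields mirror
	// the duration/expires_in parameters documented below.
	req, err := forecast.NewRequest().FromJSON(`{"duration":"3d","expires_in":"14d"}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Ml.Forecast("my-job").Request(req).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("forecast id:", res.ForecastId)
}
```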
+package forecast + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Forecast struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewForecast type alias for index. +type NewForecast func(jobid string) *Forecast + +// NewForecastFunc returns a new instance of Forecast with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewForecastFunc(tp elastictransport.Interface) NewForecast { + return func(jobid string) *Forecast { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Predict future behavior of a time series. +// +// Forecasts are not supported for jobs that perform population analysis; an +// error occurs if you try to create a forecast for a job that has an +// `over_field_name` in its configuration. Forecasts predict future behavior +// based on historical data. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast +func New(tp elastictransport.Interface) *Forecast { + r := &Forecast{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Forecast) Raw(raw io.Reader) *Forecast { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Forecast) Request(req *Request) *Forecast { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters.
+func (r *Forecast) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Forecast: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_forecast") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
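Perform stops at the raw *http.Response and leaves status handling and body closing to the caller, whereas Do decodes into the typed Response and converts error payloads into *types.ElasticsearchError. A sketch of the raw path, with the same assumptions as the earlier examples:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // illustrative address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Perform returns the raw response; the caller owns status checks and
	// closing the body ("my-job" is illustrative).
	res, err := es.Ml.Forecast("my-job").Perform(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body))
}
```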
+func (r Forecast) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx = instrument.Start(providedCtx, "ml.forecast") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.forecast") + if reader := instrument.RecordRequestBody(ctx, "ml.forecast", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.forecast") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Forecast query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a forecast.Response +func (r Forecast) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.forecast") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Forecast headers map. +func (r *Forecast) Header(key, value string) *Forecast { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. The job must be open when you +// create a forecast; otherwise, an error occurs. +// API Name: jobid +func (r *Forecast) _jobid(jobid string) *Forecast { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Forecast) ErrorTrace(errortrace bool) *Forecast { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch.
+// API name: filter_path +func (r *Forecast) FilterPath(filterpaths ...string) *Forecast { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Forecast) Human(human bool) *Forecast { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Forecast) Pretty(pretty bool) *Forecast { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `duration` query parameter. +// API name: duration +func (r *Forecast) Duration(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Duration = *duration.DurationCaster() + + return r +} + +// Refer to the description for the `expires_in` query parameter. +// API name: expires_in +func (r *Forecast) ExpiresIn(duration types.DurationVariant) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExpiresIn = *duration.DurationCaster() + + return r +} + +// Refer to the description for the `max_model_memory` query parameter. +// API name: max_model_memory +func (r *Forecast) MaxModelMemory(maxmodelmemory string) *Forecast { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxModelMemory = &maxmodelmemory + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/request.go new file mode 100644 index 000000000..69c6a445e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/request.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
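The fluent setters above lazily allocate the request body, so Duration, ExpiresIn and MaxModelMemory all travel in the POST body rather than the query string. An end-to-end sketch rather than canonical usage: the address and job id are placeholders, the anomaly detection job must already be open, and NewForecastFunc is again assumed by symmetry with the generated constructors shown elsewhere in this diff.

    package main

    import (
        "context"
        "fmt"
        "log"

        elasticsearch "github.com/elastic/go-elasticsearch/v9"
        "github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast"
    )

    func main() {
        es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
            Addresses: []string{"http://localhost:9200"}, // placeholder endpoint
        })
        if err != nil {
            log.Fatal(err)
        }

        // Build the body directly; types.Duration is a union type, and a
        // duration string such as "1d" is one of its accepted forms.
        req := forecast.NewRequest()
        req.Duration = "1d"
        req.ExpiresIn = "2d"

        res, err := forecast.NewForecastFunc(es.Transport)("my-job-id").
            Request(req).
            Do(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(res.Acknowledged, res.ForecastId)
    }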
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package forecast + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package forecast +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/forecast/MlForecastJobRequest.ts#L24-L95 +type Request struct { + + // Duration Refer to the description for the `duration` query parameter. + Duration types.Duration `json:"duration,omitempty"` + // ExpiresIn Refer to the description for the `expires_in` query parameter. + ExpiresIn types.Duration `json:"expires_in,omitempty"` + // MaxModelMemory Refer to the description for the `max_model_memory` query parameter. + MaxModelMemory *string `json:"max_model_memory,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Forecast request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "duration": + if err := dec.Decode(&s.Duration); err != nil { + return fmt.Errorf("%s | %w", "Duration", err) + } + + case "expires_in": + if err := dec.Decode(&s.ExpiresIn); err != nil { + return fmt.Errorf("%s | %w", "ExpiresIn", err) + } + + case "max_model_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxModelMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxModelMemory = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/response.go new file mode 100644 index 000000000..4be5f1ffb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
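FromJSON above runs arbitrary JSON through the custom UnmarshalJSON that follows it, so quoted values such as max_model_memory are unquoted on the way in. A small illustrative sketch (the values are made up); the resulting request can be handed to the endpoint via Request(req):

    body := `{"duration":"3d","expires_in":"1d","max_model_memory":"20mb"}`
    req, err := forecast.NewRequest().FromJSON(body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(*req.MaxModelMemory) // 20mb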
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package forecast + +// Response holds the response body struct for the package forecast +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 +type Response struct { + Acknowledged bool `json:"acknowledged"` + ForecastId string `json:"forecast_id"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/get_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/get_buckets.go new file mode 100644 index 000000000..0781cbbab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/get_buckets.go @@ -0,0 +1,519 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection job results for buckets. +// The API presents a chronological view of the records, grouped by bucket. +package getbuckets + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + timestampMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetBuckets struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + timestamp string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetBuckets type alias for index. +type NewGetBuckets func(jobid string) *GetBuckets + +// NewGetBucketsFunc returns a new instance of GetBuckets with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetBucketsFunc(tp elastictransport.Interface) NewGetBuckets { + return func(jobid string) *GetBuckets { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Get anomaly detection job results for buckets. +// The API presents a chronological view of the records, grouped by bucket. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets +func New(tp elastictransport.Interface) *GetBuckets { + r := &GetBuckets{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetBuckets) Raw(raw io.Reader) *GetBuckets { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetBuckets) Request(req *Request) *GetBuckets { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetBuckets) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetBuckets: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|timestampMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("buckets") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "timestamp", r.timestamp) + } + path.WriteString(r.timestamp) + + method = http.MethodPost + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("buckets") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an 
http.Response. +func (r GetBuckets) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_buckets") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_buckets") + if reader := instrument.RecordRequestBody(ctx, "ml.get_buckets", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_buckets") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetBuckets query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getbuckets.Response +func (r GetBuckets) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_buckets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetBuckets headers map. +func (r *GetBuckets) Header(key, value string) *GetBuckets { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetBuckets) _jobid(jobid string) *GetBuckets { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// Timestamp The timestamp of a single bucket result. If you do not specify this +// parameter, the API returns information about all buckets. +// API Name: timestamp +func (r *GetBuckets) Timestamp(timestamp string) *GetBuckets { + r.paramSet |= timestampMask + r.timestamp = timestamp + + return r +} + +// From Skips the specified number of buckets. 
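On a non-2xx reply, Do above decodes the body into *types.ElasticsearchError and returns it as the error, so callers can branch on the HTTP status. A sketch reusing the placeholder client from the forecast example, with imports (context, errors, fmt, log, plus the getbuckets and types packages) elided:

    res, err := getbuckets.NewGetBucketsFunc(es.Transport)("my-job-id").
        Timestamp("1600000000000"). // optional: restrict to a single bucket
        Do(context.Background())
    if err != nil {
        var esErr *types.ElasticsearchError
        if errors.As(err, &esErr) {
            log.Fatalf("elasticsearch status %d: %s", esErr.Status, esErr.Error())
        }
        log.Fatal(err) // transport-level failure
    }
    fmt.Println(res.Count, len(res.Buckets))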
+// API name: from +func (r *GetBuckets) From(from int) *GetBuckets { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of buckets to obtain. +// API name: size +func (r *GetBuckets) Size(size int) *GetBuckets { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetBuckets) ErrorTrace(errortrace bool) *GetBuckets { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetBuckets) FilterPath(filterpaths ...string) *GetBuckets { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetBuckets) Human(human bool) *GetBuckets { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetBuckets) Pretty(pretty bool) *GetBuckets { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `anomaly_score` query parameter. +// API name: anomaly_score +func (r *GetBuckets) AnomalyScore(anomalyscore types.Float64) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnomalyScore = &anomalyscore + + return r +} + +// Refer to the description for the `desc` query parameter. +// API name: desc +func (r *GetBuckets) Desc(desc bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Desc = &desc + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *GetBuckets) End(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `exclude_interim` query parameter. +// API name: exclude_interim +func (r *GetBuckets) ExcludeInterim(excludeinterim bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExcludeInterim = &excludeinterim + + return r +} + +// Refer to the description for the `expand` query parameter. 
+// API name: expand +func (r *GetBuckets) Expand(expand bool) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expand = &expand + + return r +} + +// API name: page +func (r *GetBuckets) Page(page types.PageVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} + +// Refer to the description for the `sort` query parameter. +// API name: sort +func (r *GetBuckets) Sort(field string) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sort = &field + + return r +} + +// Refer to the description for the `start` query parameter. +// API name: start +func (r *GetBuckets) Start(datetime types.DateTimeVariant) *GetBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/request.go new file mode 100644 index 000000000..c4216999c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/request.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getbuckets + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getbuckets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L145 +type Request struct { + + // AnomalyScore Refer to the description for the `anomaly_score` query parameter. + AnomalyScore *types.Float64 `json:"anomaly_score,omitempty"` + // Desc Refer to the description for the `desc` query parameter. + Desc *bool `json:"desc,omitempty"` + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + // ExcludeInterim Refer to the description for the `exclude_interim` query parameter. + ExcludeInterim *bool `json:"exclude_interim,omitempty"` + // Expand Refer to the description for the `expand` query parameter.
+ Expand *bool `json:"expand,omitempty"` + Page *types.Page `json:"page,omitempty"` + // Sort Refer to the description for the `sort` query parameter. + Sort *string `json:"sort,omitempty"` + // Start Refer to the description for the `start` query parameter. + Start types.DateTime `json:"start,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getbuckets request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyScore", err) + } + f := types.Float64(value) + s.AnomalyScore = &f + case float64: + f := types.Float64(v) + s.AnomalyScore = &f + } + + case "desc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Desc", err) + } + s.Desc = &value + case bool: + s.Desc = &v + } + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "exclude_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExcludeInterim", err) + } + s.ExcludeInterim = &value + case bool: + s.ExcludeInterim = &v + } + + case "expand": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Expand", err) + } + s.Expand = &value + case bool: + s.Expand = &v + } + + case "page": + if err := dec.Decode(&s.Page); err != nil { + return fmt.Errorf("%s | %w", "Page", err) + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/response.go new file mode 100644 index 000000000..b8fec7af6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getbuckets + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getbuckets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 +type Response struct { + Buckets []types.BucketSummary `json:"buckets"` + Count int64 `json:"count"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/get_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/get_calendar_events.go new file mode 100644 index 000000000..a18abfc68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/get_calendar_events.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get info about events in calendars. +package getcalendarevents + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetCalendarEvents struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + calendarid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetCalendarEvents type alias for index. +type NewGetCalendarEvents func(calendarid string) *GetCalendarEvents + +// NewGetCalendarEventsFunc returns a new instance of GetCalendarEvents with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetCalendarEventsFunc(tp elastictransport.Interface) NewGetCalendarEvents { + return func(calendarid string) *GetCalendarEvents { + n := New(tp) + + n._calendarid(calendarid) + + return n + } +} + +// Get info about events in calendars. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events +func New(tp elastictransport.Interface) *GetCalendarEvents { + r := &GetCalendarEvents{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetCalendarEvents) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + path.WriteString("/") + path.WriteString("events") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
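GetCalendarEvents carries no request body: HttpRequest above issues a plain GET and sets only the Accept header, so all filtering happens through query parameters. A usage sketch with placeholder values and the same assumed client; the Start, End and Size setters appear further down in this file:

    events, err := getcalendarevents.NewGetCalendarEventsFunc(es.Transport)("_all").
        Start("now-30d"). // events with timestamps after this time
        End("now").
        Size(100).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(events.Count, len(events.Events))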
+func (r GetCalendarEvents) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_calendar_events") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_calendar_events") + if reader := instrument.RecordRequestBody(ctx, "ml.get_calendar_events", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_calendar_events") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetCalendarEvents query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getcalendarevents.Response +func (r GetCalendarEvents) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_calendar_events") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
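A sketch of the quick existence check described above (its implementation follows): any 2xx yields true, a 404 yields false with a nil error, and anything else is reported as an error, the response body being discarded in every case. Same assumed client:

    ok, err := getcalendarevents.NewGetCalendarEventsFunc(es.Transport)("my-calendar").
        IsSuccess(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if !ok {
        log.Println("calendar not found (404)")
    }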
+func (r GetCalendarEvents) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_calendar_events") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetCalendarEvents query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetCalendarEvents headers map. +func (r *GetCalendarEvents) Header(key, value string) *GetCalendarEvents { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. You can get information for +// multiple calendars by using a comma-separated list of ids or a wildcard +// expression. You can get information for all calendars by using `_all` or `*` +// or by omitting the calendar identifier. +// API Name: calendarid +func (r *GetCalendarEvents) _calendarid(calendarid string) *GetCalendarEvents { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// End Specifies to get events with timestamps earlier than this time. +// API name: end +func (r *GetCalendarEvents) End(datetime string) *GetCalendarEvents { + r.values.Set("end", datetime) + + return r +} + +// From Skips the specified number of events. +// API name: from +func (r *GetCalendarEvents) From(from int) *GetCalendarEvents { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// JobId Specifies to get events for a specific anomaly detection job identifier or +// job group. It must be used with a calendar identifier of `_all` or `*`. +// API name: job_id +func (r *GetCalendarEvents) JobId(id string) *GetCalendarEvents { + r.values.Set("job_id", id) + + return r +} + +// Size Specifies the maximum number of events to obtain. +// API name: size +func (r *GetCalendarEvents) Size(size int) *GetCalendarEvents { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Start Specifies to get events with timestamps after this time. +// API name: start +func (r *GetCalendarEvents) Start(datetime string) *GetCalendarEvents { + r.values.Set("start", datetime) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetCalendarEvents) ErrorTrace(errortrace bool) *GetCalendarEvents { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetCalendarEvents) FilterPath(filterpaths ...string) *GetCalendarEvents { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetCalendarEvents) Human(human bool) *GetCalendarEvents { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetCalendarEvents) Pretty(pretty bool) *GetCalendarEvents { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/response.go new file mode 100644 index 000000000..66ae8851f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcalendarevents + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getcalendarevents +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Events []types.CalendarEvent `json:"events"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/get_calendars.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/get_calendars.go new file mode 100644 index 000000000..95da3c064 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/get_calendars.go @@ -0,0 +1,397 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get calendar configuration info. +package getcalendars + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetCalendars struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + calendarid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetCalendars type alias for index. +type NewGetCalendars func() *GetCalendars + +// NewGetCalendarsFunc returns a new instance of GetCalendars with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetCalendarsFunc(tp elastictransport.Interface) NewGetCalendars { + return func() *GetCalendars { + n := New(tp) + + return n + } +} + +// Get calendar configuration info. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars +func New(tp elastictransport.Interface) *GetCalendars { + r := &GetCalendars{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetCalendars) Raw(raw io.Reader) *GetCalendars { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetCalendars) Request(req *Request) *GetCalendars { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetCalendars) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetCalendars: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + + method = http.MethodPost + case r.paramSet == calendaridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
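Both path branches above use POST because the optional page object travels in the request body; the calendar identifier itself is optional. A sketch that lists calendars without an id (taking the paramSet == 0 branch), with the same assumed client; the Size setter appears further down in this file:

    cals, err := getcalendars.NewGetCalendarsFunc(es.Transport)().
        Size(50). // supported only when the calendar id is omitted, as here
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cals.Count, len(cals.Calendars))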
+func (r GetCalendars) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_calendars") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_calendars") + if reader := instrument.RecordRequestBody(ctx, "ml.get_calendars", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_calendars") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetCalendars query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getcalendars.Response +func (r GetCalendars) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_calendars") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetCalendars headers map. +func (r *GetCalendars) Header(key, value string) *GetCalendars { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. You can get information for +// multiple calendars by using a comma-separated list of ids or a wildcard +// expression. You can get information for all calendars by using `_all` or `*` +// or by omitting the calendar identifier. +// API Name: calendarid +func (r *GetCalendars) CalendarId(calendarid string) *GetCalendars { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// From Skips the specified number of calendars. This parameter is supported only +// when you omit the calendar identifier. 
+// API name: from +func (r *GetCalendars) From(from int) *GetCalendars { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of calendars to obtain. This parameter is +// supported only when you omit the calendar identifier. +// API name: size +func (r *GetCalendars) Size(size int) *GetCalendars { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetCalendars) ErrorTrace(errortrace bool) *GetCalendars { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetCalendars) FilterPath(filterpaths ...string) *GetCalendars { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetCalendars) Human(human bool) *GetCalendars { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetCalendars) Pretty(pretty bool) *GetCalendars { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// This object is supported only when you omit the calendar identifier. +// API name: page +func (r *GetCalendars) Page(page types.PageVariant) *GetCalendars { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/request.go new file mode 100644 index 000000000..8f082090b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcalendars + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getcalendars +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L63 +type Request struct { + + // Page This object is supported only when you omit the calendar identifier. + Page *types.Page `json:"page,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getcalendars request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/response.go new file mode 100644 index 000000000..2d7cd005a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcalendars + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getcalendars +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 +type Response struct { + Calendars []types.Calendar `json:"calendars"` + Count int64 `json:"count"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/get_categories.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/get_categories.go new file mode 100644 index 000000000..4297b275a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/get_categories.go @@ -0,0 +1,439 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection job results for categories. +package getcategories + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + categoryidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetCategories struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + categoryid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetCategories type alias for index. +type NewGetCategories func(jobid string) *GetCategories + +// NewGetCategoriesFunc returns a new instance of GetCategories with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetCategoriesFunc(tp elastictransport.Interface) NewGetCategories { + return func(jobid string) *GetCategories { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Get anomaly detection job results for categories. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories +func New(tp elastictransport.Interface) *GetCategories { + r := &GetCategories{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetCategories) Raw(raw io.Reader) *GetCategories { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetCategories) Request(req *Request) *GetCategories { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetCategories) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetCategories: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|categoryidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("categories") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "categoryid", r.categoryid) + } + path.WriteString(r.categoryid) + + method = http.MethodPost + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("categories") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
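+// A typical end-to-end call goes through Do rather than Perform. Illustrative
+// usage (editor's sketch, not part of the generated file; assumes a configured
+// elastictransport.Interface named tp and a hypothetical job "my-job"):
+//
+//    res, err := NewGetCategoriesFunc(tp)("my-job").
+//        From(0).
+//        Size(100).
+//        Do(context.Background())
+//    if err != nil {
+//        // handle transport or API error
+//    }
+//    _ = res.Count // total number of matching categories
+//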
+func (r GetCategories) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_categories") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_categories") + if reader := instrument.RecordRequestBody(ctx, "ml.get_categories", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_categories") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetCategories query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getcategories.Response +func (r GetCategories) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_categories") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetCategories headers map. +func (r *GetCategories) Header(key, value string) *GetCategories { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetCategories) _jobid(jobid string) *GetCategories { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// CategoryId Identifier for the category, which is unique in the job. If you specify +// neither the category ID nor the partition_field_value, the API returns +// information about all categories. If you specify only the +// partition_field_value, it returns information about all categories for +// the specified partition. 
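+// For example (editor's sketch, hypothetical identifiers), fetching a single
+// category of a job:
+//
+//    res, err := NewGetCategoriesFunc(tp)("my-job").CategoryId("1").Do(context.Background())
+//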
+// API Name: categoryid +func (r *GetCategories) CategoryId(categoryid string) *GetCategories { + r.paramSet |= categoryidMask + r.categoryid = categoryid + + return r +} + +// From Skips the specified number of categories. +// API name: from +func (r *GetCategories) From(from int) *GetCategories { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// PartitionFieldValue Only return categories for the specified partition. +// API name: partition_field_value +func (r *GetCategories) PartitionFieldValue(partitionfieldvalue string) *GetCategories { + r.values.Set("partition_field_value", partitionfieldvalue) + + return r +} + +// Size Specifies the maximum number of categories to obtain. +// API name: size +func (r *GetCategories) Size(size int) *GetCategories { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetCategories) ErrorTrace(errortrace bool) *GetCategories { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetCategories) FilterPath(filterpaths ...string) *GetCategories { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetCategories) Human(human bool) *GetCategories { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetCategories) Pretty(pretty bool) *GetCategories { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Configures pagination. +// This parameter has the `from` and `size` properties. +// API name: page +func (r *GetCategories) Page(page types.PageVariant) *GetCategories { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/request.go new file mode 100644 index 000000000..355580446 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/request.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcategories + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getcategories +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L82 +type Request struct { + + // Page Configures pagination. + // This parameter has the `from` and `size` properties. + Page *types.Page `json:"page,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getcategories request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/response.go new file mode 100644 index 000000000..44f8f5b85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getcategories + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getcategories +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 +type Response struct { + Categories []types.Category `json:"categories"` + Count int64 `json:"count"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/get_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/get_datafeeds.go new file mode 100644 index 000000000..6c2ed8172 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/get_datafeeds.go @@ -0,0 +1,388 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get datafeeds configuration info. +// You can get information for multiple datafeeds in a single API request by +// using a comma-separated list of datafeeds or a wildcard expression. You can +// get information for all datafeeds by using `_all`, by specifying `*` as the +// ``, or by omitting the ``. +// This API returns a maximum of 10,000 datafeeds. +package getdatafeeds + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDatafeeds struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDatafeeds type alias for index. +type NewGetDatafeeds func() *GetDatafeeds + +// NewGetDatafeedsFunc returns a new instance of GetDatafeeds with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
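+// Illustrative usage (editor's sketch, not part of the generated file; assumes
+// a configured elastictransport.Interface named tp and a hypothetical datafeed
+// identifier):
+//
+//    res, err := NewGetDatafeedsFunc(tp)().
+//        DatafeedId("my-datafeed").
+//        AllowNoMatch(true).
+//        Do(context.Background())
+//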
+func NewGetDatafeedsFunc(tp elastictransport.Interface) NewGetDatafeeds { + return func() *GetDatafeeds { + n := New(tp) + + return n + } +} + +// Get datafeeds configuration info. +// You can get information for multiple datafeeds in a single API request by +// using a comma-separated list of datafeeds or a wildcard expression. You can +// get information for all datafeeds by using `_all`, by specifying `*` as the +// ``, or by omitting the ``. +// This API returns a maximum of 10,000 datafeeds. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds +func New(tp elastictransport.Interface) *GetDatafeeds { + r := &GetDatafeeds{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDatafeeds) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
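+// Unlike Do, Perform returns the raw *http.Response and leaves decoding and
+// closing the body to the caller. Editor's sketch:
+//
+//    res, err := NewGetDatafeedsFunc(tp)().Perform(context.Background())
+//    if err == nil {
+//        defer res.Body.Close()
+//        // inspect res.StatusCode or stream res.Body here
+//    }
+//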
+func (r GetDatafeeds) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_datafeeds") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_datafeeds") + if reader := instrument.RecordRequestBody(ctx, "ml.get_datafeeds", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_datafeeds") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDatafeeds query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatafeeds.Response +func (r GetDatafeeds) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeeds") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetDatafeeds) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeeds") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDatafeeds query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDatafeeds headers map. +func (r *GetDatafeeds) Header(key, value string) *GetDatafeeds { + r.headers.Set(key, value) + + return r +} + +// DatafeedId Identifier for the datafeed. It can be a datafeed identifier or a +// wildcard expression. If you do not specify one of these options, the API +// returns information about all datafeeds. +// API Name: datafeedid +func (r *GetDatafeeds) DatafeedId(datafeedid string) *GetDatafeeds { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no datafeeds that match. +// 2. Contains the `_all` string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value is `true`, which returns an empty `datafeeds` array +// when there are no matches and the subset of results when there are +// partial matches. If this parameter is `false`, the request returns a +// `404` status code when there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetDatafeeds) AllowNoMatch(allownomatch bool) *GetDatafeeds { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// ExcludeGenerated Indicates if certain fields should be removed from the configuration on +// retrieval. This allows the configuration to be in an acceptable format to +// be retrieved and then added to another cluster. +// API name: exclude_generated +func (r *GetDatafeeds) ExcludeGenerated(excludegenerated bool) *GetDatafeeds { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDatafeeds) ErrorTrace(errortrace bool) *GetDatafeeds { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDatafeeds) FilterPath(filterpaths ...string) *GetDatafeeds { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDatafeeds) Human(human bool) *GetDatafeeds { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDatafeeds) Pretty(pretty bool) *GetDatafeeds { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/response.go new file mode 100644 index 000000000..ad7f38ca7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatafeeds + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatafeeds +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Datafeeds []types.MLDatafeed `json:"datafeeds"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/get_datafeed_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/get_datafeed_stats.go new file mode 100644 index 000000000..41582a6a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/get_datafeed_stats.go @@ -0,0 +1,384 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get datafeed stats. +// You can get statistics for multiple datafeeds in a single API request by +// using a comma-separated list of datafeeds or a wildcard expression. You can +// get statistics for all datafeeds by using `_all`, by specifying `*` as the +// ``, or by omitting the ``. If the datafeed is stopped, the +// only information you receive is the `datafeed_id` and the `state`. +// This API returns a maximum of 10,000 datafeeds. +package getdatafeedstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDatafeedStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDatafeedStats type alias for index. +type NewGetDatafeedStats func() *GetDatafeedStats + +// NewGetDatafeedStatsFunc returns a new instance of GetDatafeedStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDatafeedStatsFunc(tp elastictransport.Interface) NewGetDatafeedStats { + return func() *GetDatafeedStats { + n := New(tp) + + return n + } +} + +// Get datafeed stats. +// You can get statistics for multiple datafeeds in a single API request by +// using a comma-separated list of datafeeds or a wildcard expression. You can +// get statistics for all datafeeds by using `_all`, by specifying `*` as the +// ``, or by omitting the ``. If the datafeed is stopped, the +// only information you receive is the `datafeed_id` and the `state`. +// This API returns a maximum of 10,000 datafeeds. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats +func New(tp elastictransport.Interface) *GetDatafeedStats { + r := &GetDatafeedStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
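+// HttpRequest is useful when the request should be built but not sent, for
+// example to inspect the generated path. Editor's sketch (hypothetical
+// datafeed identifier):
+//
+//    req, err := NewGetDatafeedStatsFunc(tp)().DatafeedId("my-datafeed").HttpRequest(context.Background())
+//    // req.URL.Path is "/_ml/datafeeds/my-datafeed/_stats"
+//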
+func (r *GetDatafeedStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetDatafeedStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_datafeed_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_datafeed_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_datafeed_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_datafeed_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDatafeedStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatafeedstats.Response +func (r GetDatafeedStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeed_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { 
+ err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetDatafeedStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_datafeed_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDatafeedStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDatafeedStats headers map. +func (r *GetDatafeedStats) Header(key, value string) *GetDatafeedStats { + r.headers.Set(key, value) + + return r +} + +// DatafeedId Identifier for the datafeed. It can be a datafeed identifier or a +// wildcard expression. If you do not specify one of these options, the API +// returns information about all datafeeds. +// API Name: datafeedid +func (r *GetDatafeedStats) DatafeedId(datafeedid string) *GetDatafeedStats { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no datafeeds that match. +// 2. Contains the `_all` string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value is `true`, which returns an empty `datafeeds` array +// when there are no matches and the subset of results when there are +// partial matches. If this parameter is `false`, the request returns a +// `404` status code when there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetDatafeedStats) AllowNoMatch(allownomatch bool) *GetDatafeedStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetDatafeedStats) ErrorTrace(errortrace bool) *GetDatafeedStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDatafeedStats) FilterPath(filterpaths ...string) *GetDatafeedStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDatafeedStats) Human(human bool) *GetDatafeedStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDatafeedStats) Pretty(pretty bool) *GetDatafeedStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/response.go new file mode 100644 index 000000000..ba8f5a1f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdatafeedstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdatafeedstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Datafeeds []types.DatafeedStats `json:"datafeeds"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go new file mode 100644 index 000000000..d6a790fb6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go @@ -0,0 +1,405 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data frame analytics job configuration info. +// You can get information for multiple data frame analytics jobs in a single +// API request by using a comma-separated list of data frame analytics jobs or a +// wildcard expression. +package getdataframeanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataFrameAnalytics type alias for index. +type NewGetDataFrameAnalytics func() *GetDataFrameAnalytics + +// NewGetDataFrameAnalyticsFunc returns a new instance of GetDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
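+// Illustrative usage (editor's sketch, not part of the generated file; tp is a
+// configured elastictransport.Interface, the job identifier is hypothetical):
+//
+//    res, err := NewGetDataFrameAnalyticsFunc(tp)().
+//        Id("my-analytics-job").
+//        AllowNoMatch(true).
+//        Size(50).
+//        Do(context.Background())
+//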
+func NewGetDataFrameAnalyticsFunc(tp elastictransport.Interface) NewGetDataFrameAnalytics { + return func() *GetDataFrameAnalytics { + n := New(tp) + + return n + } +} + +// Get data frame analytics job configuration info. +// You can get information for multiple data frame analytics jobs in a single +// API request by using a comma-separated list of data frame analytics jobs or a +// wildcard expression. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics +func New(tp elastictransport.Interface) *GetDataFrameAnalytics { + r := &GetDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.get_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdataframeanalytics.Response +func (r GetDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
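+// Editor's sketch of a quick existence check (hypothetical identifier):
+//
+//    exists, err := NewGetDataFrameAnalyticsFunc(tp)().Id("my-analytics-job").IsSuccess(context.Background())
+//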
+func (r GetDataFrameAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataFrameAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataFrameAnalytics headers map. +func (r *GetDataFrameAnalytics) Header(key, value string) *GetDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. If you do not specify this +// option, the API returns information for the first hundred data frame +// analytics jobs. +// API Name: id +func (r *GetDataFrameAnalytics) Id(id string) *GetDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no data frame analytics +// jobs that match. +// 2. Contains the `_all` string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value returns an empty data_frame_analytics array when there +// are no matches and the subset of results when there are partial matches. +// If this parameter is `false`, the request returns a 404 status code when +// there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetDataFrameAnalytics) AllowNoMatch(allownomatch bool) *GetDataFrameAnalytics { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// From Skips the specified number of data frame analytics jobs. +// API name: from +func (r *GetDataFrameAnalytics) From(from int) *GetDataFrameAnalytics { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of data frame analytics jobs to obtain. +// API name: size +func (r *GetDataFrameAnalytics) Size(size int) *GetDataFrameAnalytics { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ExcludeGenerated Indicates if certain fields should be removed from the configuration on +// retrieval. This allows the configuration to be in an acceptable format to +// be retrieved and then added to another cluster. +// API name: exclude_generated +func (r *GetDataFrameAnalytics) ExcludeGenerated(excludegenerated bool) *GetDataFrameAnalytics { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetDataFrameAnalytics) ErrorTrace(errortrace bool) *GetDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetDataFrameAnalytics) FilterPath(filterpaths ...string) *GetDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataFrameAnalytics) Human(human bool) *GetDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataFrameAnalytics) Pretty(pretty bool) *GetDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/response.go new file mode 100644 index 000000000..5ffc5205a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdataframeanalytics + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 +type Response struct { + Count int `json:"count"` + // DataFrameAnalytics An array of data frame analytics job resources, which are sorted by the id + // value in ascending order. 
+ DataFrameAnalytics []types.DataframeAnalyticsSummary `json:"data_frame_analytics"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go new file mode 100644 index 000000000..286600e2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go @@ -0,0 +1,401 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get data frame analytics job stats. +package getdataframeanalyticsstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataFrameAnalyticsStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetDataFrameAnalyticsStats type alias for index. +type NewGetDataFrameAnalyticsStats func() *GetDataFrameAnalyticsStats + +// NewGetDataFrameAnalyticsStatsFunc returns a new instance of GetDataFrameAnalyticsStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataFrameAnalyticsStatsFunc(tp elastictransport.Interface) NewGetDataFrameAnalyticsStats { + return func() *GetDataFrameAnalyticsStats { + n := New(tp) + + return n + } +} + +// Get data frame analytics job stats. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats +func New(tp elastictransport.Interface) *GetDataFrameAnalyticsStats { + r := &GetDataFrameAnalyticsStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetDataFrameAnalyticsStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
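HttpRequest only assembles the method, URL, and headers; nothing is sent until Perform runs. A sketch, under the same hypothetical transport setup, of the two path shapes the switch above produces (the Id setter appears further down in this file):

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()

    // paramSet == 0: stats for every data frame analytics job.
    all, err := getdataframeanalyticsstats.New(tp).HttpRequest(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(all.Method, all.URL.Path) // GET /_ml/data_frame/analytics/_stats

    // paramSet == idMask: stats for a single, hypothetical job id.
    one, err := getdataframeanalyticsstats.New(tp).Id("my-analytics").HttpRequest(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(one.Method, one.URL.Path) // GET /_ml/data_frame/analytics/my-analytics/_stats
}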
+func (r GetDataFrameAnalyticsStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_data_frame_analytics_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_data_frame_analytics_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_data_frame_analytics_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_data_frame_analytics_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetDataFrameAnalyticsStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdataframeanalyticsstats.Response +func (r GetDataFrameAnalyticsStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
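On a non-2xx status, Do decodes the body into *types.ElasticsearchError and returns it as the error value, so callers can branch on the decoded status. A sketch, with hypothetical address and id:

package main

import (
    "context"
    "errors"
    "fmt"
    "log"
    "net/url"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats"
    "github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }

    res, err := getdataframeanalyticsstats.New(tp).
        Id("my-analytics"). // hypothetical id
        AllowNoMatch(false). // missing ids become a 404 error instead of an empty list
        Do(context.Background())
    if err != nil {
        var esErr *types.ElasticsearchError
        if errors.As(err, &esErr) {
            log.Fatalf("elasticsearch returned status %d", esErr.Status)
        }
        log.Fatal(err) // transport-level failure
    }
    fmt.Printf("%d job(s) reporting stats\n", res.Count)
}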
+func (r GetDataFrameAnalyticsStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_data_frame_analytics_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetDataFrameAnalyticsStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetDataFrameAnalyticsStats headers map. +func (r *GetDataFrameAnalyticsStats) Header(key, value string) *GetDataFrameAnalyticsStats { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. If you do not specify this +// option, the API returns information for the first hundred data frame +// analytics jobs. +// API Name: id +func (r *GetDataFrameAnalyticsStats) Id(id string) *GetDataFrameAnalyticsStats { + r.paramSet |= idMask + r.id = id + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no data frame analytics +// jobs that match. +// 2. Contains the `_all` string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value returns an empty data_frame_analytics array when there +// are no matches and the subset of results when there are partial matches. +// If this parameter is `false`, the request returns a 404 status code when +// there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetDataFrameAnalyticsStats) AllowNoMatch(allownomatch bool) *GetDataFrameAnalyticsStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// From Skips the specified number of data frame analytics jobs. +// API name: from +func (r *GetDataFrameAnalyticsStats) From(from int) *GetDataFrameAnalyticsStats { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of data frame analytics jobs to obtain. +// API name: size +func (r *GetDataFrameAnalyticsStats) Size(size int) *GetDataFrameAnalyticsStats { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Verbose Defines whether the stats response should be verbose. +// API name: verbose +func (r *GetDataFrameAnalyticsStats) Verbose(verbose bool) *GetDataFrameAnalyticsStats { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetDataFrameAnalyticsStats) ErrorTrace(errortrace bool) *GetDataFrameAnalyticsStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetDataFrameAnalyticsStats) FilterPath(filterpaths ...string) *GetDataFrameAnalyticsStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetDataFrameAnalyticsStats) Human(human bool) *GetDataFrameAnalyticsStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetDataFrameAnalyticsStats) Pretty(pretty bool) *GetDataFrameAnalyticsStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/response.go new file mode 100644 index 000000000..99eff6cc9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getdataframeanalyticsstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getdataframeanalyticsstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L23-L29 +type Response struct { + Count int64 `json:"count"` + // DataFrameAnalytics An array of objects that contain usage information for data frame analytics + // jobs, which are sorted by the id value in ascending order. 
+ DataFrameAnalytics []types.DataframeAnalytics `json:"data_frame_analytics"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/get_filters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/get_filters.go new file mode 100644 index 000000000..1ae1d5130 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/get_filters.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get filters. +// You can get a single filter or all filters. +package getfilters + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + filteridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetFilters struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + filterid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetFilters type alias for index. +type NewGetFilters func() *GetFilters + +// NewGetFiltersFunc returns a new instance of GetFilters with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFiltersFunc(tp elastictransport.Interface) NewGetFilters { + return func() *GetFilters { + n := New(tp) + + return n + } +} + +// Get filters. +// You can get a single filter or all filters. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters +func New(tp elastictransport.Interface) *GetFilters { + r := &GetFilters{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetFilters) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + + method = http.MethodGet + case r.paramSet == filteridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "filterid", r.filterid) + } + path.WriteString(r.filterid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetFilters) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_filters") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_filters") + if reader := instrument.RecordRequestBody(ctx, "ml.get_filters", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_filters") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetFilters query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getfilters.Response +func (r GetFilters) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_filters") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + 
} + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetFilters) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_filters") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetFilters query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetFilters headers map. +func (r *GetFilters) Header(key, value string) *GetFilters { + r.headers.Set(key, value) + + return r +} + +// FilterId A string that uniquely identifies a filter. +// API Name: filterid +func (r *GetFilters) FilterId(filterid string) *GetFilters { + r.paramSet |= filteridMask + r.filterid = filterid + + return r +} + +// From Skips the specified number of filters. +// API name: from +func (r *GetFilters) From(from int) *GetFilters { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of filters to obtain. +// API name: size +func (r *GetFilters) Size(size int) *GetFilters { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetFilters) ErrorTrace(errortrace bool) *GetFilters { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetFilters) FilterPath(filterpaths ...string) *GetFilters { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *GetFilters) Human(human bool) *GetFilters { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetFilters) Pretty(pretty bool) *GetFilters { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/response.go new file mode 100644 index 000000000..d8b03aba5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getfilters + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getfilters +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Filters []types.MLFilter `json:"filters"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/get_influencers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/get_influencers.go new file mode 100644 index 000000000..ad8a8be6d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/get_influencers.go @@ -0,0 +1,452 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
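The getfilters Response just above carries the total Count alongside the current page of Filters, so a From/Size loop can use Count as its stop condition. A paging sketch with a hypothetical address and page size:

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()
    const pageSize = 50 // hypothetical page size

    // Walk every filter using the from/size query parameters.
    for from := 0; ; from += pageSize {
        res, err := getfilters.New(tp).From(from).Size(pageSize).Do(ctx)
        if err != nil {
            log.Fatal(err)
        }
        for _, f := range res.Filters {
            fmt.Println(f.FilterId) // filter's unique name on types.MLFilter
        }
        if from+pageSize >= int(res.Count) {
            break
        }
    }
}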
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection job results for influencers. +// Influencers are the entities that have contributed to, or are to blame for, +// the anomalies. Influencer results are available only if an +// `influencer_field_name` is specified in the job configuration. +package getinfluencers + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetInfluencers struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetInfluencers type alias for index. +type NewGetInfluencers func(jobid string) *GetInfluencers + +// NewGetInfluencersFunc returns a new instance of GetInfluencers with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetInfluencersFunc(tp elastictransport.Interface) NewGetInfluencers { + return func(jobid string) *GetInfluencers { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Get anomaly detection job results for influencers. +// Influencers are the entities that have contributed to, or are to blame for, +// the anomalies. Influencer results are available only if an +// `influencer_field_name` is specified in the job configuration. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers +func New(tp elastictransport.Interface) *GetInfluencers { + r := &GetInfluencers{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetInfluencers) Raw(raw io.Reader) *GetInfluencers { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetInfluencers) Request(req *Request) *GetInfluencers { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetInfluencers) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetInfluencers: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("influencers") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
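Unlike the GET-only endpoints above, this builder POSTs an optional body: HttpRequest marshals r.req unless Raw was set, in which case the reader is sent verbatim with the compatible-with=9 content type. A sketch using Raw with a hand-written page body and a hypothetical job id:

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"
    "strings"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }

    // The job id is a required path part, so the alias constructor takes it up front.
    build := getinfluencers.NewGetInfluencersFunc(tp)

    // Raw bypasses Request marshalling; the reader is sent as-is.
    res, err := build("my-job"). // hypothetical anomaly detection job id
        Raw(strings.NewReader(`{"page":{"from":0,"size":10}}`)).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d influencer(s)\n", res.Count)
}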
+func (r GetInfluencers) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_influencers") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_influencers") + if reader := instrument.RecordRequestBody(ctx, "ml.get_influencers", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_influencers") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetInfluencers query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getinfluencers.Response +func (r GetInfluencers) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_influencers") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetInfluencers headers map. +func (r *GetInfluencers) Header(key, value string) *GetInfluencers { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetInfluencers) _jobid(jobid string) *GetInfluencers { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// Desc If true, the results are sorted in descending order. +// API name: desc +func (r *GetInfluencers) Desc(desc bool) *GetInfluencers { + r.values.Set("desc", strconv.FormatBool(desc)) + + return r +} + +// End Returns influencers with timestamps earlier than this time. +// The default value means it is unset and results are not limited to +// specific timestamps. 
+// API name: end +func (r *GetInfluencers) End(datetime string) *GetInfluencers { + r.values.Set("end", datetime) + + return r +} + +// ExcludeInterim If true, the output excludes interim results. By default, interim results +// are included. +// API name: exclude_interim +func (r *GetInfluencers) ExcludeInterim(excludeinterim bool) *GetInfluencers { + r.values.Set("exclude_interim", strconv.FormatBool(excludeinterim)) + + return r +} + +// InfluencerScore Returns influencers with anomaly scores greater than or equal to this +// value. +// API name: influencer_score +func (r *GetInfluencers) InfluencerScore(influencerscore string) *GetInfluencers { + r.values.Set("influencer_score", influencerscore) + + return r +} + +// From Skips the specified number of influencers. +// API name: from +func (r *GetInfluencers) From(from int) *GetInfluencers { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of influencers to obtain. +// API name: size +func (r *GetInfluencers) Size(size int) *GetInfluencers { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Sort Specifies the sort field for the requested influencers. By default, the +// influencers are sorted by the `influencer_score` value. +// API name: sort +func (r *GetInfluencers) Sort(field string) *GetInfluencers { + r.values.Set("sort", field) + + return r +} + +// Start Returns influencers with timestamps after this time. The default value +// means it is unset and results are not limited to specific timestamps. +// API name: start +func (r *GetInfluencers) Start(datetime string) *GetInfluencers { + r.values.Set("start", datetime) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetInfluencers) ErrorTrace(errortrace bool) *GetInfluencers { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetInfluencers) FilterPath(filterpaths ...string) *GetInfluencers { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetInfluencers) Human(human bool) *GetInfluencers { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetInfluencers) Pretty(pretty bool) *GetInfluencers { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Configures pagination. +// This parameter has the `from` and `size` properties. 
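The page body does not have to go through the typed Page setter below; the generated Request type (request.go further down) can be loaded from arbitrary JSON via its FromJSON helper. A sketch, again with a hypothetical job id:

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }

    // Build the body from JSON instead of the typed Page setter.
    req, err := getinfluencers.NewRequest().FromJSON(`{"page":{"from":25,"size":25}}`)
    if err != nil {
        log.Fatal(err)
    }

    res, err := getinfluencers.NewGetInfluencersFunc(tp)("my-job"). // hypothetical job id
        Request(req).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("page of %d influencer(s)\n", len(res.Influencers))
}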
+// API name: page +func (r *GetInfluencers) Page(page types.PageVariant) *GetInfluencers { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/request.go new file mode 100644 index 000000000..1c871cc2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/request.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getinfluencers + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getinfluencers +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L105 +type Request struct { + + // Page Configures pagination. + // This parameter has the `from` and `size` properties. + Page *types.Page `json:"page,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getinfluencers request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/response.go new file mode 100644 index 000000000..6b2c23167 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getinfluencers
+
+import (
+    "github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getinfluencers
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29
+type Response struct {
+    Count int64 `json:"count"`
+    // Influencers Array of influencer objects
+    Influencers []types.Influencer `json:"influencers"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+    r := &Response{}
+    return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/get_jobs.go
new file mode 100644
index 000000000..ce4604266
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/get_jobs.go
@@ -0,0 +1,386 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get anomaly detection jobs configuration info.
+// You can get information for multiple anomaly detection jobs in a single API
+// request by using a group name, a comma-separated list of jobs, or a wildcard
+// expression. You can get information for all anomaly detection jobs by using
+// `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+package getjobs
+
+import (
+    "context"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+
+    "github.com/elastic/elastic-transport-go/v8/elastictransport"
+    "github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+    jobidMask = iota + 1
+)
+
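A sketch of the identifier forms the package comment above describes: a single job, a comma-separated list with a wildcard, and the explicit `_all` (all names hypothetical; the JobId setter appears further down in this file):

package main

import (
    "context"
    "fmt"
    "log"
    "net/url"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical address
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()

    // Hypothetical identifiers; omitting JobId entirely also returns every job.
    for _, id := range []string{"my-job", "my-group,other-*", "_all"} {
        res, err := getjobs.New(tp).JobId(id).Do(ctx)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%-20s -> %d job(s)\n", id, res.Count)
    }
}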
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetJobs struct {
+    transport elastictransport.Interface
+
+    headers http.Header
+    values  url.Values
+    path    url.URL
+
+    raw io.Reader
+
+    paramSet int
+
+    jobid string
+
+    spanStarted bool
+
+    instrument elastictransport.Instrumentation
+}
+
+// NewGetJobs type alias for index.
+type NewGetJobs func() *GetJobs
+
+// NewGetJobsFunc returns a new instance of GetJobs with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs {
+    return func() *GetJobs {
+        n := New(tp)
+
+        return n
+    }
+}
+
+// Get anomaly detection jobs configuration info.
+// You can get information for multiple anomaly detection jobs in a single API
+// request by using a group name, a comma-separated list of jobs, or a wildcard
+// expression. You can get information for all anomaly detection jobs by using
+// `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs
+func New(tp elastictransport.Interface) *GetJobs {
+    r := &GetJobs{
+        transport: tp,
+        values:    make(url.Values),
+        headers:   make(http.Header),
+    }
+
+    if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+        if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+            r.instrument = instrument
+        }
+    }
+
+    return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetJobs) HttpRequest(ctx context.Context) (*http.Request, error) {
+    var path strings.Builder
+    var method string
+    var req *http.Request
+
+    var err error
+
+    r.path.Scheme = "http"
+
+    switch {
+    case r.paramSet == jobidMask:
+        path.WriteString("/")
+        path.WriteString("_ml")
+        path.WriteString("/")
+        path.WriteString("anomaly_detectors")
+        path.WriteString("/")
+
+        if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+            instrument.RecordPathPart(ctx, "jobid", r.jobid)
+        }
+        path.WriteString(r.jobid)
+
+        method = http.MethodGet
+    case r.paramSet == 0:
+        path.WriteString("/")
+        path.WriteString("_ml")
+        path.WriteString("/")
+        path.WriteString("anomaly_detectors")
+
+        method = http.MethodGet
+    }
+
+    r.path.Path = path.String()
+    r.path.RawQuery = r.values.Encode()
+
+    if r.path.Path == "" {
+        return nil, ErrBuildPath
+    }
+
+    if ctx != nil {
+        req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+    } else {
+        req, err = http.NewRequest(method, r.path.String(), r.raw)
+    }
+
+    req.Header = r.headers.Clone()
+
+    if req.Header.Get("Accept") == "" {
+        req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+    }
+
+    if err != nil {
+        return req, fmt.Errorf("could not build http.Request: %w", err)
+    }
+
+    return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetJobs) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_jobs") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_jobs") + if reader := instrument.RecordRequestBody(ctx, "ml.get_jobs", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_jobs") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetJobs query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getjobs.Response +func (r GetJobs) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
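The ExcludeGenerated setter (below) strips cluster-generated fields so a job's configuration can be replayed against another cluster. An export sketch, assuming a hypothetical source cluster:

package main

import (
    "context"
    "encoding/json"
    "log"
    "net/url"
    "os"

    "github.com/elastic/elastic-transport-go/v8/elastictransport"
    "github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs"
)

func main() {
    u, _ := url.Parse("http://localhost:9200") // hypothetical source cluster
    tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
    if err != nil {
        log.Fatal(err)
    }

    // exclude_generated removes fields the cluster added on creation, leaving
    // configs in a form that can be re-created elsewhere.
    res, err := getjobs.New(tp).
        JobId("_all").
        ExcludeGenerated(true).
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }

    enc := json.NewEncoder(os.Stdout)
    enc.SetIndent("", "  ")
    for _, job := range res.Jobs {
        if err := enc.Encode(job); err != nil {
            log.Fatal(err)
        }
    }
}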
+func (r GetJobs) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_jobs") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetJobs query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetJobs headers map. +func (r *GetJobs) Header(key, value string) *GetJobs { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. It can be a job identifier, a +// group name, or a wildcard expression. If you do not specify one of these +// options, the API returns information for all anomaly detection jobs. +// API Name: jobid +func (r *GetJobs) JobId(jobid string) *GetJobs { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no jobs that match. +// 2. Contains the _all string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value is `true`, which returns an empty `jobs` array when +// there are no matches and the subset of results when there are partial +// matches. If this parameter is `false`, the request returns a `404` status +// code when there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetJobs) AllowNoMatch(allownomatch bool) *GetJobs { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// ExcludeGenerated Indicates if certain fields should be removed from the configuration on +// retrieval. This allows the configuration to be in an acceptable format to +// be retrieved and then added to another cluster. +// API name: exclude_generated +func (r *GetJobs) ExcludeGenerated(excludegenerated bool) *GetJobs { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetJobs) ErrorTrace(errortrace bool) *GetJobs { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetJobs) FilterPath(filterpaths ...string) *GetJobs { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetJobs) Human(human bool) *GetJobs { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetJobs) Pretty(pretty bool) *GetJobs { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/response.go new file mode 100644 index 000000000..31c4312e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getjobs + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getjobs +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Jobs []types.Job `json:"jobs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/get_job_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/get_job_stats.go new file mode 100644 index 000000000..32c61e1b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/get_job_stats.go @@ -0,0 +1,373 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection job stats. +package getjobstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetJobStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetJobStats type alias for index. +type NewGetJobStats func() *GetJobStats + +// NewGetJobStatsFunc returns a new instance of GetJobStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetJobStatsFunc(tp elastictransport.Interface) NewGetJobStats { + return func() *GetJobStats { + n := New(tp) + + return n + } +} + +// Get anomaly detection job stats. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats +func New(tp elastictransport.Interface) *GetJobStats { + r := &GetJobStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetJobStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
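+// Unlike Do, Perform leaves status handling and body cleanup entirely to the
+// caller. An editor's sketch of the raw path (client accessor assumed):
+//
+//	res, err := es.ML.GetJobStats().Perform(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer res.Body.Close()
+//	raw, _ := io.ReadAll(res.Body)
+//	fmt.Println(string(raw))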
+func (r GetJobStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_job_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_job_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_job_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_job_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetJobStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getjobstats.Response +func (r GetJobStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_job_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
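+// As implemented below, a 2xx status yields (true, nil), a 404 yields
+// (false, nil), and any other status yields (false, err). An editor's sketch
+// (client accessor and job ID assumed):
+//
+//	ok, err := es.ML.GetJobStats().JobId("my-job").IsSuccess(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("job found:", ok)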
+func (r GetJobStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_job_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetJobStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetJobStats headers map. +func (r *GetJobStats) Header(key, value string) *GetJobStats { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. It can be a job identifier, a +// group name, a comma-separated list of jobs, or a wildcard expression. If +// you do not specify one of these options, the API returns information for +// all anomaly detection jobs. +// API Name: jobid +func (r *GetJobStats) JobId(jobid string) *GetJobStats { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no jobs that match. +// 2. Contains the _all string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// If `true`, the API returns an empty `jobs` array when +// there are no matches and the subset of results when there are partial +// matches. If `false`, the API returns a `404` status +// code when there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetJobStats) AllowNoMatch(allownomatch bool) *GetJobStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetJobStats) ErrorTrace(errortrace bool) *GetJobStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetJobStats) FilterPath(filterpaths ...string) *GetJobStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetJobStats) Human(human bool) *GetJobStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetJobStats) Pretty(pretty bool) *GetJobStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/response.go new file mode 100644 index 000000000..d8518e703 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getjobstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getjobstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Jobs []types.JobStats `json:"jobs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/get_memory_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/get_memory_stats.go new file mode 100644 index 000000000..417ea92a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/get_memory_stats.go @@ -0,0 +1,381 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get machine learning memory usage info. 
+// Get information about how machine learning jobs and trained models are using +// memory, +// on each node, both within the JVM heap, and natively, outside of the JVM. +package getmemorystats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetMemoryStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetMemoryStats type alias for index. +type NewGetMemoryStats func() *GetMemoryStats + +// NewGetMemoryStatsFunc returns a new instance of GetMemoryStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetMemoryStatsFunc(tp elastictransport.Interface) NewGetMemoryStats { + return func() *GetMemoryStats { + n := New(tp) + + return n + } +} + +// Get machine learning memory usage info. +// Get information about how machine learning jobs and trained models are using +// memory, +// on each node, both within the JVM heap, and natively, outside of the JVM. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats +func New(tp elastictransport.Interface) *GetMemoryStats { + r := &GetMemoryStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
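+// The request is built but never executed, which makes this useful for
+// debugging the exact path and query string. An editor's sketch (client
+// accessor assumed):
+//
+//	req, err := es.ML.GetMemoryStats().NodeId("ml:true").HttpRequest(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(req.Method, req.URL.Path) // GET /_ml/memory/ml:true/_stats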
+func (r *GetMemoryStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("memory") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("memory") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetMemoryStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_memory_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_memory_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_memory_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_memory_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetMemoryStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmemorystats.Response +func (r GetMemoryStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_memory_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetMemoryStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_memory_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetMemoryStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetMemoryStats headers map. +func (r *GetMemoryStats) Header(key, value string) *GetMemoryStats { + r.headers.Set(key, value) + + return r +} + +// NodeId The names of particular nodes in the cluster to target. For example, +// `nodeId1,nodeId2` or +// `ml:true` +// API Name: nodeid +func (r *GetMemoryStats) NodeId(nodeid string) *GetMemoryStats { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout +// expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetMemoryStats) MasterTimeout(duration string) *GetMemoryStats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request +// fails and returns an error. +// API name: timeout +func (r *GetMemoryStats) Timeout(duration string) *GetMemoryStats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetMemoryStats) ErrorTrace(errortrace bool) *GetMemoryStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
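+// For example, to keep only the per-node memory section and the node totals
+// (an editor's sketch; the exact field paths are illustrative):
+//
+//	es.ML.GetMemoryStats().FilterPath("nodes.*.mem", "_nodes.total")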
+// API name: filter_path +func (r *GetMemoryStats) FilterPath(filterpaths ...string) *GetMemoryStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetMemoryStats) Pretty(pretty bool) *GetMemoryStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/response.go new file mode 100644 index 000000000..3df24f4ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmemorystats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getmemorystats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.Memory `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.Memory, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/get_model_snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/get_model_snapshots.go new file mode 100644 index 000000000..010e9aa7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/get_model_snapshots.go @@ -0,0 +1,477 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get model snapshots info. +package getmodelsnapshots + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetModelSnapshots struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetModelSnapshots type alias for index. +type NewGetModelSnapshots func(jobid string) *GetModelSnapshots + +// NewGetModelSnapshotsFunc returns a new instance of GetModelSnapshots with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetModelSnapshotsFunc(tp elastictransport.Interface) NewGetModelSnapshots { + return func(jobid string) *GetModelSnapshots { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Get model snapshots info. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots +func New(tp elastictransport.Interface) *GetModelSnapshots { + r := &GetModelSnapshots{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetModelSnapshots) Raw(raw io.Reader) *GetModelSnapshots { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetModelSnapshots) Request(req *Request) *GetModelSnapshots { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
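+// As the code below shows, a Raw body takes precedence over a typed Request,
+// which is otherwise marshalled to JSON. An editor's sketch of the raw form
+// (client accessor and body content illustrative):
+//
+//	es.ML.GetModelSnapshots("my-job").
+//		Raw(strings.NewReader(`{"sort":"timestamp","desc":true}`)).
+//		Do(ctx)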
+func (r *GetModelSnapshots) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetModelSnapshots: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + + method = http.MethodPost + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetModelSnapshots) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_model_snapshots") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_model_snapshots") + if reader := instrument.RecordRequestBody(ctx, "ml.get_model_snapshots", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_model_snapshots") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetModelSnapshots query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmodelsnapshots.Response +func (r GetModelSnapshots) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_model_snapshots") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetModelSnapshots headers map. +func (r *GetModelSnapshots) Header(key, value string) *GetModelSnapshots { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetModelSnapshots) _jobid(jobid string) *GetModelSnapshots { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId A numerical character string that uniquely identifies the model snapshot. You +// can get information for multiple +// snapshots by using a comma-separated list or a wildcard expression. You can +// get all snapshots by using `_all`, +// by specifying `*` as the snapshot ID, or by omitting the snapshot ID. 
+// API Name: snapshotid +func (r *GetModelSnapshots) SnapshotId(snapshotid string) *GetModelSnapshots { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// From Skips the specified number of snapshots. +// API name: from +func (r *GetModelSnapshots) From(from int) *GetModelSnapshots { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of snapshots to obtain. +// API name: size +func (r *GetModelSnapshots) Size(size int) *GetModelSnapshots { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetModelSnapshots) ErrorTrace(errortrace bool) *GetModelSnapshots { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetModelSnapshots) FilterPath(filterpaths ...string) *GetModelSnapshots { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetModelSnapshots) Human(human bool) *GetModelSnapshots { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetModelSnapshots) Pretty(pretty bool) *GetModelSnapshots { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `desc` query parameter. +// API name: desc +func (r *GetModelSnapshots) Desc(desc bool) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Desc = &desc + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *GetModelSnapshots) End(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// API name: page +func (r *GetModelSnapshots) Page(page types.PageVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} + +// Refer to the description for the `sort` query parameter. +// API name: sort +func (r *GetModelSnapshots) Sort(field string) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sort = &field + + return r +} + +// Refer to the description for the `start` query parameter. 
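+// A combined editor's sketch of the query and body builders (client accessor
+// and values illustrative):
+//
+//	res, err := es.ML.GetModelSnapshots("my-job").
+//		From(0).Size(10).
+//		Sort("timestamp").
+//		Desc(true).
+//		Do(ctx)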
+// API name: start +func (r *GetModelSnapshots) Start(datetime types.DateTimeVariant) *GetModelSnapshots { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/request.go new file mode 100644 index 000000000..3b0e2dedb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/request.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmodelsnapshots + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getmodelsnapshots +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L108 +type Request struct { + + // Desc Refer to the description for the `desc` query parameter. + Desc *bool `json:"desc,omitempty"` + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + Page *types.Page `json:"page,omitempty"` + // Sort Refer to the description for the `sort` query parameter. + Sort *string `json:"sort,omitempty"` + // Start Refer to the description for the `start` query parameter. 
+ Start types.DateTime `json:"start,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getmodelsnapshots request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "desc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Desc", err) + } + s.Desc = &value + case bool: + s.Desc = &v + } + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "page": + if err := dec.Decode(&s.Page); err != nil { + return fmt.Errorf("%s | %w", "Page", err) + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/response.go new file mode 100644 index 000000000..ff7e76fa3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
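+
+// [editor's note: illustrative sketch, not generated code] The sibling
+// request.go above also exposes FromJSON for loading an arbitrary JSON body:
+//
+//	req, err := getmodelsnapshots.NewRequest().FromJSON(`{"sort":"timestamp","desc":false}`)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	res, err := es.ML.GetModelSnapshots("my-job").Request(req).Do(ctx)
+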
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmodelsnapshots + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getmodelsnapshots +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + ModelSnapshots []types.ModelSnapshot `json:"model_snapshots"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go new file mode 100644 index 000000000..607114e90 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go @@ -0,0 +1,392 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly detection job model snapshot upgrade usage info. +package getmodelsnapshotupgradestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetModelSnapshotUpgradeStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetModelSnapshotUpgradeStats type alias for index. +type NewGetModelSnapshotUpgradeStats func(jobid, snapshotid string) *GetModelSnapshotUpgradeStats + +// NewGetModelSnapshotUpgradeStatsFunc returns a new instance of GetModelSnapshotUpgradeStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
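+// Both path parameters are required here, so the constructor takes them
+// directly. An editor's sketch (IDs illustrative; `_all` targets every
+// snapshot of the job):
+//
+//	res, err := es.ML.GetModelSnapshotUpgradeStats("my-job", "_all").AllowNoMatch(true).Do(ctx)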
+func NewGetModelSnapshotUpgradeStatsFunc(tp elastictransport.Interface) NewGetModelSnapshotUpgradeStats { + return func(jobid, snapshotid string) *GetModelSnapshotUpgradeStats { + n := New(tp) + + n._jobid(jobid) + + n._snapshotid(snapshotid) + + return n + } +} + +// Get anomaly detection job model snapshot upgrade usage info. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats +func New(tp elastictransport.Interface) *GetModelSnapshotUpgradeStats { + r := &GetModelSnapshotUpgradeStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetModelSnapshotUpgradeStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + path.WriteString("/") + path.WriteString("_upgrade") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetModelSnapshotUpgradeStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_model_snapshot_upgrade_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_model_snapshot_upgrade_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_model_snapshot_upgrade_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_model_snapshot_upgrade_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetModelSnapshotUpgradeStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getmodelsnapshotupgradestats.Response +func (r GetModelSnapshotUpgradeStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_model_snapshot_upgrade_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
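+// Note that IsSuccess drains and closes the response body itself, so the
+// caller has no cleanup to do. An editor's sketch (client and IDs assumed):
+//
+//	ok, err := es.ML.GetModelSnapshotUpgradeStats("my-job", "_all").IsSuccess(ctx)
+//	if err == nil && !ok {
+//		fmt.Println("no matching snapshot upgrades")
+//	}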
+func (r GetModelSnapshotUpgradeStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_model_snapshot_upgrade_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetModelSnapshotUpgradeStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetModelSnapshotUpgradeStats headers map. +func (r *GetModelSnapshotUpgradeStats) Header(key, value string) *GetModelSnapshotUpgradeStats { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetModelSnapshotUpgradeStats) _jobid(jobid string) *GetModelSnapshotUpgradeStats { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId A numerical character string that uniquely identifies the model snapshot. You +// can get information for multiple +// snapshots by using a comma-separated list or a wildcard expression. You can +// get all snapshots by using `_all`, +// by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +// API Name: snapshotid +func (r *GetModelSnapshotUpgradeStats) _snapshotid(snapshotid string) *GetModelSnapshotUpgradeStats { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// - Contains wildcard expressions and there are no jobs that match. +// - Contains the _all string or no identifiers and there are no matches. +// - Contains wildcard expressions and there are only partial matches. +// +// The default value is true, which returns an empty jobs array when there are +// no matches and the subset of results +// when there are partial matches. If this parameter is false, the request +// returns a 404 status code when there are +// no matches or only partial matches. +// API name: allow_no_match +func (r *GetModelSnapshotUpgradeStats) AllowNoMatch(allownomatch bool) *GetModelSnapshotUpgradeStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetModelSnapshotUpgradeStats) ErrorTrace(errortrace bool) *GetModelSnapshotUpgradeStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetModelSnapshotUpgradeStats) FilterPath(filterpaths ...string) *GetModelSnapshotUpgradeStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetModelSnapshotUpgradeStats) Human(human bool) *GetModelSnapshotUpgradeStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetModelSnapshotUpgradeStats) Pretty(pretty bool) *GetModelSnapshotUpgradeStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/response.go new file mode 100644 index 000000000..d91c79ac1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getmodelsnapshotupgradestats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getmodelsnapshotupgradestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + ModelSnapshotUpgrades []types.ModelSnapshotUpgrade `json:"model_snapshot_upgrades"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/get_overall_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/get_overall_buckets.go new file mode 100644 index 000000000..5a5f0447c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/get_overall_buckets.go @@ -0,0 +1,494 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get overall bucket results.
+//
+// Retrieves overall bucket results that summarize the bucket results of
+// multiple anomaly detection jobs.
+//
+// The `overall_score` is calculated by combining the scores of all the
+// buckets within the overall bucket span. First, the maximum
+// `anomaly_score` per anomaly detection job in the overall bucket is
+// calculated. Then the `top_n` of those scores are averaged to result in
+// the `overall_score`. This means that you can fine-tune the
+// `overall_score` so that it is more or less sensitive to the number of
+// jobs that detect an anomaly at the same time. For example, if you set
+// `top_n` to `1`, the `overall_score` is the maximum bucket score in the
+// overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+// the `overall_score` is high only when all jobs detect anomalies in that
+// overall bucket. If you set the `bucket_span` parameter (to a value
+// greater than its default), the `overall_score` is the maximum
+// `overall_score` of the overall buckets that have a span equal to the
+// jobs' largest bucket span.
+package getoverallbuckets
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	jobidMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetOverallBuckets struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	jobid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetOverallBuckets type alias for index.
+type NewGetOverallBuckets func(jobid string) *GetOverallBuckets
+
+// NewGetOverallBucketsFunc returns a new instance of GetOverallBuckets with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetOverallBucketsFunc(tp elastictransport.Interface) NewGetOverallBuckets {
+	return func(jobid string) *GetOverallBuckets {
+		n := New(tp)
+
+		n._jobid(jobid)
+
+		return n
+	}
+}
+
+// Get overall bucket results.
+//
+// Retrieves overall bucket results that summarize the bucket results of
+// multiple anomaly detection jobs.
+//
+// The `overall_score` is calculated by combining the scores of all the
+// buckets within the overall bucket span. First, the maximum
+// `anomaly_score` per anomaly detection job in the overall bucket is
+// calculated. Then the `top_n` of those scores are averaged to result in
+// the `overall_score`.
This means that you can fine-tune the +// `overall_score` so that it is more or less sensitive to the number of +// jobs that detect an anomaly at the same time. For example, if you set +// `top_n` to `1`, the `overall_score` is the maximum bucket score in the +// overall bucket. Alternatively, if you set `top_n` to the number of jobs, +// the `overall_score` is high only when all jobs detect anomalies in that +// overall bucket. If you set the `bucket_span` parameter (to a value +// greater than its default), the `overall_score` is the maximum +// `overall_score` of the overall buckets that have a span equal to the +// jobs' largest bucket span. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets +func New(tp elastictransport.Interface) *GetOverallBuckets { + r := &GetOverallBuckets{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetOverallBuckets) Raw(raw io.Reader) *GetOverallBuckets { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetOverallBuckets) Request(req *Request) *GetOverallBuckets { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetOverallBuckets) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetOverallBuckets: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("overall_buckets") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request 
through the provided transport and returns an http.Response. +func (r GetOverallBuckets) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_overall_buckets") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_overall_buckets") + if reader := instrument.RecordRequestBody(ctx, "ml.get_overall_buckets", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_overall_buckets") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetOverallBuckets query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getoverallbuckets.Response +func (r GetOverallBuckets) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_overall_buckets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetOverallBuckets headers map. +func (r *GetOverallBuckets) Header(key, value string) *GetOverallBuckets { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. It can be a job identifier, a +// group name, a comma-separated list of jobs or groups, or a wildcard +// expression. +// +// You can summarize the bucket results for all anomaly detection jobs by +// using `_all` or by specifying `*` as the ``. 
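// A minimal sketch, assuming `tp` is a configured
// elastictransport.Interface: summarizing every job via `_all` with
// `top_n` set to 1, so each overall bucket reports the single highest
// bucket score, as described in the package documentation above.
//
//	func overallBuckets(tp elastictransport.Interface) error {
//		resp, err := getoverallbuckets.NewGetOverallBucketsFunc(tp)("_all").
//			TopN(1).
//			Do(context.Background())
//		if err != nil {
//			return err
//		}
//		fmt.Printf("%d overall buckets\n", resp.Count)
//		return nil
//	}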
+// API Name: jobid +func (r *GetOverallBuckets) _jobid(jobid string) *GetOverallBuckets { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetOverallBuckets) ErrorTrace(errortrace bool) *GetOverallBuckets { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetOverallBuckets) FilterPath(filterpaths ...string) *GetOverallBuckets { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetOverallBuckets) Human(human bool) *GetOverallBuckets { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetOverallBuckets) Pretty(pretty bool) *GetOverallBuckets { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `allow_no_match` query parameter. +// API name: allow_no_match +func (r *GetOverallBuckets) AllowNoMatch(allownomatch bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowNoMatch = &allownomatch + + return r +} + +// Refer to the description for the `bucket_span` query parameter. +// API name: bucket_span +func (r *GetOverallBuckets) BucketSpan(duration types.DurationVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BucketSpan = *duration.DurationCaster() + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *GetOverallBuckets) End(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `exclude_interim` query parameter. +// API name: exclude_interim +func (r *GetOverallBuckets) ExcludeInterim(excludeinterim bool) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExcludeInterim = &excludeinterim + + return r +} + +// Refer to the description for the `overall_score` query parameter. +// API name: overall_score +func (r *GetOverallBuckets) OverallScore(overallscore string) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.OverallScore = &overallscore + + return r +} + +// Refer to the description for the `start` query parameter. 
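// Each body setter above allocates the request lazily ("initialize the
// request if it is not already initialized"), so the builder can be
// configured fluently without calling Request or NewRequest first; later
// setters reuse the same request. A short chained sketch with a
// placeholder job group:
//
//	r := getoverallbuckets.NewGetOverallBucketsFunc(tp)("high_sum_*").
//		ExcludeInterim(true).
//		OverallScore("80").
//		TopN(2)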
+// API name: start +func (r *GetOverallBuckets) Start(datetime types.DateTimeVariant) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `top_n` query parameter. +// API name: top_n +func (r *GetOverallBuckets) TopN(topn int) *GetOverallBuckets { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TopN = &topn + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/request.go new file mode 100644 index 000000000..ba6bef836 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/request.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getoverallbuckets + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getoverallbuckets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L153 +type Request struct { + + // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. + AllowNoMatch *bool `json:"allow_no_match,omitempty"` + // BucketSpan Refer to the description for the `bucket_span` query parameter. + BucketSpan types.Duration `json:"bucket_span,omitempty"` + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + // ExcludeInterim Refer to the description for the `exclude_interim` query parameter. + ExcludeInterim *bool `json:"exclude_interim,omitempty"` + // OverallScore Refer to the description for the `overall_score` query parameter. + OverallScore *string `json:"overall_score,omitempty"` + // Start Refer to the description for the `start` query parameter. + Start types.DateTime `json:"start,omitempty"` + // TopN Refer to the description for the `top_n` query parameter. 
+ TopN *int `json:"top_n,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getoverallbuckets request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_no_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowNoMatch", err) + } + s.AllowNoMatch = &value + case bool: + s.AllowNoMatch = &v + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "exclude_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExcludeInterim", err) + } + s.ExcludeInterim = &value + case bool: + s.ExcludeInterim = &v + } + + case "overall_score": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OverallScore", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverallScore = &o + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + case "top_n": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopN", err) + } + s.TopN = &value + case float64: + f := int(v) + s.TopN = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/response.go new file mode 100644 index 000000000..efc0f8bbb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getoverallbuckets + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getoverallbuckets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 +type Response struct { + Count int64 `json:"count"` + // OverallBuckets Array of overall bucket objects + OverallBuckets []types.OverallBucket `json:"overall_buckets"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/get_records.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/get_records.go new file mode 100644 index 000000000..9cadbfe36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/get_records.go @@ -0,0 +1,488 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get anomaly records for an anomaly detection job. +// Records contain the detailed analytical results. They describe the anomalous +// activity that has been identified in the input data based on the detector +// configuration. +// There can be many anomaly records depending on the characteristics and size +// of the input data. In practice, there are often too many to be able to +// manually process them. The machine learning features therefore perform a +// sophisticated aggregation of the anomaly records into buckets. +// The number of record results depends on the number of anomalies found in each +// bucket, which relates to the number of time series being modeled and the +// number of detectors. +package getrecords + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRecords struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRecords type alias for index. +type NewGetRecords func(jobid string) *GetRecords + +// NewGetRecordsFunc returns a new instance of GetRecords with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRecordsFunc(tp elastictransport.Interface) NewGetRecords { + return func(jobid string) *GetRecords { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Get anomaly records for an anomaly detection job. +// Records contain the detailed analytical results. They describe the anomalous +// activity that has been identified in the input data based on the detector +// configuration. +// There can be many anomaly records depending on the characteristics and size +// of the input data. In practice, there are often too many to be able to +// manually process them. The machine learning features therefore perform a +// sophisticated aggregation of the anomaly records into buckets. +// The number of record results depends on the number of anomalies found in each +// bucket, which relates to the number of time series being modeled and the +// number of detectors. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records +func New(tp elastictransport.Interface) *GetRecords { + r := &GetRecords{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetRecords) Raw(raw io.Reader) *GetRecords { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetRecords) Request(req *Request) *GetRecords { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
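// As the Raw documentation above notes, a raw body takes precedence: in
// HttpRequest below, the typed request is only marshalled when r.raw is
// nil, so a raw reader silently wins over anything set via Request. A
// sketch, assuming `tp` is a configured elastictransport.Interface:
//
//	body := strings.NewReader(`{"record_score": 80, "sort": "record_score"}`)
//	resp, err := getrecords.NewGetRecordsFunc(tp)("my-job").
//		Raw(body).
//		Do(context.Background())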
+func (r *GetRecords) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetRecords: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("results") + path.WriteString("/") + path.WriteString("records") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
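// Perform below returns the bare *http.Response and leaves status
// handling and body closing to the caller; Do is the higher-level
// variant that decodes into *Response and turns non-2xx replies into
// *types.ElasticsearchError. A low-level sketch:
//
//	res, err := getrecords.NewGetRecordsFunc(tp)("my-job").
//		Perform(context.Background())
//	if err != nil {
//		return err
//	}
//	defer res.Body.Close()
//	// inspect res.StatusCode and res.Body directly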
+func (r GetRecords) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_records") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_records") + if reader := instrument.RecordRequestBody(ctx, "ml.get_records", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_records") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRecords query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrecords.Response +func (r GetRecords) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_records") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetRecords headers map. +func (r *GetRecords) Header(key, value string) *GetRecords { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *GetRecords) _jobid(jobid string) *GetRecords { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// From Skips the specified number of records. +// API name: from +func (r *GetRecords) From(from int) *GetRecords { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of records to obtain. +// API name: size +func (r *GetRecords) Size(size int) *GetRecords { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *GetRecords) ErrorTrace(errortrace bool) *GetRecords { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRecords) FilterPath(filterpaths ...string) *GetRecords { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRecords) Human(human bool) *GetRecords { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRecords) Pretty(pretty bool) *GetRecords { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `desc` query parameter. +// API name: desc +func (r *GetRecords) Desc(desc bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Desc = &desc + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *GetRecords) End(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `exclude_interim` query parameter. +// API name: exclude_interim +func (r *GetRecords) ExcludeInterim(excludeinterim bool) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExcludeInterim = &excludeinterim + + return r +} + +// API name: page +func (r *GetRecords) Page(page types.PageVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Page = page.PageCaster() + + return r +} + +// Refer to the description for the `record_score` query parameter. +// API name: record_score +func (r *GetRecords) RecordScore(recordscore types.Float64) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RecordScore = &recordscore + + return r +} + +// Refer to the description for the `sort` query parameter. +// API name: sort +func (r *GetRecords) Sort(field string) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sort = &field + + return r +} + +// Refer to the description for the `start` query parameter. 
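// Putting the query and body setters together, a hedged end-to-end
// sketch (placeholder job id; `tp` is a configured
// elastictransport.Interface): page through the highest-scoring records
// first.
//
//	resp, err := getrecords.NewGetRecordsFunc(tp)("my-job").
//		RecordScore(types.Float64(75)).
//		Sort("record_score").
//		Desc(true).
//		From(0).
//		Size(50).
//		Do(context.Background())
//	if err != nil {
//		return err
//	}
//	fmt.Printf("showing %d of %d records\n", len(resp.Records), resp.Count)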
+// API name: start +func (r *GetRecords) Start(datetime types.DateTimeVariant) *GetRecords { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/request.go new file mode 100644 index 000000000..fde4d2782 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/request.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrecords + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package getrecords +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L135 +type Request struct { + + // Desc Refer to the description for the `desc` query parameter. + Desc *bool `json:"desc,omitempty"` + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + // ExcludeInterim Refer to the description for the `exclude_interim` query parameter. + ExcludeInterim *bool `json:"exclude_interim,omitempty"` + Page *types.Page `json:"page,omitempty"` + // RecordScore Refer to the description for the `record_score` query parameter. + RecordScore *types.Float64 `json:"record_score,omitempty"` + // Sort Refer to the description for the `sort` query parameter. + Sort *string `json:"sort,omitempty"` + // Start Refer to the description for the `start` query parameter. 
+ Start types.DateTime `json:"start,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Getrecords request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "desc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Desc", err) + } + s.Desc = &value + case bool: + s.Desc = &v + } + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "exclude_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExcludeInterim", err) + } + s.ExcludeInterim = &value + case bool: + s.ExcludeInterim = &v + } + + case "page": + if err := dec.Decode(&s.Page); err != nil { + return fmt.Errorf("%s | %w", "Page", err) + } + + case "record_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RecordScore", err) + } + f := types.Float64(value) + s.RecordScore = &f + case float64: + f := types.Float64(v) + s.RecordScore = &f + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/response.go new file mode 100644 index 000000000..880fc1736 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrecords + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getrecords +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Records []types.Anomaly `json:"records"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/get_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/get_trained_models.go new file mode 100644 index 000000000..818152931 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/get_trained_models.go @@ -0,0 +1,427 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get trained model configuration info. +package gettrainedmodels + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/include" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTrainedModels struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTrainedModels type alias for index. +type NewGetTrainedModels func() *GetTrainedModels + +// NewGetTrainedModelsFunc returns a new instance of GetTrainedModels with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetTrainedModelsFunc(tp elastictransport.Interface) NewGetTrainedModels { + return func() *GetTrainedModels { + n := New(tp) + + return n + } +} + +// Get trained model configuration info. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models +func New(tp elastictransport.Interface) *GetTrainedModels { + r := &GetTrainedModels{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetTrainedModels) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
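// The path switch above serves both forms of the endpoint: with no model
// id it targets the collection, with one it targets a single model
// (ModelId is defined further down in this file). A sketch of the
// resulting paths:
//
//	req1, _ := gettrainedmodels.NewGetTrainedModelsFunc(tp)().
//		HttpRequest(context.Background())
//	// req1.URL.Path == "/_ml/trained_models"
//
//	req2, _ := gettrainedmodels.NewGetTrainedModelsFunc(tp)().
//		ModelId("my-model").
//		HttpRequest(context.Background())
//	// req2.URL.Path == "/_ml/trained_models/my-model"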
+func (r GetTrainedModels) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_trained_models") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_trained_models") + if reader := instrument.RecordRequestBody(ctx, "ml.get_trained_models", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_trained_models") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTrainedModels query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettrainedmodels.Response +func (r GetTrainedModels) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
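// Because the implementation below maps a 404 to (false, nil), IsSuccess
// doubles as an existence check for a model id. A hedged sketch with a
// placeholder id:
//
//	exists, err := gettrainedmodels.NewGetTrainedModelsFunc(tp)().
//		ModelId("my-model").
//		IsSuccess(context.Background())
//	if err != nil {
//		return err
//	}
//	fmt.Println("model exists:", exists)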
+func (r GetTrainedModels) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTrainedModels query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTrainedModels headers map. +func (r *GetTrainedModels) Header(key, value string) *GetTrainedModels { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model or a model alias. +// +// You can get information for multiple trained models in a single API +// request by using a comma-separated list of model IDs or a wildcard +// expression. +// API Name: modelid +func (r *GetTrainedModels) ModelId(modelid string) *GetTrainedModels { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// - Contains wildcard expressions and there are no models that match. +// - Contains the _all string or no identifiers and there are no matches. +// - Contains wildcard expressions and there are only partial matches. +// +// If true, it returns an empty array when there are no matches and the +// subset of results when there are partial matches. +// API name: allow_no_match +func (r *GetTrainedModels) AllowNoMatch(allownomatch bool) *GetTrainedModels { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// DecompressDefinition Specifies whether the included model definition should be returned as a +// JSON map (true) or in a custom compressed format (false). +// API name: decompress_definition +func (r *GetTrainedModels) DecompressDefinition(decompressdefinition bool) *GetTrainedModels { + r.values.Set("decompress_definition", strconv.FormatBool(decompressdefinition)) + + return r +} + +// ExcludeGenerated Indicates if certain fields should be removed from the configuration on +// retrieval. This allows the configuration to be in an acceptable format to +// be retrieved and then added to another cluster. +// API name: exclude_generated +func (r *GetTrainedModels) ExcludeGenerated(excludegenerated bool) *GetTrainedModels { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) + + return r +} + +// From Skips the specified number of models. +// API name: from +func (r *GetTrainedModels) From(from int) *GetTrainedModels { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Include A comma delimited string of optional fields to include in the response +// body. +// API name: include +func (r *GetTrainedModels) Include(include include.Include) *GetTrainedModels { + r.values.Set("include", include.String()) + + return r +} + +// Size Specifies the maximum number of models to obtain. 
+// API name: size +func (r *GetTrainedModels) Size(size int) *GetTrainedModels { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Tags A comma delimited string of tags. A trained model can have many tags, or +// none. When supplied, only trained models that contain all the supplied +// tags are returned. +// API name: tags +func (r *GetTrainedModels) Tags(tags ...string) *GetTrainedModels { + tmp := []string{} + for _, item := range tags { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("tags", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTrainedModels) ErrorTrace(errortrace bool) *GetTrainedModels { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTrainedModels) FilterPath(filterpaths ...string) *GetTrainedModels { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetTrainedModels) Human(human bool) *GetTrainedModels { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetTrainedModels) Pretty(pretty bool) *GetTrainedModels { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/response.go new file mode 100644 index 000000000..6a352bdc3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettrainedmodels + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettrainedmodels +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 +type Response struct { + Count int `json:"count"` + // TrainedModelConfigs An array of trained model resources, which are sorted by the model_id value + // in ascending order. + TrainedModelConfigs []types.TrainedModelConfig `json:"trained_model_configs"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go new file mode 100644 index 000000000..e77f712d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get trained models usage info. +// You can get usage information for multiple trained +// models in a single API request by using a comma-separated list of model IDs +// or a wildcard expression. +package gettrainedmodelsstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTrainedModelsStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTrainedModelsStats type alias for index. +type NewGetTrainedModelsStats func() *GetTrainedModelsStats + +// NewGetTrainedModelsStatsFunc returns a new instance of GetTrainedModelsStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
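// A minimal sketch, assuming the ModelId and AllowNoMatch setters on
// this builder mirror those generated for GetTrainedModels above: per
// the package documentation, a wildcard or comma-separated list fetches
// usage stats for several models in one request.
//
//	stats, err := gettrainedmodelsstats.NewGetTrainedModelsStatsFunc(tp)().
//		ModelId("dfa-*").
//		AllowNoMatch(true).
//		Do(context.Background())
//	if err != nil {
//		return err
//	}
//	_ = stats // e.g. the count and per-model usage entries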
+func NewGetTrainedModelsStatsFunc(tp elastictransport.Interface) NewGetTrainedModelsStats { + return func() *GetTrainedModelsStats { + n := New(tp) + + return n + } +} + +// Get trained models usage info. +// You can get usage information for multiple trained +// models in a single API request by using a comma-separated list of model IDs +// or a wildcard expression. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats +func New(tp elastictransport.Interface) *GetTrainedModelsStats { + r := &GetTrainedModelsStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetTrainedModelsStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetTrainedModelsStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.get_trained_models_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.get_trained_models_stats") + if reader := instrument.RecordRequestBody(ctx, "ml.get_trained_models_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.get_trained_models_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTrainedModelsStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettrainedmodelsstats.Response +func (r GetTrainedModelsStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
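As the IsSuccess comment above suggests, the boolean form suits quick reachability checks, while Do returns the decoded statistics. A sketch under the same typed-client assumption as the earlier example:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func modelStats(ctx context.Context, es *elasticsearch.TypedClient, modelID string) error {
	// IsSuccess drains and closes the body, reporting only success or failure.
	ok, err := es.Ml.GetTrainedModelsStats().ModelId(modelID).IsSuccess(ctx)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("no stats for %q", modelID)
	}
	// Do decodes the body into gettrainedmodelsstats.Response instead.
	stats, err := es.Ml.GetTrainedModelsStats().ModelId(modelID).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d matching models\n", stats.Count)
	return nil
}

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}
	if err := modelStats(context.Background(), es, "my-model"); err != nil {
		log.Fatal(err)
	}
}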
+func (r GetTrainedModelsStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.get_trained_models_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTrainedModelsStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTrainedModelsStats headers map. +func (r *GetTrainedModelsStats) Header(key, value string) *GetTrainedModelsStats { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model or a model alias. It can be a +// comma-separated list or a wildcard expression. +// API Name: modelid +func (r *GetTrainedModelsStats) ModelId(modelid string) *GetTrainedModelsStats { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// - Contains wildcard expressions and there are no models that match. +// - Contains the _all string or no identifiers and there are no matches. +// - Contains wildcard expressions and there are only partial matches. +// +// If true, it returns an empty array when there are no matches and the +// subset of results when there are partial matches. +// API name: allow_no_match +func (r *GetTrainedModelsStats) AllowNoMatch(allownomatch bool) *GetTrainedModelsStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// From Skips the specified number of models. +// API name: from +func (r *GetTrainedModelsStats) From(from int) *GetTrainedModelsStats { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of models to obtain. +// API name: size +func (r *GetTrainedModelsStats) Size(size int) *GetTrainedModelsStats { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTrainedModelsStats) ErrorTrace(errortrace bool) *GetTrainedModelsStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTrainedModelsStats) FilterPath(filterpaths ...string) *GetTrainedModelsStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *GetTrainedModelsStats) Human(human bool) *GetTrainedModelsStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetTrainedModelsStats) Pretty(pretty bool) *GetTrainedModelsStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/response.go new file mode 100644 index 000000000..3a05d54e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettrainedmodelsstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettrainedmodelsstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 +type Response struct { + + // Count The total number of trained model statistics that matched the requested ID + // patterns. Could be higher than the number of items in the trained_model_stats + // array as the size of the array is restricted by the supplied size parameter. + Count int `json:"count"` + // TrainedModelStats An array of trained model statistics, which are sorted by the model_id value + // in ascending order. + TrainedModelStats []types.TrainedModelStats `json:"trained_model_stats"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/infer_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/infer_trained_model.go new file mode 100644 index 000000000..7a58950ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/infer_trained_model.go @@ -0,0 +1,398 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Evaluate a trained model. +package infertrainedmodel + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type InferTrainedModel struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInferTrainedModel type alias for index. +type NewInferTrainedModel func(modelid string) *InferTrainedModel + +// NewInferTrainedModelFunc returns a new instance of InferTrainedModel with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInferTrainedModelFunc(tp elastictransport.Interface) NewInferTrainedModel { + return func(modelid string) *InferTrainedModel { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Evaluate a trained model. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model +func New(tp elastictransport.Interface) *InferTrainedModel { + r := &InferTrainedModel{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *InferTrainedModel) Raw(raw io.Reader) *InferTrainedModel { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *InferTrainedModel) Request(req *Request) *InferTrainedModel { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
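Because HttpRequest is exported, the request can be built and inspected without sending it, which is handy in tests. A sketch, again assuming the es.Ml namespace wiring:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// Nothing is sent; the builder only assembles the *http.Request.
	req, err := es.Ml.InferTrainedModel("my-model").HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path) // POST /_ml/trained_models/my-model/_infer
	fmt.Println(req.Header.Get("Accept")) // application/vnd.elasticsearch+json;compatible-with=9
}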
+func (r *InferTrainedModel) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for InferTrainedModel: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("_infer") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r InferTrainedModel) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.infer_trained_model") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.infer_trained_model") + if reader := instrument.RecordRequestBody(ctx, "ml.infer_trained_model", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.infer_trained_model") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the InferTrainedModel query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a infertrainedmodel.Response +func (r InferTrainedModel) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.infer_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the InferTrainedModel headers map. +func (r *InferTrainedModel) Header(key, value string) *InferTrainedModel { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *InferTrainedModel) _modelid(modelid string) *InferTrainedModel { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// Timeout Controls the amount of time to wait for inference results. +// API name: timeout +func (r *InferTrainedModel) Timeout(duration string) *InferTrainedModel { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *InferTrainedModel) ErrorTrace(errortrace bool) *InferTrainedModel { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *InferTrainedModel) FilterPath(filterpaths ...string) *InferTrainedModel { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *InferTrainedModel) Human(human bool) *InferTrainedModel { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *InferTrainedModel) Pretty(pretty bool) *InferTrainedModel { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// An array of objects to pass to the model for inference. The objects should +// contain a fields matching your +// configured trained model input. Typically, for NLP models, the field name is +// `text_field`. +// Currently, for NLP models, only a single value is allowed. +// API name: docs +func (r *InferTrainedModel) Docs(docs []map[string]json.RawMessage) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Docs = docs + + return r +} + +// The inference configuration updates to apply on the API call +// API name: inference_config +func (r *InferTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigUpdateContainerVariant) *InferTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.InferenceConfig = inferenceconfig.InferenceConfigUpdateContainerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/request.go new file mode 100644 index 000000000..fb5fff912 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/request.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
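Putting the body setters together, a sketch of a full inference call; my-nlp-model is a placeholder, the text_field key follows the Docs comment above, and the InferenceResults field appears in response.go below:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}
	docs := []map[string]json.RawMessage{
		// RawMessage holds already-encoded JSON, hence the quoted string literal.
		{"text_field": json.RawMessage(`"The quick brown fox"`)},
	}
	res, err := es.Ml.InferTrainedModel("my-nlp-model").
		Docs(docs).
		Timeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d inference results\n", len(res.InferenceResults))
}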
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package infertrainedmodel + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package infertrainedmodel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L67 +type Request struct { + + // Docs An array of objects to pass to the model for inference. The objects should + // contain a fields matching your + // configured trained model input. Typically, for NLP models, the field name is + // `text_field`. + // Currently, for NLP models, only a single value is allowed. + Docs []map[string]json.RawMessage `json:"docs"` + // InferenceConfig The inference configuration updates to apply on the API call + InferenceConfig *types.InferenceConfigUpdateContainer `json:"inference_config,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Infertrainedmodel request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/response.go new file mode 100644 index 000000000..a44f15aba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
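FromJSON is the escape hatch when the payload already exists as JSON text; a sketch pairing it with the Request setter from infer_trained_model.go above:

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// FromJSON unmarshals into the generated Request struct before sending.
	req, err := infertrainedmodel.NewRequest().FromJSON(`{"docs":[{"text_field":"The quick brown fox"}]}`)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := es.Ml.InferTrainedModel("my-nlp-model").Request(req).Do(context.Background()); err != nil {
		log.Fatal(err)
	}
}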
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package infertrainedmodel + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package infertrainedmodel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 +type Response struct { + InferenceResults []types.InferenceResponseResult `json:"inference_results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/info.go new file mode 100644 index 000000000..111188f5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/info.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get machine learning information. +// Get defaults and limits used by machine learning. +// This endpoint is designed to be used by a user interface that needs to fully +// understand machine learning configurations where some options are not +// specified, meaning that the defaults should be used. This endpoint may be +// used to find out what those defaults are. It also provides information about +// the maximum size of machine learning jobs that could run in the current +// cluster configuration. +package info + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInfo type alias for index. +type NewInfo func() *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func() *Info { + n := New(tp) + + return n + } +} + +// Get machine learning information. 
+// Get defaults and limits used by machine learning. +// This endpoint is designed to be used by a user interface that needs to fully +// understand machine learning configurations where some options are not +// specified, meaning that the defaults should be used. This endpoint may be +// used to find out what those defaults are. It also provides information about +// the maximum size of machine learning jobs that could run in the current +// cluster configuration. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("info") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Info) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.info") + if reader := instrument.RecordRequestBody(ctx, "ml.info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Info query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Info) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Info query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Info) ErrorTrace(errortrace bool) *Info { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Info) FilterPath(filterpaths ...string) *Info { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Info) Pretty(pretty bool) *Info { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/response.go new file mode 100644 index 000000000..82e12b07f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/info/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package info + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/MlInfoResponse.ts#L22-L29 +type Response struct { + Defaults types.Defaults `json:"defaults"` + Limits types.Limits `json:"limits"` + NativeCode types.NativeCode `json:"native_code"` + UpgradeMode bool `json:"upgrade_mode"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/open_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/open_job.go new file mode 100644 index 000000000..a3426ab74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/open_job.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Open anomaly detection jobs. +// +// An anomaly detection job must be opened to be ready to receive and analyze +// data. It can be opened and closed multiple times throughout its lifecycle. +// When you open a new job, it starts with an empty model. +// When you open an existing job, the most recent model state is automatically +// loaded. The job is ready to resume its analysis from where it left off, once +// new data is received. +package openjob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type OpenJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOpenJob type alias for index. +type NewOpenJob func(jobid string) *OpenJob + +// NewOpenJobFunc returns a new instance of OpenJob with the provided transport. 
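A sketch of reading the info response defined just above; only fields shown there are used:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}
	info, err := es.Ml.Info().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if info.UpgradeMode {
		fmt.Println("ML is in upgrade mode") // jobs are paused while this is true
	}
	fmt.Printf("defaults: %+v\nlimits: %+v\n", info.Defaults, info.Limits)
}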
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewOpenJobFunc(tp elastictransport.Interface) NewOpenJob { + return func(jobid string) *OpenJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Open anomaly detection jobs. +// +// An anomaly detection job must be opened to be ready to receive and analyze +// data. It can be opened and closed multiple times throughout its lifecycle. +// When you open a new job, it starts with an empty model. +// When you open an existing job, the most recent model state is automatically +// loaded. The job is ready to resume its analysis from where it left off, once +// new data is received. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job +func New(tp elastictransport.Interface) *OpenJob { + r := &OpenJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OpenJob) Raw(raw io.Reader) *OpenJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OpenJob) Request(req *Request) *OpenJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *OpenJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OpenJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_open") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r OpenJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.open_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.open_job") + if reader := instrument.RecordRequestBody(ctx, "ml.open_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.open_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the OpenJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a openjob.Response +func (r OpenJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.open_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the OpenJob headers map. +func (r *OpenJob) Header(key, value string) *OpenJob { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *OpenJob) _jobid(jobid string) *OpenJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OpenJob) ErrorTrace(errortrace bool) *OpenJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
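A sketch of opening a job; the Opened and Node fields come from openjob/response.go further down in the diff:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}
	res, err := es.Ml.OpenJob("my-anomaly-job").Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if res.Opened {
		// Node is empty if the job is allowed to open lazily and is not yet assigned.
		fmt.Println("opened on node:", res.Node)
	}
}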
+// API name: filter_path +func (r *OpenJob) FilterPath(filterpaths ...string) *OpenJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OpenJob) Human(human bool) *OpenJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OpenJob) Pretty(pretty bool) *OpenJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `timeout` query parameter. +// API name: timeout +func (r *OpenJob) Timeout(duration types.DurationVariant) *OpenJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/request.go new file mode 100644 index 000000000..ca8f26613 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/request.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package openjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package openjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/open_job/MlOpenJobRequest.ts#L24-L67 +type Request struct { + + // Timeout Refer to the description for the `timeout` query parameter. 
+ Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Openjob request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/response.go new file mode 100644 index 000000000..75b8e3efb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package openjob + +// Response holds the response body struct for the package openjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 +type Response struct { + + // Node The ID of the node that the job was started on. In serverless this will be + // the "serverless". + // If the job is allowed to open lazily and has not yet been assigned to a node, + // this value is an empty string. + Node string `json:"node"` + Opened bool `json:"opened"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/post_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/post_calendar_events.go new file mode 100644 index 000000000..43b95048f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/post_calendar_events.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Add scheduled events to the calendar. +package postcalendarevents + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostCalendarEvents struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + calendarid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostCalendarEvents type alias for index. +type NewPostCalendarEvents func(calendarid string) *PostCalendarEvents + +// NewPostCalendarEventsFunc returns a new instance of PostCalendarEvents with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostCalendarEventsFunc(tp elastictransport.Interface) NewPostCalendarEvents { + return func(calendarid string) *PostCalendarEvents { + n := New(tp) + + n._calendarid(calendarid) + + return n + } +} + +// Add scheduled events to the calendar. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events +func New(tp elastictransport.Interface) *PostCalendarEvents { + r := &PostCalendarEvents{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostCalendarEvents) Raw(raw io.Reader) *PostCalendarEvents { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PostCalendarEvents) Request(req *Request) *PostCalendarEvents { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PostCalendarEvents) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostCalendarEvents: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + path.WriteString("/") + path.WriteString("events") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
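One practical consequence of `HttpRequest` being exported is that the builder can be exercised completely offline to check routing and content negotiation before anything touches a cluster. A minimal sketch, assuming a zero-value `elastictransport.Client` is acceptable here since `HttpRequest` never performs I/O (the calendar id is invented):

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents"
)

func main() {
	// Zero-value transport: HttpRequest only builds the *http.Request,
	// it never calls Perform, so no connection details are needed.
	var tp elastictransport.Client
	builder := postcalendarevents.NewPostCalendarEventsFunc(&tp)("planned-outages")

	req, err := builder.
		Raw(strings.NewReader(`{"events":[]}`)).
		HttpRequest(context.Background())
	if err != nil {
		panic(err)
	}

	// Prints: POST /_ml/calendars/planned-outages/events
	fmt.Println(req.Method, req.URL.Path)
	// Prints the compatible-with=9 content type set by the builder.
	fmt.Println(req.Header.Get("Content-Type"))
}
```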
+func (r PostCalendarEvents) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.post_calendar_events") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.post_calendar_events") + if reader := instrument.RecordRequestBody(ctx, "ml.post_calendar_events", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.post_calendar_events") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostCalendarEvents query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postcalendarevents.Response +func (r PostCalendarEvents) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.post_calendar_events") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PostCalendarEvents headers map. +func (r *PostCalendarEvents) Header(key, value string) *PostCalendarEvents { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *PostCalendarEvents) _calendarid(calendarid string) *PostCalendarEvents { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PostCalendarEvents) ErrorTrace(errortrace bool) *PostCalendarEvents { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostCalendarEvents) FilterPath(filterpaths ...string) *PostCalendarEvents { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostCalendarEvents) Human(human bool) *PostCalendarEvents { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostCalendarEvents) Pretty(pretty bool) *PostCalendarEvents { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A list of one of more scheduled events. The event’s start and end times can +// be specified as integer milliseconds since the epoch or as a string in ISO +// 8601 format. +// API name: events +func (r *PostCalendarEvents) Events(events ...types.CalendarEventVariant) *PostCalendarEvents { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range events { + + r.req.Events = append(r.req.Events, *v.CalendarEventCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/request.go new file mode 100644 index 000000000..4f3b6a0bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
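Putting the builder surface above together, a round trip against a live cluster might look like the following sketch. The endpoint URL, calendar id, and event payload are all illustrative, and the transport construction follows the elastic-transport-go `Config` pattern rather than anything defined in this file:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // address is illustrative
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Event times are epoch milliseconds; values are made up for the example.
	body := `{"events":[{"description":"maintenance window",
	          "start_time":1735689600000,"end_time":1735693200000}]}`

	res, err := postcalendarevents.NewPostCalendarEventsFunc(tp)("planned-outages").
		Raw(strings.NewReader(body)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored %d event(s)\n", len(res.Events))
}
```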
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postcalendarevents + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package postcalendarevents +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L48 +type Request struct { + + // Events A list of one of more scheduled events. The event’s start and end times can + // be specified as integer milliseconds since the epoch or as a string in ISO + // 8601 format. + Events []types.CalendarEvent `json:"events"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Postcalendarevents request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/response.go new file mode 100644 index 000000000..381c98bf1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postcalendarevents + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package postcalendarevents +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 +type Response struct { + Events []types.CalendarEvent `json:"events"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/post_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/post_data.go new file mode 100644 index 000000000..57ae45d68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/post_data.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
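The `FromJSON` helper above offers a typed alternative to `Raw` when the payload starts life as a string: the JSON is decoded into the generated request struct, which can then be attached with `.Request(req)` instead of a raw reader. A short self-contained sketch with invented values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents"
)

func main() {
	// Payload values are illustrative; times are epoch milliseconds.
	req, err := postcalendarevents.NewRequest().FromJSON(
		`{"events":[{"description":"release freeze",
		  "start_time":1735689600000,"end_time":1735776000000}]}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decoded %d typed event(s)\n", len(req.Events))
	// req can now be handed to the builder via .Request(req) instead of .Raw(...).
}
```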
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Send data to an anomaly detection job for analysis. +// +// IMPORTANT: For each job, data can be accepted from only a single connection +// at a time. +// It is not currently possible to post data to multiple jobs using wildcards or +// a comma-separated list. +package postdata + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostData struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostData type alias for index. +type NewPostData func(jobid string) *PostData + +// NewPostDataFunc returns a new instance of PostData with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostDataFunc(tp elastictransport.Interface) NewPostData { + return func(jobid string) *PostData { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Send data to an anomaly detection job for analysis. +// +// IMPORTANT: For each job, data can be accepted from only a single connection +// at a time. +// It is not currently possible to post data to multiple jobs using wildcards or +// a comma-separated list. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data +func New(tp elastictransport.Interface) *PostData { + r := &PostData{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostData) Raw(raw io.Reader) *PostData { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *PostData) Request(req *Request) *PostData { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PostData) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostData: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_data") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PostData) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.post_data") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.post_data") + if reader := instrument.RecordRequestBody(ctx, "ml.post_data", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.post_data") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostData query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postdata.Response +func (r PostData) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.post_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PostData headers map. +func (r *PostData) Header(key, value string) *PostData { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. The job must have a state of open +// to receive and process the data. +// API Name: jobid +func (r *PostData) _jobid(jobid string) *PostData { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ResetEnd Specifies the end of the bucket resetting range. +// API name: reset_end +func (r *PostData) ResetEnd(datetime string) *PostData { + r.values.Set("reset_end", datetime) + + return r +} + +// ResetStart Specifies the start of the bucket resetting range. 
+// API name: reset_start +func (r *PostData) ResetStart(datetime string) *PostData { + r.values.Set("reset_start", datetime) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostData) ErrorTrace(errortrace bool) *PostData { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostData) FilterPath(filterpaths ...string) *PostData { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostData) Human(human bool) *PostData { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostData) Pretty(pretty bool) *PostData { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/request.go new file mode 100644 index 000000000..60fabe223 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
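Since the postdata `Request` defined below is just a slice of raw JSON documents, batching data into an open job reduces to appending raw messages. A sketch under the same invented-endpoint assumptions as the earlier examples, mindful of the single-connection rule from the package comment:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// Request is []json.RawMessage: each element is one document for analysis.
	req := postdata.NewRequest()
	*req = append(*req,
		json.RawMessage(`{"timestamp":1735689600000,"bytes":512}`),
		json.RawMessage(`{"timestamp":1735689660000,"bytes":2048}`),
	)

	res, err := postdata.NewPostDataFunc(tp)("my-anomaly-job").
		ResetStart("2025-01-01T00:00:00Z"). // optional bucket-reset window
		ResetEnd("2025-01-01T01:00:00Z").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("processed %d record(s)\n", res.ProcessedRecordCount)
}
```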
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postdata + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package postdata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L77 +type Request = []json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := make([]json.RawMessage, 0) + + return &r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/response.go new file mode 100644 index 000000000..29cd52acb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postdata + +// Response holds the response body struct for the package postdata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/post_data/MlPostJobDataResponse.ts#L24-L45 +type Response struct { + BucketCount int64 `json:"bucket_count"` + EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` + EmptyBucketCount int64 `json:"empty_bucket_count"` + InputBytes int64 `json:"input_bytes"` + InputFieldCount int64 `json:"input_field_count"` + InputRecordCount int64 `json:"input_record_count"` + InvalidDateCount int64 `json:"invalid_date_count"` + JobId string `json:"job_id"` + LastDataTime *int64 `json:"last_data_time,omitempty"` + LatestEmptyBucketTimestamp *int64 `json:"latest_empty_bucket_timestamp,omitempty"` + LatestRecordTimestamp *int64 `json:"latest_record_timestamp,omitempty"` + LatestSparseBucketTimestamp *int64 `json:"latest_sparse_bucket_timestamp,omitempty"` + LogTime *int64 `json:"log_time,omitempty"` + MissingFieldCount int64 `json:"missing_field_count"` + OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` + ProcessedFieldCount int64 `json:"processed_field_count"` + ProcessedRecordCount int64 `json:"processed_record_count"` + SparseBucketCount int64 `json:"sparse_bucket_count"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/preview_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/preview_datafeed.go new file mode 100644 index 000000000..998bb5abc --- 
/dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/preview_datafeed.go @@ -0,0 +1,448 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Preview a datafeed. +// This API returns the first "page" of search results from a datafeed. +// You can preview an existing datafeed or provide configuration details for a +// datafeed +// and anomaly detection job in the API. The preview shows the structure of the +// data +// that will be passed to the anomaly detection engine. +// IMPORTANT: When Elasticsearch security features are enabled, the preview uses +// the credentials of the user that +// called the API. However, when the datafeed starts it uses the roles of the +// last user that created or updated the +// datafeed. To get a preview that accurately reflects the behavior of the +// datafeed, use the appropriate credentials. +// You can also use secondary authorization headers to supply the credentials. +package previewdatafeed + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PreviewDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPreviewDatafeed type alias for index. +type NewPreviewDatafeed func() *PreviewDatafeed + +// NewPreviewDatafeedFunc returns a new instance of PreviewDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPreviewDatafeedFunc(tp elastictransport.Interface) NewPreviewDatafeed { + return func() *PreviewDatafeed { + n := New(tp) + + return n + } +} + +// Preview a datafeed. +// This API returns the first "page" of search results from a datafeed. +// You can preview an existing datafeed or provide configuration details for a +// datafeed +// and anomaly detection job in the API. The preview shows the structure of the +// data +// that will be passed to the anomaly detection engine. 
+// IMPORTANT: When Elasticsearch security features are enabled, the preview uses +// the credentials of the user that +// called the API. However, when the datafeed starts it uses the roles of the +// last user that created or updated the +// datafeed. To get a preview that accurately reflects the behavior of the +// datafeed, use the appropriate credentials. +// You can also use secondary authorization headers to supply the credentials. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed +func New(tp elastictransport.Interface) *PreviewDatafeed { + r := &PreviewDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PreviewDatafeed) Raw(raw io.Reader) *PreviewDatafeed { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PreviewDatafeed) Request(req *Request) *PreviewDatafeed { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PreviewDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PreviewDatafeed: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PreviewDatafeed) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.preview_datafeed") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.preview_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.preview_datafeed", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.preview_datafeed") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PreviewDatafeed query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a previewdatafeed.Response +func (r PreviewDatafeed) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.preview_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PreviewDatafeed headers map. +func (r *PreviewDatafeed) Header(key, value string) *PreviewDatafeed { + r.headers.Set(key, value) + + return r +} + +// DatafeedId A numerical character string that uniquely identifies the datafeed. This +// identifier can contain lowercase +// alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must +// start and end with alphanumeric +// characters. NOTE: If you use this path parameter, you cannot provide datafeed +// or anomaly detection job +// configuration details in the request body. 
+// API Name: datafeedid +func (r *PreviewDatafeed) DatafeedId(datafeedid string) *PreviewDatafeed { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// Start The start time from where the datafeed preview should begin +// API name: start +func (r *PreviewDatafeed) Start(datetime string) *PreviewDatafeed { + r.values.Set("start", datetime) + + return r +} + +// End The end time when the datafeed preview should stop +// API name: end +func (r *PreviewDatafeed) End(datetime string) *PreviewDatafeed { + r.values.Set("end", datetime) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PreviewDatafeed) ErrorTrace(errortrace bool) *PreviewDatafeed { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PreviewDatafeed) FilterPath(filterpaths ...string) *PreviewDatafeed { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PreviewDatafeed) Human(human bool) *PreviewDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PreviewDatafeed) Pretty(pretty bool) *PreviewDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The datafeed definition to preview. +// API name: datafeed_config +func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() + + return r +} + +// The configuration details for the anomaly detection job that is associated +// with the datafeed. If the +// `datafeed_config` object does not include a `job_id` that references an +// existing anomaly detection job, you must +// supply this `job_config` object. If you include both a `job_id` and a +// `job_config`, the latter information is +// used. You cannot specify a `job_config` object unless you also supply a +// `datafeed_config` object. +// API name: job_config +func (r *PreviewDatafeed) JobConfig(jobconfig types.JobConfigVariant) *PreviewDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.JobConfig = jobconfig.JobConfigCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/request.go new file mode 100644 index 000000000..424b8f018 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/request.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewdatafeed + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package previewdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L81 +type Request struct { + + // DatafeedConfig The datafeed definition to preview. + DatafeedConfig *types.DatafeedConfig `json:"datafeed_config,omitempty"` + // JobConfig The configuration details for the anomaly detection job that is associated + // with the datafeed. If the + // `datafeed_config` object does not include a `job_id` that references an + // existing anomaly detection job, you must + // supply this `job_config` object. If you include both a `job_id` and a + // `job_config`, the latter information is + // used. You cannot specify a `job_config` object unless you also supply a + // `datafeed_config` object. + JobConfig *types.JobConfig `json:"job_config,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Previewdatafeed request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/response.go new file mode 100644 index 000000000..5873830cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
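In practice the id-based variant of this preview is the common path: setting `DatafeedId` routes the request to `/_ml/datafeeds/{id}/_preview`, while leaving it unset posts an ad-hoc `datafeed_config`/`job_config` body to `/_ml/datafeeds/_preview`. A sketch of the first form, with invented names and the same hedged transport setup as before; note that `Do` returns a slice of raw JSON documents, one per preview row:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	docs, err := previewdatafeed.New(tp).
		DatafeedId("datafeed-weblogs"). // invented datafeed id
		Start("2025-01-01T00:00:00Z").
		End("2025-01-02T00:00:00Z").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for i, d := range docs {
		fmt.Printf("doc %d: %s\n", i, d) // d is a json.RawMessage
	}
}
```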
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewdatafeed + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package previewdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L23 + +type Response []json.RawMessage + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go new file mode 100644 index 000000000..df345336f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go @@ -0,0 +1,388 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Preview features used by data frame analytics. +// Preview the extracted features used by a data frame analytics config. +package previewdataframeanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PreviewDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPreviewDataFrameAnalytics type alias for index. +type NewPreviewDataFrameAnalytics func() *PreviewDataFrameAnalytics + +// NewPreviewDataFrameAnalyticsFunc returns a new instance of PreviewDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPreviewDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPreviewDataFrameAnalytics { + return func() *PreviewDataFrameAnalytics { + n := New(tp) + + return n + } +} + +// Preview features used by data frame analytics. +// Preview the extracted features used by a data frame analytics config. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics +func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { + r := &PreviewDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PreviewDataFrameAnalytics) Raw(raw io.Reader) *PreviewDataFrameAnalytics { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PreviewDataFrameAnalytics) Request(req *Request) *PreviewDataFrameAnalytics { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PreviewDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PreviewDataFrameAnalytics: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided 
transport and returns an http.Response. +func (r PreviewDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.preview_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.preview_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.preview_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.preview_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PreviewDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a previewdataframeanalytics.Response +func (r PreviewDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.preview_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PreviewDataFrameAnalytics headers map. +func (r *PreviewDataFrameAnalytics) Header(key, value string) *PreviewDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. +// API Name: id +func (r *PreviewDataFrameAnalytics) Id(id string) *PreviewDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PreviewDataFrameAnalytics) ErrorTrace(errortrace bool) *PreviewDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PreviewDataFrameAnalytics) FilterPath(filterpaths ...string) *PreviewDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PreviewDataFrameAnalytics) Human(human bool) *PreviewDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PreviewDataFrameAnalytics) Pretty(pretty bool) *PreviewDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A data frame analytics config as described in create data frame analytics +// jobs. Note that `id` and `dest` don’t need to be provided in the context of +// this API. +// API name: config +func (r *PreviewDataFrameAnalytics) Config(config types.DataframePreviewConfigVariant) *PreviewDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Config = config.DataframePreviewConfigCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/request.go new file mode 100644 index 000000000..dc81dd6f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
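For an existing data frame analytics job the id-only form is sufficient; the `Config` setter above would instead carry a `types.DataframePreviewConfig` for a job that has not been created yet. A minimal sketch with an invented job id and the same hedged transport setup:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	res, err := previewdataframeanalytics.New(tp).
		Id("loganalytics-dfa"). // routes to /_ml/data_frame/analytics/{id}/_preview
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Each entry is a processed feature-name -> value row.
	for _, row := range res.FeatureValues {
		fmt.Println(row)
	}
}
```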
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewdataframeanalytics + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package previewdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L60 +type Request struct { + + // Config A data frame analytics config as described in create data frame analytics + // jobs. Note that `id` and `dest` don’t need to be provided in the context of + // this API. + Config *types.DataframePreviewConfig `json:"config,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Previewdataframeanalytics request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/response.go new file mode 100644 index 000000000..ea9800779 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewdataframeanalytics + +// Response holds the response body struct for the package previewdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 +type Response struct { + + // FeatureValues An array of objects that contain feature name and value pairs. The features + // have been processed and indicate what will be sent to the model for training. 
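// Usage sketch (illustrative): FromJSON above loads an arbitrary JSON body
// into the generated request struct; the config payload shown here is an
// assumed example shape, not taken from this file:
//
//	body := `{"config":{"source":{"index":["my-source-index"]},"analysis":{"outlier_detection":{}}}}`
//	req, err := previewdataframeanalytics.NewRequest().FromJSON(body)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// req can then be attached to the builder via its Request method.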
+ FeatureValues []map[string]string `json:"feature_values"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/put_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/put_calendar.go new file mode 100644 index 000000000..d077fb1b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/put_calendar.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a calendar. +package putcalendar + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutCalendar struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + calendarid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutCalendar type alias for index. +type NewPutCalendar func(calendarid string) *PutCalendar + +// NewPutCalendarFunc returns a new instance of PutCalendar with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutCalendarFunc(tp elastictransport.Interface) NewPutCalendar { + return func(calendarid string) *PutCalendar { + n := New(tp) + + n._calendarid(calendarid) + + return n + } +} + +// Create a calendar. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar +func New(tp elastictransport.Interface) *PutCalendar { + r := &PutCalendar{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *PutCalendar) Raw(raw io.Reader) *PutCalendar { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutCalendar) Request(req *Request) *PutCalendar { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutCalendar) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutCalendar: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
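// Usage sketch (illustrative): HttpRequest lets a caller inspect the request
// a builder would send without performing it; `transport` is an assumed
// elastictransport.Interface:
//
//	pc := putcalendar.NewPutCalendarFunc(transport)("planned-maintenance").
//		Raw(strings.NewReader(`{"description":"maintenance windows"}`))
//	httpReq, err := pc.HttpRequest(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(httpReq.Method, httpReq.URL.Path) // PUT /_ml/calendars/planned-maintenance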
+func (r PutCalendar) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_calendar") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_calendar") + if reader := instrument.RecordRequestBody(ctx, "ml.put_calendar", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_calendar") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutCalendar query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putcalendar.Response +func (r PutCalendar) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_calendar") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutCalendar headers map. +func (r *PutCalendar) Header(key, value string) *PutCalendar { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *PutCalendar) _calendarid(calendarid string) *PutCalendar { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutCalendar) ErrorTrace(errortrace bool) *PutCalendar { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *PutCalendar) FilterPath(filterpaths ...string) *PutCalendar { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutCalendar) Human(human bool) *PutCalendar { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutCalendar) Pretty(pretty bool) *PutCalendar { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A description of the calendar. +// API name: description +func (r *PutCalendar) Description(description string) *PutCalendar { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// An array of anomaly detection job identifiers. +// API name: job_ids +func (r *PutCalendar) JobIds(jobids ...string) *PutCalendar { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range jobids { + + r.req.JobIds = append(r.req.JobIds, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/request.go new file mode 100644 index 000000000..ffc2c009f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcalendar + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package putcalendar +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L51 +type Request struct { + + // Description A description of the calendar. + Description *string `json:"description,omitempty"` + // JobIds An array of anomaly detection job identifiers. 
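// Usage sketch (illustrative): creating a calendar end to end with the fluent
// setters in put_calendar.go above; `transport` is an assumed
// elastictransport.Interface:
//
//	res, err := putcalendar.NewPutCalendarFunc(transport)("planned-maintenance").
//		Description("Scheduled maintenance windows").
//		JobIds("my-anomaly-job").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(res.CalendarId) // "planned-maintenance"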
+ JobIds []string `json:"job_ids,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putcalendar request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/response.go new file mode 100644 index 000000000..2b018dc2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar/response.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcalendar + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Response holds the response body struct for the package putcalendar +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 +type Response struct { + + // CalendarId A string that uniquely identifies a calendar. + CalendarId string `json:"calendar_id"` + // Description A description of the calendar. + Description *string `json:"description,omitempty"` + // JobIds A list of anomaly detection job identifiers or group names. 
+ JobIds []string `json:"job_ids"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return fmt.Errorf("%s | %w", "CalendarId", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/put_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/put_calendar_job.go new file mode 100644 index 000000000..31b7d774b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/put_calendar_job.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Add anomaly detection job to calendar. +package putcalendarjob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + calendaridMask = iota + 1 + + jobidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutCalendarJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + calendarid string + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutCalendarJob type alias for index. 
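// Decoding note (illustrative): the custom UnmarshalJSON in
// putcalendar/response.go above (and its twin in putcalendarjob/response.go
// below) accepts `job_ids` either as a JSON array or as a bare string,
// promoting the latter to a one-element slice:
//
//	var a putcalendar.Response
//	_ = json.Unmarshal([]byte(`{"calendar_id":"c1","job_ids":"job-1"}`), &a)
//	// a.JobIds == []string{"job-1"}
//
//	var b putcalendar.Response
//	_ = json.Unmarshal([]byte(`{"calendar_id":"c1","job_ids":["j1","j2"]}`), &b)
//	// b.JobIds == []string{"j1", "j2"}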
+type NewPutCalendarJob func(calendarid, jobid string) *PutCalendarJob + +// NewPutCalendarJobFunc returns a new instance of PutCalendarJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutCalendarJobFunc(tp elastictransport.Interface) NewPutCalendarJob { + return func(calendarid, jobid string) *PutCalendarJob { + n := New(tp) + + n._calendarid(calendarid) + + n._jobid(jobid) + + return n + } +} + +// Add anomaly detection job to calendar. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job +func New(tp elastictransport.Interface) *PutCalendarJob { + r := &PutCalendarJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutCalendarJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == calendaridMask|jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("calendars") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "calendarid", r.calendarid) + } + path.WriteString(r.calendarid) + path.WriteString("/") + path.WriteString("jobs") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutCalendarJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_calendar_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_calendar_job") + if reader := instrument.RecordRequestBody(ctx, "ml.put_calendar_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_calendar_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutCalendarJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putcalendarjob.Response +func (r PutCalendarJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
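// Usage sketch (illustrative): PutCalendarJob carries no request body, so
// IsSuccess below is the convenient form when only the outcome matters;
// `transport` is an assumed elastictransport.Interface:
//
//	ok, err := putcalendarjob.
//		NewPutCalendarJobFunc(transport)("planned-maintenance", "my-anomaly-job").
//		IsSuccess(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("added:", ok) // false without an error indicates a 404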
+func (r PutCalendarJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_calendar_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PutCalendarJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PutCalendarJob headers map. +func (r *PutCalendarJob) Header(key, value string) *PutCalendarJob { + r.headers.Set(key, value) + + return r +} + +// CalendarId A string that uniquely identifies a calendar. +// API Name: calendarid +func (r *PutCalendarJob) _calendarid(calendarid string) *PutCalendarJob { + r.paramSet |= calendaridMask + r.calendarid = calendarid + + return r +} + +// JobId An identifier for the anomaly detection jobs. It can be a job identifier, a +// group name, or a comma-separated list of jobs or groups. +// API Name: jobid +func (r *PutCalendarJob) _jobid(jobid string) *PutCalendarJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutCalendarJob) ErrorTrace(errortrace bool) *PutCalendarJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutCalendarJob) FilterPath(filterpaths ...string) *PutCalendarJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutCalendarJob) Human(human bool) *PutCalendarJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutCalendarJob) Pretty(pretty bool) *PutCalendarJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/response.go new file mode 100644 index 000000000..f075a84c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob/response.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putcalendarjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Response holds the response body struct for the package putcalendarjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 +type Response struct { + + // CalendarId A string that uniquely identifies a calendar. + CalendarId string `json:"calendar_id"` + // Description A description of the calendar. + Description *string `json:"description,omitempty"` + // JobIds A list of anomaly detection job identifiers or group names. + JobIds []string `json:"job_ids"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return fmt.Errorf("%s | %w", "CalendarId", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/put_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/put_datafeed.go new file mode 100644 index 000000000..e564038e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/put_datafeed.go @@ -0,0 +1,705 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a datafeed. +// Datafeeds retrieve data from Elasticsearch for analysis by an anomaly +// detection job. +// You can associate only one datafeed with each anomaly detection job. +// The datafeed contains a query that runs at a defined interval (`frequency`). +// If you are concerned about delayed data, you can add a delay (`query_delay') +// at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`. +// +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the user who created it had +// at the time of creation and runs the query using those same roles. If you +// provide secondary authorization headers, +// those credentials are used instead. +// You must use Kibana, this API, or the create anomaly detection jobs API to +// create a datafeed. Do not add a datafeed +// directly to the `.ml-config` index. Do not give users `write` privileges on +// the `.ml-config` index. +package putdatafeed + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutDatafeed type alias for index. +type NewPutDatafeed func(datafeedid string) *PutDatafeed + +// NewPutDatafeedFunc returns a new instance of PutDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDatafeedFunc(tp elastictransport.Interface) NewPutDatafeed { + return func(datafeedid string) *PutDatafeed { + n := New(tp) + + n._datafeedid(datafeedid) + + return n + } +} + +// Create a datafeed. +// Datafeeds retrieve data from Elasticsearch for analysis by an anomaly +// detection job. +// You can associate only one datafeed with each anomaly detection job. +// The datafeed contains a query that runs at a defined interval (`frequency`). +// If you are concerned about delayed data, you can add a delay (`query_delay') +// at each interval. +// By default, the datafeed uses the following query: `{"match_all": {"boost": +// 1}}`. 
+// +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the user who created it had +// at the time of creation and runs the query using those same roles. If you +// provide secondary authorization headers, +// those credentials are used instead. +// You must use Kibana, this API, or the create anomaly detection jobs API to +// create a datafeed. Do not add a datafeed +// directly to the `.ml-config` index. Do not give users `write` privileges on +// the `.ml-config` index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed +func New(tp elastictransport.Interface) *PutDatafeed { + r := &PutDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDatafeed) Raw(raw io.Reader) *PutDatafeed { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDatafeed) Request(req *Request) *PutDatafeed { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDatafeed: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
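// Usage sketch (illustrative): the default datafeed query documented above,
// `{"match_all": {"boost": 1}}`, written out explicitly through the request
// struct's FromJSON helper (defined in request.go below); key names follow
// the struct's JSON tags:
//
//	req, err := putdatafeed.NewRequest().FromJSON(`{
//	  "indices": ["my-metrics-*"],
//	  "job_id": "my-anomaly-job",
//	  "query": {"match_all": {"boost": 1}}
//	}`)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Attach with Request(req), or use the fluent setters below instead.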
+func (r PutDatafeed) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_datafeed") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.put_datafeed", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_datafeed") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutDatafeed query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdatafeed.Response +func (r PutDatafeed) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutDatafeed headers map. +func (r *PutDatafeed) Header(key, value string) *PutDatafeed { + r.headers.Set(key, value) + + return r +} + +// DatafeedId A numerical character string that uniquely identifies the datafeed. +// This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. +// It must start and end with alphanumeric characters. +// API Name: datafeedid +func (r *PutDatafeed) _datafeedid(datafeedid string) *PutDatafeed { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// AllowNoIndices If true, wildcard indices expressions that resolve into no concrete indices +// are ignored. This includes the `_all` +// string or when no indices are specified. 
+// API name: allow_no_indices +func (r *PutDatafeed) AllowNoIndices(allownoindices bool) *PutDatafeed { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines +// whether wildcard expressions match hidden data streams. Supports +// comma-separated values. +// API name: expand_wildcards +func (r *PutDatafeed) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutDatafeed { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded, or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *PutDatafeed) IgnoreThrottled(ignorethrottled bool) *PutDatafeed { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If true, unavailable indices (missing or closed) are ignored. +// API name: ignore_unavailable +func (r *PutDatafeed) IgnoreUnavailable(ignoreunavailable bool) *PutDatafeed { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutDatafeed) ErrorTrace(errortrace bool) *PutDatafeed { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutDatafeed) FilterPath(filterpaths ...string) *PutDatafeed { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutDatafeed) Human(human bool) *PutDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutDatafeed) Pretty(pretty bool) *PutDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If set, the datafeed performs aggregation searches. +// Support for aggregations is limited and should be used only with low +// cardinality data. 
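// Usage sketch (assumption: the value names in
// typedapi/types/enums/expandwildcard, e.g. Open and Hidden, follow the usual
// generated form): ExpandWildcards above takes typed enum values rather than
// raw strings:
//
//	pd := putdatafeed.NewPutDatafeedFunc(transport)("datafeed-my-job")
//	pd = pd.ExpandWildcards(expandwildcard.Open, expandwildcard.Hidden)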
+// API name: aggregations +func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aggregations = aggregations + return r +} + +func (r *PutDatafeed) AddAggregation(key string, value types.AggregationsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp + return r +} + +// Datafeeds might be required to search over long time periods, for several +// months or years. +// This search is split into time chunks in order to ensure the load on +// Elasticsearch is managed. +// Chunking configuration controls how the size of these time chunks are +// calculated; +// it is an advanced configuration option. +// API name: chunking_config +func (r *PutDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() + + return r +} + +// Specifies whether the datafeed checks for missing data and the size of the +// window. +// The datafeed can optionally search over indices that have already been read +// in an effort to determine whether +// any data has subsequently been added to the index. If missing data is found, +// it is a good indication that the +// `query_delay` is set too low and the data is being indexed after the datafeed +// has passed that moment in time. +// This check runs only on real-time datafeeds. +// API name: delayed_data_check_config +func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() + + return r +} + +// The interval at which scheduled queries are made while the datafeed runs in +// real time. +// The default value is either the bucket span for short bucket spans, or, for +// longer bucket spans, a sensible +// fraction of the bucket span. When `frequency` is shorter than the bucket +// span, interim results for the last +// (partial) bucket are written then eventually overwritten by the full bucket +// results. If the datafeed uses +// aggregations, this value must be divisible by the interval of the date +// histogram aggregation. +// API name: frequency +func (r *PutDatafeed) Frequency(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() + + return r +} + +// API name: headers +func (r *PutDatafeed) Headers(httpheaders types.HttpHeadersVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() + + return r +} + +// An array of index names. Wildcards are supported. 
If any of the indices are +// in remote clusters, the master +// nodes and the machine learning nodes must have the `remote_cluster_client` +// role. +// API name: indices +func (r *PutDatafeed) Indices(indices ...string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Indices = indices + + return r +} + +// Specifies index expansion options that are used during search +// API name: indices_options +func (r *PutDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() + + return r +} + +// Identifier for the anomaly detection job. +// API name: job_id +func (r *PutDatafeed) JobId(id string) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.JobId = &id + + return r +} + +// If a real-time datafeed has never seen any data (including during any initial +// training period), it automatically +// stops and closes the associated job after this many real-time searches return +// no documents. In other words, +// it stops after `frequency` times `max_empty_searches` of real-time operation. +// If not set, a datafeed with no +// end time that sees no data remains started until it is explicitly stopped. By +// default, it is not set. +// API name: max_empty_searches +func (r *PutDatafeed) MaxEmptySearches(maxemptysearches int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxEmptySearches = &maxemptysearches + + return r +} + +// The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an +// Elasticsearch search POST body. All the options that are supported by +// Elasticsearch can be used, as this +// object is passed verbatim to Elasticsearch. +// API name: query +func (r *PutDatafeed) Query(query types.QueryVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// The number of seconds behind real time that data is queried. For example, if +// data from 10:04 a.m. might +// not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 +// seconds. The default +// value is randomly selected between `60s` and `120s`. This randomness improves +// the query performance +// when there are multiple jobs running on the same node. +// API name: query_delay +func (r *PutDatafeed) QueryDelay(duration types.DurationVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() + + return r +} + +// Specifies runtime fields for the datafeed search. +// API name: runtime_mappings +func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// Specifies scripts that evaluate custom expressions and returns script fields +// to the datafeed. 
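// Usage sketch (illustrative): a minimal datafeed created with the fluent
// setters above; `transport` is an assumed elastictransport.Interface and the
// index pattern and job id are placeholders:
//
//	res, err := putdatafeed.NewPutDatafeedFunc(transport)("datafeed-my-job").
//		Indices("my-metrics-*").
//		JobId("my-anomaly-job").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = res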
+// The detector configuration objects in a job can contain functions that use +// these script fields. +// API name: script_fields +func (r *PutDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScriptFields = scriptfields + return r +} + +func (r *PutDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp + return r +} + +// The size parameter that is used in Elasticsearch searches when the datafeed +// does not use aggregations. +// The maximum value is the value of `index.max_result_window`, which is 10,000 +// by default. +// API name: scroll_size +func (r *PutDatafeed) ScrollSize(scrollsize int) *PutDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ScrollSize = &scrollsize + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/request.go new file mode 100644 index 000000000..bda8a986e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/request.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatafeed + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L184 +type Request struct { + + // Aggregations If set, the datafeed performs aggregation searches. + // Support for aggregations is limited and should be used only with low + // cardinality data. + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + // ChunkingConfig Datafeeds might be required to search over long time periods, for several + // months or years. + // This search is split into time chunks in order to ensure the load on + // Elasticsearch is managed. 
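// Usage sketch (illustrative): the Request struct being defined here can also
// be populated directly and handed to the builder's Request method; pointer
// fields model optional JSON keys:
//
//	req := putdatafeed.NewRequest()
//	req.Indices = []string{"my-metrics-*"}
//	jobID := "my-anomaly-job"
//	req.JobId = &jobID
//	scroll := 500
//	req.ScrollSize = &scroll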
+ // Chunking configuration controls how the size of these time chunks are + // calculated; + // it is an advanced configuration option. + ChunkingConfig *types.ChunkingConfig `json:"chunking_config,omitempty"` + // DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the + // window. + // The datafeed can optionally search over indices that have already been read + // in an effort to determine whether + // any data has subsequently been added to the index. If missing data is found, + // it is a good indication that the + // `query_delay` is set too low and the data is being indexed after the datafeed + // has passed that moment in time. + // This check runs only on real-time datafeeds. + DelayedDataCheckConfig *types.DelayedDataCheckConfig `json:"delayed_data_check_config,omitempty"` + // Frequency The interval at which scheduled queries are made while the datafeed runs in + // real time. + // The default value is either the bucket span for short bucket spans, or, for + // longer bucket spans, a sensible + // fraction of the bucket span. When `frequency` is shorter than the bucket + // span, interim results for the last + // (partial) bucket are written then eventually overwritten by the full bucket + // results. If the datafeed uses + // aggregations, this value must be divisible by the interval of the date + // histogram aggregation. + Frequency types.Duration `json:"frequency,omitempty"` + Headers types.HttpHeaders `json:"headers,omitempty"` + // Indices An array of index names. Wildcards are supported. If any of the indices are + // in remote clusters, the master + // nodes and the machine learning nodes must have the `remote_cluster_client` + // role. + Indices []string `json:"indices,omitempty"` + // IndicesOptions Specifies index expansion options that are used during search + IndicesOptions *types.IndicesOptions `json:"indices_options,omitempty"` + // JobId Identifier for the anomaly detection job. + JobId *string `json:"job_id,omitempty"` + // MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial + // training period), it automatically + // stops and closes the associated job after this many real-time searches return + // no documents. In other words, + // it stops after `frequency` times `max_empty_searches` of real-time operation. + // If not set, a datafeed with no + // end time that sees no data remains started until it is explicitly stopped. By + // default, it is not set. + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + // Query The Elasticsearch query domain-specific language (DSL). This value + // corresponds to the query object in an + // Elasticsearch search POST body. All the options that are supported by + // Elasticsearch can be used, as this + // object is passed verbatim to Elasticsearch. + Query *types.Query `json:"query,omitempty"` + // QueryDelay The number of seconds behind real time that data is queried. For example, if + // data from 10:04 a.m. might + // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 + // seconds. The default + // value is randomly selected between `60s` and `120s`. This randomness improves + // the query performance + // when there are multiple jobs running on the same node. + QueryDelay types.Duration `json:"query_delay,omitempty"` + // RuntimeMappings Specifies runtime fields for the datafeed search. 
+ RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Specifies scripts that evaluate custom expressions and returns script fields + // to the datafeed. + // The detector configuration objects in a job can contain functions that use + // these script fields. + ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + // ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed + // does not use aggregations. + // The maximum value is the value of `index.max_result_window`, which is 10,000 + // by default. + ScrollSize *int `json:"scroll_size,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.Aggregations, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdatafeed request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "chunking_config": + if err := dec.Decode(&s.ChunkingConfig); err != nil { + return fmt.Errorf("%s | %w", "ChunkingConfig", err) + } + + case "delayed_data_check_config": + if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil { + return fmt.Errorf("%s | %w", "DelayedDataCheckConfig", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "indices", "indexes": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return fmt.Errorf("%s | %w", "IndicesOptions", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "max_empty_searches": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxEmptySearches", err) + } + s.MaxEmptySearches = &value + case float64: + f := int(v) + s.MaxEmptySearches = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "query_delay": + if err := dec.Decode(&s.QueryDelay); err != nil { + return fmt.Errorf("%s | %w", "QueryDelay", err) + } + + case "runtime_mappings": + if err := 
dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]types.ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "scroll_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScrollSize", err) + } + s.ScrollSize = &value + case float64: + f := int(v) + s.ScrollSize = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/response.go new file mode 100644 index 000000000..bc13346b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
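The UnmarshalJSON above is deliberately lenient: `aggs` and `indexes` are accepted as aliases, a lone string is promoted to a one-element `indices` array, and integer fields such as `scroll_size` and `max_empty_searches` may arrive as quoted strings. A short, runnable illustration of that leniency through `FromJSON`; the values are made up (the vendored response.go resumes below):

```go
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed"
)

func main() {
	// "indexes" instead of "indices", and scroll_size as a quoted string:
	// both are normalized by the lenient decoder above.
	req, err := putdatafeed.NewRequest().FromJSON(`{
		"job_id":      "low-request-rate",
		"indexes":     "kibana_sample_data_logs",
		"scroll_size": "5000"
	}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*req.JobId, req.Indices, *req.ScrollSize)
	// Prints: low-request-rate [kibana_sample_data_logs] 5000
}
```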
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdatafeed + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 +type Response struct { + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` + ChunkingConfig types.ChunkingConfig `json:"chunking_config"` + DatafeedId string `json:"datafeed_id"` + DelayedDataCheckConfig *types.DelayedDataCheckConfig `json:"delayed_data_check_config,omitempty"` + Frequency types.Duration `json:"frequency,omitempty"` + Indices []string `json:"indices"` + IndicesOptions *types.IndicesOptions `json:"indices_options,omitempty"` + JobId string `json:"job_id"` + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + Query types.Query `json:"query"` + QueryDelay types.Duration `json:"query_delay"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + ScrollSize int `json:"scroll_size"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregations, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go new file mode 100644 index 000000000..7ff3b48b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go @@ -0,0 +1,567 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a data frame analytics job. +// This API creates a data frame analytics job that performs an analysis on the +// source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. +// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. 
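Because every typed endpoint talks to a plain `elastictransport.Interface`, a canned transport is enough to exercise decoding into the `Response` struct above without a live cluster. A sketch under the assumption that putdatafeed's `Do` mirrors the ones shown later in this diff; the response body is fabricated:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed"
)

// stubTransport satisfies elastictransport.Interface by always
// answering with the same canned body.
type stubTransport struct{ body string }

func (t stubTransport) Perform(*http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       io.NopCloser(bytes.NewBufferString(t.body)),
	}, nil
}

func main() {
	tp := stubTransport{body: `{
		"datafeed_id": "datafeed-low-requests",
		"job_id":      "low-request-rate",
		"indices":     ["kibana_sample_data_logs"],
		"scroll_size": 1000
	}`}

	res, err := putdatafeed.NewPutDatafeedFunc(tp)("datafeed-low-requests").
		JobId("low-request-rate").
		Indices("kibana_sample_data_logs").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.DatafeedId, res.ScrollSize) // datafeed-low-requests 1000
}
```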
+package putdataframeanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutDataFrameAnalytics type alias for index. +type NewPutDataFrameAnalytics func(id string) *PutDataFrameAnalytics + +// NewPutDataFrameAnalyticsFunc returns a new instance of PutDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPutDataFrameAnalytics { + return func(id string) *PutDataFrameAnalytics { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create a data frame analytics job. +// This API creates a data frame analytics job that performs an analysis on the +// source indices and stores the outcome in a destination index. +// By default, the query used in the source configuration is `{"match_all": +// {}}`. +// +// If the destination index does not exist, it is created automatically when you +// start the job. +// +// If you supply only a subset of the regression or classification parameters, +// hyperparameter optimization occurs. It determines a value for each of the +// undefined parameters. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics +func New(tp elastictransport.Interface) *PutDataFrameAnalytics { + r := &PutDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDataFrameAnalytics) Raw(raw io.Reader) *PutDataFrameAnalytics { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDataFrameAnalytics) Request(req *Request) *PutDataFrameAnalytics { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDataFrameAnalytics: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
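`HttpRequest` is also useful on its own, for inspecting, signing, or replaying the wire request instead of executing it. A hypothetical sketch; a nil transport is tolerated here because nothing is performed:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics"
)

func main() {
	// A nil transport is fine here: HttpRequest only builds the
	// request and never performs it.
	req, err := putdataframeanalytics.NewPutDataFrameAnalyticsFunc(nil)("my-dfa-job").
		Description("inspect the wire format").
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.Path)       // PUT /_ml/data_frame/analytics/my-dfa-job
	fmt.Println(req.Header.Get("Content-Type")) // application/vnd.elasticsearch+json;compatible-with=9
}
```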
+func (r PutDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.put_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdataframeanalytics.Response +func (r PutDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutDataFrameAnalytics headers map. +func (r *PutDataFrameAnalytics) Header(key, value string) *PutDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and +// underscores. It must start and end with alphanumeric characters. +// API Name: id +func (r *PutDataFrameAnalytics) _id(id string) *PutDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
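The decoding branches above fix `Do`'s error contract: any non-2xx body becomes a `*types.ElasticsearchError`, with `Status` backfilled from the HTTP status code when the body carried none, while transport failures surface as wrapped plain errors. A sketch of handling both, assuming `tp` is a configured transport:

```go
package examples

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// createAnalyticsJob distinguishes server-side rejections, which Do
// returns as *types.ElasticsearchError, from transport-level failures.
func createAnalyticsJob(tp elastictransport.Interface, req *putdataframeanalytics.Request) error {
	res, err := putdataframeanalytics.NewPutDataFrameAnalyticsFunc(tp)("my-dfa-job").
		Request(req).
		Do(context.Background())
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// Status was backfilled from the HTTP status code if the
			// body carried none, as the branch above shows.
			return fmt.Errorf("elasticsearch rejected the job (HTTP %d): %w", esErr.Status, esErr)
		}
		return err // connection, serialization, or other transport failure
	}
	fmt.Println("created:", res.Id)
	return nil
}
```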
+// API name: error_trace +func (r *PutDataFrameAnalytics) ErrorTrace(errortrace bool) *PutDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutDataFrameAnalytics) FilterPath(filterpaths ...string) *PutDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutDataFrameAnalytics) Human(human bool) *PutDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutDataFrameAnalytics) Pretty(pretty bool) *PutDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. If +// set to `false` and a machine learning node with capacity to run the job +// cannot be immediately found, the API returns an error. If set to `true`, +// the API does not return an error; the job waits in the `starting` state +// until sufficient machine learning node capacity is available. This +// behavior is also affected by the cluster-wide +// `xpack.ml.max_lazy_ml_nodes` setting. +// API name: allow_lazy_start +func (r *PutDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// The analysis configuration, which contains the information necessary to +// perform one of the following types of analysis: classification, outlier +// detection, or regression. +// API name: analysis +func (r *PutDataFrameAnalytics) Analysis(analysis types.DataframeAnalysisContainerVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Analysis = *analysis.DataframeAnalysisContainerCaster() + + return r +} + +// Specifies `includes` and/or `excludes` patterns to select which fields +// will be included in the analysis. The patterns specified in `excludes` +// are applied last, therefore `excludes` takes precedence. In other words, +// if the same field is specified in both `includes` and `excludes`, then +// the field will not be included in the analysis. If `analyzed_fields` is +// not set, only the relevant fields will be included. For example, all the +// numeric fields for outlier detection. +// The supported fields vary for each type of analysis. Outlier detection +// requires numeric or `boolean` data to analyze. The algorithms don’t +// support missing values therefore fields that have data types other than +// numeric or boolean are ignored. 
Documents where included fields contain +// missing values, null values, or an array are also ignored. Therefore the +// `dest` index may contain documents that don’t have an outlier score. +// Regression supports fields that are numeric, `boolean`, `text`, +// `keyword`, and `ip` data types. It is also tolerant of missing values. +// Fields that are supported are included in the analysis, other fields are +// ignored. Documents where included fields contain an array with two or +// more values are also ignored. Documents in the `dest` index that don’t +// contain a results field are not included in the regression analysis. +// Classification supports fields that are numeric, `boolean`, `text`, +// `keyword`, and `ip` data types. It is also tolerant of missing values. +// Fields that are supported are included in the analysis, other fields are +// ignored. Documents where included fields contain an array with two or +// more values are also ignored. Documents in the `dest` index that don’t +// contain a results field are not included in the classification analysis. +// Classification analysis can be improved by mapping ordinal variable +// values to a single number. For example, in case of age ranges, you can +// model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. +// API name: analyzed_fields +func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields types.DataframeAnalysisAnalyzedFieldsVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalyzedFields = analyzedfields.DataframeAnalysisAnalyzedFieldsCaster() + + return r +} + +// A description of the job. +// API name: description +func (r *PutDataFrameAnalytics) Description(description string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The destination configuration. +// API name: dest +func (r *PutDataFrameAnalytics) Dest(dest types.DataframeAnalyticsDestinationVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = *dest.DataframeAnalyticsDestinationCaster() + + return r +} + +// API name: headers +func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeadersVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() + + return r +} + +// The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. 
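The `Analysis`, `Source`, and `Dest` setters above take *Variant* values; in this generated API the concrete structs in the types package act as their own variants through generated Caster methods. A sketch of a complete outlier-detection job under that assumption; the struct field names are taken from the vendored types package and the index names are illustrative:

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// buildOutlierJob assembles a complete outlier-detection job. Pointers
// to the concrete types satisfy the *Variant parameters via their
// generated Caster methods.
func buildOutlierJob(tp elastictransport.Interface) {
	res, err := putdataframeanalytics.NewPutDataFrameAnalyticsFunc(tp)("houses-outliers").
		Source(&types.DataframeAnalyticsSource{Index: []string{"houses"}}).
		Dest(&types.DataframeAnalyticsDestination{Index: "houses-outliers"}).
		Analysis(&types.DataframeAnalysisContainer{
			OutlierDetection: &types.DataframeAnalysisOutlierDetection{},
		}).
		Description("Flag unusually priced houses").
		MaxNumThreads(1).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created analytics job:", res.Id)
}
```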
+// API name: max_num_threads +func (r *PutDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// API name: _meta +func (r *PutDataFrameAnalytics) Meta_(metadata types.MetadataVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try +// to create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. +// API name: model_memory_limit +func (r *PutDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelMemoryLimit = &modelmemorylimit + + return r +} + +// The configuration of how to source the analysis data. +// API name: source +func (r *PutDataFrameAnalytics) Source(source types.DataframeAnalyticsSourceVariant) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.DataframeAnalyticsSourceCaster() + + return r +} + +// API name: version +func (r *PutDataFrameAnalytics) Version(versionstring string) *PutDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Version = &versionstring + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/request.go new file mode 100644 index 000000000..53f66d8f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/request.go @@ -0,0 +1,228 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdataframeanalytics + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L155 +type Request struct { + + // AllowLazyStart Specifies whether this job can start when there is insufficient machine + // learning node capacity for it to be immediately assigned to a node. If + // set to `false` and a machine learning node with capacity to run the job + // cannot be immediately found, the API returns an error. If set to `true`, + // the API does not return an error; the job waits in the `starting` state + // until sufficient machine learning node capacity is available. This + // behavior is also affected by the cluster-wide + // `xpack.ml.max_lazy_ml_nodes` setting. + AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` + // Analysis The analysis configuration, which contains the information necessary to + // perform one of the following types of analysis: classification, outlier + // detection, or regression. + Analysis types.DataframeAnalysisContainer `json:"analysis"` + // AnalyzedFields Specifies `includes` and/or `excludes` patterns to select which fields + // will be included in the analysis. The patterns specified in `excludes` + // are applied last, therefore `excludes` takes precedence. In other words, + // if the same field is specified in both `includes` and `excludes`, then + // the field will not be included in the analysis. If `analyzed_fields` is + // not set, only the relevant fields will be included. For example, all the + // numeric fields for outlier detection. + // The supported fields vary for each type of analysis. Outlier detection + // requires numeric or `boolean` data to analyze. The algorithms don’t + // support missing values therefore fields that have data types other than + // numeric or boolean are ignored. Documents where included fields contain + // missing values, null values, or an array are also ignored. Therefore the + // `dest` index may contain documents that don’t have an outlier score. + // Regression supports fields that are numeric, `boolean`, `text`, + // `keyword`, and `ip` data types. It is also tolerant of missing values. + // Fields that are supported are included in the analysis, other fields are + // ignored. Documents where included fields contain an array with two or + // more values are also ignored. Documents in the `dest` index that don’t + // contain a results field are not included in the regression analysis. + // Classification supports fields that are numeric, `boolean`, `text`, + // `keyword`, and `ip` data types. It is also tolerant of missing values. + // Fields that are supported are included in the analysis, other fields are + // ignored. Documents where included fields contain an array with two or + // more values are also ignored. Documents in the `dest` index that don’t + // contain a results field are not included in the classification analysis. + // Classification analysis can be improved by mapping ordinal variable + // values to a single number. 
For example, in case of age ranges, you can + // model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. + AnalyzedFields *types.DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // Dest The destination configuration. + Dest types.DataframeAnalyticsDestination `json:"dest"` + Headers types.HttpHeaders `json:"headers,omitempty"` + // MaxNumThreads The maximum number of threads to be used by the analysis. Using more + // threads may decrease the time necessary to complete the analysis at the + // cost of using more CPU. Note that the process may use additional threads + // for operational functionality other than the analysis itself. + MaxNumThreads *int `json:"max_num_threads,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` + // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for + // analytical processing. If your `elasticsearch.yml` file contains an + // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + // to create data frame analytics jobs that have `model_memory_limit` values + // greater than that setting. + ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + // Source The configuration of how to source the analysis data. + Source types.DataframeAnalyticsSource `json:"source"` + Version *string `json:"version,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdataframeanalytics request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_start": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLazyStart", err) + } + s.AllowLazyStart = &value + case bool: + s.AllowLazyStart = &v + } + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return fmt.Errorf("%s | %w", "Analysis", err) + } + + case "analyzed_fields": + if err := dec.Decode(&s.AnalyzedFields); err != nil { + return fmt.Errorf("%s | %w", "AnalyzedFields", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return fmt.Errorf("%s | %w", "Dest", err) + } + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "max_num_threads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumThreads", err) + } + s.MaxNumThreads = &value + case float64: + f := int(v) + s.MaxNumThreads = &f + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return 
fmt.Errorf("%s | %w", "Meta_", err) + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/response.go new file mode 100644 index 000000000..78cd40a76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics/response.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putdataframeanalytics + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L47 +type Response struct { + AllowLazyStart bool `json:"allow_lazy_start"` + Analysis types.DataframeAnalysisContainer `json:"analysis"` + AnalyzedFields *types.DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` + Authorization *types.DataframeAnalyticsAuthorization `json:"authorization,omitempty"` + CreateTime int64 `json:"create_time"` + Description *string `json:"description,omitempty"` + Dest types.DataframeAnalyticsDestination `json:"dest"` + Id string `json:"id"` + MaxNumThreads int `json:"max_num_threads"` + Meta_ types.Metadata `json:"_meta,omitempty"` + ModelMemoryLimit string `json:"model_memory_limit"` + Source types.DataframeAnalyticsSource `json:"source"` + Version string `json:"version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/put_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/put_filter.go new file mode 100644 index 000000000..275bc4bae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/put_filter.go @@ -0,0 +1,396 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a filter. +// A filter contains a list of strings. It can be used by one or more anomaly +// detection jobs. +// Specifically, filters are referenced in the `custom_rules` property of +// detector configuration objects. +package putfilter + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + filteridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutFilter struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + filterid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutFilter type alias for index. +type NewPutFilter func(filterid string) *PutFilter + +// NewPutFilterFunc returns a new instance of PutFilter with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutFilterFunc(tp elastictransport.Interface) NewPutFilter { + return func(filterid string) *PutFilter { + n := New(tp) + + n._filterid(filterid) + + return n + } +} + +// Create a filter. +// A filter contains a list of strings. It can be used by one or more anomaly +// detection jobs. +// Specifically, filters are referenced in the `custom_rules` property of +// detector configuration objects. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter +func New(tp elastictransport.Interface) *PutFilter { + r := &PutFilter{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutFilter) Raw(raw io.Reader) *PutFilter { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
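A minimal end-to-end sketch for this endpoint, assuming `tp` is a configured `elastictransport.Interface`; the `Description` and `Items` setters used here appear further down in this file:

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter"
)

// createFilter creates a filter that custom_rules in detector
// configurations can reference by ID.
func createFilter(tp elastictransport.Interface) {
	res, err := putfilter.NewPutFilterFunc(tp)("safe_domains").
		Description("A list of safe domains").
		Items("*.elastic.co", "wikipedia.org").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("filter %s now holds %d items\n", res.FilterId, len(res.Items))
}
```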
+func (r *PutFilter) Request(req *Request) *PutFilter { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutFilter) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutFilter: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == filteridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "filterid", r.filterid) + } + path.WriteString(r.filterid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutFilter) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_filter") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_filter") + if reader := instrument.RecordRequestBody(ctx, "ml.put_filter", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_filter") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutFilter query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putfilter.Response +func (r PutFilter) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutFilter headers map. +func (r *PutFilter) Header(key, value string) *PutFilter { + r.headers.Set(key, value) + + return r +} + +// FilterId A string that uniquely identifies a filter. +// API Name: filterid +func (r *PutFilter) _filterid(filterid string) *PutFilter { + r.paramSet |= filteridMask + r.filterid = filterid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutFilter) ErrorTrace(errortrace bool) *PutFilter { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
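`Perform` and `Do` above drive all of their tracing through the `elastictransport.Instrumentation` hooks, and `New` only picks an instrumentation up when the transport also implements `elastictransport.Instrumented`. A sketch of a custom logging instrumentation satisfying the method set visible in this file; the signatures are inferred from the call sites above, so treat them as an assumption:

```go
package examples

import (
	"context"
	"io"
	"log"
	"net/http"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
)

// logInstr implements the hook set that Perform and Do invoke above.
type logInstr struct{}

var _ elastictransport.Instrumentation = logInstr{}

func (logInstr) Start(ctx context.Context, name string) context.Context {
	log.Printf("span start: %s", name)
	return ctx
}
func (logInstr) Close(context.Context) {}
func (logInstr) RecordError(_ context.Context, err error) {
	log.Printf("request error: %v", err)
}
func (logInstr) RecordPathPart(_ context.Context, part, value string) {}
func (logInstr) RecordRequestBody(_ context.Context, endpoint string, query io.Reader) io.ReadCloser {
	return nil // nil keeps the outgoing body; Perform only swaps it when non-nil
}
func (logInstr) BeforeRequest(*http.Request, string)        {}
func (logInstr) AfterRequest(*http.Request, string, string) {}

// instrumented wraps any transport and opts in by also implementing
// elastictransport.Instrumented, which New probes for.
type instrumented struct {
	next elastictransport.Interface
}

func (t instrumented) Perform(req *http.Request) (*http.Response, error) {
	return t.next.Perform(req)
}

func (t instrumented) InstrumentationEnabled() elastictransport.Instrumentation {
	return logInstr{}
}
```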
+// API name: filter_path +func (r *PutFilter) FilterPath(filterpaths ...string) *PutFilter { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutFilter) Human(human bool) *PutFilter { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutFilter) Pretty(pretty bool) *PutFilter { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A description of the filter. +// API name: description +func (r *PutFilter) Description(description string) *PutFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The items of the filter. A wildcard `*` can be used at the beginning or the +// end of an item. +// Up to 10000 items are allowed in each filter. +// API name: items +func (r *PutFilter) Items(items ...string) *PutFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range items { + + r.req.Items = append(r.req.Items, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/request.go new file mode 100644 index 000000000..491ead6e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putfilter + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package putfilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L58 +type Request struct { + + // Description A description of the filter. + Description *string `json:"description,omitempty"` + // Items The items of the filter. 
A wildcard `*` can be used at the beginning or the + // end of an item. + // Up to 10000 items are allowed in each filter. + Items []string `json:"items,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putfilter request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/response.go new file mode 100644 index 000000000..5ee23ff48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putfilter + +// Response holds the response body struct for the package putfilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 +type Response struct { + Description string `json:"description"` + FilterId string `json:"filter_id"` + Items []string `json:"items"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/put_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/put_job.go new file mode 100644 index 000000000..dfc46fe10 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/put_job.go @@ -0,0 +1,658 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
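The putfilter request and response types above also support a non-fluent workflow: deserialize or build a `Request` first, then attach it with the `Request` method from earlier in this file. A sketch; `Raw` would bypass even this and ship caller-supplied bytes verbatim, as its comment notes:

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter"
)

// replaceFilter builds the request body up front, then attaches it
// with Request instead of the fluent setters.
func replaceFilter(tp elastictransport.Interface) {
	req, err := putfilter.NewRequest().FromJSON(`{
		"description": "Domains approved by the security team",
		"items": ["*.elastic.co", "wikipedia.org"]
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := putfilter.NewPutFilterFunc(tp)("safe_domains").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("filter items:", res.Items)
}
```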
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an anomaly detection job. +// +// If you include a `datafeed_config`, you must have read index privileges on +// the source index. +// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. +package putjob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutJob type alias for index. +type NewPutJob func(jobid string) *PutJob + +// NewPutJobFunc returns a new instance of PutJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { + return func(jobid string) *PutJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Create an anomaly detection job. +// +// If you include a `datafeed_config`, you must have read index privileges on +// the source index. +// If you include a `datafeed_config` but do not provide a query, the datafeed +// uses `{"match_all": {"boost": 1}}`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job +func New(tp elastictransport.Interface) *PutJob { + r := &PutJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutJob) Raw(raw io.Reader) *PutJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutJob) Request(req *Request) *PutJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
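(The HttpRequest method above builds the PUT call without executing it, which is handy in tests and for debugging; a short sketch reusing the assumed tp, ctx, and req values from the previous example.)

hr, err := putjob.NewPutJobFunc(tp)("demo-count-job").Request(req).HttpRequest(ctx)
if err != nil {
	return err
}
// As assembled above: PUT /_ml/anomaly_detectors/demo-count-job with
// application/vnd.elasticsearch+json;compatible-with=9 negotiation headers.
fmt.Println(hr.Method, hr.URL.Path, hr.Header.Get("Accept"))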
+func (r PutJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_job") + if reader := instrument.RecordRequestBody(ctx, "ml.put_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putjob.Response +func (r PutJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutJob headers map. +func (r *PutJob) Header(key, value string) *PutJob { + r.headers.Set(key, value) + + return r +} + +// JobId The identifier for the anomaly detection job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It +// must start and end with alphanumeric characters. +// API Name: jobid +func (r *PutJob) _jobid(jobid string) *PutJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// AllowNoIndices If `true`, wildcard indices expressions that resolve into no concrete indices +// are ignored. This includes the +// `_all` string or when no indices are specified. 
+// API name: allow_no_indices +func (r *PutJob) AllowNoIndices(allownoindices bool) *PutJob { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines +// whether wildcard expressions match hidden data streams. Supports +// comma-separated values. +// API name: expand_wildcards +func (r *PutJob) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutJob { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *PutJob) IgnoreThrottled(ignorethrottled bool) *PutJob { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `true`, unavailable indices (missing or closed) are ignored. +// API name: ignore_unavailable +func (r *PutJob) IgnoreUnavailable(ignoreunavailable bool) *PutJob { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutJob) ErrorTrace(errortrace bool) *PutJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutJob) FilterPath(filterpaths ...string) *PutJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutJob) Human(human bool) *PutJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutJob) Pretty(pretty bool) *PutJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Advanced configuration option. Specifies whether this job can open when there +// is insufficient machine learning node capacity for it to be immediately +// assigned to a node. By default, if a machine learning node with capacity to +// run the job cannot immediately be found, the open anomaly detection jobs API +// returns an error. However, this is also subject to the cluster-wide +// `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open +// anomaly detection jobs API does not return an error and the job waits in the +// opening state until sufficient machine learning node capacity is available. 
+// API name: allow_lazy_open +func (r *PutJob) AllowLazyOpen(allowlazyopen bool) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowLazyOpen = &allowlazyopen + + return r +} + +// Specifies how to analyze the data. After you create a job, you cannot change +// the analysis configuration; all the properties are informational. +// API name: analysis_config +func (r *PutJob) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisConfig = *analysisconfig.AnalysisConfigCaster() + + return r +} + +// Limits can be applied for the resources required to hold the mathematical +// models in memory. These limits are approximate and can be set per job. They +// do not control the memory used by other processes, for example the +// Elasticsearch Java processes. +// API name: analysis_limits +func (r *PutJob) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() + + return r +} + +// Advanced configuration option. The time between each periodic persistence of +// the model. The default value is a randomized value between 3 to 4 hours, +// which avoids all jobs persisting at exactly the same time. The smallest +// allowed value is 1 hour. For very large models (several GB), persistence +// could take 10-20 minutes, so do not set the `background_persist_interval` +// value too low. +// API name: background_persist_interval +func (r *PutJob) BackgroundPersistInterval(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() + + return r +} + +// Advanced configuration option. Contains custom meta data about the job. +// API name: custom_settings +func (r *PutJob) CustomSettings(customsettings json.RawMessage) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CustomSettings = customsettings + + return r +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies a period of time (in days) after +// which only the first snapshot per day is retained. This period is relative to +// the timestamp of the most recent snapshot for this job. Valid values range +// from 0 to `model_snapshot_retention_days`. +// API name: daily_model_snapshot_retention_after_days +func (r *PutJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays + + return r +} + +// Defines the format of the input data when you send data to the job by using +// the post data API. Note that when configure a datafeed, these properties are +// automatically set. When data is received via the post data API, it is not +// stored in Elasticsearch. Only the results for anomaly detection are retained. 
+// API name: data_description +func (r *PutJob) DataDescription(datadescription types.DataDescriptionVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataDescription = *datadescription.DataDescriptionCaster() + + return r +} + +// Defines a datafeed for the anomaly detection job. If Elasticsearch security +// features are enabled, your datafeed remembers which roles the user who +// created it had at the time of creation and runs the query using those same +// roles. If you provide secondary authorization headers, those credentials are +// used instead. +// API name: datafeed_config +func (r *PutJob) DatafeedConfig(datafeedconfig types.DatafeedConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DatafeedConfig = datafeedconfig.DatafeedConfigCaster() + + return r +} + +// A description of the job. +// API name: description +func (r *PutJob) Description(description string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// A list of job groups. A job can belong to no groups or many. +// API name: groups +func (r *PutJob) Groups(groups ...string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + + } + return r +} + +// This advanced configuration option stores model information along with the +// results. It provides a more detailed view into anomaly detection. If you +// enable model plot it can add considerable overhead to the performance of the +// system; it is not feasible for jobs with many entities. Model plot provides a +// simplified and indicative view of the model and its bounds. It does not +// display complex features such as multivariate correlations or multimodal +// data. As such, anomalies may occasionally be reported which cannot be seen in +// the model plot. Model plot config can be configured when the job is created +// or updated later. It must be disabled if performance issues are experienced. +// API name: model_plot_config +func (r *PutJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() + + return r +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies the maximum period of time (in +// days) that snapshots are retained. This period is relative to the timestamp +// of the most recent snapshot for this job. By default, snapshots ten days +// older than the newest snapshot are deleted. +// API name: model_snapshot_retention_days +func (r *PutJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// Advanced configuration option. The period over which adjustments to the score +// are applied, as new data is seen. The default value is the longer of 30 days +// or 100 bucket spans. 
+// API name: renormalization_window_days +func (r *PutJob) RenormalizationWindowDays(renormalizationwindowdays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RenormalizationWindowDays = &renormalizationwindowdays + + return r +} + +// A text string that affects the name of the machine learning results index. By +// default, the job generates an index named `.ml-anomalies-shared`. +// API name: results_index_name +func (r *PutJob) ResultsIndexName(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ResultsIndexName = &indexname + + return r +} + +// Advanced configuration option. The period of time (in days) that results are +// retained. Age is calculated relative to the timestamp of the latest bucket +// result. If this property has a non-null value, once per day at 00:30 (server +// time), results that are the specified number of days older than the latest +// bucket result are deleted from Elasticsearch. The default value is null, +// which means all results are retained. Annotations generated by the system +// also count as results for retention purposes; they are deleted after the same +// number of days as results. Annotations added by users are retained forever. +// API name: results_retention_days +func (r *PutJob) ResultsRetentionDays(resultsretentiondays int64) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ResultsRetentionDays = &resultsretentiondays + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/request.go new file mode 100644 index 000000000..94182f716 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/request.go @@ -0,0 +1,296 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_job/MlPutJobRequest.ts#L30-L151 +type Request struct { + + // AllowLazyOpen Advanced configuration option. 
Specifies whether this job can open when there + // is insufficient machine learning node capacity for it to be immediately + // assigned to a node. By default, if a machine learning node with capacity to + // run the job cannot immediately be found, the open anomaly detection jobs API + // returns an error. However, this is also subject to the cluster-wide + // `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open + // anomaly detection jobs API does not return an error and the job waits in the + // opening state until sufficient machine learning node capacity is available. + AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"` + // AnalysisConfig Specifies how to analyze the data. After you create a job, you cannot change + // the analysis configuration; all the properties are informational. + AnalysisConfig types.AnalysisConfig `json:"analysis_config"` + // AnalysisLimits Limits can be applied for the resources required to hold the mathematical + // models in memory. These limits are approximate and can be set per job. They + // do not control the memory used by other processes, for example the + // Elasticsearch Java processes. + AnalysisLimits *types.AnalysisLimits `json:"analysis_limits,omitempty"` + // BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence of + // the model. The default value is a randomized value between 3 to 4 hours, + // which avoids all jobs persisting at exactly the same time. The smallest + // allowed value is 1 hour. For very large models (several GB), persistence + // could take 10-20 minutes, so do not set the `background_persist_interval` + // value too low. + BackgroundPersistInterval types.Duration `json:"background_persist_interval,omitempty"` + // CustomSettings Advanced configuration option. Contains custom meta data about the job. + CustomSettings json.RawMessage `json:"custom_settings,omitempty"` + // DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. It specifies a period of time (in days) after + // which only the first snapshot per day is retained. This period is relative to + // the timestamp of the most recent snapshot for this job. Valid values range + // from 0 to `model_snapshot_retention_days`. + DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` + // DataDescription Defines the format of the input data when you send data to the job by using + // the post data API. Note that when configure a datafeed, these properties are + // automatically set. When data is received via the post data API, it is not + // stored in Elasticsearch. Only the results for anomaly detection are retained. + DataDescription types.DataDescription `json:"data_description"` + // DatafeedConfig Defines a datafeed for the anomaly detection job. If Elasticsearch security + // features are enabled, your datafeed remembers which roles the user who + // created it had at the time of creation and runs the query using those same + // roles. If you provide secondary authorization headers, those credentials are + // used instead. + DatafeedConfig *types.DatafeedConfig `json:"datafeed_config,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // Groups A list of job groups. A job can belong to no groups or many. 
+ Groups []string `json:"groups,omitempty"` + // JobId The identifier for the anomaly detection job. This identifier can contain + // lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + // must start and end with alphanumeric characters. + JobId *string `json:"job_id,omitempty"` + // ModelPlotConfig This advanced configuration option stores model information along with the + // results. It provides a more detailed view into anomaly detection. If you + // enable model plot it can add considerable overhead to the performance of the + // system; it is not feasible for jobs with many entities. Model plot provides a + // simplified and indicative view of the model and its bounds. It does not + // display complex features such as multivariate correlations or multimodal + // data. As such, anomalies may occasionally be reported which cannot be seen in + // the model plot. Model plot config can be configured when the job is created + // or updated later. It must be disabled if performance issues are experienced. + ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"` + // ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. It specifies the maximum period of time (in + // days) that snapshots are retained. This period is relative to the timestamp + // of the most recent snapshot for this job. By default, snapshots ten days + // older than the newest snapshot are deleted. + ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"` + // RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the score + // are applied, as new data is seen. The default value is the longer of 30 days + // or 100 bucket spans. + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + // ResultsIndexName A text string that affects the name of the machine learning results index. By + // default, the job generates an index named `.ml-anomalies-shared`. + ResultsIndexName *string `json:"results_index_name,omitempty"` + // ResultsRetentionDays Advanced configuration option. The period of time (in days) that results are + // retained. Age is calculated relative to the timestamp of the latest bucket + // result. If this property has a non-null value, once per day at 00:30 (server + // time), results that are the specified number of days older than the latest + // bucket result are deleted from Elasticsearch. The default value is null, + // which means all results are retained. Annotations generated by the system + // also count as results for retention purposes; they are deleted after the same + // number of days as results. Annotations added by users are retained forever. 
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putjob request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLazyOpen", err) + } + s.AllowLazyOpen = &value + case bool: + s.AllowLazyOpen = &v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return fmt.Errorf("%s | %w", "AnalysisConfig", err) + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return fmt.Errorf("%s | %w", "AnalysisLimits", err) + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return fmt.Errorf("%s | %w", "BackgroundPersistInterval", err) + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return fmt.Errorf("%s | %w", "CustomSettings", err) + } + + case "daily_model_snapshot_retention_after_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err) + } + s.DailyModelSnapshotRetentionAfterDays = &value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = &f + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return fmt.Errorf("%s | %w", "DataDescription", err) + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return fmt.Errorf("%s | %w", "DatafeedConfig", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return fmt.Errorf("%s | %w", "ModelPlotConfig", err) + } + + case "model_snapshot_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = &value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = &f + } + + case "renormalization_window_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RenormalizationWindowDays", err) + 
} + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return fmt.Errorf("%s | %w", "ResultsIndexName", err) + } + + case "results_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ResultsRetentionDays", err) + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/response.go new file mode 100644 index 000000000..b22dabf63 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob/response.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 +type Response struct { + AllowLazyOpen bool `json:"allow_lazy_open"` + AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` + AnalysisLimits types.AnalysisLimits `json:"analysis_limits"` + BackgroundPersistInterval types.Duration `json:"background_persist_interval,omitempty"` + CreateTime types.DateTime `json:"create_time"` + CustomSettings json.RawMessage `json:"custom_settings,omitempty"` + DailyModelSnapshotRetentionAfterDays int64 `json:"daily_model_snapshot_retention_after_days"` + DataDescription types.DataDescription `json:"data_description"` + DatafeedConfig *types.MLDatafeed `json:"datafeed_config,omitempty"` + Description *string `json:"description,omitempty"` + Groups []string `json:"groups,omitempty"` + JobId string `json:"job_id"` + JobType string `json:"job_type"` + JobVersion string `json:"job_version"` + ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"` + ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` + ModelSnapshotRetentionDays int64 `json:"model_snapshot_retention_days"` + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + ResultsIndexName string `json:"results_index_name"` + ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLazyOpen", err) + } + s.AllowLazyOpen = value + case bool: + s.AllowLazyOpen = v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return fmt.Errorf("%s | %w", "AnalysisConfig", err) + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return fmt.Errorf("%s | %w", "AnalysisLimits", err) + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return fmt.Errorf("%s | %w", "BackgroundPersistInterval", err) + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return fmt.Errorf("%s | %w", "CustomSettings", err) + } + + case "daily_model_snapshot_retention_after_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err) + } + s.DailyModelSnapshotRetentionAfterDays = value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = f + } + + case "data_description": + if err := 
dec.Decode(&s.DataDescription); err != nil { + return fmt.Errorf("%s | %w", "DataDescription", err) + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return fmt.Errorf("%s | %w", "DatafeedConfig", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "job_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobType = o + + case "job_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobVersion = o + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return fmt.Errorf("%s | %w", "ModelPlotConfig", err) + } + + case "model_snapshot_id": + if err := dec.Decode(&s.ModelSnapshotId); err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotId", err) + } + + case "model_snapshot_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = f + } + + case "renormalization_window_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RenormalizationWindowDays", err) + } + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsIndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsIndexName = o + + case "results_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ResultsRetentionDays", err) + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/put_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/put_trained_model.go new file mode 100644 index 000000000..cafe9d8ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/put_trained_model.go @@ -0,0 +1,552 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a trained model. +// Enable you to supply a trained model that is not created by data frame +// analytics. +package puttrainedmodel + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTrainedModel struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTrainedModel type alias for index. +type NewPutTrainedModel func(modelid string) *PutTrainedModel + +// NewPutTrainedModelFunc returns a new instance of PutTrainedModel with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTrainedModelFunc(tp elastictransport.Interface) NewPutTrainedModel { + return func(modelid string) *PutTrainedModel { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Create a trained model. +// Enable you to supply a trained model that is not created by data frame +// analytics. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model +func New(tp elastictransport.Interface) *PutTrainedModel { + r := &PutTrainedModel{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutTrainedModel) Raw(raw io.Reader) *PutTrainedModel { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutTrainedModel) Request(req *Request) *PutTrainedModel { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
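(A hedged sketch of registering a trained model with this builder; tp and ctx are assumed as in the put-job example, all literal values are placeholders, and the cluster still enforces server-side validation of the definition/inference_config, so this shows only the client-side call shape. The fluent setters used here are defined further down in this file.)

model := puttrainedmodel.NewPutTrainedModelFunc(tp)("demo-model")
res, err := model.
	Description("registered from the typed Go client").
	Tags("demo", "kubedb").
	DeferDefinitionDecompression(true). // query parameter documented below
	Do(ctx)
if err != nil {
	return err
}
fmt.Println("stored model:", res.ModelId)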
+func (r *PutTrainedModel) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutTrainedModel: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
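(Error-handling note: when Do, defined just below, gets a non-2xx response it decodes the body into types.ElasticsearchError and returns it as the error, back-filling Status from the HTTP status code when the payload omits it. A sketch of branching on that, as an alternative to the bare error return in the previous example:)

if _, err := model.Do(ctx); err != nil {
	var esErr *types.ElasticsearchError
	if errors.As(err, &esErr) {
		log.Printf("cluster rejected trained model: HTTP %d", esErr.Status)
	}
	return err
}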
+func (r PutTrainedModel) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_trained_model") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTrainedModel query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttrainedmodel.Response +func (r PutTrainedModel) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutTrainedModel headers map. +func (r *PutTrainedModel) Header(key, value string) *PutTrainedModel { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *PutTrainedModel) _modelid(modelid string) *PutTrainedModel { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// DeferDefinitionDecompression If set to `true` and a `compressed_definition` is provided, +// the request defers definition decompression and skips relevant +// validations. 
+// API name: defer_definition_decompression +func (r *PutTrainedModel) DeferDefinitionDecompression(deferdefinitiondecompression bool) *PutTrainedModel { + r.values.Set("defer_definition_decompression", strconv.FormatBool(deferdefinitiondecompression)) + + return r +} + +// WaitForCompletion Whether to wait for all child operations (e.g. model download) +// to complete. +// API name: wait_for_completion +func (r *PutTrainedModel) WaitForCompletion(waitforcompletion bool) *PutTrainedModel { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutTrainedModel) ErrorTrace(errortrace bool) *PutTrainedModel { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutTrainedModel) FilterPath(filterpaths ...string) *PutTrainedModel { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutTrainedModel) Human(human bool) *PutTrainedModel { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutTrainedModel) Pretty(pretty bool) *PutTrainedModel { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The compressed (GZipped and Base64 encoded) inference definition of the +// model. If compressed_definition is specified, then definition cannot be +// specified. +// API name: compressed_definition +func (r *PutTrainedModel) CompressedDefinition(compresseddefinition string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.CompressedDefinition = &compresseddefinition + + return r +} + +// The inference definition for the model. If definition is specified, then +// compressed_definition cannot be specified. +// API name: definition +func (r *PutTrainedModel) Definition(definition types.DefinitionVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Definition = definition.DefinitionCaster() + + return r +} + +// A human-readable description of the inference trained model. +// API name: description +func (r *PutTrainedModel) Description(description string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The default configuration for inference. This can be either a regression +// or classification configuration. It must match the underlying +// definition.trained_model's target_type. For pre-packaged models such as +// ELSER the config is not required. 
+// API name: inference_config +func (r *PutTrainedModel) InferenceConfig(inferenceconfig types.InferenceConfigCreateContainerVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.InferenceConfig = inferenceconfig.InferenceConfigCreateContainerCaster() + + return r +} + +// The input field names for the model definition. +// API name: input +func (r *PutTrainedModel) Input(input types.InputVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Input = input.InputCaster() + + return r +} + +// An object map that contains metadata about the model. +// API name: metadata +func (r *PutTrainedModel) Metadata(metadata any) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + switch casted := metadata.(type) { + case json.RawMessage: + r.req.Metadata = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(metadata) + if err != nil { + return err + } + r.req.Metadata = data + return nil + }) + } + return r +} + +// The estimated memory usage in bytes to keep the trained model in memory. +// This property is supported only if defer_definition_decompression is true +// or the model definition is not supplied. +// API name: model_size_bytes +func (r *PutTrainedModel) ModelSizeBytes(modelsizebytes int64) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelSizeBytes = &modelsizebytes + + return r +} + +// The model type. +// API name: model_type +func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ModelType = &modeltype + return r +} + +// The platform architecture (if applicable) of the trained mode. If the model +// only works on one platform, because it is heavily optimized for a particular +// processor architecture and OS combination, then this field specifies which. +// The format of the string must match the platform identifiers used by +// Elasticsearch, +// so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, +// `darwin-aarch64`, +// or `windows-x86_64`. For portable models (those that work independent of +// processor +// architecture or OS features), leave this field unset. +// API name: platform_architecture +func (r *PutTrainedModel) PlatformArchitecture(platformarchitecture string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PlatformArchitecture = &platformarchitecture + + return r +} + +// Optional prefix strings applied at inference +// API name: prefix_strings +func (r *PutTrainedModel) PrefixStrings(prefixstrings types.TrainedModelPrefixStringsVariant) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PrefixStrings = prefixstrings.TrainedModelPrefixStringsCaster() + + return r +} + +// An array of tags to organize the model. 
+// API name: tags +func (r *PutTrainedModel) Tags(tags ...string) *PutTrainedModel { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range tags { + + r.req.Tags = append(r.req.Tags, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/request.go new file mode 100644 index 000000000..40ab5be89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/request.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodel + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype" +) + +// Request holds the request body struct for the package puttrainedmodel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L135 +type Request struct { + + // CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the + // model. If compressed_definition is specified, then definition cannot be + // specified. + CompressedDefinition *string `json:"compressed_definition,omitempty"` + // Definition The inference definition for the model. If definition is specified, then + // compressed_definition cannot be specified. + Definition *types.Definition `json:"definition,omitempty"` + // Description A human-readable description of the inference trained model. + Description *string `json:"description,omitempty"` + // InferenceConfig The default configuration for inference. This can be either a regression + // or classification configuration. It must match the underlying + // definition.trained_model's target_type. For pre-packaged models such as + // ELSER the config is not required. + InferenceConfig *types.InferenceConfigCreateContainer `json:"inference_config,omitempty"` + // Input The input field names for the model definition. + Input *types.Input `json:"input,omitempty"` + // Metadata An object map that contains metadata about the model. + Metadata json.RawMessage `json:"metadata,omitempty"` + // ModelSizeBytes The estimated memory usage in bytes to keep the trained model in memory. + // This property is supported only if defer_definition_decompression is true + // or the model definition is not supplied. 
+ ModelSizeBytes *int64 `json:"model_size_bytes,omitempty"` + // ModelType The model type. + ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` + // PlatformArchitecture The platform architecture (if applicable) of the trained mode. If the model + // only works on one platform, because it is heavily optimized for a particular + // processor architecture and OS combination, then this field specifies which. + // The format of the string must match the platform identifiers used by + // Elasticsearch, + // so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, + // `darwin-aarch64`, + // or `windows-x86_64`. For portable models (those that work independent of + // processor + // architecture or OS features), leave this field unset. + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + // PrefixStrings Optional prefix strings applied at inference + PrefixStrings *types.TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + // Tags An array of tags to organize the model. + Tags []string `json:"tags,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puttrainedmodel request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/response.go new file mode 100644 index 000000000..8c0db1aa2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel/response.go @@ -0,0 +1,281 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodel + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype" +) + +// Response holds the response body struct for the package puttrainedmodel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L25 +type Response struct { + CompressedDefinition *string `json:"compressed_definition,omitempty"` + // CreateTime The time when the trained model was created. 
+ CreateTime types.DateTime `json:"create_time,omitempty"` + // CreatedBy Information on the creator of the trained model. + CreatedBy *string `json:"created_by,omitempty"` + // DefaultFieldMap Any field map described in the inference configuration takes precedence. + DefaultFieldMap map[string]string `json:"default_field_map,omitempty"` + // Description The free-text description of the trained model. + Description *string `json:"description,omitempty"` + // EstimatedHeapMemoryUsageBytes The estimated heap usage in bytes to keep the trained model in memory. + EstimatedHeapMemoryUsageBytes *int `json:"estimated_heap_memory_usage_bytes,omitempty"` + // EstimatedOperations The estimated number of operations to use the trained model. + EstimatedOperations *int `json:"estimated_operations,omitempty"` + // FullyDefined True if the full model definition is present. + FullyDefined *bool `json:"fully_defined,omitempty"` + // InferenceConfig The default configuration for inference. This can be either a regression, + // classification, or one of the many NLP focused configurations. It must match + // the underlying definition.trained_model's target_type. For pre-packaged + // models such as ELSER the config is not required. + InferenceConfig *types.InferenceConfigCreateContainer `json:"inference_config,omitempty"` + // Input The input field names for the model definition. + Input types.TrainedModelConfigInput `json:"input"` + // LicenseLevel The license level of the trained model. + LicenseLevel *string `json:"license_level,omitempty"` + Location *types.TrainedModelLocation `json:"location,omitempty"` + // Metadata An object containing metadata about the trained model. For example, models + // created by data frame analytics contain analysis_config and input objects. + Metadata *types.TrainedModelConfigMetadata `json:"metadata,omitempty"` + // ModelId Identifier for the trained model. + ModelId string `json:"model_id"` + ModelPackage *types.ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes types.ByteSize `json:"model_size_bytes,omitempty"` + // ModelType The model type + ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + PrefixStrings *types.TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + // Tags A comma delimited string of tags. A trained model can have many tags, or + // none. + Tags []string `json:"tags"` + // Version The Elasticsearch version number in which the trained model was created. 
+ Version *string `json:"version,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + DefaultFieldMap: make(map[string]string, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compressed_definition": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompressedDefinition", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressedDefinition = &o + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "created_by": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreatedBy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedBy = &o + + case "default_field_map": + if s.DefaultFieldMap == nil { + s.DefaultFieldMap = make(map[string]string, 0) + } + if err := dec.Decode(&s.DefaultFieldMap); err != nil { + return fmt.Errorf("%s | %w", "DefaultFieldMap", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "estimated_heap_memory_usage_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EstimatedHeapMemoryUsageBytes", err) + } + s.EstimatedHeapMemoryUsageBytes = &value + case float64: + f := int(v) + s.EstimatedHeapMemoryUsageBytes = &f + } + + case "estimated_operations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EstimatedOperations", err) + } + s.EstimatedOperations = &value + case float64: + f := int(v) + s.EstimatedOperations = &f + } + + case "fully_defined": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FullyDefined", err) + } + s.FullyDefined = &value + case bool: + s.FullyDefined = &v + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + case "license_level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LicenseLevel", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LicenseLevel = &o + + case "location": + if err := dec.Decode(&s.Location); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return 
fmt.Errorf("%s | %w", "ModelPackage", err) + } + + case "model_size_bytes": + if err := dec.Decode(&s.ModelSizeBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelSizeBytes", err) + } + + case "model_type": + if err := dec.Decode(&s.ModelType); err != nil { + return fmt.Errorf("%s | %w", "ModelType", err) + } + + case "platform_architecture": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PlatformArchitecture", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PlatformArchitecture = &o + + case "prefix_strings": + if err := dec.Decode(&s.PrefixStrings); err != nil { + return fmt.Errorf("%s | %w", "PrefixStrings", err) + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return fmt.Errorf("%s | %w", "Tags", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go new file mode 100644 index 000000000..ea67adddd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go @@ -0,0 +1,414 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a trained model alias. +// A trained model alias is a logical name used to reference a single trained +// model. +// You can use aliases instead of trained model identifiers to make it easier to +// reference your models. For example, you can use aliases in inference +// aggregations and processors. +// An alias must be unique and refer to only a single trained model. However, +// you can have multiple aliases for each trained model. +// If you use this API to update an alias such that it references a different +// trained model ID and the model uses a different type of data frame analytics, +// an error occurs. For example, this situation occurs if you have a trained +// model for regression analysis and a trained model for classification +// analysis; you cannot reassign an alias from one type of trained model to +// another. +// If you use this API to update an alias and there are very few input fields in +// common between the old and new trained models for the model alias, the API +// returns a warning. 
+package puttrainedmodelalias + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelaliasMask = iota + 1 + + modelidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTrainedModelAlias struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelalias string + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTrainedModelAlias type alias for index. +type NewPutTrainedModelAlias func(modelid, modelalias string) *PutTrainedModelAlias + +// NewPutTrainedModelAliasFunc returns a new instance of PutTrainedModelAlias with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTrainedModelAliasFunc(tp elastictransport.Interface) NewPutTrainedModelAlias { + return func(modelid, modelalias string) *PutTrainedModelAlias { + n := New(tp) + + n._modelalias(modelalias) + + n._modelid(modelid) + + return n + } +} + +// Create or update a trained model alias. +// A trained model alias is a logical name used to reference a single trained +// model. +// You can use aliases instead of trained model identifiers to make it easier to +// reference your models. For example, you can use aliases in inference +// aggregations and processors. +// An alias must be unique and refer to only a single trained model. However, +// you can have multiple aliases for each trained model. +// If you use this API to update an alias such that it references a different +// trained model ID and the model uses a different type of data frame analytics, +// an error occurs. For example, this situation occurs if you have a trained +// model for regression analysis and a trained model for classification +// analysis; you cannot reassign an alias from one type of trained model to +// another. +// If you use this API to update an alias and there are very few input fields in +// common between the old and new trained models for the model alias, the API +// returns a warning. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias +func New(tp elastictransport.Interface) *PutTrainedModelAlias { + r := &PutTrainedModelAlias{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
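+// For this endpoint the request is built as
+// PUT /_ml/trained_models/{model_id}/model_aliases/{model_alias}.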
+func (r *PutTrainedModelAlias) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask|modelaliasMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("model_aliases") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelalias", r.modelalias) + } + path.WriteString(r.modelalias) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutTrainedModelAlias) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_trained_model_alias") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_alias") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model_alias", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_alias") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTrainedModelAlias query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttrainedmodelalias.Response +func (r PutTrainedModelAlias) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PutTrainedModelAlias) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_alias") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PutTrainedModelAlias query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PutTrainedModelAlias headers map. +func (r *PutTrainedModelAlias) Header(key, value string) *PutTrainedModelAlias { + r.headers.Set(key, value) + + return r +} + +// ModelAlias The alias to create or update. This value cannot end in numbers. +// API Name: modelalias +func (r *PutTrainedModelAlias) _modelalias(modelalias string) *PutTrainedModelAlias { + r.paramSet |= modelaliasMask + r.modelalias = modelalias + + return r +} + +// ModelId The identifier for the trained model that the alias refers to. +// API Name: modelid +func (r *PutTrainedModelAlias) _modelid(modelid string) *PutTrainedModelAlias { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// Reassign Specifies whether the alias gets reassigned to the specified trained +// model if it is already assigned to a different model. If the alias is +// already assigned and this parameter is false, the API returns an error. +// API name: reassign +func (r *PutTrainedModelAlias) Reassign(reassign bool) *PutTrainedModelAlias { + r.values.Set("reassign", strconv.FormatBool(reassign)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PutTrainedModelAlias) ErrorTrace(errortrace bool) *PutTrainedModelAlias { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutTrainedModelAlias) FilterPath(filterpaths ...string) *PutTrainedModelAlias { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutTrainedModelAlias) Human(human bool) *PutTrainedModelAlias { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutTrainedModelAlias) Pretty(pretty bool) *PutTrainedModelAlias { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/response.go new file mode 100644 index 000000000..d318b8c31 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodelalias + +// Response holds the response body struct for the package puttrainedmodelalias +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go new file mode 100644 index 000000000..4bae4ba92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create part of a trained model definition. +package puttrainedmodeldefinitionpart + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 + + partMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTrainedModelDefinitionPart struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + part string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTrainedModelDefinitionPart type alias for index. +type NewPutTrainedModelDefinitionPart func(modelid, part string) *PutTrainedModelDefinitionPart + +// NewPutTrainedModelDefinitionPartFunc returns a new instance of PutTrainedModelDefinitionPart with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTrainedModelDefinitionPartFunc(tp elastictransport.Interface) NewPutTrainedModelDefinitionPart { + return func(modelid, part string) *PutTrainedModelDefinitionPart { + n := New(tp) + + n._modelid(modelid) + + n._part(part) + + return n + } +} + +// Create part of a trained model definition. 
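+// Parts are numbered from `0` through `total_parts - 1`. A minimal sketch,
+// assuming `es` is a configured *elasticsearch.TypedClient and `b64Chunk`
+// holds one base64-encoded chunk of the compressed model definition
+// (illustrative names, not part of this package):
+//
+//	res, err := es.Ml.PutTrainedModelDefinitionPart("my-model", "0").
+//		Definition(b64Chunk).
+//		TotalDefinitionLength(totalLen).
+//		TotalParts(3).
+//		Do(ctx)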
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part +func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { + r := &PutTrainedModelDefinitionPart{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutTrainedModelDefinitionPart) Raw(raw io.Reader) *PutTrainedModelDefinitionPart { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutTrainedModelDefinitionPart) Request(req *Request) *PutTrainedModelDefinitionPart { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutTrainedModelDefinitionPart) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutTrainedModelDefinitionPart: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask|partMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("definition") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "part", r.part) + } + path.WriteString(r.part) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
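+// For this endpoint the request is built as
+// PUT /_ml/trained_models/{model_id}/definition/{part}.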
+func (r PutTrainedModelDefinitionPart) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_trained_model_definition_part") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_definition_part") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model_definition_part", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_definition_part") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTrainedModelDefinitionPart query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttrainedmodeldefinitionpart.Response +func (r PutTrainedModelDefinitionPart) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_definition_part") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutTrainedModelDefinitionPart headers map. +func (r *PutTrainedModelDefinitionPart) Header(key, value string) *PutTrainedModelDefinitionPart { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *PutTrainedModelDefinitionPart) _modelid(modelid string) *PutTrainedModelDefinitionPart { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// Part The definition part number. When the definition is loaded for inference the +// definition parts are streamed in the +// order of their part number. 
The first part must be `0` and the final part +// must be `total_parts - 1`. +// API Name: part +func (r *PutTrainedModelDefinitionPart) _part(part string) *PutTrainedModelDefinitionPart { + r.paramSet |= partMask + r.part = part + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutTrainedModelDefinitionPart) ErrorTrace(errortrace bool) *PutTrainedModelDefinitionPart { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutTrainedModelDefinitionPart) FilterPath(filterpaths ...string) *PutTrainedModelDefinitionPart { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutTrainedModelDefinitionPart) Human(human bool) *PutTrainedModelDefinitionPart { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutTrainedModelDefinitionPart) Pretty(pretty bool) *PutTrainedModelDefinitionPart { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The definition part for the model. Must be a base64 encoded string. +// API name: definition +func (r *PutTrainedModelDefinitionPart) Definition(definition string) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Definition = definition + + return r +} + +// The total uncompressed definition length in bytes. Not base64 encoded. +// API name: total_definition_length +func (r *PutTrainedModelDefinitionPart) TotalDefinitionLength(totaldefinitionlength int64) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TotalDefinitionLength = totaldefinitionlength + + return r +} + +// The total number of parts that will be uploaded. Must be greater than 0. +// API name: total_parts +func (r *PutTrainedModelDefinitionPart) TotalParts(totalparts int) *PutTrainedModelDefinitionPart { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TotalParts = totalparts + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/request.go new file mode 100644 index 000000000..3cd6616a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodeldefinitionpart + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package puttrainedmodeldefinitionpart +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L65 +type Request struct { + + // Definition The definition part for the model. Must be a base64 encoded string. + Definition string `json:"definition"` + // TotalDefinitionLength The total uncompressed definition length in bytes. Not base64 encoded. + TotalDefinitionLength int64 `json:"total_definition_length"` + // TotalParts The total number of parts that will be uploaded. Must be greater than 0. + TotalParts int `json:"total_parts"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puttrainedmodeldefinitionpart request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/response.go new file mode 100644 index 000000000..84c2b4d18 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodeldefinitionpart + +// Response holds the response body struct for the package puttrainedmodeldefinitionpart +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go new file mode 100644 index 000000000..74abcd873 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go @@ -0,0 +1,411 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a trained model vocabulary. +// This API is supported only for natural language processing (NLP) models. +// The vocabulary is stored in the index as described in +// `inference_config.*.vocabulary` of the trained model definition. +package puttrainedmodelvocabulary + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTrainedModelVocabulary struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTrainedModelVocabulary type alias for index. +type NewPutTrainedModelVocabulary func(modelid string) *PutTrainedModelVocabulary + +// NewPutTrainedModelVocabularyFunc returns a new instance of PutTrainedModelVocabulary with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTrainedModelVocabularyFunc(tp elastictransport.Interface) NewPutTrainedModelVocabulary { + return func(modelid string) *PutTrainedModelVocabulary { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Create a trained model vocabulary. +// This API is supported only for natural language processing (NLP) models. +// The vocabulary is stored in the index as described in +// `inference_config.*.vocabulary` of the trained model definition. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary +func New(tp elastictransport.Interface) *PutTrainedModelVocabulary { + r := &PutTrainedModelVocabulary{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutTrainedModelVocabulary) Raw(raw io.Reader) *PutTrainedModelVocabulary { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutTrainedModelVocabulary) Request(req *Request) *PutTrainedModelVocabulary { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutTrainedModelVocabulary) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutTrainedModelVocabulary: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("vocabulary") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
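+// For this endpoint the request is built as
+// PUT /_ml/trained_models/{model_id}/vocabulary.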
+func (r PutTrainedModelVocabulary) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.put_trained_model_vocabulary") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.put_trained_model_vocabulary") + if reader := instrument.RecordRequestBody(ctx, "ml.put_trained_model_vocabulary", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.put_trained_model_vocabulary") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTrainedModelVocabulary query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttrainedmodelvocabulary.Response +func (r PutTrainedModelVocabulary) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.put_trained_model_vocabulary") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutTrainedModelVocabulary headers map. +func (r *PutTrainedModelVocabulary) Header(key, value string) *PutTrainedModelVocabulary { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *PutTrainedModelVocabulary) _modelid(modelid string) *PutTrainedModelVocabulary { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PutTrainedModelVocabulary) ErrorTrace(errortrace bool) *PutTrainedModelVocabulary { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutTrainedModelVocabulary) FilterPath(filterpaths ...string) *PutTrainedModelVocabulary { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutTrainedModelVocabulary) Human(human bool) *PutTrainedModelVocabulary { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutTrainedModelVocabulary) Pretty(pretty bool) *PutTrainedModelVocabulary { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The optional model merges if required by the tokenizer. +// API name: merges +func (r *PutTrainedModelVocabulary) Merges(merges ...string) *PutTrainedModelVocabulary { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range merges { + + r.req.Merges = append(r.req.Merges, v) + + } + return r +} + +// The optional vocabulary value scores if required by the tokenizer. +// API name: scores +func (r *PutTrainedModelVocabulary) Scores(scores ...types.Float64) *PutTrainedModelVocabulary { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range scores { + + r.req.Scores = append(r.req.Scores, v) + + } + return r +} + +// The model vocabulary, which must not be empty. +// API name: vocabulary +func (r *PutTrainedModelVocabulary) Vocabulary(vocabularies ...string) *PutTrainedModelVocabulary { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range vocabularies { + + r.req.Vocabulary = append(r.req.Vocabulary, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/request.go new file mode 100644 index 000000000..d0997052b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/request.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodelvocabulary + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package puttrainedmodelvocabulary +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L68 +type Request struct { + + // Merges The optional model merges if required by the tokenizer. + Merges []string `json:"merges,omitempty"` + // Scores The optional vocabulary value scores if required by the tokenizer. + Scores []types.Float64 `json:"scores,omitempty"` + // Vocabulary The model vocabulary, which must not be empty. + Vocabulary []string `json:"vocabulary"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puttrainedmodelvocabulary request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/response.go new file mode 100644 index 000000000..58d310524 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttrainedmodelvocabulary + +// Response holds the response body struct for the package puttrainedmodelvocabulary +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/reset_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/reset_job.go new file mode 100644 index 000000000..c82862705 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/reset_job.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reset an anomaly detection job. +// All model state and results are deleted. The job is ready to start over as if +// it had just been created. +// It is not currently possible to reset multiple jobs using wildcards or a +// comma separated list. +package resetjob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResetJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResetJob type alias for index. +type NewResetJob func(jobid string) *ResetJob + +// NewResetJobFunc returns a new instance of ResetJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResetJobFunc(tp elastictransport.Interface) NewResetJob { + return func(jobid string) *ResetJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Reset an anomaly detection job. +// All model state and results are deleted. 
The job is ready to start over as if +// it had just been created. +// It is not currently possible to reset multiple jobs using wildcards or a +// comma separated list. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job +func New(tp elastictransport.Interface) *ResetJob { + r := &ResetJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ResetJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_reset") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
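+//
+// An illustrative sketch (editorial; es is assumed to be a configured
+// *elasticsearch.TypedClient): Perform is useful when the caller wants the raw
+// *http.Response instead of the decoded Response type.
+//
+//	res, err := es.ML.ResetJob("my-job").Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//		fmt.Println(res.StatusCode)
+//	}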
+func (r ResetJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.reset_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.reset_job") + if reader := instrument.RecordRequestBody(ctx, "ml.reset_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.reset_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResetJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resetjob.Response +func (r ResetJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.reset_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
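+//
+// A minimal sketch (editorial; es is assumed to be a configured
+// *elasticsearch.TypedClient):
+//
+//	ok, err := es.ML.ResetJob("my-job").WaitForCompletion(true).IsSuccess(ctx)
+//
+// Per the implementation below, ok is true for any 2xx status, false with a
+// nil error for a 404, and other status codes are reported as errors.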
+func (r ResetJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.reset_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ResetJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ResetJob headers map. +func (r *ResetJob) Header(key, value string) *ResetJob { + r.headers.Set(key, value) + + return r +} + +// JobId The ID of the job to reset. +// API Name: jobid +func (r *ResetJob) _jobid(jobid string) *ResetJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// WaitForCompletion Should this request wait until the operation has completed before +// returning. +// API name: wait_for_completion +func (r *ResetJob) WaitForCompletion(waitforcompletion bool) *ResetJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// DeleteUserAnnotations Specifies whether annotations that have been added by the +// user should be deleted along with any auto-generated annotations when the job +// is +// reset. +// API name: delete_user_annotations +func (r *ResetJob) DeleteUserAnnotations(deleteuserannotations bool) *ResetJob { + r.values.Set("delete_user_annotations", strconv.FormatBool(deleteuserannotations)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ResetJob) ErrorTrace(errortrace bool) *ResetJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResetJob) FilterPath(filterpaths ...string) *ResetJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResetJob) Human(human bool) *ResetJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ResetJob) Pretty(pretty bool) *ResetJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/response.go new file mode 100644 index 000000000..03c3de022 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resetjob + +// Response holds the response body struct for the package resetjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/reset_job/MlResetJobResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/request.go new file mode 100644 index 000000000..fba132b59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/request.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package revertmodelsnapshot + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package revertmodelsnapshot +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L77 +type Request struct { + + // DeleteInterveningResults Refer to the description for the `delete_intervening_results` query + // parameter. + DeleteInterveningResults *bool `json:"delete_intervening_results,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Revertmodelsnapshot request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/response.go new file mode 100644 index 000000000..aa82cd87c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package revertmodelsnapshot + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package revertmodelsnapshot +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 +type Response struct { + Model types.ModelSnapshot `json:"model"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go new file mode 100644 index 000000000..5ac1193b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Revert to a snapshot. +// The machine learning features react quickly to anomalous input, learning new +// behaviors in data. Highly anomalous input increases the variance in the +// models whilst the system learns whether this is a new step-change in behavior +// or a one-off event. In the case where this anomalous input is known to be a +// one-off, then it might be appropriate to reset the model state to a time +// before this event. For example, you might consider reverting to a saved +// snapshot after Black Friday or a critical system failure. +package revertmodelsnapshot + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RevertModelSnapshot struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRevertModelSnapshot type alias for index. +type NewRevertModelSnapshot func(jobid, snapshotid string) *RevertModelSnapshot + +// NewRevertModelSnapshotFunc returns a new instance of RevertModelSnapshot with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRevertModelSnapshotFunc(tp elastictransport.Interface) NewRevertModelSnapshot { + return func(jobid, snapshotid string) *RevertModelSnapshot { + n := New(tp) + + n._jobid(jobid) + + n._snapshotid(snapshotid) + + return n + } +} + +// Revert to a snapshot. +// The machine learning features react quickly to anomalous input, learning new +// behaviors in data. Highly anomalous input increases the variance in the +// models whilst the system learns whether this is a new step-change in behavior +// or a one-off event. In the case where this anomalous input is known to be a +// one-off, then it might be appropriate to reset the model state to a time +// before this event. For example, you might consider reverting to a saved +// snapshot after Black Friday or a critical system failure. 
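+//
+// A minimal usage sketch (editorial, not part of the generated docs; es is
+// assumed to be a configured *elasticsearch.TypedClient). Reverting to the
+// special `empty` snapshot discards the learned model state entirely:
+//
+//	resp, err := es.ML.RevertModelSnapshot("my-job", "empty").
+//		DeleteInterveningResults(true).
+//		Do(context.Background())
+//	if err == nil {
+//		fmt.Printf("reverted to snapshot: %+v\n", resp.Model)
+//	}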
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot +func New(tp elastictransport.Interface) *RevertModelSnapshot { + r := &RevertModelSnapshot{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RevertModelSnapshot) Raw(raw io.Reader) *RevertModelSnapshot { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RevertModelSnapshot) Request(req *Request) *RevertModelSnapshot { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RevertModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RevertModelSnapshot: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + path.WriteString("/") + path.WriteString("_revert") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RevertModelSnapshot) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.revert_model_snapshot") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.revert_model_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.revert_model_snapshot", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.revert_model_snapshot") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RevertModelSnapshot query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a revertmodelsnapshot.Response +func (r RevertModelSnapshot) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.revert_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the RevertModelSnapshot headers map. +func (r *RevertModelSnapshot) Header(key, value string) *RevertModelSnapshot { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *RevertModelSnapshot) _jobid(jobid string) *RevertModelSnapshot { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId You can specify `empty` as the <snapshot_id>. Reverting to the empty +// snapshot means the anomaly detection job starts learning a new model from +// scratch when it is started.
+// API Name: snapshotid +func (r *RevertModelSnapshot) _snapshotid(snapshotid string) *RevertModelSnapshot { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RevertModelSnapshot) ErrorTrace(errortrace bool) *RevertModelSnapshot { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RevertModelSnapshot) FilterPath(filterpaths ...string) *RevertModelSnapshot { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RevertModelSnapshot) Human(human bool) *RevertModelSnapshot { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RevertModelSnapshot) Pretty(pretty bool) *RevertModelSnapshot { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `delete_intervening_results` query +// parameter. +// API name: delete_intervening_results +func (r *RevertModelSnapshot) DeleteInterveningResults(deleteinterveningresults bool) *RevertModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DeleteInterveningResults = &deleteinterveningresults + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/response.go new file mode 100644 index 000000000..6922b9e03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package setupgrademode + +// Response holds the response body struct for the package setupgrademode +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/set_upgrade_mode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/set_upgrade_mode.go new file mode 100644 index 000000000..77835fa24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode/set_upgrade_mode.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Set upgrade_mode for ML indices. +// Sets a cluster wide upgrade_mode setting that prepares machine learning +// indices for an upgrade. +// When upgrading your cluster, in some circumstances you must restart your +// nodes and reindex your machine learning indices. In those circumstances, +// there must be no machine learning jobs running. You can close the machine +// learning jobs, do the upgrade, then open all the jobs again. Alternatively, +// you can use this API to temporarily halt tasks associated with the jobs and +// datafeeds and prevent new jobs from opening. You can also use this API +// during upgrades that do not require you to reindex your machine learning +// indices, though stopping jobs is not a requirement in that case. +// You can see the current value for the upgrade_mode setting by using the get +// machine learning info API. +package setupgrademode + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SetUpgradeMode struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSetUpgradeMode type alias for index. +type NewSetUpgradeMode func() *SetUpgradeMode + +// NewSetUpgradeModeFunc returns a new instance of SetUpgradeMode with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSetUpgradeModeFunc(tp elastictransport.Interface) NewSetUpgradeMode { + return func() *SetUpgradeMode { + n := New(tp) + + return n + } +} + +// Set upgrade_mode for ML indices. +// Sets a cluster wide upgrade_mode setting that prepares machine learning +// indices for an upgrade. +// When upgrading your cluster, in some circumstances you must restart your +// nodes and reindex your machine learning indices. In those circumstances, +// there must be no machine learning jobs running. You can close the machine +// learning jobs, do the upgrade, then open all the jobs again. Alternatively, +// you can use this API to temporarily halt tasks associated with the jobs and +// datafeeds and prevent new jobs from opening. You can also use this API +// during upgrades that do not require you to reindex your machine learning +// indices, though stopping jobs is not a requirement in that case. +// You can see the current value for the upgrade_mode setting by using the get +// machine learning info API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode +func New(tp elastictransport.Interface) *SetUpgradeMode { + r := &SetUpgradeMode{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SetUpgradeMode) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("set_upgrade_mode") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SetUpgradeMode) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.set_upgrade_mode") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.set_upgrade_mode") + if reader := instrument.RecordRequestBody(ctx, "ml.set_upgrade_mode", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.set_upgrade_mode") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SetUpgradeMode query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a setupgrademode.Response +func (r SetUpgradeMode) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.set_upgrade_mode") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
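+//
+// A hedged sketch (editorial; es is assumed to be a configured
+// *elasticsearch.TypedClient) of the typical bracket pattern around an upgrade:
+//
+//	ok, err := es.ML.SetUpgradeMode().Enabled(true).Timeout("10m").IsSuccess(ctx)
+//	// ... perform the node restart or reindex ...
+//	ok, err = es.ML.SetUpgradeMode().Enabled(false).IsSuccess(ctx)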
+func (r SetUpgradeMode) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.set_upgrade_mode") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SetUpgradeMode query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SetUpgradeMode headers map. +func (r *SetUpgradeMode) Header(key, value string) *SetUpgradeMode { + r.headers.Set(key, value) + + return r +} + +// Enabled When `true`, it enables `upgrade_mode` which temporarily halts all job +// and datafeed tasks and prohibits new job and datafeed tasks from +// starting. +// API name: enabled +func (r *SetUpgradeMode) Enabled(enabled bool) *SetUpgradeMode { + r.values.Set("enabled", strconv.FormatBool(enabled)) + + return r +} + +// Timeout The time to wait for the request to be completed. +// API name: timeout +func (r *SetUpgradeMode) Timeout(duration string) *SetUpgradeMode { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SetUpgradeMode) ErrorTrace(errortrace bool) *SetUpgradeMode { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SetUpgradeMode) FilterPath(filterpaths ...string) *SetUpgradeMode { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SetUpgradeMode) Human(human bool) *SetUpgradeMode { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SetUpgradeMode) Pretty(pretty bool) *SetUpgradeMode { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/request.go new file mode 100644 index 000000000..9755e2e55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/request.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package startdatafeed + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package startdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L99 +type Request struct { + + // End Refer to the description for the `end` query parameter. + End types.DateTime `json:"end,omitempty"` + // Start Refer to the description for the `start` query parameter. + Start types.DateTime `json:"start,omitempty"` + // Timeout Refer to the description for the `timeout` query parameter. + Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Startdatafeed request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/response.go new file mode 100644 index 000000000..7b14fc90f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/response.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package startdatafeed + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Response holds the response body struct for the package startdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 +type Response struct { + + // Node The ID of the node that the job was started on. In serverless this will be + // the "serverless". + // If the job is allowed to open lazily and has not yet been assigned to a node, + // this value is an empty string. + Node []string `json:"node"` + // Started For a successful response, this value is always `true`. On failure, an + // exception is returned instead. + Started bool `json:"started"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + s.Node = append(s.Node, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + } + + case "started": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Started", err) + } + s.Started = value + case bool: + s.Started = v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/start_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/start_datafeed.go new file mode 100644 index 000000000..85e12d6cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed/start_datafeed.go @@ -0,0 +1,441 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start datafeeds. +// +// A datafeed must be started in order to retrieve data from Elasticsearch. A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +// +// Before you can start a datafeed, the anomaly detection job must be open. +// Otherwise, an error occurs. +// +// If you restart a stopped datafeed, it continues processing input data from +// the next millisecond after it was stopped. +// If new data was indexed for that exact millisecond between stopping and +// starting, it will be ignored. +// +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the last user to create or +// update it had at the time of creation or update and runs the query using +// those same roles. If you provided secondary +// authorization headers when you created or updated the datafeed, those +// credentials are used instead. +package startdatafeed + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StartDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStartDatafeed type alias for index. +type NewStartDatafeed func(datafeedid string) *StartDatafeed + +// NewStartDatafeedFunc returns a new instance of StartDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartDatafeedFunc(tp elastictransport.Interface) NewStartDatafeed { + return func(datafeedid string) *StartDatafeed { + n := New(tp) + + n._datafeedid(datafeedid) + + return n + } +} + +// Start datafeeds. +// +// A datafeed must be started in order to retrieve data from Elasticsearch. A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +// +// Before you can start a datafeed, the anomaly detection job must be open. +// Otherwise, an error occurs. +// +// If you restart a stopped datafeed, it continues processing input data from +// the next millisecond after it was stopped. +// If new data was indexed for that exact millisecond between stopping and +// starting, it will be ignored. +// +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the last user to create or +// update it had at the time of creation or update and runs the query using +// those same roles. If you provided secondary +// authorization headers when you created or updated the datafeed, those +// credentials are used instead. 
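+//
+// A minimal sketch (editorial; es is assumed to be a configured
+// *elasticsearch.TypedClient and the associated anomaly detection job to be
+// open). The optional body is loaded with FromJSON from request.go above:
+//
+//	req, err := startdatafeed.NewRequest().FromJSON(`{"start": "2025-01-01T00:00:00Z"}`)
+//	if err == nil {
+//		resp, err := es.ML.StartDatafeed("datafeed-total-requests").Request(req).Do(ctx)
+//		if err == nil {
+//			fmt.Println(resp.Started)
+//		}
+//	}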
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed +func New(tp elastictransport.Interface) *StartDatafeed { + r := &StartDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StartDatafeed) Raw(raw io.Reader) *StartDatafeed { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StartDatafeed) Request(req *Request) *StartDatafeed { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StartDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StartDatafeed: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StartDatafeed) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.start_datafeed") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.start_datafeed", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_datafeed") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StartDatafeed query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a startdatafeed.Response +func (r StartDatafeed) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StartDatafeed headers map. +func (r *StartDatafeed) Header(key, value string) *StartDatafeed { + r.headers.Set(key, value) + + return r +} + +// DatafeedId A numerical character string that uniquely identifies the datafeed. This +// identifier can contain lowercase +// alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must +// start and end with alphanumeric +// characters. +// API Name: datafeedid +func (r *StartDatafeed) _datafeedid(datafeedid string) *StartDatafeed { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *StartDatafeed) ErrorTrace(errortrace bool) *StartDatafeed { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StartDatafeed) FilterPath(filterpaths ...string) *StartDatafeed { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StartDatafeed) Human(human bool) *StartDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StartDatafeed) Pretty(pretty bool) *StartDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `end` query parameter. +// API name: end +func (r *StartDatafeed) End(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.End = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `start` query parameter. +// API name: start +func (r *StartDatafeed) Start(datetime types.DateTimeVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Start = *datetime.DateTimeCaster() + + return r +} + +// Refer to the description for the `timeout` query parameter. +// API name: timeout +func (r *StartDatafeed) Timeout(duration types.DurationVariant) *StartDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/response.go new file mode 100644 index 000000000..9769590a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
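// Editor's sketch (not part of the diff): a minimal, hedged usage example for
// the StartDatafeed builder above. The transport, datafeed ID, and JSON body
// are illustrative assumptions; only methods defined in this diff are used.
// Raw supplies the optional body directly and takes precedence over the typed
// Request, per the Raw doc comment above.
package examples

import (
	"context"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed"
)

func startFeed(ctx context.Context, tp elastictransport.Interface) error {
	// NewStartDatafeedFunc yields a constructor that sets the required
	// datafeed ID path parameter; the body's "start" field matches the
	// typed Start setter shown above.
	res, err := startdatafeed.NewStartDatafeedFunc(tp)("my-datafeed").
		Raw(strings.NewReader(`{"start":"2025-01-01T00:00:00Z"}`)).
		Do(ctx)
	if err != nil {
		return err // transport failures and *types.ElasticsearchError both surface here
	}
	_ = res // *startdatafeed.Response, decoded from a 2xx body
	return nil
}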
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package startdataframeanalytics + +// Response holds the response body struct for the package startdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 +type Response struct { + Acknowledged bool `json:"acknowledged"` + // Node The ID of the node that the job was started on. If the job is allowed to open + // lazily and has not yet been assigned to a node, this value is an empty + // string. + // The node ID of the node the job has been assigned to, or + // an empty string if it hasn't been assigned to a node. In + // serverless if the job has been assigned to run then the + // node ID will be "serverless". + Node string `json:"node"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go new file mode 100644 index 000000000..4e9c4d367 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start a data frame analytics job. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. +// If the destination index does not exist, it is created automatically the +// first time you start the data frame analytics job. The +// `index.number_of_shards` and `index.number_of_replicas` settings for the +// destination index are copied from the source index. If there are multiple +// source indices, the destination index copies the highest setting values. The +// mappings for the destination index are also copied from the source indices. +// If there are any mapping conflicts, the job fails to start. +// If the destination index exists, it is used as is. You can therefore set up +// the destination index in advance with custom settings and mappings. 
+package startdataframeanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StartDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStartDataFrameAnalytics type alias for index. +type NewStartDataFrameAnalytics func(id string) *StartDataFrameAnalytics + +// NewStartDataFrameAnalyticsFunc returns a new instance of StartDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStartDataFrameAnalytics { + return func(id string) *StartDataFrameAnalytics { + n := New(tp) + + n._id(id) + + return n + } +} + +// Start a data frame analytics job. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. +// If the destination index does not exist, it is created automatically the +// first time you start the data frame analytics job. The +// `index.number_of_shards` and `index.number_of_replicas` settings for the +// destination index are copied from the source index. If there are multiple +// source indices, the destination index copies the highest setting values. The +// mappings for the destination index are also copied from the source indices. +// If there are any mapping conflicts, the job fails to start. +// If the destination index exists, it is used as is. You can therefore set up +// the destination index in advance with custom settings and mappings. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics +func New(tp elastictransport.Interface) *StartDataFrameAnalytics { + r := &StartDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *StartDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r StartDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.start_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.start_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StartDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a startdataframeanalytics.Response +func (r StartDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r StartDataFrameAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StartDataFrameAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StartDataFrameAnalytics headers map. +func (r *StartDataFrameAnalytics) Header(key, value string) *StartDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and +// underscores. It must start and end with alphanumeric characters. +// API Name: id +func (r *StartDataFrameAnalytics) _id(id string) *StartDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// Timeout Controls the amount of time to wait until the data frame analytics job +// starts. +// API name: timeout +func (r *StartDataFrameAnalytics) Timeout(duration string) *StartDataFrameAnalytics { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StartDataFrameAnalytics) ErrorTrace(errortrace bool) *StartDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *StartDataFrameAnalytics) FilterPath(filterpaths ...string) *StartDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StartDataFrameAnalytics) Human(human bool) *StartDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StartDataFrameAnalytics) Pretty(pretty bool) *StartDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/request.go new file mode 100644 index 000000000..ee1c9278f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package starttrainedmodeldeployment + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package starttrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentRequest.ts#L30-L111 +type Request struct { + + // AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations + // is set based on the current load. + // If adaptive_allocations is enabled, do not set the number of allocations + // manually. 
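// Editor's sketch (not part of the diff): hedged usage of the
// StartDataFrameAnalytics endpoint defined above. The job ID and timeout are
// illustrative; Timeout here is the query-string variant taking a plain
// string, as declared in this diff.
package examples

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics"
)

func startAnalytics(ctx context.Context, tp elastictransport.Interface) error {
	res, err := startdataframeanalytics.NewStartDataFrameAnalyticsFunc(tp)("my-dfa-job").
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	// Response carries Acknowledged plus the assigned node ID, which is empty
	// while a lazily-opened job is unassigned ("serverless" on serverless).
	fmt.Println(res.Acknowledged, res.Node)
	return nil
}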
+ AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Starttrainedmodeldeployment request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/response.go new file mode 100644 index 000000000..b81a3aa68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package starttrainedmodeldeployment + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package starttrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 +type Response struct { + Assignment types.TrainedModelAssignment `json:"assignment"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go new file mode 100644 index 000000000..d67aaae3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go @@ -0,0 +1,470 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start a trained model deployment. +// It allocates the model to every machine learning node. +package starttrainedmodeldeployment + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StartTrainedModelDeployment struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStartTrainedModelDeployment type alias for index. +type NewStartTrainedModelDeployment func(modelid string) *StartTrainedModelDeployment + +// NewStartTrainedModelDeploymentFunc returns a new instance of StartTrainedModelDeployment with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStartTrainedModelDeployment { + return func(modelid string) *StartTrainedModelDeployment { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Start a trained model deployment. +// It allocates the model to every machine learning node. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment +func New(tp elastictransport.Interface) *StartTrainedModelDeployment { + r := &StartTrainedModelDeployment{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StartTrainedModelDeployment) Raw(raw io.Reader) *StartTrainedModelDeployment { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StartTrainedModelDeployment) Request(req *Request) *StartTrainedModelDeployment { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *StartTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StartTrainedModelDeployment: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StartTrainedModelDeployment) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.start_trained_model_deployment") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.start_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.start_trained_model_deployment", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.start_trained_model_deployment") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StartTrainedModelDeployment query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a starttrainedmodeldeployment.Response +func (r StartTrainedModelDeployment) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.start_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StartTrainedModelDeployment headers map. +func (r *StartTrainedModelDeployment) Header(key, value string) *StartTrainedModelDeployment { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. Currently, only PyTorch models +// are supported. +// API Name: modelid +func (r *StartTrainedModelDeployment) _modelid(modelid string) *StartTrainedModelDeployment { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// CacheSize The inference cache size (in memory outside the JVM heap) per node for the +// model. +// The default value is the same size as the `model_size_bytes`. 
To disable the +// cache, +// `0b` can be provided. +// API name: cache_size +func (r *StartTrainedModelDeployment) CacheSize(bytesize string) *StartTrainedModelDeployment { + r.values.Set("cache_size", bytesize) + + return r +} + +// DeploymentId A unique identifier for the deployment of the model. +// API name: deployment_id +func (r *StartTrainedModelDeployment) DeploymentId(deploymentid string) *StartTrainedModelDeployment { + r.values.Set("deployment_id", deploymentid) + + return r +} + +// NumberOfAllocations The number of model allocations on each node where the model is deployed. +// All allocations on a node share the same copy of the model in memory but use +// a separate set of threads to evaluate the model. +// Increasing this value generally increases the throughput. +// If this setting is greater than the number of hardware threads +// it will automatically be changed to a value less than the number of hardware +// threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. +// API name: number_of_allocations +func (r *StartTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *StartTrainedModelDeployment { + r.values.Set("number_of_allocations", strconv.Itoa(numberofallocations)) + + return r +} + +// Priority The deployment priority. +// API name: priority +func (r *StartTrainedModelDeployment) Priority(priority trainingpriority.TrainingPriority) *StartTrainedModelDeployment { + r.values.Set("priority", priority.String()) + + return r +} + +// QueueCapacity Specifies the number of inference requests that are allowed in the queue. +// After the number of requests exceeds +// this value, new requests are rejected with a 429 error. +// API name: queue_capacity +func (r *StartTrainedModelDeployment) QueueCapacity(queuecapacity int) *StartTrainedModelDeployment { + r.values.Set("queue_capacity", strconv.Itoa(queuecapacity)) + + return r +} + +// ThreadsPerAllocation Sets the number of threads used by each model allocation during inference. +// This generally increases +// the inference speed. The inference process is a compute-bound process; any +// number +// greater than the number of available hardware threads on the machine does not +// increase the +// inference speed. If this setting is greater than the number of hardware +// threads +// it will automatically be changed to a value less than the number of hardware +// threads. +// API name: threads_per_allocation +func (r *StartTrainedModelDeployment) ThreadsPerAllocation(threadsperallocation int) *StartTrainedModelDeployment { + r.values.Set("threads_per_allocation", strconv.Itoa(threadsperallocation)) + + return r +} + +// Timeout Specifies the amount of time to wait for the model to deploy. +// API name: timeout +func (r *StartTrainedModelDeployment) Timeout(duration string) *StartTrainedModelDeployment { + r.values.Set("timeout", duration) + + return r +} + +// WaitFor Specifies the allocation status to wait for before returning. +// API name: wait_for +func (r *StartTrainedModelDeployment) WaitFor(waitfor deploymentallocationstate.DeploymentAllocationState) *StartTrainedModelDeployment { + r.values.Set("wait_for", waitfor.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *StartTrainedModelDeployment) ErrorTrace(errortrace bool) *StartTrainedModelDeployment { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StartTrainedModelDeployment) FilterPath(filterpaths ...string) *StartTrainedModelDeployment { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StartTrainedModelDeployment) Human(human bool) *StartTrainedModelDeployment { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StartTrainedModelDeployment) Pretty(pretty bool) *StartTrainedModelDeployment { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *StartTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *StartTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/request.go new file mode 100644 index 000000000..3d6075beb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/request.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
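// Editor's sketch (not part of the diff): hedged usage of
// StartTrainedModelDeployment above. The model ID and sizing values are
// illustrative. adaptive_allocations is populated via FromJSON to avoid
// depending on types not shown in this diff; the "enabled" key is an
// assumption about the AdaptiveAllocationsSettings JSON shape. Since the docs
// above say not to set number_of_allocations manually when adaptive
// allocations are enabled, it is left unset here.
package examples

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment"
)

func deployModel(ctx context.Context, tp elastictransport.Interface) error {
	req, err := starttrainedmodeldeployment.NewRequest().
		FromJSON(`{"adaptive_allocations":{"enabled":true}}`)
	if err != nil {
		return err
	}
	res, err := starttrainedmodeldeployment.NewStartTrainedModelDeploymentFunc(tp)("my-pytorch-model").
		Request(req).
		ThreadsPerAllocation(1). // query parameters declared in this diff
		Timeout("60s").
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("assignment: %+v\n", res.Assignment)
	return nil
}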
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stopdatafeed + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package stopdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L86 +type Request struct { + + // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. + AllowNoMatch *bool `json:"allow_no_match,omitempty"` + // Force Refer to the description for the `force` query parameter. + Force *bool `json:"force,omitempty"` + // Timeout Refer to the description for the `timeout` query parameter. + Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Stopdatafeed request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_no_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowNoMatch", err) + } + s.AllowNoMatch = &value + case bool: + s.AllowNoMatch = &v + } + + case "force": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Force", err) + } + s.Force = &value + case bool: + s.Force = &v + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/response.go new file mode 100644 index 000000000..73d1277c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
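// Editor's sketch (not part of the diff): the custom UnmarshalJSON above
// accepts allow_no_match and force either as JSON booleans or as strings.
// This example (illustrative values) shows FromJSON, which routes through that
// decoder, tolerating the string form.
package examples

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed"
)

func lenientStopRequest() error {
	req, err := stopdatafeed.NewRequest().FromJSON(`{"force":"true","timeout":"30s"}`)
	if err != nil {
		return err
	}
	fmt.Println(*req.Force) // true, parsed from the string "true" via strconv.ParseBool
	return nil
}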
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stopdatafeed + +// Response holds the response body struct for the package stopdatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 +type Response struct { + Stopped bool `json:"stopped"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/stop_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/stop_datafeed.go new file mode 100644 index 000000000..cb7ce5d0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed/stop_datafeed.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop datafeeds. +// A datafeed that is stopped ceases to retrieve data from Elasticsearch. A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +package stopdatafeed + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StopDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStopDatafeed type alias for index. +type NewStopDatafeed func(datafeedid string) *StopDatafeed + +// NewStopDatafeedFunc returns a new instance of StopDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopDatafeedFunc(tp elastictransport.Interface) NewStopDatafeed { + return func(datafeedid string) *StopDatafeed { + n := New(tp) + + n._datafeedid(datafeedid) + + return n + } +} + +// Stop datafeeds. +// A datafeed that is stopped ceases to retrieve data from Elasticsearch. 
A +// datafeed can be started and stopped +// multiple times throughout its lifecycle. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed +func New(tp elastictransport.Interface) *StopDatafeed { + r := &StopDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *StopDatafeed) Raw(raw io.Reader) *StopDatafeed { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *StopDatafeed) Request(req *Request) *StopDatafeed { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StopDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for StopDatafeed: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StopDatafeed) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.stop_datafeed") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_datafeed", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_datafeed") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StopDatafeed query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stopdatafeed.Response +func (r StopDatafeed) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the StopDatafeed headers map. +func (r *StopDatafeed) Header(key, value string) *StopDatafeed { + r.headers.Set(key, value) + + return r +} + +// DatafeedId Identifier for the datafeed. You can stop multiple datafeeds in a single API +// request by using a comma-separated +// list of datafeeds or a wildcard expression. You can close all datafeeds by +// using `_all` or by specifying `*` as +// the identifier. +// API Name: datafeedid +func (r *StopDatafeed) _datafeedid(datafeedid string) *StopDatafeed { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *StopDatafeed) ErrorTrace(errortrace bool) *StopDatafeed { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StopDatafeed) FilterPath(filterpaths ...string) *StopDatafeed { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StopDatafeed) Human(human bool) *StopDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StopDatafeed) Pretty(pretty bool) *StopDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Refer to the description for the `allow_no_match` query parameter. +// API name: allow_no_match +func (r *StopDatafeed) AllowNoMatch(allownomatch bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowNoMatch = &allownomatch + + return r +} + +// Refer to the description for the `force` query parameter. +// API name: force +func (r *StopDatafeed) Force(force bool) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Force = &force + + return r +} + +// Refer to the description for the `timeout` query parameter. +// API name: timeout +func (r *StopDatafeed) Timeout(duration types.DurationVariant) *StopDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/response.go new file mode 100644 index 000000000..11bd938e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
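// Editor's sketch (not part of the diff): hedged usage of the StopDatafeed
// builder above. The wildcard ID and option values are illustrative; per the
// DatafeedId doc comment, comma-separated lists, wildcards, and `_all` are
// accepted. AllowNoMatch and Force populate the request body through the
// typed setters shown above.
package examples

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed"
)

func stopFeeds(ctx context.Context, tp elastictransport.Interface) error {
	res, err := stopdatafeed.NewStopDatafeedFunc(tp)("my-datafeed-*").
		AllowNoMatch(true).
		Force(false).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("stopped:", res.Stopped)
	return nil
}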
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stopdataframeanalytics + +// Response holds the response body struct for the package stopdataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 +type Response struct { + Stopped bool `json:"stopped"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go new file mode 100644 index 000000000..80ce61c31 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go @@ -0,0 +1,395 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop data frame analytics jobs. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. +package stopdataframeanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StopDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStopDataFrameAnalytics type alias for index. +type NewStopDataFrameAnalytics func(id string) *StopDataFrameAnalytics + +// NewStopDataFrameAnalyticsFunc returns a new instance of StopDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStopDataFrameAnalytics { + return func(id string) *StopDataFrameAnalytics { + n := New(tp) + + n._id(id) + + return n + } +} + +// Stop data frame analytics jobs. +// A data frame analytics job can be started and stopped multiple times +// throughout its lifecycle. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics +func New(tp elastictransport.Interface) *StopDataFrameAnalytics { + r := &StopDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StopDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
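
(Editorial sketch; not part of the vendored file.) The HttpRequest builder above assembles POST /_ml/data_frame/analytics/{id}/_stop, and Perform/Do below execute it. A minimal end-to-end usage sketch, assuming the v9 typed client keeps the v8 surface (elasticsearch.NewTypedClient and the es.ML namespace); the cluster address and job id are illustrative:

    package main

    import (
        "context"
        "fmt"
        "log"

        elasticsearch "github.com/elastic/go-elasticsearch/v9"
    )

    func main() {
        es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
            Addresses: []string{"http://localhost:9200"}, // illustrative address
        })
        if err != nil {
            log.Fatal(err)
        }
        ctx := context.Background()

        // POST /_ml/data_frame/analytics/my-analytics/_stop?force=true&timeout=30s
        res, err := es.ML.StopDataFrameAnalytics("my-analytics"). // hypothetical job id
            Force(true).    // stop even if the job is mid-phase
            Timeout("30s"). // wait up to 30 seconds for the job to stop
            Do(ctx)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("stopped:", res.Stopped) // Response carries a single Stopped bool
    }

The later sketches in this section reuse es and ctx from here.
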
+func (r StopDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.stop_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StopDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stopdataframeanalytics.Response +func (r StopDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
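
(Editorial sketch.) Do, above, decodes any 2xx body into Response; anything else is decoded into *types.ElasticsearchError and returned as the error, with Status backfilled from the HTTP status code when the body omits it. IsSuccess, next, is the body-less variant that collapses the outcome to a boolean. Distinguishing the two error shapes, reusing es and ctx, and assuming the stdlib errors package and the generated typedapi/types package are imported:

    if res, err := es.ML.StopDataFrameAnalytics("no-such-job").Do(ctx); err != nil {
        var esErr *types.ElasticsearchError
        if errors.As(err, &esErr) {
            // Server-side rejection, e.g. 404 when allow_no_match=false.
            fmt.Println("elasticsearch error, status:", esErr.Status)
        } else {
            // Transport-level failure (connection refused, cancelled context).
            fmt.Println("transport error:", err)
        }
    } else {
        fmt.Println("stopped:", res.Stopped)
    }
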
+func (r StopDataFrameAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StopDataFrameAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StopDataFrameAnalytics headers map. +func (r *StopDataFrameAnalytics) Header(key, value string) *StopDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and +// underscores. It must start and end with alphanumeric characters. +// API Name: id +func (r *StopDataFrameAnalytics) _id(id string) *StopDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no data frame analytics +// jobs that match. +// 2. Contains the _all string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// The default value is true, which returns an empty data_frame_analytics +// array when there are no matches and the subset of results when there are +// partial matches. If this parameter is false, the request returns a 404 +// status code when there are no matches or only partial matches. +// API name: allow_no_match +func (r *StopDataFrameAnalytics) AllowNoMatch(allownomatch bool) *StopDataFrameAnalytics { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// Force If true, the data frame analytics job is stopped forcefully. +// API name: force +func (r *StopDataFrameAnalytics) Force(force bool) *StopDataFrameAnalytics { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// Timeout Controls the amount of time to wait until the data frame analytics job +// stops. Defaults to 20 seconds. +// API name: timeout +func (r *StopDataFrameAnalytics) Timeout(duration string) *StopDataFrameAnalytics { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StopDataFrameAnalytics) ErrorTrace(errortrace bool) *StopDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *StopDataFrameAnalytics) FilterPath(filterpaths ...string) *StopDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StopDataFrameAnalytics) Human(human bool) *StopDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StopDataFrameAnalytics) Pretty(pretty bool) *StopDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/response.go new file mode 100644 index 000000000..7c1630fad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stoptrainedmodeldeployment + +// Response holds the response body struct for the package stoptrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 +type Response struct { + Stopped bool `json:"stopped"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go new file mode 100644 index 000000000..3690827f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop a trained model deployment. +package stoptrainedmodeldeployment + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StopTrainedModelDeployment struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStopTrainedModelDeployment type alias for index. +type NewStopTrainedModelDeployment func(modelid string) *StopTrainedModelDeployment + +// NewStopTrainedModelDeploymentFunc returns a new instance of StopTrainedModelDeployment with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStopTrainedModelDeployment { + return func(modelid string) *StopTrainedModelDeployment { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Stop a trained model deployment. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment +func New(tp elastictransport.Interface) *StopTrainedModelDeployment { + r := &StopTrainedModelDeployment{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
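
(Editorial sketch.) NewStopTrainedModelDeploymentFunc above curries the transport so the client's API index can expose the one-argument form used below. Stopping a deployment, reusing es and ctx; the model id is hypothetical, and force=true covers the case where ingest pipelines still reference the model, as the Force doc further down notes:

    res, err := es.ML.StopTrainedModelDeployment("my-elser-model").
        AllowNoMatch(true). // an empty match returns an empty result rather than a 404
        Force(true).        // stop even if ingest pipelines still use the deployment
        Do(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("deployment stopped:", res.Stopped)
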
+func (r *StopTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r StopTrainedModelDeployment) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.stop_trained_model_deployment") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.stop_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.stop_trained_model_deployment", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.stop_trained_model_deployment") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StopTrainedModelDeployment query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stoptrainedmodeldeployment.Response +func (r StopTrainedModelDeployment) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r StopTrainedModelDeployment) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.stop_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StopTrainedModelDeployment query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StopTrainedModelDeployment headers map. +func (r *StopTrainedModelDeployment) Header(key, value string) *StopTrainedModelDeployment { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. +// API Name: modelid +func (r *StopTrainedModelDeployment) _modelid(modelid string) *StopTrainedModelDeployment { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and +// there are no deployments that match; +// contains the `_all` string or no identifiers and there are no matches; or +// contains wildcard expressions and +// there are only partial matches. By default, it returns an empty array when +// there are no matches and the subset of results when there are partial +// matches. +// If `false`, the request returns a 404 status code when there are no matches +// or only partial matches. +// API name: allow_no_match +func (r *StopTrainedModelDeployment) AllowNoMatch(allownomatch bool) *StopTrainedModelDeployment { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// Force Forcefully stops the deployment, even if it is used by ingest pipelines. You +// can't use these pipelines until you +// restart the model deployment. 
+// API name: force +func (r *StopTrainedModelDeployment) Force(force bool) *StopTrainedModelDeployment { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StopTrainedModelDeployment) ErrorTrace(errortrace bool) *StopTrainedModelDeployment { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StopTrainedModelDeployment) FilterPath(filterpaths ...string) *StopTrainedModelDeployment { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StopTrainedModelDeployment) Human(human bool) *StopTrainedModelDeployment { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StopTrainedModelDeployment) Pretty(pretty bool) *StopTrainedModelDeployment { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/request.go new file mode 100644 index 000000000..562553ab5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/request.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatedatafeed + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatedatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L164 +type Request struct { + + // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations + // is limited and should be used only + // with low cardinality data. + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + // ChunkingConfig Datafeeds might search over long time periods, for several months or years. + // This search is split into time + // chunks in order to ensure the load on Elasticsearch is managed. Chunking + // configuration controls how the size of + // these time chunks are calculated; it is an advanced configuration option. + ChunkingConfig *types.ChunkingConfig `json:"chunking_config,omitempty"` + // DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the + // window. The datafeed can optionally + // search over indices that have already been read in an effort to determine + // whether any data has subsequently been + // added to the index. If missing data is found, it is a good indication that + // the `query_delay` is set too low and + // the data is being indexed after the datafeed has passed that moment in time. + // This check runs only on real-time + // datafeeds. + DelayedDataCheckConfig *types.DelayedDataCheckConfig `json:"delayed_data_check_config,omitempty"` + // Frequency The interval at which scheduled queries are made while the datafeed runs in + // real time. The default value is + // either the bucket span for short bucket spans, or, for longer bucket spans, a + // sensible fraction of the bucket + // span. When `frequency` is shorter than the bucket span, interim results for + // the last (partial) bucket are + // written then eventually overwritten by the full bucket results. If the + // datafeed uses aggregations, this value + // must be divisible by the interval of the date histogram aggregation. + Frequency types.Duration `json:"frequency,omitempty"` + // Indices An array of index names. Wildcards are supported. If any of the indices are + // in remote clusters, the machine + // learning nodes must have the `remote_cluster_client` role. + Indices []string `json:"indices,omitempty"` + // IndicesOptions Specifies index expansion options that are used during search. + IndicesOptions *types.IndicesOptions `json:"indices_options,omitempty"` + JobId *string `json:"job_id,omitempty"` + // MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial + // training period), it automatically + // stops and closes the associated job after this many real-time searches return + // no documents. In other words, + // it stops after `frequency` times `max_empty_searches` of real-time operation. + // If not set, a datafeed with no + // end time that sees no data remains started until it is explicitly stopped. By + // default, it is not set. + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + // Query The Elasticsearch query domain-specific language (DSL). 
This value + // corresponds to the query object in an + // Elasticsearch search POST body. All the options that are supported by + // Elasticsearch can be used, as this + // object is passed verbatim to Elasticsearch. Note that if you change the + // query, the analyzed data is also + // changed. Therefore, the time required to learn might be long and the + // understandability of the results is + // unpredictable. If you want to make significant changes to the source data, it + // is recommended that you + // clone the job and datafeed and make the amendments in the clone. Let both run + // in parallel and close one + // when you are satisfied with the results of the job. + Query *types.Query `json:"query,omitempty"` + // QueryDelay The number of seconds behind real time that data is queried. For example, if + // data from 10:04 a.m. might + // not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 + // seconds. The default + // value is randomly selected between `60s` and `120s`. This randomness improves + // the query performance + // when there are multiple jobs running on the same node. + QueryDelay types.Duration `json:"query_delay,omitempty"` + // RuntimeMappings Specifies runtime fields for the datafeed search. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Specifies scripts that evaluate custom expressions and returns script fields + // to the datafeed. + // The detector configuration objects in a job can contain functions that use + // these script fields. + ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + // ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed + // does not use aggregations. + // The maximum value is the value of `index.max_result_window`. 
+ ScrollSize *int `json:"scroll_size,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.Aggregations, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatedatafeed request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "chunking_config": + if err := dec.Decode(&s.ChunkingConfig); err != nil { + return fmt.Errorf("%s | %w", "ChunkingConfig", err) + } + + case "delayed_data_check_config": + if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil { + return fmt.Errorf("%s | %w", "DelayedDataCheckConfig", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "indices", "indexes": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return fmt.Errorf("%s | %w", "IndicesOptions", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "max_empty_searches": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxEmptySearches", err) + } + s.MaxEmptySearches = &value + case float64: + f := int(v) + s.MaxEmptySearches = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "query_delay": + if err := dec.Decode(&s.QueryDelay); err != nil { + return fmt.Errorf("%s | %w", "QueryDelay", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]types.ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "scroll_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScrollSize", err) + } + s.ScrollSize = &value + case float64: + f := int(v) + s.ScrollSize = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/response.go new file mode 100644 index 000000000..0e2447d48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatedatafeed + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatedatafeed +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 +type Response struct { + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + Authorization *types.DatafeedAuthorization `json:"authorization,omitempty"` + ChunkingConfig types.ChunkingConfig `json:"chunking_config"` + DatafeedId string `json:"datafeed_id"` + DelayedDataCheckConfig *types.DelayedDataCheckConfig `json:"delayed_data_check_config,omitempty"` + Frequency types.Duration `json:"frequency,omitempty"` + Indices []string `json:"indices"` + IndicesOptions *types.IndicesOptions `json:"indices_options,omitempty"` + JobId string `json:"job_id"` + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + Query types.Query `json:"query"` + QueryDelay types.Duration `json:"query_delay"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` + ScrollSize int `json:"scroll_size"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregations, 0), + ScriptFields: make(map[string]types.ScriptField, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/update_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/update_datafeed.go new file mode 100644 index 000000000..487c0c586 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed/update_datafeed.go @@ -0,0 +1,675 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a datafeed. +// You must stop and start the datafeed for the changes to be applied. +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the user who updated it had at +// the time of the update and runs the query using those same roles. If you +// provide secondary authorization headers, +// those credentials are used instead. +package updatedatafeed + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + datafeedidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateDatafeed struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + datafeedid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateDatafeed type alias for index. +type NewUpdateDatafeed func(datafeedid string) *UpdateDatafeed + +// NewUpdateDatafeedFunc returns a new instance of UpdateDatafeed with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateDatafeedFunc(tp elastictransport.Interface) NewUpdateDatafeed { + return func(datafeedid string) *UpdateDatafeed { + n := New(tp) + + n._datafeedid(datafeedid) + + return n + } +} + +// Update a datafeed. +// You must stop and start the datafeed for the changes to be applied. +// When Elasticsearch security features are enabled, your datafeed remembers +// which roles the user who updated it had at +// the time of the update and runs the query using those same roles. If you +// provide secondary authorization headers, +// those credentials are used instead. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed +func New(tp elastictransport.Interface) *UpdateDatafeed { + r := &UpdateDatafeed{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateDatafeed) Raw(raw io.Reader) *UpdateDatafeed { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateDatafeed) Request(req *Request) *UpdateDatafeed { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
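
(Editorial sketch.) A body can be supplied three ways: Raw passes an io.Reader through verbatim and takes precedence over everything else, Request installs a typed *updatedatafeed.Request, and the field-level setters later in the file build one incrementally. Round-tripping raw JSON through the typed request with FromJSON, reusing es and ctx; the datafeed id and values are illustrative:

    body, err := updatedatafeed.NewRequest().FromJSON(`{
        "scroll_size": 1000,
        "indices": ["kibana_sample_data_logs"]
    }`)
    if err != nil {
        log.Fatal(err)
    }
    res, err := es.ML.UpdateDatafeed("datafeed-low_request_rate").Request(body).Do(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("updated:", res.DatafeedId)
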
+func (r *UpdateDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateDatafeed: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == datafeedidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("datafeeds") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "datafeedid", r.datafeedid) + } + path.WriteString(r.datafeedid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
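
(Editorial sketch.) Equivalently, the body-level setters further down (Indices, ScrollSize, MaxEmptySearches, and the rest) allocate the request lazily on first use, so a call chain needs no explicit NewRequest; HttpRequest above then serialises whatever was accumulated. Reusing es and ctx:

    res, err := es.ML.UpdateDatafeed("datafeed-low_request_rate").
        Indices("kibana_sample_data_logs").
        ScrollSize(2000).     // capped by index.max_result_window
        MaxEmptySearches(10). // auto-stop after ten empty real-time searches
        Do(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(res.DatafeedId, "scroll_size =", res.ScrollSize)
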
+func (r UpdateDatafeed) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_datafeed") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_datafeed") + if reader := instrument.RecordRequestBody(ctx, "ml.update_datafeed", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_datafeed") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateDatafeed query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatedatafeed.Response +func (r UpdateDatafeed) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_datafeed") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateDatafeed headers map. +func (r *UpdateDatafeed) Header(key, value string) *UpdateDatafeed { + r.headers.Set(key, value) + + return r +} + +// DatafeedId A numerical character string that uniquely identifies the datafeed. +// This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. +// It must start and end with alphanumeric characters. +// API Name: datafeedid +func (r *UpdateDatafeed) _datafeedid(datafeedid string) *UpdateDatafeed { + r.paramSet |= datafeedidMask + r.datafeedid = datafeedid + + return r +} + +// AllowNoIndices If `true`, wildcard indices expressions that resolve into no concrete indices +// are ignored. This includes the +// `_all` string or when no indices are specified. 
+// API name: allow_no_indices +func (r *UpdateDatafeed) AllowNoIndices(allownoindices bool) *UpdateDatafeed { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines +// whether wildcard expressions match hidden data streams. Supports +// comma-separated values. +// API name: expand_wildcards +func (r *UpdateDatafeed) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateDatafeed { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *UpdateDatafeed) IgnoreThrottled(ignorethrottled bool) *UpdateDatafeed { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If `true`, unavailable indices (missing or closed) are ignored. +// API name: ignore_unavailable +func (r *UpdateDatafeed) IgnoreUnavailable(ignoreunavailable bool) *UpdateDatafeed { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateDatafeed) ErrorTrace(errortrace bool) *UpdateDatafeed { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateDatafeed) FilterPath(filterpaths ...string) *UpdateDatafeed { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateDatafeed) Human(human bool) *UpdateDatafeed { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateDatafeed) Pretty(pretty bool) *UpdateDatafeed { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If set, the datafeed performs aggregation searches. Support for aggregations +// is limited and should be used only +// with low cardinality data. 
+// API name: aggregations +func (r *UpdateDatafeed) Aggregations(aggregations map[string]types.Aggregations) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aggregations = aggregations + return r +} + +func (r *UpdateDatafeed) AddAggregation(key string, value types.AggregationsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp + return r +} + +// Datafeeds might search over long time periods, for several months or years. +// This search is split into time +// chunks in order to ensure the load on Elasticsearch is managed. Chunking +// configuration controls how the size of +// these time chunks are calculated; it is an advanced configuration option. +// API name: chunking_config +func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig types.ChunkingConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ChunkingConfig = chunkingconfig.ChunkingConfigCaster() + + return r +} + +// Specifies whether the datafeed checks for missing data and the size of the +// window. The datafeed can optionally +// search over indices that have already been read in an effort to determine +// whether any data has subsequently been +// added to the index. If missing data is found, it is a good indication that +// the `query_delay` is set too low and +// the data is being indexed after the datafeed has passed that moment in time. +// This check runs only on real-time +// datafeeds. +// API name: delayed_data_check_config +func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig types.DelayedDataCheckConfigVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DelayedDataCheckConfig = delayeddatacheckconfig.DelayedDataCheckConfigCaster() + + return r +} + +// The interval at which scheduled queries are made while the datafeed runs in +// real time. The default value is +// either the bucket span for short bucket spans, or, for longer bucket spans, a +// sensible fraction of the bucket +// span. When `frequency` is shorter than the bucket span, interim results for +// the last (partial) bucket are +// written then eventually overwritten by the full bucket results. If the +// datafeed uses aggregations, this value +// must be divisible by the interval of the date histogram aggregation. +// API name: frequency +func (r *UpdateDatafeed) Frequency(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() + + return r +} + +// An array of index names. Wildcards are supported. If any of the indices are +// in remote clusters, the machine +// learning nodes must have the `remote_cluster_client` role. 
+// API name: indices +func (r *UpdateDatafeed) Indices(indices ...string) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, v) + + } + return r +} + +// Specifies index expansion options that are used during search. +// API name: indices_options +func (r *UpdateDatafeed) IndicesOptions(indicesoptions types.IndicesOptionsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndicesOptions = indicesoptions.IndicesOptionsCaster() + + return r +} + +// API name: job_id +func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.JobId = &id + + return r +} + +// If a real-time datafeed has never seen any data (including during any initial +// training period), it automatically +// stops and closes the associated job after this many real-time searches return +// no documents. In other words, +// it stops after `frequency` times `max_empty_searches` of real-time operation. +// If not set, a datafeed with no +// end time that sees no data remains started until it is explicitly stopped. By +// default, it is not set. +// API name: max_empty_searches +func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxEmptySearches = &maxemptysearches + + return r +} + +// The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an +// Elasticsearch search POST body. All the options that are supported by +// Elasticsearch can be used, as this +// object is passed verbatim to Elasticsearch. Note that if you change the +// query, the analyzed data is also +// changed. Therefore, the time required to learn might be long and the +// understandability of the results is +// unpredictable. If you want to make significant changes to the source data, it +// is recommended that you +// clone the job and datafeed and make the amendments in the clone. Let both run +// in parallel and close one +// when you are satisfied with the results of the job. +// API name: query +func (r *UpdateDatafeed) Query(query types.QueryVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// The number of seconds behind real time that data is queried. For example, if +// data from 10:04 a.m. might +// not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 +// seconds. The default +// value is randomly selected between `60s` and `120s`. This randomness improves +// the query performance +// when there are multiple jobs running on the same node. +// API name: query_delay +func (r *UpdateDatafeed) QueryDelay(duration types.DurationVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryDelay = *duration.DurationCaster() + + return r +} + +// Specifies runtime fields for the datafeed search. 
+// API name: runtime_mappings +func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// Specifies scripts that evaluate custom expressions and returns script fields +// to the datafeed. +// The detector configuration objects in a job can contain functions that use +// these script fields. +// API name: script_fields +func (r *UpdateDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ScriptFields = scriptfields + return r +} + +func (r *UpdateDatafeed) AddScriptField(key string, value types.ScriptFieldVariant) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ScriptField + if r.req.ScriptFields == nil { + r.req.ScriptFields = make(map[string]types.ScriptField) + } else { + tmp = r.req.ScriptFields + } + + tmp[key] = *value.ScriptFieldCaster() + + r.req.ScriptFields = tmp + return r +} + +// The size parameter that is used in Elasticsearch searches when the datafeed +// does not use aggregations. +// The maximum value is the value of `index.max_result_window`. +// API name: scroll_size +func (r *UpdateDatafeed) ScrollSize(scrollsize int) *UpdateDatafeed { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ScrollSize = &scrollsize + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/request.go new file mode 100644 index 000000000..791ad85ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/request.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatedataframeanalytics + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatedataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L80 +type Request struct { + + // AllowLazyStart Specifies whether this job can start when there is insufficient machine + // learning node capacity for it to be immediately assigned to a node. + AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // MaxNumThreads The maximum number of threads to be used by the analysis. Using more + // threads may decrease the time necessary to complete the analysis at the + // cost of using more CPU. Note that the process may use additional threads + // for operational functionality other than the analysis itself. + MaxNumThreads *int `json:"max_num_threads,omitempty"` + // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for + // analytical processing. If your `elasticsearch.yml` file contains an + // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + // to create data frame analytics jobs that have `model_memory_limit` values + // greater than that setting. + ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatedataframeanalytics request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/response.go new file mode 100644 index 000000000..7c5703edf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/response.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
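The `FromJSON` helper above maps an arbitrary JSON document onto the typed request; since this Request has no custom decoder, unknown fields are silently ignored by encoding/json. A usage sketch with illustrative body values that are not part of this diff:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics"
)

func main() {
	// Load a hand-written body into the typed Request shown above.
	req, err := updatedataframeanalytics.NewRequest().FromJSON(`{
	  "description": "nightly re-tune",
	  "max_num_threads": 2,
	  "model_memory_limit": "1gb"
	}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(*req.Description, *req.MaxNumThreads, *req.ModelMemoryLimit)
}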
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatedataframeanalytics + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatedataframeanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 +type Response struct { + AllowLazyStart bool `json:"allow_lazy_start"` + Analysis types.DataframeAnalysisContainer `json:"analysis"` + AnalyzedFields *types.DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` + Authorization *types.DataframeAnalyticsAuthorization `json:"authorization,omitempty"` + CreateTime int64 `json:"create_time"` + Description *string `json:"description,omitempty"` + Dest types.DataframeAnalyticsDestination `json:"dest"` + Id string `json:"id"` + MaxNumThreads int `json:"max_num_threads"` + ModelMemoryLimit string `json:"model_memory_limit"` + Source types.DataframeAnalyticsSource `json:"source"` + Version string `json:"version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go new file mode 100644 index 000000000..86dc62124 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go @@ -0,0 +1,424 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a data frame analytics job. +package updatedataframeanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateDataFrameAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateDataFrameAnalytics type alias for index. +type NewUpdateDataFrameAnalytics func(id string) *UpdateDataFrameAnalytics + +// NewUpdateDataFrameAnalyticsFunc returns a new instance of UpdateDataFrameAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateDataFrameAnalyticsFunc(tp elastictransport.Interface) NewUpdateDataFrameAnalytics { + return func(id string) *UpdateDataFrameAnalytics { + n := New(tp) + + n._id(id) + + return n + } +} + +// Update a data frame analytics job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics +func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { + r := &UpdateDataFrameAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateDataFrameAnalytics) Raw(raw io.Reader) *UpdateDataFrameAnalytics { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateDataFrameAnalytics) Request(req *Request) *UpdateDataFrameAnalytics { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
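As the `Raw` doc comment above notes, a pre-serialised body short-circuits the typed `Request`. A build-only sketch (nil transport, hypothetical job ID `weblog-outliers`, neither part of this diff) showing that the vendored content-type header is still stamped onto the raw body:

package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics"
)

func main() {
	// HttpRequest only serialises r.req when r.raw is nil, so this body
	// is sent verbatim; no network I/O happens at build time.
	b := updatedataframeanalytics.NewUpdateDataFrameAnalyticsFunc(nil)("weblog-outliers")
	b.Raw(strings.NewReader(`{"description":"pre-serialised body"}`))

	req, err := b.HttpRequest(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Content-Type")) // application/vnd.elasticsearch+json;compatible-with=9
}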
+func (r *UpdateDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateDataFrameAnalytics: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("data_frame") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
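`HttpRequest` above performs no I/O, so the generated route can be checked offline. A sketch under the same assumptions as before (nil transport, hypothetical job ID):

package main

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics"
)

func main() {
	// Inspect the route the builder produces without a live cluster.
	req, err := updatedataframeanalytics.NewUpdateDataFrameAnalyticsFunc(nil)("weblog-outliers").
		MaxNumThreads(2).
		HttpRequest(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.Path) // POST /_ml/data_frame/analytics/weblog-outliers/_update
}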
+func (r UpdateDataFrameAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_data_frame_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_data_frame_analytics") + if reader := instrument.RecordRequestBody(ctx, "ml.update_data_frame_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_data_frame_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateDataFrameAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatedataframeanalytics.Response +func (r UpdateDataFrameAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_data_frame_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateDataFrameAnalytics headers map. +func (r *UpdateDataFrameAnalytics) Header(key, value string) *UpdateDataFrameAnalytics { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the data frame analytics job. This identifier can contain +// lowercase alphanumeric characters (a-z and 0-9), hyphens, and +// underscores. It must start and end with alphanumeric characters. +// API Name: id +func (r *UpdateDataFrameAnalytics) _id(id string) *UpdateDataFrameAnalytics { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateDataFrameAnalytics) ErrorTrace(errortrace bool) *UpdateDataFrameAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateDataFrameAnalytics) FilterPath(filterpaths ...string) *UpdateDataFrameAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateDataFrameAnalytics) Human(human bool) *UpdateDataFrameAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateDataFrameAnalytics) Pretty(pretty bool) *UpdateDataFrameAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. +// API name: allow_lazy_start +func (r *UpdateDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// A description of the job. +// API name: description +func (r *UpdateDataFrameAnalytics) Description(description string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. +// API name: max_num_threads +func (r *UpdateDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try +// to create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. 
+// API name: model_memory_limit +func (r *UpdateDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *UpdateDataFrameAnalytics { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelMemoryLimit = &modelmemorylimit + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/request.go new file mode 100644 index 000000000..35c1bb7cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefilter + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatefilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L60 +type Request struct { + + // AddItems The items to add to the filter. + AddItems []string `json:"add_items,omitempty"` + // Description A description for the filter. + Description *string `json:"description,omitempty"` + // RemoveItems The items to remove from the filter. + RemoveItems []string `json:"remove_items,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatefilter request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/response.go new file mode 100644 index 000000000..f945ce7d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatefilter + +// Response holds the response body struct for the package updatefilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 +type Response struct { + Description string `json:"description"` + FilterId string `json:"filter_id"` + Items []string `json:"items"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/update_filter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/update_filter.go new file mode 100644 index 000000000..d44852da2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter/update_filter.go @@ -0,0 +1,407 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a filter. +// Updates the description of a filter, adds items, or removes items from the +// list. +package updatefilter + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + filteridMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateFilter struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + filterid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateFilter type alias for index. 
+type NewUpdateFilter func(filterid string) *UpdateFilter + +// NewUpdateFilterFunc returns a new instance of UpdateFilter with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateFilterFunc(tp elastictransport.Interface) NewUpdateFilter { + return func(filterid string) *UpdateFilter { + n := New(tp) + + n._filterid(filterid) + + return n + } +} + +// Update a filter. +// Updates the description of a filter, adds items, or removes items from the +// list. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter +func New(tp elastictransport.Interface) *UpdateFilter { + r := &UpdateFilter{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateFilter) Raw(raw io.Reader) *UpdateFilter { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateFilter) Request(req *Request) *UpdateFilter { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateFilter) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateFilter: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == filteridMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("filters") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "filterid", r.filterid) + } + path.WriteString(r.filterid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
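The `paramSet`/`filteridMask` plumbing in `HttpRequest` above is a small bitmask router: each path parameter sets one bit, the switch matches the exact combination, and anything else falls through to `ErrBuildPath`. A self-contained illustration of the scheme, not the generated code itself:

package main

import "fmt"

// Each path parameter contributes one bit to paramSet; with a single
// parameter (filterid) the only routable combination is filteridMask.
const filteridMask = 1

func route(paramSet int) string {
	switch {
	case paramSet == filteridMask:
		return "/_ml/filters/{filterid}/_update"
	}
	return "" // no route matched: HttpRequest would return ErrBuildPath
}

func main() {
	fmt.Println(route(filteridMask)) // /_ml/filters/{filterid}/_update
	fmt.Println(route(0) == "")      // true
}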
+func (r UpdateFilter) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_filter") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_filter") + if reader := instrument.RecordRequestBody(ctx, "ml.update_filter", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_filter") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateFilter query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatefilter.Response +func (r UpdateFilter) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_filter") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateFilter headers map. +func (r *UpdateFilter) Header(key, value string) *UpdateFilter { + r.headers.Set(key, value) + + return r +} + +// FilterId A string that uniquely identifies a filter. +// API Name: filterid +func (r *UpdateFilter) _filterid(filterid string) *UpdateFilter { + r.paramSet |= filteridMask + r.filterid = filterid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateFilter) ErrorTrace(errortrace bool) *UpdateFilter { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateFilter) FilterPath(filterpaths ...string) *UpdateFilter { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateFilter) Human(human bool) *UpdateFilter { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateFilter) Pretty(pretty bool) *UpdateFilter { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The items to add to the filter. +// API name: add_items +func (r *UpdateFilter) AddItems(additems ...string) *UpdateFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range additems { + + r.req.AddItems = append(r.req.AddItems, v) + + } + return r +} + +// A description for the filter. +// API name: description +func (r *UpdateFilter) Description(description string) *UpdateFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The items to remove from the filter. +// API name: remove_items +func (r *UpdateFilter) RemoveItems(removeitems ...string) *UpdateFilter { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range removeitems { + + r.req.RemoveItems = append(r.req.RemoveItems, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/request.go new file mode 100644 index 000000000..00ee320f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/request.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
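Putting the `updatefilter` endpoint together end to end: a minimal sketch assuming a reachable cluster on localhost:9200 and a pre-existing filter ID `safe_domains` (both illustrative). The base client can serve as the transport because it implements the `Perform` method required by `elastictransport.Interface`:

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed local cluster
	})
	if err != nil {
		log.Fatal(err)
	}

	// "safe_domains" is a hypothetical, pre-existing filter ID.
	res, err := updatefilter.NewUpdateFilterFunc(es)("safe_domains").
		AddItems("*.elastic.co").
		RemoveItems("*.example.com").
		Description("domains the detectors should ignore").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("filter %s now has %d items", res.FilterId, len(res.Items))
}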
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatejob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatejob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L147 +type Request struct { + + // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when + // there is insufficient machine learning node capacity for it to be + // immediately assigned to a node. If `false` and a machine learning node + // with capacity to run the job cannot immediately be found, the open + // anomaly detection jobs API returns an error. However, this is also + // subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this + // option is set to `true`, the open anomaly detection jobs API does not + // return an error and the job waits in the opening state until sufficient + // machine learning node capacity is available. + AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"` + AnalysisLimits *types.AnalysisMemoryLimit `json:"analysis_limits,omitempty"` + // BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence + // of the model. + // The default value is a randomized value between 3 to 4 hours, which + // avoids all jobs persisting at exactly the same time. The smallest allowed + // value is 1 hour. + // For very large models (several GB), persistence could take 10-20 minutes, + // so do not set the value too low. + // If the job is open when you make the update, you must stop the datafeed, + // close the job, then reopen the job and restart the datafeed for the + // changes to take effect. + BackgroundPersistInterval types.Duration `json:"background_persist_interval,omitempty"` + CategorizationFilters []string `json:"categorization_filters,omitempty"` + // CustomSettings Advanced configuration option. Contains custom meta data about the job. + // For example, it can contain custom URL information as shown in Adding + // custom URLs to machine learning results. + CustomSettings map[string]json.RawMessage `json:"custom_settings,omitempty"` + // DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. It specifies a period of time (in days) + // after which only the first snapshot per day is retained. This period is + // relative to the timestamp of the most recent snapshot for this job. Valid + // values range from 0 to `model_snapshot_retention_days`. For jobs created + // before version 7.8.0, the default value matches + // `model_snapshot_retention_days`. + DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // Detectors An array of detector update objects. + Detectors []types.DetectorUpdate `json:"detectors,omitempty"` + // Groups A list of job groups. A job can belong to no groups or many. 
+ Groups []string `json:"groups,omitempty"`
+ ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"`
+ ModelPruneWindow types.Duration `json:"model_prune_window,omitempty"`
+ // ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old
+ // model snapshots for this job. It specifies the maximum period of time (in
+ // days) that snapshots are retained. This period is relative to the
+ // timestamp of the most recent snapshot for this job.
+ ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"`
+ // PerPartitionCategorization Settings related to how categorization interacts with partition fields.
+ PerPartitionCategorization *types.PerPartitionCategorization `json:"per_partition_categorization,omitempty"`
+ // RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the
+ // score are applied, as new data is seen.
+ RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"`
+ // ResultsRetentionDays Advanced configuration option. The period of time (in days) that results
+ // are retained. Age is calculated relative to the timestamp of the latest
+ // bucket result. If this property has a non-null value, once per day at
+ // 00:30 (server time), results that are the specified number of days older
+ // than the latest bucket result are deleted from Elasticsearch. The default
+ // value is null, which means all results are retained.
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+ r := &Request{
+ CustomSettings: make(map[string]json.RawMessage, 0),
+ }
+
+ return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+ var req Request
+ err := json.Unmarshal([]byte(data), &req)
+
+ if err != nil {
+ return nil, fmt.Errorf("could not deserialise json into Updatejob request: %w", err)
+ }
+
+ return &req, nil
+}
+
+func (s *Request) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "allow_lazy_open":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "AllowLazyOpen", err)
+ }
+ s.AllowLazyOpen = &value
+ case bool:
+ s.AllowLazyOpen = &v
+ }
+
+ case "analysis_limits":
+ if err := dec.Decode(&s.AnalysisLimits); err != nil {
+ return fmt.Errorf("%s | %w", "AnalysisLimits", err)
+ }
+
+ case "background_persist_interval":
+ if err := dec.Decode(&s.BackgroundPersistInterval); err != nil {
+ return fmt.Errorf("%s | %w", "BackgroundPersistInterval", err)
+ }
+
+ case "categorization_filters":
+ if err := dec.Decode(&s.CategorizationFilters); err != nil {
+ return fmt.Errorf("%s | %w", "CategorizationFilters", err)
+ }
+
+ case "custom_settings":
+ if s.CustomSettings == nil {
+ s.CustomSettings = make(map[string]json.RawMessage, 0)
+ }
+ if err := dec.Decode(&s.CustomSettings); err != nil {
+ return fmt.Errorf("%s | %w", "CustomSettings", err)
+ }
+
+ case "daily_model_snapshot_retention_after_days":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err)
+ }
+ s.DailyModelSnapshotRetentionAfterDays = &value
+ case float64:
+ f := int64(v)
+ s.DailyModelSnapshotRetentionAfterDays = &f
+ }
+
+ case "description":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Description", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Description = &o
+
+ case "detectors":
+ if err := dec.Decode(&s.Detectors); err != nil {
+ return fmt.Errorf("%s | %w", "Detectors", err)
+ }
+
+ case "groups":
+ if err := dec.Decode(&s.Groups); err != nil {
+ return fmt.Errorf("%s | %w", "Groups", err)
+ }
+
+ case "model_plot_config":
+ if err := dec.Decode(&s.ModelPlotConfig); err != nil {
+ return fmt.Errorf("%s | %w", "ModelPlotConfig", err)
+ }
+
+ case "model_prune_window":
+ if err := dec.Decode(&s.ModelPruneWindow); err != nil {
+ return fmt.Errorf("%s | %w", "ModelPruneWindow", err)
+ }
+
+ case "model_snapshot_retention_days":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err)
+ }
+ s.ModelSnapshotRetentionDays = &value
+ case float64:
+ f := int64(v)
+ s.ModelSnapshotRetentionDays = &f
+ }
+
+ case "per_partition_categorization":
+ if err := dec.Decode(&s.PerPartitionCategorization); err != nil {
+ return fmt.Errorf("%s | %w", "PerPartitionCategorization", err)
+ }
+
+ case "renormalization_window_days":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "RenormalizationWindowDays", err)
+ }
+ s.RenormalizationWindowDays = &value
+ case float64:
+ f := int64(v)
+ s.RenormalizationWindowDays = &f
+ }
+
+ case "results_retention_days":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "ResultsRetentionDays", err)
+ }
+ s.ResultsRetentionDays = &value
+ case float64:
+ f := int64(v)
+ s.ResultsRetentionDays = &f
+ }
+
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/response.go
new file mode 100644
index 000000000..ffcdfb9e9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/response.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatejob + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatejob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 +type Response struct { + AllowLazyOpen bool `json:"allow_lazy_open"` + AnalysisConfig types.AnalysisConfigRead `json:"analysis_config"` + AnalysisLimits types.AnalysisLimits `json:"analysis_limits"` + BackgroundPersistInterval types.Duration `json:"background_persist_interval,omitempty"` + CreateTime int64 `json:"create_time"` + CustomSettings map[string]string `json:"custom_settings,omitempty"` + DailyModelSnapshotRetentionAfterDays int64 `json:"daily_model_snapshot_retention_after_days"` + DataDescription types.DataDescription `json:"data_description"` + DatafeedConfig *types.MLDatafeed `json:"datafeed_config,omitempty"` + Description *string `json:"description,omitempty"` + FinishedTime *int64 `json:"finished_time,omitempty"` + Groups []string `json:"groups,omitempty"` + JobId string `json:"job_id"` + JobType string `json:"job_type"` + JobVersion string `json:"job_version"` + ModelPlotConfig *types.ModelPlotConfig `json:"model_plot_config,omitempty"` + ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` + ModelSnapshotRetentionDays int64 `json:"model_snapshot_retention_days"` + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + ResultsIndexName string `json:"results_index_name"` + ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + CustomSettings: make(map[string]string, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/update_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/update_job.go new file mode 100644 index 000000000..bd379b33b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob/update_job.go @@ -0,0 +1,610 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update an anomaly detection job. +// Updates certain properties of an anomaly detection job. 
+package updatejob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateJob type alias for index. +type NewUpdateJob func(jobid string) *UpdateJob + +// NewUpdateJobFunc returns a new instance of UpdateJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateJobFunc(tp elastictransport.Interface) NewUpdateJob { + return func(jobid string) *UpdateJob { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Update an anomaly detection job. +// Updates certain properties of an anomaly detection job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job +func New(tp elastictransport.Interface) *UpdateJob { + r := &UpdateJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateJob) Raw(raw io.Reader) *UpdateJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateJob) Request(req *Request) *UpdateJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_job") + if reader := instrument.RecordRequestBody(ctx, "ml.update_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatejob.Response +func (r UpdateJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateJob headers map. +func (r *UpdateJob) Header(key, value string) *UpdateJob { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the job. +// API Name: jobid +func (r *UpdateJob) _jobid(jobid string) *UpdateJob { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateJob) ErrorTrace(errortrace bool) *UpdateJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateJob) FilterPath(filterpaths ...string) *UpdateJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateJob) Human(human bool) *UpdateJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateJob) Pretty(pretty bool) *UpdateJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Advanced configuration option. Specifies whether this job can open when +// there is insufficient machine learning node capacity for it to be +// immediately assigned to a node. If `false` and a machine learning node +// with capacity to run the job cannot immediately be found, the open +// anomaly detection jobs API returns an error. However, this is also +// subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +// option is set to `true`, the open anomaly detection jobs API does not +// return an error and the job waits in the opening state until sufficient +// machine learning node capacity is available. +// API name: allow_lazy_open +func (r *UpdateJob) AllowLazyOpen(allowlazyopen bool) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowLazyOpen = &allowlazyopen + + return r +} + +// API name: analysis_limits +func (r *UpdateJob) AnalysisLimits(analysislimits types.AnalysisMemoryLimitVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisLimits = analysislimits.AnalysisMemoryLimitCaster() + + return r +} + +// Advanced configuration option. The time between each periodic persistence +// of the model. +// The default value is a randomized value between 3 to 4 hours, which +// avoids all jobs persisting at exactly the same time. The smallest allowed +// value is 1 hour. +// For very large models (several GB), persistence could take 10-20 minutes, +// so do not set the value too low. +// If the job is open when you make the update, you must stop the datafeed, +// close the job, then reopen the job and restart the datafeed for the +// changes to take effect. +// API name: background_persist_interval +func (r *UpdateJob) BackgroundPersistInterval(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.BackgroundPersistInterval = *duration.DurationCaster() + + return r +} + +// API name: categorization_filters +func (r *UpdateJob) CategorizationFilters(categorizationfilters ...string) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range categorizationfilters { + + r.req.CategorizationFilters = append(r.req.CategorizationFilters, v) + + } + return r +} + +// Advanced configuration option. 
Contains custom meta data about the job. +// For example, it can contain custom URL information as shown in Adding +// custom URLs to machine learning results. +// API name: custom_settings +func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.CustomSettings = customsettings + return r +} + +func (r *UpdateJob) AddCustomSetting(key string, value json.RawMessage) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.CustomSettings == nil { + r.req.CustomSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.CustomSettings + } + + tmp[key] = value + + r.req.CustomSettings = tmp + return r +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies a period of time (in days) +// after which only the first snapshot per day is retained. This period is +// relative to the timestamp of the most recent snapshot for this job. Valid +// values range from 0 to `model_snapshot_retention_days`. For jobs created +// before version 7.8.0, the default value matches +// `model_snapshot_retention_days`. +// API name: daily_model_snapshot_retention_after_days +func (r *UpdateJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays + + return r +} + +// A description of the job. +// API name: description +func (r *UpdateJob) Description(description string) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// An array of detector update objects. +// API name: detectors +func (r *UpdateJob) Detectors(detectors ...types.DetectorUpdateVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range detectors { + + r.req.Detectors = append(r.req.Detectors, *v.DetectorUpdateCaster()) + + } + return r +} + +// A list of job groups. A job can belong to no groups or many. +// API name: groups +func (r *UpdateJob) Groups(groups ...string) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range groups { + + r.req.Groups = append(r.req.Groups, v) + + } + return r +} + +// API name: model_plot_config +func (r *UpdateJob) ModelPlotConfig(modelplotconfig types.ModelPlotConfigVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelPlotConfig = modelplotconfig.ModelPlotConfigCaster() + + return r +} + +// API name: model_prune_window +func (r *UpdateJob) ModelPruneWindow(duration types.DurationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelPruneWindow = *duration.DurationCaster() + + return r +} + +// Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. 
It specifies the maximum period of time (in +// days) that snapshots are retained. This period is relative to the +// timestamp of the most recent snapshot for this job. +// API name: model_snapshot_retention_days +func (r *UpdateJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// Settings related to how categorization interacts with partition fields. +// API name: per_partition_categorization +func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization types.PerPartitionCategorizationVariant) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PerPartitionCategorization = perpartitioncategorization.PerPartitionCategorizationCaster() + + return r +} + +// Advanced configuration option. The period over which adjustments to the +// score are applied, as new data is seen. +// API name: renormalization_window_days +func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RenormalizationWindowDays = &renormalizationwindowdays + + return r +} + +// Advanced configuration option. The period of time (in days) that results +// are retained. Age is calculated relative to the timestamp of the latest +// bucket result. If this property has a non-null value, once per day at +// 00:30 (server time), results that are the specified number of days older +// than the latest bucket result are deleted from Elasticsearch. The default +// value is null, which means all results are retained. +// API name: results_retention_days +func (r *UpdateJob) ResultsRetentionDays(resultsretentiondays int64) *UpdateJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ResultsRetentionDays = &resultsretentiondays + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/request.go new file mode 100644 index 000000000..df23c8c3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
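
// Usage sketch for the ml.update_job builder in the hunk above. The typed-client
// wiring (elasticsearch.NewTypedClient, es.ML.UpdateJob) and the job id are
// illustrative assumptions, not part of this diff; Description, AllowLazyOpen,
// and Do are the generated methods shown above. Imports (context, log, and the
// v9 client package) are omitted for brevity.
//
//	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
//		Addresses: []string{"http://localhost:9200"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	res, err := es.ML.UpdateJob("my-anomaly-job").
//		Description("nightly anomaly detection").
//		AllowLazyOpen(true).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err) // non-2xx responses surface as *types.ElasticsearchError
//	}
//	_ = res
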
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package updatemodelsnapshot
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Request holds the request body struct for the package updatemodelsnapshot
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L63
+type Request struct {
+
+	// Description A description of the model snapshot.
+	Description *string `json:"description,omitempty"`
+	// Retain If `true`, this snapshot will not be deleted during automatic cleanup of
+	// snapshots older than `model_snapshot_retention_days`. However, this
+	// snapshot will be deleted when the job is deleted.
+	Retain *bool `json:"retain,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON loads an arbitrary JSON payload into the request structure.
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Updatemodelsnapshot request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/response.go
new file mode 100644
index 000000000..5c4965cf3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/response.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatemodelsnapshot + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatemodelsnapshot +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 +type Response struct { + Acknowledged bool `json:"acknowledged"` + Model types.ModelSnapshot `json:"model"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go new file mode 100644 index 000000000..9f9031a9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go @@ -0,0 +1,412 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a snapshot. +// Updates certain properties of a snapshot. +package updatemodelsnapshot + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateModelSnapshot struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateModelSnapshot type alias for index. +type NewUpdateModelSnapshot func(jobid, snapshotid string) *UpdateModelSnapshot + +// NewUpdateModelSnapshotFunc returns a new instance of UpdateModelSnapshot with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewUpdateModelSnapshotFunc(tp elastictransport.Interface) NewUpdateModelSnapshot { + return func(jobid, snapshotid string) *UpdateModelSnapshot { + n := New(tp) + + n._jobid(jobid) + + n._snapshotid(snapshotid) + + return n + } +} + +// Update a snapshot. +// Updates certain properties of a snapshot. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot +func New(tp elastictransport.Interface) *UpdateModelSnapshot { + r := &UpdateModelSnapshot{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateModelSnapshot) Raw(raw io.Reader) *UpdateModelSnapshot { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateModelSnapshot) Request(req *Request) *UpdateModelSnapshot { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateModelSnapshot: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateModelSnapshot) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_model_snapshot") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_model_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.update_model_snapshot", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_model_snapshot") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateModelSnapshot query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatemodelsnapshot.Response +func (r UpdateModelSnapshot) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_model_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateModelSnapshot headers map. +func (r *UpdateModelSnapshot) Header(key, value string) *UpdateModelSnapshot { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *UpdateModelSnapshot) _jobid(jobid string) *UpdateModelSnapshot { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId Identifier for the model snapshot. +// API Name: snapshotid +func (r *UpdateModelSnapshot) _snapshotid(snapshotid string) *UpdateModelSnapshot { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateModelSnapshot) ErrorTrace(errortrace bool) *UpdateModelSnapshot { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateModelSnapshot) FilterPath(filterpaths ...string) *UpdateModelSnapshot { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateModelSnapshot) Human(human bool) *UpdateModelSnapshot { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateModelSnapshot) Pretty(pretty bool) *UpdateModelSnapshot { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A description of the model snapshot. +// API name: description +func (r *UpdateModelSnapshot) Description(description string) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// If `true`, this snapshot will not be deleted during automatic cleanup of +// snapshots older than `model_snapshot_retention_days`. However, this +// snapshot will be deleted when the job is deleted. +// API name: retain +func (r *UpdateModelSnapshot) Retain(retain bool) *UpdateModelSnapshot { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Retain = &retain + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/request.go new file mode 100644 index 000000000..8febf2deb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/request.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
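
// Usage sketch for the ml.update_model_snapshot builder above (client wiring,
// job id, and snapshot id are illustrative assumptions). Retain(true) exempts
// the snapshot from model_snapshot_retention_days cleanup until the job itself
// is deleted:
//
//	res, err := es.ML.UpdateModelSnapshot("my-anomaly-job", "1575402237").
//		Description("baseline kept before re-training").
//		Retain(true).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(res.Acknowledged)
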
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package updatetrainedmodeldeployment
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package updatetrainedmodeldeployment
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L25-L78
+type Request struct {
+
+	// AdaptiveAllocations Adaptive allocations configuration. When enabled, the number of allocations
+	// is set based on the current load.
+	// If adaptive_allocations is enabled, do not set the number of allocations
+	// manually.
+	AdaptiveAllocations *types.AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"`
+	// NumberOfAllocations The number of model allocations on each node where the model is deployed.
+	// All allocations on a node share the same copy of the model in memory but use
+	// a separate set of threads to evaluate the model.
+	// Increasing this value generally increases the throughput.
+	// If this setting is greater than the number of hardware threads
+	// it will automatically be changed to a value less than the number of hardware
+	// threads.
+	// If adaptive_allocations is enabled, do not set this value, because it’s
+	// automatically set.
+	NumberOfAllocations *int `json:"number_of_allocations,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON loads an arbitrary JSON payload into the request structure.
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Updatetrainedmodeldeployment request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/response.go
new file mode 100644
index 000000000..ac7226efa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/response.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatetrainedmodeldeployment + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatetrainedmodeldeployment +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentResponse.ts#L22-L26 +type Response struct { + Assignment types.TrainedModelAssignment `json:"assignment"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go new file mode 100644 index 000000000..99e7ebbeb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment/update_trained_model_deployment.go @@ -0,0 +1,400 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a trained model deployment. +package updatetrainedmodeldeployment + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + modelidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateTrainedModelDeployment struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + modelid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateTrainedModelDeployment type alias for index. +type NewUpdateTrainedModelDeployment func(modelid string) *UpdateTrainedModelDeployment + +// NewUpdateTrainedModelDeploymentFunc returns a new instance of UpdateTrainedModelDeployment with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewUpdateTrainedModelDeploymentFunc(tp elastictransport.Interface) NewUpdateTrainedModelDeployment { + return func(modelid string) *UpdateTrainedModelDeployment { + n := New(tp) + + n._modelid(modelid) + + return n + } +} + +// Update a trained model deployment. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment +func New(tp elastictransport.Interface) *UpdateTrainedModelDeployment { + r := &UpdateTrainedModelDeployment{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateTrainedModelDeployment) Raw(raw io.Reader) *UpdateTrainedModelDeployment { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateTrainedModelDeployment) Request(req *Request) *UpdateTrainedModelDeployment { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateTrainedModelDeployment) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateTrainedModelDeployment: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == modelidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("trained_models") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "modelid", r.modelid) + } + path.WriteString(r.modelid) + path.WriteString("/") + path.WriteString("deployment") + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateTrainedModelDeployment) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.update_trained_model_deployment") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.update_trained_model_deployment") + if reader := instrument.RecordRequestBody(ctx, "ml.update_trained_model_deployment", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.update_trained_model_deployment") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateTrainedModelDeployment query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatetrainedmodeldeployment.Response +func (r UpdateTrainedModelDeployment) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.update_trained_model_deployment") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateTrainedModelDeployment headers map. +func (r *UpdateTrainedModelDeployment) Header(key, value string) *UpdateTrainedModelDeployment { + r.headers.Set(key, value) + + return r +} + +// ModelId The unique identifier of the trained model. Currently, only PyTorch models +// are supported. +// API Name: modelid +func (r *UpdateTrainedModelDeployment) _modelid(modelid string) *UpdateTrainedModelDeployment { + r.paramSet |= modelidMask + r.modelid = modelid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateTrainedModelDeployment) ErrorTrace(errortrace bool) *UpdateTrainedModelDeployment { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateTrainedModelDeployment) FilterPath(filterpaths ...string) *UpdateTrainedModelDeployment { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateTrainedModelDeployment) Human(human bool) *UpdateTrainedModelDeployment { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateTrainedModelDeployment) Pretty(pretty bool) *UpdateTrainedModelDeployment { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Adaptive allocations configuration. When enabled, the number of allocations +// is set based on the current load. +// If adaptive_allocations is enabled, do not set the number of allocations +// manually. +// API name: adaptive_allocations +func (r *UpdateTrainedModelDeployment) AdaptiveAllocations(adaptiveallocations types.AdaptiveAllocationsSettingsVariant) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AdaptiveAllocations = adaptiveallocations.AdaptiveAllocationsSettingsCaster() + + return r +} + +// The number of model allocations on each node where the model is deployed. +// All allocations on a node share the same copy of the model in memory but use +// a separate set of threads to evaluate the model. +// Increasing this value generally increases the throughput. +// If this setting is greater than the number of hardware threads +// it will automatically be changed to a value less than the number of hardware +// threads. +// If adaptive_allocations is enabled, do not set this value, because it’s +// automatically set. +// API name: number_of_allocations +func (r *UpdateTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *UpdateTrainedModelDeployment { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.NumberOfAllocations = &numberofallocations + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/response.go new file mode 100644 index 000000000..43abdd5a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package upgradejobsnapshot
+
+// Response holds the response body struct for the package upgradejobsnapshot
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31
+type Response struct {
+
+	// Completed When true, this means the task is complete. When false, it is still running.
+	Completed bool `json:"completed"`
+	// Node The ID of the node that the upgrade task was started on if it is still
+	// running. In serverless, this will be "serverless".
+	Node string `json:"node"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go
new file mode 100644
index 000000000..ac34bef10
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go
@@ -0,0 +1,403 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Upgrade a snapshot.
+// Upgrade an anomaly detection model snapshot to the latest major version.
+// Over time, older snapshot formats are deprecated and removed. Anomaly
+// detection jobs support only snapshots that are from the current or previous
+// major version.
+// This API provides a means to upgrade a snapshot to the current major version.
+// This aids in preparing the cluster for an upgrade to the next major version.
+// Only one snapshot per anomaly detection job can be upgraded at a time and the
+// upgraded snapshot cannot be the current snapshot of the anomaly detection
+// job.
+package upgradejobsnapshot + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + jobidMask = iota + 1 + + snapshotidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpgradeJobSnapshot struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + jobid string + snapshotid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpgradeJobSnapshot type alias for index. +type NewUpgradeJobSnapshot func(jobid, snapshotid string) *UpgradeJobSnapshot + +// NewUpgradeJobSnapshotFunc returns a new instance of UpgradeJobSnapshot with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpgradeJobSnapshotFunc(tp elastictransport.Interface) NewUpgradeJobSnapshot { + return func(jobid, snapshotid string) *UpgradeJobSnapshot { + n := New(tp) + + n._jobid(jobid) + + n._snapshotid(snapshotid) + + return n + } +} + +// Upgrade a snapshot. +// Upgrade an anomaly detection model snapshot to the latest major version. +// Over time, older snapshot formats are deprecated and removed. Anomaly +// detection jobs support only snapshots that are from the current or previous +// major version. +// This API provides a means to upgrade a snapshot to the current major version. +// This aids in preparing the cluster for an upgrade to the next major version. +// Only one snapshot per anomaly detection job can be upgraded at a time and the +// upgraded snapshot cannot be the current snapshot of the anomaly detection +// job. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot +func New(tp elastictransport.Interface) *UpgradeJobSnapshot { + r := &UpgradeJobSnapshot{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpgradeJobSnapshot) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask|snapshotidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "jobid", r.jobid) + } + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("model_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshotid", r.snapshotid) + } + path.WriteString(r.snapshotid) + path.WriteString("/") + path.WriteString("_upgrade") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r UpgradeJobSnapshot) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.upgrade_job_snapshot") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.upgrade_job_snapshot") + if reader := instrument.RecordRequestBody(ctx, "ml.upgrade_job_snapshot", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.upgrade_job_snapshot") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpgradeJobSnapshot query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a upgradejobsnapshot.Response +func (r UpgradeJobSnapshot) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.upgrade_job_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + 
} + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r UpgradeJobSnapshot) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.upgrade_job_snapshot") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the UpgradeJobSnapshot query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the UpgradeJobSnapshot headers map. +func (r *UpgradeJobSnapshot) Header(key, value string) *UpgradeJobSnapshot { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. +// API Name: jobid +func (r *UpgradeJobSnapshot) _jobid(jobid string) *UpgradeJobSnapshot { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// SnapshotId A numerical character string that uniquely identifies the model snapshot. +// API Name: snapshotid +func (r *UpgradeJobSnapshot) _snapshotid(snapshotid string) *UpgradeJobSnapshot { + r.paramSet |= snapshotidMask + r.snapshotid = snapshotid + + return r +} + +// WaitForCompletion When true, the API won’t respond until the upgrade is complete. +// Otherwise, it responds as soon as the upgrade task is assigned to a node. +// API name: wait_for_completion +func (r *UpgradeJobSnapshot) WaitForCompletion(waitforcompletion bool) *UpgradeJobSnapshot { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Timeout Controls the time to wait for the request to complete. +// API name: timeout +func (r *UpgradeJobSnapshot) Timeout(duration string) *UpgradeJobSnapshot { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpgradeJobSnapshot) ErrorTrace(errortrace bool) *UpgradeJobSnapshot { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpgradeJobSnapshot) FilterPath(filterpaths ...string) *UpgradeJobSnapshot { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpgradeJobSnapshot) Human(human bool) *UpgradeJobSnapshot { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpgradeJobSnapshot) Pretty(pretty bool) *UpgradeJobSnapshot { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/request.go new file mode 100644 index 000000000..1b006f1dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/request.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
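
// Usage sketch for the ml.upgrade_job_snapshot endpoint above (client wiring
// and ids are illustrative assumptions). Do decodes the response body, while
// IsSuccess is the body-less shortcut generated for this endpoint:
//
//	res, err := es.ML.UpgradeJobSnapshot("my-anomaly-job", "1575402237").
//		WaitForCompletion(true).
//		Timeout("30m").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(res.Completed, res.Node)
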
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package validate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/validate/MlValidateJobRequest.ts#L27-L52 +type Request struct { + AnalysisConfig *types.AnalysisConfig `json:"analysis_config,omitempty"` + AnalysisLimits *types.AnalysisLimits `json:"analysis_limits,omitempty"` + DataDescription *types.DataDescription `json:"data_description,omitempty"` + Description *string `json:"description,omitempty"` + JobId *string `json:"job_id,omitempty"` + ModelPlot *types.ModelPlotConfig `json:"model_plot,omitempty"` + ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` + ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"` + ResultsIndexName *string `json:"results_index_name,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Validate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return fmt.Errorf("%s | %w", "AnalysisConfig", err) + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return fmt.Errorf("%s | %w", "AnalysisLimits", err) + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return fmt.Errorf("%s | %w", "DataDescription", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "model_plot": + if err := dec.Decode(&s.ModelPlot); err != nil { + return fmt.Errorf("%s | %w", "ModelPlot", err) + } + + case "model_snapshot_id": + if err := dec.Decode(&s.ModelSnapshotId); err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotId", err) + } + + case "model_snapshot_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = &value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return fmt.Errorf("%s | %w", "ResultsIndexName", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/response.go 
b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/response.go new file mode 100644 index 000000000..e258f9097 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validate + +// Response holds the response body struct for the package validate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/validate/MlValidateJobResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/validate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/validate.go new file mode 100644 index 000000000..590e7b79c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate/validate.go @@ -0,0 +1,445 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Validate an anomaly detection job. +package validate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Validate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewValidate type alias for index. +type NewValidate func() *Validate + +// NewValidateFunc returns a new instance of Validate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewValidateFunc(tp elastictransport.Interface) NewValidate { + return func() *Validate { + n := New(tp) + + return n + } +} + +// Validate an anomaly detection job. +// +// https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html +func New(tp elastictransport.Interface) *Validate { + r := &Validate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Validate) Raw(raw io.Reader) *Validate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Validate) Request(req *Request) *Validate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Validate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Validate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString("_validate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Validate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.validate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.validate") + if reader := instrument.RecordRequestBody(ctx, "ml.validate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.validate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Validate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a validate.Response +func (r Validate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.validate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Validate headers map. +func (r *Validate) Header(key, value string) *Validate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Validate) ErrorTrace(errortrace bool) *Validate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Validate) FilterPath(filterpaths ...string) *Validate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Validate) Human(human bool) *Validate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Validate) Pretty(pretty bool) *Validate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: analysis_config +func (r *Validate) AnalysisConfig(analysisconfig types.AnalysisConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisConfig = analysisconfig.AnalysisConfigCaster() + + return r +} + +// API name: analysis_limits +func (r *Validate) AnalysisLimits(analysislimits types.AnalysisLimitsVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalysisLimits = analysislimits.AnalysisLimitsCaster() + + return r +} + +// API name: data_description +func (r *Validate) DataDescription(datadescription types.DataDescriptionVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DataDescription = datadescription.DataDescriptionCaster() + + return r +} + +// API name: description +func (r *Validate) Description(description string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// API name: job_id +func (r *Validate) JobId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.JobId = &id + + return r +} + +// API name: model_plot +func (r *Validate) ModelPlot(modelplot types.ModelPlotConfigVariant) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelPlot = modelplot.ModelPlotConfigCaster() + + return r +} + +// API name: model_snapshot_id +func (r *Validate) ModelSnapshotId(id string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelSnapshotId = &id + + return r +} + +// API name: model_snapshot_retention_days +func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// API name: results_index_name +func (r *Validate) ResultsIndexName(indexname string) *Validate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ResultsIndexName = &indexname + + return r +} diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/request.go new file mode 100644 index 000000000..6055371b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validatedetector + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package validatedetector +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L40 +type Request = types.Detector + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewDetector() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/response.go new file mode 100644 index 000000000..cf1ae97b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package validatedetector + +// Response holds the response body struct for the package validatedetector +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. 
On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/validate_detector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/validate_detector.go new file mode 100644 index 000000000..169006249 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector/validate_detector.go @@ -0,0 +1,486 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Validate an anomaly detection job. +package validatedetector + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ValidateDetector struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewValidateDetector type alias for index. +type NewValidateDetector func() *ValidateDetector + +// NewValidateDetectorFunc returns a new instance of ValidateDetector with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewValidateDetectorFunc(tp elastictransport.Interface) NewValidateDetector { + return func() *ValidateDetector { + n := New(tp) + + return n + } +} + +// Validate an anomaly detection job. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch +func New(tp elastictransport.Interface) *ValidateDetector { + r := &ValidateDetector{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ValidateDetector) Raw(raw io.Reader) *ValidateDetector { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ValidateDetector) Request(req *Request) *ValidateDetector { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ValidateDetector) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ValidateDetector: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + path.WriteString("_validate") + path.WriteString("/") + path.WriteString("detector") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ValidateDetector) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ml.validate_detector") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ml.validate_detector") + if reader := instrument.RecordRequestBody(ctx, "ml.validate_detector", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ml.validate_detector") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ValidateDetector query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a validatedetector.Response +func (r ValidateDetector) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ml.validate_detector") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ValidateDetector headers map. +func (r *ValidateDetector) Header(key, value string) *ValidateDetector { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ValidateDetector) ErrorTrace(errortrace bool) *ValidateDetector { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ValidateDetector) FilterPath(filterpaths ...string) *ValidateDetector { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ValidateDetector) Human(human bool) *ValidateDetector { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ValidateDetector) Pretty(pretty bool) *ValidateDetector { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to their own history. It is used for +// finding unusual values in the context of the split. +// API name: by_field_name +func (r *ValidateDetector) ByFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ByFieldName = &field + + return r +} + +// Custom rules enable you to customize the way detectors operate. For example, +// a rule may dictate conditions under which results should be skipped. Kibana +// refers to custom rules as job rules. +// API name: custom_rules +func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRuleVariant) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range customrules { + + r.req.CustomRules = append(r.req.CustomRules, *v.DetectionRuleCaster()) + + } + return r +} + +// A description of the detector. +// API name: detector_description +func (r *ValidateDetector) DetectorDescription(detectordescription string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DetectorDescription = &detectordescription + + return r +} + +// A unique identifier for the detector. This identifier is based on the order +// of the detectors in the `analysis_config`, starting at zero. If you specify a +// value for this property, it is ignored. +// API name: detector_index +func (r *ValidateDetector) DetectorIndex(detectorindex int) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.DetectorIndex = &detectorindex + + return r +} + +// If set, frequent entities are excluded from influencing the anomaly results. +// Entities can be considered frequent over time or frequent in a population. If +// you are working with both over and by fields, you can set `exclude_frequent` +// to `all` for both fields, or to `by` or `over` for those specific fields. 
+// API name: exclude_frequent +func (r *ValidateDetector) ExcludeFrequent(excludefrequent excludefrequent.ExcludeFrequent) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.ExcludeFrequent = &excludefrequent + return r +} + +// The field that the detector uses in the function. If you use an event rate +// function such as count or rare, do not specify this field. The `field_name` +// cannot contain double quotes or backslashes. +// API name: field_name +func (r *ValidateDetector) FieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FieldName = &field + + return r +} + +// The analysis function that is used. For example, `count`, `rare`, `mean`, +// `min`, `max`, or `sum`. +// API name: function +func (r *ValidateDetector) Function(function string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Function = &function + + return r +} + +// The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to the history of all splits. It is used +// for finding unusual values in the population of all splits. +// API name: over_field_name +func (r *ValidateDetector) OverFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.OverFieldName = &field + + return r +} + +// The field used to segment the analysis. When you use this property, you have +// completely independent baselines for each value of this field. +// API name: partition_field_name +func (r *ValidateDetector) PartitionFieldName(field string) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PartitionFieldName = &field + + return r +} + +// Defines whether a new series is used as the null series when there is no +// value for the by or partition fields. +// API name: use_null +func (r *ValidateDetector) UseNull(usenull bool) *ValidateDetector { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.UseNull = &usenull + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/bulk.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/bulk.go new file mode 100644 index 000000000..fe7cbcbca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/bulk.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
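As with ml/validate earlier, a short usage sketch for the ml/validate_detector endpoint added above; the function, field, and by-field values are illustrative only, and `tp` is again assumed to be a configured transport:

	package example

	import (
		"context"

		"github.com/elastic/elastic-transport-go/v8/elastictransport"
		"github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector"
	)

	func validateDetector(tp elastictransport.Interface) (bool, error) {
		// The fluent setters mirror the fields of types.Detector,
		// which this package's Request aliases.
		res, err := validatedetector.New(tp).
			Function("mean").
			FieldName("responsetime").
			ByFieldName("airline").
			Do(context.Background())
		if err != nil {
			return false, err
		}
		return res.Acknowledged, nil
	}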
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. +package bulk + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + type_Mask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Bulk struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + type_ string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewBulk type alias for index. +type NewBulk func() *Bulk + +// NewBulkFunc returns a new instance of Bulk with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkFunc(tp elastictransport.Interface) NewBulk { + return func() *Bulk { + n := New(tp) + + return n + } +} + +// Send monitoring data. +// This API is used by the monitoring features to send monitoring data. +// +// https://www.elastic.co/docs/api/doc/elasticsearch +func New(tp elastictransport.Interface) *Bulk { + r := &Bulk{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Bulk) Raw(raw io.Reader) *Bulk { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Bulk) Request(req *Request) *Bulk { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Bulk) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Bulk: %w", err) + } + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_monitoring") + path.WriteString("/") + path.WriteString("bulk") + + method = http.MethodPost + case r.paramSet == type_Mask: + path.WriteString("/") + path.WriteString("_monitoring") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "type_", r.type_) + } + path.WriteString(r.type_) + path.WriteString("/") + path.WriteString("bulk") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Bulk) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "monitoring.bulk") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "monitoring.bulk") + if reader := instrument.RecordRequestBody(ctx, "monitoring.bulk", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "monitoring.bulk") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Bulk query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulk.Response +func (r Bulk) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "monitoring.bulk") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Bulk headers map. 
+func (r *Bulk) Header(key, value string) *Bulk { + r.headers.Set(key, value) + + return r +} + +// Type Default document type for items which don't provide one +// API Name: type_ +func (r *Bulk) Type(type_ string) *Bulk { + r.paramSet |= type_Mask + r.type_ = type_ + + return r +} + +// SystemId Identifier of the monitored system +// API name: system_id +func (r *Bulk) SystemId(systemid string) *Bulk { + r.values.Set("system_id", systemid) + + return r +} + +// API name: system_api_version +func (r *Bulk) SystemApiVersion(systemapiversion string) *Bulk { + r.values.Set("system_api_version", systemapiversion) + + return r +} + +// Interval Collection interval (e.g., '10s' or '10000ms') of the payload +// API name: interval +func (r *Bulk) Interval(duration string) *Bulk { + r.values.Set("interval", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Bulk) ErrorTrace(errortrace bool) *Bulk { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Bulk) FilterPath(filterpaths ...string) *Bulk { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Bulk) Human(human bool) *Bulk { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Bulk) Pretty(pretty bool) *Bulk { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/helpers.go new file mode 100644 index 000000000..927be62a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/helpers.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package bulk + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// CreateOp is a helper function to add a CreateOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. +func (r *Bulk) CreateOp(op types.CreateOperation, doc interface{}) error { + operation := types.OperationContainer{Create: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + return nil +} + +// IndexOp is a helper function to add an IndexOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. +func (r *Bulk) IndexOp(op types.IndexOperation, doc interface{}) error { + operation := types.OperationContainer{Index: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + return nil +} + +// UpdateOp is a helper function to add an UpdateOperation with and UpdateAction to the current bulk request. +// update is optional, if both doc and update.Doc are provided, update.Doc has precedence. 
+func (r *Bulk) UpdateOp(op types.UpdateOperation, doc interface{}, update *types.UpdateAction) error { + operation := types.OperationContainer{Update: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if update == nil { + update = types.NewUpdateAction() + } + + if len(update.Doc) == 0 { + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + update.Doc = body + } + } + + body, err := json.Marshal(update) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + return nil +} + +// DeleteOp is a helper function to add a DeleteOperation to the current bulk request. +func (r *Bulk) DeleteOp(op types.DeleteOperation) error { + operation := types.OperationContainer{Delete: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/request.go new file mode 100644 index 000000000..860c420d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/request.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
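The monitoring bulk helpers above mirror the NDJSON framing of the core bulk API: each Op call appends an action line plus an optional document line to the request buffer. A sketch of sending one monitoring document with them (the system id, API version, interval, and document contents are illustrative only):

	package example

	import (
		"context"

		"github.com/elastic/elastic-transport-go/v8/elastictransport"
		"github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk"
		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func sendMonitoring(tp elastictransport.Interface) error {
		b := bulk.New(tp).
			SystemId("kibana").
			SystemApiVersion("7").
			Interval("10s")

		// IndexOp writes the action header line and the document line,
		// matching the x-ndjson content type the request builder sets.
		doc := map[string]any{"type": "kibana_stats", "status": "green"}
		if err := b.IndexOp(types.IndexOperation{}, doc); err != nil {
			return err
		}

		_, err := b.Do(context.Background())
		return err
	}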
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulk + +// Request holds the request body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L73 +type Request = []any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/response.go new file mode 100644 index 000000000..64195cb17 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulk + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 +type Response struct { + Error *types.ErrorCause `json:"error,omitempty"` + // Errors True if there is was an error + Errors bool `json:"errors"` + // Ignored Was collection disabled? + Ignored bool `json:"ignored"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go new file mode 100644 index 000000000..2121268e3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. +package clearrepositoriesmeteringarchive + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 + + maxarchiveversionMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearRepositoriesMeteringArchive struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + maxarchiveversion string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearRepositoriesMeteringArchive type alias for index. +type NewClearRepositoriesMeteringArchive func(nodeid, maxarchiveversion string) *ClearRepositoriesMeteringArchive + +// NewClearRepositoriesMeteringArchiveFunc returns a new instance of ClearRepositoriesMeteringArchive with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearRepositoriesMeteringArchiveFunc(tp elastictransport.Interface) NewClearRepositoriesMeteringArchive { + return func(nodeid, maxarchiveversion string) *ClearRepositoriesMeteringArchive { + n := New(tp) + + n._nodeid(nodeid) + + n._maxarchiveversion(maxarchiveversion) + + return n + } +} + +// Clear the archived repositories metering. +// Clear the archived repositories metering information in the cluster. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive +func New(tp elastictransport.Interface) *ClearRepositoriesMeteringArchive { + r := &ClearRepositoriesMeteringArchive{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ClearRepositoriesMeteringArchive) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nodeidMask|maxarchiveversionMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("_repositories_metering") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "maxarchiveversion", r.maxarchiveversion) + } + path.WriteString(r.maxarchiveversion) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearRepositoriesMeteringArchive) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.clear_repositories_metering_archive") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.clear_repositories_metering_archive") + if reader := instrument.RecordRequestBody(ctx, "nodes.clear_repositories_metering_archive", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.clear_repositories_metering_archive") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearRepositoriesMeteringArchive query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearrepositoriesmeteringarchive.Response +func (r ClearRepositoriesMeteringArchive) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.clear_repositories_metering_archive") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ClearRepositoriesMeteringArchive) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.clear_repositories_metering_archive") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearRepositoriesMeteringArchive query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearRepositoriesMeteringArchive headers map. +func (r *ClearRepositoriesMeteringArchive) Header(key, value string) *ClearRepositoriesMeteringArchive { + r.headers.Set(key, value) + + return r +} + +// NodeId Comma-separated list of node IDs or names used to limit returned information. +// API Name: nodeid +func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearRepositoriesMeteringArchive { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// MaxArchiveVersion Specifies the maximum `archive_version` to be cleared from the archive. +// API Name: maxarchiveversion +func (r *ClearRepositoriesMeteringArchive) _maxarchiveversion(maxarchiveversion string) *ClearRepositoriesMeteringArchive { + r.paramSet |= maxarchiveversionMask + r.maxarchiveversion = maxarchiveversion + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearRepositoriesMeteringArchive) ErrorTrace(errortrace bool) *ClearRepositoriesMeteringArchive { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ClearRepositoriesMeteringArchive) FilterPath(filterpaths ...string) *ClearRepositoriesMeteringArchive { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearRepositoriesMeteringArchive) Human(human bool) *ClearRepositoriesMeteringArchive { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearRepositoriesMeteringArchive) Pretty(pretty bool) *ClearRepositoriesMeteringArchive { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/response.go new file mode 100644 index 000000000..b09572a6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearrepositoriesmeteringarchive + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearrepositoriesmeteringarchive +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L37-L40 +type Response struct { + + // ClusterName Name of the cluster. Based on the `cluster.name` setting. + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + // Nodes Contains repositories metering information for the nodes selected by the + // request. 
+ Nodes map[string]types.RepositoryMeteringInformation `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.RepositoryMeteringInformation, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go new file mode 100644 index 000000000..e9f595524 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cluster repositories metering. +// Get repositories metering information for a cluster. +// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. +package getrepositoriesmeteringinfo + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRepositoriesMeteringInfo struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRepositoriesMeteringInfo type alias for index. +type NewGetRepositoriesMeteringInfo func(nodeid string) *GetRepositoriesMeteringInfo + +// NewGetRepositoriesMeteringInfoFunc returns a new instance of GetRepositoriesMeteringInfo with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRepositoriesMeteringInfoFunc(tp elastictransport.Interface) NewGetRepositoriesMeteringInfo { + return func(nodeid string) *GetRepositoriesMeteringInfo { + n := New(tp) + + n._nodeid(nodeid) + + return n + } +} + +// Get cluster repositories metering. +// Get repositories metering information for a cluster. 
+// This API exposes monotonically non-decreasing counters and it is expected +// that clients would durably store the information needed to compute +// aggregations over a period of time. +// Additionally, the information exposed by this API is volatile, meaning that +// it will not be present after node restarts. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info +func New(tp elastictransport.Interface) *GetRepositoriesMeteringInfo { + r := &GetRepositoriesMeteringInfo{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRepositoriesMeteringInfo) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("_repositories_metering") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
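A short usage sketch for this endpoint (assumptions as before: `es.Nodes` wiring inferred from the `NewGetRepositoriesMeteringInfoFunc` alias, hypothetical node names). It issues GET /_nodes/{node_id}/_repositories_metering and iterates the `Nodes` map of the decoded response defined in this package's response.go.

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// printRepositoriesMetering fetches metering information for two
// hypothetical nodes and prints each per-node entry as decoded into
// types.RepositoryMeteringInformation.
func printRepositoriesMetering(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Nodes.GetRepositoriesMeteringInfo("node-1,node-2").Do(ctx)
	if err != nil {
		return err
	}
	for nodeID, info := range res.Nodes {
		fmt.Printf("%s: %+v\n", nodeID, info)
	}
	return nil
}
```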
+func (r GetRepositoriesMeteringInfo) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.get_repositories_metering_info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.get_repositories_metering_info") + if reader := instrument.RecordRequestBody(ctx, "nodes.get_repositories_metering_info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.get_repositories_metering_info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRepositoriesMeteringInfo query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrepositoriesmeteringinfo.Response +func (r GetRepositoriesMeteringInfo) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.get_repositories_metering_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
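The IsSuccess contract, as implemented in the function body that follows, is: any 2xx status yields (true, nil), a 404 yields (false, nil), and any other status or transport failure yields (false, err). A brief sketch of driving control flow off that contract (node ID is a placeholder):

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// checkMeteringEndpoint distinguishes the three IsSuccess outcomes:
// success, a 404 (reported as false with a nil error), and a real failure.
func checkMeteringEndpoint(ctx context.Context, es *elasticsearch.TypedClient) {
	ok, err := es.Nodes.GetRepositoriesMeteringInfo("node-1").IsSuccess(ctx)
	switch {
	case err != nil:
		fmt.Println("request failed:", err)
	case ok:
		fmt.Println("metering info available")
	default:
		fmt.Println("node or endpoint not found (404)")
	}
}
```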
+func (r GetRepositoriesMeteringInfo) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.get_repositories_metering_info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRepositoriesMeteringInfo query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRepositoriesMeteringInfo headers map. +func (r *GetRepositoriesMeteringInfo) Header(key, value string) *GetRepositoriesMeteringInfo { + r.headers.Set(key, value) + + return r +} + +// NodeId Comma-separated list of node IDs or names used to limit returned information. +// API Name: nodeid +func (r *GetRepositoriesMeteringInfo) _nodeid(nodeid string) *GetRepositoriesMeteringInfo { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRepositoriesMeteringInfo) ErrorTrace(errortrace bool) *GetRepositoriesMeteringInfo { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRepositoriesMeteringInfo) FilterPath(filterpaths ...string) *GetRepositoriesMeteringInfo { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRepositoriesMeteringInfo) Human(human bool) *GetRepositoriesMeteringInfo { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRepositoriesMeteringInfo) Pretty(pretty bool) *GetRepositoriesMeteringInfo { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/response.go new file mode 100644 index 000000000..51397b962 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrepositoriesmeteringinfo + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getrepositoriesmeteringinfo +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L39 +type Response struct { + + // ClusterName Name of the cluster. Based on the `cluster.name` setting. + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + // Nodes Contains repositories metering information for the nodes selected by the + // request. + Nodes map[string]types.RepositoryMeteringInformation `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.RepositoryMeteringInformation, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/hot_threads.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/hot_threads.go new file mode 100644 index 000000000..c4ac26303 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/hot_threads.go @@ -0,0 +1,414 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. 
+package hotthreads + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/threadtype" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type HotThreads struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHotThreads type alias for index. +type NewHotThreads func() *HotThreads + +// NewHotThreadsFunc returns a new instance of HotThreads with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHotThreadsFunc(tp elastictransport.Interface) NewHotThreads { + return func() *HotThreads { + n := New(tp) + + return n + } +} + +// Get the hot threads for nodes. +// Get a breakdown of the hot threads on each selected node in the cluster. +// The output is plain text with a breakdown of the top hot threads for each +// node. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads +func New(tp elastictransport.Interface) *HotThreads { + r := &HotThreads{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *HotThreads) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("hot_threads") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("hot_threads") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/plain") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
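Because this endpoint negotiates "text/plain" (see the Accept header set in HttpRequest above) rather than JSON, Perform plus a raw body read is the useful call here; the package's Response type is empty. A sketch using the query-parameter setters defined later in this file, with placeholder sampling values:

```go
package example

import (
	"context"
	"fmt"
	"io"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// dumpHotThreads prints the plain-text hot-threads report for all nodes,
// sampling the top 5 threads at a 500ms interval and skipping idle threads.
func dumpHotThreads(ctx context.Context, es *elasticsearch.TypedClient) error {
	req := es.Nodes.HotThreads().Threads("5").Interval("500ms").IgnoreIdleThreads(true)
	res, err := req.Perform(ctx)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	text, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	fmt.Println(string(text))
	return nil
}
```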
+func (r HotThreads) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.hot_threads") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.hot_threads") + if reader := instrument.RecordRequestBody(ctx, "nodes.hot_threads", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.hot_threads") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the HotThreads query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a hotthreads.Response +func (r HotThreads) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.hot_threads") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r HotThreads) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.hot_threads") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the HotThreads query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the HotThreads headers map. +func (r *HotThreads) Header(key, value string) *HotThreads { + r.headers.Set(key, value) + + return r +} + +// NodeId List of node IDs or names used to limit returned information. +// API Name: nodeid +func (r *HotThreads) NodeId(nodeid string) *HotThreads { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// IgnoreIdleThreads If true, known idle threads (e.g. waiting in a socket select, or to get +// a task from an empty queue) are filtered out. +// API name: ignore_idle_threads +func (r *HotThreads) IgnoreIdleThreads(ignoreidlethreads bool) *HotThreads { + r.values.Set("ignore_idle_threads", strconv.FormatBool(ignoreidlethreads)) + + return r +} + +// Interval The interval to do the second sampling of threads. +// API name: interval +func (r *HotThreads) Interval(duration string) *HotThreads { + r.values.Set("interval", duration) + + return r +} + +// Snapshots Number of samples of thread stacktrace. +// API name: snapshots +func (r *HotThreads) Snapshots(snapshots string) *HotThreads { + r.values.Set("snapshots", snapshots) + + return r +} + +// Threads Specifies the number of hot threads to provide information for. +// API name: threads +func (r *HotThreads) Threads(threads string) *HotThreads { + r.values.Set("threads", threads) + + return r +} + +// Timeout Period to wait for a response. If no response is received +// before the timeout expires, the request fails and returns an error. +// API name: timeout +func (r *HotThreads) Timeout(duration string) *HotThreads { + r.values.Set("timeout", duration) + + return r +} + +// Type The type to sample. +// API name: type +func (r *HotThreads) Type(type_ threadtype.ThreadType) *HotThreads { + r.values.Set("type", type_.String()) + + return r +} + +// Sort The sort order for 'cpu' type (default: total) +// API name: sort +func (r *HotThreads) Sort(sort threadtype.ThreadType) *HotThreads { + r.values.Set("sort", sort.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *HotThreads) ErrorTrace(errortrace bool) *HotThreads { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *HotThreads) FilterPath(filterpaths ...string) *HotThreads { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *HotThreads) Human(human bool) *HotThreads { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *HotThreads) Pretty(pretty bool) *HotThreads { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/response.go new file mode 100644 index 000000000..a06008b55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package hotthreads + +// Response holds the response body struct for the package hotthreads +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/info.go new file mode 100644 index 000000000..782bce318 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/info.go @@ -0,0 +1,409 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. +package info + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 + + metricMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + metric string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInfo type alias for index. +type NewInfo func() *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func() *Info { + n := New(tp) + + return n + } +} + +// Get node information. +// +// By default, the API returns all attributes and core settings for cluster +// nodes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_nodes") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + + method = http.MethodGet + case r.paramSet == metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + case r.paramSet == nodeidMask|metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
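The path switch above supports four combinations: /_nodes, /_nodes/{node_id}, /_nodes/{metric}, and /_nodes/{node_id}/{metric}. A sketch exercising the most specific branch, using the NodeId and Metric setters defined further down in this file (node ID and metric list are placeholders):

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// nodeHTTPInfo issues GET /_nodes/node-1/http,ingest, restricting both the
// nodes and the metrics returned, then reports how many nodes matched.
func nodeHTTPInfo(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Nodes.Info().NodeId("node-1").Metric("http,ingest").Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("cluster %s reported %d node(s)\n", res.ClusterName, len(res.Nodes))
	return nil
}
```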
+func (r Info) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.info") + if reader := instrument.RecordRequestBody(ctx, "nodes.info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Info query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Info) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Info query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// NodeId Comma-separated list of node IDs or names used to limit returned information. +// API Name: nodeid +func (r *Info) NodeId(nodeid string) *Info { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// Metric Limits the information returned to the specific metrics. Supports a +// comma-separated list, such as http,ingest. +// API Name: metric +func (r *Info) Metric(metric string) *Info { + r.paramSet |= metricMask + r.metric = metric + + return r +} + +// FlatSettings If true, returns settings in flat format. +// API name: flat_settings +func (r *Info) FlatSettings(flatsettings bool) *Info { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Info) Timeout(duration string) *Info { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Info) ErrorTrace(errortrace bool) *Info { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Info) FilterPath(filterpaths ...string) *Info { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Info) Pretty(pretty bool) *Info { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/response.go new file mode 100644 index 000000000..c4004268a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package info + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/NodesInfoResponse.ts#L30-L33 +type Response struct { + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeInfo `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeInfo, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go new file mode 100644 index 000000000..ccd277f46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go @@ -0,0 +1,420 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. +// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. +package reloadsecuresettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ReloadSecureSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewReloadSecureSettings type alias for index. +type NewReloadSecureSettings func() *ReloadSecureSettings + +// NewReloadSecureSettingsFunc returns a new instance of ReloadSecureSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewReloadSecureSettingsFunc(tp elastictransport.Interface) NewReloadSecureSettings { + return func() *ReloadSecureSettings { + n := New(tp) + + return n + } +} + +// Reload the keystore on nodes in the cluster. +// +// Secure settings are stored in an on-disk keystore. Certain of these settings +// are reloadable. +// That is, you can change them on disk and reload them without restarting any +// nodes in the cluster. +// When you have updated reloadable secure settings in your keystore, you can +// use this API to reload those settings on each node. +// +// When the Elasticsearch keystore is password protected and not simply +// obfuscated, you must provide the password for the keystore when you reload +// the secure settings. +// Reloading the settings for the whole cluster assumes that the keystores for +// all nodes are protected with the same password; this method is allowed only +// when inter-node communications are encrypted. +// Alternatively, you can reload the secure settings on each node by locally +// accessing the API and passing the node-specific Elasticsearch keystore +// password. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings +func New(tp elastictransport.Interface) *ReloadSecureSettings { + r := &ReloadSecureSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ReloadSecureSettings) Raw(raw io.Reader) *ReloadSecureSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ReloadSecureSettings) Request(req *Request) *ReloadSecureSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ReloadSecureSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ReloadSecureSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("reload_secure_settings") + + method = http.MethodPost + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("reload_secure_settings") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
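A sketch of a password-protected keystore reload (node ID and password are placeholders). It uses the NodeId and SecureSettingsPassword setters defined later in this file; SecureSettingsPassword lazily initializes the request body, which HttpRequest above then serializes as {"secure_settings_password": ...} on the POST.

```go
package example

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// reloadKeystore reloads reloadable secure settings on a single node,
// supplying the keystore password in the request body.
func reloadKeystore(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Nodes.ReloadSecureSettings().
		NodeId("node-1").
		SecureSettingsPassword("keystore-password").
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("reloaded on cluster:", res.ClusterName)
	return nil
}
```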
+func (r ReloadSecureSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.reload_secure_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.reload_secure_settings") + if reader := instrument.RecordRequestBody(ctx, "nodes.reload_secure_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.reload_secure_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ReloadSecureSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a reloadsecuresettings.Response +func (r ReloadSecureSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.reload_secure_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ReloadSecureSettings headers map. +func (r *ReloadSecureSettings) Header(key, value string) *ReloadSecureSettings { + r.headers.Set(key, value) + + return r +} + +// NodeId The names of particular nodes in the cluster to target. +// API Name: nodeid +func (r *ReloadSecureSettings) NodeId(nodeid string) *ReloadSecureSettings { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: timeout +func (r *ReloadSecureSettings) Timeout(duration string) *ReloadSecureSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ReloadSecureSettings) ErrorTrace(errortrace bool) *ReloadSecureSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ReloadSecureSettings) FilterPath(filterpaths ...string) *ReloadSecureSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ReloadSecureSettings) Human(human bool) *ReloadSecureSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ReloadSecureSettings) Pretty(pretty bool) *ReloadSecureSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The password for the Elasticsearch keystore. +// API name: secure_settings_password +func (r *ReloadSecureSettings) SecureSettingsPassword(password string) *ReloadSecureSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecureSettingsPassword = &password + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/request.go new file mode 100644 index 000000000..04b4c4d24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/request.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reloadsecuresettings + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package reloadsecuresettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L70 +type Request struct { + + // SecureSettingsPassword The password for the Elasticsearch keystore. + SecureSettingsPassword *string `json:"secure_settings_password,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Reloadsecuresettings request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "secure_settings_password": + if err := dec.Decode(&s.SecureSettingsPassword); err != nil { + return fmt.Errorf("%s | %w", "SecureSettingsPassword", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/response.go new file mode 100644 index 000000000..099b2b0a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package reloadsecuresettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package reloadsecuresettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L33 +type Response struct { + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. 
+ NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeReloadResult `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeReloadResult, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/response.go new file mode 100644 index 000000000..cc5fef916 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/stats/NodesStatsResponse.ts#L30-L33 +type Response struct { + ClusterName *string `json:"cluster_name,omitempty"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.Stats `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.Stats, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/stats.go new file mode 100644 index 000000000..8e8222133 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats/stats.go @@ -0,0 +1,540 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level" +) + +const ( + nodeidMask = iota + 1 + + metricMask + + indexmetricMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + metric string + indexmetric string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get node statistics. +// Get statistics for nodes in a cluster. +// By default, all stats are returned. You can limit the returned information by +// using metrics. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
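+//
+// Editorial note, not part of the upstream generated file: the path is
+// selected from the bitmask that NodeId, Metric and IndexMetric accumulate
+// in paramSet. A sketch, assuming `tp` is a configured
+// elastictransport.Interface:
+//
+//	req, err := New(tp).
+//		NodeId("node-1").
+//		Metric("jvm,os").
+//		HttpRequest(context.Background())
+//	// err == nil, req.Method == "GET",
+//	// req.URL.Path == "/_nodes/node-1/stats/jvm,os"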
+func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + case r.paramSet == nodeidMask|metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + case r.paramSet == metricMask|indexmetricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "indexmetric", r.indexmetric) + } + path.WriteString(r.indexmetric) + + method = http.MethodGet + case r.paramSet == nodeidMask|metricMask|indexmetricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "indexmetric", r.indexmetric) + } + path.WriteString(r.indexmetric) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil 
+} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.stats") + if reader := instrument.RecordRequestBody(ctx, "nodes.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
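+//
+// Editorial sketch, not part of the upstream generated file, assuming `tp`
+// is a configured elastictransport.Interface:
+//
+//	ok, err := New(tp).IsSuccess(context.Background())
+//	if err == nil && ok {
+//		// the stats endpoint answered with a 2xx status code
+//	}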
+func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// NodeId Comma-separated list of node IDs or names used to limit returned information. +// API Name: nodeid +func (r *Stats) NodeId(nodeid string) *Stats { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// Metric Limit the information returned to the specified metrics +// API Name: metric +func (r *Stats) Metric(metric string) *Stats { + r.paramSet |= metricMask + r.metric = metric + + return r +} + +// IndexMetric Limit the information returned for indices metric to the specific index +// metrics. It can be used only if indices (or all) metric is specified. +// API Name: indexmetric +func (r *Stats) IndexMetric(indexmetric string) *Stats { + r.paramSet |= indexmetricMask + r.indexmetric = indexmetric + + return r +} + +// CompletionFields Comma-separated list or wildcard expressions of fields to include in +// fielddata and suggest statistics. +// API name: completion_fields +func (r *Stats) CompletionFields(fields ...string) *Stats { + r.values.Set("completion_fields", strings.Join(fields, ",")) + + return r +} + +// FielddataFields Comma-separated list or wildcard expressions of fields to include in +// fielddata statistics. +// API name: fielddata_fields +func (r *Stats) FielddataFields(fields ...string) *Stats { + r.values.Set("fielddata_fields", strings.Join(fields, ",")) + + return r +} + +// Fields Comma-separated list or wildcard expressions of fields to include in the +// statistics. +// API name: fields +func (r *Stats) Fields(fields ...string) *Stats { + r.values.Set("fields", strings.Join(fields, ",")) + + return r +} + +// Groups Comma-separated list of search groups to include in the search statistics. +// API name: groups +func (r *Stats) Groups(groups bool) *Stats { + r.values.Set("groups", strconv.FormatBool(groups)) + + return r +} + +// IncludeSegmentFileSizes If true, the call reports the aggregated disk usage of each one of the Lucene +// index files (only applies if segment stats are requested). +// API name: include_segment_file_sizes +func (r *Stats) IncludeSegmentFileSizes(includesegmentfilesizes bool) *Stats { + r.values.Set("include_segment_file_sizes", strconv.FormatBool(includesegmentfilesizes)) + + return r +} + +// Level Indicates whether statistics are aggregated at the cluster, index, or shard +// level. +// API name: level +func (r *Stats) Level(level level.Level) *Stats { + r.values.Set("level", level.String()) + + return r +} + +// Timeout Period to wait for a response. 
If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) + + return r +} + +// Types A comma-separated list of document types for the indexing index metric. +// API name: types +func (r *Stats) Types(types ...string) *Stats { + tmp := []string{} + for _, item := range types { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("types", strings.Join(tmp, ",")) + + return r +} + +// IncludeUnloadedSegments If `true`, the response includes information from segments that are not +// loaded into memory. +// API name: include_unloaded_segments +func (r *Stats) IncludeUnloadedSegments(includeunloadedsegments bool) *Stats { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/response.go new file mode 100644 index 000000000..026b2732e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package usage + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package usage +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/usage/NodesUsageResponse.ts#L30-L33 +type Response struct { + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeUsage `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeUsage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/usage.go new file mode 100644 index 000000000..fd14c8123 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage/usage.go @@ -0,0 +1,406 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get feature usage information. +package usage + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 + + metricMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Usage struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + metric string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUsage type alias for index. +type NewUsage func() *Usage + +// NewUsageFunc returns a new instance of Usage with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUsageFunc(tp elastictransport.Interface) NewUsage { + return func() *Usage { + n := New(tp) + + return n + } +} + +// Get feature usage information. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage +func New(tp elastictransport.Interface) *Usage { + r := &Usage{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Usage) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("usage") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("usage") + + method = http.MethodGet + case r.paramSet == metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + path.WriteString("usage") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + case r.paramSet == nodeidMask|metricMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("usage") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
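+//
+// Editorial note, not part of the upstream generated file: Perform returns
+// the raw *http.Response and leaves the body open, so callers must close it
+// themselves; Do, defined below, adds JSON decoding on top. A sketch,
+// assuming `tp` is a configured elastictransport.Interface:
+//
+//	res, err := New(tp).NodeId("_local").Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//		// inspect res.StatusCode or stream res.Body
+//	}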
+func (r Usage) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "nodes.usage") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "nodes.usage") + if reader := instrument.RecordRequestBody(ctx, "nodes.usage", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "nodes.usage") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Usage query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a usage.Response +func (r Usage) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
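+//
+// Editorial sketch, not part of the upstream generated file: combined with
+// the NewUsageFunc constructor above, a quick probe reads, assuming `tp` is
+// a configured elastictransport.Interface:
+//
+//	ok, err := NewUsageFunc(tp)().Metric("rest_actions").IsSuccess(context.Background())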
+func (r Usage) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "nodes.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Usage query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Usage headers map. +func (r *Usage) Header(key, value string) *Usage { + r.headers.Set(key, value) + + return r +} + +// NodeId A comma-separated list of node IDs or names to limit the returned +// information; use `_local` to return information from the node you're +// connecting to, leave empty to get information from all nodes +// API Name: nodeid +func (r *Usage) NodeId(nodeid string) *Usage { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// Metric Limits the information returned to the specific metrics. +// A comma-separated list of the following options: `_all`, `rest_actions`. +// API Name: metric +func (r *Usage) Metric(metric string) *Usage { + r.paramSet |= metricMask + r.metric = metric + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Usage) Timeout(duration string) *Usage { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Usage) ErrorTrace(errortrace bool) *Usage { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Usage) FilterPath(filterpaths ...string) *Usage { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Usage) Human(human bool) *Usage { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Usage) Pretty(pretty bool) *Usage { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/flamegraph/flamegraph.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/flamegraph/flamegraph.go new file mode 100644 index 000000000..27410bb94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/flamegraph/flamegraph.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Extracts a UI-optimized structure to render flamegraphs from Universal +// Profiling. +package flamegraph + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Flamegraph struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewFlamegraph type alias for index. +type NewFlamegraph func() *Flamegraph + +// NewFlamegraphFunc returns a new instance of Flamegraph with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFlamegraphFunc(tp elastictransport.Interface) NewFlamegraph { + return func() *Flamegraph { + n := New(tp) + + return n + } +} + +// Extracts a UI-optimized structure to render flamegraphs from Universal +// Profiling. +// +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func New(tp elastictransport.Interface) *Flamegraph { + r := &Flamegraph{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Flamegraph) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_profiling") + path.WriteString("/") + path.WriteString("flamegraph") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Flamegraph) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "profiling.flamegraph") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.flamegraph") + if reader := instrument.RecordRequestBody(ctx, "profiling.flamegraph", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.flamegraph") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Flamegraph query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a flamegraph.Response +func (r Flamegraph) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
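+//
+// Editorial note, not part of the upstream generated file: for this
+// endpoint Do simply delegates here, so both report only whether the call
+// succeeded. A sketch, assuming `tp` is a configured
+// elastictransport.Interface:
+//
+//	ok, err := New(tp).IsSuccess(context.Background())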
+func (r Flamegraph) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.flamegraph") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Flamegraph query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Flamegraph headers map. +func (r *Flamegraph) Header(key, value string) *Flamegraph { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/stacktraces/stacktraces.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/stacktraces/stacktraces.go new file mode 100644 index 000000000..9e07ba170 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/stacktraces/stacktraces.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Extracts raw stacktrace information from Universal Profiling. +package stacktraces + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stacktraces struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStacktraces type alias for index. +type NewStacktraces func() *Stacktraces + +// NewStacktracesFunc returns a new instance of Stacktraces with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
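+//
+// Editorial sketch, not part of the upstream generated file: the returned
+// constructor yields a fresh builder per call, so a one-off request reads,
+// assuming `tp` is a configured elastictransport.Interface:
+//
+//	ok, err := NewStacktracesFunc(tp)().Do(context.Background())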
+func NewStacktracesFunc(tp elastictransport.Interface) NewStacktraces { + return func() *Stacktraces { + n := New(tp) + + return n + } +} + +// Extracts raw stacktrace information from Universal Profiling. +// +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func New(tp elastictransport.Interface) *Stacktraces { + r := &Stacktraces{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stacktraces) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_profiling") + path.WriteString("/") + path.WriteString("stacktraces") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Stacktraces) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "profiling.stacktraces") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.stacktraces") + if reader := instrument.RecordRequestBody(ctx, "profiling.stacktraces", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.stacktraces") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stacktraces query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stacktraces.Response +func (r Stacktraces) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
+// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Stacktraces) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.stacktraces") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stacktraces query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stacktraces headers map. +func (r *Stacktraces) Header(key, value string) *Stacktraces { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/status/status.go new file mode 100644 index 000000000..55bcc2565 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/status/status.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Returns basic information about the status of Universal Profiling. +package status + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Status struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStatus type alias for index. +type NewStatus func() *Status + +// NewStatusFunc returns a new instance of Status with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewStatusFunc(tp elastictransport.Interface) NewStatus { + return func() *Status { + n := New(tp) + + return n + } +} + +// Returns basic information about the status of Universal Profiling. +// +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func New(tp elastictransport.Interface) *Status { + r := &Status{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Status) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_profiling") + path.WriteString("/") + path.WriteString("status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Status) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "profiling.status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.status") + if reader := instrument.RecordRequestBody(ctx, "profiling.status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Status query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a status.Response +func (r Status) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
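+//
+// Editorial sketch, not part of the upstream generated file: a quick
+// "is Universal Profiling reachable" probe, assuming `tp` is a configured
+// elastictransport.Interface:
+//
+//	ok, err := New(tp).IsSuccess(context.Background())
+//	if err != nil || !ok {
+//		// transport error, non-2xx response, or a plain 404
+//	}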
+func (r Status) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Status query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Status headers map. +func (r *Status) Header(key, value string) *Status { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/topnfunctions/topn_functions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/topnfunctions/topn_functions.go new file mode 100644 index 000000000..e6595052a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/profiling/topnfunctions/topn_functions.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Extracts a list of topN functions from Universal Profiling. +package topnfunctions + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type TopnFunctions struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTopnFunctions type alias for index. +type NewTopnFunctions func() *TopnFunctions + +// NewTopnFunctionsFunc returns a new instance of TopnFunctions with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
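+//
+// Illustrative sketch only, assuming "tp" is a configured
+// elastictransport.Interface and "ctx" a context.Context; the header shown is
+// an arbitrary example value:
+//
+//	topn := NewTopnFunctionsFunc(tp)()
+//	ok, err := topn.Header("X-Opaque-Id", "profiling-example").Do(ctx)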
+func NewTopnFunctionsFunc(tp elastictransport.Interface) NewTopnFunctions { + return func() *TopnFunctions { + n := New(tp) + + return n + } +} + +// Extracts a list of topN functions from Universal Profiling. +// +// https://www.elastic.co/guide/en/observability/current/universal-profiling.html +func New(tp elastictransport.Interface) *TopnFunctions { + r := &TopnFunctions{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *TopnFunctions) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_profiling") + path.WriteString("/") + path.WriteString("topn") + path.WriteString("/") + path.WriteString("functions") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
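+// The *http.Response is handed back as-is, so the caller is expected to drain
+// and close the body (mirroring what IsSuccess does internally). Illustrative
+// sketch, reusing the assumed "tp" and "ctx" from above:
+//
+//	res, err := NewTopnFunctionsFunc(tp)().Perform(ctx)
+//	if err == nil {
+//		io.Copy(io.Discard, res.Body)
+//		res.Body.Close()
+//	}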
+func (r TopnFunctions) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "profiling.topn_functions") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "profiling.topn_functions") + if reader := instrument.RecordRequestBody(ctx, "profiling.topn_functions", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "profiling.topn_functions") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the TopnFunctions query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a topnfunctions.Response +func (r TopnFunctions) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r TopnFunctions) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "profiling.topn_functions") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the TopnFunctions query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the TopnFunctions headers map. +func (r *TopnFunctions) Header(key, value string) *TopnFunctions { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/delete_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/delete_rule.go new file mode 100644 index 000000000..6c464b894 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/delete_rule.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. +package deleterule + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + rulesetid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteRule type alias for index. +type NewDeleteRule func(rulesetid, ruleid string) *DeleteRule + +// NewDeleteRuleFunc returns a new instance of DeleteRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteRuleFunc(tp elastictransport.Interface) NewDeleteRule { + return func(rulesetid, ruleid string) *DeleteRule { + n := New(tp) + + n._rulesetid(rulesetid) + + n._ruleid(ruleid) + + return n + } +} + +// Delete a query rule. +// Delete a query rule within a query ruleset. +// This is a destructive action that is only recoverable by re-adding the same +// rule with the create or update query rule API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule +func New(tp elastictransport.Interface) *DeleteRule { + r := &DeleteRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
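+//
+// Illustrative sketch only; "my-ruleset" and "my-rule" are placeholder IDs:
+//
+//	req, err := NewDeleteRuleFunc(tp)("my-ruleset", "my-rule").HttpRequest(ctx)
+//	// On success, req.Method is DELETE and req.URL.Path is
+//	// "/_query_rules/my-ruleset/_rule/my-rule".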
+func (r *DeleteRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.delete_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.delete_rule") + if reader := instrument.RecordRequestBody(ctx, "query_rules.delete_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.delete_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleterule.Response +func (r DeleteRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteRule) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteRule query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteRule headers map. +func (r *DeleteRule) Header(key, value string) *DeleteRule { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset containing the rule to delete +// API Name: rulesetid +func (r *DeleteRule) _rulesetid(rulesetid string) *DeleteRule { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// RuleId The unique identifier of the query rule within the specified ruleset to +// delete +// API Name: ruleid +func (r *DeleteRule) _ruleid(ruleid string) *DeleteRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteRule) ErrorTrace(errortrace bool) *DeleteRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteRule) FilterPath(filterpaths ...string) *DeleteRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DeleteRule) Human(human bool) *DeleteRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteRule) Pretty(pretty bool) *DeleteRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/response.go new file mode 100644 index 000000000..d0bf9125e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleterule + +// Response holds the response body struct for the package deleterule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/delete_rule/QueryRuleDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/delete_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/delete_ruleset.go new file mode 100644 index 000000000..1338556af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/delete_ruleset.go @@ -0,0 +1,346 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. +package deleteruleset + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteRuleset struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + rulesetid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteRuleset type alias for index. +type NewDeleteRuleset func(rulesetid string) *DeleteRuleset + +// NewDeleteRulesetFunc returns a new instance of DeleteRuleset with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteRulesetFunc(tp elastictransport.Interface) NewDeleteRuleset { + return func(rulesetid string) *DeleteRuleset { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Delete a query ruleset. +// Remove a query ruleset and its associated data. +// This is a destructive action that is not recoverable. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset +func New(tp elastictransport.Interface) *DeleteRuleset { + r := &DeleteRuleset{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteRuleset) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteRuleset) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.delete_ruleset") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.delete_ruleset") + if reader := instrument.RecordRequestBody(ctx, "query_rules.delete_ruleset", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.delete_ruleset") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteRuleset query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteruleset.Response +func (r DeleteRuleset) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
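+// A 404 is reported as (false, nil) rather than as an error. Illustrative
+// sketch with a placeholder ruleset ID:
+//
+//	ok, err := NewDeleteRulesetFunc(tp)("my-ruleset").IsSuccess(ctx)
+//	// ok == false with err == nil means the ruleset did not exist.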
+func (r DeleteRuleset) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.delete_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteRuleset query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteRuleset headers map. +func (r *DeleteRuleset) Header(key, value string) *DeleteRuleset { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset to delete +// API Name: rulesetid +func (r *DeleteRuleset) _rulesetid(rulesetid string) *DeleteRuleset { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteRuleset) ErrorTrace(errortrace bool) *DeleteRuleset { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteRuleset) FilterPath(filterpaths ...string) *DeleteRuleset { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteRuleset) Human(human bool) *DeleteRuleset { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteRuleset) Pretty(pretty bool) *DeleteRuleset { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/response.go new file mode 100644 index 000000000..23687fec3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteruleset + +// Response holds the response body struct for the package deleteruleset +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/delete_ruleset/QueryRulesetDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/get_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/get_rule.go new file mode 100644 index 000000000..8af122ae7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/get_rule.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a query rule. +// Get details about a query rule within a query ruleset. +package getrule + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + rulesetid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRule type alias for index. 
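+//
+// Illustrative sketch only; both path parameters are bound up front by the
+// factory ("my-ruleset" and "my-rule" are placeholders):
+//
+//	rule, err := NewGetRuleFunc(tp)("my-ruleset", "my-rule").Do(ctx)
+//	// rule is a *getrule.Response exposing RuleId, Type, Criteria and Actions.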
+type NewGetRule func(rulesetid, ruleid string) *GetRule + +// NewGetRuleFunc returns a new instance of GetRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRuleFunc(tp elastictransport.Interface) NewGetRule { + return func(rulesetid, ruleid string) *GetRule { + n := New(tp) + + n._rulesetid(rulesetid) + + n._ruleid(ruleid) + + return n + } +} + +// Get a query rule. +// Get details about a query rule within a query ruleset. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule +func New(tp elastictransport.Interface) *GetRule { + r := &GetRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.get_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.get_rule") + if reader := instrument.RecordRequestBody(ctx, "query_rules.get_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.get_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrule.Response +func (r GetRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetRule) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRule query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRule headers map. +func (r *GetRule) Header(key, value string) *GetRule { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset containing the rule to retrieve +// API Name: rulesetid +func (r *GetRule) _rulesetid(rulesetid string) *GetRule { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// RuleId The unique identifier of the query rule within the specified ruleset to +// retrieve +// API Name: ruleid +func (r *GetRule) _ruleid(ruleid string) *GetRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRule) ErrorTrace(errortrace bool) *GetRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRule) FilterPath(filterpaths ...string) *GetRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRule) Human(human bool) *GetRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRule) Pretty(pretty bool) *GetRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/response.go new file mode 100644 index 000000000..7f2bee701 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule/response.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrule + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype" +) + +// Response holds the response body struct for the package getrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/get_rule/QueryRuleGetResponse.ts#L22-L25 +type Response struct { + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. + Type queryruletype.QueryRuleType `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/get_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/get_ruleset.go new file mode 100644 index 000000000..b6ed5049f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/get_ruleset.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a query ruleset. +// Get details about a query ruleset. 
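+//
+// Illustrative sketch only, assuming "tp" is a configured
+// elastictransport.Interface and "my-ruleset" is a placeholder ID:
+//
+//	res, err := NewGetRulesetFunc(tp)("my-ruleset").Do(ctx)
+//	// res.RulesetId and res.Rules mirror the getruleset.Response struct.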
+package getruleset + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRuleset struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + rulesetid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRuleset type alias for index. +type NewGetRuleset func(rulesetid string) *GetRuleset + +// NewGetRulesetFunc returns a new instance of GetRuleset with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRulesetFunc(tp elastictransport.Interface) NewGetRuleset { + return func(rulesetid string) *GetRuleset { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Get a query ruleset. +// Get details about a query ruleset. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset +func New(tp elastictransport.Interface) *GetRuleset { + r := &GetRuleset{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRuleset) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetRuleset) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.get_ruleset") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.get_ruleset") + if reader := instrument.RecordRequestBody(ctx, "query_rules.get_ruleset", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.get_ruleset") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRuleset query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getruleset.Response +func (r GetRuleset) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetRuleset) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.get_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRuleset query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRuleset headers map. +func (r *GetRuleset) Header(key, value string) *GetRuleset { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset +// API Name: rulesetid +func (r *GetRuleset) _rulesetid(rulesetid string) *GetRuleset { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRuleset) ErrorTrace(errortrace bool) *GetRuleset { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRuleset) FilterPath(filterpaths ...string) *GetRuleset { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRuleset) Human(human bool) *GetRuleset { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRuleset) Pretty(pretty bool) *GetRuleset { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/response.go new file mode 100644 index 000000000..10e662c9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getruleset + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getruleset +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/get_ruleset/QueryRulesetGetResponse.ts#L22-L25 +type Response struct { + + // Rules Rules associated with the query ruleset. + Rules []types.QueryRule `json:"rules"` + // RulesetId A unique identifier for the ruleset. + RulesetId string `json:"ruleset_id"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/list_rulesets.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/list_rulesets.go new file mode 100644 index 000000000..ba76238b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/list_rulesets.go @@ -0,0 +1,337 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get all query rulesets. +// Get summarized information about the query rulesets. +package listrulesets + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ListRulesets struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewListRulesets type alias for index. 
+type NewListRulesets func() *ListRulesets + +// NewListRulesetsFunc returns a new instance of ListRulesets with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListRulesetsFunc(tp elastictransport.Interface) NewListRulesets { + return func() *ListRulesets { + n := New(tp) + + return n + } +} + +// Get all query rulesets. +// Get summarized information about the query rulesets. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets +func New(tp elastictransport.Interface) *ListRulesets { + r := &ListRulesets{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ListRulesets) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_query_rules") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
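+// Most callers will want Do, which decodes the body into a typed Response;
+// Perform is the lower-level variant that returns the raw *http.Response.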
+func (r ListRulesets) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.list_rulesets") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.list_rulesets") + if reader := instrument.RecordRequestBody(ctx, "query_rules.list_rulesets", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.list_rulesets") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ListRulesets query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a listrulesets.Response +func (r ListRulesets) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.list_rulesets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
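+//
+// For example (hypothetical client variable `es`; paging values are arbitrary):
+//
+//	ok, err := es.QueryRules.ListRulesets().From(0).Size(20).IsSuccess(ctx)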
+func (r ListRulesets) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.list_rulesets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ListRulesets query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ListRulesets headers map. +func (r *ListRulesets) Header(key, value string) *ListRulesets { + r.headers.Set(key, value) + + return r +} + +// From The offset from the first result to fetch. +// API name: from +func (r *ListRulesets) From(from int) *ListRulesets { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size The maximum number of results to retrieve. +// API name: size +func (r *ListRulesets) Size(size int) *ListRulesets { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ListRulesets) ErrorTrace(errortrace bool) *ListRulesets { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ListRulesets) FilterPath(filterpaths ...string) *ListRulesets { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ListRulesets) Human(human bool) *ListRulesets { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ListRulesets) Pretty(pretty bool) *ListRulesets { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/response.go new file mode 100644 index 000000000..3c4b19322 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package listrulesets + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package listrulesets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/list_rulesets/QueryRulesetListResponse.ts#L23-L28 +type Response struct { + Count int64 `json:"count"` + Results []types.QueryRulesetListItem `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/put_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/put_rule.go new file mode 100644 index 000000000..bcd9b91b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/put_rule.go @@ -0,0 +1,451 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. 
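+//
+// For illustration only, a pinned rule body has roughly this shape (made-up
+// values; the exact fields are defined by this package's Request type):
+//
+//	{
+//	  "type": "pinned",
+//	  "criteria": [{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
+//	  "actions": {"ids": ["id1", "id2"]}
+//	}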
+package putrule + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype" +) + +const ( + rulesetidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + rulesetid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutRule type alias for index. +type NewPutRule func(rulesetid, ruleid string) *PutRule + +// NewPutRuleFunc returns a new instance of PutRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutRuleFunc(tp elastictransport.Interface) NewPutRule { + return func(rulesetid, ruleid string) *PutRule { + n := New(tp) + + n._rulesetid(rulesetid) + + n._ruleid(ruleid) + + return n + } +} + +// Create or update a query rule. +// Create or update a query rule within a query ruleset. +// +// IMPORTANT: Due to limitations within pinned queries, you can only pin +// documents using ids or docs, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule +func New(tp elastictransport.Interface) *PutRule { + r := &PutRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutRule) Raw(raw io.Reader) *PutRule { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutRule) Request(req *Request) *PutRule { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
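+// The body is taken from Raw when set; otherwise a Request, if provided, is
+// marshalled to JSON and buffered before the request is built.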
+func (r *PutRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutRule: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + path.WriteString("/") + path.WriteString("_rule") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
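+// When the transport carries an elastictransport.Instrumentation, a span named
+// "query_rules.put_rule" is opened here unless Do has already started one.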
+func (r PutRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.put_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.put_rule") + if reader := instrument.RecordRequestBody(ctx, "query_rules.put_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.put_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putrule.Response +func (r PutRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.put_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutRule headers map. +func (r *PutRule) Header(key, value string) *PutRule { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset containing the rule to be created +// or updated. +// API Name: rulesetid +func (r *PutRule) _rulesetid(rulesetid string) *PutRule { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// RuleId The unique identifier of the query rule within the specified ruleset to be +// created or updated. +// API Name: ruleid +func (r *PutRule) _ruleid(ruleid string) *PutRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
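+// Intended for debugging: stack traces make error responses considerably
+// more verbose.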
+// API name: error_trace +func (r *PutRule) ErrorTrace(errortrace bool) *PutRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutRule) FilterPath(filterpaths ...string) *PutRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutRule) Human(human bool) *PutRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutRule) Pretty(pretty bool) *PutRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The actions to take when the rule is matched. +// The format of this action depends on the rule type. +// API name: actions +func (r *PutRule) Actions(actions types.QueryRuleActionsVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Actions = *actions.QueryRuleActionsCaster() + + return r +} + +// The criteria that must be met for the rule to be applied. +// If multiple criteria are specified for a rule, all criteria must be met for +// the rule to be applied. +// API name: criteria +func (r *PutRule) Criteria(criteria ...types.QueryRuleCriteriaVariant) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Criteria = make([]types.QueryRuleCriteria, len(criteria)) + for i, v := range criteria { + r.req.Criteria[i] = *v.QueryRuleCriteriaCaster() + } + + return r +} + +// API name: priority +func (r *PutRule) Priority(priority int) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Priority = &priority + + return r +} + +// The type of rule. +// API name: type +func (r *PutRule) Type(type_ queryruletype.QueryRuleType) *PutRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Type = type_ + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/request.go new file mode 100644 index 000000000..64a289f0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/request.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrule + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype" +) + +// Request holds the request body struct for the package putrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/put_rule/QueryRulePutRequest.ts#L28-L79 +type Request struct { + + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions types.QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []types.QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // Type The type of rule. + Type queryruletype.QueryRuleType `json:"type"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putrule request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "criteria": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewQueryRuleCriteria() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Criteria", err) + } + + s.Criteria = append(s.Criteria, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Criteria); err != nil { + return fmt.Errorf("%s | %w", "Criteria", err) + } + } + + case "priority": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int(v) + s.Priority = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/response.go new file mode 100644 index 000000000..bd25403ac --- 
/dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrule + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/put_rule/QueryRulePutResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/put_ruleset.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/put_ruleset.go new file mode 100644 index 000000000..24072d584 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/put_ruleset.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. 
+// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. +package putruleset + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutRuleset struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + rulesetid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutRuleset type alias for index. +type NewPutRuleset func(rulesetid string) *PutRuleset + +// NewPutRulesetFunc returns a new instance of PutRuleset with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutRulesetFunc(tp elastictransport.Interface) NewPutRuleset { + return func(rulesetid string) *PutRuleset { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Create or update a query ruleset. +// There is a limit of 100 rules per ruleset. +// This limit can be increased by using the +// `xpack.applications.rules.max_rules_per_ruleset` cluster setting. +// +// IMPORTANT: Due to limitations within pinned queries, you can only select +// documents using `ids` or `docs`, but cannot use both in single rule. +// It is advised to use one or the other in query rulesets, to avoid errors. +// Additionally, pinned queries have a maximum limit of 100 pinned hits. +// If multiple matching rules pin more than 100 documents, only the first 100 +// documents are pinned in the order they are specified in the ruleset. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset +func New(tp elastictransport.Interface) *PutRuleset { + r := &PutRuleset{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutRuleset) Raw(raw io.Reader) *PutRuleset { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutRuleset) Request(req *Request) *PutRuleset { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
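+// Any deferred request mutations are applied before the body is serialised,
+// mirroring the other typed endpoints in this API.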
+func (r *PutRuleset) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutRuleset: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutRuleset) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.put_ruleset") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.put_ruleset") + if reader := instrument.RecordRequestBody(ctx, "query_rules.put_ruleset", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.put_ruleset") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutRuleset query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putruleset.Response +func (r PutRuleset) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.put_ruleset") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, 
err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutRuleset headers map. +func (r *PutRuleset) Header(key, value string) *PutRuleset { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset to be created or updated. +// API Name: rulesetid +func (r *PutRuleset) _rulesetid(rulesetid string) *PutRuleset { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutRuleset) ErrorTrace(errortrace bool) *PutRuleset { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutRuleset) FilterPath(filterpaths ...string) *PutRuleset { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutRuleset) Human(human bool) *PutRuleset { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutRuleset) Pretty(pretty bool) *PutRuleset { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: rules +func (r *PutRuleset) Rules(rules ...types.QueryRuleVariant) *PutRuleset { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Rules = make([]types.QueryRule, len(rules)) + for i, v := range rules { + r.req.Rules[i] = *v.QueryRuleCaster() + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/request.go new file mode 100644 index 000000000..71a214a98 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/request.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putruleset + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putruleset +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/put_ruleset/QueryRulesetPutRequest.ts#L23-L59 +type Request struct { + Rules []types.QueryRule `json:"rules"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putruleset request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rules": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewQueryRule() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + + s.Rules = append(s.Rules, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rules); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/response.go new file mode 100644 index 000000000..2c594d227 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putruleset + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putruleset +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/put_ruleset/QueryRulesetPutResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/request.go new file mode 100644 index 000000000..b92dde3c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package test + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package test +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/test/QueryRulesetTestRequest.ts#L24-L57 +type Request struct { + + // MatchCriteria The match criteria to apply to rules in the given query ruleset. + // Match criteria should match the keys defined in the `criteria.metadata` field + // of the rule. 
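+	// For example, a rule whose criteria use `metadata: "user_query"` could be
+	// tested with `{"user_query": "pugs"}` (illustrative values).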
+ MatchCriteria map[string]json.RawMessage `json:"match_criteria"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + MatchCriteria: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Test request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/response.go new file mode 100644 index 000000000..26b49236a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package test + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package test +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/test/QueryRulesetTestResponse.ts#L23-L28 +type Response struct { + MatchedRules []types.QueryRulesetMatchedRule `json:"matched_rules"` + TotalMatchedRules int `json:"total_matched_rules"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/test.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/test.go new file mode 100644 index 000000000..1ae6424a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test/test.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. +package test + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Test struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + rulesetid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTest type alias for index. +type NewTest func(rulesetid string) *Test + +// NewTestFunc returns a new instance of Test with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTestFunc(tp elastictransport.Interface) NewTest { + return func(rulesetid string) *Test { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Test a query ruleset. +// Evaluate match criteria against a query ruleset to identify the rules that +// would match that criteria. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test +func New(tp elastictransport.Interface) *Test { + r := &Test{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Test) Raw(raw io.Reader) *Test { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Test) Request(req *Request) *Test { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
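+// The built request targets POST /_query_rules/{ruleset_id}/_test.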
+func (r *Test) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Test: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "rulesetid", r.rulesetid) + } + path.WriteString(r.rulesetid) + path.WriteString("/") + path.WriteString("_test") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
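+// Transport errors are wrapped with the endpoint name via %w, so callers can
+// still unwrap the underlying cause with errors.Is or errors.As.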
+func (r Test) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "query_rules.test") + if reader := instrument.RecordRequestBody(ctx, "query_rules.test", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "query_rules.test") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Test query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a test.Response +func (r Test) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "query_rules.test") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Test headers map. +func (r *Test) Header(key, value string) *Test { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset to be created or updated +// API Name: rulesetid +func (r *Test) _rulesetid(rulesetid string) *Test { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Test) ErrorTrace(errortrace bool) *Test { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Test) FilterPath(filterpaths ...string) *Test { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Test) Human(human bool) *Test { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Test) Pretty(pretty bool) *Test { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The match criteria to apply to rules in the given query ruleset. +// Match criteria should match the keys defined in the `criteria.metadata` field +// of the rule. +// API name: match_criteria +func (r *Test) MatchCriteria(matchcriteria map[string]json.RawMessage) *Test { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.MatchCriteria = matchcriteria + return r +} + +func (r *Test) AddMatchCriterion(key string, value json.RawMessage) *Test { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.MatchCriteria == nil { + r.req.MatchCriteria = make(map[string]json.RawMessage) + } else { + tmp = r.req.MatchCriteria + } + + tmp[key] = value + + r.req.MatchCriteria = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go new file mode 100644 index 000000000..f2adeeab9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go @@ -0,0 +1,404 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a rollup job. +// +// A job must be stopped before it can be deleted. +// If you attempt to delete a started job, an error occurs. +// Similarly, if you attempt to delete a nonexistent job, an exception occurs. +// +// IMPORTANT: When you delete a job, you remove only the process that is +// actively monitoring and rolling up data. 
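A minimal usage sketch for the generated Test builder above. The import path (`typedapi/queryrules/test` under this vendored layout), the ruleset id, and the criterion key are assumptions; any configured `elastictransport.Interface` works, for example the transport behind an Elasticsearch client.

```go
package example

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test"
)

// testRuleset evaluates one match criterion against the (hypothetical)
// ruleset "my-ruleset" and prints whatever rules matched.
func testRuleset(ctx context.Context, tp elastictransport.Interface) {
	res, err := test.NewTestFunc(tp)("my-ruleset").
		AddMatchCriterion("query_string", json.RawMessage(`"pugs"`)).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("matched: %+v\n", res)
}
```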
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go
new file mode 100644
index 000000000..f2adeeab9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/delete_job.go
@@ -0,0 +1,404 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delete a rollup job.
+//
+// A job must be stopped before it can be deleted.
+// If you attempt to delete a started job, an error occurs.
+// Similarly, if you attempt to delete a nonexistent job, an exception occurs.
+//
+// IMPORTANT: When you delete a job, you remove only the process that is
+// actively monitoring and rolling up data.
+// The API does not delete any previously rolled up data.
+// This is by design; a user may wish to roll up a static data set.
+// Because the data set is static, after it has been fully rolled up there is no
+// need to keep the indexing rollup job around (as there will be no new data).
+// Thus the job can be deleted, leaving behind the rolled up data for analysis.
+// If you wish to also remove the rollup data and the rollup index contains the
+// data for only a single job, you can delete the whole rollup index.
+// If the rollup index stores data from several jobs, you must issue a
+// delete-by-query that targets the rollup job's identifier in the rollup index.
+// For example:
+//
+// ```
+// POST my_rollup_index/_delete_by_query
+//
+//	{
+//	  "query": {
+//	    "term": {
+//	      "_rollup.id": "the_rollup_job_id"
+//	    }
+//	  }
+//	}
+//
+// ```
+package deletejob

+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DeleteJob struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDeleteJob type alias for index.
+type NewDeleteJob func(id string) *DeleteJob
+
+// NewDeleteJobFunc returns a new instance of DeleteJob with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob {
+	return func(id string) *DeleteJob {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Delete a rollup job.
+//
+// A job must be stopped before it can be deleted.
+// If you attempt to delete a started job, an error occurs.
+// Similarly, if you attempt to delete a nonexistent job, an exception occurs.
+//
+// IMPORTANT: When you delete a job, you remove only the process that is
+// actively monitoring and rolling up data.
+// The API does not delete any previously rolled up data.
+// This is by design; a user may wish to roll up a static data set.
+// Because the data set is static, after it has been fully rolled up there is no
+// need to keep the indexing rollup job around (as there will be no new data).
+// Thus the job can be deleted, leaving behind the rolled up data for analysis.
+// If you wish to also remove the rollup data and the rollup index contains the
+// data for only a single job, you can delete the whole rollup index.
+// If the rollup index stores data from several jobs, you must issue a
+// delete-by-query that targets the rollup job's identifier in the rollup index.
+// For example:
+//
+// ```
+// POST my_rollup_index/_delete_by_query
+//
+//	{
+//	  "query": {
+//	    "term": {
+//	      "_rollup.id": "the_rollup_job_id"
+//	    }
+//	  }
+//	}
+//
+// ```
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job
+func New(tp elastictransport.Interface) *DeleteJob {
+	r := &DeleteJob{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *DeleteJob) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("job")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodDelete
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r DeleteJob) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.delete_job")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.delete_job")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.delete_job", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.delete_job")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the DeleteJob query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a deletejob.Response
+func (r DeleteJob) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.delete_job")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r DeleteJob) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.delete_job")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteJob query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteJob headers map.
+func (r *DeleteJob) Header(key, value string) *DeleteJob {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Identifier for the job.
+// API Name: id
+func (r *DeleteJob) _id(id string) *DeleteJob {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteJob) ErrorTrace(errortrace bool) *DeleteJob {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *DeleteJob) FilterPath(filterpaths ...string) *DeleteJob {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *DeleteJob) Human(human bool) *DeleteJob {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *DeleteJob) Pretty(pretty bool) *DeleteJob {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
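A sketch of driving the generated DeleteJob builder above, assuming a stopped rollup job with the placeholder id `sensor`; as the doc comment explains, this removes only the job, not the rolled-up data.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob"
)

// deleteRollupJob deletes the job "sensor" and reports acknowledgement;
// tp can be any configured elastictransport.Interface.
func deleteRollupJob(ctx context.Context, tp elastictransport.Interface) {
	res, err := deletejob.NewDeleteJobFunc(tp)("sensor").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```

Where only the status code matters, `IsSuccess` (defined above) gives the same call as a boolean, treating 2xx as true and 404 as a non-error false.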
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/response.go
new file mode 100644
index 000000000..556f7ec65
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob/response.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package deletejob
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package deletejob
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27
+type Response struct {
+	Acknowledged bool                `json:"acknowledged"`
+	TaskFailures []types.TaskFailure `json:"task_failures,omitempty"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/get_jobs.go
new file mode 100644
index 000000000..d87486a1d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/get_jobs.go
@@ -0,0 +1,364 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get rollup job information.
+// Get the configuration, stats, and status of rollup jobs.
+//
+// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.
+// If a job was created, ran for a while, then was deleted, the API does not
+// return any details about it.
+// For details about a historical rollup job, the rollup capabilities API may be
+// more useful.
+package getjobs
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetJobs struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetJobs type alias for index.
+type NewGetJobs func() *GetJobs
+
+// NewGetJobsFunc returns a new instance of GetJobs with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetJobsFunc(tp elastictransport.Interface) NewGetJobs {
+	return func() *GetJobs {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get rollup job information.
+// Get the configuration, stats, and status of rollup jobs.
+//
+// NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.
+// If a job was created, ran for a while, then was deleted, the API does not
+// return any details about it.
+// For details about a historical rollup job, the rollup capabilities API may be
+// more useful.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs
+func New(tp elastictransport.Interface) *GetJobs {
+	r := &GetJobs{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetJobs) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("job")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("job")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetJobs) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.get_jobs")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.get_jobs")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.get_jobs", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.get_jobs")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetJobs query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getjobs.Response
+func (r GetJobs) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_jobs")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetJobs) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_jobs")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetJobs query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetJobs headers map.
+func (r *GetJobs) Header(key, value string) *GetJobs {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Identifier for the rollup job.
+// If it is `_all` or omitted, the API returns all rollup jobs.
+// API Name: id
+func (r *GetJobs) Id(id string) *GetJobs {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetJobs) ErrorTrace(errortrace bool) *GetJobs {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetJobs) FilterPath(filterpaths ...string) *GetJobs {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetJobs) Human(human bool) *GetJobs {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetJobs) Pretty(pretty bool) *GetJobs {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
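A sketch of listing active rollup jobs with the GetJobs builder above; per the `Id` doc comment, omitting the id (or passing `_all`) returns every job. The response type iterated here is defined in the response.go that follows.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs"
)

// listRollupJobs fetches all active (STARTED or STOPPED) rollup jobs;
// tp can be any configured elastictransport.Interface.
func listRollupJobs(ctx context.Context, tp elastictransport.Interface) {
	res, err := getjobs.NewGetJobsFunc(tp)().Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d active rollup job(s)\n", len(res.Jobs))
}
```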
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/response.go
new file mode 100644
index 000000000..2399a360b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs/response.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getjobs
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getjobs
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24
+type Response struct {
+	Jobs []types.RollupJob `json:"jobs"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/get_rollup_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/get_rollup_caps.go
new file mode 100644
index 000000000..8a780e326
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/get_rollup_caps.go
@@ -0,0 +1,374 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get the rollup job capabilities.
+// Get the capabilities of any rollup jobs that have been configured for a
+// specific index or index pattern.
+//
+// This API is useful because a rollup job is often configured to rollup only a
+// subset of fields from the source index.
+// Furthermore, only certain aggregations can be configured for various fields,
+// leading to a limited subset of functionality depending on that configuration.
+// This API enables you to inspect an index and determine:
+//
+// 1. Does this index have associated rollup data somewhere in the cluster?
+// 2. If yes to the first question, what fields were rolled up, what
+// aggregations can be performed, and where does the data live?
+package getrollupcaps
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetRollupCaps struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetRollupCaps type alias for index.
+type NewGetRollupCaps func() *GetRollupCaps
+
+// NewGetRollupCapsFunc returns a new instance of GetRollupCaps with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetRollupCapsFunc(tp elastictransport.Interface) NewGetRollupCaps {
+	return func() *GetRollupCaps {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get the rollup job capabilities.
+// Get the capabilities of any rollup jobs that have been configured for a
+// specific index or index pattern.
+//
+// This API is useful because a rollup job is often configured to rollup only a
+// subset of fields from the source index.
+// Furthermore, only certain aggregations can be configured for various fields,
+// leading to a limited subset of functionality depending on that configuration.
+// This API enables you to inspect an index and determine:
+//
+// 1. Does this index have associated rollup data somewhere in the cluster?
+// 2. If yes to the first question, what fields were rolled up, what
+// aggregations can be performed, and where does the data live?
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps
+func New(tp elastictransport.Interface) *GetRollupCaps {
+	r := &GetRollupCaps{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetRollupCaps) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("data")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("data")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetRollupCaps) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.get_rollup_caps")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.get_rollup_caps")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.get_rollup_caps", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.get_rollup_caps")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetRollupCaps query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getrollupcaps.Response
+func (r GetRollupCaps) Do(providedCtx context.Context) (Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_rollup_caps")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetRollupCaps) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_rollup_caps")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetRollupCaps query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetRollupCaps headers map.
+func (r *GetRollupCaps) Header(key, value string) *GetRollupCaps {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Index, indices or index-pattern to return rollup capabilities for.
+// `_all` may be used to fetch rollup capabilities from all jobs.
+// API Name: id
+func (r *GetRollupCaps) Id(id string) *GetRollupCaps {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetRollupCaps) ErrorTrace(errortrace bool) *GetRollupCaps {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetRollupCaps) FilterPath(filterpaths ...string) *GetRollupCaps {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetRollupCaps) Human(human bool) *GetRollupCaps {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetRollupCaps) Pretty(pretty bool) *GetRollupCaps {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/response.go
new file mode 100644
index 000000000..2d8f242d4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getrollupcaps
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getrollupcaps
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27
+
+type Response map[string]types.RollupCapabilities
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := make(Response, 0)
+	return r
+}
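Because `getrollupcaps.Response` (defined just above) is a map keyed by index pattern, iterating the `Do` result shows which patterns have rollup data; `sensor-*` below is a placeholder pattern.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps"
)

// rollupCaps asks which rollup jobs cover indices matching "sensor-*";
// omitting Id (or passing "_all") covers every job.
func rollupCaps(ctx context.Context, tp elastictransport.Interface) {
	res, err := getrollupcaps.NewGetRollupCapsFunc(tp)().Id("sensor-*").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for indexPattern := range res {
		fmt.Println("rollup capabilities for:", indexPattern)
	}
}
```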
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go
new file mode 100644
index 000000000..973ecdcc6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go
@@ -0,0 +1,361 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get the rollup index capabilities.
+// Get the rollup capabilities of all jobs inside of a rollup index.
+// A single rollup index may store the data for multiple rollup jobs and may
+// have a variety of capabilities depending on those jobs. This API enables you
+// to determine:
+//
+// * What jobs are stored in an index (or indices specified via a pattern)?
+// * What target indices were rolled up, what fields were used in those rollups,
+// and what aggregations can be performed on each job?
+package getrollupindexcaps
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	indexMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetRollupIndexCaps struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	index string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetRollupIndexCaps type alias for index.
+type NewGetRollupIndexCaps func(index string) *GetRollupIndexCaps
+
+// NewGetRollupIndexCapsFunc returns a new instance of GetRollupIndexCaps with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewGetRollupIndexCapsFunc(tp elastictransport.Interface) NewGetRollupIndexCaps {
+	return func(index string) *GetRollupIndexCaps {
+		n := New(tp)
+
+		n._index(index)
+
+		return n
+	}
+}
+
+// Get the rollup index capabilities.
+// Get the rollup capabilities of all jobs inside of a rollup index.
+// A single rollup index may store the data for multiple rollup jobs and may
+// have a variety of capabilities depending on those jobs. This API enables you
+// to determine:
+//
+// * What jobs are stored in an index (or indices specified via a pattern)?
+// * What target indices were rolled up, what fields were used in those rollups,
+// and what aggregations can be performed on each job?
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps
+func New(tp elastictransport.Interface) *GetRollupIndexCaps {
+	r := &GetRollupIndexCaps{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetRollupIndexCaps) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == indexMask:
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.index)
+		}
+		path.WriteString(r.index)
+		path.WriteString("/")
+		path.WriteString("_rollup")
+		path.WriteString("/")
+		path.WriteString("data")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetRollupIndexCaps) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.get_rollup_index_caps")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.get_rollup_index_caps")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.get_rollup_index_caps", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.get_rollup_index_caps")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetRollupIndexCaps query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getrollupindexcaps.Response
+func (r GetRollupIndexCaps) Do(providedCtx context.Context) (Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_rollup_index_caps")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(&response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetRollupIndexCaps) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.get_rollup_index_caps")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetRollupIndexCaps query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetRollupIndexCaps headers map.
+func (r *GetRollupIndexCaps) Header(key, value string) *GetRollupIndexCaps {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index Data stream or index to check for rollup capabilities.
+// Wildcard (`*`) expressions are supported.
+// API Name: index
+func (r *GetRollupIndexCaps) _index(index string) *GetRollupIndexCaps {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetRollupIndexCaps) ErrorTrace(errortrace bool) *GetRollupIndexCaps {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetRollupIndexCaps) FilterPath(filterpaths ...string) *GetRollupIndexCaps {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetRollupIndexCaps) Human(human bool) *GetRollupIndexCaps {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *GetRollupIndexCaps) Pretty(pretty bool) *GetRollupIndexCaps {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/response.go
new file mode 100644
index 000000000..4f8cfa2e5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getrollupindexcaps
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getrollupindexcaps
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27
+
+type Response map[string]types.IndexCapabilities
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := make(Response, 0)
+	return r
+}
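A sketch for the index-caps variant above. Unlike the job-caps API, the index argument is required (wildcards are supported); `sensor_rollup` is a placeholder, and the result maps rollup index names to the jobs stored in them, per the map type in response.go.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps"
)

// indexCaps inspects the rollup index "sensor_rollup" itself;
// tp can be any configured elastictransport.Interface.
func indexCaps(ctx context.Context, tp elastictransport.Interface) {
	res, err := getrollupindexcaps.NewGetRollupIndexCapsFunc(tp)("sensor_rollup").Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for index := range res {
		fmt.Println("jobs stored in:", index)
	}
}
```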
+// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. +package putjob + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutJob type alias for index. +type NewPutJob func(id string) *PutJob + +// NewPutJobFunc returns a new instance of PutJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { + return func(id string) *PutJob { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create a rollup job. +// +// WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will +// fail with a message about the deprecation and planned removal of rollup +// features. A cluster needs to contain either a rollup job or a rollup index in +// order for this API to be allowed to run. +// +// The rollup job configuration contains all the details about how the job +// should run, when it indexes documents, and what future queries will be able +// to run against the rollup index. +// +// There are three main sections to the job configuration: the logistical +// details about the job (for example, the cron schedule), the fields that are +// used for grouping, and what metrics to collect for each group. +// +// Jobs are created in a `STOPPED` state. You can start them with the start +// rollup jobs API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job +func New(tp elastictransport.Interface) *PutJob { + r := &PutJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *PutJob) Raw(raw io.Reader) *PutJob { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutJob) Request(req *Request) *PutJob { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutJob: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutJob) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.put_job")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.put_job")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.put_job", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.put_job")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the PutJob query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a putjob.Response
+func (r PutJob) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.put_job")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the PutJob headers map.
+func (r *PutJob) Header(key, value string) *PutJob {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id Identifier for the rollup job. This can be any alphanumeric string and
+// uniquely identifies the data that is associated with the rollup job. The ID
+// is persistent; it is stored with the rolled up data. If you create a job,
+// let it run for a while, then delete the job, the data that the job rolled up
+// is still associated with this job ID. You cannot create a new job with the
+// same ID since that could lead to problems with mismatched job configurations.
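A non-2xx status in the Do method above is decoded into a *types.ElasticsearchError, with Status backfilled from the HTTP status code when the body omits it. The sketch below is not part of the diff; the client value `es`, job ID, and JSON body are illustrative only. It shows how a caller can distinguish that rejection from a transport failure:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// createJob sketches the error-handling contract of Do in the hunk above.
func createJob(ctx context.Context, es *elasticsearch.TypedClient) error {
	// Illustrative job config; field names follow request.go later in this diff.
	body := `{
	  "cron": "0 0 * * * ?",
	  "index_pattern": "sensor-*",
	  "rollup_index": "sensor_rollup",
	  "page_size": 1000,
	  "groups": {"date_histogram": {"field": "timestamp", "fixed_interval": "60m"}}
	}`

	_, err := es.Rollup.PutJob("sensor-hourly").Raw(strings.NewReader(body)).Do(ctx)

	var esErr *types.ElasticsearchError
	if errors.As(err, &esErr) {
		// The cluster answered but rejected the job (for example, a duplicate ID).
		return fmt.Errorf("rollup job rejected, status %d: %w", esErr.Status, esErr)
	}
	return err // nil, or a transport-level failure wrapped by Perform
}
```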
+// API Name: id +func (r *PutJob) _id(id string) *PutJob { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutJob) ErrorTrace(errortrace bool) *PutJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutJob) FilterPath(filterpaths ...string) *PutJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutJob) Human(human bool) *PutJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutJob) Pretty(pretty bool) *PutJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A cron string which defines the intervals when the rollup job should be +// executed. When the interval +// triggers, the indexer attempts to rollup the data in the index pattern. The +// cron pattern is unrelated +// to the time interval of the data being rolled up. For example, you may wish +// to create hourly rollups +// of your document but to only run the indexer on a daily basis at midnight, as +// defined by the cron. The +// cron pattern is defined just like a Watcher cron schedule. +// API name: cron +func (r *PutJob) Cron(cron string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Cron = cron + + return r +} + +// Defines the grouping fields and aggregations that are defined for this rollup +// job. These fields will then be +// available later for aggregating into buckets. These aggs and fields can be +// used in any combination. Think of +// the groups configuration as defining a set of tools that can later be used in +// aggregations to partition the +// data. Unlike raw data, we have to think ahead to which fields and +// aggregations might be used. Rollups provide +// enough flexibility that you simply need to determine which fields are needed, +// not in what order they are needed. +// API name: groups +func (r *PutJob) Groups(groups types.GroupingsVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Groups = *groups.GroupingsCaster() + + return r +} + +// API name: headers +func (r *PutJob) Headers(httpheaders types.HttpHeadersVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Headers = *httpheaders.HttpHeadersCaster() + + return r +} + +// The index or index pattern to roll up. Supports wildcard-style patterns +// (`logstash-*`). The job attempts to +// rollup the entire index or index-pattern. 
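The fluent setters above (Cron, Groups, Headers) and the ones that follow (IndexPattern, PageSize, RollupIndex) each lazily initialize the underlying Request. A hedged sketch of composing them, assuming the v9 typed client and that *types.Groupings satisfies types.GroupingsVariant via the usual caster pattern; the grouping field names come from the vendored types package, not from this hunk:

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Assumption: *types.Groupings implements types.GroupingsVariant, and
	// DateHistogramGrouping exposes Field (string) and FixedInterval (Duration).
	groups := &types.Groupings{
		DateHistogram: &types.DateHistogramGrouping{
			Field:         "timestamp",
			FixedInterval: "60m",
		},
	}

	resp, err := es.Rollup.PutJob("sensor-hourly").
		Cron("0 0 * * * ?").          // when the indexer wakes up
		IndexPattern("sensor-*").     // see the IndexPattern setter just below
		RollupIndex("sensor_rollup"). // where the rolled-up data lands
		PageSize(1000).
		Groups(groups).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged: %v", resp.Acknowledged)
}
```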
+// API name: index_pattern +func (r *PutJob) IndexPattern(indexpattern string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexPattern = indexpattern + + return r +} + +// Defines the metrics to collect for each grouping tuple. By default, only the +// doc_counts are collected for each +// group. To make rollup useful, you will often add metrics like averages, mins, +// maxes, etc. Metrics are defined +// on a per-field basis and for each field you configure which metric should be +// collected. +// API name: metrics +func (r *PutJob) Metrics(metrics ...types.FieldMetricVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range metrics { + + r.req.Metrics = append(r.req.Metrics, *v.FieldMetricCaster()) + + } + return r +} + +// The number of bucket results that are processed on each iteration of the +// rollup indexer. A larger value tends +// to execute faster, but requires more memory during processing. This value has +// no effect on how the data is +// rolled up; it is merely used for tweaking the speed or memory cost of the +// indexer. +// API name: page_size +func (r *PutJob) PageSize(pagesize int) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PageSize = pagesize + + return r +} + +// The index that contains the rollup results. The index can be shared with +// other rollup jobs. The data is stored so that it doesn’t interfere with +// unrelated jobs. +// API name: rollup_index +func (r *PutJob) RollupIndex(indexname string) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RollupIndex = indexname + + return r +} + +// Time to wait for the request to complete. +// API name: timeout +func (r *PutJob) Timeout(duration types.DurationVariant) *PutJob { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Timeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/request.go new file mode 100644 index 000000000..dd3d7e176 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/request.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjob + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L105 +type Request struct { + + // Cron A cron string which defines the intervals when the rollup job should be + // executed. When the interval + // triggers, the indexer attempts to rollup the data in the index pattern. The + // cron pattern is unrelated + // to the time interval of the data being rolled up. For example, you may wish + // to create hourly rollups + // of your document but to only run the indexer on a daily basis at midnight, as + // defined by the cron. The + // cron pattern is defined just like a Watcher cron schedule. + Cron string `json:"cron"` + // Groups Defines the grouping fields and aggregations that are defined for this rollup + // job. These fields will then be + // available later for aggregating into buckets. These aggs and fields can be + // used in any combination. Think of + // the groups configuration as defining a set of tools that can later be used in + // aggregations to partition the + // data. Unlike raw data, we have to think ahead to which fields and + // aggregations might be used. Rollups provide + // enough flexibility that you simply need to determine which fields are needed, + // not in what order they are needed. + Groups types.Groupings `json:"groups"` + Headers types.HttpHeaders `json:"headers,omitempty"` + // IndexPattern The index or index pattern to roll up. Supports wildcard-style patterns + // (`logstash-*`). The job attempts to + // rollup the entire index or index-pattern. + IndexPattern string `json:"index_pattern"` + // Metrics Defines the metrics to collect for each grouping tuple. By default, only the + // doc_counts are collected for each + // group. To make rollup useful, you will often add metrics like averages, mins, + // maxes, etc. Metrics are defined + // on a per-field basis and for each field you configure which metric should be + // collected. + Metrics []types.FieldMetric `json:"metrics,omitempty"` + // PageSize The number of bucket results that are processed on each iteration of the + // rollup indexer. A larger value tends + // to execute faster, but requires more memory during processing. This value has + // no effect on how the data is + // rolled up; it is merely used for tweaking the speed or memory cost of the + // indexer. + PageSize int `json:"page_size"` + // RollupIndex The index that contains the rollup results. The index can be shared with + // other rollup jobs. The data is stored so that it doesn’t interfere with + // unrelated jobs. + RollupIndex string `json:"rollup_index"` + // Timeout Time to wait for the request to complete. 
+ Timeout types.Duration `json:"timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putjob request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cron": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cron", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cron = o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexPattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + + case "page_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PageSize", err) + } + s.PageSize = value + case float64: + f := int(v) + s.PageSize = f + } + + case "rollup_index": + if err := dec.Decode(&s.RollupIndex); err != nil { + return fmt.Errorf("%s | %w", "RollupIndex", err) + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/response.go new file mode 100644 index 000000000..569c68a5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
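Instead of the fluent setters, a request can also be loaded wholesale through FromJSON, whose custom UnmarshalJSON (above) tolerates quoted numbers such as `"page_size": "1000"`. A minimal sketch, assuming the typed client exposes this endpoint as `es.Rollup.PutJob`:

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// FromJSON unmarshals an arbitrary JSON document into the typed Request.
	req, err := putjob.NewRequest().FromJSON(`{
	  "cron": "0 0 * * * ?",
	  "index_pattern": "sensor-*",
	  "rollup_index": "sensor_rollup",
	  "page_size": 1000,
	  "groups": {"date_histogram": {"field": "timestamp", "fixed_interval": "60m"}}
	}`)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := es.Rollup.PutJob("sensor-hourly").Request(req).Do(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```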
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putjob + +// Response holds the response body struct for the package putjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/request.go new file mode 100644 index 000000000..e16f40c7f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/request.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rollupsearch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package rollupsearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L86 +type Request struct { + + // Aggregations Specifies aggregations. + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + // Query Specifies a DSL query that is subject to some limitations. + Query *types.Query `json:"query,omitempty"` + // Size Must be zero if set, as rollups work on pre-aggregated data. 
+ Size *int `json:"size,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.Aggregations, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Rollupsearch request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/response.go new file mode 100644 index 000000000..1d1144b4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/response.go @@ -0,0 +1,657 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
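The UnmarshalJSON just shown accepts either `aggregations` or its `aggs` alias and coerces a quoted `size` into an *int. This sketch exercises the parser offline (no cluster needed); the aggregation name and field are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch"
)

func main() {
	// "aggs" is resolved to Aggregations by the custom decoder above.
	req, err := rollupsearch.NewRequest().FromJSON(`{
	  "size": 0,
	  "aggs": {
	    "max_temperature": {"max": {"field": "temperature"}}
	  }
	}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %d aggregation(s), size=%d\n", len(req.Aggregations), *req.Size)
}
```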
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package rollupsearch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package rollupsearch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Hits types.HitsMetadata `json:"hits"` + Shards_ types.ShardStatistics `json:"_shards"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err 
:= dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | 
%w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := 
types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/rollup_search.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/rollup_search.go new file mode 100644 index 000000000..7154cce2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch/rollup_search.go @@ -0,0 +1,478 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Search rolled-up data. +// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// For more detailed examples of using the rollup search API, including querying +// rolled-up data only or combining rolled-up and live data, refer to the +// External documentation. +package rollupsearch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RollupSearch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRollupSearch type alias for index. +type NewRollupSearch func(index string) *RollupSearch + +// NewRollupSearchFunc returns a new instance of RollupSearch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewRollupSearchFunc(tp elastictransport.Interface) NewRollupSearch { + return func(index string) *RollupSearch { + n := New(tp) + + n._index(index) + + return n + } +} + +// Search rolled-up data. +// The rollup search endpoint is needed because, internally, rolled-up documents +// utilize a different document structure than the original data. +// It rewrites standard Query DSL into a format that matches the rollup +// documents then takes the response and rewrites it back to what a client would +// expect given the original query. +// +// The request body supports a subset of features from the regular search API. +// The following functionality is not available: +// +// `size`: Because rollups work on pre-aggregated data, no search hits can be +// returned and so size must be set to zero or omitted entirely. +// `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are +// similarly disallowed. +// +// For more detailed examples of using the rollup search API, including querying +// rolled-up data only or combining rolled-up and live data, refer to the +// External documentation. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search +func New(tp elastictransport.Interface) *RollupSearch { + r := &RollupSearch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *RollupSearch) Raw(raw io.Reader) *RollupSearch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *RollupSearch) Request(req *Request) *RollupSearch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RollupSearch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RollupSearch: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_rollup_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RollupSearch) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "rollup.rollup_search")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "rollup.rollup_search")
+		if reader := instrument.RecordRequestBody(ctx, "rollup.rollup_search", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "rollup.rollup_search")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the RollupSearch query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a rollupsearch.Response
+func (r RollupSearch) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "rollup.rollup_search")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	r.TypedKeys(true)
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the RollupSearch headers map.
+func (r *RollupSearch) Header(key, value string) *RollupSearch {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index A comma-separated list of data streams and indices used to limit the request.
+// This parameter has the following rules:
+//
+// * At least one data stream, index, or wildcard expression must be specified.
+// This target can include a rollup or non-rollup index. For data streams, the
+// stream's backing indices can only serve as non-rollup indices. Omitting the
+// parameter or using `_all` is not permitted.
+// * Multiple non-rollup indices may be specified.
+// * Only one rollup index may be specified. If more than one are supplied, an
+// exception occurs.
+// * Wildcard expressions (`*`) may be used. If they match more than one rollup +// index, an exception occurs. However, you can use an expression to match +// multiple non-rollup indices or data streams. +// API Name: index +func (r *RollupSearch) _index(index string) *RollupSearch { + r.paramSet |= indexMask + r.index = index + + return r +} + +// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in +// the rest search response +// API name: rest_total_hits_as_int +func (r *RollupSearch) RestTotalHitsAsInt(resttotalhitsasint bool) *RollupSearch { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// TypedKeys Specify whether aggregation and suggester names should be prefixed by their +// respective types in the response +// API name: typed_keys +func (r *RollupSearch) TypedKeys(typedkeys bool) *RollupSearch { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RollupSearch) ErrorTrace(errortrace bool) *RollupSearch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RollupSearch) FilterPath(filterpaths ...string) *RollupSearch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RollupSearch) Human(human bool) *RollupSearch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *RollupSearch) Pretty(pretty bool) *RollupSearch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Specifies aggregations. +// API name: aggregations +func (r *RollupSearch) Aggregations(aggregations map[string]types.Aggregations) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aggregations = aggregations + return r +} + +func (r *RollupSearch) AddAggregation(key string, value types.AggregationsVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.Aggregations + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.Aggregations) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.AggregationsCaster() + + r.req.Aggregations = tmp + return r +} + +// Specifies a DSL query that is subject to some limitations. 
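Putting the pieces together: Do above forces `typed_keys`, and the response decoder earlier in this file strips the `type#name` prefix and stores each aggregate under its plain name with a concrete Go type. A sketch under those assumptions; the client value, index, aggregation name, and field are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Assumption: *types.Aggregations implements types.AggregationsVariant
	// in the v9 "variant" pattern.
	field := "temperature"
	resp, err := es.Rollup.RollupSearch("sensor_rollup").
		AddAggregation("max_temperature", &types.Aggregations{
			Max: &types.MaxAggregation{Field: &field},
		}).
		Size(0). // rollups work on pre-aggregated data, so size must be zero
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The wire key "max#max_temperature" is decoded (per response.go above)
	// into a *types.MaxAggregate stored under "max_temperature".
	if agg, ok := resp.Aggregations["max_temperature"].(*types.MaxAggregate); ok && agg.Value != nil {
		fmt.Println("max temperature:", *agg.Value)
	}
}
```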
+// API name: query +func (r *RollupSearch) Query(query types.QueryVariant) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// Must be zero if set, as rollups work on pre-aggregated data. +// API name: size +func (r *RollupSearch) Size(size int) *RollupSearch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/response.go new file mode 100644 index 000000000..5534b6f5d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package startjob + +// Response holds the response body struct for the package startjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 +type Response struct { + Started bool `json:"started"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/start_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/start_job.go new file mode 100644 index 000000000..57e6fea84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob/start_job.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
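The rollup search builder above composes into a single chain. A minimal call-site sketch, assuming the v9 typed client wires the endpoint up as `es.Rollup.RollupSearch` (per the `NewRollupSearch` index pattern), that `types.NewMatchAllQuery` satisfies the `QueryVariant` caster consumed by `Query`, and that a rollup index named `sensor_rollup` exists:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed local cluster
	})
	if err != nil {
		log.Fatal(err)
	}

	// Size is pinned to zero because rollup indices hold pre-aggregated data;
	// Do also forces typed_keys=true before performing the request.
	res, err := es.Rollup.RollupSearch("sensor_rollup").
		Query(types.NewMatchAllQuery()).
		Size(0).
		Do(context.Background())
	if err != nil {
		log.Fatal(err) // non-2xx responses decode into *types.ElasticsearchError
	}
	fmt.Printf("%+v\n", res)
}
```

Later sketches in this section reuse the `es` client built here.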
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. +package startjob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StartJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStartJob type alias for index. +type NewStartJob func(id string) *StartJob + +// NewStartJobFunc returns a new instance of StartJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartJobFunc(tp elastictransport.Interface) NewStartJob { + return func(id string) *StartJob { + n := New(tp) + + n._id(id) + + return n + } +} + +// Start rollup jobs. +// If you try to start a job that does not exist, an exception occurs. +// If you try to start a job that is already started, nothing happens. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job +func New(tp elastictransport.Interface) *StartJob { + r := &StartJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StartJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StartJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "rollup.start_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.start_job") + if reader := instrument.RecordRequestBody(ctx, "rollup.start_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.start_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StartJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a startjob.Response +func (r StartJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.start_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
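Note that `Do` flips `spanStarted` before delegating to `Perform`, so an instrumented transport opens exactly one span per call, and non-2xx bodies decode into `*types.ElasticsearchError`. A short sketch of both calling styles, reusing the `es` client from the rollup search sketch and a hypothetical job id `sensor`:

```go
ctx := context.Background()

// Decoded form: Response.Started reports whether the job began indexing.
res, err := es.Rollup.StartJob("sensor").Do(ctx)
if err != nil {
	log.Fatal(err) // includes *types.ElasticsearchError for non-2xx replies
}
fmt.Println("started:", res.Started)

// Boolean form: true for any 2xx; a 404 yields (false, nil), and any
// other failure yields (false, err).
ok, err := es.Rollup.StartJob("sensor").IsSuccess(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("success:", ok)
```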
+func (r StartJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.start_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StartJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StartJob headers map. +func (r *StartJob) Header(key, value string) *StartJob { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the rollup job. +// API Name: id +func (r *StartJob) _id(id string) *StartJob { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StartJob) ErrorTrace(errortrace bool) *StartJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StartJob) FilterPath(filterpaths ...string) *StartJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StartJob) Human(human bool) *StartJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StartJob) Pretty(pretty bool) *StartJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/response.go new file mode 100644 index 000000000..724ec9264 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stopjob + +// Response holds the response body struct for the package stopjob +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 +type Response struct { + Stopped bool `json:"stopped"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/stop_job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/stop_job.go new file mode 100644 index 000000000..e8cd6772f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob/stop_job.go @@ -0,0 +1,401 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. +package stopjob + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StopJob struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStopJob type alias for index. +type NewStopJob func(id string) *StopJob + +// NewStopJobFunc returns a new instance of StopJob with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopJobFunc(tp elastictransport.Interface) NewStopJob { + return func(id string) *StopJob { + n := New(tp) + + n._id(id) + + return n + } +} + +// Stop rollup jobs. +// If you try to stop a job that does not exist, an exception occurs. +// If you try to stop a job that is already stopped, nothing happens. +// +// Since only a stopped job can be deleted, it can be useful to block the API +// until the indexer has fully stopped. +// This is accomplished with the `wait_for_completion` query parameter, and +// optionally a timeout. For example: +// +// ``` +// POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +// ``` +// The parameter blocks the API call from returning until either the job has +// moved to STOPPED or the specified time has elapsed. +// If the specified time elapses without the job moving to STOPPED, a timeout +// exception occurs. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job +func New(tp elastictransport.Interface) *StopJob { + r := &StopJob{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StopJob) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_rollup") + path.WriteString("/") + path.WriteString("job") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StopJob) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "rollup.stop_job") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "rollup.stop_job") + if reader := instrument.RecordRequestBody(ctx, "rollup.stop_job", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "rollup.stop_job") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StopJob query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stopjob.Response +func (r StopJob) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.stop_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
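The package documentation above shows the blocking form as a raw `POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s`. The same request expressed through the generated setters, reusing `es` and the hypothetical `sensor` job:

```go
// Block until the indexer reaches STOPPED, or fail after 10 seconds.
res, err := es.Rollup.StopJob("sensor").
	WaitForCompletion(true).
	Timeout("10s").
	Do(context.Background())
if err != nil {
	// Per the Timeout docs below, a timeout error does not cancel the stop;
	// the job still moves to STOPPED eventually.
	log.Fatal(err)
}
fmt.Println("stopped:", res.Stopped)
```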
+func (r StopJob) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "rollup.stop_job") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StopJob query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StopJob headers map. +func (r *StopJob) Header(key, value string) *StopJob { + r.headers.Set(key, value) + + return r +} + +// Id Identifier for the rollup job. +// API Name: id +func (r *StopJob) _id(id string) *StopJob { + r.paramSet |= idMask + r.id = id + + return r +} + +// Timeout If `wait_for_completion` is `true`, the API blocks for (at maximum) the +// specified duration while waiting for the job to stop. +// If more than `timeout` time has passed, the API throws a timeout exception. +// NOTE: Even if a timeout occurs, the stop request is still processing and +// eventually moves the job to STOPPED. +// The timeout simply means the API call itself timed out while waiting for the +// status change. +// API name: timeout +func (r *StopJob) Timeout(duration string) *StopJob { + r.values.Set("timeout", duration) + + return r +} + +// WaitForCompletion If set to `true`, causes the API to block until the indexer state completely +// stops. +// If set to `false`, the API returns immediately and the indexer is stopped +// asynchronously in the background. +// API name: wait_for_completion +func (r *StopJob) WaitForCompletion(waitforcompletion bool) *StopJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StopJob) ErrorTrace(errortrace bool) *StopJob { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StopJob) FilterPath(filterpaths ...string) *StopJob { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StopJob) Human(human bool) *StopJob { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *StopJob) Pretty(pretty bool) *StopJob { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/cache_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/cache_stats.go new file mode 100644 index 000000000..36e81b060 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/cache_stats.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. +package cachestats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CacheStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCacheStats type alias for index. +type NewCacheStats func() *CacheStats + +// NewCacheStatsFunc returns a new instance of CacheStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCacheStatsFunc(tp elastictransport.Interface) NewCacheStats { + return func() *CacheStats { + n := New(tp) + + return n + } +} + +// Get cache statistics. +// Get statistics about the shared cache for partially mounted indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats +func New(tp elastictransport.Interface) *CacheStats { + r := &CacheStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
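Because every endpoint exposes `HttpRequest` separately from `Perform` and `Do`, a request can be built without being executed, which is handy for inspecting the generated path or for custom signing. A sketch against the `CacheStats` builder defined above, reusing `es` and assuming the typed client exposes the namespace as `es.SearchableSnapshots`:

```go
// Build only; nothing is sent to the cluster.
req, err := es.SearchableSnapshots.CacheStats().HttpRequest(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(req.Method, req.URL.Path) // GET /_searchable_snapshots/cache/stats
```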
+func (r *CacheStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r CacheStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "searchable_snapshots.cache_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.cache_stats") + if reader := instrument.RecordRequestBody(ctx, "searchable_snapshots.cache_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.cache_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CacheStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cachestats.Response +func (r CacheStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.cache_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r CacheStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.cache_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CacheStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CacheStats headers map. +func (r *CacheStats) Header(key, value string) *CacheStats { + r.headers.Set(key, value) + + return r +} + +// NodeId The names of the nodes in the cluster to target. +// API Name: nodeid +func (r *CacheStats) NodeId(nodeid string) *CacheStats { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// API name: master_timeout +func (r *CacheStats) MasterTimeout(duration string) *CacheStats { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CacheStats) ErrorTrace(errortrace bool) *CacheStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CacheStats) FilterPath(filterpaths ...string) *CacheStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *CacheStats) Human(human bool) *CacheStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CacheStats) Pretty(pretty bool) *CacheStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/response.go new file mode 100644 index 000000000..fb152a264 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package cachestats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package cachestats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 +type Response struct { + Nodes map[string]types.Node `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.Node, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/clear_cache.go new file mode 100644 index 000000000..28f1b319b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/clear_cache.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
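The cache-stats body decodes into the per-node map declared in the `Response` struct above. A minimal sketch, reusing `es`:

```go
// Cluster-wide shared-cache statistics; NodeId("...") would scope the
// call to a single node instead.
res, err := es.SearchableSnapshots.CacheStats().Do(context.Background())
if err != nil {
	log.Fatal(err)
}
for name := range res.Nodes {
	fmt.Println("shared-cache stats reported by node:", name)
}
```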
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. +package clearcache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCache struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCache type alias for index. +type NewClearCache func() *ClearCache + +// NewClearCacheFunc returns a new instance of ClearCache with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCacheFunc(tp elastictransport.Interface) NewClearCache { + return func() *ClearCache { + n := New(tp) + + return n + } +} + +// Clear the cache. +// Clear indices and data streams from the shared cache for partially mounted +// indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache +func New(tp elastictransport.Interface) *ClearCache { + r := &ClearCache{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *ClearCache) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("clear") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("cache") + path.WriteString("/") + path.WriteString("clear") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearCache) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "searchable_snapshots.clear_cache") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.clear_cache") + if reader := instrument.RecordRequestBody(ctx, "searchable_snapshots.clear_cache", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.clear_cache") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCache query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcache.Response +func (r ClearCache) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer 
res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ClearCache) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.clear_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearCache query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearCache headers map. +func (r *ClearCache) Header(key, value string) *ClearCache { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams, indices, and aliases to clear from +// the cache. +// It supports wildcards (`*`). +// API Name: index +func (r *ClearCache) Index(index string) *ClearCache { + r.paramSet |= indexMask + r.index = index + + return r +} + +// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, +// closed or both. +// API name: expand_wildcards +func (r *ClearCache) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ClearCache { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete +// indices. 
(This includes `_all` string or when no indices have been specified) +// API name: allow_no_indices +func (r *ClearCache) AllowNoIndices(allownoindices bool) *ClearCache { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable +// (missing or closed) +// API name: ignore_unavailable +func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCache) ErrorTrace(errortrace bool) *ClearCache { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearCache) FilterPath(filterpaths ...string) *ClearCache { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCache) Human(human bool) *ClearCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearCache) Pretty(pretty bool) *ClearCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/response.go new file mode 100644 index 000000000..57f6bca0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
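Unlike the other endpoints in this diff, `ClearCache.Do` returns the body as raw JSON: the package's `Response`, defined just below, is a `json.RawMessage` alias rather than a decoded struct. A sketch, reusing `es` with a hypothetical partially mounted index:

```go
raw, err := es.SearchableSnapshots.ClearCache().
	Index("my-partial-index"). // comma-separated names and wildcards also work
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(raw)) // raw is a json.RawMessage ([]byte)
```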
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcache + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package clearcache +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/mount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/mount.go new file mode 100644 index 000000000..3480edab8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/mount.go @@ -0,0 +1,487 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. +// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. +package mount + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 + + snapshotMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Mount struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + repository string + snapshot string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewMount type alias for index. +type NewMount func(repository, snapshot string) *Mount + +// NewMountFunc returns a new instance of Mount with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMountFunc(tp elastictransport.Interface) NewMount { + return func(repository, snapshot string) *Mount { + n := New(tp) + + n._repository(repository) + + n._snapshot(snapshot) + + return n + } +} + +// Mount a snapshot. +// Mount a snapshot as a searchable snapshot index. 
+// Do not use this API for snapshots managed by index lifecycle management +// (ILM). +// Manually mounting ILM-managed snapshots can interfere with ILM processes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount +func New(tp elastictransport.Interface) *Mount { + r := &Mount{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Mount) Raw(raw io.Reader) *Mount { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Mount) Request(req *Request) *Mount { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Mount) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Mount: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + path.WriteString("/") + path.WriteString("_mount") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
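`Mount` is the only endpoint in this group with a request body: `HttpRequest` serializes `r.req` into the buffer unless a `Raw` reader was supplied, in which case the reader takes precedence. A sketch using the fluent body setters that follow below, reusing `es` with hypothetical repository, snapshot, and index names; `shared_cache` is assumed here as the storage option for a partially mounted index:

```go
res, err := es.SearchableSnapshots.Mount("my_repository", "my_snapshot").
	Index("my_index").                             // index inside the snapshot
	IgnoreIndexSettings("index.refresh_interval"). // drop this setting on mount
	Storage("shared_cache").
	WaitForCompletion(true).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%+v\n", res)
```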
+func (r Mount) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "searchable_snapshots.mount") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.mount") + if reader := instrument.RecordRequestBody(ctx, "searchable_snapshots.mount", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.mount") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Mount query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a mount.Response +func (r Mount) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.mount") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Mount headers map. +func (r *Mount) Header(key, value string) *Mount { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository containing the snapshot of the index to mount. +// API Name: repository +func (r *Mount) _repository(repository string) *Mount { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Snapshot The name of the snapshot of the index to mount. +// API Name: snapshot +func (r *Mount) _snapshot(snapshot string) *Mount { + r.paramSet |= snapshotMask + r.snapshot = snapshot + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. 
+// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Mount) MasterTimeout(duration string) *Mount { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForCompletion If true, the request blocks until the operation is complete. +// API name: wait_for_completion +func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Storage The mount option for the searchable snapshot index. +// API name: storage +func (r *Mount) Storage(storage string) *Mount { + r.values.Set("storage", storage) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Mount) ErrorTrace(errortrace bool) *Mount { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Mount) FilterPath(filterpaths ...string) *Mount { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Mount) Human(human bool) *Mount { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Mount) Pretty(pretty bool) *Mount { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The names of settings that should be removed from the index when it is +// mounted. +// API name: ignore_index_settings +func (r *Mount) IgnoreIndexSettings(ignoreindexsettings ...string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + + } + return r +} + +// The name of the index contained in the snapshot whose data is to be mounted. +// If no `renamed_index` is specified, this name will also be used to create the +// new index. +// API name: index +func (r *Mount) Index(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Index = indexname + + return r +} + +// The settings that should be added to the index when it is mounted. 
+// API name: index_settings +func (r *Mount) IndexSettings(indexsettings map[string]json.RawMessage) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.IndexSettings = indexsettings + return r +} + +func (r *Mount) AddIndexSetting(key string, value json.RawMessage) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.IndexSettings == nil { + r.req.IndexSettings = make(map[string]json.RawMessage) + } else { + tmp = r.req.IndexSettings + } + + tmp[key] = value + + r.req.IndexSettings = tmp + return r +} + +// The name of the index that will be created. +// API name: renamed_index +func (r *Mount) RenamedIndex(indexname string) *Mount { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RenamedIndex = &indexname + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/request.go new file mode 100644 index 000000000..07cdf6d9c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/request.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package mount +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L92 +type Request struct { + + // IgnoreIndexSettings The names of settings that should be removed from the index when it is + // mounted. + IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` + // Index The name of the index contained in the snapshot whose data is to be mounted. + // If no `renamed_index` is specified, this name will also be used to create the + // new index. + Index string `json:"index"` + // IndexSettings The settings that should be added to the index when it is mounted. + IndexSettings map[string]json.RawMessage `json:"index_settings,omitempty"` + // RenamedIndex The name of the index that will be created. 
+ RenamedIndex *string `json:"renamed_index,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + IndexSettings: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Mount request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_index_settings": + if err := dec.Decode(&s.IgnoreIndexSettings); err != nil { + return fmt.Errorf("%s | %w", "IgnoreIndexSettings", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "index_settings": + if s.IndexSettings == nil { + s.IndexSettings = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.IndexSettings); err != nil { + return fmt.Errorf("%s | %w", "IndexSettings", err) + } + + case "renamed_index": + if err := dec.Decode(&s.RenamedIndex); err != nil { + return fmt.Errorf("%s | %w", "RenamedIndex", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/response.go new file mode 100644 index 000000000..0d5656756 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package mount + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package mount +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 +type Response struct { + Snapshot types.MountedSnapshot `json:"snapshot"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/response.go new file mode 100644 index 000000000..2d946eca6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 +type Response struct { + Stats json.RawMessage `json:"stats,omitempty"` + Total json.RawMessage `json:"total,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/stats.go new file mode 100644 index 000000000..04d77ac1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats/stats.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get searchable snapshot statistics. +package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/statslevel" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + index string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get searchable snapshot statistics. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
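// Usage sketch (illustrative; `es` and `ctx` as assumed earlier): stats can
// be requested cluster-wide or scoped to an index, and the aggregation level
// is set through the statslevel enum imported above (Cluster, Indices, and
// Shards are assumed to be its generated values).
//
//	all, err := es.SearchableSnapshots.Stats().Do(ctx)
//	perShard, err := es.SearchableSnapshots.Stats().
//		Index("my-index").
//		Level(statslevel.Shards).
//		Do(ctx)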
+func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == indexMask: + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "index", r.index) + } + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_searchable_snapshots") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "searchable_snapshots.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "searchable_snapshots.stats") + if reader := instrument.RecordRequestBody(ctx, "searchable_snapshots.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "searchable_snapshots.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "searchable_snapshots.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// Index A comma-separated list of data streams and indices to retrieve statistics +// for. +// API Name: index +func (r *Stats) Index(index string) *Stats { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Level Return stats aggregated at cluster, index or shard level +// API name: level +func (r *Stats) Level(level statslevel.StatsLevel) *Stats { + r.values.Set("level", level.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/delete.go new file mode 100644 index 000000000..68cb8efa5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/delete.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a search application. +// +// Remove a search application and its associated alias. Indices attached to the +// search application are not removed. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(name string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(name string) *Delete { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete a search application. +// +// Remove a search application and its associated alias. Indices attached to the +// search application are not removed. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
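// Usage sketch (illustrative; `es` and `ctx` as assumed earlier): deleting a
// search application removes the application and its alias but leaves the
// attached indices intact, and the typed response carries an acknowledged
// flag.
//
//	res, err := es.SearchApplication.Delete("my-search-app").Do(ctx)
//	if err == nil && res.Acknowledged {
//		// application and alias removed; backing indices remain
//	}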
+func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.delete") + if reader := instrument.RecordRequestBody(ctx, "search_application.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to delete. +// API Name: name +func (r *Delete) _name(name string) *Delete { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/response.go new file mode 100644 index 000000000..7ce3326ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go new file mode 100644 index 000000000..9635d8825 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go @@ -0,0 +1,346 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a behavioral analytics collection. +// The associated data stream is also deleted. +package deletebehavioralanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteBehavioralAnalytics type alias for index. +type NewDeleteBehavioralAnalytics func(name string) *DeleteBehavioralAnalytics + +// NewDeleteBehavioralAnalyticsFunc returns a new instance of DeleteBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteBehavioralAnalyticsFunc(tp elastictransport.Interface) NewDeleteBehavioralAnalytics { + return func(name string) *DeleteBehavioralAnalytics { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete a behavioral analytics collection. +// The associated data stream is also deleted. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics +func New(tp elastictransport.Interface) *DeleteBehavioralAnalytics { + r := &DeleteBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
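// Usage sketch (illustrative; `es` and `ctx` as assumed earlier, executed
// here via the higher-level Do): unlike the search-application delete above,
// removing a behavioral analytics collection also deletes its backing data
// stream.
//
//	res, err := es.SearchApplication.DeleteBehavioralAnalytics("my-collection").Do(ctx)
//	if err == nil && res.Acknowledged {
//		// collection and its data stream removed
//	}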
+func (r DeleteBehavioralAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.delete_behavioral_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.delete_behavioral_analytics") + if reader := instrument.RecordRequestBody(ctx, "search_application.delete_behavioral_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.delete_behavioral_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteBehavioralAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletebehavioralanalytics.Response +func (r DeleteBehavioralAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
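// Usage sketch (illustrative): IsSuccess, implemented below, collapses the
// call into a boolean for quick control flow; note that a 404 returns
// (false, nil) rather than an error, and the body is drained and closed
// internally, so no typed response is available on this path.
//
//	ok, err := es.SearchApplication.DeleteBehavioralAnalytics("my-collection").IsSuccess(ctx)
//	// ok is true only for a 2xx status; err covers transport and non-404 failures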
+func (r DeleteBehavioralAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.delete_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteBehavioralAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteBehavioralAnalytics headers map. +func (r *DeleteBehavioralAnalytics) Header(key, value string) *DeleteBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name The name of the analytics collection to be deleted +// API Name: name +func (r *DeleteBehavioralAnalytics) _name(name string) *DeleteBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteBehavioralAnalytics) ErrorTrace(errortrace bool) *DeleteBehavioralAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteBehavioralAnalytics) FilterPath(filterpaths ...string) *DeleteBehavioralAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteBehavioralAnalytics) Human(human bool) *DeleteBehavioralAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteBehavioralAnalytics) Pretty(pretty bool) *DeleteBehavioralAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/response.go new file mode 100644 index 000000000..c14930606 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletebehavioralanalytics + +// Response holds the response body struct for the package deletebehavioralanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/get.go new file mode 100644 index 000000000..d6f5f50b2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/get.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get search application details. +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
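// Usage note (illustrative, low-level): the typed builders take their path
// parameters at construction, so ErrBuildPath mainly surfaces when a request
// is assembled by hand; it can be detected with errors.Is.
//
//	if _, err := es.SearchApplication.Get("my-app").HttpRequest(ctx); errors.Is(err, ErrBuildPath) {
//		// a required path parameter was missing
//	}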
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(name string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(name string) *Get { + n := New(tp) + + n._name(name) + + return n + } +} + +// Get search application details. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
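// Usage sketch (illustrative; `es` and `ctx` as assumed earlier): the typed
// response exposes the application's name, backing indices, and template, as
// defined in get/response.go later in this diff.
//
//	app, err := es.SearchApplication.Get("my-search-app").Do(ctx)
//	if err == nil {
//		fmt.Printf("%s -> %v (updated %d)\n", app.Name, app.Indices, app.UpdatedAtMillis)
//	}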
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.get") + if reader := instrument.RecordRequestBody(ctx, "search_application.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
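// Usage sketch (illustrative): IsSuccess below mirrors the variant shown
// earlier; for full responses, the shared query parameters defined further
// down can be chained onto any of these builders, e.g. filter_path to trim
// the payload.
//
//	res, err := es.SearchApplication.Get("my-search-app").
//		FilterPath("name", "indices").
//		Pretty(true).
//		Do(ctx)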
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application +// API Name: name +func (r *Get) _name(name string) *Get { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/response.go new file mode 100644 index 000000000..86722a7da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
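Since this builder pattern repeats across every endpoint in the package family, a short usage sketch may help orient review. The block below is illustrative commentary only, not part of the diff: it assumes an unauthenticated cluster at `http://localhost:9200`, and `"my-search-app"` is a placeholder application name. The transport comes from elastic-transport-go's `New`, whose client satisfies the `elastictransport.Interface` the generated constructors expect.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get"
)

func main() {
	// Assumed endpoint; a production Config would add auth and TLS.
	u, _ := url.Parse("http://localhost:9200")
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// NewGetFunc binds the transport; the constructor it returns takes the
	// required "name" path parameter, so paramSet == nameMask by construction.
	res, err := get.NewGetFunc(tp)("my-search-app").Do(context.Background())
	if err != nil {
		// Transport failures and decoded *types.ElasticsearchError both land here.
		log.Fatal(err)
	}
	fmt.Println(res.Name, res.Indices, res.UpdatedAtMillis)
}
```

Note that `Do` both performs the request and decodes the body, returning the decoded `ElasticsearchError` for non-2xx responses, so a single error check covers transport and API failures.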
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L25 +type Response struct { + + // AnalyticsCollectionName Analytics collection associated to the Search Application. + AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application. + Indices []string `json:"indices"` + // Name Search Application name + Name string `json:"name"` + // Template Search template to use on search operations. + Template *types.SearchApplicationTemplate `json:"template,omitempty"` + // UpdatedAtMillis Last time the Search Application was updated. + UpdatedAtMillis int64 `json:"updated_at_millis"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go new file mode 100644 index 000000000..e446fcfaf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go @@ -0,0 +1,349 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get behavioral analytics collections. 
+package getbehavioralanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetBehavioralAnalytics type alias for index. +type NewGetBehavioralAnalytics func() *GetBehavioralAnalytics + +// NewGetBehavioralAnalyticsFunc returns a new instance of GetBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetBehavioralAnalyticsFunc(tp elastictransport.Interface) NewGetBehavioralAnalytics { + return func() *GetBehavioralAnalytics { + n := New(tp) + + return n + } +} + +// Get behavioral analytics collections. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics +func New(tp elastictransport.Interface) *GetBehavioralAnalytics { + r := &GetBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetBehavioralAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.get_behavioral_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.get_behavioral_analytics") + if reader := instrument.RecordRequestBody(ctx, "search_application.get_behavioral_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.get_behavioral_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetBehavioralAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getbehavioralanalytics.Response +func (r GetBehavioralAnalytics) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetBehavioralAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.get_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetBehavioralAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetBehavioralAnalytics headers map. +func (r *GetBehavioralAnalytics) Header(key, value string) *GetBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name A list of analytics collections to limit the returned information +// API Name: name +func (r *GetBehavioralAnalytics) Name(name string) *GetBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetBehavioralAnalytics) ErrorTrace(errortrace bool) *GetBehavioralAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetBehavioralAnalytics) FilterPath(filterpaths ...string) *GetBehavioralAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetBehavioralAnalytics) Human(human bool) *GetBehavioralAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetBehavioralAnalytics) Pretty(pretty bool) *GetBehavioralAnalytics { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/response.go new file mode 100644 index 000000000..9965a8ec6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
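As with `Get` above, an illustrative sketch (not part of the diff) clarifies the two path shapes this builder supports: with no `Name()` call, `paramSet` stays `0` and the request targets `/_application/analytics`. The transport `tp` is assumed to be built as in the earlier `Get` sketch.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics"
)

// listCollections fetches all analytics collections. Calling Name("c1,c2")
// instead would set nameMask and switch to the /_application/analytics/{name} path.
func listCollections(tp elastictransport.Interface) {
	res, err := getbehavioralanalytics.New(tp).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The response decodes into a map keyed by collection name (see response.go below).
	for name := range res {
		fmt.Println("collection:", name)
	}
}
```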
+// licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package getbehavioralanalytics
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package getbehavioralanalytics
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L27
+
+type Response map[string]types.AnalyticsCollection
+
+// NewResponse returns a Response
+func NewResponse() Response {
+	r := make(Response, 0)
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/list.go
new file mode 100644
index 000000000..718b9a469
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/list.go
@@ -0,0 +1,347 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get search applications.
+// Get information about search applications.
+package list
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type List struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewList type alias for index.
+type NewList func() *List
+
+// NewListFunc returns a new instance of List with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewListFunc(tp elastictransport.Interface) NewList { + return func() *List { + n := New(tp) + + return n + } +} + +// Get search applications. +// Get information about search applications. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics +func New(tp elastictransport.Interface) *List { + r := &List{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *List) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r List) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.list") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.list") + if reader := instrument.RecordRequestBody(ctx, "search_application.list", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.list") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the List query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a list.Response +func (r List) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r List) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the List query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the List headers map. +func (r *List) Header(key, value string) *List { + r.headers.Set(key, value) + + return r +} + +// Q Query in the Lucene query string syntax. +// API name: q +func (r *List) Q(q string) *List { + r.values.Set("q", q) + + return r +} + +// From Starting offset. +// API name: from +func (r *List) From(from int) *List { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies a max number of results to get. +// API name: size +func (r *List) Size(size int) *List { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *List) ErrorTrace(errortrace bool) *List { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *List) FilterPath(filterpaths ...string) *List { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *List) Human(human bool) *List { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *List) Pretty(pretty bool) *List { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/response.go new file mode 100644 index 000000000..8b940408b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
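Again as an illustrative aside (not part of the diff): unlike `Get`, every `List` option is a query-string parameter, so the zero-parameter path `/_application/search_application` is always used. The transport `tp` is assumed built as in the `Get` sketch, and the query string is a placeholder.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list"
)

// listApps pages through search applications. Q, From and Size only touch
// r.values, so they can be chained in any order without affecting the path.
func listApps(tp elastictransport.Interface) {
	res, err := list.New(tp).Q("name:my-*").From(0).Size(20).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d application(s) total\n", res.Count)
	for _, app := range res.Results {
		fmt.Println(app.Name)
	}
}
```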
+// licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package list
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package list
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/list/SearchApplicationsListResponse.ts#L23-L28
+type Response struct {
+	Count   int64                     `json:"count"`
+	Results []types.SearchApplication `json:"results"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go
new file mode 100644
index 000000000..709daa289
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/post_behavioral_analytics_event.go
@@ -0,0 +1,397 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create a behavioral analytics collection event.
+package postbehavioralanalyticsevent
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"slices"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	collectionnameMask = iota + 1
+
+	eventtypeMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostBehavioralAnalyticsEvent struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req any + deferred []func(request any) error + buf *gobytes.Buffer + + paramSet int + + collectionname string + eventtype string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPostBehavioralAnalyticsEvent type alias for index. +type NewPostBehavioralAnalyticsEvent func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent + +// NewPostBehavioralAnalyticsEventFunc returns a new instance of PostBehavioralAnalyticsEvent with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPostBehavioralAnalyticsEventFunc(tp elastictransport.Interface) NewPostBehavioralAnalyticsEvent { + return func(collectionname, eventtype string) *PostBehavioralAnalyticsEvent { + n := New(tp) + + n._collectionname(collectionname) + + n._eventtype(eventtype) + + return n + } +} + +// Create a behavioral analytics collection event. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event +func New(tp elastictransport.Interface) *PostBehavioralAnalyticsEvent { + r := &PostBehavioralAnalyticsEvent{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostBehavioralAnalyticsEvent) Raw(raw io.Reader) *PostBehavioralAnalyticsEvent { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Request(req any) *PostBehavioralAnalyticsEvent { + r.req = req + + return r +} + +// Payload allows to set the request property with the appropriate payload. +func (r *PostBehavioralAnalyticsEvent) Payload(payload any) *PostBehavioralAnalyticsEvent { + r.req = payload + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PostBehavioralAnalyticsEvent) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostBehavioralAnalyticsEvent: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == collectionnameMask|eventtypeMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "collectionname", r.collectionname) + } + path.WriteString(r.collectionname) + path.WriteString("/") + path.WriteString("event") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "eventtype", r.eventtype) + } + path.WriteString(r.eventtype) + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PostBehavioralAnalyticsEvent) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.post_behavioral_analytics_event") + if reader := instrument.RecordRequestBody(ctx, "search_application.post_behavioral_analytics_event", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.post_behavioral_analytics_event") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PostBehavioralAnalyticsEvent query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postbehavioralanalyticsevent.Response +func (r PostBehavioralAnalyticsEvent) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.post_behavioral_analytics_event") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 || slices.Contains([]int{202, 400, 404}, res.StatusCode) { + + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PostBehavioralAnalyticsEvent headers map. +func (r *PostBehavioralAnalyticsEvent) Header(key, value string) *PostBehavioralAnalyticsEvent { + r.headers.Set(key, value) + + return r +} + +// CollectionName The name of the behavioral analytics collection. 
+// API Name: collectionname +func (r *PostBehavioralAnalyticsEvent) _collectionname(collectionname string) *PostBehavioralAnalyticsEvent { + r.paramSet |= collectionnameMask + r.collectionname = collectionname + + return r +} + +// EventType The analytics event type. +// API Name: eventtype +func (r *PostBehavioralAnalyticsEvent) _eventtype(eventtype string) *PostBehavioralAnalyticsEvent { + r.paramSet |= eventtypeMask + r.eventtype = eventtype + + return r +} + +// Debug Whether the response type has to include more details +// API name: debug +func (r *PostBehavioralAnalyticsEvent) Debug(debug bool) *PostBehavioralAnalyticsEvent { + r.values.Set("debug", strconv.FormatBool(debug)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PostBehavioralAnalyticsEvent) ErrorTrace(errortrace bool) *PostBehavioralAnalyticsEvent { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PostBehavioralAnalyticsEvent) FilterPath(filterpaths ...string) *PostBehavioralAnalyticsEvent { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PostBehavioralAnalyticsEvent) Human(human bool) *PostBehavioralAnalyticsEvent { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PostBehavioralAnalyticsEvent) Pretty(pretty bool) *PostBehavioralAnalyticsEvent { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/request.go new file mode 100644 index 000000000..1772d5870 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
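One behavior worth illustrating with a sketch (illustrative only, not part of the diff): this endpoint accepts a free-form JSON event, which is why `Request` is a `json.RawMessage` alias (see request.go just below) and why `Do` also decodes 202, 400 and 404 bodies into the `Response` rather than an `ElasticsearchError` (the `slices.Contains` branch above). The collection name, event type and payload shape here are placeholders, and `tp` is assumed built as in the `Get` sketch.

```go
package example

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent"
)

// recordClick posts one event. Both path parameters are required, so the
// generated constructor takes them up front.
func recordClick(tp elastictransport.Interface) {
	payload := `{"session":{"id":"sess-1"},"user":{"id":"user-1"}}` // illustrative body

	res, err := postbehavioralanalyticsevent.
		NewPostBehavioralAnalyticsEventFunc(tp)("my-collection", "search_click").
		Raw(strings.NewReader(payload)). // Raw bypasses Request/Payload serialisation
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("accepted:", res.Accepted)
}
```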
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostRequest.ts#L24-L58 +type Request = json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := new(json.RawMessage) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/response.go new file mode 100644 index 000000000..c46edac2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package postbehavioralanalyticsevent + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package postbehavioralanalyticsevent +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/post_behavioral_analytics_event/BehavioralAnalyticsEventPostResponse.ts#L22-L47 +type Response struct { + Accepted bool `json:"accepted"` + Event json.RawMessage `json:"event,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/put.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/put.go new file mode 100644 index 000000000..0538cf281 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/put.go @@ -0,0 +1,408 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a search application. +package put + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Put struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPut type alias for index. +type NewPut func(name string) *Put + +// NewPutFunc returns a new instance of Put with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutFunc(tp elastictransport.Interface) NewPut { + return func(name string) *Put { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update a search application. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put +func New(tp elastictransport.Interface) *Put { + r := &Put{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Put) Raw(raw io.Reader) *Put { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Put) Request(req *Request) *Put { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Put) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Put: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Put) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.put") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.put") + if reader := instrument.RecordRequestBody(ctx, "search_application.put", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.put") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Put query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a put.Response +func (r Put) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.put") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Put headers map. +func (r *Put) Header(key, value string) *Put { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to be created or updated. +// API Name: name +func (r *Put) _name(name string) *Put { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create If `true`, this request cannot replace or update existing Search +// Applications. +// API name: create +func (r *Put) Create(create bool) *Put { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *Put) ErrorTrace(errortrace bool) *Put { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Put) FilterPath(filterpaths ...string) *Put { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Put) Human(human bool) *Put { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Put) Pretty(pretty bool) *Put { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Analytics collection associated to the Search Application. +// API name: analytics_collection_name +func (r *Put) AnalyticsCollectionName(name string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AnalyticsCollectionName = &name + + return r +} + +// Indices that are part of the Search Application. +// API name: indices +func (r *Put) Indices(indices ...string) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, v) + + } + return r +} + +// Search template to use on search operations. +// API name: template +func (r *Put) Template(template types.SearchApplicationTemplateVariant) *Put { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Template = template.SearchApplicationTemplateCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/request.go new file mode 100644 index 000000000..70ee79ca0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
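A sketch for the body-carrying variant (illustrative only, not part of the diff): unlike the read-only endpoints, `Put` serialises a request body, and its fluent helpers (`Indices`, `AnalyticsCollectionName`, `Template`) allocate the `Request` lazily, so callers never call `NewRequest` directly. Names below are placeholders; `tp` as in the `Get` sketch.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put"
)

// createApp creates a search application; Create(true) makes the call fail
// rather than replace an existing application of the same name.
func createApp(tp elastictransport.Interface) {
	res, err := put.NewPutFunc(tp)("my-search-app").
		Indices("my-index-1", "my-index-2").
		AnalyticsCollectionName("my-collection").
		Create(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("result: %v\n", res.Result) // e.g. "created"
}
```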
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L57 +type Request = types.SearchApplicationParameters + +// NewRequest returns a Request +func NewRequest() *Request { + r := types.NewSearchApplicationParameters() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/response.go new file mode 100644 index 000000000..4fa0a37c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package put + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 +type Response struct { + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go new file mode 100644 index 000000000..42b0c91e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a behavioral analytics collection. +package putbehavioralanalytics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutBehavioralAnalytics type alias for index. +type NewPutBehavioralAnalytics func(name string) *PutBehavioralAnalytics + +// NewPutBehavioralAnalyticsFunc returns a new instance of PutBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutBehavioralAnalyticsFunc(tp elastictransport.Interface) NewPutBehavioralAnalytics { + return func(name string) *PutBehavioralAnalytics { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create a behavioral analytics collection. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics +func New(tp elastictransport.Interface) *PutBehavioralAnalytics { + r := &PutBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutBehavioralAnalytics) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.put_behavioral_analytics") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.put_behavioral_analytics") + if reader := instrument.RecordRequestBody(ctx, "search_application.put_behavioral_analytics", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.put_behavioral_analytics") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutBehavioralAnalytics query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putbehavioralanalytics.Response +func (r PutBehavioralAnalytics) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.put_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PutBehavioralAnalytics) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.put_behavioral_analytics") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the PutBehavioralAnalytics query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the PutBehavioralAnalytics headers map. +func (r *PutBehavioralAnalytics) Header(key, value string) *PutBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name The name of the analytics collection to be created or updated. +// API Name: name +func (r *PutBehavioralAnalytics) _name(name string) *PutBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutBehavioralAnalytics) ErrorTrace(errortrace bool) *PutBehavioralAnalytics { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutBehavioralAnalytics) FilterPath(filterpaths ...string) *PutBehavioralAnalytics { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutBehavioralAnalytics) Human(human bool) *PutBehavioralAnalytics { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty
+func (r *PutBehavioralAnalytics) Pretty(pretty bool) *PutBehavioralAnalytics {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/response.go
new file mode 100644
index 000000000..23eb9b21e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics/response.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putbehavioralanalytics
+
+// Response holds the response body struct for the package putbehavioralanalytics
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L23-L26
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+	// Name The name of the analytics collection created or updated.
+	Name string `json:"name"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
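A minimal usage sketch for this endpoint, assuming the typed client exposes it as `SearchApplication.PutBehavioralAnalytics`; the collection name is an illustrative placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Create (or update) an analytics collection. Do decodes the
	// Response struct defined above.
	res, err := es.SearchApplication.PutBehavioralAnalytics("my-analytics").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acknowledged=%t name=%s", res.Acknowledged, res.Name)
}
```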
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/render_query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/render_query.go
new file mode 100644
index 000000000..2d406d40a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/render_query.go
@@ -0,0 +1,409 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Render a search application query.
+// Generate an Elasticsearch query using the specified query parameters and the
+// search template associated with the search application or a default template
+// if none is specified.
+// If a parameter used in the search template is not specified in `params`, the
+// parameter's default value will be used.
+// The API returns the specific Elasticsearch query that would be generated and
+// run by calling the search application search API.
+//
+// You must have `read` privileges on the backing alias of the search
+// application.
+package renderquery
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type RenderQuery struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	name string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewRenderQuery type alias for index.
+type NewRenderQuery func(name string) *RenderQuery
+
+// NewRenderQueryFunc returns a new instance of RenderQuery with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewRenderQueryFunc(tp elastictransport.Interface) NewRenderQuery {
+	return func(name string) *RenderQuery {
+		n := New(tp)
+
+		n._name(name)
+
+		return n
+	}
+}
+
+// Render a search application query.
+// Generate an Elasticsearch query using the specified query parameters and the
+// search template associated with the search application or a default template
+// if none is specified.
+// If a parameter used in the search template is not specified in `params`, the
+// parameter's default value will be used.
+// The API returns the specific Elasticsearch query that would be generated and
+// run by calling the search application search API.
+//
+// You must have `read` privileges on the backing alias of the search
+// application.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query
+func New(tp elastictransport.Interface) *RenderQuery {
+	r := &RenderQuery{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *RenderQuery) Raw(raw io.Reader) *RenderQuery {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *RenderQuery) Request(req *Request) *RenderQuery { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RenderQuery) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for RenderQuery: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_render_query") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RenderQuery) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "search_application.render_query")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "search_application.render_query")
+		if reader := instrument.RecordRequestBody(ctx, "search_application.render_query", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "search_application.render_query")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the RenderQuery query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a renderquery.Response
+func (r RenderQuery) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "search_application.render_query")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the RenderQuery headers map.
+func (r *RenderQuery) Header(key, value string) *RenderQuery {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Name The name of the search application to render the query for.
+// API Name: name
+func (r *RenderQuery) _name(name string) *RenderQuery {
+	r.paramSet |= nameMask
+	r.name = name
+
+	return r
+}
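A minimal sketch of driving this builder, assuming the typed client exposes the endpoint as `SearchApplication.RenderQuery` and using the `AddParam` helper defined later in this file; application and parameter names are illustrative:

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Render, but do not run, the query "my-app" would execute for one
	// overriding template parameter.
	_, err = es.SearchApplication.RenderQuery("my-app").
		AddParam("query_string", json.RawMessage(`"pugs"`)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
}
```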
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *RenderQuery) ErrorTrace(errortrace bool) *RenderQuery {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *RenderQuery) FilterPath(filterpaths ...string) *RenderQuery {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the
+// human-readable values will be omitted. This makes sense for responses
+// consumed only by machines.
+// API name: human
+func (r *RenderQuery) Human(human bool) *RenderQuery {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *RenderQuery) Pretty(pretty bool) *RenderQuery {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// API name: params
+func (r *RenderQuery) Params(params map[string]json.RawMessage) *RenderQuery {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Params = params
+	return r
+}
+
+func (r *RenderQuery) AddParam(key string, value json.RawMessage) *RenderQuery {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Guard against a nil Params map (possible when the request was set
+	// via Request) so the assignment below cannot panic.
+	if r.req.Params == nil {
+		r.req.Params = make(map[string]json.RawMessage)
+	}
+
+	r.req.Params[key] = value
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/request.go
new file mode 100644
index 000000000..8a3fa66a6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/request.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package renderquery + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/render_query/SearchApplicationsRenderQueryRequest.ts#L24-L54 +type Request struct { + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Renderquery request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/response.go new file mode 100644 index 000000000..1bf6b8f7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package renderquery + +// Response holds the response body struct for the package renderquery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/render_query/SearchApplicationsRenderQueryResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/request.go new file mode 100644 index 000000000..92a749ce1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/request.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L61 +type Request struct { + + // Params Query parameters specific to this request, which will override any defaults + // specified in the template. + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Search request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/response.go new file mode 100644 index 000000000..0b1e39e24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/response.go @@ -0,0 +1,813 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/search/SearchApplicationsSearchResponse.ts#L22-L25 +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Hits The returned documents and metadata. + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + // ScrollId_ The identifier for the search and its search context. + // You can use this scroll ID with the scroll API to retrieve the next batch of + // search results for the request. + // This property is returned only if the `scroll` query parameter is specified + // in the request. + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ A count of shards used for the request. + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + // TimedOut If `true`, the request timed out before completion; returned results may be + // partial or empty. + TimedOut bool `json:"timed_out"` + // Took The number of milliseconds it took Elasticsearch to run the request. + // This value is calculated by measuring the time elapsed between receipt of a + // request on the coordinating node and the time at which the coordinating node + // is ready to send the response. 
+ // It includes: + // + // * Communication time between the coordinating node and data nodes + // * Time the request spends in the search thread pool, queued for execution + // * Actual run time + // + // It does not include: + // + // * Time needed to send the request to Elasticsearch + // * Time needed to serialize the JSON response + // * Time needed to send the response to a client + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := 
types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := types.NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + 
s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+
+					default:
+						o := make(map[string]any, 0)
+						if err := dec.Decode(&o); err != nil {
+							return fmt.Errorf("%s | %w", "Suggest", err)
+						}
+						s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+					}
+				} else {
+					return errors.New("cannot decode JSON for field Suggest")
+				}
+				} else {
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Suggest", err)
+					}
+					s.Suggest[value] = append(s.Suggest[value], o)
+				}
+			}
+			}
+
+		case "terminated_early":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TerminatedEarly", err)
+				}
+				s.TerminatedEarly = &value
+			case bool:
+				s.TerminatedEarly = &v
+			}
+
+		case "timed_out":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TimedOut", err)
+				}
+				s.TimedOut = value
+			case bool:
+				s.TimedOut = v
+			}
+
+		case "took":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Took", err)
+				}
+				s.Took = value
+			case float64:
+				f := int64(v)
+				s.Took = f
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/search.go
new file mode 100644
index 000000000..1782cd3e3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search/search.go
@@ -0,0 +1,412 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Run a search application search.
+// Generate and run an Elasticsearch query that uses the specified query
+// parameters and the search template associated with the search application or
+// a default template.
+// Unspecified template parameters are assigned their default values if
+// applicable.
+package search
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	nameMask = iota + 1
+)
+
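A minimal sketch of running a search application search with this builder. Everything here is grounded in the generated `search` package (its `Request` type and the `Request`/`Do` methods) except the typed-client accessor `SearchApplication.Search` and the names, which are illustrative assumptions:

```go
package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Override one template parameter; unspecified parameters fall back
	// to the defaults declared in the application's search template.
	req := &search.Request{Params: map[string]json.RawMessage{
		"query_string": json.RawMessage(`"pugs"`),
	}}

	res, err := es.SearchApplication.Search("my-app").
		Request(req).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("took %dms, %d hits returned", res.Took, len(res.Hits.Hits))
}
```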
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Search struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSearch type alias for index. +type NewSearch func(name string) *Search + +// NewSearchFunc returns a new instance of Search with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSearchFunc(tp elastictransport.Interface) NewSearch { + return func(name string) *Search { + n := New(tp) + + n._name(name) + + return n + } +} + +// Run a search application search. +// Generate and run an Elasticsearch query that uses the specified query +// parameteter and the search template associated with the search application or +// default template. +// Unspecified template parameters are assigned their default values if +// applicable. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search +func New(tp elastictransport.Interface) *Search { + r := &Search{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Search) Raw(raw io.Reader) *Search { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Search) Request(req *Request) *Search { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
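
Raw and Request are alternative body sources; as the doc comment above states, Raw takes precedence when both are set. A minimal sketch of the two styles, assuming the usual imports and a `tp elastictransport.Interface` in scope (application name and template parameter are hypothetical):

// Typed body via the generated Request struct:
req := search.NewRequest()
req.Params = map[string]json.RawMessage{
	"query_string": json.RawMessage(`"kayak"`),
}
s := search.NewSearchFunc(tp)("my-app").Request(req)

// Pre-serialized body; if also set, this overrides the typed Request:
s.Raw(strings.NewReader(`{"params":{"query_string":"kayak"}}`))
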
+func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Search: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
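
HttpRequest can also be used standalone to obtain the prepared *http.Request without sending it, for example to inspect the path and headers the builder fills in. A sketch, assuming `context` and `fmt` are imported and `tp` is an elastictransport.Interface:

req, err := search.NewSearchFunc(tp)("my-app").HttpRequest(context.Background())
if err != nil {
	return err
}
// POST /_application/search_application/my-app/_search
fmt.Println(req.Method, req.URL.Path)
// application/vnd.elasticsearch+json;compatible-with=9
fmt.Println(req.Header.Get("Accept"))
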
+func (r Search) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "search_application.search") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "search_application.search") + if reader := instrument.RecordRequestBody(ctx, "search_application.search", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "search_application.search") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Search query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a search.Response +func (r Search) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "search_application.search") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Search headers map. +func (r *Search) Header(key, value string) *Search { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to be searched. +// API Name: name +func (r *Search) _name(name string) *Search { + r.paramSet |= nameMask + r.name = name + + return r +} + +// TypedKeys Determines whether aggregation names are prefixed by their respective types +// in the response. +// API name: typed_keys +func (r *Search) TypedKeys(typedkeys bool) *Search { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
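
Putting the pieces together, a hedged end-to-end sketch of a search application call (transport construction elided; the application name and template parameter are hypothetical). Note that Do forces typed_keys=true so the Response decoder can dispatch on the type#name prefixes shown earlier:

import (
	"context"
	"encoding/json"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search"
)

func runAppSearch(ctx context.Context, tp elastictransport.Interface) error {
	res, err := search.NewSearchFunc(tp)("my-app").
		Params(map[string]json.RawMessage{
			"query_string": json.RawMessage(`"kayak"`),
		}).
		Do(ctx)
	if err != nil {
		return err // non-2xx responses surface as *types.ElasticsearchError
	}
	_ = res.Hits // hits, aggregations, suggest, etc. per the Response struct
	return nil
}
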
+// API name: error_trace +func (r *Search) ErrorTrace(errortrace bool) *Search { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Search) FilterPath(filterpaths ...string) *Search { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Search) Human(human bool) *Search { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Search) Pretty(pretty bool) *Search { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Query parameters specific to this request, which will override any defaults +// specified in the template. +// API name: params +func (r *Search) Params(params map[string]json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Params = params + return r +} + +func (r *Search) AddParam(key string, value json.RawMessage) *Search { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Params == nil { + r.req.Params = make(map[string]json.RawMessage) + } else { + tmp = r.req.Params + } + + tmp[key] = value + + r.req.Params = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/activate_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/activate_user_profile.go new file mode 100644 index 000000000..94f8ee05b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/activate_user_profile.go @@ -0,0 +1,442 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Activate a user profile. +// +// Create or update a user profile on behalf of another user. 
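
On the Params/AddParam pair that closes search.go above: Params replaces the whole parameter map, while AddParam upserts a single key and lazily allocates the map. A fragment with hypothetical template parameters:

s := search.NewSearchFunc(tp)("my-app")
s.AddParam("query_string", json.RawMessage(`"kayak"`))
s.AddParam("size", json.RawMessage(`25`))
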
+// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. +// +// When updating a profile document, the API enables the document if it was +// disabled. +// Any updates do not change existing content for either the `labels` or `data` +// fields. +package activateuserprofile + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ActivateUserProfile struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewActivateUserProfile type alias for index. +type NewActivateUserProfile func() *ActivateUserProfile + +// NewActivateUserProfileFunc returns a new instance of ActivateUserProfile with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewActivateUserProfileFunc(tp elastictransport.Interface) NewActivateUserProfile { + return func() *ActivateUserProfile { + n := New(tp) + + return n + } +} + +// Activate a user profile. +// +// Create or update a user profile on behalf of another user. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// The calling application must have either an `access_token` or a combination +// of `username` and `password` for the user that the profile document is +// intended for. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// This API creates or updates a profile document for end users with information +// that is extracted from the user's authentication object including `username`, +// `full_name,` `roles`, and the authentication realm. +// For example, in the JWT `access_token` case, the profile user's `username` is +// extracted from the JWT token claim pointed to by the `claims.principal` +// setting of the JWT realm that authenticated the token. 
+// +// When updating a profile document, the API enables the document if it was +// disabled. +// Any updates do not change existing content for either the `labels` or `data` +// fields. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile +func New(tp elastictransport.Interface) *ActivateUserProfile { + r := &ActivateUserProfile{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ActivateUserProfile) Raw(raw io.Reader) *ActivateUserProfile { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ActivateUserProfile) Request(req *Request) *ActivateUserProfile { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ActivateUserProfile) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ActivateUserProfile: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString("_activate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ActivateUserProfile) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.activate_user_profile") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.activate_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.activate_user_profile", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.activate_user_profile") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ActivateUserProfile query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a activateuserprofile.Response +func (r ActivateUserProfile) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.activate_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ActivateUserProfile headers map. +func (r *ActivateUserProfile) Header(key, value string) *ActivateUserProfile { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ActivateUserProfile) ErrorTrace(errortrace bool) *ActivateUserProfile { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *ActivateUserProfile) FilterPath(filterpaths ...string) *ActivateUserProfile { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ActivateUserProfile) Human(human bool) *ActivateUserProfile { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ActivateUserProfile) Pretty(pretty bool) *ActivateUserProfile { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The user's Elasticsearch access token or JWT. +// Both `access` and `id` JWT token types are supported and they depend on the +// underlying JWT realm configuration. +// If you specify the `access_token` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: access_token +func (r *ActivateUserProfile) AccessToken(accesstoken string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AccessToken = &accesstoken + + return r +} + +// The type of grant. +// API name: grant_type +func (r *ActivateUserProfile) GrantType(granttype granttype.GrantType) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GrantType = granttype + return r +} + +// The user's password. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: password +func (r *ActivateUserProfile) Password(password string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Password = &password + + return r +} + +// The username that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: username +func (r *ActivateUserProfile) Username(username string) *ActivateUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/request.go new file mode 100644 index 000000000..112c627f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/request.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
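
A hedged usage sketch for the password grant described above (username and password are hypothetical; granttype.Password is assumed to be the generated enum constant for the "password" grant type):

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype"
)

func activate(ctx context.Context, tp elastictransport.Interface) (string, error) {
	res, err := activateuserprofile.NewActivateUserProfileFunc(tp)().
		GrantType(granttype.Password). // assumed enum value for the "password" grant
		Username("jacknich").
		Password("l0ng-r4nd0m-p@ssw0rd").
		Do(ctx)
	if err != nil {
		return "", err
	}
	return res.Uid, nil // the activated profile's unique ID
}
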
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package activateuserprofile + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype" +) + +// Request holds the request body struct for the package activateuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/activate_user_profile/Request.ts#L23-L76 +type Request struct { + + // AccessToken The user's Elasticsearch access token or JWT. + // Both `access` and `id` JWT token types are supported and they depend on the + // underlying JWT realm configuration. + // If you specify the `access_token` grant type, this parameter is required. + // It is not valid with other grant types. + AccessToken *string `json:"access_token,omitempty"` + // GrantType The type of grant. + GrantType granttype.GrantType `json:"grant_type"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Password *string `json:"password,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Activateuserprofile request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/response.go new file mode 100644 index 000000000..c41ebbbdd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile/response.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
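
FromJSON, defined above, offers an escape hatch for building the body from arbitrary JSON instead of the typed setters; the result can then be passed to the builder via .Request(req). A sketch:

req, err := activateuserprofile.NewRequest().FromJSON(`{
  "grant_type": "password",
  "username":   "jacknich",
  "password":   "l0ng-r4nd0m-p@ssw0rd"
}`)
if err != nil {
	return err
}
_ = req // hand to ActivateUserProfile via .Request(req)
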
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package activateuserprofile + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package activateuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/activate_user_profile/Response.ts#L22-L25 +type Response struct { + Data map[string]json.RawMessage `json:"data"` + Doc_ types.UserProfileHitMetadata `json:"_doc"` + Enabled *bool `json:"enabled,omitempty"` + Labels map[string]json.RawMessage `json:"labels"` + LastSynchronized int64 `json:"last_synchronized"` + Uid string `json:"uid"` + User types.UserProfileUser `json:"user"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Data: make(map[string]json.RawMessage, 0), + Labels: make(map[string]json.RawMessage, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/authenticate.go new file mode 100644 index 000000000..2efc5c187 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/authenticate.go @@ -0,0 +1,339 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Authenticate a user. +// +// Authenticates a user and returns information about the authenticated user. +// Include the user information in a [basic auth +// header](https://en.wikipedia.org/wiki/Basic_access_authentication). +// A successful call returns a JSON structure that shows user information such +// as their username, the roles that are assigned to the user, any assigned +// metadata, and information about the realms that authenticated and authorized +// the user. +// If the user cannot be authenticated, this API returns a 401 status code. +package authenticate + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
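
Data and Labels in the Response above are kept as raw JSON so callers can decode application-specific payloads themselves. A fragment, where `res` is the *activateuserprofile.Response returned by Do and "direction" is a hypothetical label key:

var direction string
if raw, ok := res.Labels["direction"]; ok {
	_ = json.Unmarshal(raw, &direction)
}
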
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Authenticate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewAuthenticate type alias for index. +type NewAuthenticate func() *Authenticate + +// NewAuthenticateFunc returns a new instance of Authenticate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewAuthenticateFunc(tp elastictransport.Interface) NewAuthenticate { + return func() *Authenticate { + n := New(tp) + + return n + } +} + +// Authenticate a user. +// +// Authenticates a user and returns information about the authenticated user. +// Include the user information in a [basic auth +// header](https://en.wikipedia.org/wiki/Basic_access_authentication). +// A successful call returns a JSON structure that shows user information such +// as their username, the roles that are assigned to the user, any assigned +// metadata, and information about the realms that authenticated and authorized +// the user. +// If the user cannot be authenticated, this API returns a 401 status code. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate +func New(tp elastictransport.Interface) *Authenticate { + r := &Authenticate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Authenticate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("_authenticate") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Authenticate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.authenticate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.authenticate") + if reader := instrument.RecordRequestBody(ctx, "security.authenticate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.authenticate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Authenticate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a authenticate.Response +func (r Authenticate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Authenticate) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Authenticate query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Authenticate headers map. +func (r *Authenticate) Header(key, value string) *Authenticate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Authenticate) ErrorTrace(errortrace bool) *Authenticate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Authenticate) FilterPath(filterpaths ...string) *Authenticate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Authenticate) Human(human bool) *Authenticate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Authenticate) Pretty(pretty bool) *Authenticate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/response.go new file mode 100644 index 000000000..2ea69fdc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
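
A hedged sketch of the two entry points defined above: IsSuccess for a quick credential check and Do for the decoded response. Credentials are supplied by the transport itself (for example basic auth headers configured on it):

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate"
)

func whoAmI(ctx context.Context, tp elastictransport.Interface) error {
	ok, err := authenticate.New(tp).IsSuccess(ctx)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("credentials rejected")
	}
	res, err := authenticate.New(tp).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println(res.Username, res.Roles, res.AuthenticationType)
	return nil
}
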
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package authenticate + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package authenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/authenticate/SecurityAuthenticateResponse.ts#L24-L42 +type Response struct { + ApiKey *types.AuthenticateApiKey `json:"api_key,omitempty"` + AuthenticationRealm types.RealmInfo `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm types.RealmInfo `json:"lookup_realm"` + Metadata types.Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token *types.AuthenticateToken `json:"token,omitempty"` + Username string `json:"username"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/bulk_delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/bulk_delete_role.go new file mode 100644 index 000000000..f3f4463a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/bulk_delete_role.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Bulk delete roles. +// +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The bulk delete roles API cannot delete roles that are defined in roles +// files. 
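
The optional pointer fields in the authenticate Response above indicate how the caller authenticated; a fragment distinguishing the cases:

switch {
case res.ApiKey != nil:
	// authenticated with an API key
case res.Token != nil:
	// authenticated with a bearer token
default:
	// realm-based authentication; see res.AuthenticationRealm
}
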
+package bulkdeleterole + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type BulkDeleteRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewBulkDeleteRole type alias for index. +type NewBulkDeleteRole func() *BulkDeleteRole + +// NewBulkDeleteRoleFunc returns a new instance of BulkDeleteRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkDeleteRoleFunc(tp elastictransport.Interface) NewBulkDeleteRole { + return func() *BulkDeleteRole { + n := New(tp) + + return n + } +} + +// Bulk delete roles. +// +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The bulk delete roles API cannot delete roles that are defined in roles +// files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role +func New(tp elastictransport.Interface) *BulkDeleteRole { + r := &BulkDeleteRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *BulkDeleteRole) Raw(raw io.Reader) *BulkDeleteRole { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *BulkDeleteRole) Request(req *Request) *BulkDeleteRole { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *BulkDeleteRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for BulkDeleteRole: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r BulkDeleteRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.bulk_delete_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_delete_role") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_delete_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_delete_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the BulkDeleteRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulkdeleterole.Response +func (r BulkDeleteRole) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_delete_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the BulkDeleteRole headers map. +func (r *BulkDeleteRole) Header(key, value string) *BulkDeleteRole { + r.headers.Set(key, value) + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *BulkDeleteRole) Refresh(refresh refresh.Refresh) *BulkDeleteRole { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *BulkDeleteRole) ErrorTrace(errortrace bool) *BulkDeleteRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *BulkDeleteRole) FilterPath(filterpaths ...string) *BulkDeleteRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *BulkDeleteRole) Human(human bool) *BulkDeleteRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *BulkDeleteRole) Pretty(pretty bool) *BulkDeleteRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// An array of role names to delete +// API name: names +func (r *BulkDeleteRole) Names(names ...string) *BulkDeleteRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range names { + + r.req.Names = append(r.req.Names, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/request.go new file mode 100644 index 000000000..efcee0950 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkdeleterole + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package bulkdeleterole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_delete_role/SecurityBulkDeleteRoleRequest.ts#L23-L50 +type Request struct { + + // Names An array of role names to delete + Names []string `json:"names"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Bulkdeleterole request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/response.go new file mode 100644 index 000000000..ba5efd322 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
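
A hedged usage sketch of the bulk role deletion API defined above (role names are hypothetical; refresh.True is assumed to be the generated enum constant for the `true` refresh policy described in the Refresh doc comment):

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func deleteRoles(ctx context.Context, tp elastictransport.Interface) (*bulkdeleterole.Response, error) {
	return bulkdeleterole.NewBulkDeleteRoleFunc(tp)().
		Names("my-role-a", "my-role-b").
		Refresh(refresh.True).
		Do(ctx)
}
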
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkdeleterole + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package bulkdeleterole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_delete_role/SecurityBulkDeleteRoleResponse.ts#L22-L37 +type Response struct { + + // Deleted Array of deleted roles + Deleted []string `json:"deleted,omitempty"` + // Errors Present if any deletes resulted in errors + Errors *types.BulkError `json:"errors,omitempty"` + // NotFound Array of roles that could not be found + NotFound []string `json:"not_found,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/bulk_put_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/bulk_put_role.go new file mode 100644 index 000000000..668fb272e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/bulk_put_role.go @@ -0,0 +1,386 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Bulk create or update roles. +// +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The bulk create or update roles API cannot update roles that are defined in +// roles files. +package bulkputrole + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
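
The bulkdeleterole Response above partitions the requested names into three buckets; a fragment handling each, assuming `log` is imported and `res` comes from the deleteRoles sketch earlier:

for _, name := range res.Deleted {
	log.Printf("deleted role %s", name)
}
if len(res.NotFound) > 0 {
	log.Printf("roles not found: %v", res.NotFound)
}
if res.Errors != nil {
	// per-role failures, e.g. roles defined in roles files
	log.Printf("some deletions failed: %v", res.Errors)
}
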
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type BulkPutRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewBulkPutRole type alias for index. +type NewBulkPutRole func() *BulkPutRole + +// NewBulkPutRoleFunc returns a new instance of BulkPutRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkPutRoleFunc(tp elastictransport.Interface) NewBulkPutRole { + return func() *BulkPutRole { + n := New(tp) + + return n + } +} + +// Bulk create or update roles. +// +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The bulk create or update roles API cannot update roles that are defined in +// roles files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role +func New(tp elastictransport.Interface) *BulkPutRole { + r := &BulkPutRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *BulkPutRole) Raw(raw io.Reader) *BulkPutRole { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *BulkPutRole) Request(req *Request) *BulkPutRole { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
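// Sketch: HttpRequest (below) builds the request without executing it,
// which is useful for inspection or custom transports. Assuming any
// elastictransport.Interface value `tp`, the builder yields a POST to
// /_security/role carrying the compatible-with=9 negotiation headers:
//
//	req, _ := NewRequest().FromJSON(`{"roles":{}}`)
//	httpReq, err := New(tp).Request(req).HttpRequest(context.Background())
//	if err == nil {
//		fmt.Println(httpReq.Method, httpReq.URL.Path) // POST /_security/role
//	}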
+func (r *BulkPutRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for BulkPutRole: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r BulkPutRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.bulk_put_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_put_role") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_put_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_put_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the BulkPutRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulkputrole.Response +func (r BulkPutRole) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_put_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + 
return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the BulkPutRole headers map. +func (r *BulkPutRole) Header(key, value string) *BulkPutRole { + r.headers.Set(key, value) + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *BulkPutRole) Refresh(refresh refresh.Refresh) *BulkPutRole { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *BulkPutRole) ErrorTrace(errortrace bool) *BulkPutRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *BulkPutRole) FilterPath(filterpaths ...string) *BulkPutRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *BulkPutRole) Human(human bool) *BulkPutRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
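// Sketch of the error contract implemented in Do above: error responses
// are decoded into *types.ElasticsearchError and returned as the error
// value, so callers can branch with errors.As (assumes a typed client
// `es` and a prepared *Request `req`):
//
//	res, err := es.Security.BulkPutRole().Request(req).Do(ctx)
//	var esErr *types.ElasticsearchError
//	if errors.As(err, &esErr) {
//		fmt.Println("rejected with HTTP status:", esErr.Status)
//	} else if err == nil {
//		fmt.Println("created:", res.Created, "updated:", res.Updated)
//	}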
+// API name: pretty +func (r *BulkPutRole) Pretty(pretty bool) *BulkPutRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A dictionary of role name to RoleDescriptor objects to add or update +// API name: roles +func (r *BulkPutRole) Roles(roles map[string]types.RoleDescriptor) *BulkPutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Roles = roles + return r +} + +func (r *BulkPutRole) AddRole(key string, value types.RoleDescriptorVariant) *BulkPutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.Roles == nil { + r.req.Roles = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.Roles + } + + tmp[key] = *value.RoleDescriptorCaster() + + r.req.Roles = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/request.go new file mode 100644 index 000000000..458cccaa5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
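// Sketch: the Request type in this file maps role names to
// types.RoleDescriptor values. Populating it from raw JSON via FromJSON
// (defined below) avoids hand-building descriptors; the role body here
// is a hypothetical example:
//
//	req, err := NewRequest().FromJSON(`{
//	    "roles": {
//	        "logs-reader": { "cluster": ["monitor"] }
//	    }
//	}`)
//	if err != nil {
//		log.Fatal(err)
//	}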
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkputrole + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package bulkputrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_put_role/SecurityBulkPutRoleRequest.ts#L25-L52 +type Request struct { + + // Roles A dictionary of role name to RoleDescriptor objects to add or update + Roles map[string]types.RoleDescriptor `json:"roles"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Roles: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Bulkputrole request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/response.go new file mode 100644 index 000000000..780732e3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkputrole + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package bulkputrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_put_role/SecurityBulkPutRoleResponse.ts#L22-L41 +type Response struct { + + // Created Array of created roles + Created []string `json:"created,omitempty"` + // Errors Present if any updates resulted in errors + Errors *types.BulkError `json:"errors,omitempty"` + // Noop Array of role names without any changes + Noop []string `json:"noop,omitempty"` + // Updated Array of updated roles + Updated []string `json:"updated,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go new file mode 100644 index 000000000..6357b4bc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go @@ -0,0 +1,477 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. +// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. 
+// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. +package bulkupdateapikeys + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type BulkUpdateApiKeys struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewBulkUpdateApiKeys type alias for index. +type NewBulkUpdateApiKeys func() *BulkUpdateApiKeys + +// NewBulkUpdateApiKeysFunc returns a new instance of BulkUpdateApiKeys with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkUpdateApiKeysFunc(tp elastictransport.Interface) NewBulkUpdateApiKeys { + return func() *BulkUpdateApiKeys { + n := New(tp) + + return n + } +} + +// Bulk update API keys. +// Update the attributes for multiple API keys. +// +// IMPORTANT: It is not possible to use an API key as the authentication +// credential for this API. To update API keys, the owner user's credentials are +// required. +// +// This API is similar to the update API key API but enables you to apply the +// same update to multiple API keys in one API call. This operation can greatly +// improve performance over making individual updates. +// +// It is not possible to update expired or invalidated API keys. +// +// This API supports updates to API key access scope, metadata and expiration. +// The access scope of each API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change an API key's access scope. This change can occur +// if the owner user's permissions have changed since the API key was created or +// last modified. +// +// A successful request returns a JSON structure that contains the IDs of all +// updated API keys, the IDs of API keys that already had the requested changes +// and did not require an update, and error details for any failed update. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys +func New(tp elastictransport.Interface) *BulkUpdateApiKeys { + r := &BulkUpdateApiKeys{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
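// Sketch of the precedence rule documented above: when both Raw and
// Request are set, the Raw reader is sent as-is and the typed Request is
// ignored. The payload and ids are hypothetical; `es` is an assumed
// typed client:
//
//	body := strings.NewReader(`{"ids":["key-1","key-2"]}`)
//	res, err := es.Security.BulkUpdateApiKeys().Raw(body).Do(ctx)
//	if err == nil {
//		fmt.Println("updated:", res.Updated)
//	}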
+func (r *BulkUpdateApiKeys) Raw(raw io.Reader) *BulkUpdateApiKeys { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *BulkUpdateApiKeys) Request(req *Request) *BulkUpdateApiKeys { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *BulkUpdateApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for BulkUpdateApiKeys: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString("_bulk_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
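// Sketch: Perform (below) returns the raw *http.Response without
// decoding, for callers that want the untouched payload; Do layers typed
// decoding on top of it. The caller owns and must close the body:
//
//	res, err := es.Security.BulkUpdateApiKeys().Request(req).Perform(ctx)
//	if err != nil {
//		return err
//	}
//	defer res.Body.Close()
//	raw, _ := io.ReadAll(res.Body)
//	fmt.Println(string(raw))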
+func (r BulkUpdateApiKeys) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.bulk_update_api_keys") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.bulk_update_api_keys") + if reader := instrument.RecordRequestBody(ctx, "security.bulk_update_api_keys", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.bulk_update_api_keys") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the BulkUpdateApiKeys query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulkupdateapikeys.Response +func (r BulkUpdateApiKeys) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.bulk_update_api_keys") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the BulkUpdateApiKeys headers map. +func (r *BulkUpdateApiKeys) Header(key, value string) *BulkUpdateApiKeys { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *BulkUpdateApiKeys) ErrorTrace(errortrace bool) *BulkUpdateApiKeys { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *BulkUpdateApiKeys) FilterPath(filterpaths ...string) *BulkUpdateApiKeys { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *BulkUpdateApiKeys) Human(human bool) *BulkUpdateApiKeys { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *BulkUpdateApiKeys) Pretty(pretty bool) *BulkUpdateApiKeys { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Expiration time for the API keys. +// By default, API keys never expire. +// This property can be omitted to leave the value unchanged. +// API name: expiration +func (r *BulkUpdateApiKeys) Expiration(duration types.DurationVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// The API key identifiers. +// API name: ids +func (r *BulkUpdateApiKeys) Ids(ids ...string) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Ids = make([]string, len(ids)) + r.req.Ids = ids + + return r +} + +// Arbitrary nested metadata to associate with the API keys. +// Within the `metadata` object, top-level keys beginning with an underscore +// (`_`) are reserved for system usage. +// Any information specified with this parameter fully replaces metadata +// previously associated with the API key. +// API name: metadata +func (r *BulkUpdateApiKeys) Metadata(metadata types.MetadataVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The role descriptors to assign to the API keys. +// An API key's effective permissions are an intersection of its assigned +// privileges and the point-in-time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, supply the `role_descriptors` parameter as an +// empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter. +// The structure of a role descriptor is the same as the request for the create +// API keys API. 
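// Sketch: per the note above, the owner-permission snapshot is refreshed
// on every call, so even a request carrying only ids can change a key's
// effective scope. Ids are hypothetical; `es` is an assumed typed client:
//
//	res, err := es.Security.BulkUpdateApiKeys().
//		Ids("key-1", "key-2").
//		Do(ctx)
//	if err == nil {
//		fmt.Println("updated:", res.Updated, "unchanged:", res.Noops)
//	}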
+// API name: role_descriptors +func (r *BulkUpdateApiKeys) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *BulkUpdateApiKeys) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *BulkUpdateApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + + r.req.RoleDescriptors = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/request.go new file mode 100644 index 000000000..65e64dd3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/request.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkupdateapikeys + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysRequest.ts#L26-L83 +type Request struct { + + // Expiration Expiration time for the API keys. + // By default, API keys never expire. + // This property can be omitted to leave the value unchanged. + Expiration types.Duration `json:"expiration,omitempty"` + // Ids The API key identifiers. + Ids []string `json:"ids"` + // Metadata Arbitrary nested metadata to associate with the API keys. + // Within the `metadata` object, top-level keys beginning with an underscore + // (`_`) are reserved for system usage. + // Any information specified with this parameter fully replaces metadata + // previously associated with the API key. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleDescriptors The role descriptors to assign to the API keys. + // An API key's effective permissions are an intersection of its assigned + // privileges and the point-in-time snapshot of permissions of the owner user. 
+ // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, supply the `role_descriptors` parameter as an + // empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter. + // The structure of a role descriptor is the same as the request for the create + // API keys API. + RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + RoleDescriptors: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Bulkupdateapikeys request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]types.RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/response.go new file mode 100644 index 000000000..2f2ad4cdf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
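// Sketch: the custom UnmarshalJSON in request.go (above) accepts "ids"
// as either a bare string or an array, so both payloads below decode to
// the same request:
//
//	var a, b Request
//	_ = json.Unmarshal([]byte(`{"ids":"key-1"}`), &a)
//	_ = json.Unmarshal([]byte(`{"ids":["key-1"]}`), &b)
//	// a.Ids and b.Ids are both []string{"key-1"}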
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package bulkupdateapikeys + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package bulkupdateapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/bulk_update_api_keys/SecurityBulkUpdateApiKeysResponse.ts#L22-L28 +type Response struct { + Errors *types.BulkError `json:"errors,omitempty"` + Noops []string `json:"noops"` + Updated []string `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/change_password.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/change_password.go new file mode 100644 index 000000000..f7cbb0932 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/change_password.go @@ -0,0 +1,413 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Change passwords. +// +// Change the passwords of users in the native realm and built-in users. +package changepassword + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ChangePassword struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewChangePassword type alias for index. +type NewChangePassword func() *ChangePassword + +// NewChangePasswordFunc returns a new instance of ChangePassword with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewChangePasswordFunc(tp elastictransport.Interface) NewChangePassword { + return func() *ChangePassword { + n := New(tp) + + return n + } +} + +// Change passwords. +// +// Change the passwords of users in the native realm and built-in users. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password +func New(tp elastictransport.Interface) *ChangePassword { + r := &ChangePassword{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ChangePassword) Raw(raw io.Reader) *ChangePassword { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ChangePassword) Request(req *Request) *ChangePassword { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ChangePassword) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ChangePassword: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + path.WriteString("/") + path.WriteString("_password") + + method = http.MethodPut + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString("_password") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
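// Sketch of the path selection implemented in HttpRequest above: setting
// Username targets that user's password, omitting it targets the calling
// user. Names and passwords are hypothetical; `es` is an assumed typed
// client:
//
//	// PUT /_security/user/jacknich/_password
//	_, err := es.Security.ChangePassword().
//		Username("jacknich").
//		Password("new-secret-value").
//		Do(ctx)
//
//	// PUT /_security/user/_password (current user)
//	_, err = es.Security.ChangePassword().Password("new-secret-value").Do(ctx)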
+func (r ChangePassword) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.change_password") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.change_password") + if reader := instrument.RecordRequestBody(ctx, "security.change_password", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.change_password") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ChangePassword query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a changepassword.Response +func (r ChangePassword) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.change_password") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ChangePassword headers map. +func (r *ChangePassword) Header(key, value string) *ChangePassword { + r.headers.Set(key, value) + + return r +} + +// Username The user whose password you want to change. If you do not specify this +// parameter, the password is changed for the current user. +// API Name: username +func (r *ChangePassword) Username(username string) *ChangePassword { + r.paramSet |= usernameMask + r.username = username + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. 
+// API name: refresh +func (r *ChangePassword) Refresh(refresh refresh.Refresh) *ChangePassword { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ChangePassword) ErrorTrace(errortrace bool) *ChangePassword { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ChangePassword) FilterPath(filterpaths ...string) *ChangePassword { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ChangePassword) Human(human bool) *ChangePassword { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ChangePassword) Pretty(pretty bool) *ChangePassword { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The new password value. Passwords must be at least 6 characters long. +// API name: password +func (r *ChangePassword) Password(password string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Password = &password + + return r +} + +// A hash of the new password value. This must be produced using the same +// hashing algorithm as has been configured for password storage. For more +// details, +// see the explanation of the `xpack.security.authc.password_hashing.algorithm` +// setting. +// API name: password_hash +func (r *ChangePassword) PasswordHash(passwordhash string) *ChangePassword { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PasswordHash = &passwordhash + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/request.go new file mode 100644 index 000000000..acd3a242a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/request.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package changepassword + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package changepassword +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L65 +type Request struct { + + // Password The new password value. Passwords must be at least 6 characters long. + Password *string `json:"password,omitempty"` + // PasswordHash A hash of the new password value. This must be produced using the same + // hashing algorithm as has been configured for password storage. For more + // details, + // see the explanation of the `xpack.security.authc.password_hashing.algorithm` + // setting. + PasswordHash *string `json:"password_hash,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Changepassword request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "password_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PasswordHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PasswordHash = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/response.go new file mode 100644 index 000000000..cdbb8310e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
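// Sketch: the request body (request.go above) carries either a plaintext
// `password` or a pre-hashed `password_hash` (hashed with the algorithm
// configured by xpack.security.authc.password_hashing.algorithm); supply
// one of the two, not both. The value here is a placeholder:
//
//	req, err := NewRequest().FromJSON(`{"password_hash":"<precomputed-hash>"}`)
//	if err != nil {
//		log.Fatal(err)
//	}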
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package changepassword + +// Response holds the response body struct for the package changepassword +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/clear_api_key_cache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/clear_api_key_cache.go new file mode 100644 index 000000000..511a46468 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/clear_api_key_cache.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the API key cache. +// +// Evict a subset of all entries from the API key cache. +// The cache is also automatically cleared on state changes of the security +// index. +package clearapikeycache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idsMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearApiKeyCache struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + ids string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearApiKeyCache type alias for index. +type NewClearApiKeyCache func(ids string) *ClearApiKeyCache + +// NewClearApiKeyCacheFunc returns a new instance of ClearApiKeyCache with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearApiKeyCacheFunc(tp elastictransport.Interface) NewClearApiKeyCache { + return func(ids string) *ClearApiKeyCache { + n := New(tp) + + n._ids(ids) + + return n + } +} + +// Clear the API key cache. +// +// Evict a subset of all entries from the API key cache. +// The cache is also automatically cleared on state changes of the security +// index. 
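// Sketch: unlike most builders here, the API-index constructor takes the
// ids path parameter up front (see NewClearApiKeyCacheFunc above). Use
// `*` to evict every cached key; other values are literal IDs, not
// wildcard patterns. `es` is an assumed typed client:
//
//	if _, err := es.Security.ClearApiKeyCache("my-key-id").Do(ctx); err != nil {
//		log.Fatal(err)
//	}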
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache +func New(tp elastictransport.Interface) *ClearApiKeyCache { + r := &ClearApiKeyCache{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ClearApiKeyCache) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idsMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ids", r.ids) + } + path.WriteString(r.ids) + path.WriteString("/") + path.WriteString("_clear_cache") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ClearApiKeyCache) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.clear_api_key_cache") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_api_key_cache") + if reader := instrument.RecordRequestBody(ctx, "security.clear_api_key_cache", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_api_key_cache") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearApiKeyCache query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearapikeycache.Response +func (r ClearApiKeyCache) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_api_key_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
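+// As the implementation below shows, a 404 yields (false, nil) rather than an
+// error, so callers can tell "not found" apart from a failure. Sketch, with
+// `req` an assumed *ClearApiKeyCache:
+//
+//	ok, err := req.IsSuccess(ctx)
+//	switch {
+//	case err != nil: // transport error, or a non-2xx and non-404 status
+//	case !ok: // the endpoint answered 404
+//	default: // the cache entries were evicted
+//	}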
+func (r ClearApiKeyCache) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_api_key_cache") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearApiKeyCache query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearApiKeyCache headers map. +func (r *ClearApiKeyCache) Header(key, value string) *ClearApiKeyCache { + r.headers.Set(key, value) + + return r +} + +// Ids Comma-separated list of API key IDs to evict from the API key cache. +// To evict all API keys, use `*`. +// Does not support other wildcard patterns. +// API Name: ids +func (r *ClearApiKeyCache) _ids(ids string) *ClearApiKeyCache { + r.paramSet |= idsMask + r.ids = ids + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearApiKeyCache) ErrorTrace(errortrace bool) *ClearApiKeyCache { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearApiKeyCache) FilterPath(filterpaths ...string) *ClearApiKeyCache { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearApiKeyCache) Human(human bool) *ClearApiKeyCache { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearApiKeyCache) Pretty(pretty bool) *ClearApiKeyCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/response.go new file mode 100644 index 000000000..59fcea5b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearapikeycache + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearapikeycache +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.ClusterNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ClusterNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/clear_cached_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/clear_cached_privileges.go new file mode 100644 index 000000000..a530b431f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/clear_cached_privileges.go @@ -0,0 +1,356 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the privileges cache. +// +// Evict privileges from the native application privilege cache. +// The cache is also automatically cleared for applications that have their +// privileges updated. +package clearcachedprivileges + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + applicationMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
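+// It surfaces from HttpRequest (and therefore from Perform and Do) when the
+// required path parameter was never recorded in paramSet. A hedged sketch of
+// testing for it, with `req` an assumed *ClearCachedPrivileges built via New:
+//
+//	if _, err := req.HttpRequest(context.Background()); errors.Is(err, ErrBuildPath) {
+//		// the `application` path parameter is missing
+//	}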
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCachedPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + application string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCachedPrivileges type alias for index. +type NewClearCachedPrivileges func(application string) *ClearCachedPrivileges + +// NewClearCachedPrivilegesFunc returns a new instance of ClearCachedPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCachedPrivilegesFunc(tp elastictransport.Interface) NewClearCachedPrivileges { + return func(application string) *ClearCachedPrivileges { + n := New(tp) + + n._application(application) + + return n + } +} + +// Clear the privileges cache. +// +// Evict privileges from the native application privilege cache. +// The cache is also automatically cleared for applications that have their +// privileges updated. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges +func New(tp elastictransport.Interface) *ClearCachedPrivileges { + r := &ClearCachedPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ClearCachedPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == applicationMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", r.application) + } + path.WriteString(r.application) + path.WriteString("/") + path.WriteString("_clear_cache") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
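+// When the transport also implements elastictransport.Instrumented — for
+// example a client configured with OpenTelemetry instrumentation — Perform
+// starts a span named "security.clear_cached_privileges" unless Do or
+// IsSuccess already did (tracked via spanStarted), records the request body,
+// and records any transport error against the span.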
+func (r ClearCachedPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.clear_cached_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.clear_cached_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCachedPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcachedprivileges.Response +func (r ClearCachedPrivileges) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
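+// A quick-check sketch, again assuming the transport variable `tp` and a
+// hypothetical application name:
+//
+//	ok, err := clearcachedprivileges.NewClearCachedPrivilegesFunc(tp)("myapp").IsSuccess(context.Background())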
+func (r ClearCachedPrivileges) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "security.clear_cached_privileges")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ClearCachedPrivileges query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the ClearCachedPrivileges headers map.
+func (r *ClearCachedPrivileges) Header(key, value string) *ClearCachedPrivileges {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Application A comma-separated list of applications.
+// To clear all applications, use an asterisk (`*`).
+// It does not support other wildcard patterns.
+// API Name: application
+func (r *ClearCachedPrivileges) _application(application string) *ClearCachedPrivileges {
+	r.paramSet |= applicationMask
+	r.application = application
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *ClearCachedPrivileges) ErrorTrace(errortrace bool) *ClearCachedPrivileges {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *ClearCachedPrivileges) FilterPath(filterpaths ...string) *ClearCachedPrivileges {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *ClearCachedPrivileges) Human(human bool) *ClearCachedPrivileges {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *ClearCachedPrivileges) Pretty(pretty bool) *ClearCachedPrivileges {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/response.go
new file mode 100644
index 000000000..7895a85a0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges/response.go
@@ -0,0 +1,42 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcachedprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearcachedprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.ClusterNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ClusterNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/clear_cached_realms.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/clear_cached_realms.go new file mode 100644 index 000000000..820325811 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/clear_cached_realms.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the user cache. +// +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. +// For more information, refer to the documentation about controlling the user +// cache. 
+package clearcachedrealms + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + realmsMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCachedRealms struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + realms string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCachedRealms type alias for index. +type NewClearCachedRealms func(realms string) *ClearCachedRealms + +// NewClearCachedRealmsFunc returns a new instance of ClearCachedRealms with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCachedRealmsFunc(tp elastictransport.Interface) NewClearCachedRealms { + return func(realms string) *ClearCachedRealms { + n := New(tp) + + n._realms(realms) + + return n + } +} + +// Clear the user cache. +// +// Evict users from the user cache. +// You can completely clear the cache or evict specific users. +// +// User credentials are cached in memory on each node to avoid connecting to a +// remote authentication service or hitting the disk for every incoming request. +// There are realm settings that you can use to configure the user cache. +// For more information, refer to the documentation about controlling the user +// cache. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms +func New(tp elastictransport.Interface) *ClearCachedRealms { + r := &ClearCachedRealms{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
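+// HttpRequest only builds the request; nothing is sent, which makes it useful
+// for inspection or for hand-rolled transports. Sketch (`tp` and the realm
+// and user names are assumptions):
+//
+//	req, err := clearcachedrealms.NewClearCachedRealmsFunc(tp)("default_native").
+//		Usernames("jacknich", "rdeniro").
+//		HttpRequest(context.Background())
+//	if err == nil {
+//		fmt.Println(req.Method, req.URL.Path) // POST /_security/realm/default_native/_clear_cache
+//	}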
+func (r *ClearCachedRealms) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == realmsMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("realm") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "realms", r.realms) + } + path.WriteString(r.realms) + path.WriteString("/") + path.WriteString("_clear_cache") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearCachedRealms) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.clear_cached_realms") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_realms") + if reader := instrument.RecordRequestBody(ctx, "security.clear_cached_realms", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_realms") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCachedRealms query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcachedrealms.Response +func (r ClearCachedRealms) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_realms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, 
err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ClearCachedRealms) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_realms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearCachedRealms query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearCachedRealms headers map. +func (r *ClearCachedRealms) Header(key, value string) *ClearCachedRealms { + r.headers.Set(key, value) + + return r +} + +// Realms A comma-separated list of realms. +// To clear all realms, use an asterisk (`*`). +// It does not support other wildcard patterns. +// API Name: realms +func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { + r.paramSet |= realmsMask + r.realms = realms + + return r +} + +// Usernames A comma-separated list of the users to clear from the cache. +// If you do not specify this parameter, the API evicts all users from the user +// cache. +// API name: usernames +func (r *ClearCachedRealms) Usernames(usernames ...string) *ClearCachedRealms { + tmp := []string{} + for _, item := range usernames { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("usernames", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCachedRealms) ErrorTrace(errortrace bool) *ClearCachedRealms { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearCachedRealms) FilterPath(filterpaths ...string) *ClearCachedRealms { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. 
When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCachedRealms) Human(human bool) *ClearCachedRealms { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearCachedRealms) Pretty(pretty bool) *ClearCachedRealms { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/response.go new file mode 100644 index 000000000..7d32f3186 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcachedrealms + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearcachedrealms +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.ClusterNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ClusterNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/clear_cached_roles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/clear_cached_roles.go new file mode 100644 index 000000000..977cf439c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/clear_cached_roles.go @@ -0,0 +1,352 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear the roles cache. +// +// Evict roles from the native role cache. +package clearcachedroles + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCachedRoles struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCachedRoles type alias for index. +type NewClearCachedRoles func(name string) *ClearCachedRoles + +// NewClearCachedRolesFunc returns a new instance of ClearCachedRoles with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCachedRolesFunc(tp elastictransport.Interface) NewClearCachedRoles { + return func(name string) *ClearCachedRoles { + n := New(tp) + + n._name(name) + + return n + } +} + +// Clear the roles cache. +// +// Evict roles from the native role cache. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles +func New(tp elastictransport.Interface) *ClearCachedRoles { + r := &ClearCachedRoles{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
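+// The only route is POST /_security/role/{name}/_clear_cache. Illustration
+// (`tp` assumed), evicting every cached role via the `*` wildcard documented
+// below:
+//
+//	ok, err := clearcachedroles.NewClearCachedRolesFunc(tp)("*").IsSuccess(context.Background())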
+func (r *ClearCachedRoles) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_clear_cache") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ClearCachedRoles) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.clear_cached_roles") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_roles") + if reader := instrument.RecordRequestBody(ctx, "security.clear_cached_roles", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_roles") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCachedRoles query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcachedroles.Response +func (r ClearCachedRoles) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_roles") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return 
nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ClearCachedRoles) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_roles") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearCachedRoles query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearCachedRoles headers map. +func (r *ClearCachedRoles) Header(key, value string) *ClearCachedRoles { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of roles to evict from the role cache. +// To evict all roles, use an asterisk (`*`). +// It does not support other wildcard patterns. +// API Name: name +func (r *ClearCachedRoles) _name(name string) *ClearCachedRoles { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCachedRoles) ErrorTrace(errortrace bool) *ClearCachedRoles { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearCachedRoles) FilterPath(filterpaths ...string) *ClearCachedRoles { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCachedRoles) Human(human bool) *ClearCachedRoles { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ClearCachedRoles) Pretty(pretty bool) *ClearCachedRoles { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/response.go new file mode 100644 index 000000000..b7651b5b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcachedroles + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearcachedroles +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.ClusterNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ClusterNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go new file mode 100644 index 000000000..14f04e8c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go @@ -0,0 +1,415 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear service account token caches. +// +// Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. +package clearcachedservicetokens + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + namespaceMask = iota + 1 + + serviceMask + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCachedServiceTokens struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + namespace string + service string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCachedServiceTokens type alias for index. +type NewClearCachedServiceTokens func(namespace, service, name string) *ClearCachedServiceTokens + +// NewClearCachedServiceTokensFunc returns a new instance of ClearCachedServiceTokens with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewClearCachedServiceTokensFunc(tp elastictransport.Interface) NewClearCachedServiceTokens { + return func(namespace, service, name string) *ClearCachedServiceTokens { + n := New(tp) + + n._namespace(namespace) + + n._service(service) + + n._name(name) + + return n + } +} + +// Clear service account token caches. +// +// Evict a subset of all entries from the service account token caches. +// Two separate caches exist for service account tokens: one cache for tokens +// backed by the `service_tokens` file, and another for tokens backed by the +// `.security` index. +// This API clears matching entries from both caches. +// +// The cache for service account tokens backed by the `.security` index is +// cleared automatically on state changes of the security index. +// The cache for tokens backed by the `service_tokens` file is cleared +// automatically on file changes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens +func New(tp elastictransport.Interface) *ClearCachedServiceTokens { + r := &ClearCachedServiceTokens{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
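+// All three path parameters are required, yielding
+// POST /_security/service/{namespace}/{service}/credential/token/{name}/_clear_cache.
+// Sketch with placeholder names (`tp`, the namespace, and the service are
+// assumptions):
+//
+//	res, err := clearcachedservicetokens.
+//		NewClearCachedServiceTokensFunc(tp)("elastic", "fleet-server", "*").
+//		Do(context.Background())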
+func (r *ClearCachedServiceTokens) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == namespaceMask|serviceMask|nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_clear_cache") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
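+// The shared query-string helpers can be chained in before sending, for
+// example (illustrative only, `req` being an assumed *ClearCachedServiceTokens):
+//
+//	res, err := req.FilterPath("cluster_name", "_nodes.successful").Pretty(true).Perform(ctx)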
+func (r ClearCachedServiceTokens) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.clear_cached_service_tokens") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.clear_cached_service_tokens") + if reader := instrument.RecordRequestBody(ctx, "security.clear_cached_service_tokens", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.clear_cached_service_tokens") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCachedServiceTokens query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcachedservicetokens.Response +func (r ClearCachedServiceTokens) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_service_tokens") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r ClearCachedServiceTokens) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.clear_cached_service_tokens") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ClearCachedServiceTokens query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ClearCachedServiceTokens headers map. +func (r *ClearCachedServiceTokens) Header(key, value string) *ClearCachedServiceTokens { + r.headers.Set(key, value) + + return r +} + +// Namespace The namespace, which is a top-level grouping of service accounts. +// API Name: namespace +func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServiceTokens { + r.paramSet |= namespaceMask + r.namespace = namespace + + return r +} + +// Service The name of the service, which must be unique within its namespace. +// API Name: service +func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceTokens { + r.paramSet |= serviceMask + r.service = service + + return r +} + +// Name A comma-separated list of token names to evict from the service account token +// caches. +// Use a wildcard (`*`) to evict all tokens that belong to a service account. +// It does not support other wildcard patterns. +// API Name: name +func (r *ClearCachedServiceTokens) _name(name string) *ClearCachedServiceTokens { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCachedServiceTokens) ErrorTrace(errortrace bool) *ClearCachedServiceTokens { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ClearCachedServiceTokens) FilterPath(filterpaths ...string) *ClearCachedServiceTokens { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCachedServiceTokens) Human(human bool) *ClearCachedServiceTokens { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ClearCachedServiceTokens) Pretty(pretty bool) *ClearCachedServiceTokens { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/response.go new file mode 100644 index 000000000..392b40192 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcachedservicetokens + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package clearcachedservicetokens +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + NodeStats types.NodeStatistics `json:"_nodes"` + Nodes map[string]types.ClusterNode `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.ClusterNode, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/create_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/create_api_key.go new file mode 100644 index 000000000..149b1afc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/create_api_key.go @@ -0,0 +1,472 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create an API key. +// +// Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// +// A successful request returns a JSON structure that contains the API key, its +// unique id, and its name. +// If applicable, it also returns expiration information for the API key in +// milliseconds. +// +// NOTE: By default, API keys never expire. You can specify expiration +// information when you create the API keys. +// +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. +package createapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateApiKey type alias for index. +type NewCreateApiKey func() *CreateApiKey + +// NewCreateApiKeyFunc returns a new instance of CreateApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateApiKeyFunc(tp elastictransport.Interface) NewCreateApiKey { + return func() *CreateApiKey { + n := New(tp) + + return n + } +} + +// Create an API key. +// +// Create an API key for access without requiring basic authentication. +// +// IMPORTANT: If the credential that is used to authenticate this request is an +// API key, the derived API key cannot have any privileges. +// If you specify privileges, the API returns an error. +// +// A successful request returns a JSON structure that contains the API key, its +// unique id, and its name. +// If applicable, it also returns expiration information for the API key in +// milliseconds. +// +// NOTE: By default, API keys never expire. You can specify expiration +// information when you create the API keys. +// +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// To configure or turn off the API key service, refer to API key service +// setting documentation. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key +func New(tp elastictransport.Interface) *CreateApiKey { + r := &CreateApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *CreateApiKey) Raw(raw io.Reader) *CreateApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateApiKey) Request(req *Request) *CreateApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
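Raw, defined above, bypasses the typed Request entirely, which is handy when a payload already exists as JSON. A sketch under the same client assumption; the payload shape follows the createapikey.Request fields defined later in this diff:

// Sketch: send a pre-built JSON body instead of a typed Request.
// Assumed imports additionally: "strings".
func createKeyFromRawJSON(ctx context.Context, es *elasticsearch.TypedClient) error {
	_, err := es.Security.CreateApiKey().
		Raw(strings.NewReader(`{"name":"ingest-key"}`)).
		Do(ctx)
	return err
}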
+func (r CreateApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.create_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.create_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createapikey.Response +func (r CreateApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateApiKey headers map. +func (r *CreateApiKey) Header(key, value string) *CreateApiKey { + r.headers.Set(key, value) + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *CreateApiKey) Refresh(refresh refresh.Refresh) *CreateApiKey { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *CreateApiKey) ErrorTrace(errortrace bool) *CreateApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateApiKey) FilterPath(filterpaths ...string) *CreateApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateApiKey) Human(human bool) *CreateApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateApiKey) Pretty(pretty bool) *CreateApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The expiration time for the API key. +// By default, API keys never expire. +// API name: expiration +func (r *CreateApiKey) Expiration(duration types.DurationVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. It supports +// nested data structure. Within the metadata object, keys beginning with `_` +// are reserved for system usage. +// API name: metadata +func (r *CreateApiKey) Metadata(metadata types.MetadataVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// A name for the API key. +// API name: name +func (r *CreateApiKey) Name(name string) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// An array of role descriptors for this API key. +// When it is not specified or it is an empty array, the API key will have a +// point in time snapshot of permissions of the authenticated user. +// If you supply role descriptors, the resultant permissions are an intersection +// of API keys permissions and the authenticated user's permissions thereby +// limiting the access scope for API keys. +// The structure of role descriptor is the same as the request for the create +// role API. +// For more details, refer to the create or update roles API. +// +// NOTE: Due to the way in which this permission intersection is calculated, it +// is not possible to create an API key that is a child of another API key, +// unless the derived key is created without any privileges. +// In this case, you must explicitly specify a role descriptor with no +// privileges. +// The derived API key can be used for authentication; it will not have +// authority to call Elasticsearch APIs. 
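The role_descriptors note above is the subtle part of this API: omitting descriptors snapshots the caller's own permissions, while supplying them intersects with those permissions. A sketch of the simplest case — a key that inherits a point-in-time snapshot of the authenticated user's privileges (client value es assumed as before):

// Sketch: no role_descriptors, so the key snapshots the caller's permissions.
func createSnapshotKey(ctx context.Context, es *elasticsearch.TypedClient) (string, error) {
	res, err := es.Security.CreateApiKey().
		Name("readonly-snapshot").
		Do(ctx)
	if err != nil {
		return "", err
	}
	return res.Encoded, nil
}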
+// API name: role_descriptors +func (r *CreateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.RoleDescriptors = roledescriptors + return r +} + +func (r *CreateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *CreateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.RoleDescriptor + if r.req.RoleDescriptors == nil { + r.req.RoleDescriptors = make(map[string]types.RoleDescriptor) + } else { + tmp = r.req.RoleDescriptors + } + + tmp[key] = *value.RoleDescriptorCaster() + + r.req.RoleDescriptors = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/request.go new file mode 100644 index 000000000..c60cbb7db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/request.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package createapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L86 +type Request struct { + + // Expiration The expiration time for the API key. + // By default, API keys never expire. + Expiration types.Duration `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. It supports + // nested data structure. Within the metadata object, keys beginning with `_` + // are reserved for system usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // Name A name for the API key. + Name *string `json:"name,omitempty"` + // RoleDescriptors An array of role descriptors for this API key. + // When it is not specified or it is an empty array, the API key will have a + // point in time snapshot of permissions of the authenticated user. + // If you supply role descriptors, the resultant permissions are an intersection + // of API keys permissions and the authenticated user's permissions thereby + // limiting the access scope for API keys. 
+ // The structure of role descriptor is the same as the request for the create + // role API. + // For more details, refer to the create or update roles API. + // + // NOTE: Due to the way in which this permission intersection is calculated, it + // is not possible to create an API key that is a child of another API key, + // unless the derived key is created without any privileges. + // In this case, you must explicitly specify a role descriptor with no + // privileges. + // The derived API key can be used for authentication; it will not have + // authority to call Elasticsearch APIs. + RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + RoleDescriptors: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Createapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]types.RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/response.go new file mode 100644 index 000000000..7122deb9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
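FromJSON above accepts an arbitrary JSON document, which makes it easy to keep API-key definitions in configuration files. A sketch, assuming the typed Duration field accepts the usual Elasticsearch time-unit string (for example `7d`):

// Sketch: build the typed request from a JSON literal.
// Assumed import: "github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey".
func createKeyFromConfig(ctx context.Context, es *elasticsearch.TypedClient) error {
	req, err := createapikey.NewRequest().FromJSON(`{"name":"ingest-key","expiration":"7d"}`)
	if err != nil {
		return err
	}
	_, err = es.Security.CreateApiKey().Request(req).Do(ctx)
	return err
}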
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createapikey + +// Response holds the response body struct for the package createapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 +type Response struct { + + // ApiKey Generated API key. + ApiKey string `json:"api_key"` + // Encoded API key credentials which is the base64-encoding of + // the UTF-8 representation of `id` and `api_key` joined + // by a colon (`:`). + Encoded string `json:"encoded"` + // Expiration Expiration in milliseconds for the API key. + Expiration *int64 `json:"expiration,omitempty"` + // Id Unique ID for this API key. + Id string `json:"id"` + // Name Specifies the name for this API key. + Name string `json:"name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go new file mode 100644 index 000000000..033063f58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go @@ -0,0 +1,457 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a cross-cluster API key. +// +// Create an API key of the `cross_cluster` type for the API key based remote +// cluster access. +// A `cross_cluster` API key cannot be used to authenticate through the REST +// interface. +// +// IMPORTANT: To authenticate this request you must use a credential that is not +// an API key. Even if you use an API key that has the required privilege, the +// API returns an error. +// +// Cross-cluster API keys are created by the Elasticsearch API key service, +// which is automatically enabled. +// +// NOTE: Unlike REST API keys, a cross-cluster API key does not capture +// permissions of the authenticated user. The API key’s effective permission is +// exactly as specified with the `access` property. +// +// A successful request returns a JSON structure that contains the API key, its +// unique ID, and its name. If applicable, it also returns expiration +// information for the API key in milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. 
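As the Encoded comment above notes, the response already carries base64(`id:api_key`), so a client can authenticate with it directly; no manual encoding step is needed. A sketch of attaching it to an ordinary HTTP request:

// Sketch: use the Encoded credential from a createapikey.Response.
// Assumed import: "net/http".
func authorize(httpReq *http.Request, encoded string) {
	// encoded is Response.Encoded: base64 of "id:api_key".
	httpReq.Header.Set("Authorization", "ApiKey "+encoded)
}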
+// +// Cross-cluster API keys can only be updated with the update cross-cluster API +// key API. +// Attempting to update them with the update REST API key API or the bulk update +// REST API keys API will result in an error. +package createcrossclusterapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateCrossClusterApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateCrossClusterApiKey type alias for index. +type NewCreateCrossClusterApiKey func() *CreateCrossClusterApiKey + +// NewCreateCrossClusterApiKeyFunc returns a new instance of CreateCrossClusterApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewCreateCrossClusterApiKey { + return func() *CreateCrossClusterApiKey { + n := New(tp) + + return n + } +} + +// Create a cross-cluster API key. +// +// Create an API key of the `cross_cluster` type for the API key based remote +// cluster access. +// A `cross_cluster` API key cannot be used to authenticate through the REST +// interface. +// +// IMPORTANT: To authenticate this request you must use a credential that is not +// an API key. Even if you use an API key that has the required privilege, the +// API returns an error. +// +// Cross-cluster API keys are created by the Elasticsearch API key service, +// which is automatically enabled. +// +// NOTE: Unlike REST API keys, a cross-cluster API key does not capture +// permissions of the authenticated user. The API key’s effective permission is +// exactly as specified with the `access` property. +// +// A successful request returns a JSON structure that contains the API key, its +// unique ID, and its name. If applicable, it also returns expiration +// information for the API key in milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. +// +// Cross-cluster API keys can only be updated with the update cross-cluster API +// key API. +// Attempting to update them with the update REST API key API or the bulk update +// REST API keys API will result in an error. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key +func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { + r := &CreateCrossClusterApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *CreateCrossClusterApiKey) Raw(raw io.Reader) *CreateCrossClusterApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *CreateCrossClusterApiKey) Request(req *Request) *CreateCrossClusterApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateCrossClusterApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CreateCrossClusterApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.create_cross_cluster_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_cross_cluster_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.create_cross_cluster_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_cross_cluster_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateCrossClusterApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createcrossclusterapikey.Response +func (r CreateCrossClusterApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_cross_cluster_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateCrossClusterApiKey headers map. +func (r *CreateCrossClusterApiKey) Header(key, value string) *CreateCrossClusterApiKey { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateCrossClusterApiKey) ErrorTrace(errortrace bool) *CreateCrossClusterApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *CreateCrossClusterApiKey) FilterPath(filterpaths ...string) *CreateCrossClusterApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateCrossClusterApiKey) Human(human bool) *CreateCrossClusterApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateCrossClusterApiKey) Pretty(pretty bool) *CreateCrossClusterApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access to be granted to this API key. +// The access is composed of permissions for cross-cluster search and +// cross-cluster replication. +// At least one of them must be specified. +// +// NOTE: No explicit privileges should be specified for either search or +// replication access. +// The creation process automatically converts the access specification to a +// role descriptor which has relevant privileges assigned accordingly. +// API name: access +func (r *CreateCrossClusterApiKey) Access(access types.AccessVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Access = *access.AccessCaster() + + return r +} + +// Expiration time for the API key. +// By default, API keys never expire. +// API name: expiration +func (r *CreateCrossClusterApiKey) Expiration(duration types.DurationVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// API name: metadata +func (r *CreateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// Specifies the name for this API key. +// API name: name +func (r *CreateCrossClusterApiKey) Name(name string) *CreateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/request.go new file mode 100644 index 000000000..fb46d668b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/request.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createcrossclusterapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package createcrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyRequest.ts#L25-L80 +type Request struct { + + // Access The access to be granted to this API key. + // The access is composed of permissions for cross-cluster search and + // cross-cluster replication. + // At least one of them must be specified. + // + // NOTE: No explicit privileges should be specified for either search or + // replication access. + // The creation process automatically converts the access specification to a + // role descriptor which has relevant privileges assigned accordingly. + Access types.Access `json:"access"` + // Expiration Expiration time for the API key. + // By default, API keys never expire. + Expiration types.Duration `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports nested data structure. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // Name Specifies the name for this API key. 
+ Name string `json:"name"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Createcrossclusterapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/response.go new file mode 100644 index 000000000..f374c0b8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createcrossclusterapikey + +// Response holds the response body struct for the package createcrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_cross_cluster_api_key/CreateCrossClusterApiKeyResponse.ts#L23-L48 +type Response struct { + + // ApiKey Generated API key. + ApiKey string `json:"api_key"` + // Encoded API key credentials which is the base64-encoding of + // the UTF-8 representation of `id` and `api_key` joined + // by a colon (`:`). + Encoded string `json:"encoded"` + // Expiration Expiration in milliseconds for the API key. + Expiration *int64 `json:"expiration,omitempty"` + // Id Unique ID for this API key. + Id string `json:"id"` + // Name Specifies the name for this API key. 
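Since this request.go also ships FromJSON, the access block — which has no dedicated fluent setter for its inner search/replication rules in this hunk — can be supplied as JSON. A sketch; the shape of the access payload follows the cross-cluster API key documentation and is an assumption here:

// Sketch: cross-cluster key granting search over logs-* indices.
// Assumed import:
// "github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey".
func createRemoteAccessKey(ctx context.Context, es *elasticsearch.TypedClient) error {
	req, err := createcrossclusterapikey.NewRequest().
		FromJSON(`{"name":"remote-search","access":{"search":[{"names":["logs-*"]}]}}`)
	if err != nil {
		return err
	}
	_, err = es.Security.CreateCrossClusterApiKey().Request(req).Do(ctx)
	return err
}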
+ Name string `json:"name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/create_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/create_service_token.go new file mode 100644 index 000000000..a032eccaf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/create_service_token.go @@ -0,0 +1,443 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a service account token. +// +// Create a service accounts token for access without requiring basic +// authentication. +// +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. +package createservicetoken + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + namespaceMask = iota + 1 + + serviceMask + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateServiceToken struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + namespace string + service string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreateServiceToken type alias for index. +type NewCreateServiceToken func(namespace, service string) *CreateServiceToken + +// NewCreateServiceTokenFunc returns a new instance of CreateServiceToken with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateServiceTokenFunc(tp elastictransport.Interface) NewCreateServiceToken { + return func(namespace, service string) *CreateServiceToken { + n := New(tp) + + n._namespace(namespace) + + n._service(service) + + return n + } +} + +// Create a service account token. +// +// Create a service accounts token for access without requiring basic +// authentication. +// +// NOTE: Service account tokens never expire. +// You must actively delete them if they are no longer needed. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token +func New(tp elastictransport.Interface) *CreateServiceToken { + r := &CreateServiceToken{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateServiceToken) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == namespaceMask|serviceMask|nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + case r.paramSet == namespaceMask|serviceMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
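The two switch arms above encode a useful asymmetry: naming the token selects PUT on .../token/{name}, while omitting the name selects POST on .../token and leaves name generation to Elasticsearch. A sketch that compares both routes without performing them (client value es assumed as before):

// Sketch: compare the routes the builder chooses with and without a name.
func compareServiceTokenRoutes(ctx context.Context, es *elasticsearch.TypedClient) {
	named, _ := es.Security.CreateServiceToken("elastic", "fleet-server").
		Name("token1").
		HttpRequest(ctx)
	fmt.Println(named.Method, named.URL.Path)
	// Expected: PUT /_security/service/elastic/fleet-server/credential/token/token1

	auto, _ := es.Security.CreateServiceToken("elastic", "fleet-server").
		HttpRequest(ctx)
	fmt.Println(auto.Method, auto.URL.Path)
	// Expected: POST /_security/service/elastic/fleet-server/credential/token
}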
+func (r CreateServiceToken) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.create_service_token") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.create_service_token") + if reader := instrument.RecordRequestBody(ctx, "security.create_service_token", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.create_service_token") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateServiceToken query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createservicetoken.Response +func (r CreateServiceToken) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
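Do, just above, decodes into the createservicetoken.Response defined at the end of this diff (a Created flag plus a types.ServiceToken). A sketch that creates a token and reads the secret back — assuming the generated types.ServiceToken exposes the secret in a Value field, which is not shown in this hunk:

// Sketch: create a token and return its bearer secret.
func newFleetToken(ctx context.Context, es *elasticsearch.TypedClient) (string, error) {
	res, err := es.Security.CreateServiceToken("elastic", "fleet-server").
		Name("token1").
		Do(ctx)
	if err != nil {
		return "", err
	}
	// Assumption: ServiceToken carries the secret in its Value field.
	return res.Token.Value, nil
}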
+func (r CreateServiceToken) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.create_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CreateServiceToken query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CreateServiceToken headers map. +func (r *CreateServiceToken) Header(key, value string) *CreateServiceToken { + r.headers.Set(key, value) + + return r +} + +// Namespace The name of the namespace, which is a top-level grouping of service accounts. +// API Name: namespace +func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { + r.paramSet |= namespaceMask + r.namespace = namespace + + return r +} + +// Service The name of the service. +// API Name: service +func (r *CreateServiceToken) _service(service string) *CreateServiceToken { + r.paramSet |= serviceMask + r.service = service + + return r +} + +// Name The name for the service account token. +// If omitted, a random name will be generated. +// +// Token names must be at least one and no more than 256 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and +// underscores (`_`), but cannot begin with an underscore. +// +// NOTE: Token names must be unique in the context of the associated service +// account. +// They must also be globally unique with their fully qualified names, which are +// comprised of the service account principal and token name, such as +// `//`. +// API Name: name +func (r *CreateServiceToken) Name(name string) *CreateServiceToken { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` then refresh the affected shards to make this operation visible to +// search, if `wait_for` (the default) then wait for a refresh to make this +// operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *CreateServiceToken) Refresh(refresh refresh.Refresh) *CreateServiceToken { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateServiceToken) ErrorTrace(errortrace bool) *CreateServiceToken { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateServiceToken) FilterPath(filterpaths ...string) *CreateServiceToken { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateServiceToken) Human(human bool) *CreateServiceToken { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateServiceToken) Pretty(pretty bool) *CreateServiceToken { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/response.go new file mode 100644 index 000000000..ede966979 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createservicetoken + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package createservicetoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L30 +type Response struct { + Created bool `json:"created"` + Token types.ServiceToken `json:"token"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/delegate_pki.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/delegate_pki.go new file mode 100644 index 000000000..d1cea4731 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/delegate_pki.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delegate PKI authentication.
+//
+// This API implements the exchange of an X509Certificate chain for an
+// Elasticsearch access token.
+// The certificate chain is validated, according to RFC 5280, by sequentially
+// considering the trust configuration of every installed PKI realm that has
+// `delegation.enabled` set to `true`.
+// A successfully trusted client certificate is also subject to the validation
+// of the subject distinguished name according to the `username_pattern` of the
+// respective realm.
+//
+// This API is called by smart and trusted proxies, such as Kibana, which
+// terminate the user's TLS session but still want to authenticate the user by
+// using a PKI realm, as if the user connected directly to Elasticsearch.
+//
+// IMPORTANT: The association between the subject public key in the target
+// certificate and the corresponding private key is not validated.
+// This is part of the TLS authentication process and it is delegated to the
+// proxy that calls this API.
+// The proxy is trusted to have performed the TLS authentication and this API
+// translates that authentication into an Elasticsearch access token.
+package delegatepki
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DelegatePki struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDelegatePki type alias for index.
+type NewDelegatePki func() *DelegatePki
+
+// NewDelegatePkiFunc returns a new instance of DelegatePki with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDelegatePkiFunc(tp elastictransport.Interface) NewDelegatePki {
+	return func() *DelegatePki {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Delegate PKI authentication.
+//
+// This API implements the exchange of an X509Certificate chain for an
+// Elasticsearch access token.
+// The certificate chain is validated, according to RFC 5280, by sequentially
+// considering the trust configuration of every installed PKI realm that has
+// `delegation.enabled` set to `true`.
+// A successfully trusted client certificate is also subject to the validation
+// of the subject distinguished name according to the `username_pattern` of the
+// respective realm.
+//
+// This API is called by smart and trusted proxies, such as Kibana, which
+// terminate the user's TLS session but still want to authenticate the user by
+// using a PKI realm, as if the user connected directly to Elasticsearch.
+//
+// IMPORTANT: The association between the subject public key in the target
+// certificate and the corresponding private key is not validated.
+// This is part of the TLS authentication process and it is delegated to the
+// proxy that calls this API.
+// The proxy is trusted to have performed the TLS authentication and this API
+// translates that authentication into an Elasticsearch access token.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki
+func New(tp elastictransport.Interface) *DelegatePki {
+	r := &DelegatePki{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *DelegatePki) Raw(raw io.Reader) *DelegatePki {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *DelegatePki) Request(req *Request) *DelegatePki {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *DelegatePki) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for DelegatePki: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_security")
+		path.WriteString("/")
+		path.WriteString("delegate_pki")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
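// ----------------------------------------------------------------------------
// [Editorial note; illustrative, not part of the vendored patch.] A hedged
// sketch of calling this endpoint. Per the request docs, each
// x509_certificate_chain entry is the standard (RFC 4648 section 4, not
// base64url) base64 encoding of a certificate's DER bytes, which Go exposes
// as cert.Raw. The Security.DelegatePki accessor on elasticsearch.TypedClient
// is assumed; the builder and Response fields are from this patch.
package example

import (
	"context"
	"crypto/x509"
	"encoding/base64"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func delegatePki(ctx context.Context, es *elasticsearch.TypedClient, chain []*x509.Certificate) error {
	req := es.Security.DelegatePki()
	for _, cert := range chain {
		// First the target certificate, then each certifying certificate.
		req.X509CertificateChain(base64.StdEncoding.EncodeToString(cert.Raw))
	}
	res, err := req.Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("access token %q (type %s) expires in %ds", res.AccessToken, res.Type, res.ExpiresIn)
	return nil
}
// ----------------------------------------------------------------------------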
+func (r DelegatePki) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delegate_pki") + if reader := instrument.RecordRequestBody(ctx, "security.delegate_pki", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delegate_pki") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DelegatePki query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delegatepki.Response +func (r DelegatePki) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delegate_pki") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the DelegatePki headers map. +func (r *DelegatePki) Header(key, value string) *DelegatePki { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DelegatePki) ErrorTrace(errortrace bool) *DelegatePki { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *DelegatePki) FilterPath(filterpaths ...string) *DelegatePki { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DelegatePki) Human(human bool) *DelegatePki { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DelegatePki) Pretty(pretty bool) *DelegatePki { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The X509Certificate chain, which is represented as an ordered string array. +// Each string in the array is a base64-encoded (Section 4 of RFC4648 - not +// base64url-encoded) of the certificate's DER encoding. +// +// The first element is the target certificate that contains the subject +// distinguished name that is requesting access. +// This may be followed by additional certificates; each subsequent certificate +// is used to certify the previous one. +// API name: x509_certificate_chain +func (r *DelegatePki) X509CertificateChain(x509certificatechains ...string) *DelegatePki { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range x509certificatechains { + + r.req.X509CertificateChain = append(r.req.X509CertificateChain, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/request.go new file mode 100644 index 000000000..98c508850 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delegatepki + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delegate_pki/SecurityDelegatePkiRequest.ts#L22-L57 +type Request struct { + + // X509CertificateChain The X509Certificate chain, which is represented as an ordered string array. + // Each string in the array is a base64-encoded (Section 4 of RFC4648 - not + // base64url-encoded) of the certificate's DER encoding. + // + // The first element is the target certificate that contains the subject + // distinguished name that is requesting access. + // This may be followed by additional certificates; each subsequent certificate + // is used to certify the previous one. + X509CertificateChain []string `json:"x509_certificate_chain"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Delegatepki request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/response.go new file mode 100644 index 000000000..ca22fd48f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delegatepki + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package delegatepki +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L24-L41 +type Response struct { + + // AccessToken An access token associated with the subject distinguished name of the + // client's certificate. + AccessToken string `json:"access_token"` + Authentication *types.Authentication `json:"authentication,omitempty"` + // ExpiresIn The amount of time (in seconds) before the token expires. + ExpiresIn int64 `json:"expires_in"` + // Type The type of token. 
+ Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/delete_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/delete_privileges.go new file mode 100644 index 000000000..b30916306 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/delete_privileges.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +package deleteprivileges + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + applicationMask = iota + 1 + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeletePrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + application string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeletePrivileges type alias for index. +type NewDeletePrivileges func(application, name string) *DeletePrivileges + +// NewDeletePrivilegesFunc returns a new instance of DeletePrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeletePrivilegesFunc(tp elastictransport.Interface) NewDeletePrivileges { + return func(application, name string) *DeletePrivileges { + n := New(tp) + + n._application(application) + + n._name(name) + + return n + } +} + +// Delete application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges +func New(tp elastictransport.Interface) *DeletePrivileges { + r := &DeletePrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeletePrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == applicationMask|nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", r.application) + } + path.WriteString(r.application) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeletePrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delete_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.delete_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeletePrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteprivileges.Response +func (r DeletePrivileges) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
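// ----------------------------------------------------------------------------
// [Editorial note; illustrative, not part of the vendored patch.] Unlike the
// other endpoints in this patch, Do above returns a map type (see
// deleteprivileges/response.go further down: application -> privilege name ->
// types.FoundStatus), so callers iterate rather than dereference a struct. A
// hedged sketch, assuming the typed client exposes
// Security.DeletePrivileges(application, name):
package example

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func deletePrivileges(ctx context.Context, es *elasticsearch.TypedClient) error {
	// DELETE /_security/privilege/myapp/read
	res, err := es.Security.DeletePrivileges("myapp", "read").Do(ctx)
	if err != nil {
		return err
	}
	for app, privileges := range res {
		for name, status := range privileges {
			log.Printf("%s/%s removed=%v", app, name, status.Found)
		}
	}
	return nil
}
// ----------------------------------------------------------------------------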
+func (r DeletePrivileges) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeletePrivileges query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeletePrivileges headers map. +func (r *DeletePrivileges) Header(key, value string) *DeletePrivileges { + r.headers.Set(key, value) + + return r +} + +// Application The name of the application. +// Application privileges are always associated with exactly one application. +// API Name: application +func (r *DeletePrivileges) _application(application string) *DeletePrivileges { + r.paramSet |= applicationMask + r.application = application + + return r +} + +// Name The name of the privilege. +// API Name: name +func (r *DeletePrivileges) _name(name string) *DeletePrivileges { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DeletePrivileges) Refresh(refresh refresh.Refresh) *DeletePrivileges { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeletePrivileges) ErrorTrace(errortrace bool) *DeletePrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeletePrivileges) FilterPath(filterpaths ...string) *DeletePrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeletePrivileges) Human(human bool) *DeletePrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeletePrivileges) Pretty(pretty bool) *DeletePrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/response.go new file mode 100644 index 000000000..143607391 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package deleteprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L26 + +type Response map[string]map[string]types.FoundStatus + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/delete_role.go new file mode 100644 index 000000000..e254bf8e3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/delete_role.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete roles. +// +// Delete roles in the native realm. 
+// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. +package deleterole + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteRole type alias for index. +type NewDeleteRole func(name string) *DeleteRole + +// NewDeleteRoleFunc returns a new instance of DeleteRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteRoleFunc(tp elastictransport.Interface) NewDeleteRole { + return func(name string) *DeleteRole { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete roles. +// +// Delete roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The delete roles API cannot remove roles that are defined in roles files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role +func New(tp elastictransport.Interface) *DeleteRole { + r := &DeleteRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delete_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_role") + if reader := instrument.RecordRequestBody(ctx, "security.delete_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleterole.Response +func (r DeleteRole) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
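// ----------------------------------------------------------------------------
// [Editorial note; illustrative, not part of the vendored patch.] IsSuccess
// below implements a three-way outcome: a 2xx status yields (true, nil), a
// 404 yields (false, nil) instead of an error, and any other status yields
// (false, err). That makes idempotent cleanup easy to express. A hedged
// sketch, assuming Security.DeleteRole(name) on the typed client:
package example

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func dropRole(ctx context.Context, es *elasticsearch.TypedClient) error {
	ok, err := es.Security.DeleteRole("my_admin_role").IsSuccess(ctx)
	if err != nil {
		return err
	}
	if !ok {
		log.Print("role was already absent (HTTP 404)")
	}
	return nil
}
// ----------------------------------------------------------------------------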
+func (r DeleteRole) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteRole query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteRole headers map. +func (r *DeleteRole) Header(key, value string) *DeleteRole { + r.headers.Set(key, value) + + return r +} + +// Name The name of the role. +// API Name: name +func (r *DeleteRole) _name(name string) *DeleteRole { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DeleteRole) Refresh(refresh refresh.Refresh) *DeleteRole { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteRole) ErrorTrace(errortrace bool) *DeleteRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteRole) FilterPath(filterpaths ...string) *DeleteRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteRole) Human(human bool) *DeleteRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteRole) Pretty(pretty bool) *DeleteRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/response.go new file mode 100644 index 000000000..f765c8ef5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleterole + +// Response holds the response body struct for the package deleterole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L28 +type Response struct { + + // Found If the role is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. + Found bool `json:"found"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/delete_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/delete_role_mapping.go new file mode 100644 index 000000000..c18cc5583 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/delete_role_mapping.go @@ -0,0 +1,369 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. 
+package deleterolemapping + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteRoleMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteRoleMapping type alias for index. +type NewDeleteRoleMapping func(name string) *DeleteRoleMapping + +// NewDeleteRoleMappingFunc returns a new instance of DeleteRoleMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteRoleMappingFunc(tp elastictransport.Interface) NewDeleteRoleMapping { + return func(name string) *DeleteRoleMapping { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The delete role mappings API cannot remove role mappings that are defined in +// role mapping files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping +func New(tp elastictransport.Interface) *DeleteRoleMapping { + r := &DeleteRoleMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteRoleMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteRoleMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delete_role_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_role_mapping") + if reader := instrument.RecordRequestBody(ctx, "security.delete_role_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_role_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteRoleMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleterolemapping.Response +func (r DeleteRoleMapping) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
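// ----------------------------------------------------------------------------
// [Editorial note; illustrative, not part of the vendored patch.] The Refresh
// option below takes the shared refresh enum rather than a raw string. A
// hedged sketch, assuming Security.DeleteRoleMapping(name) on the typed
// client and the enum values (refresh.True, refresh.Waitfor, refresh.False)
// that this generator emits elsewhere in the tree:
package example

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func deleteMapping(ctx context.Context, es *elasticsearch.TypedClient) error {
	// Wait for a refresh so the deletion is visible to subsequent searches.
	res, err := es.Security.DeleteRoleMapping("kibana_users_mapping").
		Refresh(refresh.Waitfor).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("found=%v", res.Found)
	return nil
}
// ----------------------------------------------------------------------------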
+func (r DeleteRoleMapping) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteRoleMapping query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteRoleMapping headers map. +func (r *DeleteRoleMapping) Header(key, value string) *DeleteRoleMapping { + r.headers.Set(key, value) + + return r +} + +// Name The distinct name that identifies the role mapping. +// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. +// API Name: name +func (r *DeleteRoleMapping) _name(name string) *DeleteRoleMapping { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DeleteRoleMapping) Refresh(refresh refresh.Refresh) *DeleteRoleMapping { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteRoleMapping) ErrorTrace(errortrace bool) *DeleteRoleMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteRoleMapping) FilterPath(filterpaths ...string) *DeleteRoleMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteRoleMapping) Human(human bool) *DeleteRoleMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteRoleMapping) Pretty(pretty bool) *DeleteRoleMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/response.go new file mode 100644 index 000000000..2145bfc3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleterolemapping + +// Response holds the response body struct for the package deleterolemapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L28 +type Response struct { + + // Found If the mapping is successfully deleted, `found` is `true`. + // Otherwise, `found` is `false`. + Found bool `json:"found"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/delete_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/delete_service_token.go new file mode 100644 index 000000000..5fe0d7980 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/delete_service_token.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete service account tokens. 
+// +// Delete service account tokens for a service in a specified namespace. +package deleteservicetoken + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + namespaceMask = iota + 1 + + serviceMask + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteServiceToken struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + namespace string + service string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteServiceToken type alias for index. +type NewDeleteServiceToken func(namespace, service, name string) *DeleteServiceToken + +// NewDeleteServiceTokenFunc returns a new instance of DeleteServiceToken with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteServiceTokenFunc(tp elastictransport.Interface) NewDeleteServiceToken { + return func(namespace, service, name string) *DeleteServiceToken { + n := New(tp) + + n._namespace(namespace) + + n._service(service) + + n._name(name) + + return n + } +} + +// Delete service account tokens. +// +// Delete service account tokens for a service in a specified namespace. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token +func New(tp elastictransport.Interface) *DeleteServiceToken { + r := &DeleteServiceToken{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
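+//
+// (Editor's note, not part of the generated file: a sketch of the request
+// assembled below, assuming an illustrative transport tp. The namespace,
+// service, and token name become consecutive path segments.)
+//
+//    dst := NewDeleteServiceTokenFunc(tp)("elastic", "fleet-server", "token42")
+//    req, err := dst.HttpRequest(context.Background())
+//    // req.Method == http.MethodDelete
+//    // req.URL.Path == "/_security/service/elastic/fleet-server/credential/token/token42"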
+func (r *DeleteServiceToken) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == namespaceMask|serviceMask|nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + path.WriteString("/") + path.WriteString("credential") + path.WriteString("/") + path.WriteString("token") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteServiceToken) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delete_service_token") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_service_token") + if reader := instrument.RecordRequestBody(ctx, "security.delete_service_token", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_service_token") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteServiceToken query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteservicetoken.Response +func (r DeleteServiceToken) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + 
ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteServiceToken) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_service_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteServiceToken query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteServiceToken headers map. +func (r *DeleteServiceToken) Header(key, value string) *DeleteServiceToken { + r.headers.Set(key, value) + + return r +} + +// Namespace The namespace, which is a top-level grouping of service accounts. +// API Name: namespace +func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { + r.paramSet |= namespaceMask + r.namespace = namespace + + return r +} + +// Service The service name. +// API Name: service +func (r *DeleteServiceToken) _service(service string) *DeleteServiceToken { + r.paramSet |= serviceMask + r.service = service + + return r +} + +// Name The name of the service account token. +// API Name: name +func (r *DeleteServiceToken) _name(name string) *DeleteServiceToken { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` then refresh the affected shards to make this operation visible to +// search, if `wait_for` (the default) then wait for a refresh to make this +// operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DeleteServiceToken) Refresh(refresh refresh.Refresh) *DeleteServiceToken { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *DeleteServiceToken) ErrorTrace(errortrace bool) *DeleteServiceToken { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteServiceToken) FilterPath(filterpaths ...string) *DeleteServiceToken { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteServiceToken) Human(human bool) *DeleteServiceToken { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteServiceToken) Pretty(pretty bool) *DeleteServiceToken { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/response.go new file mode 100644 index 000000000..a2eefbae3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteservicetoken + +// Response holds the response body struct for the package deleteservicetoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L28 +type Response struct { + + // Found If the service account token is successfully deleted, the request returns + // `{"found": true}`. + // Otherwise, the response will have status code 404 and `found` is set to + // `false`. 
+ Found bool `json:"found"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/delete_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/delete_user.go new file mode 100644 index 000000000..95ea59434 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/delete_user.go @@ -0,0 +1,359 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete users. +// +// Delete users from the native realm. +package deleteuser + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteUser type alias for index. +type NewDeleteUser func(username string) *DeleteUser + +// NewDeleteUserFunc returns a new instance of DeleteUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteUserFunc(tp elastictransport.Interface) NewDeleteUser { + return func(username string) *DeleteUser { + n := New(tp) + + n._username(username) + + return n + } +} + +// Delete users. +// +// Delete users from the native realm. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user +func New(tp elastictransport.Interface) *DeleteUser { + r := &DeleteUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
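+//
+// (Editor's note, not part of the generated file: a sketch of the request
+// built below, with an illustrative transport tp and username. In application
+// code this endpoint is usually reached as es.Security.DeleteUser(...) on a
+// client from elasticsearch.NewTypedClient, assuming the v9 typed client
+// keeps the v8 layout.)
+//
+//    req, err := NewDeleteUserFunc(tp)("jacknich").HttpRequest(context.Background())
+//    // req.Method == http.MethodDelete
+//    // req.URL.Path == "/_security/user/jacknich"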
+func (r *DeleteUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.delete_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.delete_user") + if reader := instrument.RecordRequestBody(ctx, "security.delete_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.delete_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleteuser.Response +func (r DeleteUser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteUser) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.delete_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteUser query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteUser headers map. +func (r *DeleteUser) Header(key, value string) *DeleteUser { + r.headers.Set(key, value) + + return r +} + +// Username An identifier for the user. +// API Name: username +func (r *DeleteUser) _username(username string) *DeleteUser { + r.paramSet |= usernameMask + r.username = username + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DeleteUser) Refresh(refresh refresh.Refresh) *DeleteUser { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteUser) ErrorTrace(errortrace bool) *DeleteUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteUser) FilterPath(filterpaths ...string) *DeleteUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteUser) Human(human bool) *DeleteUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteUser) Pretty(pretty bool) *DeleteUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/response.go new file mode 100644 index 000000000..768d8f292 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteuser + +// Response holds the response body struct for the package deleteuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L28 +type Response struct { + + // Found If the user is successfully deleted, the request returns `{"found": true}`. + // Otherwise, `found` is set to `false`. + Found bool `json:"found"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/disable_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/disable_user.go new file mode 100644 index 000000000..7480849fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/disable_user.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Disable users. +// +// Disable users in the native realm. 
+// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. +package disableuser + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DisableUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDisableUser type alias for index. +type NewDisableUser func(username string) *DisableUser + +// NewDisableUserFunc returns a new instance of DisableUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDisableUserFunc(tp elastictransport.Interface) NewDisableUser { + return func(username string) *DisableUser { + n := New(tp) + + n._username(username) + + return n + } +} + +// Disable users. +// +// Disable users in the native realm. +// By default, when you create users, they are enabled. +// You can use this API to revoke a user's access to Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user +func New(tp elastictransport.Interface) *DisableUser { + r := &DisableUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DisableUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + path.WriteString("/") + path.WriteString("_disable") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
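+//
+// (Editor's note, not part of the generated file: a round-trip sketch with an
+// illustrative transport tp. HttpRequest above builds
+// PUT /_security/user/{username}/_disable; Perform sends it as-is.)
+//
+//    res, err := NewDisableUserFunc(tp)("jacknich").Perform(context.Background())
+//    if err == nil {
+//        defer res.Body.Close()
+//        // a 2xx status means the user is now disabled; the body is an empty
+//        // JSON object, matching the empty Response type in response.go.
+//    }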
+func (r DisableUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.disable_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.disable_user") + if reader := instrument.RecordRequestBody(ctx, "security.disable_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.disable_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DisableUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a disableuser.Response +func (r DisableUser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
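+//
+// (Editor's note, not part of the generated file: a sketch combining IsSuccess
+// with the Refresh option defined below; tp and the username are illustrative.)
+//
+//    ok, err := NewDisableUserFunc(tp)("jacknich").
+//        Refresh(refresh.True).
+//        IsSuccess(context.Background())
+//    // ok is false with a nil err when the user does not exist (HTTP 404).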
+func (r DisableUser) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DisableUser query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DisableUser headers map. +func (r *DisableUser) Header(key, value string) *DisableUser { + r.headers.Set(key, value) + + return r +} + +// Username An identifier for the user. +// API Name: username +func (r *DisableUser) _username(username string) *DisableUser { + r.paramSet |= usernameMask + r.username = username + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *DisableUser) Refresh(refresh refresh.Refresh) *DisableUser { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DisableUser) ErrorTrace(errortrace bool) *DisableUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DisableUser) FilterPath(filterpaths ...string) *DisableUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DisableUser) Human(human bool) *DisableUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DisableUser) Pretty(pretty bool) *DisableUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/response.go new file mode 100644 index 000000000..bce70f19c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package disableuser
+
+// Response holds the response body struct for the package disableuser
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22
+type Response struct {
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+ r := &Response{}
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/disable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/disable_user_profile.go
new file mode 100644
index 000000000..02ce4864b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/disable_user_profile.go
@@ -0,0 +1,385 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Disable a user profile.
+//
+// Disable user profiles so that they are not visible in user profile searches.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// When you activate a user profile, it's automatically enabled and visible in
+// user profile searches. You can use the disable user profile API to disable a
+// user profile so it's not visible in these searches.
+// To re-enable a disabled user profile, use the enable user profile API.
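+//
+// (Editor's note, not part of the generated file: a hypothetical call through
+// the typed client; the uid is illustrative and es is assumed to come from
+// go-elasticsearch/v9's elasticsearch.NewTypedClient.)
+//
+//    resp, err := es.Security.DisableUserProfile("u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0").
+//        Do(context.Background())
+//    // on success resp.Acknowledged is true, per response.go below.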
+package disableuserprofile
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/elastic/elastic-transport-go/v8/elastictransport"
+ "github.com/elastic/go-elasticsearch/v9/typedapi/types"
+ "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
+)
+
+const (
+ uidMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DisableUserProfile struct {
+ transport elastictransport.Interface
+
+ headers http.Header
+ values url.Values
+ path url.URL
+
+ raw io.Reader
+
+ paramSet int
+
+ uid string
+
+ spanStarted bool
+
+ instrument elastictransport.Instrumentation
+}
+
+// NewDisableUserProfile type alias for index.
+type NewDisableUserProfile func(uid string) *DisableUserProfile
+
+// NewDisableUserProfileFunc returns a new instance of DisableUserProfile with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDisableUserProfileFunc(tp elastictransport.Interface) NewDisableUserProfile {
+ return func(uid string) *DisableUserProfile {
+ n := New(tp)
+
+ n._uid(uid)
+
+ return n
+ }
+}
+
+// Disable a user profile.
+//
+// Disable user profiles so that they are not visible in user profile searches.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// When you activate a user profile, it's automatically enabled and visible in
+// user profile searches. You can use the disable user profile API to disable a
+// user profile so it's not visible in these searches.
+// To re-enable a disabled user profile, use the enable user profile API.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile
+func New(tp elastictransport.Interface) *DisableUserProfile {
+ r := &DisableUserProfile{
+ transport: tp,
+ values: make(url.Values),
+ headers: make(http.Header),
+ }
+
+ if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+ if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+ r.instrument = instrument
+ }
+ }
+
+ return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
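+//
+// (Editor's note, not part of the generated file: the request built below,
+// assuming an illustrative transport tp and profile uid.)
+//
+//    req, err := NewDisableUserProfileFunc(tp)("u_example_0").HttpRequest(ctx)
+//    // req.Method == http.MethodPut
+//    // req.URL.Path == "/_security/profile/u_example_0/_disable"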
+func (r *DisableUserProfile) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == uidMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.uid) + } + path.WriteString(r.uid) + path.WriteString("/") + path.WriteString("_disable") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DisableUserProfile) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.disable_user_profile") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.disable_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.disable_user_profile", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.disable_user_profile") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DisableUserProfile query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a disableuserprofile.Response +func (r DisableUserProfile) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + 
} + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DisableUserProfile) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.disable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DisableUserProfile query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DisableUserProfile headers map. +func (r *DisableUserProfile) Header(key, value string) *DisableUserProfile { + r.headers.Set(key, value) + + return r +} + +// Uid Unique identifier for the user profile. +// API Name: uid +func (r *DisableUserProfile) _uid(uid string) *DisableUserProfile { + r.paramSet |= uidMask + r.uid = uid + + return r +} + +// Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', it does nothing with refreshes. +// API name: refresh +func (r *DisableUserProfile) Refresh(refresh refresh.Refresh) *DisableUserProfile { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DisableUserProfile) ErrorTrace(errortrace bool) *DisableUserProfile { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DisableUserProfile) FilterPath(filterpaths ...string) *DisableUserProfile { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DisableUserProfile) Human(human bool) *DisableUserProfile { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DisableUserProfile) Pretty(pretty bool) *DisableUserProfile { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/response.go new file mode 100644 index 000000000..ba87dcc98 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package disableuserprofile + +// Response holds the response body struct for the package disableuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/disable_user_profile/Response.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/enable_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/enable_user.go new file mode 100644 index 000000000..8f6b5d550 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/enable_user.go @@ -0,0 +1,363 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Enable users. +// +// Enable users in the native realm. +// By default, when you create users, they are enabled. +package enableuser + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EnableUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEnableUser type alias for index. +type NewEnableUser func(username string) *EnableUser + +// NewEnableUserFunc returns a new instance of EnableUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEnableUserFunc(tp elastictransport.Interface) NewEnableUser { + return func(username string) *EnableUser { + n := New(tp) + + n._username(username) + + return n + } +} + +// Enable users. +// +// Enable users in the native realm. +// By default, when you create users, they are enabled. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user +func New(tp elastictransport.Interface) *EnableUser { + r := &EnableUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
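// ---------------------------------------------------------------------------
// A minimal usage sketch for the endpoint vendored above, driven through the
// typed client. Not part of the vendored patch; the address, credentials, and
// username below are illustrative assumptions, not values from this change.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func main() {
	// Hypothetical cluster coordinates; replace with real ones.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"},
		Username:  "elastic",
		Password:  "changeme",
	})
	if err != nil {
		log.Fatalf("client: %v", err)
	}

	// Issues PUT /_security/user/jacknich/_enable and waits for the change
	// to become visible to search before returning.
	if _, err := es.Security.EnableUser("jacknich").
		Refresh(refresh.Waitfor).
		Do(context.Background()); err != nil {
		log.Fatalf("enable user: %v", err)
	}
}
// ---------------------------------------------------------------------------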
+func (r *EnableUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + path.WriteString("/") + path.WriteString("_enable") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r EnableUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.enable_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.enable_user") + if reader := instrument.RecordRequestBody(ctx, "security.enable_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enable_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EnableUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a enableuser.Response +func (r EnableUser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + 
errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r EnableUser) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the EnableUser query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the EnableUser headers map. +func (r *EnableUser) Header(key, value string) *EnableUser { + r.headers.Set(key, value) + + return r +} + +// Username An identifier for the user. +// API Name: username +func (r *EnableUser) _username(username string) *EnableUser { + r.paramSet |= usernameMask + r.username = username + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *EnableUser) Refresh(refresh refresh.Refresh) *EnableUser { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EnableUser) ErrorTrace(errortrace bool) *EnableUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *EnableUser) FilterPath(filterpaths ...string) *EnableUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *EnableUser) Human(human bool) *EnableUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *EnableUser) Pretty(pretty bool) *EnableUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/response.go new file mode 100644 index 000000000..6f2e204cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser/response.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package enableuser + +// Response holds the response body struct for the package enableuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 +type Response struct { +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/enable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/enable_user_profile.go new file mode 100644 index 000000000..d341160e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/enable_user_profile.go @@ -0,0 +1,385 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Enable a user profile. +// +// Enable user profiles to make them visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. +// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. +package enableuserprofile + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + uidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EnableUserProfile struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + uid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEnableUserProfile type alias for index. +type NewEnableUserProfile func(uid string) *EnableUserProfile + +// NewEnableUserProfileFunc returns a new instance of EnableUserProfile with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEnableUserProfileFunc(tp elastictransport.Interface) NewEnableUserProfile { + return func(uid string) *EnableUserProfile { + n := New(tp) + + n._uid(uid) + + return n + } +} + +// Enable a user profile. +// +// Enable user profiles to make them visible in user profile searches. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// When you activate a user profile, it's automatically enabled and visible in +// user profile searches. +// If you later disable the user profile, you can use the enable user profile +// API to make the profile visible in these searches again. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile +func New(tp elastictransport.Interface) *EnableUserProfile { + r := &EnableUserProfile{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
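// ---------------------------------------------------------------------------
// A hedged sketch pairing this endpoint with security.disable_user_profile
// vendored earlier in this diff, to toggle a profile's search visibility.
// The client value and the profile UID are illustrative assumptions.
package securityexamples

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func toggleProfileVisibility(ctx context.Context, es *elasticsearch.TypedClient) error {
	uid := "u_P_0BMHgaOK3p7k-PFWUCRw_0" // hypothetical profile UID

	// Hide the profile from user-profile searches...
	if _, err := es.Security.DisableUserProfile(uid).Refresh(refresh.True).Do(ctx); err != nil {
		return err
	}
	// ...and make it visible again.
	_, err := es.Security.EnableUserProfile(uid).Refresh(refresh.True).Do(ctx)
	return err
}
// ---------------------------------------------------------------------------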
+func (r *EnableUserProfile) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == uidMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.uid) + } + path.WriteString(r.uid) + path.WriteString("/") + path.WriteString("_enable") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r EnableUserProfile) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.enable_user_profile") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.enable_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.enable_user_profile", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enable_user_profile") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EnableUserProfile query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a enableuserprofile.Response +func (r EnableUserProfile) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return 
nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r EnableUserProfile) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enable_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the EnableUserProfile query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the EnableUserProfile headers map. +func (r *EnableUserProfile) Header(key, value string) *EnableUserProfile { + r.headers.Set(key, value) + + return r +} + +// Uid A unique identifier for the user profile. +// API Name: uid +func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { + r.paramSet |= uidMask + r.uid = uid + + return r +} + +// Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. +// API name: refresh +func (r *EnableUserProfile) Refresh(refresh refresh.Refresh) *EnableUserProfile { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EnableUserProfile) ErrorTrace(errortrace bool) *EnableUserProfile { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *EnableUserProfile) FilterPath(filterpaths ...string) *EnableUserProfile { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *EnableUserProfile) Human(human bool) *EnableUserProfile { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *EnableUserProfile) Pretty(pretty bool) *EnableUserProfile { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/response.go new file mode 100644 index 000000000..18765828c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package enableuserprofile + +// Response holds the response body struct for the package enableuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/enable_user_profile/Response.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/enroll_kibana.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/enroll_kibana.go new file mode 100644 index 000000000..c39be36a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/enroll_kibana.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Enroll Kibana. +// +// Enable a Kibana instance to configure itself for communication with a secured +// Elasticsearch cluster. +// +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. +package enrollkibana + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EnrollKibana struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEnrollKibana type alias for index. +type NewEnrollKibana func() *EnrollKibana + +// NewEnrollKibanaFunc returns a new instance of EnrollKibana with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEnrollKibanaFunc(tp elastictransport.Interface) NewEnrollKibana { + return func() *EnrollKibana { + n := New(tp) + + return n + } +} + +// Enroll Kibana. +// +// Enable a Kibana instance to configure itself for communication with a secured +// Elasticsearch cluster. +// +// NOTE: This API is currently intended for internal use only by Kibana. +// Kibana uses this API internally to configure itself for communications with +// an Elasticsearch cluster that already has security features enabled. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana +func New(tp elastictransport.Interface) *EnrollKibana { + r := &EnrollKibana{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
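// ---------------------------------------------------------------------------
// A short sketch of consuming this endpoint's response through the typed
// client. The client value is assumed, and the Name field on the token is an
// assumption about the types.KibanaToken shape referenced by the response
// struct added later in this diff.
package securityexamples

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func enrollKibana(ctx context.Context, es *elasticsearch.TypedClient) error {
	resp, err := es.Security.EnrollKibana().Do(ctx)
	if err != nil {
		return err
	}
	// resp.HttpCa is the HTTP-layer CA certificate as a base64-encoded ASN.1
	// DER string; resp.Token is the service token Kibana authenticates with.
	fmt.Println("http_ca:", resp.HttpCa)
	fmt.Println("token name:", resp.Token.Name)
	return nil
}
// ---------------------------------------------------------------------------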
+func (r *EnrollKibana) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("enroll") + path.WriteString("/") + path.WriteString("kibana") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r EnrollKibana) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.enroll_kibana") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.enroll_kibana") + if reader := instrument.RecordRequestBody(ctx, "security.enroll_kibana", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enroll_kibana") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EnrollKibana query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a enrollkibana.Response +func (r EnrollKibana) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_kibana") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r EnrollKibana) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_kibana") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the EnrollKibana query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the EnrollKibana headers map. +func (r *EnrollKibana) Header(key, value string) *EnrollKibana { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EnrollKibana) ErrorTrace(errortrace bool) *EnrollKibana { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *EnrollKibana) FilterPath(filterpaths ...string) *EnrollKibana { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *EnrollKibana) Human(human bool) *EnrollKibana { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *EnrollKibana) Pretty(pretty bool) *EnrollKibana { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/response.go new file mode 100644 index 000000000..0200e3a89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package enrollkibana + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package enrollkibana +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/enroll_kibana/Response.ts#L20-L29 +type Response struct { + + // HttpCa The CA certificate used to sign the node certificates that Elasticsearch uses + // for TLS on the HTTP layer. + // The certificate is returned as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. + HttpCa string `json:"http_ca"` + Token types.KibanaToken `json:"token"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/enroll_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/enroll_node.go new file mode 100644 index 000000000..a465d3dc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/enroll_node.go @@ -0,0 +1,349 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Enroll a node. +// +// Enroll a new node to allow it to join an existing cluster with security +// features enabled. +// +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. +// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. +package enrollnode + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type EnrollNode struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewEnrollNode type alias for index. +type NewEnrollNode func() *EnrollNode + +// NewEnrollNodeFunc returns a new instance of EnrollNode with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewEnrollNodeFunc(tp elastictransport.Interface) NewEnrollNode { + return func() *EnrollNode { + n := New(tp) + + return n + } +} + +// Enroll a node. +// +// Enroll a new node to allow it to join an existing cluster with security +// features enabled. +// +// The response contains all the necessary information for the joining node to +// bootstrap discovery and security related settings so that it can successfully +// join the cluster. +// The response contains key and certificate material that allows the caller to +// generate valid signed certificates for the HTTP layer of all nodes in the +// cluster. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node +func New(tp elastictransport.Interface) *EnrollNode { + r := &EnrollNode{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
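// ---------------------------------------------------------------------------
// A hedged sketch of bootstrapping discovery from this endpoint's response.
// The client value is assumed; the field names match the enrollnode Response
// struct added later in this diff.
package securityexamples

import (
	"context"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// enrollNode returns the host:port transport addresses of existing cluster
// members, which the joining node uses to seed discovery. The key and
// certificate material (HttpCaCert, HttpCaKey, TransportCaCert,
// TransportCert, TransportKey) arrives base64-encoded in the same response.
func enrollNode(ctx context.Context, es *elasticsearch.TypedClient) ([]string, error) {
	resp, err := es.Security.EnrollNode().Do(ctx)
	if err != nil {
		return nil, err
	}
	return resp.NodesAddresses, nil
}
// ---------------------------------------------------------------------------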
+func (r *EnrollNode) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("enroll") + path.WriteString("/") + path.WriteString("node") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r EnrollNode) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.enroll_node") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.enroll_node") + if reader := instrument.RecordRequestBody(ctx, "security.enroll_node", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.enroll_node") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the EnrollNode query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a enrollnode.Response +func (r EnrollNode) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r EnrollNode) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.enroll_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the EnrollNode query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the EnrollNode headers map. +func (r *EnrollNode) Header(key, value string) *EnrollNode { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *EnrollNode) ErrorTrace(errortrace bool) *EnrollNode { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *EnrollNode) FilterPath(filterpaths ...string) *EnrollNode { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *EnrollNode) Human(human bool) *EnrollNode { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *EnrollNode) Pretty(pretty bool) *EnrollNode { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/response.go new file mode 100644 index 000000000..8f13b003e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode/response.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package enrollnode + +// Response holds the response body struct for the package enrollnode +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/enroll_node/Response.ts#L20-L47 +type Response struct { + + // HttpCaCert The CA certificate that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the certificate. + HttpCaCert string `json:"http_ca_cert"` + // HttpCaKey The CA private key that can be used by the new node in order to sign its + // certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER + // encoding of the key. + HttpCaKey string `json:"http_ca_key"` + // NodesAddresses A list of transport addresses in the form of `host:port` for the nodes that + // are already members of the cluster. + NodesAddresses []string `json:"nodes_addresses"` + // TransportCaCert The CA certificate that is used to sign the TLS certificate for the transport + // layer, as a Base64 encoded string of the ASN.1 DER encoding of the + // certificate. + TransportCaCert string `json:"transport_ca_cert"` + // TransportCert The certificate that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the certificate. + TransportCert string `json:"transport_cert"` + // TransportKey The private key that the node can use for TLS for its transport layer, as a + // Base64 encoded string of the ASN.1 DER encoding of the key. + TransportKey string `json:"transport_key"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/get_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/get_api_key.go new file mode 100644 index 000000000..30cb0e8d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/get_api_key.go @@ -0,0 +1,418 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get API key information. +// +// Retrieves information for one or more API keys. +// NOTE: If you have only the `manage_own_api_key` privilege, this API returns +// only the API keys that you own. +// If you have `read_security`, `manage_api_key` or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. +package getapikey + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetApiKey type alias for index. +type NewGetApiKey func() *GetApiKey + +// NewGetApiKeyFunc returns a new instance of GetApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetApiKeyFunc(tp elastictransport.Interface) NewGetApiKey { + return func() *GetApiKey { + n := New(tp) + + return n + } +} + +// Get API key information. +// +// Retrieves information for one or more API keys. +// NOTE: If you have only the `manage_own_api_key` privilege, this API returns +// only the API keys that you own. +// If you have `read_security`, `manage_api_key` or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key +func New(tp elastictransport.Interface) *GetApiKey { + r := &GetApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
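// ---------------------------------------------------------------------------
// A sketch of combining this endpoint's query-parameter builders. The client
// value is assumed, and because the response file is truncated in this diff,
// the ApiKeys field (with Id and Name on each entry) is an assumption based
// on the generated types package rather than on text shown here.
package securityexamples

import (
	"context"
	"fmt"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func listOwnActiveKeys(ctx context.Context, es *elasticsearch.TypedClient) error {
	resp, err := es.Security.GetApiKey().
		Owner(true).         // only keys owned by the authenticated user
		ActiveOnly(true).    // skip invalidated and expired keys
		WithLimitedBy(true). // include the owner's limiting role descriptors
		Do(ctx)
	if err != nil {
		return err
	}
	for _, k := range resp.ApiKeys {
		fmt.Printf("%s (id=%s)\n", k.Name, k.Id)
	}
	return nil
}
// ---------------------------------------------------------------------------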
+func (r *GetApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.get_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getapikey.Response +func (r GetApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if 
errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetApiKey) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetApiKey query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetApiKey headers map. +func (r *GetApiKey) Header(key, value string) *GetApiKey { + r.headers.Set(key, value) + + return r +} + +// Id An API key id. +// This parameter cannot be used with any of `name`, `realm_name` or `username`. +// API name: id +func (r *GetApiKey) Id(id string) *GetApiKey { + r.values.Set("id", id) + + return r +} + +// Name An API key name. +// This parameter cannot be used with any of `id`, `realm_name` or `username`. +// It supports prefix search with wildcard. +// API name: name +func (r *GetApiKey) Name(name string) *GetApiKey { + r.values.Set("name", name) + + return r +} + +// Owner A boolean flag that can be used to query API keys owned by the currently +// authenticated user. +// The `realm_name` or `username` parameters cannot be specified when this +// parameter is set to `true` as they are assumed to be the currently +// authenticated ones. +// API name: owner +func (r *GetApiKey) Owner(owner bool) *GetApiKey { + r.values.Set("owner", strconv.FormatBool(owner)) + + return r +} + +// RealmName The name of an authentication realm. +// This parameter cannot be used with either `id` or `name` or when `owner` flag +// is set to `true`. +// API name: realm_name +func (r *GetApiKey) RealmName(name string) *GetApiKey { + r.values.Set("realm_name", name) + + return r +} + +// Username The username of a user. +// This parameter cannot be used with either `id` or `name` or when `owner` flag +// is set to `true`. +// API name: username +func (r *GetApiKey) Username(username string) *GetApiKey { + r.values.Set("username", username) + + return r +} + +// WithLimitedBy Return the snapshot of the owner user's role descriptors +// associated with the API key. An API key's actual +// permission is the intersection of its assigned role +// descriptors and the owner user's role descriptors. +// API name: with_limited_by +func (r *GetApiKey) WithLimitedBy(withlimitedby bool) *GetApiKey { + r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) + + return r +} + +// ActiveOnly A boolean flag that can be used to query API keys that are currently active. 
+// An API key is considered active if it is neither invalidated, nor expired at +// query time. You can specify this together with other parameters such as +// `owner` or `name`. If `active_only` is false, the response will include both +// active and inactive (expired or invalidated) keys. +// API name: active_only +func (r *GetApiKey) ActiveOnly(activeonly bool) *GetApiKey { + r.values.Set("active_only", strconv.FormatBool(activeonly)) + + return r +} + +// WithProfileUid Determines whether to also retrieve the profile uid, for the API key owner +// principal, if it exists. +// API name: with_profile_uid +func (r *GetApiKey) WithProfileUid(withprofileuid bool) *GetApiKey { + r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetApiKey) ErrorTrace(errortrace bool) *GetApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetApiKey) FilterPath(filterpaths ...string) *GetApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetApiKey) Human(human bool) *GetApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetApiKey) Pretty(pretty bool) *GetApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/response.go new file mode 100644 index 000000000..d36117d1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getapikey + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 +type Response struct { + ApiKeys []types.ApiKey `json:"api_keys"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go new file mode 100644 index 000000000..22ab3d28f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go @@ -0,0 +1,329 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get builtin privileges. +// +// Get the list of cluster privileges and index privileges that are available in +// this version of Elasticsearch. +package getbuiltinprivileges + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetBuiltinPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetBuiltinPrivileges type alias for index. +type NewGetBuiltinPrivileges func() *GetBuiltinPrivileges + +// NewGetBuiltinPrivilegesFunc returns a new instance of GetBuiltinPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetBuiltinPrivilegesFunc(tp elastictransport.Interface) NewGetBuiltinPrivileges { + return func() *GetBuiltinPrivileges { + n := New(tp) + + return n + } +} + +// Get builtin privileges. +// +// Get the list of cluster privileges and index privileges that are available in +// this version of Elasticsearch. 
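// [Editorial sketch, not part of the generated file] Fetching the builtin
// privilege catalog with the builder defined below; the field names come from
// this package's response.go later in this diff (Cluster, Index,
// RemoteCluster). `tp` is the transport from the earlier getapikey sketch.
//
//	res, err := getbuiltinprivileges.New(tp).Do(context.Background())
//	if err != nil {
//		panic(err)
//	}
//	// res.Cluster: enum-typed cluster privileges understood by this version;
//	// res.Index: index privilege names; res.RemoteCluster: remote_cluster ones.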
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges +func New(tp elastictransport.Interface) *GetBuiltinPrivileges { + r := &GetBuiltinPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetBuiltinPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + path.WriteString("_builtin") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetBuiltinPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_builtin_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_builtin_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.get_builtin_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_builtin_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetBuiltinPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getbuiltinprivileges.Response +func (r GetBuiltinPrivileges) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_builtin_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, 
err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetBuiltinPrivileges) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_builtin_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetBuiltinPrivileges query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetBuiltinPrivileges headers map. +func (r *GetBuiltinPrivileges) Header(key, value string) *GetBuiltinPrivileges { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetBuiltinPrivileges) ErrorTrace(errortrace bool) *GetBuiltinPrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetBuiltinPrivileges) FilterPath(filterpaths ...string) *GetBuiltinPrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
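// [Editorial note, not part of the generated file] ErrorTrace, FilterPath,
// Human and Pretty are the generic query parameters emitted on every builder
// in this diff; they chain like any other option, for example:
//
//	res, err := getbuiltinprivileges.New(tp).
//		FilterPath("cluster").
//		Pretty(true).
//		Do(context.Background())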
+// API name: human +func (r *GetBuiltinPrivileges) Human(human bool) *GetBuiltinPrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetBuiltinPrivileges) Pretty(pretty bool) *GetBuiltinPrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/response.go new file mode 100644 index 000000000..c7c99e9dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges/response.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getbuiltinprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege" +) + +// Response holds the response body struct for the package getbuiltinprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L26-L42 +type Response struct { + + // Cluster The list of cluster privileges that are understood by this version of + // Elasticsearch. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"` + // Index The list of index privileges that are understood by this version of + // Elasticsearch. + Index []string `json:"index"` + // RemoteCluster The list of remote_cluster privileges that are understood by this version of + // Elasticsearch. + RemoteCluster []remoteclusterprivilege.RemoteClusterPrivilege `json:"remote_cluster"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/get_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/get_privileges.go new file mode 100644 index 000000000..3bf05d94f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/get_privileges.go @@ -0,0 +1,399 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +package getprivileges + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + applicationMask = iota + 1 + + nameMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + application string + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetPrivileges type alias for index. +type NewGetPrivileges func() *GetPrivileges + +// NewGetPrivilegesFunc returns a new instance of GetPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetPrivilegesFunc(tp elastictransport.Interface) NewGetPrivileges { + return func() *GetPrivileges { + n := New(tp) + + return n + } +} + +// Get application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `read_security` cluster privilege (or a greater privilege such as +// `manage_security` or `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges +func New(tp elastictransport.Interface) *GetPrivileges { + r := &GetPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
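// [Editorial note, not part of the generated file] The iota-based masks above
// (applicationMask, nameMask) are OR-ed into paramSet by the Application and
// Name setters further down, and the switch in HttpRequest below selects the
// REST path from that bitmask:
//
//	getprivileges.New(tp).Do(ctx)                      // GET /_security/privilege
//	getprivileges.New(tp).Application("myapp").Do(ctx) // GET /_security/privilege/myapp
//	getprivileges.New(tp).
//		Application("myapp").
//		Name("read").
//		Do(ctx) // GET /_security/privilege/myapp/read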
+func (r *GetPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + + method = http.MethodGet + case r.paramSet == applicationMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", r.application) + } + path.WriteString(r.application) + + method = http.MethodGet + case r.paramSet == applicationMask|nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "application", r.application) + } + path.WriteString(r.application) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
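// [Editorial note, not part of the generated file] In Perform below (as in
// every Perform in this diff), the `ctx := instrument.Start(...)` inside the
// `if r.spanStarted == false` block declares a new, shadowed variable, so the
// outer ctx stays nil and is then reset to providedCtx; the span context only
// governs the deferred Close. Do and IsSuccess sidestep this by starting the
// span themselves and setting spanStarted before calling Perform.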
+func (r GetPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.get_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getprivileges.Response +func (r GetPrivileges) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
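// [Editorial sketch, not part of the generated file] IsSuccess below reduces
// the call to a boolean: any 2xx status is (true, nil), a 404 is (false, nil),
// and anything else is (false, err).
//
//	ok, err := getprivileges.New(tp).
//		Application("myapp").
//		IsSuccess(context.Background())
//	if err != nil {
//		panic(err)
//	}
//	if !ok {
//		// the endpoint answered 404
//	}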
+func (r GetPrivileges) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetPrivileges query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetPrivileges headers map. +func (r *GetPrivileges) Header(key, value string) *GetPrivileges { + r.headers.Set(key, value) + + return r +} + +// Application The name of the application. +// Application privileges are always associated with exactly one application. +// If you do not specify this parameter, the API returns information about all +// privileges for all applications. +// API Name: application +func (r *GetPrivileges) Application(application string) *GetPrivileges { + r.paramSet |= applicationMask + r.application = application + + return r +} + +// Name The name of the privilege. +// If you do not specify this parameter, the API returns information about all +// privileges for the requested application. +// API Name: name +func (r *GetPrivileges) Name(name string) *GetPrivileges { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetPrivileges) ErrorTrace(errortrace bool) *GetPrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetPrivileges) FilterPath(filterpaths ...string) *GetPrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetPrivileges) Human(human bool) *GetPrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetPrivileges) Pretty(pretty bool) *GetPrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/response.go new file mode 100644 index 000000000..f594284c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L29 + +type Response map[string]map[string]types.PrivilegesActions + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/get_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/get_role.go new file mode 100644 index 000000000..5d8c7b3db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/get_role.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get roles. +// +// Get roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. 
+// The get roles API cannot retrieve roles that are defined in roles files. +package getrole + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRole type alias for index. +type NewGetRole func() *GetRole + +// NewGetRoleFunc returns a new instance of GetRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRoleFunc(tp elastictransport.Interface) NewGetRole { + return func() *GetRole { + n := New(tp) + + return n + } +} + +// Get roles. +// +// Get roles in the native realm. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The get roles API cannot retrieve roles that are defined in roles files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role +func New(tp elastictransport.Interface) *GetRole { + r := &GetRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
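// [Editorial sketch, not part of the generated file] Do below decodes into
// the map-typed Response from this package's response.go
// (map[string]types.Role, keyed by role name); per its doc comment, Name
// accepts a comma-separated list:
//
//	roles, err := getrole.New(tp).
//		Name("admin,monitor").
//		Do(context.Background())
//	if err != nil {
//		panic(err)
//	}
//	for name, role := range roles {
//		_ = role // inspect the role definition
//		fmt.Println(name)
//	}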
+func (r GetRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_role") + if reader := instrument.RecordRequestBody(ctx, "security.get_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrole.Response +func (r GetRole) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetRole) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRole query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRole headers map. +func (r *GetRole) Header(key, value string) *GetRole { + r.headers.Set(key, value) + + return r +} + +// Name The name of the role. +// You can specify multiple roles as a comma-separated list. +// If you do not specify this parameter, the API returns information about all +// roles. +// API Name: name +func (r *GetRole) Name(name string) *GetRole { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRole) ErrorTrace(errortrace bool) *GetRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRole) FilterPath(filterpaths ...string) *GetRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRole) Human(human bool) *GetRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRole) Pretty(pretty bool) *GetRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/response.go new file mode 100644 index 000000000..c232decec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrole + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L31 + +type Response map[string]types.Role + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/get_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/get_role_mapping.go new file mode 100644 index 000000000..3edb056fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/get_role_mapping.go @@ -0,0 +1,365 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The get role mappings API cannot retrieve role mappings that are defined in +// role mapping files. +package getrolemapping + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
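// [Editorial sketch, not part of the generated file] Same shape as getrole:
// Do decodes into this package's map Response
// (map[string]types.SecurityRoleMapping, keyed by mapping name, per
// response.go later in this diff); omitting Name lists all role mappings.
//
//	mappings, err := getrolemapping.New(tp).Do(context.Background())
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("%d role mappings\n", len(mappings))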
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRoleMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRoleMapping type alias for index. +type NewGetRoleMapping func() *GetRoleMapping + +// NewGetRoleMappingFunc returns a new instance of GetRoleMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRoleMappingFunc(tp elastictransport.Interface) NewGetRoleMapping { + return func() *GetRoleMapping { + n := New(tp) + + return n + } +} + +// Get role mappings. +// +// Role mappings define which roles are assigned to each user. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. +// The get role mappings API cannot retrieve role mappings that are defined in +// role mapping files. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping +func New(tp elastictransport.Interface) *GetRoleMapping { + r := &GetRoleMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRoleMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetRoleMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_role_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_role_mapping") + if reader := instrument.RecordRequestBody(ctx, "security.get_role_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_role_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRoleMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrolemapping.Response +func (r GetRoleMapping) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetRoleMapping) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRoleMapping query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRoleMapping headers map. +func (r *GetRoleMapping) Header(key, value string) *GetRoleMapping { + r.headers.Set(key, value) + + return r +} + +// Name The distinct name that identifies the role mapping. The name is used solely +// as an identifier to facilitate interaction via the API; it does not affect +// the behavior of the mapping in any way. You can specify multiple mapping +// names as a comma-separated list. If you do not specify this parameter, the +// API returns information about all role mappings. +// API Name: name +func (r *GetRoleMapping) Name(name string) *GetRoleMapping { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRoleMapping) ErrorTrace(errortrace bool) *GetRoleMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRoleMapping) FilterPath(filterpaths ...string) *GetRoleMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRoleMapping) Human(human bool) *GetRoleMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetRoleMapping) Pretty(pretty bool) *GetRoleMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/response.go new file mode 100644 index 000000000..52bcfde78 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrolemapping + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getrolemapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L29 + +type Response map[string]types.SecurityRoleMapping + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/get_service_accounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/get_service_accounts.go new file mode 100644 index 000000000..853b692f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/get_service_accounts.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get service accounts. +// +// Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. +package getserviceaccounts + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + namespaceMask = iota + 1 + + serviceMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
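+//
+// Editor's note (assumption, not generated): because this is a sentinel error,
+// callers can match it with errors.Is, e.g.
+//
+//	if _, err := r.HttpRequest(ctx); errors.Is(err, ErrBuildPath) {
+//		// a required path parameter was never set
+//	}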
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetServiceAccounts struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + namespace string + service string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetServiceAccounts type alias for index. +type NewGetServiceAccounts func() *GetServiceAccounts + +// NewGetServiceAccountsFunc returns a new instance of GetServiceAccounts with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetServiceAccountsFunc(tp elastictransport.Interface) NewGetServiceAccounts { + return func() *GetServiceAccounts { + n := New(tp) + + return n + } +} + +// Get service accounts. +// +// Get a list of service accounts that match the provided path parameters. +// +// NOTE: Currently, only the `elastic/fleet-server` service account is +// available. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts +func New(tp elastictransport.Interface) *GetServiceAccounts { + r := &GetServiceAccounts{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetServiceAccounts) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == namespaceMask|serviceMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + + method = http.MethodGet + case r.paramSet == namespaceMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the 
provided transport and returns an http.Response. +func (r GetServiceAccounts) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_service_accounts") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_service_accounts") + if reader := instrument.RecordRequestBody(ctx, "security.get_service_accounts", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_service_accounts") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetServiceAccounts query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getserviceaccounts.Response +func (r GetServiceAccounts) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_service_accounts") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
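+//
+// Editor's sketch (hypothetical; client variable `es` assumed): listing the
+// service accounts in the `elastic` namespace and ranging over the response
+// map keyed by account name:
+//
+//	res, err := es.Security.GetServiceAccounts().Namespace("elastic").Do(ctx)
+//	if err == nil {
+//		for name := range res {
+//			fmt.Println(name) // e.g. "elastic/fleet-server"
+//		}
+//	}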
+func (r GetServiceAccounts) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_service_accounts") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetServiceAccounts query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetServiceAccounts headers map. +func (r *GetServiceAccounts) Header(key, value string) *GetServiceAccounts { + r.headers.Set(key, value) + + return r +} + +// Namespace The name of the namespace. +// Omit this parameter to retrieve information about all service accounts. +// If you omit this parameter, you must also omit the `service` parameter. +// API Name: namespace +func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { + r.paramSet |= namespaceMask + r.namespace = namespace + + return r +} + +// Service The service name. +// Omit this parameter to retrieve information about all service accounts that +// belong to the specified `namespace`. +// API Name: service +func (r *GetServiceAccounts) Service(service string) *GetServiceAccounts { + r.paramSet |= serviceMask + r.service = service + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetServiceAccounts) ErrorTrace(errortrace bool) *GetServiceAccounts { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetServiceAccounts) FilterPath(filterpaths ...string) *GetServiceAccounts { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetServiceAccounts) Human(human bool) *GetServiceAccounts { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetServiceAccounts) Pretty(pretty bool) *GetServiceAccounts { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/response.go new file mode 100644 index 000000000..84b70c59d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getserviceaccounts + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getserviceaccounts +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L29 + +type Response map[string]types.RoleDescriptorWrapper + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/get_service_credentials.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/get_service_credentials.go new file mode 100644 index 000000000..748a23583 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/get_service_credentials.go @@ -0,0 +1,392 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get service account credentials. 
+// +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. +package getservicecredentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + namespaceMask = iota + 1 + + serviceMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetServiceCredentials struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + namespace string + service string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetServiceCredentials type alias for index. +type NewGetServiceCredentials func(namespace, service string) *GetServiceCredentials + +// NewGetServiceCredentialsFunc returns a new instance of GetServiceCredentials with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetServiceCredentialsFunc(tp elastictransport.Interface) NewGetServiceCredentials { + return func(namespace, service string) *GetServiceCredentials { + n := New(tp) + + n._namespace(namespace) + + n._service(service) + + return n + } +} + +// Get service account credentials. +// +// To use this API, you must have at least the `read_security` cluster privilege +// (or a greater privilege such as `manage_service_account` or +// `manage_security`). +// +// The response includes service account tokens that were created with the +// create service account tokens API as well as file-backed tokens from all +// nodes of the cluster. +// +// NOTE: For tokens backed by the `service_tokens` file, the API collects them +// from all nodes of the cluster. +// Tokens with the same name from different nodes are assumed to be the same +// token and are only counted once towards the total number of service tokens. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials +func New(tp elastictransport.Interface) *GetServiceCredentials { + r := &GetServiceCredentials{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
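+//
+// Editor's sketch (hypothetical; client variable `es` assumed): both path
+// parameters are required for this endpoint, so the typed-client constructor
+// takes them up front; the call below ultimately flows through HttpRequest
+// and Perform:
+//
+//	res, err := es.Security.GetServiceCredentials("elastic", "fleet-server").Do(ctx)
+//	if err == nil {
+//		fmt.Println(res.Count, "token(s) for", res.ServiceAccount)
+//	}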
+func (r *GetServiceCredentials) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == namespaceMask|serviceMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("service") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "namespace", r.namespace) + } + path.WriteString(r.namespace) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "service", r.service) + } + path.WriteString(r.service) + path.WriteString("/") + path.WriteString("credential") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetServiceCredentials) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_service_credentials") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_service_credentials") + if reader := instrument.RecordRequestBody(ctx, "security.get_service_credentials", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_service_credentials") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetServiceCredentials query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getservicecredentials.Response +func (r GetServiceCredentials) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_service_credentials") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } 
+ return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetServiceCredentials) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_service_credentials") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetServiceCredentials query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetServiceCredentials headers map. +func (r *GetServiceCredentials) Header(key, value string) *GetServiceCredentials { + r.headers.Set(key, value) + + return r +} + +// Namespace The name of the namespace. +// API Name: namespace +func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredentials { + r.paramSet |= namespaceMask + r.namespace = namespace + + return r +} + +// Service The service name. +// API Name: service +func (r *GetServiceCredentials) _service(service string) *GetServiceCredentials { + r.paramSet |= serviceMask + r.service = service + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetServiceCredentials) ErrorTrace(errortrace bool) *GetServiceCredentials { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetServiceCredentials) FilterPath(filterpaths ...string) *GetServiceCredentials { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. 
This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetServiceCredentials) Human(human bool) *GetServiceCredentials { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetServiceCredentials) Pretty(pretty bool) *GetServiceCredentials { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/response.go new file mode 100644 index 000000000..084cdf099 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getservicecredentials + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getservicecredentials +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L34 +type Response struct { + Count int `json:"count"` + // NodesCredentials Service account credentials collected from all nodes of the cluster. + NodesCredentials types.NodesCredentials `json:"nodes_credentials"` + ServiceAccount string `json:"service_account"` + Tokens map[string]types.Metadata `json:"tokens"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Tokens: make(map[string]types.Metadata, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/get_settings.go new file mode 100644 index 000000000..8f8ee2762 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/get_settings.go @@ -0,0 +1,355 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). +// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` +package getsettings + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSettings type alias for index. +type NewGetSettings func() *GetSettings + +// NewGetSettingsFunc returns a new instance of GetSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { + return func() *GetSettings { + n := New(tp) + + return n + } +} + +// Get security index settings. +// +// Get the user-configurable settings for the security internal index +// (`.security` and associated indices). +// Only a subset of the index settings — those that are user-configurable—will +// be shown. +// This includes: +// +// * `index.auto_expand_replicas` +// * `index.number_of_replicas` +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings +func New(tp elastictransport.Interface) *GetSettings { + r := &GetSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
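+//
+// Editor's sketch (hypothetical; client variable `es` assumed): this endpoint
+// takes no path parameters, so the request built here is always
+// GET /_security/settings; a typical call would be:
+//
+//	res, err := es.Security.GetSettings().MasterTimeout("30s").Do(ctx)
+//	if err == nil {
+//		_ = res.Security // settings for the .security index
+//	}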
+func (r *GetSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_settings") + if reader := instrument.RecordRequestBody(ctx, "security.get_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsettings.Response +func (r GetSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = 
json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetSettings) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSettings query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSettings headers map. +func (r *GetSettings) Header(key, value string) *GetSettings { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/response.go new file mode 100644 index 000000000..fe5931be7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_settings/SecurityGetSettingsResponse.ts#L21-L36 +type Response struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security types.SecuritySettings `json:"security"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile types.SecuritySettings `json:"security-profile"` + // SecurityTokens Settings for the index used to store tokens. + SecurityTokens types.SecuritySettings `json:"security-tokens"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/get_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/get_token.go new file mode 100644 index 000000000..1939c399b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/get_token.go @@ -0,0 +1,471 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a token. +// +// Create a bearer token for access without requiring basic authentication. +// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. +// +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. +// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. +package gettoken + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetToken struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetToken type alias for index. +type NewGetToken func() *GetToken + +// NewGetTokenFunc returns a new instance of GetToken with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetTokenFunc(tp elastictransport.Interface) NewGetToken { + return func() *GetToken { + n := New(tp) + + return n + } +} + +// Get a token. +// +// Create a bearer token for access without requiring basic authentication. +// The tokens are created by the Elasticsearch Token Service, which is +// automatically enabled when you configure TLS on the HTTP interface. +// Alternatively, you can explicitly enable the +// `xpack.security.authc.token.enabled` setting. +// When you are running in production mode, a bootstrap check prevents you from +// enabling the token service unless you also enable TLS on the HTTP interface. +// +// The get token API takes the same parameters as a typical OAuth 2.0 token API +// except for the use of a JSON request body. +// +// A successful get token API call returns a JSON structure that contains the +// access token, the amount of time (seconds) that the token expires in, the +// type, and the scope if available. 
+// +// The tokens returned by the get token API have a finite period of time for +// which they are valid and after that time period, they can no longer be used. +// That time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// If you want to invalidate a token immediately, you can do so by using the +// invalidate token API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token +func New(tp elastictransport.Interface) *GetToken { + r := &GetToken{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GetToken) Raw(raw io.Reader) *GetToken { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GetToken) Request(req *Request) *GetToken { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetToken) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GetToken: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("oauth2") + path.WriteString("/") + path.WriteString("token") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
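+//
+// Editor's sketch (hypothetical; enum variant and response field names are
+// assumptions): requesting a token with the password grant via a typed
+// client `es`; Do wraps this Perform:
+//
+//	res, err := es.Security.GetToken().
+//		GrantType(accesstokengranttype.Password).
+//		Username("elastic").
+//		Password("changeme").
+//		Do(ctx)
+//	if err == nil {
+//		fmt.Println("bearer token:", res.AccessToken)
+//	}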
+func (r GetToken) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_token") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_token") + if reader := instrument.RecordRequestBody(ctx, "security.get_token", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_token") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetToken query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettoken.Response +func (r GetToken) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GetToken headers map. +func (r *GetToken) Header(key, value string) *GetToken { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetToken) ErrorTrace(errortrace bool) *GetToken { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *GetToken) FilterPath(filterpaths ...string) *GetToken { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetToken) Human(human bool) *GetToken { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetToken) Pretty(pretty bool) *GetToken { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The type of grant. +// Supported grant types are: `password`, `_kerberos`, `client_credentials`, and +// `refresh_token`. +// API name: grant_type +func (r *GetToken) GrantType(granttype accesstokengranttype.AccessTokenGrantType) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GrantType = &granttype + return r +} + +// The base64 encoded kerberos ticket. +// If you specify the `_kerberos` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. +// API name: kerberos_ticket +func (r *GetToken) KerberosTicket(kerberosticket string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KerberosTicket = &kerberosticket + + return r +} + +// The user's password. +// If you specify the `password` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. +// API name: password +func (r *GetToken) Password(password string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Password = &password + + return r +} + +// The string that was returned when you created the token, which enables you to +// extend its life. +// If you specify the `refresh_token` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. +// API name: refresh_token +func (r *GetToken) RefreshToken(refreshtoken string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} + +// The scope of the token. +// Currently tokens are only issued for a scope of FULL regardless of the value +// sent with the request. +// API name: scope +func (r *GetToken) Scope(scope string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Scope = &scope + + return r +} + +// The username that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// This parameter is not valid with any other supported grant type. 
+// API name: username +func (r *GetToken) Username(username string) *GetToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/request.go new file mode 100644 index 000000000..f696a77f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/request.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettoken + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype" +) + +// Request holds the request body struct for the package gettoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L90 +type Request struct { + + // GrantType The type of grant. + // Supported grant types are: `password`, `_kerberos`, `client_credentials`, and + // `refresh_token`. + GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` + // KerberosTicket The base64 encoded kerberos ticket. + // If you specify the `_kerberos` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + KerberosTicket *string `json:"kerberos_ticket,omitempty"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + Password *string `json:"password,omitempty"` + // RefreshToken The string that was returned when you created the token, which enables you to + // extend its life. + // If you specify the `refresh_token` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. + RefreshToken *string `json:"refresh_token,omitempty"` + // Scope The scope of the token. + // Currently tokens are only issued for a scope of FULL regardless of the value + // sent with the request. + Scope *string `json:"scope,omitempty"` + // Username The username that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // This parameter is not valid with any other supported grant type. 
+ Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Gettoken request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "grant_type": + if err := dec.Decode(&s.GrantType); err != nil { + return fmt.Errorf("%s | %w", "GrantType", err) + } + + case "kerberos_ticket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KerberosTicket", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KerberosTicket = &o + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "refresh_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshToken", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshToken = &o + + case "scope": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Scope", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Scope = &o + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/response.go new file mode 100644 index 000000000..3dd88e8b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
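
[Review note] The generated UnmarshalJSON above deliberately tolerates both quoted and raw string values (strconv.Unquote with a fallback to the raw bytes). A sketch of the documented FromJSON entry point, using only helpers from this hunk; note that FromJSON returns a freshly unmarshalled *Request rather than mutating its receiver.

    // Assumed import: github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken.
    func tokenRequestFromJSON() (*gettoken.Request, error) {
        return gettoken.NewRequest().FromJSON(
            `{"grant_type": "password", "username": "elastic", "password": "changeme"}`)
    }
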
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettoken + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 +type Response struct { + AccessToken string `json:"access_token"` + Authentication types.AuthenticatedUser `json:"authentication"` + ExpiresIn int64 `json:"expires_in"` + KerberosAuthenticationResponseToken *string `json:"kerberos_authentication_response_token,omitempty"` + RefreshToken *string `json:"refresh_token,omitempty"` + Scope *string `json:"scope,omitempty"` + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/get_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/get_user.go new file mode 100644 index 000000000..655e295f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/get_user.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get users. +// +// Get information about users in the native realm and built-in users. +package getuser + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetUser type alias for index. +type NewGetUser func() *GetUser + +// NewGetUserFunc returns a new instance of GetUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetUserFunc(tp elastictransport.Interface) NewGetUser { + return func() *GetUser { + n := New(tp) + + return n + } +} + +// Get users. 
+// +// Get information about users in the native realm and built-in users. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user +func New(tp elastictransport.Interface) *GetUser { + r := &GetUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
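
[Review note] The paramSet switch above routes between the collection endpoint and the keyed endpoint. A sketch that inspects the built request, assuming the same hypothetical typed client as in the earlier notes:

    // Assumed imports: context; fmt; github.com/elastic/go-elasticsearch/v9.
    func showGetUserRoutes(ctx context.Context, es *elasticsearch.TypedClient) {
        all, _ := es.Security.GetUser().HttpRequest(ctx)
        fmt.Println(all.URL.Path) // "/_security/user" (paramSet == 0 branch)

        // Username sets usernameMask and joins values with commas.
        one, _ := es.Security.GetUser().Username("elastic", "kibana_system").HttpRequest(ctx)
        fmt.Println(one.URL.Path) // "/_security/user/elastic,kibana_system"
    }
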
+func (r GetUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_user") + if reader := instrument.RecordRequestBody(ctx, "security.get_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getuser.Response +func (r GetUser) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
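
[Review note] Do above decodes a 2xx body straight into the map-backed Response defined in response.go further down this diff. A usage sketch; the Enabled and Roles field names on types.User are assumptions about the vendored types package, not shown in this excerpt.

    // Assumed imports: context; fmt; github.com/elastic/go-elasticsearch/v9.
    func listNativeUsers(ctx context.Context, es *elasticsearch.TypedClient) error {
        users, err := es.Security.GetUser().Do(ctx) // Response is keyed by username
        if err != nil {
            return err
        }
        for name, u := range users {
            fmt.Printf("%s enabled=%v roles=%v\n", name, u.Enabled, u.Roles)
        }
        return nil
    }
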
+func (r GetUser) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetUser query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetUser headers map. +func (r *GetUser) Header(key, value string) *GetUser { + r.headers.Set(key, value) + + return r +} + +// Username An identifier for the user. You can specify multiple usernames as a +// comma-separated list. If you omit this parameter, the API retrieves +// information about all users. +// API Name: username +func (r *GetUser) Username(usernames ...string) *GetUser { + r.paramSet |= usernameMask + r.username = strings.Join(usernames, ",") + + return r +} + +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. +// API name: with_profile_uid +func (r *GetUser) WithProfileUid(withprofileuid bool) *GetUser { + r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetUser) ErrorTrace(errortrace bool) *GetUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetUser) FilterPath(filterpaths ...string) *GetUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetUser) Human(human bool) *GetUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetUser) Pretty(pretty bool) *GetUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/response.go new file mode 100644 index 000000000..757dfe666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getuser + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_user/SecurityGetUserResponse.ts#L23-L30 + +type Response map[string]types.User + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/get_user_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/get_user_privileges.go new file mode 100644 index 000000000..b71b7aa6d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/get_user_privileges.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get user privileges. +// +// Get the security privileges for the logged in user. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. +package getuserprivileges + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
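
[Review note] Rounding out the getuser hunks above: GetUser.IsSuccess maps any 2xx to true and a 404 to false with a nil error, while other statuses come back as errors, so an existence probe is a single call (hypothetical typed client again assumed):

    // Assumed imports: context; github.com/elastic/go-elasticsearch/v9.
    func nativeUserExists(ctx context.Context, es *elasticsearch.TypedClient, name string) (bool, error) {
        // 2xx -> (true, nil); 404 -> (false, nil); anything else -> (false, err).
        return es.Security.GetUser().Username(name).IsSuccess(ctx)
    }
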
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetUserPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetUserPrivileges type alias for index. +type NewGetUserPrivileges func() *GetUserPrivileges + +// NewGetUserPrivilegesFunc returns a new instance of GetUserPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetUserPrivilegesFunc(tp elastictransport.Interface) NewGetUserPrivileges { + return func() *GetUserPrivileges { + n := New(tp) + + return n + } +} + +// Get user privileges. +// +// Get the security privileges for the logged in user. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// To check whether a user has a specific list of privileges, use the has +// privileges API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges +func New(tp elastictransport.Interface) *GetUserPrivileges { + r := &GetUserPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetUserPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString("_privileges") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetUserPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_user_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_user_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.get_user_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_user_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetUserPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getuserprivileges.Response +func (r GetUserPrivileges) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
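
[Review note] A sketch of reading the caller's own privileges via the Do method above. Cluster and Indices come from this diff's response.go; the Names and Privileges field names on types.UserIndicesPrivileges are assumptions about the vendored types package.

    // Assumed imports: context; fmt; github.com/elastic/go-elasticsearch/v9.
    func printOwnPrivileges(ctx context.Context, es *elasticsearch.TypedClient) error {
        priv, err := es.Security.GetUserPrivileges().Do(ctx)
        if err != nil {
            return err
        }
        fmt.Println("cluster:", priv.Cluster)
        for _, idx := range priv.Indices {
            fmt.Printf("indices %v: %v\n", idx.Names, idx.Privileges)
        }
        return nil
    }
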
+func (r GetUserPrivileges) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetUserPrivileges query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetUserPrivileges headers map. +func (r *GetUserPrivileges) Header(key, value string) *GetUserPrivileges { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetUserPrivileges) ErrorTrace(errortrace bool) *GetUserPrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetUserPrivileges) FilterPath(filterpaths ...string) *GetUserPrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetUserPrivileges) Human(human bool) *GetUserPrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetUserPrivileges) Pretty(pretty bool) *GetUserPrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/response.go new file mode 100644 index 000000000..df6a9a85b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getuserprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getuserprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L28-L38 +type Response struct { + Applications []types.ApplicationPrivileges `json:"applications"` + Cluster []string `json:"cluster"` + Global []types.GlobalPrivilege `json:"global"` + Indices []types.UserIndicesPrivileges `json:"indices"` + RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + RemoteIndices []types.RemoteUserIndicesPrivileges `json:"remote_indices,omitempty"` + RunAs []string `json:"run_as"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/get_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/get_user_profile.go new file mode 100644 index 000000000..25f3f5f99 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/get_user_profile.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a user profile. +// +// Get a user's profile using the unique profile ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. 
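
[Review note] Before the implementation below: the profile UID is the required path parameter, and the Data query builder (defined later in this file) opts into the otherwise-omitted `data` content. A sketch of the expected call shape; the typed client wiring and the Uid field on types.UserProfileWithMetadata are assumptions.

    // Assumed imports: context; fmt; github.com/elastic/go-elasticsearch/v9.
    func fetchProfile(ctx context.Context, es *elasticsearch.TypedClient, uid string) error {
        res, err := es.Security.GetUserProfile(uid).
            Data("app.*"). // return only `data` keys nested under "app"
            Do(ctx)
        if err != nil {
            return err
        }
        for _, p := range res.Profiles { // Profiles per this diff's response.go
            fmt.Println(p.Uid) // field name is an assumption
        }
        return nil
    }
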
+package getuserprofile + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + uidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetUserProfile struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + uid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetUserProfile type alias for index. +type NewGetUserProfile func(uid string) *GetUserProfile + +// NewGetUserProfileFunc returns a new instance of GetUserProfile with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetUserProfileFunc(tp elastictransport.Interface) NewGetUserProfile { + return func(uid string) *GetUserProfile { + n := New(tp) + + n._uid(uid) + + return n + } +} + +// Get a user profile. +// +// Get a user's profile using the unique profile ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile +func New(tp elastictransport.Interface) *GetUserProfile { + r := &GetUserProfile{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetUserProfile) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == uidMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.uid) + } + path.WriteString(r.uid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetUserProfile) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.get_user_profile") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.get_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.get_user_profile", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.get_user_profile") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetUserProfile query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getuserprofile.Response +func (r GetUserProfile) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
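
[Review note] Each Do in these hunks converts a non-2xx body into *types.ElasticsearchError and returns it as the error, backfilling Status from the HTTP status code when the body carries none. A sketch of unwrapping it; that the type is reachable via errors.As is an assumption about the vendored types package.

    // Assumed imports: context; errors; log; github.com/elastic/go-elasticsearch/v9;
    // github.com/elastic/go-elasticsearch/v9/typedapi/types.
    func reportProfileError(ctx context.Context, es *elasticsearch.TypedClient, uid string) {
        if _, err := es.Security.GetUserProfile(uid).Do(ctx); err != nil {
            var esErr *types.ElasticsearchError
            if errors.As(err, &esErr) {
                log.Printf("rejected by Elasticsearch: status=%d", esErr.Status)
            }
        }
    }
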
+func (r GetUserProfile) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.get_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetUserProfile query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetUserProfile headers map. +func (r *GetUserProfile) Header(key, value string) *GetUserProfile { + r.headers.Set(key, value) + + return r +} + +// Uid A unique identifier for the user profile. +// API Name: uid +func (r *GetUserProfile) _uid(uids ...string) *GetUserProfile { + r.paramSet |= uidMask + r.uid = strings.Join(uids, ",") + + return r +} + +// Data A comma-separated list of filters for the `data` field of the profile +// document. +// To return all content use `data=*`. +// To return a subset of content use `data=` to retrieve content nested +// under the specified ``. +// By default returns no `data` content. +// API name: data +func (r *GetUserProfile) Data(data ...string) *GetUserProfile { + tmp := []string{} + for _, item := range data { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("data", strings.Join(tmp, ",")) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetUserProfile) ErrorTrace(errortrace bool) *GetUserProfile { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetUserProfile) FilterPath(filterpaths ...string) *GetUserProfile { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetUserProfile) Human(human bool) *GetUserProfile { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetUserProfile) Pretty(pretty bool) *GetUserProfile { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/response.go new file mode 100644 index 000000000..0b0f8c0af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getuserprofile + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_user_profile/Response.ts#L23-L33 +type Response struct { + Errors *types.GetUserProfileErrors `json:"errors,omitempty"` + // Profiles A successful call returns the JSON representation of the user profile and its + // internal versioning numbers. + // The API returns an empty object if no profile document is found for the + // provided `uid`. + // The content of the data field is not returned by default to avoid + // deserializing a potential large payload. + Profiles []types.UserProfileWithMetadata `json:"profiles"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/grant_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/grant_api_key.go new file mode 100644 index 000000000..3e9474cab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/grant_api_key.go @@ -0,0 +1,495 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Grant an API key. +// +// Create an API key on behalf of another user. +// This API is similar to the create API keys API, however it creates the API +// key for a user that is different than the user that runs the API. +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's +// credentials. +// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// +// The user, for whom the authentication credentials is provided, can optionally +// "run as" (impersonate) another user. +// In this case, the API key will be created on behalf of the impersonated user. +// +// This API is intended be used by applications that need to create and manage +// API keys for end users, but cannot guarantee that those users have permission +// to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// +// A successful grant API key API call returns a JSON structure that contains +// the API key, its unique id, and its name. +// If applicable, it also returns expiration information for the API key in +// milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. +package grantapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GrantApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGrantApiKey type alias for index. +type NewGrantApiKey func() *GrantApiKey + +// NewGrantApiKeyFunc returns a new instance of GrantApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGrantApiKeyFunc(tp elastictransport.Interface) NewGrantApiKey { + return func() *GrantApiKey { + n := New(tp) + + return n + } +} + +// Grant an API key. +// +// Create an API key on behalf of another user. +// This API is similar to the create API keys API, however it creates the API +// key for a user that is different than the user that runs the API. +// The caller must have authentication credentials for the user on whose behalf +// the API key will be created. +// It is not possible to use this API to create an API key without that user's +// credentials. 
+// The supported user authentication credential types are: +// +// * username and password +// * Elasticsearch access tokens +// * JWTs +// +// The user, for whom the authentication credentials is provided, can optionally +// "run as" (impersonate) another user. +// In this case, the API key will be created on behalf of the impersonated user. +// +// This API is intended be used by applications that need to create and manage +// API keys for end users, but cannot guarantee that those users have permission +// to create API keys on their own behalf. +// The API keys are created by the Elasticsearch API key service, which is +// automatically enabled. +// +// A successful grant API key API call returns a JSON structure that contains +// the API key, its unique id, and its name. +// If applicable, it also returns expiration information for the API key in +// milliseconds. +// +// By default, API keys never expire. You can specify expiration information +// when you create the API keys. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key +func New(tp elastictransport.Interface) *GrantApiKey { + r := &GrantApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *GrantApiKey) Raw(raw io.Reader) *GrantApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *GrantApiKey) Request(req *Request) *GrantApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
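
[Review note] A sketch of the end-to-end grant flow described above, with several assumptions flagged inline: the apikeygranttype.Accesstoken constant, the types.GrantApiKey struct literal, and the variant interface satisfied by its pointer are all inferred from the vendored types package rather than shown in this excerpt.

    // Assumed imports: context; github.com/elastic/go-elasticsearch/v9;
    // github.com/elastic/go-elasticsearch/v9/typedapi/types;
    // github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype.
    func grantKeyForToken(ctx context.Context, es *elasticsearch.TypedClient, userToken string) error {
        res, err := es.Security.GrantApiKey().
            GrantType(apikeygranttype.Accesstoken). // enum constant name is an assumption
            AccessToken(userToken).
            ApiKey(&types.GrantApiKey{Name: "on-behalf-key"}). // assumes *GrantApiKey satisfies GrantApiKeyVariant
            Do(ctx)
        if err != nil {
            return err
        }
        _ = res // per the API docs, carries the new key's id and encoded form; response.go appears later in this diff
        return nil
    }
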
+func (r *GrantApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for GrantApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString("grant") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GrantApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.grant_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.grant_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.grant_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.grant_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GrantApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a grantapikey.Response +func (r GrantApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.grant_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the GrantApiKey headers map. +func (r *GrantApiKey) Header(key, value string) *GrantApiKey { + r.headers.Set(key, value) + + return r +} + +// Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. +// API name: refresh +func (r *GrantApiKey) Refresh(refresh refresh.Refresh) *GrantApiKey { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GrantApiKey) ErrorTrace(errortrace bool) *GrantApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GrantApiKey) FilterPath(filterpaths ...string) *GrantApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GrantApiKey) Human(human bool) *GrantApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GrantApiKey) Pretty(pretty bool) *GrantApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The user's access token. +// If you specify the `access_token` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: access_token +func (r *GrantApiKey) AccessToken(accesstoken string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AccessToken = &accesstoken + + return r +} + +// The API key. 
+// API name: api_key +func (r *GrantApiKey) ApiKey(apikey types.GrantApiKeyVariant) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ApiKey = *apikey.GrantApiKeyCaster() + + return r +} + +// The type of grant. Supported grant types are: `access_token`, `password`. +// API name: grant_type +func (r *GrantApiKey) GrantType(granttype apikeygranttype.ApiKeyGrantType) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.GrantType = granttype + return r +} + +// The user's password. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: password +func (r *GrantApiKey) Password(password string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Password = &password + + return r +} + +// The name of the user to be impersonated. +// API name: run_as +func (r *GrantApiKey) RunAs(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RunAs = &username + + return r +} + +// The user name that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: username +func (r *GrantApiKey) Username(username string) *GrantApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/request.go new file mode 100644 index 000000000..d0fb1d4ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/request.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package grantapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype" +) + +// Request holds the request body struct for the package grantapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L102 +type Request struct { + + // AccessToken The user's access token. + // If you specify the `access_token` grant type, this parameter is required. + // It is not valid with other grant types. + AccessToken *string `json:"access_token,omitempty"` + // ApiKey The API key. + ApiKey types.GrantApiKey `json:"api_key"` + // GrantType The type of grant. Supported grant types are: `access_token`, `password`. + GrantType apikeygranttype.ApiKeyGrantType `json:"grant_type"` + // Password The user's password. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Password *string `json:"password,omitempty"` + // RunAs The name of the user to be impersonated. + RunAs *string `json:"run_as,omitempty"` + // Username The user name that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. + Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Grantapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AccessToken", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AccessToken = &o + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "grant_type": + if err := dec.Decode(&s.GrantType); err != nil { + return fmt.Errorf("%s | %w", "GrantType", err) + } + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return fmt.Errorf("%s | %w", "RunAs", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/response.go new file mode 100644 index 000000000..e5e205a75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package grantapikey + +// Response holds the response body struct for the package grantapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 +type Response struct { + ApiKey string `json:"api_key"` + Encoded string `json:"encoded"` + Expiration *int64 `json:"expiration,omitempty"` + Id string `json:"id"` + Name string `json:"name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/has_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/has_privileges.go new file mode 100644 index 000000000..9e29a7e07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/has_privileges.go @@ -0,0 +1,419 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check user privileges. +// +// Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. 
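Taken together, the grantapikey files above give a complete fluent surface: the setters populate the Request, HttpRequest serialises it against `/_security/api_key/grant`, and Do decodes the typed Response. As a rough usage sketch only, assuming a reachable node and a v9 typed client (the address, user names, and key name below are placeholders, not part of this change):

```go
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype"
)

func main() {
	// Placeholder client setup; the address is an assumption.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// POST /_security/api_key/grant with a `password` grant, as built by
	// the generated fluent setters above.
	res, err := es.Security.GrantApiKey().
		GrantType(apikeygranttype.Password).
		Username("jane").          // placeholder user; required for the password grant
		Password("jane-password"). // placeholder credential; required for the password grant
		ApiKey(&types.GrantApiKey{Name: "jane-key"}).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted API key id=%s encoded=%s", res.Id, res.Encoded)
}
```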
+package hasprivileges + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" +) + +const ( + userMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type HasPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + user string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHasPrivileges type alias for index. +type NewHasPrivileges func() *HasPrivileges + +// NewHasPrivilegesFunc returns a new instance of HasPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHasPrivilegesFunc(tp elastictransport.Interface) NewHasPrivileges { + return func() *HasPrivileges { + n := New(tp) + + return n + } +} + +// Check user privileges. +// +// Determine whether the specified user has a specified list of privileges. +// All users can use this API, but only to determine their own privileges. +// To check the privileges of other users, you must use the run as feature. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges +func New(tp elastictransport.Interface) *HasPrivileges { + r := &HasPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *HasPrivileges) Raw(raw io.Reader) *HasPrivileges { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *HasPrivileges) Request(req *Request) *HasPrivileges { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *HasPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for HasPrivileges: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + path.WriteString("_has_privileges") + + method = http.MethodPost + case r.paramSet == userMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "user", r.user) + } + path.WriteString(r.user) + path.WriteString("/") + path.WriteString("_has_privileges") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
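Because HttpRequest only builds the request and never sends it, the path selection (the `userMask` variant) and the `compatible-with=9` content negotiation above can be checked without a live cluster. A sketch under the same placeholder assumptions as before:

```go
package main

import (
	"context"
	"log"
	"net/url"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege"
)

func main() {
	nodeURL, _ := url.Parse("http://localhost:9200") // placeholder address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{nodeURL}})
	if err != nil {
		log.Fatal(err)
	}

	req, err := hasprivileges.New(tp).
		User("jane"). // placeholder user; selects the /_security/user/{user}/_has_privileges path
		Cluster(clusterprivilege.Monitor).
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	log.Println(req.Method, req.URL.Path)       // POST /_security/user/jane/_has_privileges
	log.Println(req.Header.Get("Content-Type")) // application/vnd.elasticsearch+json;compatible-with=9
}
```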
+func (r HasPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.has_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.has_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.has_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.has_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the HasPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a hasprivileges.Response +func (r HasPrivileges) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.has_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the HasPrivileges headers map. +func (r *HasPrivileges) Header(key, value string) *HasPrivileges { + r.headers.Set(key, value) + + return r +} + +// User Username +// API Name: user +func (r *HasPrivileges) User(user string) *HasPrivileges { + r.paramSet |= userMask + r.user = user + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *HasPrivileges) ErrorTrace(errortrace bool) *HasPrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *HasPrivileges) FilterPath(filterpaths ...string) *HasPrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *HasPrivileges) Human(human bool) *HasPrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *HasPrivileges) Pretty(pretty bool) *HasPrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: application +func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + + r.req.Application = append(r.req.Application, *v.ApplicationPrivilegesCheckCaster()) + + } + return r +} + +// A list of the cluster privileges that you want to check. +// API name: cluster +func (r *HasPrivileges) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + + r.req.Cluster = append(r.req.Cluster, v) + + } + return r +} + +// API name: index +func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheckVariant) *HasPrivileges { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Index = append(r.req.Index, *v.IndexPrivilegesCheckCaster()) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/request.go new file mode 100644 index 000000000..ae50dbd46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/request.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package hasprivileges + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" +) + +// Request holds the request body struct for the package hasprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L59 +type Request struct { + Application []types.ApplicationPrivilegesCheck `json:"application,omitempty"` + // Cluster A list of the cluster privileges that you want to check. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster,omitempty"` + Index []types.IndexPrivilegesCheck `json:"index,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Hasprivileges request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/response.go new file mode 100644 index 000000000..bc5093db5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package hasprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package hasprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L35 +type Response struct { + Application types.ApplicationsPrivileges `json:"application"` + Cluster map[string]bool `json:"cluster"` + HasAllRequested bool `json:"has_all_requested"` + Index map[string]types.Privileges `json:"index"` + Username string `json:"username"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Cluster: make(map[string]bool, 0), + Index: make(map[string]types.Privileges, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go new file mode 100644 index 000000000..4c6394280 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go @@ -0,0 +1,384 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Check user profile privileges. +// +// Determine whether the users associated with the specified user profile IDs +// have all the requested privileges. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +package hasprivilegesuserprofile + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
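Stepping back to the now-complete hasprivileges package: a typical typed-client call might look like the sketch below, reusing the assumed `es` client from the earlier grant sketch (the index pattern and chosen privileges are placeholders):

```go
res, err := es.Security.HasPrivileges().
	Cluster(clusterprivilege.Monitor).
	Index(&types.IndexPrivilegesCheck{
		Names:      []string{"logs-*"}, // placeholder index pattern
		Privileges: []indexprivilege.IndexPrivilege{indexprivilege.Read},
	}).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("has_all_requested=%v cluster=%v", res.HasAllRequested, res.Cluster)
```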
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type HasPrivilegesUserProfile struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewHasPrivilegesUserProfile type alias for index. +type NewHasPrivilegesUserProfile func() *HasPrivilegesUserProfile + +// NewHasPrivilegesUserProfileFunc returns a new instance of HasPrivilegesUserProfile with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHasPrivilegesUserProfileFunc(tp elastictransport.Interface) NewHasPrivilegesUserProfile { + return func() *HasPrivilegesUserProfile { + n := New(tp) + + return n + } +} + +// Check user profile privileges. +// +// Determine whether the users associated with the specified user profile IDs +// have all the requested privileges. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile +func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { + r := &HasPrivilegesUserProfile{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *HasPrivilegesUserProfile) Raw(raw io.Reader) *HasPrivilegesUserProfile { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *HasPrivilegesUserProfile) Request(req *Request) *HasPrivilegesUserProfile { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
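Request pairs with the FromJSON helper that each generated request type in this diff ships, which validates a hand-written body into the typed Request before sending. A sketch with invented values, again reusing the assumed `es` client:

```go
// Placeholder payload; the UID is invented for illustration.
payload := `{"uids":["u_placeholder_profile_uid_0"],"privileges":{"cluster":["monitor"]}}`
req, err := hasprivilegesuserprofile.NewRequest().FromJSON(payload)
if err != nil {
	log.Fatal(err)
}
res, err := es.Security.HasPrivilegesUserProfile().
	Request(req).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("checked %d profile(s)", len(res.HasPrivilegeUids))
```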
+func (r *HasPrivilegesUserProfile) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for HasPrivilegesUserProfile: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString("_has_privileges") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r HasPrivilegesUserProfile) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.has_privileges_user_profile") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.has_privileges_user_profile") + if reader := instrument.RecordRequestBody(ctx, "security.has_privileges_user_profile", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.has_privileges_user_profile") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the HasPrivilegesUserProfile query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a hasprivilegesuserprofile.Response +func (r HasPrivilegesUserProfile) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.has_privileges_user_profile") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = 
providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the HasPrivilegesUserProfile headers map. +func (r *HasPrivilegesUserProfile) Header(key, value string) *HasPrivilegesUserProfile { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *HasPrivilegesUserProfile) ErrorTrace(errortrace bool) *HasPrivilegesUserProfile { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *HasPrivilegesUserProfile) FilterPath(filterpaths ...string) *HasPrivilegesUserProfile { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *HasPrivilegesUserProfile) Human(human bool) *HasPrivilegesUserProfile { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *HasPrivilegesUserProfile) Pretty(pretty bool) *HasPrivilegesUserProfile { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// An object containing all the privileges to be checked. +// API name: privileges +func (r *HasPrivilegesUserProfile) Privileges(privileges types.PrivilegesCheckVariant) *HasPrivilegesUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Privileges = *privileges.PrivilegesCheckCaster() + + return r +} + +// A list of profile IDs. The privileges are checked for associated users of the +// profiles. 
+// API name: uids +func (r *HasPrivilegesUserProfile) Uids(uids ...string) *HasPrivilegesUserProfile { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range uids { + + r.req.Uids = append(r.req.Uids, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/request.go new file mode 100644 index 000000000..1ae6113b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/request.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package hasprivilegesuserprofile + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package hasprivilegesuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges_user_profile/Request.ts#L24-L55 +type Request struct { + + // Privileges An object containing all the privileges to be checked. + Privileges types.PrivilegesCheck `json:"privileges"` + // Uids A list of profile IDs. The privileges are checked for associated users of the + // profiles. + Uids []string `json:"uids"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Hasprivilegesuserprofile request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/response.go new file mode 100644 index 000000000..eeeed5173 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile/response.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package hasprivilegesuserprofile + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package hasprivilegesuserprofile +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges_user_profile/Response.ts#L23-L38 +type Response struct { + + // Errors The subset of the requested profile IDs for which an error + // was encountered. It does not include the missing profile IDs + // or the profile IDs of the users that do not have all the + // requested privileges. This field is absent if empty. + Errors *types.HasPrivilegesUserProfileErrors `json:"errors,omitempty"` + // HasPrivilegeUids The subset of the requested profile IDs of the users that + // have all the requested privileges. + HasPrivilegeUids []string `json:"has_privilege_uids"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/invalidate_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/invalidate_api_key.go new file mode 100644 index 000000000..7c122f302 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/invalidate_api_key.go @@ -0,0 +1,471 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Invalidate API keys. +// +// This API invalidates API keys created by the create API key or grant API key +// APIs. +// Invalidated API keys fail authentication, but they can still be viewed using +// the get API key information and query API key information APIs, for at least +// the configured retention period, until they are automatically deleted. 
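With the user-profile response type in place, the fully typed round trip is analogous to the plain hasprivileges call; a sketch with an invented profile UID:

```go
res, err := es.Security.HasPrivilegesUserProfile().
	Uids("u_placeholder_profile_uid_0"). // placeholder profile ID
	Privileges(&types.PrivilegesCheck{
		Cluster: []clusterprivilege.ClusterPrivilege{clusterprivilege.Monitor},
	}).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("profiles with all requested privileges: %v", res.HasPrivilegeUids)
```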
+// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. +// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. +// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. +// In addition, with the `manage_own_api_key` privilege, an invalidation request +// must be issued in one of the three formats: +// +// - Set the parameter `owner=true`. +// - Or, set both `username` and `realm_name` to match the user's identity. +// - Or, if the request is issued by an API key, that is to say an API key +// invalidates itself, specify its ID in the `ids` field. +package invalidateapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type InvalidateApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInvalidateApiKey type alias for index. +type NewInvalidateApiKey func() *InvalidateApiKey + +// NewInvalidateApiKeyFunc returns a new instance of InvalidateApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInvalidateApiKeyFunc(tp elastictransport.Interface) NewInvalidateApiKey { + return func() *InvalidateApiKey { + n := New(tp) + + return n + } +} + +// Invalidate API keys. +// +// This API invalidates API keys created by the create API key or grant API key +// APIs. +// Invalidated API keys fail authentication, but they can still be viewed using +// the get API key information and query API key information APIs, for at least +// the configured retention period, until they are automatically deleted. +// +// To use this API, you must have at least the `manage_security`, +// `manage_api_key`, or `manage_own_api_key` cluster privileges. +// The `manage_security` privilege allows deleting any API key, including both +// REST and cross cluster API keys. +// The `manage_api_key` privilege allows deleting any REST API key, but not +// cross cluster API keys. +// The `manage_own_api_key` only allows deleting REST API keys that are owned by +// the user. +// In addition, with the `manage_own_api_key` privilege, an invalidation request +// must be issued in one of the three formats: +// +// - Set the parameter `owner=true`. +// - Or, set both `username` and `realm_name` to match the user's identity. +// - Or, if the request is issued by an API key, that is to say an API key +// invalidates itself, specify its ID in the `ids` field. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key +func New(tp elastictransport.Interface) *InvalidateApiKey { + r := &InvalidateApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *InvalidateApiKey) Raw(raw io.Reader) *InvalidateApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *InvalidateApiKey) Request(req *Request) *InvalidateApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *InvalidateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for InvalidateApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r InvalidateApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.invalidate_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.invalidate_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.invalidate_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.invalidate_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the InvalidateApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a invalidateapikey.Response +func (r InvalidateApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.invalidate_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the InvalidateApiKey headers map. +func (r *InvalidateApiKey) Header(key, value string) *InvalidateApiKey { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *InvalidateApiKey) ErrorTrace(errortrace bool) *InvalidateApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *InvalidateApiKey) FilterPath(filterpaths ...string) *InvalidateApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *InvalidateApiKey) Human(human bool) *InvalidateApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *InvalidateApiKey) Pretty(pretty bool) *InvalidateApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: id +func (r *InvalidateApiKey) Id(id string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Id = &id + + return r +} + +// A list of API key ids. +// This parameter cannot be used with any of `name`, `realm_name`, or +// `username`. +// API name: ids +func (r *InvalidateApiKey) Ids(ids ...string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ids { + + r.req.Ids = append(r.req.Ids, v) + + } + return r +} + +// An API key name. +// This parameter cannot be used with any of `ids`, `realm_name` or `username`. +// API name: name +func (r *InvalidateApiKey) Name(name string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// Query API keys owned by the currently authenticated user. +// The `realm_name` or `username` parameters cannot be specified when this +// parameter is set to `true` as they are assumed to be the currently +// authenticated ones. +// +// NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be +// specified if `owner` is `false`. +// API name: owner +func (r *InvalidateApiKey) Owner(owner bool) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Owner = &owner + + return r +} + +// The name of an authentication realm. +// This parameter cannot be used with either `ids` or `name`, or when `owner` +// flag is set to `true`. +// API name: realm_name +func (r *InvalidateApiKey) RealmName(realmname string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RealmName = &realmname + + return r +} + +// The username of a user. +// This parameter cannot be used with either `ids` or `name` or when `owner` +// flag is set to `true`. 
+// API name: username +func (r *InvalidateApiKey) Username(username string) *InvalidateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/request.go new file mode 100644 index 000000000..6cb600a20 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/request.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package invalidateapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package invalidateapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L82 +type Request struct { + Id *string `json:"id,omitempty"` + // Ids A list of API key ids. + // This parameter cannot be used with any of `name`, `realm_name`, or + // `username`. + Ids []string `json:"ids,omitempty"` + // Name An API key name. + // This parameter cannot be used with any of `ids`, `realm_name` or `username`. + Name *string `json:"name,omitempty"` + // Owner Query API keys owned by the currently authenticated user. + // The `realm_name` or `username` parameters cannot be specified when this + // parameter is set to `true` as they are assumed to be the currently + // authenticated ones. + // + // NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be + // specified if `owner` is `false`. + Owner *bool `json:"owner,omitempty"` + // RealmName The name of an authentication realm. + // This parameter cannot be used with either `ids` or `name`, or when `owner` + // flag is set to `true`. + RealmName *string `json:"realm_name,omitempty"` + // Username The username of a user. + // This parameter cannot be used with either `ids` or `name` or when `owner` + // flag is set to `true`. 
+ Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Invalidateapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "owner": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Owner", err) + } + s.Owner = &value + case bool: + s.Owner = &v + } + + case "realm_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RealmName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RealmName = &o + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/response.go new file mode 100644 index 000000000..1c3bd0ffc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey/response.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package invalidateapikey + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package invalidateapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L46 +type Response struct { + + // ErrorCount The number of errors that were encountered when invalidating the API keys. 
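+	//
+	// Editor's sketch (illustrative only): a non-zero value signals a
+	// partial failure, typically handled as
+	//
+	//	if res.ErrorCount > 0 {
+	//		// inspect res.ErrorDetails for the individual causes
+	//	}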
+ ErrorCount int `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. + ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedApiKeys The IDs of the API keys that were invalidated as part of this request. + InvalidatedApiKeys []string `json:"invalidated_api_keys"` + // PreviouslyInvalidatedApiKeys The IDs of the API keys that were already invalidated. + PreviouslyInvalidatedApiKeys []string `json:"previously_invalidated_api_keys"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/invalidate_token.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/invalidate_token.go new file mode 100644 index 000000000..5735d1fbd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/invalidate_token.go @@ -0,0 +1,429 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Invalidate a token. +// +// The access tokens returned by the get token API have a finite period of time +// for which they are valid. +// After that time period, they can no longer be used. +// The time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// +// The refresh tokens returned by the get token API are only valid for 24 hours. +// They can also be used exactly once. +// If you want to invalidate one or more access or refresh tokens immediately, +// use this invalidate token API. +// +// NOTE: While all parameters are optional, at least one of them is required. +// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. +package invalidatetoken + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type InvalidateToken struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInvalidateToken type alias for index. +type NewInvalidateToken func() *InvalidateToken + +// NewInvalidateTokenFunc returns a new instance of InvalidateToken with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInvalidateTokenFunc(tp elastictransport.Interface) NewInvalidateToken { + return func() *InvalidateToken { + n := New(tp) + + return n + } +} + +// Invalidate a token. +// +// The access tokens returned by the get token API have a finite period of time +// for which they are valid. +// After that time period, they can no longer be used. +// The time period is defined by the `xpack.security.authc.token.timeout` +// setting. +// +// The refresh tokens returned by the get token API are only valid for 24 hours. +// They can also be used exactly once. +// If you want to invalidate one or more access or refresh tokens immediately, +// use this invalidate token API. +// +// NOTE: While all parameters are optional, at least one of them is required. +// More specifically, either one of `token` or `refresh_token` parameters is +// required. +// If none of these two are specified, then `realm_name` and/or `username` need +// to be specified. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token +func New(tp elastictransport.Interface) *InvalidateToken { + r := &InvalidateToken{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *InvalidateToken) Raw(raw io.Reader) *InvalidateToken { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *InvalidateToken) Request(req *Request) *InvalidateToken { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
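+//
+// Editor's sketch (assumptions: `transport` and `body` are caller-supplied):
+// the request can also be built without being executed, e.g. for inspection:
+//
+//	req, err := invalidatetoken.New(transport).Request(body).HttpRequest(ctx)
+//	// req is a DELETE against /_security/oauth2/token with a JSON body
+//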
+func (r *InvalidateToken) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for InvalidateToken: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("oauth2") + path.WriteString("/") + path.WriteString("token") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r InvalidateToken) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.invalidate_token") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.invalidate_token") + if reader := instrument.RecordRequestBody(ctx, "security.invalidate_token", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.invalidate_token") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the InvalidateToken query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a invalidatetoken.Response +func (r InvalidateToken) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.invalidate_token") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the InvalidateToken headers map. +func (r *InvalidateToken) Header(key, value string) *InvalidateToken { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *InvalidateToken) ErrorTrace(errortrace bool) *InvalidateToken { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *InvalidateToken) FilterPath(filterpaths ...string) *InvalidateToken { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *InvalidateToken) Human(human bool) *InvalidateToken { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *InvalidateToken) Pretty(pretty bool) *InvalidateToken { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The name of an authentication realm. +// This parameter cannot be used with either `refresh_token` or `token`. +// API name: realm_name +func (r *InvalidateToken) RealmName(name string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RealmName = &name + + return r +} + +// A refresh token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. +// API name: refresh_token +func (r *InvalidateToken) RefreshToken(refreshtoken string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} + +// An access token. +// This parameter cannot be used if any of `refresh_token`, `realm_name`, or +// `username` are used. 
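+//
+// Editor's sketch (not generated; `es` is an assumed typed client value):
+//
+//	res, err := es.Security.InvalidateToken().Token(accessToken).Do(ctx)
+//	// res.InvalidatedTokens reports how many tokens were revoked
+//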
+// API name: token +func (r *InvalidateToken) Token(token string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Token = &token + + return r +} + +// The username of a user. +// This parameter cannot be used with either `refresh_token` or `token`. +// API name: username +func (r *InvalidateToken) Username(username string) *InvalidateToken { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/request.go new file mode 100644 index 000000000..ce68c658e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/request.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package invalidatetoken + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package invalidatetoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L71 +type Request struct { + + // RealmName The name of an authentication realm. + // This parameter cannot be used with either `refresh_token` or `token`. + RealmName *string `json:"realm_name,omitempty"` + // RefreshToken A refresh token. + // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. + RefreshToken *string `json:"refresh_token,omitempty"` + // Token An access token. + // This parameter cannot be used if any of `refresh_token`, `realm_name`, or + // `username` are used. + Token *string `json:"token,omitempty"` + // Username The username of a user. + // This parameter cannot be used with either `refresh_token` or `token`. 
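+	//
+	// Editor's sketch (illustrative only): per the package doc at least one
+	// field must be set; realm-wide invalidation would send
+	//
+	//	{ "realm_name": "saml1" }
+	//
+	// where "saml1" is a placeholder realm name.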
+ Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Invalidatetoken request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "realm_name": + if err := dec.Decode(&s.RealmName); err != nil { + return fmt.Errorf("%s | %w", "RealmName", err) + } + + case "refresh_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshToken", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshToken = &o + + case "token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Token", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Token = &o + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/response.go new file mode 100644 index 000000000..78b234d8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken/response.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package invalidatetoken + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package invalidatetoken +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L46 +type Response struct { + + // ErrorCount The number of errors that were encountered when invalidating the tokens. + ErrorCount int64 `json:"error_count"` + // ErrorDetails Details about the errors. + // This field is not present in the response when `error_count` is `0`. 
+ ErrorDetails []types.ErrorCause `json:"error_details,omitempty"` + // InvalidatedTokens The number of the tokens that were invalidated as part of this request. + InvalidatedTokens int64 `json:"invalidated_tokens"` + // PreviouslyInvalidatedTokens The number of tokens that were already invalidated. + PreviouslyInvalidatedTokens int64 `json:"previously_invalidated_tokens"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/oidc_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/oidc_authenticate.go new file mode 100644 index 000000000..9b16a6f3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/oidc_authenticate.go @@ -0,0 +1,420 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Authenticate OpenID Connect. +// +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +package oidcauthenticate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type OidcAuthenticate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOidcAuthenticate type alias for index. +type NewOidcAuthenticate func() *OidcAuthenticate + +// NewOidcAuthenticateFunc returns a new instance of OidcAuthenticate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
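+// (Editor's note: that is, the client's API index stores this constructor so
+// each endpoint can be instantiated on demand from a single shared transport.)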
+func NewOidcAuthenticateFunc(tp elastictransport.Interface) NewOidcAuthenticate { + return func() *OidcAuthenticate { + n := New(tp) + + return n + } +} + +// Authenticate OpenID Connect. +// +// Exchange an OpenID Connect authentication response message for an +// Elasticsearch internal access token and refresh token that can be +// subsequently used for authentication. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate +func New(tp elastictransport.Interface) *OidcAuthenticate { + r := &OidcAuthenticate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcAuthenticate) Raw(raw io.Reader) *OidcAuthenticate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcAuthenticate) Request(req *Request) *OidcAuthenticate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *OidcAuthenticate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcAuthenticate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("oidc") + path.WriteString("/") + path.WriteString("authenticate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
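+//
+// Editor's sketch (not generated; `es`, `callbackURL`, `state`, and `nonce`
+// are assumed): callers normally go through Do, which wraps Perform, e.g.
+//
+//	res, err := es.Security.OidcAuthenticate().
+//		RedirectUri(callbackURL).State(state).Nonce(nonce).Do(ctx)
+//	// on success, res.AccessToken and res.RefreshToken carry the tokens
+//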
+func (r OidcAuthenticate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.oidc_authenticate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_authenticate") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_authenticate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_authenticate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the OidcAuthenticate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a oidcauthenticate.Response +func (r OidcAuthenticate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the OidcAuthenticate headers map. +func (r *OidcAuthenticate) Header(key, value string) *OidcAuthenticate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcAuthenticate) ErrorTrace(errortrace bool) *OidcAuthenticate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
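+// For example, FilterPath("access_token", "expires_in") would trim the
+// response to those two fields (editor's illustration).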
+// API name: filter_path +func (r *OidcAuthenticate) FilterPath(filterpaths ...string) *OidcAuthenticate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcAuthenticate) Human(human bool) *OidcAuthenticate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcAuthenticate) Pretty(pretty bool) *OidcAuthenticate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Associate a client session with an ID token and mitigate replay attacks. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. +// API name: nonce +func (r *OidcAuthenticate) Nonce(nonce string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = nonce + + return r +} + +// The name of the OpenID Connect realm. +// This property is useful in cases where multiple realms are defined. +// API name: realm +func (r *OidcAuthenticate) Realm(realm string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The URL to which the OpenID Connect Provider redirected the User Agent in +// response to an authentication request after a successful authentication. +// This URL must be provided as-is (URL encoded), taken from the body of the +// response or as the value of a location header in the response from the OpenID +// Connect Provider. +// API name: redirect_uri +func (r *OidcAuthenticate) RedirectUri(redirecturi string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RedirectUri = redirecturi + + return r +} + +// Maintain state between the authentication request and the response. +// This value needs to be the same as the one that was provided to the +// `/_security/oidc/prepare` API or the one that was generated by Elasticsearch +// and included in the response to that call. +// API name: state +func (r *OidcAuthenticate) State(state string) *OidcAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = state + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/request.go new file mode 100644 index 000000000..35ebb098a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/request.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidcauthenticate + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_authenticate/Request.ts#L22-L61 +type Request struct { + + // Nonce Associate a client session with an ID token and mitigate replay attacks. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + Nonce string `json:"nonce"` + // Realm The name of the OpenID Connect realm. + // This property is useful in cases where multiple realms are defined. + Realm *string `json:"realm,omitempty"` + // RedirectUri The URL to which the OpenID Connect Provider redirected the User Agent in + // response to an authentication request after a successful authentication. + // This URL must be provided as-is (URL encoded), taken from the body of the + // response or as the value of a location header in the response from the OpenID + // Connect Provider. + RedirectUri string `json:"redirect_uri"` + // State Maintain state between the authentication request and the response. + // This value needs to be the same as the one that was provided to the + // `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + // and included in the response to that call. + State string `json:"state"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcauthenticate request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/response.go new file mode 100644 index 000000000..adb6e98ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidcauthenticate + +// Response holds the response body struct for the package oidcauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_authenticate/Response.ts#L22-L41 +type Response struct { + + // AccessToken The Elasticsearch access token. + AccessToken string `json:"access_token"` + // ExpiresIn The duration (in seconds) of the tokens. + ExpiresIn int `json:"expires_in"` + // RefreshToken The Elasticsearch refresh token. + RefreshToken string `json:"refresh_token"` + // Type The type of token. + Type string `json:"type"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/oidc_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/oidc_logout.go new file mode 100644 index 000000000..414bca3ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/oidc_logout.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. +// +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. 
+// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +package oidclogout + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type OidcLogout struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOidcLogout type alias for index. +type NewOidcLogout func() *OidcLogout + +// NewOidcLogoutFunc returns a new instance of OidcLogout with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewOidcLogoutFunc(tp elastictransport.Interface) NewOidcLogout { + return func() *OidcLogout { + n := New(tp) + + return n + } +} + +// Logout of OpenID Connect. +// +// Invalidate an access token and a refresh token that were generated as a +// response to the `/_security/oidc/authenticate` API. +// +// If the OpenID Connect authentication realm in Elasticsearch is accordingly +// configured, the response to this call will contain a URI pointing to the end +// session endpoint of the OpenID Connect Provider in order to perform single +// logout. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout +func New(tp elastictransport.Interface) *OidcLogout { + r := &OidcLogout{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcLogout) Raw(raw io.Reader) *OidcLogout { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcLogout) Request(req *Request) *OidcLogout { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
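+//
+// Editor's sketch (not generated; `es` and the token values are assumed):
+// typical callers use Do rather than building the request by hand, e.g.
+//
+//	res, err := es.Security.OidcLogout().Token(access).RefreshToken(refresh).Do(ctx)
+//	// res.Redirect points at the provider's end-session endpoint
+//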
+func (r *OidcLogout) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcLogout: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("oidc") + path.WriteString("/") + path.WriteString("logout") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r OidcLogout) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.oidc_logout") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_logout") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_logout", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_logout") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the OidcLogout query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a oidclogout.Response +func (r OidcLogout) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_logout") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the OidcLogout headers map. +func (r *OidcLogout) Header(key, value string) *OidcLogout { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcLogout) ErrorTrace(errortrace bool) *OidcLogout { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcLogout) FilterPath(filterpaths ...string) *OidcLogout { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcLogout) Human(human bool) *OidcLogout { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcLogout) Pretty(pretty bool) *OidcLogout { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The refresh token to be invalidated. +// API name: refresh_token +func (r *OidcLogout) RefreshToken(refreshtoken string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} + +// The access token to be invalidated. +// API name: token +func (r *OidcLogout) Token(token string) *OidcLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Token = token + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/request.go new file mode 100644 index 000000000..6f694ce8b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidclogout + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_logout/Request.ts#L22-L52 +type Request struct { + + // RefreshToken The refresh token to be invalidated. + RefreshToken *string `json:"refresh_token,omitempty"` + // Token The access token to be invalidated. + Token string `json:"token"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidclogout request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/response.go new file mode 100644 index 000000000..32307ca53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidclogout + +// Response holds the response body struct for the package oidclogout +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_logout/Response.ts#L20-L27 +type Response struct { + + // Redirect A URI that points to the end session endpoint of the OpenID Connect Provider + // with all the parameters of the logout request as HTTP GET parameters. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go new file mode 100644 index 000000000..bf4bc4018 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go @@ -0,0 +1,442 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +package oidcprepareauthentication + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
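+//
+// Editor's sketch (not generated; the `Realm` setter is assumed to follow the
+// same builder pattern as the other security endpoints):
+//
+//	res, err := es.Security.OidcPrepareAuthentication().Realm("oidc1").Do(ctx)
+//	// redirect the user's browser to the authorization URL in the response
+//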
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type OidcPrepareAuthentication struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewOidcPrepareAuthentication type alias for index. +type NewOidcPrepareAuthentication func() *OidcPrepareAuthentication + +// NewOidcPrepareAuthenticationFunc returns a new instance of OidcPrepareAuthentication with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewOidcPrepareAuthenticationFunc(tp elastictransport.Interface) NewOidcPrepareAuthentication { + return func() *OidcPrepareAuthentication { + n := New(tp) + + return n + } +} + +// Prepare OpenID connect authentication. +// +// Create an oAuth 2.0 authentication request as a URL string based on the +// configuration of the OpenID Connect authentication realm in Elasticsearch. +// +// The response of this API is a URL pointing to the Authorization Endpoint of +// the configured OpenID Connect Provider, which can be used to redirect the +// browser of the user in order to continue the authentication process. +// +// Elasticsearch exposes all the necessary OpenID Connect related functionality +// with the OpenID Connect APIs. +// These APIs are used internally by Kibana in order to provide OpenID Connect +// based authentication, but can also be used by other, custom web applications +// or other clients. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication +func New(tp elastictransport.Interface) *OidcPrepareAuthentication { + r := &OidcPrepareAuthentication{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *OidcPrepareAuthentication) Raw(raw io.Reader) *OidcPrepareAuthentication { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *OidcPrepareAuthentication) Request(req *Request) *OidcPrepareAuthentication { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *OidcPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for OidcPrepareAuthentication: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("oidc") + path.WriteString("/") + path.WriteString("prepare") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r OidcPrepareAuthentication) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.oidc_prepare_authentication") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.oidc_prepare_authentication") + if reader := instrument.RecordRequestBody(ctx, "security.oidc_prepare_authentication", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.oidc_prepare_authentication") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the OidcPrepareAuthentication query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a oidcprepareauthentication.Response +func (r OidcPrepareAuthentication) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.oidc_prepare_authentication") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = 
providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the OidcPrepareAuthentication headers map. +func (r *OidcPrepareAuthentication) Header(key, value string) *OidcPrepareAuthentication { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *OidcPrepareAuthentication) ErrorTrace(errortrace bool) *OidcPrepareAuthentication { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *OidcPrepareAuthentication) FilterPath(filterpaths ...string) *OidcPrepareAuthentication { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *OidcPrepareAuthentication) Human(human bool) *OidcPrepareAuthentication { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *OidcPrepareAuthentication) Pretty(pretty bool) *OidcPrepareAuthentication { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// In the case of a third party initiated single sign on, this is the issuer +// identifier for the OP that the RP is to send the authentication request to. +// It cannot be specified when *realm* is specified. +// One of *realm* or *iss* is required. +// API name: iss +func (r *OidcPrepareAuthentication) Iss(iss string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Iss = &iss + + return r +} + +// In the case of a third party initiated single sign on, it is a string value +// that is included in the authentication request as the *login_hint* parameter. +// This parameter is not valid when *realm* is specified. 
+// API name: login_hint +func (r *OidcPrepareAuthentication) LoginHint(loginhint string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.LoginHint = &loginhint + + return r +} + +// The value used to associate a client session with an ID token and to mitigate +// replay attacks. +// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: nonce +func (r *OidcPrepareAuthentication) Nonce(nonce string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Nonce = &nonce + + return r +} + +// The name of the OpenID Connect realm in Elasticsearch the configuration of +// which should be used in order to generate the authentication request. +// It cannot be specified when *iss* is specified. +// One of *realm* or *iss* is required. +// API name: realm +func (r *OidcPrepareAuthentication) Realm(realm string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// The value used to maintain state between the authentication request and the +// response, typically used as a Cross-Site Request Forgery mitigation. +// If the caller of the API does not provide a value, Elasticsearch will +// generate one with sufficient entropy and return it in the response. +// API name: state +func (r *OidcPrepareAuthentication) State(state string) *OidcPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.State = &state + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/request.go new file mode 100644 index 000000000..d731f3875 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/request.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
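A sketch of the fluent builder defined above, assuming `tp` is the elastictransport.Interface the rest of the client is already configured with and `oidc1` is a hypothetical realm name. `Realm` and `Iss` are mutually exclusive, and because no `State` or `Nonce` is sent, Elasticsearch generates both and echoes them back in the response:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication"
)

// prepareLogin starts an RP-initiated OpenID Connect flow against a
// hypothetical "oidc1" realm and returns the provider redirect URL.
func prepareLogin(ctx context.Context, tp elastictransport.Interface) (string, error) {
	res, err := oidcprepareauthentication.New(tp).
		Realm("oidc1"). // mutually exclusive with Iss; one of the two is required
		Do(ctx)
	if err != nil {
		return "", err
	}

	// State and nonce were generated server-side because none were provided;
	// persist them for the callback, then send the browser to the provider.
	fmt.Println("state:", res.State, "nonce:", res.Nonce)
	return res.Redirect, nil
}
```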
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidcprepareauthentication + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_prepare_authentication/Request.ts#L22-L71 +type Request struct { + + // Iss In the case of a third party initiated single sign on, this is the issuer + // identifier for the OP that the RP is to send the authentication request to. + // It cannot be specified when *realm* is specified. + // One of *realm* or *iss* is required. + Iss *string `json:"iss,omitempty"` + // LoginHint In the case of a third party initiated single sign on, it is a string value + // that is included in the authentication request as the *login_hint* parameter. + // This parameter is not valid when *realm* is specified. + LoginHint *string `json:"login_hint,omitempty"` + // Nonce The value used to associate a client session with an ID token and to mitigate + // replay attacks. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. + Nonce *string `json:"nonce,omitempty"` + // Realm The name of the OpenID Connect realm in Elasticsearch the configuration of + // which should be used in order to generate the authentication request. + // It cannot be specified when *iss* is specified. + // One of *realm* or *iss* is required. + Realm *string `json:"realm,omitempty"` + // State The value used to maintain state between the authentication request and the + // response, typically used as a Cross-Site Request Forgery mitigation. + // If the caller of the API does not provide a value, Elasticsearch will + // generate one with sufficient entropy and return it in the response. + State *string `json:"state,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Oidcprepareauthentication request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/response.go new file mode 100644 index 000000000..90211c244 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package oidcprepareauthentication + +// Response holds the response body struct for the package oidcprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/oidc_prepare_authentication/Response.ts#L20-L30 +type Response struct { + Nonce string `json:"nonce"` + Realm string `json:"realm"` + // Redirect A URI that points to the authorization endpoint of the OpenID Connect + // Provider with all the parameters of the authentication request as HTTP GET + // parameters. + Redirect string `json:"redirect"` + State string `json:"state"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/put_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/put_privileges.go new file mode 100644 index 000000000..248f319a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/put_privileges.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. +// * The prefix must contain only ASCII letters or digits. +// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. 
+// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. +package putprivileges + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutPrivileges struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutPrivileges type alias for index. +type NewPutPrivileges func() *PutPrivileges + +// NewPutPrivilegesFunc returns a new instance of PutPrivileges with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutPrivilegesFunc(tp elastictransport.Interface) NewPutPrivileges { + return func() *PutPrivileges { + n := New(tp) + + return n + } +} + +// Create or update application privileges. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_security` cluster privilege (or a greater privilege such as +// `all`). +// * The "Manage Application Privileges" global privilege for the application +// being referenced in the request. +// +// Application names are formed from a prefix, with an optional suffix that +// conform to the following rules: +// +// * The prefix must begin with a lowercase ASCII letter. +// * The prefix must contain only ASCII letters or digits. +// * The prefix must be at least 3 characters long. +// * If the suffix exists, it must begin with either a dash `-` or `_`. +// * The suffix cannot contain any of the following characters: `\`, `/`, `*`, +// `?`, `"`, `<`, `>`, `|`, `,`, `*`. +// * No part of the name can contain whitespace. +// +// Privilege names must begin with a lowercase ASCII letter and must contain +// only ASCII letters and digits along with the characters `_`, `-`, and `.`. +// +// Action names can contain any number of printable ASCII characters and must +// contain at least one of the following characters: `/`, `*`, `:`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges +func New(tp elastictransport.Interface) *PutPrivileges { + r := &PutPrivileges{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutPrivileges) Raw(raw io.Reader) *PutPrivileges { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *PutPrivileges) Request(req *Request) *PutPrivileges { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutPrivileges: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("privilege") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutPrivileges) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.put_privileges") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_privileges") + if reader := instrument.RecordRequestBody(ctx, "security.put_privileges", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_privileges") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutPrivileges query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putprivileges.Response +func (r PutPrivileges) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_privileges") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutPrivileges headers map. +func (r *PutPrivileges) Header(key, value string) *PutPrivileges { + r.headers.Set(key, value) + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *PutPrivileges) Refresh(refresh refresh.Refresh) *PutPrivileges { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *PutPrivileges) ErrorTrace(errortrace bool) *PutPrivileges { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutPrivileges) FilterPath(filterpaths ...string) *PutPrivileges { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutPrivileges) Human(human bool) *PutPrivileges { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutPrivileges) Pretty(pretty bool) *PutPrivileges { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/request.go new file mode 100644 index 000000000..ea4bca51f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
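ErrorTrace, FilterPath, Human, and Pretty are the same query-string helpers every generated endpoint carries; they only shape the URL. A small sketch (the filter-path key is illustrative) that inspects the encoded query via HttpRequest without performing any call:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges"
)

// inspectQuery builds the request locally to show how the shared helpers
// land in the query string; nothing is sent over the transport.
func inspectQuery(tp elastictransport.Interface) error {
	pp := putprivileges.New(tp).
		FilterPath("myapp.*.created"). // dot-notation filter, illustrative
		Pretty(true).
		Human(true)

	httpReq, err := pp.HttpRequest(context.Background())
	if err != nil {
		return err
	}
	// Prints filter_path, human, and pretty, URL-encoded and sorted by key.
	fmt.Println(httpReq.URL.RawQuery)
	return nil
}
```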
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L67 +type Request = map[string]map[string]types.PrivilegesActions + +// NewRequest returns a Request +func NewRequest() *Request { + r := make(map[string]map[string]types.PrivilegesActions, 0) + + return &r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/response.go new file mode 100644 index 000000000..4acf8e0da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putprivileges + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L28 + +type Response map[string]map[string]types.CreatedStatus + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/put_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/put_role.go new file mode 100644 index 000000000..db2bb76e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/put_role.go @@ -0,0 +1,577 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
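The put-privileges body defined above is a plain nested map (application → privilege name → actions), and the response mirrors that shape with CreatedStatus values, so a round trip needs no dedicated builder. A sketch with an illustrative application name, assuming the `Actions` field of the vendored types.PrivilegesActions; note each action contains one of the required `/`, `*`, `:` characters:

```go
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func createPrivileges(ctx context.Context, tp elastictransport.Interface) error {
	req := putprivileges.NewRequest()
	(*req)["myapp"] = map[string]types.PrivilegesActions{
		"read": {Actions: []string{"data:read/*", "action:login"}},
	}

	res, err := putprivileges.New(tp).Request(req).Do(ctx)
	if err != nil {
		return err
	}

	// Response shape: application -> privilege -> CreatedStatus.
	for app, privs := range res {
		for name, status := range privs {
			fmt.Printf("%s/%s created=%v\n", app, name, status.Created)
		}
	}
	return nil
}
```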
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update roles. +// +// The role management APIs are generally the preferred way to manage roles in +// the native realm, rather than using file-based role management. +// The create or update roles API cannot update roles that are defined in roles +// files. +// File-based role management is not available in Elastic Serverless. +package putrole + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutRole type alias for index. +type NewPutRole func(name string) *PutRole + +// NewPutRoleFunc returns a new instance of PutRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutRoleFunc(tp elastictransport.Interface) NewPutRole { + return func(name string) *PutRole { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update roles. +// +// The role management APIs are generally the preferred way to manage roles in +// the native realm, rather than using file-based role management. +// The create or update roles API cannot update roles that are defined in roles +// files. +// File-based role management is not available in Elastic Serverless. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role +func New(tp elastictransport.Interface) *PutRole { + r := &PutRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutRole) Raw(raw io.Reader) *PutRole { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *PutRole) Request(req *Request) *PutRole { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutRole: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.put_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_role") + if reader := instrument.RecordRequestBody(ctx, "security.put_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putrole.Response +func (r PutRole) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutRole headers map. +func (r *PutRole) Header(key, value string) *PutRole { + r.headers.Set(key, value) + + return r +} + +// Name The name of the role that is being created or updated. On Elasticsearch +// Serverless, the role name must begin with a letter or digit and can only +// contain letters, digits and the characters '_', '-', and '.'. Each role must +// have a unique name, as this will serve as the identifier for that role. 
+// API Name: name +func (r *PutRole) _name(name string) *PutRole { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. +// API name: refresh +func (r *PutRole) Refresh(refresh refresh.Refresh) *PutRole { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutRole) ErrorTrace(errortrace bool) *PutRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutRole) FilterPath(filterpaths ...string) *PutRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutRole) Human(human bool) *PutRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutRole) Pretty(pretty bool) *PutRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A list of application privilege entries. +// API name: applications +func (r *PutRole) Applications(applications ...types.ApplicationPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range applications { + + r.req.Applications = append(r.req.Applications, *v.ApplicationPrivilegesCaster()) + + } + return r +} + +// A list of cluster privileges. These privileges define the cluster-level +// actions for users with this role. +// API name: cluster +func (r *PutRole) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range clusters { + + r.req.Cluster = append(r.req.Cluster, v) + + } + return r +} + +// Optional description of the role descriptor +// API name: description +func (r *PutRole) Description(description string) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// An object defining global privileges. A global privilege is a form of cluster +// privilege that is request-aware. Support for global privileges is currently +// limited to the management of application privileges. 
+// API name: global +func (r *PutRole) Global(global map[string]json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Global = global + return r +} + +func (r *PutRole) AddGlobal(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Global == nil { + r.req.Global = make(map[string]json.RawMessage) + } else { + tmp = r.req.Global + } + + tmp[key] = value + + r.req.Global = tmp + return r +} + +// A list of indices permissions entries. +// API name: indices +func (r *PutRole) Indices(indices ...types.IndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range indices { + + r.req.Indices = append(r.req.Indices, *v.IndicesPrivilegesCaster()) + + } + return r +} + +// Optional metadata. Within the metadata object, keys that begin with an +// underscore (`_`) are reserved for system use. +// API name: metadata +func (r *PutRole) Metadata(metadata types.MetadataVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// A list of remote cluster permissions entries. +// API name: remote_cluster +func (r *PutRole) RemoteCluster(remoteclusters ...types.RemoteClusterPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteclusters { + + r.req.RemoteCluster = append(r.req.RemoteCluster, *v.RemoteClusterPrivilegesCaster()) + + } + return r +} + +// A list of remote indices permissions entries. +// +// NOTE: Remote indices are effective for remote clusters configured with the +// API key based model. +// They have no effect for remote clusters configured with the certificate based +// model. +// API name: remote_indices +func (r *PutRole) RemoteIndices(remoteindices ...types.RemoteIndicesPrivilegesVariant) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range remoteindices { + + r.req.RemoteIndices = append(r.req.RemoteIndices, *v.RemoteIndicesPrivilegesCaster()) + + } + return r +} + +// A list of users that the owners of this role can impersonate. *Note*: in +// Serverless, the run-as feature is disabled. For API compatibility, you can +// still specify an empty `run_as` field, but a non-empty list will be rejected. +// API name: run_as +func (r *PutRole) RunAs(runas ...string) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + + } + return r +} + +// Indicates roles that might be incompatible with the current cluster license, +// specifically roles with document and field level security. When the cluster +// license doesn’t allow certain features for a given role, this parameter is +// updated dynamically to list the incompatible features. If `enabled` is +// `false`, the role is ignored, but is still listed in the response from the +// authenticate API. 
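A sketch of the fluent PutRole builder, with a hypothetical role name and a global-privileges payload that follows the documented `global` shape (the JSON itself is an assumption, not part of this change). AddGlobal lazily initialises both the request and the map, so entries can be attached one at a time:

```go
package example

import (
	"context"
	"encoding/json"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole"
)

func createRole(ctx context.Context, tp elastictransport.Interface) error {
	role := putrole.NewPutRoleFunc(tp)("app_reader"). // role name is illustrative
		Description("hypothetical read-only application role")

	// One global-privilege entry at a time; the payload shape is assumed.
	role.AddGlobal("application",
		json.RawMessage(`{"manage": {"applications": ["myapp-*"]}}`))

	_, err := role.Do(ctx)
	return err
}
```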
+// API name: transient_metadata +func (r *PutRole) TransientMetadata(transientmetadata map[string]json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.TransientMetadata = transientmetadata + return r +} + +func (r *PutRole) AddTransientMetadatum(key string, value json.RawMessage) *PutRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.TransientMetadata == nil { + r.req.TransientMetadata = make(map[string]json.RawMessage) + } else { + tmp = r.req.TransientMetadata + } + + tmp[key] = value + + r.req.TransientMetadata = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/request.go new file mode 100644 index 000000000..975ca1afd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/request.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrole + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" +) + +// Request holds the request body struct for the package putrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_role/SecurityPutRoleRequest.ts#L32-L111 +type Request struct { + + // Applications A list of application privilege entries. + Applications []types.ApplicationPrivileges `json:"applications,omitempty"` + // Cluster A list of cluster privileges. These privileges define the cluster-level + // actions for users with this role. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster,omitempty"` + // Description Optional description of the role descriptor + Description *string `json:"description,omitempty"` + // Global An object defining global privileges. A global privilege is a form of cluster + // privilege that is request-aware. Support for global privileges is currently + // limited to the management of application privileges. + Global map[string]json.RawMessage `json:"global,omitempty"` + // Indices A list of indices permissions entries. + Indices []types.IndicesPrivileges `json:"indices,omitempty"` + // Metadata Optional metadata. 
Within the metadata object, keys that begin with an + // underscore (`_`) are reserved for system use. + Metadata types.Metadata `json:"metadata,omitempty"` + // RemoteCluster A list of remote cluster permissions entries. + RemoteCluster []types.RemoteClusterPrivileges `json:"remote_cluster,omitempty"` + // RemoteIndices A list of remote indices permissions entries. + // + // NOTE: Remote indices are effective for remote clusters configured with the + // API key based model. + // They have no effect for remote clusters configured with the certificate based + // model. + RemoteIndices []types.RemoteIndicesPrivileges `json:"remote_indices,omitempty"` + // RunAs A list of users that the owners of this role can impersonate. *Note*: in + // Serverless, the run-as feature is disabled. For API compatibility, you can + // still specify an empty `run_as` field, but a non-empty list will be rejected. + RunAs []string `json:"run_as,omitempty"` + // TransientMetadata Indicates roles that might be incompatible with the current cluster license, + // specifically roles with document and field level security. When the cluster + // license doesn’t allow certain features for a given role, this parameter is + // updated dynamically to list the incompatible features. If `enabled` is + // `false`, the role is ignored, but is still listed in the response from the + // authenticate API. + TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Global: make(map[string]json.RawMessage, 0), + TransientMetadata: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putrole request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return fmt.Errorf("%s | %w", "Applications", err) + } + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "global": + if s.Global == nil { + s.Global = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Global); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return 
fmt.Errorf("%s | %w", "RunAs", err) + } + + case "transient_metadata": + if s.TransientMetadata == nil { + s.TransientMetadata = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.TransientMetadata); err != nil { + return fmt.Errorf("%s | %w", "TransientMetadata", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/response.go new file mode 100644 index 000000000..d2e637297 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrole + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L29 +type Response struct { + + // Role When an existing role is updated, `created` is set to `false`. + Role types.CreatedStatus `json:"role"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/put_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/put_role_mapping.go new file mode 100644 index 000000000..7941488b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/put_role_mapping.go @@ -0,0 +1,538 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
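Putting the pieces together: FromJSON accepts the same body the fluent setters build, Do decodes non-2xx replies into types.ElasticsearchError (recoverable with errors.As, since that is what the generated Do returns), and the response's `created` flag distinguishes creation from update. The role name and JSON body here are illustrative:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func upsertRole(ctx context.Context, tp elastictransport.Interface) error {
	req, err := putrole.NewRequest().FromJSON(
		`{"description": "hypothetical role loaded from JSON", "run_as": []}`,
	)
	if err != nil {
		return err
	}

	res, err := putrole.NewPutRoleFunc(tp)("json_role").Request(req).Do(ctx)
	if err != nil {
		// Non-2xx responses surface as the decoded Elasticsearch error body.
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			return fmt.Errorf("put role rejected with HTTP %d: %w", esErr.Status, esErr)
		}
		return err
	}

	// created=false means an existing role was updated rather than created.
	fmt.Println("created:", res.Role.Created)
	return nil
}
```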
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update role mappings. +// +// Role mappings define which roles are assigned to each user. +// Each mapping has rules that identify users and a list of roles that are +// granted to those users. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. The create or update role mappings API +// cannot update role mappings that are defined in role mapping files. +// +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. +// Roles can be created by using the create or update roles API or roles files. +// +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. +package putrolemapping + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutRoleMapping struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + name string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutRoleMapping type alias for index. +type NewPutRoleMapping func(name string) *PutRoleMapping + +// NewPutRoleMappingFunc returns a new instance of PutRoleMapping with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutRoleMappingFunc(tp elastictransport.Interface) NewPutRoleMapping { + return func(name string) *PutRoleMapping { + n := New(tp) + + n._name(name) + + return n + } +} + +// Create or update role mappings. +// +// Role mappings define which roles are assigned to each user. 
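
As a concrete illustration of the role-template mechanism described in the package doc above: with `"format": "json"`, a Mustache source such as `{{#tojson}}groups{{/tojson}}` yields a JSON array of role names derived from the user's groups. The realm name "saml1" and the rule are invented; the request shape follows the Request type vendored later in this diff.

// Sketch only; realm name and rule are illustrative.
package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping"
)

func main() {
	req, err := putrolemapping.NewRequest().FromJSON(`{
		"enabled": true,
		"role_templates": [
			{"template": {"source": "{{#tojson}}groups{{/tojson}}"}, "format": "json"}
		],
		"rules": {"field": {"realm.name": "saml1"}}
	}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d role template(s)\n", len(req.RoleTemplates))
}
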
+// Each mapping has rules that identify users and a list of roles that are +// granted to those users. +// The role mapping APIs are generally the preferred way to manage role mappings +// rather than using role mapping files. The create or update role mappings API +// cannot update role mappings that are defined in role mapping files. +// +// NOTE: This API does not create roles. Rather, it maps users to existing +// roles. +// Roles can be created by using the create or update roles API or roles files. +// +// **Role templates** +// +// The most common use for role mappings is to create a mapping from a known +// value on the user to a fixed role name. +// For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should +// be given the superuser role in Elasticsearch. +// The `roles` field is used for this purpose. +// +// For more complex needs, it is possible to use Mustache templates to +// dynamically determine the names of the roles that should be granted to the +// user. +// The `role_templates` field is used for this purpose. +// +// NOTE: To use role templates successfully, the relevant scripting feature must +// be enabled. +// Otherwise, all attempts to create a role mapping with role templates fail. +// +// All of the user fields that are available in the role mapping rules are also +// available in the role templates. +// Thus it is possible to assign a user to a role that reflects their username, +// their groups, or the name of the realm to which they authenticated. +// +// By default a template is evaluated to produce a single string that is the +// name of the role which should be assigned to the user. +// If the format of the template is set to "json" then the template is expected +// to produce a JSON string or an array of JSON strings for the role names. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping +func New(tp elastictransport.Interface) *PutRoleMapping { + r := &PutRoleMapping{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutRoleMapping) Raw(raw io.Reader) *PutRoleMapping { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutRoleMapping) Request(req *Request) *PutRoleMapping { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutRoleMapping) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutRoleMapping: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("role_mapping") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "name", r.name) + } + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutRoleMapping) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.put_role_mapping") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_role_mapping") + if reader := instrument.RecordRequestBody(ctx, "security.put_role_mapping", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_role_mapping") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutRoleMapping query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putrolemapping.Response +func (r PutRoleMapping) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_role_mapping") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutRoleMapping headers map. +func (r *PutRoleMapping) Header(key, value string) *PutRoleMapping { + r.headers.Set(key, value) + + return r +} + +// Name The distinct name that identifies the role mapping. +// The name is used solely as an identifier to facilitate interaction via the +// API; it does not affect the behavior of the mapping in any way. +// API Name: name +func (r *PutRoleMapping) _name(name string) *PutRoleMapping { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Refresh If `true` (the default) then refresh the affected shards to make this +// operation visible to search, if `wait_for` then wait for a refresh to make +// this operation visible to search, if `false` then do nothing with refreshes. 
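
A hedged end-to-end sketch for this builder, using the setters and the refresh enum defined in this file. The client constructor, address, mapping name, and rule are placeholders; per the doc comments below, exactly one of `roles` or `role_templates` may be set, and the rule is easiest to express as JSON via the Request type.

// Sketch only; not part of the vendored diff.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed local cluster
	})
	if err != nil {
		log.Fatal(err)
	}

	req, err := putrolemapping.NewRequest().FromJSON(`{
		"rules": {"field": {"username": "*"}}
	}`)
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Security.PutRoleMapping("mapping1").
		Request(req).
		Enabled(true).
		Roles("user").
		Refresh(refresh.True).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The response carries a CreatedStatus under "role_mapping".
	log.Printf("created: %v", res.RoleMapping.Created)
}
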
+// API name: refresh +func (r *PutRoleMapping) Refresh(refresh refresh.Refresh) *PutRoleMapping { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutRoleMapping) ErrorTrace(errortrace bool) *PutRoleMapping { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutRoleMapping) FilterPath(filterpaths ...string) *PutRoleMapping { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutRoleMapping) Human(human bool) *PutRoleMapping { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutRoleMapping) Pretty(pretty bool) *PutRoleMapping { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Mappings that have `enabled` set to `false` are ignored when role mapping is +// performed. +// API name: enabled +func (r *PutRoleMapping) Enabled(enabled bool) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Enabled = &enabled + + return r +} + +// Additional metadata that helps define which roles are assigned to each user. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// API name: metadata +func (r *PutRoleMapping) Metadata(metadata types.MetadataVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// A list of Mustache templates that will be evaluated to determine the roles +// names that should granted to the users that match the role mapping rules. +// Exactly one of `roles` or `role_templates` must be specified. +// API name: role_templates +func (r *PutRoleMapping) RoleTemplates(roletemplates ...types.RoleTemplateVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roletemplates { + + r.req.RoleTemplates = append(r.req.RoleTemplates, *v.RoleTemplateCaster()) + + } + return r +} + +// A list of role names that are granted to the users that match the role +// mapping rules. +// Exactly one of `roles` or `role_templates` must be specified. +// API name: roles +func (r *PutRoleMapping) Roles(roles ...string) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + + r.req.Roles = append(r.req.Roles, v) + + } + return r +} + +// The rules that determine which users should be matched by the mapping. 
+// A rule is a logical condition that is expressed by using a JSON DSL. +// API name: rules +func (r *PutRoleMapping) Rules(rules types.RoleMappingRuleVariant) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Rules = rules.RoleMappingRuleCaster() + + return r +} + +// API name: run_as +func (r *PutRoleMapping) RunAs(runas ...string) *PutRoleMapping { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range runas { + + r.req.RunAs = append(r.req.RunAs, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/request.go new file mode 100644 index 000000000..b178bb4b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/request.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrolemapping + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putrolemapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L25-L103 +type Request struct { + + // Enabled Mappings that have `enabled` set to `false` are ignored when role mapping is + // performed. + Enabled *bool `json:"enabled,omitempty"` + // Metadata Additional metadata that helps define which roles are assigned to each user. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleTemplates A list of Mustache templates that will be evaluated to determine the roles + // names that should granted to the users that match the role mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. + RoleTemplates []types.RoleTemplate `json:"role_templates,omitempty"` + // Roles A list of role names that are granted to the users that match the role + // mapping rules. + // Exactly one of `roles` or `role_templates` must be specified. + Roles []string `json:"roles,omitempty"` + // Rules The rules that determine which users should be matched by the mapping. + // A rule is a logical condition that is expressed by using a JSON DSL. 
+ Rules *types.RoleMappingRule `json:"rules,omitempty"` + RunAs []string `json:"run_as,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putrolemapping request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_templates": + if err := dec.Decode(&s.RoleTemplates); err != nil { + return fmt.Errorf("%s | %w", "RoleTemplates", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return fmt.Errorf("%s | %w", "RunAs", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/response.go new file mode 100644 index 000000000..c7e808d55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putrolemapping + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package putrolemapping +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 +type Response struct { + Created *bool `json:"created,omitempty"` + RoleMapping types.CreatedStatus `json:"role_mapping"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/put_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/put_user.go new file mode 100644 index 000000000..f19d18347 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/put_user.go @@ -0,0 +1,495 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update users. +// +// Add and update users in the native realm. +// A password is required for adding a new user but is optional when updating an +// existing user. +// To change a user's password without updating any other fields, use the change +// password API. +package putuser + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + usernameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + username string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutUser type alias for index. +type NewPutUser func(username string) *PutUser + +// NewPutUserFunc returns a new instance of PutUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPutUserFunc(tp elastictransport.Interface) NewPutUser { + return func(username string) *PutUser { + n := New(tp) + + n._username(username) + + return n + } +} + +// Create or update users. +// +// Add and update users in the native realm. +// A password is required for adding a new user but is optional when updating an +// existing user. +// To change a user's password without updating any other fields, use the change +// password API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user +func New(tp elastictransport.Interface) *PutUser { + r := &PutUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutUser) Raw(raw io.Reader) *PutUser { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutUser) Request(req *Request) *PutUser { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutUser: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == usernameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("user") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "username", r.username) + } + path.WriteString(r.username) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.put_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.put_user") + if reader := instrument.RecordRequestBody(ctx, "security.put_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.put_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putuser.Response +func (r PutUser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.put_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutUser headers map. +func (r *PutUser) Header(key, value string) *PutUser { + r.headers.Set(key, value) + + return r +} + +// Username An identifier for the user. +// +// NOTE: Usernames must be at least 1 and no more than 507 characters. +// They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, +// punctuation, and printable symbols in the Basic Latin (ASCII) block. +// Leading or trailing whitespace is not allowed. +// API Name: username +func (r *PutUser) _username(username string) *PutUser { + r.paramSet |= usernameMask + r.username = username + + return r +} + +// Refresh Valid values are `true`, `false`, and `wait_for`. +// These values have the same meaning as in the index API, but the default value +// for this API is true. 
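
The Perform/Do pairs above hook into elastictransport.Instrumentation, recording spans such as "security.put_user" and optionally the request body. A sketch of turning that on, assuming the NewOtelInstrumentation constructor from elastic-transport-go v8.7 (whose OpenTelemetry dependencies this diff adds to go.mod) and the Instrumentation field on the client Config; a nil provider is assumed to fall back to the globally registered TracerProvider.

// Sketch only; constructor signature is an assumption, verify against
// elastic-transport-go v8.7 before relying on it.
package main

import (
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses:       []string{"http://localhost:9200"},
		Instrumentation: elastictransport.NewOtelInstrumentation(nil, false, "9.1.0"),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = es // each typed call now records a span such as "security.put_user"
}
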
+// API name: refresh +func (r *PutUser) Refresh(refresh refresh.Refresh) *PutUser { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutUser) ErrorTrace(errortrace bool) *PutUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutUser) FilterPath(filterpaths ...string) *PutUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutUser) Human(human bool) *PutUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutUser) Pretty(pretty bool) *PutUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The email of the user. +// API name: email +func (r *PutUser) Email(email string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Email = &email + + return r +} + +// Specifies whether the user is enabled. +// API name: enabled +func (r *PutUser) Enabled(enabled bool) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Enabled = &enabled + + return r +} + +// The full name of the user. +// API name: full_name +func (r *PutUser) FullName(fullname string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FullName = &fullname + + return r +} + +// Arbitrary metadata that you want to associate with the user. +// API name: metadata +func (r *PutUser) Metadata(metadata types.MetadataVariant) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The user's password. +// Passwords must be at least 6 characters long. +// When adding a user, one of `password` or `password_hash` is required. +// When updating an existing user, the password is optional, so that other +// fields on the user (such as their roles) may be updated without modifying the +// user's password +// API name: password +func (r *PutUser) Password(password string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Password = &password + + return r +} + +// A hash of the user's password. +// This must be produced using the same hashing algorithm as has been configured +// for password storage. +// For more details, see the explanation of the +// `xpack.security.authc.password_hashing.algorithm` setting in the user cache +// and password hash algorithm documentation. 
+// Using this parameter allows the client to pre-hash the password for +// performance and/or confidentiality reasons. +// The `password` parameter and the `password_hash` parameter cannot be used in +// the same request. +// API name: password_hash +func (r *PutUser) PasswordHash(passwordhash string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PasswordHash = &passwordhash + + return r +} + +// A set of roles the user has. +// The roles determine the user's access permissions. +// To create a user without any roles, specify an empty list (`[]`). +// API name: roles +func (r *PutUser) Roles(roles ...string) *PutUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range roles { + + r.req.Roles = append(r.req.Roles, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/request.go new file mode 100644 index 000000000..ab12c68c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/request.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putuser + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_user/SecurityPutUserRequest.ts#L23-L101 +type Request struct { + + // Email The email of the user. + Email *string `json:"email,omitempty"` + // Enabled Specifies whether the user is enabled. + Enabled *bool `json:"enabled,omitempty"` + // FullName The full name of the user. + FullName *string `json:"full_name,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the user. + Metadata types.Metadata `json:"metadata,omitempty"` + // Password The user's password. + // Passwords must be at least 6 characters long. + // When adding a user, one of `password` or `password_hash` is required. + // When updating an existing user, the password is optional, so that other + // fields on the user (such as their roles) may be updated without modifying the + // user's password + Password *string `json:"password,omitempty"` + // PasswordHash A hash of the user's password. 
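
A minimal sketch of the fluent form, using only setters defined in this file; the username, password, email, and roles are placeholders, and the client construction follows the same assumed NewTypedClient pattern as the earlier sketches.

// Sketch only; credentials are placeholders.
package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Security.PutUser("jacknich").
		Password("l0ng-r4nd0m-p@ssw0rd").
		Roles("admin", "other_role1").
		Email("jacknich@example.com").
		Enabled(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Per the response doc below: true on create, false when an existing
	// user was updated.
	log.Printf("created: %v", res.Created)
}
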
+ // This must be produced using the same hashing algorithm as has been configured + // for password storage. + // For more details, see the explanation of the + // `xpack.security.authc.password_hashing.algorithm` setting in the user cache + // and password hash algorithm documentation. + // Using this parameter allows the client to pre-hash the password for + // performance and/or confidentiality reasons. + // The `password` parameter and the `password_hash` parameter cannot be used in + // the same request. + PasswordHash *string `json:"password_hash,omitempty"` + // Roles A set of roles the user has. + // The roles determine the user's access permissions. + // To create a user without any roles, specify an empty list (`[]`). + Roles []string `json:"roles,omitempty"` + Username *string `json:"username,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putuser request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "full_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = &o + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "password_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PasswordHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PasswordHash = &o + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/response.go new file mode 100644 index 000000000..bc3c2f59a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putuser + +// Response holds the response body struct for the package putuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_user/SecurityPutUserResponse.ts#L20-L28 +type Response struct { + + // Created A successful call returns a JSON structure that shows whether the user has + // been created or updated. + // When an existing user is updated, `created` is set to `false`. + Created bool `json:"created"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/query_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/query_api_keys.go new file mode 100644 index 000000000..02f34b19c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/query_api_keys.go @@ -0,0 +1,531 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find API keys with a query. +// +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. +// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. 
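
Stepping back to the putuser request type above: its generated UnmarshalJSON is deliberately lenient about scalar encodings, coercing quoted booleans through strconv.ParseBool. A small self-contained check of that behavior:

// Sketch only; demonstrates the lenient decoding visible in request.go above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser"
)

func main() {
	var a, b putuser.Request
	if err := json.Unmarshal([]byte(`{"enabled": true}`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"enabled": "true"}`), &b); err != nil {
		panic(err)
	}
	fmt.Println(*a.Enabled, *b.Enabled) // true true: both encodings decode
}
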
+// Refer to the linked documentation for examples of how to find API keys: +package queryapikeys + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type QueryApiKeys struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQueryApiKeys type alias for index. +type NewQueryApiKeys func() *QueryApiKeys + +// NewQueryApiKeysFunc returns a new instance of QueryApiKeys with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryApiKeysFunc(tp elastictransport.Interface) NewQueryApiKeys { + return func() *QueryApiKeys { + n := New(tp) + + return n + } +} + +// Find API keys with a query. +// +// Get a paginated list of API keys and their information. +// You can optionally filter the results with a query. +// +// To use this API, you must have at least the `manage_own_api_key` or the +// `read_security` cluster privileges. +// If you have only the `manage_own_api_key` privilege, this API returns only +// the API keys that you own. +// If you have the `read_security`, `manage_api_key`, or greater privileges +// (including `manage_security`), this API returns all API keys regardless of +// ownership. +// Refer to the linked documentation for examples of how to find API keys: +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys +func New(tp elastictransport.Interface) *QueryApiKeys { + r := &QueryApiKeys{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *QueryApiKeys) Raw(raw io.Reader) *QueryApiKeys { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *QueryApiKeys) Request(req *Request) *QueryApiKeys { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *QueryApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for QueryApiKeys: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r QueryApiKeys) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.query_api_keys") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_api_keys") + if reader := instrument.RecordRequestBody(ctx, "security.query_api_keys", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_api_keys") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the QueryApiKeys query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a queryapikeys.Response +func (r QueryApiKeys) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_api_keys") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := 
r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the QueryApiKeys headers map. +func (r *QueryApiKeys) Header(key, value string) *QueryApiKeys { + r.headers.Set(key, value) + + return r +} + +// WithLimitedBy Return the snapshot of the owner user's role descriptors associated with the +// API key. +// An API key's actual permission is the intersection of its assigned role +// descriptors and the owner user's role descriptors (effectively limited by +// it). +// An API key cannot retrieve any API key’s limited-by role descriptors +// (including itself) unless it has `manage_api_key` or higher privileges. +// API name: with_limited_by +func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { + r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) + + return r +} + +// WithProfileUid Determines whether to also retrieve the profile UID for the API key owner +// principal. +// If it exists, the profile UID is returned under the `profile_uid` response +// field for each API key. +// API name: with_profile_uid +func (r *QueryApiKeys) WithProfileUid(withprofileuid bool) *QueryApiKeys { + r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) + + return r +} + +// TypedKeys Determines whether aggregation names are prefixed by their respective types +// in the response. +// API name: typed_keys +func (r *QueryApiKeys) TypedKeys(typedkeys bool) *QueryApiKeys { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *QueryApiKeys) ErrorTrace(errortrace bool) *QueryApiKeys { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *QueryApiKeys) FilterPath(filterpaths ...string) *QueryApiKeys { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
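
As the Do method above shows, a non-2xx response is decoded into a *types.ElasticsearchError (with Status back-filled from the HTTP code when the body omits it) and returned as the error, so callers can branch on it. A sketch, assuming the same client setup as before; the response field name Total is an assumption since queryapikeys/response.go appears later in this diff.

// Sketch only; error-type assertion follows the Do path vendored above.
package main

import (
	"context"
	"errors"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := es.Security.QueryApiKeys().WithLimitedBy(true).Do(context.Background())
	if err != nil {
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			// Server-side rejection, e.g. 403 when the caller lacks
			// manage_own_api_key or read_security.
			log.Fatalf("elasticsearch %d: %s", esErr.Status, esErr.ErrorCause.Type)
		}
		log.Fatal(err) // transport-level failure
	}
	log.Printf("total API keys: %d", res.Total)
}
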
+// API name: human +func (r *QueryApiKeys) Human(human bool) *QueryApiKeys { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *QueryApiKeys) Pretty(pretty bool) *QueryApiKeys { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Any aggregations to run over the corpus of returned API keys. +// Aggregations and queries work together. Aggregations are computed only on the +// API keys that match the query. +// This supports only a subset of aggregation types, namely: `terms`, `range`, +// `date_range`, `missing`, +// `cardinality`, `value_count`, `composite`, `filter`, and `filters`. +// Additionally, aggregations only run over the same subset of fields that query +// works with. +// API name: aggregations +func (r *QueryApiKeys) Aggregations(aggregations map[string]types.ApiKeyAggregationContainer) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Aggregations = aggregations + return r +} + +func (r *QueryApiKeys) AddAggregation(key string, value types.ApiKeyAggregationContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]types.ApiKeyAggregationContainer + if r.req.Aggregations == nil { + r.req.Aggregations = make(map[string]types.ApiKeyAggregationContainer) + } else { + tmp = r.req.Aggregations + } + + tmp[key] = *value.ApiKeyAggregationContainerCaster() + + r.req.Aggregations = tmp + return r +} + +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: from +func (r *QueryApiKeys) From(from int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// A query to filter which API keys to return. +// If the query parameter is missing, it is equivalent to a `match_all` query. +// The query supports a subset of query types, including `match_all`, `bool`, +// `term`, `terms`, `match`, +// `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +// You can query the following public information associated with an API key: +// `id`, `type`, `name`, +// `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, +// and `metadata`. +// +// NOTE: The queryable string values associated with API keys are internally +// mapped as keywords. +// Consequently, if no `analyzer` parameter is specified for a `match` query, +// then the provided match query string is interpreted as a single keyword +// value. +// Such a match query is hence equivalent to a `term` query. +// API name: query +func (r *QueryApiKeys) Query(query types.ApiKeyQueryContainerVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.ApiKeyQueryContainerCaster() + + return r +} + +// The search after definition. 
+// API name: search_after +func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValueVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } + + return r +} + +// The number of hits to return. +// It must not be negative. +// The `size` parameter can be set to `0`, in which case no API key matches are +// returned, only the aggregation results. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: size +func (r *QueryApiKeys) Size(size int) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// The sort definition. +// Other than `id`, all public fields of an API key are eligible for sorting. +// In addition, sort can also be applied to the `_doc` field to sort by index +// order. +// API name: sort +func (r *QueryApiKeys) Sort(sorts ...types.SortCombinationsVariant) *QueryApiKeys { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/request.go new file mode 100644 index 000000000..6c0a0ce91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/request.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryapikeys + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package queryapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/QueryApiKeysRequest.ts#L26-L126 +type Request struct { + + // Aggregations Any aggregations to run over the corpus of returned API keys. + // Aggregations and queries work together. Aggregations are computed only on the + // API keys that match the query. 
+ // This supports only a subset of aggregation types, namely: `terms`, `range`, + // `date_range`, `missing`, + // `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + // Additionally, aggregations only run over the same subset of fields that query + // works with. + Aggregations map[string]types.ApiKeyAggregationContainer `json:"aggregations,omitempty"` + // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Query A query to filter which API keys to return. + // If the query parameter is missing, it is equivalent to a `match_all` query. + // The query supports a subset of query types, including `match_all`, `bool`, + // `term`, `terms`, `match`, + // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + // You can query the following public information associated with an API key: + // `id`, `type`, `name`, + // `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, + // and `metadata`. + // + // NOTE: The queryable string values associated with API keys are internally + // mapped as keywords. + // Consequently, if no `analyzer` parameter is specified for a `match` query, + // then the provided match query string is interpreted as a single keyword + // value. + // Such a match query is hence equivalent to a `term` query. + Query *types.ApiKeyQueryContainer `json:"query,omitempty"` + // SearchAfter The search after definition. + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // Size The number of hits to return. + // It must not be negative. + // The `size` parameter can be set to `0`, in which case no API key matches are + // returned, only the aggregation results. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + Size *int `json:"size,omitempty"` + // Sort The sort definition. + // Other than `id`, all public fields of an API key are eligible for sorting. + // In addition, sort can also be applied to the `_doc` field to sort by index + // order. 
+ Sort []types.SortCombinations `json:"sort,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Aggregations: make(map[string]types.ApiKeyAggregationContainer, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Queryapikeys request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.ApiKeyAggregationContainer, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/response.go new file mode 100644 index 000000000..42f5cb386 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys/response.go @@ -0,0 +1,245 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
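Reviewer note: with query_api_keys.go and request.go in place, end-to-end usage looks roughly like the sketch below. It assumes a reachable cluster at localhost and that the typed client exposes this endpoint as es.Security.QueryApiKeys(), following the generated client index; the address, paging values, and printed fields are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumed local cluster; address and security settings are illustrative.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// From/Size mirror the generated body setters above (they lazily
	// allocate the request); WithLimitedBy is a query-string parameter.
	res, err := es.Security.QueryApiKeys().
		From(0).
		Size(20).
		WithLimitedBy(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("returned %d of %d api keys\n", res.Count, res.Total)
}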
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryapikeys + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package queryapikeys +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/QueryApiKeysResponse.ts#L26-L45 +type Response struct { + + // Aggregations The aggregations result, if requested. + Aggregations map[string]types.ApiKeyAggregate `json:"aggregations,omitempty"` + // ApiKeys A list of API key information. + ApiKeys []types.ApiKey `json:"api_keys"` + // Count The number of API keys returned in the response. + Count int `json:"count"` + // Total The total number of API keys found. + Total int `json:"total"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.ApiKeyAggregate, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.ApiKeyAggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.ApiKeyAggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := 
types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "api_keys": + if err := dec.Decode(&s.ApiKeys); err != nil { + return fmt.Errorf("%s | %w", "ApiKeys", err) + } + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/query_role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/query_role.go new file mode 100644 index 000000000..053de07e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/query_role.go @@ -0,0 +1,441 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find roles with a query. +// +// Get roles in a paginated manner. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. 
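Reviewer note: the response decoder above demultiplexes typed aggregation keys of the form `type#name` (for example `sterms#usernames`) into concrete aggregate values stored under the bare name, falling back to map[string]any for unknown or untyped keys. A hedged sketch of consuming one such entry, reusing the response from the previous example and assuming a terms aggregation named "usernames" was added via AddAggregation; the type assertion assumes ApiKeyAggregate holds the *types.StringTermsAggregate pointer the decoder assigns.

// printUsernameBuckets shares the imports of the previous sketch plus
// typedapi/types and typedapi/security/queryapikeys.
func printUsernameBuckets(res *queryapikeys.Response) {
	// The decoder stored the aggregate under its bare name; the concrete
	// type was chosen from the "sterms#" prefix of the typed key.
	if agg, ok := res.Aggregations["usernames"]; ok {
		if terms, ok := agg.(*types.StringTermsAggregate); ok {
			fmt.Printf("username buckets: %+v\n", terms.Buckets)
		}
	}
}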
+// Also, the results can be paginated and sorted. +package queryrole + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type QueryRole struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQueryRole type alias for index. +type NewQueryRole func() *QueryRole + +// NewQueryRoleFunc returns a new instance of QueryRole with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryRoleFunc(tp elastictransport.Interface) NewQueryRole { + return func() *QueryRole { + n := New(tp) + + return n + } +} + +// Find roles with a query. +// +// Get roles in a paginated manner. +// The role management APIs are generally the preferred way to manage roles, +// rather than using file-based role management. +// The query roles API does not retrieve roles that are defined in roles files, +// nor built-in ones. +// You can optionally filter the results with a query. +// Also, the results can be paginated and sorted. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role +func New(tp elastictransport.Interface) *QueryRole { + r := &QueryRole{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *QueryRole) Raw(raw io.Reader) *QueryRole { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *QueryRole) Request(req *Request) *QueryRole { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *QueryRole) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for QueryRole: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("role") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r QueryRole) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.query_role") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_role") + if reader := instrument.RecordRequestBody(ctx, "security.query_role", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_role") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the QueryRole query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a queryrole.Response +func (r QueryRole) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_role") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the QueryRole headers map. +func (r *QueryRole) Header(key, value string) *QueryRole { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *QueryRole) ErrorTrace(errortrace bool) *QueryRole { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *QueryRole) FilterPath(filterpaths ...string) *QueryRole { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *QueryRole) Human(human bool) *QueryRole { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *QueryRole) Pretty(pretty bool) *QueryRole { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: from +func (r *QueryRole) From(from int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// A query to filter which roles to return. +// If the query parameter is missing, it is equivalent to a `match_all` query. +// The query supports a subset of query types, including `match_all`, `bool`, +// `term`, `terms`, `match`, +// `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +// You can query the following information associated with roles: `name`, +// `description`, `metadata`, +// `applications.application`, `applications.privileges`, and +// `applications.resources`. 
+// API name: query +func (r *QueryRole) Query(query types.RoleQueryContainerVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.RoleQueryContainerCaster() + + return r +} + +// The search after definition. +// API name: search_after +func (r *QueryRole) SearchAfter(sortresults ...types.FieldValueVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } + + return r +} + +// The number of hits to return. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: size +func (r *QueryRole) Size(size int) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// The sort definition. +// You can sort on `username`, `roles`, or `enabled`. +// In addition, sort can also be applied to the `_doc` field to sort by index +// order. +// API name: sort +func (r *QueryRole) Sort(sorts ...types.SortCombinationsVariant) *QueryRole { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/request.go new file mode 100644 index 000000000..d679490dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/request.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryrole + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package queryrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_role/QueryRolesRequest.ts#L25-L85 +type Request struct { + + // From The starting document offset. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. 
+ // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Query A query to filter which roles to return. + // If the query parameter is missing, it is equivalent to a `match_all` query. + // The query supports a subset of query types, including `match_all`, `bool`, + // `term`, `terms`, `match`, + // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + // You can query the following information associated with roles: `name`, + // `description`, `metadata`, + // `applications.application`, `applications.privileges`, and + // `applications.resources`. + Query *types.RoleQueryContainer `json:"query,omitempty"` + // SearchAfter The search after definition. + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // Size The number of hits to return. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + Size *int `json:"size,omitempty"` + // Sort The sort definition. + // You can sort on `username`, `roles`, or `enabled`. + // In addition, sort can also be applied to the `_doc` field to sort by index + // order. + Sort []types.SortCombinations `json:"sort,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Queryrole request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/response.go new file mode 100644 index 000000000..dbbd01fb6 --- /dev/null +++ 
b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryrole + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package queryrole +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_role/QueryRolesResponse.ts#L23-L43 +type Response struct { + + // Count The number of roles returned in the response. + Count int `json:"count"` + // Roles A list of roles that match the query. + // The returned role format is an extension of the role definition format. + // It adds the `transient_metadata.enabled` and the `_sort` fields. + // `transient_metadata.enabled` is set to `false` in case the role is + // automatically disabled, for example when the role grants privileges that are + // not allowed by the installed license. + // `_sort` is present when the search query sorts on some field. + // It contains the array of values that have been used for sorting. + Roles []types.QueryRole `json:"roles"` + // Total The total number of roles found. + Total int `json:"total"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/query_user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/query_user.go new file mode 100644 index 000000000..2b548a1f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/query_user.go @@ -0,0 +1,446 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
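Reviewer note: the custom UnmarshalJSON implementations in these request types accept both JSON numbers and quoted numbers for from and size (the string branch goes through strconv.Atoi). A small offline check against this diff's queryrole package, runnable without a cluster:

package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole"
)

func main() {
	// Both a JSON number and a quoted number decode into the same *int.
	req, err := queryrole.NewRequest().FromJSON(`{"from": "10", "size": 25}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*req.From, *req.Size) // prints: 10 25
}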
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find users with a query. +// +// Get information for users in a paginated manner. +// You can optionally filter the results with a query. +// +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. +package queryuser + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type QueryUser struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQueryUser type alias for index. +type NewQueryUser func() *QueryUser + +// NewQueryUserFunc returns a new instance of QueryUser with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryUserFunc(tp elastictransport.Interface) NewQueryUser { + return func() *QueryUser { + n := New(tp) + + return n + } +} + +// Find users with a query. +// +// Get information for users in a paginated manner. +// You can optionally filter the results with a query. +// +// NOTE: As opposed to the get user API, built-in users are excluded from the +// result. +// This API is only for native users. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user +func New(tp elastictransport.Interface) *QueryUser { + r := &QueryUser{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *QueryUser) Raw(raw io.Reader) *QueryUser { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *QueryUser) Request(req *Request) *QueryUser { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *QueryUser) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for QueryUser: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("user") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r QueryUser) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.query_user") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.query_user") + if reader := instrument.RecordRequestBody(ctx, "security.query_user", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.query_user") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the QueryUser query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a queryuser.Response +func (r QueryUser) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.query_user") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the QueryUser headers map. +func (r *QueryUser) Header(key, value string) *QueryUser { + r.headers.Set(key, value) + + return r +} + +// WithProfileUid Determines whether to retrieve the user profile UID, if it exists, for the +// users. +// API name: with_profile_uid +func (r *QueryUser) WithProfileUid(withprofileuid bool) *QueryUser { + r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *QueryUser) ErrorTrace(errortrace bool) *QueryUser { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *QueryUser) FilterPath(filterpaths ...string) *QueryUser { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *QueryUser) Human(human bool) *QueryUser { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *QueryUser) Pretty(pretty bool) *QueryUser { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The starting document offset. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: from +func (r *QueryUser) From(from int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// A query to filter which users to return. +// If the query parameter is missing, it is equivalent to a `match_all` query. +// The query supports a subset of query types, including `match_all`, `bool`, +// `term`, `terms`, `match`, +// `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. 
+// You can query the following information associated with user: `username`, +// `roles`, `enabled`, `full_name`, and `email`. +// API name: query +func (r *QueryUser) Query(query types.UserQueryContainerVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.UserQueryContainerCaster() + + return r +} + +// The search after definition +// API name: search_after +func (r *QueryUser) SearchAfter(sortresults ...types.FieldValueVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } + + return r +} + +// The number of hits to return. +// It must not be negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: size +func (r *QueryUser) Size(size int) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// The sort definition. +// Fields eligible for sorting are: `username`, `roles`, `enabled`. +// In addition, sort can also be applied to the `_doc` field to sort by index +// order. +// API name: sort +func (r *QueryUser) Sort(sorts ...types.SortCombinationsVariant) *QueryUser { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/request.go new file mode 100644 index 000000000..5327978f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/request.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryuser + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package queryuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_user/SecurityQueryUserRequest.ts#L25-L91 +type Request struct { + + // From The starting document offset. 
+ // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Query A query to filter which users to return. + // If the query parameter is missing, it is equivalent to a `match_all` query. + // The query supports a subset of query types, including `match_all`, `bool`, + // `term`, `terms`, `match`, + // `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + // You can query the following information associated with user: `username`, + // `roles`, `enabled`, `full_name`, and `email`. + Query *types.UserQueryContainer `json:"query,omitempty"` + // SearchAfter The search after definition + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // Size The number of hits to return. + // It must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + Size *int `json:"size,omitempty"` + // Sort The sort definition. + // Fields eligible for sorting are: `username`, `roles`, `enabled`. + // In addition, sort can also be applied to the `_doc` field to sort by index + // order. + Sort []types.SortCombinations `json:"sort,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Queryuser request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/response.go new file mode 100644 index 000000000..95a7482bd --- /dev/null +++ 
b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package queryuser + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package queryuser +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_user/SecurityQueryUserResponse.ts#L23-L38 +type Response struct { + + // Count The number of users returned in the response. + Count int `json:"count"` + // Total The total number of users found. + Total int `json:"total"` + // Users A list of users that match the query. + Users []types.QueryUser `json:"users"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/request.go new file mode 100644 index 000000000..836768ccd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/request.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
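Reviewer note: the sort branch in these decoders accepts either a single sort combination or an array, wrapping the bare form into a one-element slice. An offline sketch against the queryuser package just added (the field values are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser"
)

func main() {
	// A bare sort value is wrapped into a one-element slice...
	one, err := queryuser.NewRequest().FromJSON(`{"sort": "username"}`)
	if err != nil {
		log.Fatal(err)
	}
	// ...while an array decodes element by element.
	many, err := queryuser.NewRequest().FromJSON(`{"sort": ["username", {"enabled": {"order": "desc"}}]}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(one.Sort), len(many.Sort)) // prints: 1 2
}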
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlauthenticate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package samlauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_authenticate/Request.ts#L23-L61 +type Request struct { + + // Content The SAML response as it was sent by the user's browser, usually a Base64 + // encoded XML document. + Content string `json:"content"` + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API + // has for the current user. + Ids []string `json:"ids"` + // Realm The name of the realm that should authenticate the SAML response. Useful in + // cases where many SAML realms are defined. + Realm *string `json:"realm,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Samlauthenticate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Content = o + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + case "realm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Realm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Realm = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/response.go new file mode 100644 index 000000000..4faba4562 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlauthenticate + +// Response holds the response body struct for the package samlauthenticate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_authenticate/Response.ts#L22-L45 +type Response struct { + + // AccessToken The access token that was generated by Elasticsearch. + AccessToken string `json:"access_token"` + // ExpiresIn The amount of time (in seconds) left until the token expires. + ExpiresIn int `json:"expires_in"` + // Realm The name of the realm where the user was authenticated. + Realm string `json:"realm"` + // RefreshToken The refresh token that was generated by Elasticsearch. + RefreshToken string `json:"refresh_token"` + // Username The authenticated user's name. + Username string `json:"username"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/saml_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/saml_authenticate.go new file mode 100644 index 000000000..590af2757 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate/saml_authenticate.go @@ -0,0 +1,427 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Authenticate SAML. +// +// Submit a SAML response message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML message that is submitted can be: +// +// * A response to a SAML authentication request that was previously created +// using the SAML prepare authentication API. +// * An unsolicited SAML message in the case of an IdP-initiated single sign-on +// (SSO) flow. 
+// +// In either case, the SAML message needs to be a base64 encoded XML document +// with a root element of `<Response>`. +// +// After successful validation, Elasticsearch responds with an Elasticsearch +// internal access token and refresh token that can be subsequently used for +// authentication. +// This API endpoint essentially exchanges SAML responses that indicate +// successful authentication in the IdP for Elasticsearch access and refresh +// tokens, which can be used for authentication against Elasticsearch. +package samlauthenticate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SamlAuthenticate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSamlAuthenticate type alias for index. +type NewSamlAuthenticate func() *SamlAuthenticate + +// NewSamlAuthenticateFunc returns a new instance of SamlAuthenticate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSamlAuthenticateFunc(tp elastictransport.Interface) NewSamlAuthenticate { + return func() *SamlAuthenticate { + n := New(tp) + + return n + } +} + +// Authenticate SAML. +// +// Submit a SAML response message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML message that is submitted can be: +// +// * A response to a SAML authentication request that was previously created +// using the SAML prepare authentication API. +// * An unsolicited SAML message in the case of an IdP-initiated single sign-on +// (SSO) flow. +// +// In either case, the SAML message needs to be a base64 encoded XML document +// with a root element of `<Response>`. +// +// After successful validation, Elasticsearch responds with an Elasticsearch +// internal access token and refresh token that can be subsequently used for +// authentication. +// This API endpoint essentially exchanges SAML responses that indicate +// successful authentication in the IdP for Elasticsearch access and refresh +// tokens, which can be used for authentication against Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate +func New(tp elastictransport.Interface) *SamlAuthenticate { + r := &SamlAuthenticate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method.
+func (r *SamlAuthenticate) Raw(raw io.Reader) *SamlAuthenticate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SamlAuthenticate) Request(req *Request) *SamlAuthenticate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SamlAuthenticate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SamlAuthenticate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("authenticate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SamlAuthenticate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_authenticate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_authenticate") + if reader := instrument.RecordRequestBody(ctx, "security.saml_authenticate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_authenticate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlAuthenticate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a samlauthenticate.Response +func (r SamlAuthenticate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_authenticate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SamlAuthenticate headers map. +func (r *SamlAuthenticate) Header(key, value string) *SamlAuthenticate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlAuthenticate) ErrorTrace(errortrace bool) *SamlAuthenticate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SamlAuthenticate) FilterPath(filterpaths ...string) *SamlAuthenticate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SamlAuthenticate) Human(human bool) *SamlAuthenticate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SamlAuthenticate) Pretty(pretty bool) *SamlAuthenticate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The SAML response as it was sent by the user's browser, usually a Base64 +// encoded XML document. +// API name: content +func (r *SamlAuthenticate) Content(content string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Content = content + + return r +} + +// A JSON array with all the valid SAML Request Ids that the caller of the API +// has for the current user. +// API name: ids +func (r *SamlAuthenticate) Ids(ids ...string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ids = ids + + return r +} + +// The name of the realm that should authenticate the SAML response. Useful in +// cases where many SAML realms are defined. +// API name: realm +func (r *SamlAuthenticate) Realm(realm string) *SamlAuthenticate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/request.go new file mode 100644 index 000000000..a5b5c43ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/request.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
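The fluent setters above (Content, Ids, Realm) mirror the request body fields, so a service provider never needs to build the Request struct by hand. A sketch of the resulting call, assuming a typed client built with `elasticsearch.NewTypedClient`; the address, `samlResponse`, and `requestID` values are placeholders that a real SP would take from its configuration, its SSO callback, and the earlier prepare-authentication step.

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

// exchangeSAMLResponse trades the Base64-encoded SAML <Response> document from
// the user's browser for Elasticsearch access and refresh tokens.
func exchangeSAMLResponse(ctx context.Context, es *elasticsearch.TypedClient, samlResponse, requestID string) error {
	res, err := es.Security.SamlAuthenticate().
		Content(samlResponse). // the SAMLResponse form parameter, still Base64 encoded
		Ids(requestID).        // ID(s) of the AuthnRequest(s) this response may answer
		Do(ctx)                // Realm(...) is only needed when several SAML realms exist
	if err != nil {
		return err
	}

	// The returned tokens can now authenticate requests on the user's behalf.
	fmt.Printf("user %q authenticated via realm %q; access token expires in %ds\n",
		res.Username, res.Realm, res.ExpiresIn)
	_, _ = res.AccessToken, res.RefreshToken
	return nil
}

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder inputs: a real SP receives these from its SSO callback.
	if err := exchangeSAMLResponse(context.Background(), es,
		"PHNhbWxwOlJlc3BvbnNlPi4uLg==", "_abc123"); err != nil {
		log.Fatal(err)
	}
}
```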
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlcompletelogout + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Request holds the request body struct for the package samlcompletelogout +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_complete_logout/Request.ts#L23-L61 +type Request struct { + + // Content If the SAML IdP sends the logout response with the HTTP-Post binding, this + // field must be set to the value of the SAMLResponse form parameter from the + // logout response. + Content *string `json:"content,omitempty"` + // Ids A JSON array with all the valid SAML Request Ids that the caller of the API + // has for the current user. + Ids []string `json:"ids"` + // QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, + // this field must be set to the query string of the redirect URI. + QueryString *string `json:"query_string,omitempty"` + // Realm The name of the SAML realm in Elasticsearch for which the configuration is + // used to verify the logout response. + Realm string `json:"realm"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Samlcompletelogout request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Content = &o + + case "ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + s.Ids = append(s.Ids, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + } + + case "query_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryString = &o + + case "realm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Realm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Realm = o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/saml_complete_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/saml_complete_logout.go new file mode 100644 index 000000000..b5f797d21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout/saml_complete_logout.go @@ -0,0 +1,373 
@@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Logout of SAML completely. +// +// Verifies the logout response sent from the SAML IdP. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. +// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. +package samlcompletelogout + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SamlCompleteLogout struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSamlCompleteLogout type alias for index. +type NewSamlCompleteLogout func() *SamlCompleteLogout + +// NewSamlCompleteLogoutFunc returns a new instance of SamlCompleteLogout with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSamlCompleteLogoutFunc(tp elastictransport.Interface) NewSamlCompleteLogout { + return func() *SamlCompleteLogout { + n := New(tp) + + return n + } +} + +// Logout of SAML completely. +// +// Verifies the logout response sent from the SAML IdP. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The SAML IdP may send a logout response back to the SP after handling the +// SP-initiated SAML Single Logout. +// This API verifies the response by ensuring the content is relevant and +// validating its signature. 
+// An empty response is returned if the verification process is successful. +// The response can be sent by the IdP with either the HTTP-Redirect or the +// HTTP-Post binding. +// The caller of this API must prepare the request accordingly so that this API +// can handle either of them. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout +func New(tp elastictransport.Interface) *SamlCompleteLogout { + r := &SamlCompleteLogout{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SamlCompleteLogout) Raw(raw io.Reader) *SamlCompleteLogout { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SamlCompleteLogout) Request(req *Request) *SamlCompleteLogout { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SamlCompleteLogout) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SamlCompleteLogout: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("complete_logout") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SamlCompleteLogout) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_complete_logout") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_complete_logout") + if reader := instrument.RecordRequestBody(ctx, "security.saml_complete_logout", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_complete_logout") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlCompleteLogout query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Header set a key, value pair in the SamlCompleteLogout headers map. +func (r *SamlCompleteLogout) Header(key, value string) *SamlCompleteLogout { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlCompleteLogout) ErrorTrace(errortrace bool) *SamlCompleteLogout { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SamlCompleteLogout) FilterPath(filterpaths ...string) *SamlCompleteLogout { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SamlCompleteLogout) Human(human bool) *SamlCompleteLogout { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SamlCompleteLogout) Pretty(pretty bool) *SamlCompleteLogout { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If the SAML IdP sends the logout response with the HTTP-Post binding, this +// field must be set to the value of the SAMLResponse form parameter from the +// logout response. +// API name: content +func (r *SamlCompleteLogout) Content(content string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Content = &content + + return r +} + +// A JSON array with all the valid SAML Request Ids that the caller of the API +// has for the current user. 
+// API name: ids +func (r *SamlCompleteLogout) Ids(ids ...string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Ids = ids + + return r +} + +// If the SAML IdP sends the logout response with the HTTP-Redirect binding, +// this field must be set to the query string of the redirect URI. +// API name: query_string +func (r *SamlCompleteLogout) QueryString(querystring string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryString = &querystring + + return r +} + +// The name of the SAML realm in Elasticsearch for which the configuration is +// used to verify the logout response. +// API name: realm +func (r *SamlCompleteLogout) Realm(realm string) *SamlCompleteLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = realm + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/request.go new file mode 100644 index 000000000..20959bd67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/request.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlinvalidate + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package samlinvalidate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_invalidate/Request.ts#L22-L61 +type Request struct { + + // Acs The Assertion Consumer Service URL that matches the one of the SAML realm in + // Elasticsearch that should be used. You must specify either this parameter or + // the `realm` parameter. + Acs *string `json:"acs,omitempty"` + // QueryString The query part of the URL that the user was redirected to by the SAML IdP to + // initiate the Single Logout. + // This query should include a single parameter named `SAMLRequest` that + // contains a SAML logout request that is deflated and Base64 encoded. + // If the SAML IdP has signed the logout request, the URL should include two + // extra parameters named `SigAlg` and `Signature` that contain the algorithm + // used for the signature and the signature value itself. 
+ // In order for Elasticsearch to be able to verify the IdP's signature, the + // value of the `query_string` field must be an exact match to the string + // provided by the browser. + // The client application must not attempt to parse or process the string in any + // way. + QueryString string `json:"query_string"` + // Realm The name of the SAML realm in Elasticsearch the configuration. You must + // specify either this parameter or the `acs` parameter. + Realm *string `json:"realm,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Samlinvalidate request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/response.go new file mode 100644 index 000000000..f02f7ec61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlinvalidate + +// Response holds the response body struct for the package samlinvalidate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_invalidate/Response.ts#L22-L37 +type Response struct { + + // Invalidated The number of tokens that were invalidated as part of this logout. + Invalidated int `json:"invalidated"` + // Realm The realm name of the SAML realm in Elasticsearch that authenticated the + // user. + Realm string `json:"realm"` + // Redirect A SAML logout response as a parameter so that the user can be redirected back + // to the SAML IdP. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/saml_invalidate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/saml_invalidate.go new file mode 100644 index 000000000..cb4511215 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate/saml_invalidate.go @@ -0,0 +1,422 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Invalidate SAML. +// +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. +// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. +package samlinvalidate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SamlInvalidate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSamlInvalidate type alias for index. +type NewSamlInvalidate func() *SamlInvalidate + +// NewSamlInvalidateFunc returns a new instance of SamlInvalidate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSamlInvalidateFunc(tp elastictransport.Interface) NewSamlInvalidate { + return func() *SamlInvalidate { + n := New(tp) + + return n + } +} + +// Invalidate SAML. +// +// Submit a SAML LogoutRequest message to Elasticsearch for consumption. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// The logout request comes from the SAML IdP during an IdP initiated Single +// Logout. +// The custom web application can use this API to have Elasticsearch process the +// `LogoutRequest`. 
+// After successful validation of the request, Elasticsearch invalidates the +// access token and refresh token that corresponds to that specific SAML +// principal and provides a URL that contains a SAML LogoutResponse message. +// Thus the user can be redirected back to their IdP. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate +func New(tp elastictransport.Interface) *SamlInvalidate { + r := &SamlInvalidate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SamlInvalidate) Raw(raw io.Reader) *SamlInvalidate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SamlInvalidate) Request(req *Request) *SamlInvalidate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *SamlInvalidate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SamlInvalidate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("invalidate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r SamlInvalidate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_invalidate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_invalidate") + if reader := instrument.RecordRequestBody(ctx, "security.saml_invalidate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_invalidate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlInvalidate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a samlinvalidate.Response +func (r SamlInvalidate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_invalidate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SamlInvalidate headers map. +func (r *SamlInvalidate) Header(key, value string) *SamlInvalidate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlInvalidate) ErrorTrace(errortrace bool) *SamlInvalidate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *SamlInvalidate) FilterPath(filterpaths ...string) *SamlInvalidate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SamlInvalidate) Human(human bool) *SamlInvalidate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SamlInvalidate) Pretty(pretty bool) *SamlInvalidate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The Assertion Consumer Service URL that matches the one of the SAML realm in +// Elasticsearch that should be used. You must specify either this parameter or +// the `realm` parameter. +// API name: acs +func (r *SamlInvalidate) Acs(acs string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Acs = &acs + + return r +} + +// The query part of the URL that the user was redirected to by the SAML IdP to +// initiate the Single Logout. +// This query should include a single parameter named `SAMLRequest` that +// contains a SAML logout request that is deflated and Base64 encoded. +// If the SAML IdP has signed the logout request, the URL should include two +// extra parameters named `SigAlg` and `Signature` that contain the algorithm +// used for the signature and the signature value itself. +// In order for Elasticsearch to be able to verify the IdP's signature, the +// value of the `query_string` field must be an exact match to the string +// provided by the browser. +// The client application must not attempt to parse or process the string in any +// way. +// API name: query_string +func (r *SamlInvalidate) QueryString(querystring string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.QueryString = querystring + + return r +} + +// The name of the SAML realm in Elasticsearch the configuration. You must +// specify either this parameter or the `acs` parameter. +// API name: realm +func (r *SamlInvalidate) Realm(realm string) *SamlInvalidate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/request.go new file mode 100644 index 000000000..3cb8fda94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/request.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samllogout + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package samllogout +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_logout/Request.ts#L22-L57 +type Request struct { + + // RefreshToken The refresh token that was returned as a response to calling the SAML + // authenticate API. + // Alternatively, the most recent refresh token that was received after + // refreshing the original access token. + RefreshToken *string `json:"refresh_token,omitempty"` + // Token The access token that was returned as a response to calling the SAML + // authenticate API. + // Alternatively, the most recent token that was received after refreshing the + // original one by using a `refresh_token`. + Token string `json:"token"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Samllogout request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/response.go new file mode 100644 index 000000000..e8e70faaf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
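Tying the samlinvalidate pieces together: a sketch of a Single Logout endpoint that forwards an IdP-initiated LogoutRequest to Elasticsearch and then redirects the browser back to the IdP. The realm name and listen address are assumptions; the essential point, which the QueryString field comment above stresses, is that the browser's query string is passed through byte-for-byte so Elasticsearch can verify the IdP's signature.

```go
package main

import (
	"context"
	"log"
	"net/http"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// The IdP redirects the browser here with SAMLRequest (and, if signed,
	// SigAlg and Signature) in the query string.
	http.HandleFunc("/saml/slo", func(w http.ResponseWriter, r *http.Request) {
		res, err := es.Security.SamlInvalidate().
			QueryString(r.URL.RawQuery). // must be passed through unmodified
			Realm("saml1").              // hypothetical realm name; Acs(...) is the alternative
			Do(context.Background())
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		log.Printf("invalidated %d token(s) in realm %s", res.Invalidated, res.Realm)
		// res.Redirect carries the SAML LogoutResponse back to the IdP.
		http.Redirect(w, r, res.Redirect, http.StatusFound)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```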
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samllogout + +// Response holds the response body struct for the package samllogout +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_logout/Response.ts#L20-L28 +type Response struct { + + // Redirect A URL that contains a SAML logout request as a parameter. + // You can use this URL to be redirected back to the SAML IdP and to initiate + // Single Logout. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/saml_logout.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/saml_logout.go new file mode 100644 index 000000000..3ef393b8f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout/saml_logout.go @@ -0,0 +1,397 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Logout of SAML. +// +// Submits a request to invalidate an access token and refresh token. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. +// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). +package samllogout + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type SamlLogout struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewSamlLogout type alias for index. 
+type NewSamlLogout func() *SamlLogout + +// NewSamlLogoutFunc returns a new instance of SamlLogout with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewSamlLogoutFunc(tp elastictransport.Interface) NewSamlLogout { + return func() *SamlLogout { + n := New(tp) + + return n + } +} + +// Logout of SAML. +// +// Submits a request to invalidate an access token and refresh token. +// +// NOTE: This API is intended for use by custom web applications other than +// Kibana. +// If you are using Kibana, refer to the documentation for configuring SAML +// single-sign-on on the Elastic Stack. +// +// This API invalidates the tokens that were generated for a user by the SAML +// authenticate API. +// If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP +// supports this, the Elasticsearch response contains a URL to redirect the user +// to the IdP that contains a SAML logout request (starting an SP-initiated SAML +// Single Logout). +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout +func New(tp elastictransport.Interface) *SamlLogout { + r := &SamlLogout{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *SamlLogout) Raw(raw io.Reader) *SamlLogout { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *SamlLogout) Request(req *Request) *SamlLogout { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *SamlLogout) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SamlLogout: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("logout") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SamlLogout) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_logout") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_logout") + if reader := instrument.RecordRequestBody(ctx, "security.saml_logout", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_logout") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlLogout query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a samllogout.Response +func (r SamlLogout) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_logout") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SamlLogout headers map. +func (r *SamlLogout) Header(key, value string) *SamlLogout { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlLogout) ErrorTrace(errortrace bool) *SamlLogout { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SamlLogout) FilterPath(filterpaths ...string) *SamlLogout { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SamlLogout) Human(human bool) *SamlLogout { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SamlLogout) Pretty(pretty bool) *SamlLogout { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The refresh token that was returned as a response to calling the SAML +// authenticate API. +// Alternatively, the most recent refresh token that was received after +// refreshing the original access token. +// API name: refresh_token +func (r *SamlLogout) RefreshToken(refreshtoken string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RefreshToken = &refreshtoken + + return r +} + +// The access token that was returned as a response to calling the SAML +// authenticate API. +// Alternatively, the most recent token that was received after refreshing the +// original one by using a `refresh_token`. 
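+//
+// A minimal usage sketch, assuming tp is a configured
+// elastictransport.Interface and both token values come from a prior SAML
+// authenticate call (all identifiers here are illustrative):
+//
+//	res, err := samllogout.New(tp).
+//		Token(accessToken).
+//		RefreshToken(refreshToken).
+//		Do(context.Background())
+//	if err == nil && res.Redirect != "" {
+//		// Send the browser to res.Redirect to finish SP-initiated Single Logout.
+//	}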
+// API name: token +func (r *SamlLogout) Token(token string) *SamlLogout { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Token = token + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/request.go new file mode 100644 index 000000000..feb53fea1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/request.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlprepareauthentication + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package samlprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_prepare_authentication/Request.ts#L22-L67 +type Request struct { + + // Acs The Assertion Consumer Service URL that matches the one of the SAML realms in + // Elasticsearch. + // The realm is used to generate the authentication request. You must specify + // either this parameter or the `realm` parameter. + Acs *string `json:"acs,omitempty"` + // Realm The name of the SAML realm in Elasticsearch for which the configuration is + // used to generate the authentication request. + // You must specify either this parameter or the `acs` parameter. + Realm *string `json:"realm,omitempty"` + // RelayState A string that will be included in the redirect URL that this API returns as + // the `RelayState` query parameter. + // If the Authentication Request is signed, this value is used as part of the + // signature computation. 
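+	// The IdP conventionally echoes `RelayState` back unchanged, which lets the
+	// application restore its pre-login state after the redirect (standard SAML
+	// behavior, not specific to this client).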
+ RelayState *string `json:"relay_state,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Samlprepareauthentication request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/response.go new file mode 100644 index 000000000..8520c48ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlprepareauthentication + +// Response holds the response body struct for the package samlprepareauthentication +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_prepare_authentication/Response.ts#L22-L37 +type Response struct { + + // Id A unique identifier for the SAML Request to be stored by the caller of the + // API. + Id string `json:"id"` + // Realm The name of the Elasticsearch realm that was used to construct the + // authentication request. + Realm string `json:"realm"` + // Redirect The URL to redirect the user to. + Redirect string `json:"redirect"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go new file mode 100644 index 000000000..72edb4aed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go @@ -0,0 +1,428 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Prepare SAML authentication.
+//
+// Create a SAML authentication request (`<AuthnRequest>`) as a URL string based
+// on the configuration of the respective SAML realm in Elasticsearch.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// This API returns a URL pointing to the SAML Identity Provider.
+// You can use the URL to redirect the browser of the user in order to continue
+// the authentication process.
+// The URL includes a single parameter named `SAMLRequest`, which contains a
+// SAML Authentication request that is deflated and Base64 encoded.
+// If the configuration dictates that SAML authentication requests should be
+// signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+// These parameters contain the algorithm used for the signature and the
+// signature value itself.
+// It also returns a random string that uniquely identifies this SAML
+// Authentication request.
+// The caller of this API needs to store this identifier as it needs to be used
+// in a following step of the authentication process.
+package samlprepareauthentication
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SamlPrepareAuthentication struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSamlPrepareAuthentication type alias for index.
+type NewSamlPrepareAuthentication func() *SamlPrepareAuthentication
+
+// NewSamlPrepareAuthenticationFunc returns a new instance of SamlPrepareAuthentication with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewSamlPrepareAuthenticationFunc(tp elastictransport.Interface) NewSamlPrepareAuthentication {
+	return func() *SamlPrepareAuthentication {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Prepare SAML authentication.
+//
+// Create a SAML authentication request (`<AuthnRequest>`) as a URL string based
+// on the configuration of the respective SAML realm in Elasticsearch.
+//
+// NOTE: This API is intended for use by custom web applications other than
+// Kibana.
+// If you are using Kibana, refer to the documentation for configuring SAML
+// single-sign-on on the Elastic Stack.
+//
+// This API returns a URL pointing to the SAML Identity Provider.
+// You can use the URL to redirect the browser of the user in order to continue
+// the authentication process.
+// The URL includes a single parameter named `SAMLRequest`, which contains a
+// SAML Authentication request that is deflated and Base64 encoded.
+// If the configuration dictates that SAML authentication requests should be
+// signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+// These parameters contain the algorithm used for the signature and the
+// signature value itself.
+// It also returns a random string that uniquely identifies this SAML
+// Authentication request.
+// The caller of this API needs to store this identifier as it needs to be used
+// in a following step of the authentication process.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication
+func New(tp elastictransport.Interface) *SamlPrepareAuthentication {
+	r := &SamlPrepareAuthentication{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed on to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *SamlPrepareAuthentication) Raw(raw io.Reader) *SamlPrepareAuthentication {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *SamlPrepareAuthentication) Request(req *Request) *SamlPrepareAuthentication {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *SamlPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SamlPrepareAuthentication: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("prepare") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SamlPrepareAuthentication) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_prepare_authentication") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_prepare_authentication") + if reader := instrument.RecordRequestBody(ctx, "security.saml_prepare_authentication", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_prepare_authentication") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlPrepareAuthentication query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a samlprepareauthentication.Response +func (r SamlPrepareAuthentication) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_prepare_authentication") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = 
providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the SamlPrepareAuthentication headers map. +func (r *SamlPrepareAuthentication) Header(key, value string) *SamlPrepareAuthentication { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlPrepareAuthentication) ErrorTrace(errortrace bool) *SamlPrepareAuthentication { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SamlPrepareAuthentication) FilterPath(filterpaths ...string) *SamlPrepareAuthentication { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *SamlPrepareAuthentication) Human(human bool) *SamlPrepareAuthentication { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *SamlPrepareAuthentication) Pretty(pretty bool) *SamlPrepareAuthentication { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The Assertion Consumer Service URL that matches the one of the SAML realms in +// Elasticsearch. +// The realm is used to generate the authentication request. You must specify +// either this parameter or the `realm` parameter. +// API name: acs +func (r *SamlPrepareAuthentication) Acs(acs string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Acs = &acs + + return r +} + +// The name of the SAML realm in Elasticsearch for which the configuration is +// used to generate the authentication request. +// You must specify either this parameter or the `acs` parameter. 
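+//
+// A minimal usage sketch, assuming tp is a configured
+// elastictransport.Interface and "saml1" is the realm name (both
+// illustrative):
+//
+//	res, err := samlprepareauthentication.New(tp).
+//		Realm("saml1").
+//		Do(context.Background())
+//	// res.Redirect is the IdP URL to send the browser to; keep res.Id for
+//	// the later SAML authenticate step.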
+// API name: realm +func (r *SamlPrepareAuthentication) Realm(realm string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Realm = &realm + + return r +} + +// A string that will be included in the redirect URL that this API returns as +// the `RelayState` query parameter. +// If the Authentication Request is signed, this value is used as part of the +// signature computation. +// API name: relay_state +func (r *SamlPrepareAuthentication) RelayState(relaystate string) *SamlPrepareAuthentication { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RelayState = &relaystate + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/response.go new file mode 100644 index 000000000..77494c4d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package samlserviceprovidermetadata + +// Response holds the response body struct for the package samlserviceprovidermetadata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/saml_service_provider_metadata/Response.ts#L20-L27 +type Response struct { + + // Metadata An XML string that contains a SAML Service Provider's metadata for the realm. + Metadata string `json:"metadata"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go new file mode 100644 index 000000000..dded854ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Create SAML service provider metadata.
+//
+// Generate SAML metadata for a SAML 2.0 Service Provider.
+//
+// The SAML 2.0 specification provides a mechanism for Service Providers to
+// describe their capabilities and configuration using a metadata file.
+// This API generates Service Provider metadata based on the configuration of a
+// SAML realm in Elasticsearch.
+package samlserviceprovidermetadata
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	realmnameMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SamlServiceProviderMetadata struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	realmname string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSamlServiceProviderMetadata type alias for index.
+type NewSamlServiceProviderMetadata func(realmname string) *SamlServiceProviderMetadata
+
+// NewSamlServiceProviderMetadataFunc returns a new instance of SamlServiceProviderMetadata with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewSamlServiceProviderMetadataFunc(tp elastictransport.Interface) NewSamlServiceProviderMetadata {
+	return func(realmname string) *SamlServiceProviderMetadata {
+		n := New(tp)
+
+		n._realmname(realmname)
+
+		return n
+	}
+}
+
+// Create SAML service provider metadata.
+//
+// Generate SAML metadata for a SAML 2.0 Service Provider.
+//
+// The SAML 2.0 specification provides a mechanism for Service Providers to
+// describe their capabilities and configuration using a metadata file.
+// This API generates Service Provider metadata based on the configuration of a
+// SAML realm in Elasticsearch.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata
+func New(tp elastictransport.Interface) *SamlServiceProviderMetadata {
+	r := &SamlServiceProviderMetadata{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
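+// For this endpoint the built request is a body-less GET of
+// /_security/saml/metadata/<realmname>, so it is also a quick way to verify
+// the generated path before calling Perform or Do.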
+func (r *SamlServiceProviderMetadata) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == realmnameMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("saml") + path.WriteString("/") + path.WriteString("metadata") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "realmname", r.realmname) + } + path.WriteString(r.realmname) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SamlServiceProviderMetadata) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.saml_service_provider_metadata") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.saml_service_provider_metadata") + if reader := instrument.RecordRequestBody(ctx, "security.saml_service_provider_metadata", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.saml_service_provider_metadata") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SamlServiceProviderMetadata query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a samlserviceprovidermetadata.Response +func (r SamlServiceProviderMetadata) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_service_provider_metadata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r SamlServiceProviderMetadata) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.saml_service_provider_metadata") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the SamlServiceProviderMetadata query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the SamlServiceProviderMetadata headers map. +func (r *SamlServiceProviderMetadata) Header(key, value string) *SamlServiceProviderMetadata { + r.headers.Set(key, value) + + return r +} + +// RealmName The name of the SAML realm in Elasticsearch. +// API Name: realmname +func (r *SamlServiceProviderMetadata) _realmname(realmname string) *SamlServiceProviderMetadata { + r.paramSet |= realmnameMask + r.realmname = realmname + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *SamlServiceProviderMetadata) ErrorTrace(errortrace bool) *SamlServiceProviderMetadata { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *SamlServiceProviderMetadata) FilterPath(filterpaths ...string) *SamlServiceProviderMetadata { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human
+func (r *SamlServiceProviderMetadata) Human(human bool) *SamlServiceProviderMetadata {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *SamlServiceProviderMetadata) Pretty(pretty bool) *SamlServiceProviderMetadata {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/request.go
new file mode 100644
index 000000000..b47a8d2df
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/request.go
@@ -0,0 +1,144 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package suggestuserprofiles
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package suggestuserprofiles
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/suggest_user_profiles/Request.ts#L24-L81
+type Request struct {
+
+	// Data A comma-separated list of filters for the `data` field of the profile
+	// document.
+	// To return all content use `data=*`.
+	// To return a subset of content, use `data=<key>` to retrieve content nested
+	// under the specified `<key>`.
+	// By default, the API returns no `data` content.
+	// It is an error to specify `data` as both the query parameter and the request
+	// body field.
+	Data []string `json:"data,omitempty"`
+	// Hint Extra search criteria to improve relevance of the suggestion result.
+	// Profiles matching the specified hint are ranked higher in the response.
+	// Profiles not matching the hint aren't excluded from the response as long as
+	// the profile matches the `name` field query.
+	Hint *types.Hint `json:"hint,omitempty"`
+	// Name A query string used to match name-related fields in user profile documents.
+	// Name-related fields are the user's `username`, `full_name`, and `email`.
+	Name *string `json:"name,omitempty"`
+	// Size The number of profiles to return.
+ Size *int64 `json:"size,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Suggestuserprofiles request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + s.Data = append(s.Data, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + } + + case "hint": + if err := dec.Decode(&s.Hint); err != nil { + return fmt.Errorf("%s | %w", "Hint", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int64(v) + s.Size = &f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/response.go new file mode 100644 index 000000000..ab046ec6c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package suggestuserprofiles + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package suggestuserprofiles +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/suggest_user_profiles/Response.ts#L29-L44 +type Response struct { + + // Profiles A list of profile documents, ordered by relevance, that match the search + // criteria. + Profiles []types.UserProfile `json:"profiles"` + // Took The number of milliseconds it took Elasticsearch to run the request. + Took int64 `json:"took"` + // Total Metadata about the number of matching profiles. + Total types.TotalUserProfiles `json:"total"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/suggest_user_profiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/suggest_user_profiles.go new file mode 100644 index 000000000..a6d90843f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles/suggest_user_profiles.go @@ -0,0 +1,416 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Suggest a user profile. +// +// Get suggestions for user profiles that match specified search criteria. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +package suggestuserprofiles + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
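+// It is a sentinel error: callers can use errors.Is(err, ErrBuildPath) to
+// distinguish client-side path-construction failures from transport or
+// server errors.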
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type SuggestUserProfiles struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewSuggestUserProfiles type alias for index.
+type NewSuggestUserProfiles func() *SuggestUserProfiles
+
+// NewSuggestUserProfilesFunc returns a new instance of SuggestUserProfiles with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewSuggestUserProfilesFunc(tp elastictransport.Interface) NewSuggestUserProfiles {
+	return func() *SuggestUserProfiles {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Suggest a user profile.
+//
+// Get suggestions for user profiles that match specified search criteria.
+//
+// NOTE: The user profile feature is designed only for use by Kibana and
+// Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+// Individual users and external applications should not call this API directly.
+// Elastic reserves the right to change or remove this feature in future
+// releases without prior notice.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles
+func New(tp elastictransport.Interface) *SuggestUserProfiles {
+	r := &SuggestUserProfiles{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed on to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *SuggestUserProfiles) Raw(raw io.Reader) *SuggestUserProfiles {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *SuggestUserProfiles) Request(req *Request) *SuggestUserProfiles {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
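+// When a body has been set via Request or Raw, the built request also carries
+// the serialized payload and the application/vnd.elasticsearch+json
+// compatible-with=9 Content-Type header.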
+func (r *SuggestUserProfiles) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for SuggestUserProfiles: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + path.WriteString("_suggest") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r SuggestUserProfiles) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.suggest_user_profiles") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.suggest_user_profiles") + if reader := instrument.RecordRequestBody(ctx, "security.suggest_user_profiles", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.suggest_user_profiles") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the SuggestUserProfiles query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a suggestuserprofiles.Response +func (r SuggestUserProfiles) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.suggest_user_profiles") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := 
r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the SuggestUserProfiles headers map.
+func (r *SuggestUserProfiles) Header(key, value string) *SuggestUserProfiles {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *SuggestUserProfiles) ErrorTrace(errortrace bool) *SuggestUserProfiles {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *SuggestUserProfiles) FilterPath(filterpaths ...string) *SuggestUserProfiles {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *SuggestUserProfiles) Human(human bool) *SuggestUserProfiles {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *SuggestUserProfiles) Pretty(pretty bool) *SuggestUserProfiles {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// A comma-separated list of filters for the `data` field of the profile
+// document.
+// To return all content use `data=*`.
+// To return a subset of content, use `data=<key>` to retrieve content nested
+// under the specified `<key>`.
+// By default, the API returns no `data` content.
+// It is an error to specify `data` as both the query parameter and the request
+// body field.
+// API name: data
+func (r *SuggestUserProfiles) Data(data ...string) *SuggestUserProfiles {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Data = make([]string, len(data))
+	r.req.Data = data
+
+	return r
+}
+
+// Extra search criteria to improve relevance of the suggestion result.
+// Profiles matching the specified hint are ranked higher in the response.
+// Profiles not matching the hint aren't excluded from the response as long as +// the profile matches the `name` field query. +// API name: hint +func (r *SuggestUserProfiles) Hint(hint types.HintVariant) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Hint = hint.HintCaster() + + return r +} + +// A query string used to match name-related fields in user profile documents. +// Name-related fields are the user's `username`, `full_name`, and `email`. +// API name: name +func (r *SuggestUserProfiles) Name(name string) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// The number of profiles to return. +// API name: size +func (r *SuggestUserProfiles) Size(size int64) *SuggestUserProfiles { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/request.go new file mode 100644 index 000000000..3a70c2742 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/request.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateapikey + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updateapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_api_key/Request.ts#L26-L91 +type Request struct { + + // Expiration The expiration time for the API key. + // By default, API keys never expire. + // This property can be omitted to leave the expiration unchanged. + Expiration types.Duration `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports a nested data structure. + // Within the metadata object, keys beginning with `_` are reserved for system + // usage. + // When specified, this value fully replaces the metadata previously associated + // with the API key. + Metadata types.Metadata `json:"metadata,omitempty"` + // RoleDescriptors The role descriptors to assign to this API key. 
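The suggest-user-profiles builder vendored above is normally reached through the typed client rather than constructed directly. Below is a minimal usage sketch, not part of the vendored diff: the `es.Security.SuggestUserProfiles()` accessor of `elasticsearch.NewTypedClient`, the localhost address, and the queried name are assumptions; `Name`, `Size`, `Data`, and `Do` are the builder methods defined above.

```go
package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// Assumed: a reachable local cluster with appropriate credentials.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Suggest up to five profiles whose name-related fields
	// (username, full_name, email) match "jack", returning the
	// full `data` payload of each hit.
	res, err := es.Security.SuggestUserProfiles().
		Name("jack").
		Size(5).
		Data("*").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("suggest_user_profiles response: %+v\n", res)
}
```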
+ // The API key's effective permissions are an intersection of its assigned + // privileges and the point in time snapshot of permissions of the owner user. + // You can assign new privileges by specifying them in this parameter. + // To remove assigned privileges, you can supply an empty `role_descriptors` + // parameter, that is to say, an empty object `{}`. + // If an API key has no assigned privileges, it inherits the owner user's full + // permissions. + // The snapshot of the owner's permissions is always updated, whether you supply + // the `role_descriptors` parameter or not. + // The structure of a role descriptor is the same as the request for the create + // API keys API. + RoleDescriptors map[string]types.RoleDescriptor `json:"role_descriptors,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + RoleDescriptors: make(map[string]types.RoleDescriptor, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]types.RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/response.go new file mode 100644 index 000000000..8f11649df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateapikey + +// Response holds the response body struct for the package updateapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_api_key/Response.ts#L20-L28 +type Response struct { + + // Updated If `true`, the API key was updated. + // If `false`, the API key didn't change because no change was detected. + Updated bool `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/update_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/update_api_key.go new file mode 100644 index 000000000..1428d0420 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey/update_api_key.go @@ -0,0 +1,496 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update an API key. +// +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. +// Users can only update API keys that they created or that were granted to +// them. +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API +// Key APIs. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. +// +// The access scope of an API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the +// API key was created or last modified. 
+package updateapikey + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateApiKey type alias for index. +type NewUpdateApiKey func(id string) *UpdateApiKey + +// NewUpdateApiKeyFunc returns a new instance of UpdateApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateApiKeyFunc(tp elastictransport.Interface) NewUpdateApiKey { + return func(id string) *UpdateApiKey { + n := New(tp) + + n._id(id) + + return n + } +} + +// Update an API key. +// +// Update attributes of an existing API key. +// This API supports updates to an API key's access scope, expiration, and +// metadata. +// +// To use this API, you must have at least the `manage_own_api_key` cluster +// privilege. +// Users can only update API keys that they created or that were granted to +// them. +// To update another user’s API key, use the `run_as` feature to submit a +// request on behalf of another user. +// +// IMPORTANT: It's not possible to use an API key as the authentication +// credential for this API. The owner user’s credentials are required. +// +// Use this API to update API keys created by the create API key or grant API +// Key APIs. +// If you need to apply the same update to many API keys, you can use the bulk +// update API keys API to reduce overhead. +// It's not possible to update expired API keys or API keys that have been +// invalidated by the invalidate API key API. +// +// The access scope of an API key is derived from the `role_descriptors` you +// specify in the request and a snapshot of the owner user's permissions at the +// time of the request. +// The snapshot of the owner's permissions is updated automatically on every +// call. +// +// IMPORTANT: If you don't specify `role_descriptors` in the request, a call to +// this API might still change the API key's access scope. +// This change can occur if the owner user's permissions have changed since the +// API key was created or last modified. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key +func New(tp elastictransport.Interface) *UpdateApiKey { + r := &UpdateApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *UpdateApiKey) Raw(raw io.Reader) *UpdateApiKey { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateApiKey) Request(req *Request) *UpdateApiKey { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.update_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.update_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateapikey.Response +func (r UpdateApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateApiKey headers map. +func (r *UpdateApiKey) Header(key, value string) *UpdateApiKey { + r.headers.Set(key, value) + + return r +} + +// Id The ID of the API key to update. +// API Name: id +func (r *UpdateApiKey) _id(id string) *UpdateApiKey { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateApiKey) ErrorTrace(errortrace bool) *UpdateApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *UpdateApiKey) FilterPath(filterpaths ...string) *UpdateApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateApiKey) Human(human bool) *UpdateApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateApiKey) Pretty(pretty bool) *UpdateApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The expiration time for the API key. +// By default, API keys never expire. +// This property can be omitted to leave the expiration unchanged. +// API name: expiration +func (r *UpdateApiKey) Expiration(duration types.DurationVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports a nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// When specified, this value fully replaces the metadata previously associated +// with the API key. +// API name: metadata +func (r *UpdateApiKey) Metadata(metadata types.MetadataVariant) *UpdateApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The role descriptors to assign to this API key. +// The API key's effective permissions are an intersection of its assigned +// privileges and the point in time snapshot of permissions of the owner user. +// You can assign new privileges by specifying them in this parameter. +// To remove assigned privileges, you can supply an empty `role_descriptors` +// parameter, that is to say, an empty object `{}`. +// If an API key has no assigned privileges, it inherits the owner user's full +// permissions. +// The snapshot of the owner's permissions is always updated, whether you supply +// the `role_descriptors` parameter or not. +// The structure of a role descriptor is the same as the request for the create +// API keys API. 
+// API name: role_descriptors
+func (r *UpdateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *UpdateApiKey {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.RoleDescriptors = roledescriptors
+	return r
+}
+
+// AddRoleDescriptor adds a single role descriptor to the request's
+// `role_descriptors` map, initializing the request and the map as needed.
+func (r *UpdateApiKey) AddRoleDescriptor(key string, value types.RoleDescriptorVariant) *UpdateApiKey {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Guard against a nil map before assigning into it: the previous
+	// temporary-map dance left the temporary nil (and the field reset to
+	// nil) when a caller-supplied Request had no role descriptors, which
+	// would panic on assignment.
+	if r.req.RoleDescriptors == nil {
+		r.req.RoleDescriptors = make(map[string]types.RoleDescriptor)
+	}
+
+	r.req.RoleDescriptors[key] = *value.RoleDescriptorCaster()
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/request.go
new file mode 100644
index 000000000..aaa43ff12
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/request.go
@@ -0,0 +1,109 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package updatecrossclusterapikey
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package updatecrossclusterapikey
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyRequest.ts#L25-L85
+type Request struct {
+
+	// Access The access to be granted to this API key.
+	// The access is composed of permissions for cross cluster search and cross
+	// cluster replication.
+	// At least one of them must be specified.
+	// When specified, the new access assignment fully replaces the previously
+	// assigned access.
+	Access types.Access `json:"access"`
+	// Expiration The expiration time for the API key.
+	// By default, API keys never expire. This property can be omitted to leave the
+	// value unchanged.
+	Expiration types.Duration `json:"expiration,omitempty"`
+	// Metadata Arbitrary metadata that you want to associate with the API key.
+	// It supports nested data structure.
+	// Within the metadata object, keys beginning with `_` are reserved for system
+	// usage.
+	// When specified, this information fully replaces metadata previously
+	// associated with the API key.
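The UpdateApiKey builder above accepts either a typed `Request` or, via `Raw`, a prepared JSON body that takes precedence over it. Here is a hedged sketch of the raw path, not part of the vendored diff: the `es` typed client is assumed as in the earlier example, the key ID and role descriptor are placeholders, and the body shape follows the public update-API-key documentation. It also shows how a non-2xx response surfaces as the decoded `*types.ElasticsearchError` returned by `Do`.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}

	// Raw JSON takes precedence over a typed Request, per the Raw doc comment.
	body := strings.NewReader(`{
		"role_descriptors": {
			"read-only": { "indices": [ { "names": ["logs-*"], "privileges": ["read"] } ] }
		},
		"metadata": { "environment": "production" }
	}`)

	// "VuaCfGcBCdbkQm-e5aOx" is a placeholder API key ID.
	res, err := es.Security.UpdateApiKey("VuaCfGcBCdbkQm-e5aOx").
		Raw(body).
		Do(context.Background())
	if err != nil {
		// Non-2xx responses come back as the *types.ElasticsearchError
		// decoded by Do; transport failures are plain errors.
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			log.Fatalf("update rejected: %s", esErr.Error())
		}
		log.Fatal(err)
	}
	fmt.Println("updated:", res.Updated)
}
```

The typed path is equivalent: build a request with `updateapikey.NewRequest()` (or load one with `FromJSON`) and attach it with `.Request(req)`.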
+ Metadata types.Metadata `json:"metadata,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatecrossclusterapikey request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/response.go new file mode 100644 index 000000000..0611abf87 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatecrossclusterapikey + +// Response holds the response body struct for the package updatecrossclusterapikey +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_cross_cluster_api_key/UpdateCrossClusterApiKeyResponse.ts#L20-L28 +type Response struct { + + // Updated If `true`, the API key was updated. + // If `false`, the API key didn’t change because no change was detected. 
+	Updated bool `json:"updated"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go
new file mode 100644
index 000000000..05f754b00
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey/update_cross_cluster_api_key.go
@@ -0,0 +1,467 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Update a cross-cluster API key.
+//
+// Update the attributes of an existing cross-cluster API key, which is used for
+// API key based remote cluster access.
+//
+// To use this API, you must have at least the `manage_security` cluster
+// privilege.
+// Users can only update API keys that they created.
+// To update another user's API key, use the `run_as` feature to submit a
+// request on behalf of another user.
+//
+// IMPORTANT: It's not possible to use an API key as the authentication
+// credential for this API.
+// To update an API key, the owner user's credentials are required.
+//
+// It's not possible to update expired API keys, or API keys that have been
+// invalidated by the invalidate API key API.
+//
+// This API supports updates to an API key's access scope, metadata, and
+// expiration.
+// The owner user's information, such as the `username` and `realm`, is also
+// updated automatically on every call.
+//
+// NOTE: This API cannot update REST API keys, which should be updated by either
+// the update API key or bulk update API keys API.
+//
+// To learn more about how to use this API, refer to the [Update cross cluster
+// API key API examples
+// page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
+package updatecrossclusterapikey
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type UpdateCrossClusterApiKey struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewUpdateCrossClusterApiKey type alias for index.
+type NewUpdateCrossClusterApiKey func(id string) *UpdateCrossClusterApiKey
+
+// NewUpdateCrossClusterApiKeyFunc returns a new instance of UpdateCrossClusterApiKey with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewUpdateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewUpdateCrossClusterApiKey {
+	return func(id string) *UpdateCrossClusterApiKey {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Update a cross-cluster API key.
+//
+// Update the attributes of an existing cross-cluster API key, which is used for
+// API key based remote cluster access.
+//
+// To use this API, you must have at least the `manage_security` cluster
+// privilege.
+// Users can only update API keys that they created.
+// To update another user's API key, use the `run_as` feature to submit a
+// request on behalf of another user.
+//
+// IMPORTANT: It's not possible to use an API key as the authentication
+// credential for this API.
+// To update an API key, the owner user's credentials are required.
+//
+// It's not possible to update expired API keys, or API keys that have been
+// invalidated by the invalidate API key API.
+//
+// This API supports updates to an API key's access scope, metadata, and
+// expiration.
+// The owner user's information, such as the `username` and `realm`, is also
+// updated automatically on every call.
+//
+// NOTE: This API cannot update REST API keys, which should be updated by either
+// the update API key or bulk update API keys API.
+//
+// To learn more about how to use this API, refer to the [Update cross cluster
+// API key API examples
+// page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples).
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key
+func New(tp elastictransport.Interface) *UpdateCrossClusterApiKey {
+	r := &UpdateCrossClusterApiKey{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *UpdateCrossClusterApiKey) Raw(raw io.Reader) *UpdateCrossClusterApiKey {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *UpdateCrossClusterApiKey) Request(req *Request) *UpdateCrossClusterApiKey {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *UpdateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateCrossClusterApiKey: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateCrossClusterApiKey) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.update_cross_cluster_api_key") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_cross_cluster_api_key") + if reader := instrument.RecordRequestBody(ctx, "security.update_cross_cluster_api_key", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_cross_cluster_api_key") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateCrossClusterApiKey query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatecrossclusterapikey.Response +func (r UpdateCrossClusterApiKey) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_cross_cluster_api_key") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateCrossClusterApiKey headers map. +func (r *UpdateCrossClusterApiKey) Header(key, value string) *UpdateCrossClusterApiKey { + r.headers.Set(key, value) + + return r +} + +// Id The ID of the cross-cluster API key to update. +// API Name: id +func (r *UpdateCrossClusterApiKey) _id(id string) *UpdateCrossClusterApiKey { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *UpdateCrossClusterApiKey) ErrorTrace(errortrace bool) *UpdateCrossClusterApiKey { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateCrossClusterApiKey) FilterPath(filterpaths ...string) *UpdateCrossClusterApiKey { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateCrossClusterApiKey) Human(human bool) *UpdateCrossClusterApiKey { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateCrossClusterApiKey) Pretty(pretty bool) *UpdateCrossClusterApiKey { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The access to be granted to this API key. +// The access is composed of permissions for cross cluster search and cross +// cluster replication. +// At least one of them must be specified. +// When specified, the new access assignment fully replaces the previously +// assigned access. +// API name: access +func (r *UpdateCrossClusterApiKey) Access(access types.AccessVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Access = *access.AccessCaster() + + return r +} + +// The expiration time for the API key. +// By default, API keys never expire. This property can be omitted to leave the +// value unchanged. +// API name: expiration +func (r *UpdateCrossClusterApiKey) Expiration(duration types.DurationVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Expiration = *duration.DurationCaster() + + return r +} + +// Arbitrary metadata that you want to associate with the API key. +// It supports nested data structure. +// Within the metadata object, keys beginning with `_` are reserved for system +// usage. +// When specified, this information fully replaces metadata previously +// associated with the API key. +// API name: metadata +func (r *UpdateCrossClusterApiKey) Metadata(metadata types.MetadataVariant) *UpdateCrossClusterApiKey { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/request.go new file mode 100644 index 000000000..46fcbf775 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/request.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
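As with the REST-key variant, the cross-cluster builder can be driven with a raw JSON body instead of the typed `Access`/`Expiration`/`Metadata` setters. A sketch under the same assumptions as the earlier examples (the key ID is a placeholder, and the `access.search[].names` body shape follows the public cross-cluster API key documentation):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}

	// Replace the key's access with search-only access to logs-* indices.
	// Per the doc comment above, `access` fully replaces the previous
	// assignment and must name search and/or replication permissions.
	body := strings.NewReader(`{
		"access": { "search": [ { "names": ["logs-*"] } ] },
		"metadata": { "purpose": "cross-cluster search" }
	}`)

	// "-dgdb5EBlpecb8FIYCMo" is a placeholder cross-cluster API key ID.
	res, err := es.Security.UpdateCrossClusterApiKey("-dgdb5EBlpecb8FIYCMo").
		Raw(body).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated:", res.Updated)
}
```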
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatesettings + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_settings/SecurityUpdateSettingsRequest.ts#L24-L71 +type Request struct { + + // Security Settings for the index used for most security configuration, including native + // realm users and roles configured with the API. + Security *types.SecuritySettings `json:"security,omitempty"` + // SecurityProfile Settings for the index used to store profile information. + SecurityProfile *types.SecuritySettings `json:"security-profile,omitempty"` + // SecurityTokens Settings for the index used to store tokens. + SecurityTokens *types.SecuritySettings `json:"security-tokens,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/response.go new file mode 100644 index 000000000..f912818f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_settings/SecurityUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/update_settings.go new file mode 100644 index 000000000..fe9f3bd73 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings/update_settings.go @@ -0,0 +1,421 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. +package updatesettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateSettings type alias for index. +type NewUpdateSettings func() *UpdateSettings + +// NewUpdateSettingsFunc returns a new instance of UpdateSettings with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { + return func() *UpdateSettings { + n := New(tp) + + return n + } +} + +// Update security index settings. +// +// Update the user-configurable settings for the security internal index +// (`.security` and associated indices). Only a subset of settings are allowed +// to be modified. This includes `index.auto_expand_replicas` and +// `index.number_of_replicas`. +// +// NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will +// be ignored during updates. +// +// If a specific index is not in use on the system and settings are provided for +// it, the request will be rejected. +// This API does not yet support configuring the settings for indices before +// they are in use. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings +func New(tp elastictransport.Interface) *UpdateSettings { + r := &UpdateSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.update_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_settings") + if reader := instrument.RecordRequestBody(ctx, "security.update_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatesettings.Response +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateSettings headers map. +func (r *UpdateSettings) Header(key, value string) *UpdateSettings { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: timeout +func (r *UpdateSettings) Timeout(duration string) *UpdateSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateSettings) Human(human bool) *UpdateSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Settings for the index used for most security configuration, including native +// realm users and roles configured with the API. +// API name: security +func (r *UpdateSettings) Security(security types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Security = security.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store profile information. +// API name: security-profile +func (r *UpdateSettings) SecurityProfile(securityprofile types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityProfile = securityprofile.SecuritySettingsCaster() + + return r +} + +// Settings for the index used to store tokens. +// API name: security-tokens +func (r *UpdateSettings) SecurityTokens(securitytokens types.SecuritySettingsVariant) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SecurityTokens = securitytokens.SecuritySettingsCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/request.go new file mode 100644 index 000000000..4f4390bc7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/request.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateuserprofiledata + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updateuserprofiledata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_user_profile_data/Request.ts#L27-L98 +type Request struct { + + // Data Non-searchable data that you want to associate with the user profile. + // This field supports a nested data structure. + // Within the `data` object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). + // The data object is not searchable, but can be retrieved with the get user + // profile API. + Data map[string]json.RawMessage `json:"data,omitempty"` + // Labels Searchable data that you want to associate with the user profile. + // This field supports a nested data structure. + // Within the labels object, top-level keys cannot begin with an underscore + // (`_`) or contain a period (`.`). + Labels map[string]json.RawMessage `json:"labels,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Data: make(map[string]json.RawMessage, 0), + Labels: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updateuserprofiledata request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/response.go new file mode 100644 index 000000000..09dd1b091 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updateuserprofiledata + +// Response holds the response body struct for the package updateuserprofiledata +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/update_user_profile_data/Response.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/update_user_profile_data.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/update_user_profile_data.go new file mode 100644 index 000000000..cbcefd860 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata/update_user_profile_data.go @@ -0,0 +1,505 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update user profile data. +// +// Update specific data for the user profile that is associated with a unique +// ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. 
+package updateuserprofiledata + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +const ( + uidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateUserProfileData struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + uid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateUserProfileData type alias for index. +type NewUpdateUserProfileData func(uid string) *UpdateUserProfileData + +// NewUpdateUserProfileDataFunc returns a new instance of UpdateUserProfileData with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateUserProfileDataFunc(tp elastictransport.Interface) NewUpdateUserProfileData { + return func(uid string) *UpdateUserProfileData { + n := New(tp) + + n._uid(uid) + + return n + } +} + +// Update user profile data. +// +// Update specific data for the user profile that is associated with a unique +// ID. +// +// NOTE: The user profile feature is designed only for use by Kibana and +// Elastic's Observability, Enterprise Search, and Elastic Security solutions. +// Individual users and external applications should not call this API directly. +// Elastic reserves the right to change or remove this feature in future +// releases without prior notice. +// +// To use this API, you must have one of the following privileges: +// +// * The `manage_user_profile` cluster privilege. +// * The `update_profile_data` global privilege for the namespaces that are +// referenced in the request. +// +// This API updates the `labels` and `data` fields of an existing user profile +// document with JSON objects. +// New keys and their values are added to the profile document and conflicting +// keys are replaced by data that's included in the request. +// +// For both labels and data, content is namespaced by the top-level fields. +// The `update_profile_data` global privilege grants privileges for updating +// only the allowed namespaces. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data +func New(tp elastictransport.Interface) *UpdateUserProfileData { + r := &UpdateUserProfileData{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateUserProfileData) Raw(raw io.Reader) *UpdateUserProfileData { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *UpdateUserProfileData) Request(req *Request) *UpdateUserProfileData { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateUserProfileData) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateUserProfileData: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == uidMask: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("profile") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "uid", r.uid) + } + path.WriteString(r.uid) + path.WriteString("/") + path.WriteString("_data") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateUserProfileData) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "security.update_user_profile_data") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "security.update_user_profile_data") + if reader := instrument.RecordRequestBody(ctx, "security.update_user_profile_data", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "security.update_user_profile_data") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateUserProfileData query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updateuserprofiledata.Response +func (r UpdateUserProfileData) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "security.update_user_profile_data") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateUserProfileData headers map. +func (r *UpdateUserProfileData) Header(key, value string) *UpdateUserProfileData { + r.headers.Set(key, value) + + return r +} + +// Uid A unique identifier for the user profile. +// API Name: uid +func (r *UpdateUserProfileData) _uid(uid string) *UpdateUserProfileData { + r.paramSet |= uidMask + r.uid = uid + + return r +} + +// IfSeqNo Only perform the operation if the document has this sequence number. 
+// API name: if_seq_no +func (r *UpdateUserProfileData) IfSeqNo(sequencenumber string) *UpdateUserProfileData { + r.values.Set("if_seq_no", sequencenumber) + + return r +} + +// IfPrimaryTerm Only perform the operation if the document has this primary term. +// API name: if_primary_term +func (r *UpdateUserProfileData) IfPrimaryTerm(ifprimaryterm string) *UpdateUserProfileData { + r.values.Set("if_primary_term", ifprimaryterm) + + return r +} + +// Refresh If 'true', Elasticsearch refreshes the affected shards to make this operation +// visible to search. +// If 'wait_for', it waits for a refresh to make this operation visible to +// search. +// If 'false', nothing is done with refreshes. +// API name: refresh +func (r *UpdateUserProfileData) Refresh(refresh refresh.Refresh) *UpdateUserProfileData { + r.values.Set("refresh", refresh.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateUserProfileData) ErrorTrace(errortrace bool) *UpdateUserProfileData { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateUserProfileData) FilterPath(filterpaths ...string) *UpdateUserProfileData { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateUserProfileData) Human(human bool) *UpdateUserProfileData { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateUserProfileData) Pretty(pretty bool) *UpdateUserProfileData { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Non-searchable data that you want to associate with the user profile. +// This field supports a nested data structure. +// Within the `data` object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). +// The data object is not searchable, but can be retrieved with the get user +// profile API. +// API name: data +func (r *UpdateUserProfileData) Data(data map[string]json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Data = data + return r +} + +func (r *UpdateUserProfileData) AddDatum(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Data == nil { + r.req.Data = make(map[string]json.RawMessage) + } else { + tmp = r.req.Data + } + + tmp[key] = value + + r.req.Data = tmp + return r +} + +// Searchable data that you want to associate with the user profile. +// This field supports a nested data structure. 
+// Within the labels object, top-level keys cannot begin with an underscore +// (`_`) or contain a period (`.`). +// API name: labels +func (r *UpdateUserProfileData) Labels(labels map[string]json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Labels = labels + return r +} + +func (r *UpdateUserProfileData) AddLabel(key string, value json.RawMessage) *UpdateUserProfileData { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + var tmp map[string]json.RawMessage + if r.req.Labels == nil { + r.req.Labels = make(map[string]json.RawMessage) + } else { + tmp = r.req.Labels + } + + tmp[key] = value + + r.req.Labels = tmp + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/delete_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/delete_node.go new file mode 100644 index 000000000..113f7de58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/delete_node.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +package deletenode + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteNode struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteNode type alias for index. +type NewDeleteNode func(nodeid string) *DeleteNode + +// NewDeleteNodeFunc returns a new instance of DeleteNode with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteNodeFunc(tp elastictransport.Interface) NewDeleteNode { + return func(nodeid string) *DeleteNode { + n := New(tp) + + n._nodeid(nodeid) + + return n + } +} + +// Cancel node shutdown preparations. +// Remove a node from the shutdown list so it can resume normal operations. +// You must explicitly clear the shutdown request when a node rejoins the +// cluster or when a node has permanently left the cluster. +// Shutdown requests are never removed automatically by Elasticsearch. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. +// Direct use is not supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node +func New(tp elastictransport.Interface) *DeleteNode { + r := &DeleteNode{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteNode) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("shutdown") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteNode) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "shutdown.delete_node") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "shutdown.delete_node") + if reader := instrument.RecordRequestBody(ctx, "shutdown.delete_node", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "shutdown.delete_node") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteNode query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletenode.Response +func (r DeleteNode) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.delete_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r DeleteNode) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.delete_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteNode query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteNode headers map. +func (r *DeleteNode) Header(key, value string) *DeleteNode { + r.headers.Set(key, value) + + return r +} + +// NodeId The node id of node to be removed from the shutdown state +// API Name: nodeid +func (r *DeleteNode) _nodeid(nodeid string) *DeleteNode { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *DeleteNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *DeleteNode { + r.values.Set("master_timeout", mastertimeout.String()) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteNode) Timeout(timeout timeunit.TimeUnit) *DeleteNode { + r.values.Set("timeout", timeout.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteNode) ErrorTrace(errortrace bool) *DeleteNode { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteNode) FilterPath(filterpaths ...string) *DeleteNode { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteNode) Human(human bool) *DeleteNode { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteNode) Pretty(pretty bool) *DeleteNode { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/response.go new file mode 100644 index 000000000..29cc85b33 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletenode + +// Response holds the response body struct for the package deletenode +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/get_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/get_node.go new file mode 100644 index 000000000..cdffcad2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/get_node.go @@ -0,0 +1,387 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the shutdown status. 
+// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +package getnode + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetNode struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetNode type alias for index. +type NewGetNode func() *GetNode + +// NewGetNodeFunc returns a new instance of GetNode with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetNodeFunc(tp elastictransport.Interface) NewGetNode { + return func() *GetNode { + n := New(tp) + + return n + } +} + +// Get the shutdown status. +// +// Get information about nodes that are ready to be shut down, have shut down +// preparations still in progress, or have stalled. +// The API returns status information for each part of the shut down process. +// +// NOTE: This feature is designed for indirect use by Elasticsearch Service, +// Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node +func New(tp elastictransport.Interface) *GetNode { + r := &GetNode{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetNode) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_nodes")
+		path.WriteString("/")
+		path.WriteString("shutdown")
+
+		method = http.MethodGet
+	case r.paramSet == nodeidMask:
+		path.WriteString("/")
+		path.WriteString("_nodes")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "nodeid", r.nodeid)
+		}
+		path.WriteString(r.nodeid)
+		path.WriteString("/")
+		path.WriteString("shutdown")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetNode) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "shutdown.get_node")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "shutdown.get_node")
+		if reader := instrument.RecordRequestBody(ctx, "shutdown.get_node", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "shutdown.get_node")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetNode query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getnode.Response
+func (r GetNode) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "shutdown.get_node")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetNode) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "shutdown.get_node")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetNode query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetNode headers map.
+func (r *GetNode) Header(key, value string) *GetNode {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// NodeId Which node for which to retrieve the shutdown status
+// API Name: nodeid
+func (r *GetNode) NodeId(nodeid string) *GetNode {
+	r.paramSet |= nodeidMask
+	r.nodeid = nodeid
+
+	return r
+}
+
+// MasterTimeout Period to wait for a connection to the master node. If no response is
+// received before the timeout expires, the request fails and returns an error.
+// API name: master_timeout
+func (r *GetNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *GetNode {
+	r.values.Set("master_timeout", mastertimeout.String())
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetNode) ErrorTrace(errortrace bool) *GetNode {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetNode) FilterPath(filterpaths ...string) *GetNode {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human +func (r *GetNode) Human(human bool) *GetNode { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetNode) Pretty(pretty bool) *GetNode { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/response.go new file mode 100644 index 000000000..4e68ae583 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getnode + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getnode +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 +type Response struct { + Nodes []types.NodeShutdownStatus `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/put_node.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/put_node.go new file mode 100644 index 000000000..ef06933da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/put_node.go @@ -0,0 +1,503 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. +// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. +// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. +package putnode + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_" +) + +const ( + nodeidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutNode struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + nodeid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutNode type alias for index. +type NewPutNode func(nodeid string) *PutNode + +// NewPutNodeFunc returns a new instance of PutNode with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutNodeFunc(tp elastictransport.Interface) NewPutNode { + return func(nodeid string) *PutNode { + n := New(tp) + + n._nodeid(nodeid) + + return n + } +} + +// Prepare a node to be shut down. +// +// NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic +// Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not +// supported. +// +// If you specify a node that is offline, it will be prepared for shut down when +// it rejoins the cluster. +// +// If the operator privileges feature is enabled, you must be an operator to use +// this API. +// +// The API migrates ongoing tasks and index shards to other nodes as needed to +// prepare a node to be restarted or shut down and removed from the cluster. +// This ensures that Elasticsearch can be stopped safely with minimal disruption +// to the cluster. +// +// You must specify the type of shutdown: `restart`, `remove`, or `replace`. +// If a node is already being prepared for shutdown, you can use this API to +// change the shutdown type. +// +// IMPORTANT: This API does NOT terminate the Elasticsearch process. 
+// Monitor the node shutdown status to determine when it is safe to stop +// Elasticsearch. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node +func New(tp elastictransport.Interface) *PutNode { + r := &PutNode{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutNode) Raw(raw io.Reader) *PutNode { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutNode) Request(req *Request) *PutNode { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutNode) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutNode: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nodeidMask: + path.WriteString("/") + path.WriteString("_nodes") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "nodeid", r.nodeid) + } + path.WriteString(r.nodeid) + path.WriteString("/") + path.WriteString("shutdown") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
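Because the body-field setters for this endpoint continue further down the file (the shutdown `type` itself is presumably set through the imported `type_` enum, which is outside this hunk), the quickest sketch goes through `Raw`. The body keys (`type`, `reason`, `allocation_delay`) follow the field docs later in this file; the node id and values are placeholders, and the transport setup is the same illustrative one used above:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode"
)

func main() {
	u, _ := url.Parse("http://localhost:9200") // illustrative address
	tp, err := elastictransport.New(elastictransport.Config{URLs: []*url.URL{u}})
	if err != nil {
		log.Fatal(err)
	}

	// A restart-type shutdown for a placeholder node id; per the field docs,
	// allocation_delay is only honoured when type is "restart".
	body := strings.NewReader(`{
	  "type": "restart",
	  "reason": "scheduled maintenance",
	  "allocation_delay": "10m"
	}`)

	res, err := putnode.NewPutNodeFunc(tp)("node-1").Raw(body).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", res)
}
```

Note that, as the doc comment above stresses, this only registers the node for shutdown; it does not terminate the Elasticsearch process.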
+func (r PutNode) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "shutdown.put_node") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "shutdown.put_node") + if reader := instrument.RecordRequestBody(ctx, "shutdown.put_node", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "shutdown.put_node") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutNode query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putnode.Response +func (r PutNode) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "shutdown.put_node") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutNode headers map. +func (r *PutNode) Header(key, value string) *PutNode { + r.headers.Set(key, value) + + return r +} + +// NodeId The node identifier. +// This parameter is not validated against the cluster's active nodes. +// This enables you to register a node for shut down while it is offline. +// No error is thrown if you specify an invalid node ID. +// API Name: nodeid +func (r *PutNode) _nodeid(nodeid string) *PutNode { + r.paramSet |= nodeidMask + r.nodeid = nodeid + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: master_timeout +func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { + r.values.Set("master_timeout", mastertimeout.String()) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutNode) Timeout(timeout timeunit.TimeUnit) *PutNode { + r.values.Set("timeout", timeout.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutNode) ErrorTrace(errortrace bool) *PutNode { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutNode) FilterPath(filterpaths ...string) *PutNode { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutNode) Human(human bool) *PutNode { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutNode) Pretty(pretty bool) *PutNode { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Only valid if type is restart. +// Controls how long Elasticsearch will wait for the node to restart and join +// the cluster before reassigning its shards to other nodes. +// This works the same as delaying allocation with the +// index.unassigned.node_left.delayed_timeout setting. +// If you specify both a restart allocation delay and an index-level allocation +// delay, the longer of the two is used. +// API name: allocation_delay +func (r *PutNode) AllocationDelay(allocationdelay string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllocationDelay = &allocationdelay + + return r +} + +// A human-readable reason that the node is being shut down. +// This field provides information for other cluster operators; it does not +// affect the shut down process. +// API name: reason +func (r *PutNode) Reason(reason string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Reason = reason + + return r +} + +// Only valid if type is replace. +// Specifies the name of the node that is replacing the node being shut down. +// Shards from the shut down node are only allowed to be allocated to the target +// node, and no other data will be allocated to the target node. +// During relocation of data certain allocation rules are ignored, such as disk +// watermarks or user attribute filtering rules. 
+// API name: target_node_name +func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TargetNodeName = &targetnodename + + return r +} + +// Valid values are restart, remove, or replace. +// Use restart when you need to temporarily shut down a node to perform an +// upgrade, make configuration changes, or perform other maintenance. +// Because the node is expected to rejoin the cluster, data is not migrated off +// of the node. +// Use remove when you need to permanently remove a node from the cluster. +// The node is not marked ready for shutdown until data is migrated off of the +// node Use replace to do a 1:1 replacement of a node with another node. +// Certain allocation decisions will be ignored (such as disk watermarks) in the +// interest of true replacement of the source node with the target node. +// During a replace-type shutdown, rollover and index creation may result in +// unassigned shards, and shrink may fail until the replacement is complete. +// API name: type +func (r *PutNode) Type(type_ type_.Type) *PutNode { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.Type = type_ + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go new file mode 100644 index 000000000..04abcabf9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putnode + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_" +) + +// Request holds the request body struct for the package putnode +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L108 +type Request struct { + + // AllocationDelay Only valid if type is restart. + // Controls how long Elasticsearch will wait for the node to restart and join + // the cluster before reassigning its shards to other nodes. + // This works the same as delaying allocation with the + // index.unassigned.node_left.delayed_timeout setting. + // If you specify both a restart allocation delay and an index-level allocation + // delay, the longer of the two is used. 
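Taken together, the builder above supports a call shaped like the following sketch. This is hypothetical caller code, not part of the vendored diff; it assumes a configured elastictransport.Interface named tp, and that the generated type_ enum exposes a Restart value (its values mirror the API's restart/remove/replace):

	// restartNode prepares a node for a restart-type shutdown (sketch only).
	func restartNode(ctx context.Context, tp elastictransport.Interface) error {
		res, err := putnode.NewPutNodeFunc(tp)("node-1").
			Type(type_.Restart).           // restart, remove, or replace
			Reason("planned maintenance"). // informational only
			AllocationDelay("20m").        // only meaningful for restart shutdowns
			Do(ctx)
		if err != nil {
			return err // transport failure or decoded *types.ElasticsearchError
		}
		_ = res.Acknowledged // always true on success
		return nil
	}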
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go
new file mode 100644
index 000000000..04abcabf9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/request.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putnode
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_"
+)
+
+// Request holds the request body struct for the package putnode
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L108
+type Request struct {
+
+	// AllocationDelay Only valid if type is restart.
+	// Controls how long Elasticsearch will wait for the node to restart and join
+	// the cluster before reassigning its shards to other nodes.
+	// This works the same as delaying allocation with the
+	// index.unassigned.node_left.delayed_timeout setting.
+	// If you specify both a restart allocation delay and an index-level allocation
+	// delay, the longer of the two is used.
+	AllocationDelay *string `json:"allocation_delay,omitempty"`
+	// Reason A human-readable reason that the node is being shut down.
+	// This field provides information for other cluster operators; it does not
+	// affect the shut down process.
+	Reason string `json:"reason"`
+	// TargetNodeName Only valid if type is replace.
+	// Specifies the name of the node that is replacing the node being shut down.
+	// Shards from the shut down node are only allowed to be allocated to the target
+	// node, and no other data will be allocated to the target node.
+	// During relocation of data certain allocation rules are ignored, such as disk
+	// watermarks or user attribute filtering rules.
+	TargetNodeName *string `json:"target_node_name,omitempty"`
+	// Type Valid values are restart, remove, or replace.
+	// Use restart when you need to temporarily shut down a node to perform an
+	// upgrade, make configuration changes, or perform other maintenance.
+	// Because the node is expected to rejoin the cluster, data is not migrated off
+	// of the node.
+	// Use remove when you need to permanently remove a node from the cluster.
+	// The node is not marked ready for shutdown until data is migrated off of the
+	// node Use replace to do a 1:1 replacement of a node with another node.
+	// Certain allocation decisions will be ignored (such as disk watermarks) in the
+	// interest of true replacement of the source node with the target node.
+	// During a replace-type shutdown, rollover and index creation may result in
+	// unassigned shards, and shrink may fail until the replacement is complete.
+	Type type_.Type `json:"type"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Putnode request: %w", err)
+	}
+
+	return &req, nil
+}
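The struct tags above pin down the wire format: only reason and type lack omitempty, so they are the two required body fields. A body such as the following (values illustrative) round-trips through the generated FromJSON helper:

	// Hypothetical usage of the FromJSON helper defined above.
	req, err := putnode.NewRequest().FromJSON(`{
		"type": "restart",
		"reason": "demonstrating the node shutdown request body",
		"allocation_delay": "20m"
	}`)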
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/response.go
new file mode 100644
index 000000000..cd548c89b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package putnode
+
+// Response holds the response body struct for the package putnode
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/ingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/ingest.go
new file mode 100644
index 000000000..c38e3f07d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/ingest.go
@@ -0,0 +1,564 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Simulate data ingestion.
+// Run ingest pipelines against a set of provided documents, optionally with
+// substitute pipeline definitions, to simulate ingesting data into an index.
+//
+// This API is meant to be used for troubleshooting or pipeline development, as
+// it does not actually index any data into Elasticsearch.
+//
+// The API runs the default and final pipeline for that index against a set of
+// documents provided in the body of the request.
+// If a pipeline contains a reroute processor, it follows that reroute processor
+// to the new index, running that index's pipelines as well the same way that a
+// non-simulated ingest would.
+// No data is indexed into Elasticsearch.
+// Instead, the transformed document is returned, along with the list of
+// pipelines that have been run and the name of the index where the document
+// would have been indexed if this were not a simulation.
+// The transformed document is validated against the mappings that would apply
+// to this index, and any validation error is reported in the result.
+//
+// This API differs from the simulate pipeline API in that you specify a single
+// pipeline for that API, and it runs only that one pipeline.
+// The simulate pipeline API is more useful for developing a single pipeline,
+// while the simulate ingest API is more useful for troubleshooting the
+// interaction of the various pipelines that get applied when ingesting into an
+// index.
+//
+// By default, the pipeline definitions that are currently in the system are
+// used.
+// However, you can supply substitute pipeline definitions in the body of the
+// request.
+// These will be used in place of the pipeline definitions that are already in
+// the system. This can be used to replace existing pipeline definitions or to
+// create new ones. The pipeline substitutions are used only within this
+// request.
+package ingest
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	indexMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Ingest struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	index string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewIngest type alias for index.
+type NewIngest func() *Ingest
+
+// NewIngestFunc returns a new instance of Ingest with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewIngestFunc(tp elastictransport.Interface) NewIngest {
+	return func() *Ingest {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Simulate data ingestion.
+// Run ingest pipelines against a set of provided documents, optionally with
+// substitute pipeline definitions, to simulate ingesting data into an index.
+//
+// This API is meant to be used for troubleshooting or pipeline development, as
+// it does not actually index any data into Elasticsearch.
+//
+// The API runs the default and final pipeline for that index against a set of
+// documents provided in the body of the request.
+// If a pipeline contains a reroute processor, it follows that reroute processor
+// to the new index, running that index's pipelines as well the same way that a
+// non-simulated ingest would.
+// No data is indexed into Elasticsearch.
+// Instead, the transformed document is returned, along with the list of
+// pipelines that have been run and the name of the index where the document
+// would have been indexed if this were not a simulation.
+// The transformed document is validated against the mappings that would apply
+// to this index, and any validation error is reported in the result.
+//
+// This API differs from the simulate pipeline API in that you specify a single
+// pipeline for that API, and it runs only that one pipeline.
+// The simulate pipeline API is more useful for developing a single pipeline,
+// while the simulate ingest API is more useful for troubleshooting the
+// interaction of the various pipelines that get applied when ingesting into an
+// index.
+//
+// By default, the pipeline definitions that are currently in the system are
+// used.
+// However, you can supply substitute pipeline definitions in the body of the
+// request.
+// These will be used in place of the pipeline definitions that are already in
+// the system. This can be used to replace existing pipeline definitions or to
+// create new ones. The pipeline substitutions are used only within this
+// request.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest
+func New(tp elastictransport.Interface) *Ingest {
+	r := &Ingest{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *Ingest) Raw(raw io.Reader) *Ingest {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *Ingest) Request(req *Request) *Ingest {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Ingest) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for Ingest: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+		path.WriteString("_simulate")
+
+		method = http.MethodPost
+	case r.paramSet == indexMask:
+		path.WriteString("/")
+		path.WriteString("_ingest")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "index", r.index)
+		}
+		path.WriteString(r.index)
+		path.WriteString("/")
+		path.WriteString("_simulate")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r Ingest) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "simulate.ingest")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "simulate.ingest")
+		if reader := instrument.RecordRequestBody(ctx, "simulate.ingest", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "simulate.ingest")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Ingest query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a ingest.Response
+func (r Ingest) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "simulate.ingest")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the Ingest headers map.
+func (r *Ingest) Header(key, value string) *Ingest {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Index The index to simulate ingesting into.
+// This value can be overridden by specifying an index on each document.
+// If you specify this parameter in the request path, it is used for any
+// documents that do not explicitly specify an index argument.
+// API Name: index
+func (r *Ingest) Index(index string) *Ingest {
+	r.paramSet |= indexMask
+	r.index = index
+
+	return r
+}
+
+// Pipeline The pipeline to use as the default pipeline.
+// This value can be used to override the default pipeline of the index.
+// API name: pipeline
+func (r *Ingest) Pipeline(pipelinename string) *Ingest {
+	r.values.Set("pipeline", pipelinename)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Ingest) ErrorTrace(errortrace bool) *Ingest {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Ingest) FilterPath(filterpaths ...string) *Ingest {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *Ingest) Human(human bool) *Ingest {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *Ingest) Pretty(pretty bool) *Ingest {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// A map of component template names to substitute component template definition
+// objects.
+// API name: component_template_substitutions
+func (r *Ingest) ComponentTemplateSubstitutions(componenttemplatesubstitutions map[string]types.ComponentTemplateNode) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.ComponentTemplateSubstitutions = componenttemplatesubstitutions
+	return r
+}
+
+func (r *Ingest) AddComponentTemplateSubstitution(key string, value types.ComponentTemplateNodeVariant) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.ComponentTemplateNode
+	if r.req.ComponentTemplateSubstitutions == nil {
+		r.req.ComponentTemplateSubstitutions = make(map[string]types.ComponentTemplateNode)
+	} else {
+		tmp = r.req.ComponentTemplateSubstitutions
+	}
+
+	tmp[key] = *value.ComponentTemplateNodeCaster()
+
+	r.req.ComponentTemplateSubstitutions = tmp
+	return r
+}
+
+// Sample documents to test in the pipeline.
+// API name: docs
+func (r *Ingest) Docs(docs ...types.DocumentVariant) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range docs {
+
+		r.req.Docs = append(r.req.Docs, *v.DocumentCaster())
+
+	}
+	return r
+}
+
+// A map of index template names to substitute index template definition
+// objects.
+// API name: index_template_substitutions
+func (r *Ingest) IndexTemplateSubstitutions(indextemplatesubstitutions map[string]types.IndexTemplate) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.IndexTemplateSubstitutions = indextemplatesubstitutions
+	return r
+}
+
+func (r *Ingest) AddIndexTemplateSubstitution(key string, value types.IndexTemplateVariant) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.IndexTemplate
+	if r.req.IndexTemplateSubstitutions == nil {
+		r.req.IndexTemplateSubstitutions = make(map[string]types.IndexTemplate)
+	} else {
+		tmp = r.req.IndexTemplateSubstitutions
+	}
+
+	tmp[key] = *value.IndexTemplateCaster()
+
+	r.req.IndexTemplateSubstitutions = tmp
+	return r
+}
+
+// API name: mapping_addition
+func (r *Ingest) MappingAddition(mappingaddition types.TypeMappingVariant) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.MappingAddition = mappingaddition.TypeMappingCaster()
+
+	return r
+}
+
+// Pipelines to test.
+// If you don’t specify the `pipeline` request path parameter, this parameter is
+// required.
+// If you specify both this and the request path parameter, the API only uses
+// the request path parameter.
+// API name: pipeline_substitutions
+func (r *Ingest) PipelineSubstitutions(pipelinesubstitutions map[string]types.IngestPipeline) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.PipelineSubstitutions = pipelinesubstitutions
+	return r
+}
+
+func (r *Ingest) AddPipelineSubstitution(key string, value types.IngestPipelineVariant) *Ingest {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	var tmp map[string]types.IngestPipeline
+	if r.req.PipelineSubstitutions == nil {
+		r.req.PipelineSubstitutions = make(map[string]types.IngestPipeline)
+	} else {
+		tmp = r.req.PipelineSubstitutions
+	}
+
+	tmp[key] = *value.IngestPipelineCaster()
+
+	r.req.PipelineSubstitutions = tmp
+	return r
+}
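A minimal sketch of driving this endpoint from caller code (hypothetical, not part of the vendored diff). It supplies the body through Raw, whose JSON mirrors the Request fields defined in request.go below; the index name, document, and uppercase-processor pipeline are illustrative, and the `_index`/`_source` document shape follows the Elasticsearch simulate API conventions:

	// simulateIngest runs a document through a substitute pipeline without indexing it.
	func simulateIngest(ctx context.Context, tp elastictransport.Interface) (*ingest.Response, error) {
		body := strings.NewReader(`{
			"docs": [
				{ "_index": "my-index", "_source": { "foo": "bar" } }
			],
			"pipeline_substitutions": {
				"my-pipeline": { "processors": [ { "uppercase": { "field": "foo" } } ] }
			}
		}`)
		return ingest.NewIngestFunc(tp)().
			Index("my-index"). // default index for docs that do not set _index
			Raw(body).         // Raw takes precedence over Request, as noted above
			Do(ctx)
	}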
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/request.go
new file mode 100644
index 000000000..a514b627b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/request.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package ingest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Request holds the request body struct for the package ingest
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/simulate/ingest/SimulateIngestRequest.ts#L29-L100
+type Request struct {
+
+	// ComponentTemplateSubstitutions A map of component template names to substitute component template definition
+	// objects.
+	ComponentTemplateSubstitutions map[string]types.ComponentTemplateNode `json:"component_template_substitutions,omitempty"`
+	// Docs Sample documents to test in the pipeline.
+	Docs []types.Document `json:"docs"`
+	// IndexTemplateSubstitutions A map of index template names to substitute index template definition
+	// objects.
+	IndexTemplateSubstitutions map[string]types.IndexTemplate `json:"index_template_substitutions,omitempty"`
+	MappingAddition            *types.TypeMapping             `json:"mapping_addition,omitempty"`
+	// PipelineSubstitutions Pipelines to test.
+	// If you don’t specify the `pipeline` request path parameter, this parameter is
+	// required.
+	// If you specify both this and the request path parameter, the API only uses
+	// the request path parameter.
+	PipelineSubstitutions map[string]types.IngestPipeline `json:"pipeline_substitutions,omitempty"`
+}
+
+// NewRequest returns a Request
+func NewRequest() *Request {
+	r := &Request{
+		ComponentTemplateSubstitutions: make(map[string]types.ComponentTemplateNode, 0),
+		IndexTemplateSubstitutions:     make(map[string]types.IndexTemplate, 0),
+		PipelineSubstitutions:          make(map[string]types.IngestPipeline, 0),
+	}
+
+	return r
+}
+
+// FromJSON allows to load an arbitrary json into the request structure
+func (r *Request) FromJSON(data string) (*Request, error) {
+	var req Request
+	err := json.Unmarshal([]byte(data), &req)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not deserialise json into Ingest request: %w", err)
+	}
+
+	return &req, nil
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/response.go
new file mode 100644
index 000000000..0019baf10
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest/response.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package ingest
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package ingest
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/simulate/ingest/SimulateIngestResponse.ts#L27-L29
+type Response struct {
+	Docs []types.SimulateIngestDocumentResult `json:"docs"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/delete_lifecycle.go
new file mode 100644
index 000000000..0781a5bb8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/delete_lifecycle.go
@@ -0,0 +1,370 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Delete a policy.
+// Delete a snapshot lifecycle policy definition.
+// This operation prevents any future snapshots from being taken but does not
+// cancel in-progress snapshots or remove previously-taken snapshots.
+package deletelifecycle
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	policyidMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type DeleteLifecycle struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	policyid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewDeleteLifecycle type alias for index.
+type NewDeleteLifecycle func(policyid string) *DeleteLifecycle
+
+// NewDeleteLifecycleFunc returns a new instance of DeleteLifecycle with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle {
+	return func(policyid string) *DeleteLifecycle {
+		n := New(tp)
+
+		n._policyid(policyid)
+
+		return n
+	}
+}
+
+// Delete a policy.
+// Delete a snapshot lifecycle policy definition.
+// This operation prevents any future snapshots from being taken but does not
+// cancel in-progress snapshots or remove previously-taken snapshots.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle
+func New(tp elastictransport.Interface) *DeleteLifecycle {
+	r := &DeleteLifecycle{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *DeleteLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == policyidMask:
+		path.WriteString("/")
+		path.WriteString("_slm")
+		path.WriteString("/")
+		path.WriteString("policy")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "policyid", r.policyid)
+		}
+		path.WriteString(r.policyid)
+
+		method = http.MethodDelete
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r DeleteLifecycle) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "slm.delete_lifecycle")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "slm.delete_lifecycle")
+		if reader := instrument.RecordRequestBody(ctx, "slm.delete_lifecycle", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "slm.delete_lifecycle")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the DeleteLifecycle query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a deletelifecycle.Response
+func (r DeleteLifecycle) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "slm.delete_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r DeleteLifecycle) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "slm.delete_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteLifecycle query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteLifecycle headers map.
+func (r *DeleteLifecycle) Header(key, value string) *DeleteLifecycle {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// PolicyId The id of the snapshot lifecycle policy to remove
+// API Name: policyid
+func (r *DeleteLifecycle) _policyid(policyid string) *DeleteLifecycle {
+	r.paramSet |= policyidMask
+	r.policyid = policyid
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout
+func (r *DeleteLifecycle) MasterTimeout(duration string) *DeleteLifecycle {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout The period to wait for a response.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: timeout
+func (r *DeleteLifecycle) Timeout(duration string) *DeleteLifecycle {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteLifecycle) ErrorTrace(errortrace bool) *DeleteLifecycle {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *DeleteLifecycle) FilterPath(filterpaths ...string) *DeleteLifecycle {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *DeleteLifecycle) Human(human bool) *DeleteLifecycle {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *DeleteLifecycle) Pretty(pretty bool) *DeleteLifecycle {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
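Since this endpoint takes no body, IsSuccess (above) gives the cheapest calling convention. A sketch of hypothetical caller code, assuming a configured elastictransport.Interface named tp; the policy name is illustrative:

	ok, err := deletelifecycle.NewDeleteLifecycleFunc(tp)("daily-snapshots").
		MasterTimeout("30s").
		IsSuccess(context.Background())
	// ok is false with a nil err when the policy was already absent (404);
	// any other non-2xx status surfaces as an error, per the IsSuccess body above.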
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/response.go
new file mode 100644
index 000000000..7d92af3f9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle/response.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package deletelifecycle
+
+// Response holds the response body struct for the package deletelifecycle
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L25
+type Response struct {
+
+	// Acknowledged For a successful response, this value is always true. On failure, an
+	// exception is returned instead.
+	Acknowledged bool `json:"acknowledged"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/execute_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/execute_lifecycle.go
new file mode 100644
index 000000000..8f008b1aa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/execute_lifecycle.go
@@ -0,0 +1,376 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Run a policy.
+// Immediately create a snapshot according to the snapshot lifecycle policy
+// without waiting for the scheduled time.
+// The snapshot policy is normally applied according to its schedule, but you
+// might want to manually run a policy before performing an upgrade or other
+// maintenance.
+package executelifecycle
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	policyidMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type ExecuteLifecycle struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	policyid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewExecuteLifecycle type alias for index.
+type NewExecuteLifecycle func(policyid string) *ExecuteLifecycle
+
+// NewExecuteLifecycleFunc returns a new instance of ExecuteLifecycle with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewExecuteLifecycleFunc(tp elastictransport.Interface) NewExecuteLifecycle {
+	return func(policyid string) *ExecuteLifecycle {
+		n := New(tp)
+
+		n._policyid(policyid)
+
+		return n
+	}
+}
+
+// Run a policy.
+// Immediately create a snapshot according to the snapshot lifecycle policy
+// without waiting for the scheduled time.
+// The snapshot policy is normally applied according to its schedule, but you
+// might want to manually run a policy before performing an upgrade or other
+// maintenance.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle
+func New(tp elastictransport.Interface) *ExecuteLifecycle {
+	r := &ExecuteLifecycle{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *ExecuteLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == policyidMask:
+		path.WriteString("/")
+		path.WriteString("_slm")
+		path.WriteString("/")
+		path.WriteString("policy")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "policyid", r.policyid)
+		}
+		path.WriteString(r.policyid)
+		path.WriteString("/")
+		path.WriteString("_execute")
+
+		method = http.MethodPut
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r ExecuteLifecycle) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "slm.execute_lifecycle")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "slm.execute_lifecycle")
+		if reader := instrument.RecordRequestBody(ctx, "slm.execute_lifecycle", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "slm.execute_lifecycle")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the ExecuteLifecycle query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a executelifecycle.Response
+func (r ExecuteLifecycle) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "slm.execute_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r ExecuteLifecycle) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "slm.execute_lifecycle")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the ExecuteLifecycle query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the ExecuteLifecycle headers map.
+func (r *ExecuteLifecycle) Header(key, value string) *ExecuteLifecycle {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// PolicyId The id of the snapshot lifecycle policy to be executed
+// API Name: policyid
+func (r *ExecuteLifecycle) _policyid(policyid string) *ExecuteLifecycle {
+	r.paramSet |= policyidMask
+	r.policyid = policyid
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout
+func (r *ExecuteLifecycle) MasterTimeout(duration string) *ExecuteLifecycle {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout The period to wait for a response.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: timeout
+func (r *ExecuteLifecycle) Timeout(duration string) *ExecuteLifecycle {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *ExecuteLifecycle) ErrorTrace(errortrace bool) *ExecuteLifecycle {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *ExecuteLifecycle) FilterPath(filterpaths ...string) *ExecuteLifecycle {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *ExecuteLifecycle) Human(human bool) *ExecuteLifecycle {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty
+func (r *ExecuteLifecycle) Pretty(pretty bool) *ExecuteLifecycle {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExecuteLifecycle) Human(human bool) *ExecuteLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ExecuteLifecycle) Pretty(pretty bool) *ExecuteLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/response.go new file mode 100644 index 000000000..66373ad96 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package executelifecycle + +// Response holds the response body struct for the package executelifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 +type Response struct { + SnapshotName string `json:"snapshot_name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/execute_retention.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/execute_retention.go new file mode 100644 index 000000000..4e73350fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/execute_retention.go @@ -0,0 +1,347 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. +package executeretention + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExecuteRetention struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExecuteRetention type alias for index. +type NewExecuteRetention func() *ExecuteRetention + +// NewExecuteRetentionFunc returns a new instance of ExecuteRetention with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExecuteRetentionFunc(tp elastictransport.Interface) NewExecuteRetention { + return func() *ExecuteRetention { + n := New(tp) + + return n + } +} + +// Run a retention policy. +// Manually apply the retention policy to force immediate removal of snapshots +// that are expired according to the snapshot lifecycle policy retention rules. +// The retention policy is normally applied according to its schedule. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention +func New(tp elastictransport.Interface) *ExecuteRetention { + r := &ExecuteRetention{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
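The executelifecycle package vendored above takes its path parameter through the constructor (the unexported _policyid setter) and everything else through fluent query-string setters, so invoking the endpoint reduces to one chain. A minimal sketch, assuming the usual typed-client entry points (elasticsearch.NewTypedClient and its Slm.ExecuteLifecycle accessor) and a hypothetical "daily-snapshots" policy id; none of those appear in this hunk:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
)

func main() {
	// NewTypedClient wires up the elastictransport.Interface that
	// ExecuteLifecycle.Perform sends the built request through.
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// PUT /_slm/policy/daily-snapshots/_execute
	res, err := es.Slm.ExecuteLifecycle("daily-snapshots").
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err) // non-2xx responses surface as *types.ElasticsearchError
	}
	fmt.Println("snapshot started:", res.SnapshotName) // executelifecycle.Response
}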
+func (r *ExecuteRetention) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("_execute_retention") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r ExecuteRetention) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.execute_retention") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.execute_retention") + if reader := instrument.RecordRequestBody(ctx, "slm.execute_retention", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.execute_retention") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ExecuteRetention query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a executeretention.Response +func (r ExecuteRetention) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.execute_retention") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + 
instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ExecuteRetention) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.execute_retention") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ExecuteRetention query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ExecuteRetention headers map. +func (r *ExecuteRetention) Header(key, value string) *ExecuteRetention { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *ExecuteRetention) MasterTimeout(duration string) *ExecuteRetention { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *ExecuteRetention) Timeout(duration string) *ExecuteRetention { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ExecuteRetention) ErrorTrace(errortrace bool) *ExecuteRetention { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ExecuteRetention) FilterPath(filterpaths ...string) *ExecuteRetention { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ExecuteRetention) Human(human bool) *ExecuteRetention { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ExecuteRetention) Pretty(pretty bool) *ExecuteRetention { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/response.go new file mode 100644 index 000000000..520e102b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package executeretention + +// Response holds the response body struct for the package executeretention +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/get_lifecycle.go new file mode 100644 index 000000000..092708c16 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/get_lifecycle.go @@ -0,0 +1,373 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get policy information. 
+// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. +package getlifecycle + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + policyidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + policyid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetLifecycle type alias for index. +type NewGetLifecycle func() *GetLifecycle + +// NewGetLifecycleFunc returns a new instance of GetLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetLifecycleFunc(tp elastictransport.Interface) NewGetLifecycle { + return func() *GetLifecycle { + n := New(tp) + + return n + } +} + +// Get policy information. +// Get snapshot lifecycle policy definitions and information about the latest +// snapshot attempts. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle +func New(tp elastictransport.Interface) *GetLifecycle { + r := &GetLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == policyidMask: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "policyid", r.policyid) + } + path.WriteString(r.policyid) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
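The executeretention endpoint, whose file ends just before the getlifecycle hunk above, has no path parameters at all: paramSet == 0 resolves to POST /_slm/_execute_retention, and its Response carries a single Acknowledged flag. A short sketch under the same typed-client assumptions as the earlier example (the Slm accessor is assumed, not shown in this diff):

package slmexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// runRetention forces removal of snapshots that are already expired under
// their policies instead of waiting for the scheduled retention run.
func runRetention(ctx context.Context, es *elasticsearch.TypedClient) error {
	res, err := es.Slm.ExecuteRetention().
		MasterTimeout("30s").
		Timeout("60s").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("retention run was not acknowledged")
	}
	return nil
}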
+func (r GetLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.get_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "slm.get_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getlifecycle.Response +func (r GetLifecycle) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetLifecycle) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetLifecycle query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetLifecycle headers map. +func (r *GetLifecycle) Header(key, value string) *GetLifecycle { + r.headers.Set(key, value) + + return r +} + +// PolicyId Comma-separated list of snapshot lifecycle policies to retrieve +// API Name: policyid +func (r *GetLifecycle) PolicyId(policyid string) *GetLifecycle { + r.paramSet |= policyidMask + r.policyid = policyid + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetLifecycle) MasterTimeout(duration string) *GetLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *GetLifecycle) Timeout(duration string) *GetLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetLifecycle) ErrorTrace(errortrace bool) *GetLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetLifecycle) FilterPath(filterpaths ...string) *GetLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetLifecycle) Human(human bool) *GetLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetLifecycle) Pretty(pretty bool) *GetLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/response.go new file mode 100644 index 000000000..c40af1dd6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getlifecycle + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27 + +type Response map[string]types.SnapshotLifecycle + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/get_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/get_stats.go new file mode 100644 index 000000000..586c7ef89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/get_stats.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot +// lifecycle management. 
+package getstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetStats type alias for index. +type NewGetStats func() *GetStats + +// NewGetStatsFunc returns a new instance of GetStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetStatsFunc(tp elastictransport.Interface) NewGetStats { + return func() *GetStats { + n := New(tp) + + return n + } +} + +// Get snapshot lifecycle management statistics. +// Get global and policy-level statistics about actions taken by snapshot +// lifecycle management. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats +func New(tp elastictransport.Interface) *GetStats { + r := &GetStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
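getlifecycle, completed above together with its response.go, is the one endpoint in this batch whose Do returns a value type rather than a pointer: Response is a map[string]types.SnapshotLifecycle keyed by policy id, and omitting PolicyId lists every policy. A sketch under the same client assumptions; the Version field on types.SnapshotLifecycle is assumed from the wider typed API rather than shown in this diff:

package slmexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// listPolicies prints each policy id with its version.
// GET /_slm/policy, or GET /_slm/policy/<id> when PolicyId is set.
func listPolicies(ctx context.Context, es *elasticsearch.TypedClient) error {
	policies, err := es.Slm.GetLifecycle().Do(ctx)
	if err != nil {
		return err
	}
	for id, policy := range policies {
		fmt.Printf("%s (version %d)\n", id, policy.Version)
	}
	return nil
}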
+func (r GetStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.get_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_stats") + if reader := instrument.RecordRequestBody(ctx, "slm.get_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getstats.Response +func (r GetStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
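Each builder in this diff also generates an IsSuccess variant, visible above for the earlier endpoints: it drains and closes the body, then collapses the status code so that 2xx becomes (true, nil), 404 becomes (false, nil), and anything else becomes (false, err). That makes cheap liveness-style checks one-liners. A sketch, again assuming the typed-client Slm accessor:

package slmexample

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9"
)

// slmStatsReachable reports whether GET /_slm/stats answered with a 2xx,
// without decoding the response body at all.
func slmStatsReachable(ctx context.Context, es *elasticsearch.TypedClient) (bool, error) {
	return es.Slm.GetStats().IsSuccess(ctx)
}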
+func (r GetStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetStats headers map. +func (r *GetStats) Header(key, value string) *GetStats { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an error. +// API name: master_timeout +func (r *GetStats) MasterTimeout(duration string) *GetStats { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *GetStats) Timeout(duration string) *GetStats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetStats) ErrorTrace(errortrace bool) *GetStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetStats) FilterPath(filterpaths ...string) *GetStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetStats) Human(human bool) *GetStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetStats) Pretty(pretty bool) *GetStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/response.go new file mode 100644 index 000000000..2903adfe5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats/response.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L24-L37 +type Response struct { + PolicyStats []types.SnapshotPolicyStats `json:"policy_stats"` + RetentionDeletionTime types.Duration `json:"retention_deletion_time"` + RetentionDeletionTimeMillis int64 `json:"retention_deletion_time_millis"` + RetentionFailed int64 `json:"retention_failed"` + RetentionRuns int64 `json:"retention_runs"` + RetentionTimedOut int64 `json:"retention_timed_out"` + TotalSnapshotDeletionFailures int64 `json:"total_snapshot_deletion_failures"` + TotalSnapshotsDeleted int64 `json:"total_snapshots_deleted"` + TotalSnapshotsFailed int64 `json:"total_snapshots_failed"` + TotalSnapshotsTaken int64 `json:"total_snapshots_taken"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/get_status.go new file mode 100644 index 000000000..3dee2b2bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/get_status.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the snapshot lifecycle management status. 
+package getstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetStatus type alias for index. +type NewGetStatus func() *GetStatus + +// NewGetStatusFunc returns a new instance of GetStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { + return func() *GetStatus { + n := New(tp) + + return n + } +} + +// Get the snapshot lifecycle management status. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status +func New(tp elastictransport.Interface) *GetStatus { + r := &GetStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetStatus) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
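The getstats response struct in the response.go hunk above aggregates the cluster-wide counters plus per-policy stats. A sketch that surfaces the headline numbers; every field used here is declared verbatim in that hunk, while the client accessor is assumed as before:

package slmexample

import (
	"context"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9"
)

// printSnapshotCounters dumps the cluster-wide SLM counters (GET /_slm/stats).
func printSnapshotCounters(ctx context.Context, es *elasticsearch.TypedClient) error {
	stats, err := es.Slm.GetStats().Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("taken=%d failed=%d deleted=%d retention_runs=%d\n",
		stats.TotalSnapshotsTaken,
		stats.TotalSnapshotsFailed,
		stats.TotalSnapshotsDeleted,
		stats.RetentionRuns)
	return nil
}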
+func (r GetStatus) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.get_status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.get_status") + if reader := instrument.RecordRequestBody(ctx, "slm.get_status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.get_status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetStatus query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getstatus.Response +func (r GetStatus) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
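Because every builder holds nothing but an elastictransport.Interface, which the Perform implementations above exercise through its single Perform(*http.Request) (*http.Response, error) method, these endpoints can be unit-tested without a cluster by handing New a stub. A sketch against the getstatus package from this diff; the canned JSON matches the operation_mode field declared in the getstatus response.go hunk later in this diff, and the enum's String method is assumed from the wider typed API:

package slmexample

import (
	"context"
	"io"
	"net/http"
	"strings"

	"github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus"
)

// stubTransport satisfies elastictransport.Interface with a canned response.
type stubTransport struct{}

func (stubTransport) Perform(req *http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"Content-Type": []string{"application/json"}},
		Body:       io.NopCloser(strings.NewReader(`{"operation_mode":"RUNNING"}`)),
		Request:    req,
	}, nil
}

// statusOffline decodes a GetStatus response without any live cluster.
func statusOffline(ctx context.Context) (string, error) {
	res, err := getstatus.New(stubTransport{}).Do(ctx)
	if err != nil {
		return "", err
	}
	return res.OperationMode.String(), nil
}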
+func (r GetStatus) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.get_status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetStatus query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetStatus headers map. +func (r *GetStatus) Header(key, value string) *GetStatus { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *GetStatus) MasterTimeout(duration string) *GetStatus { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *GetStatus) Timeout(duration string) *GetStatus { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetStatus) ErrorTrace(errortrace bool) *GetStatus { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetStatus) FilterPath(filterpaths ...string) *GetStatus { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetStatus) Human(human bool) *GetStatus { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *GetStatus) Pretty(pretty bool) *GetStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/response.go new file mode 100644 index 000000000..204e48d61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getstatus + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode" +) + +// Response holds the response body struct for the package getstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 +type Response struct { + OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/put_lifecycle.go new file mode 100644 index 000000000..a4ef41afd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/put_lifecycle.go @@ -0,0 +1,457 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a policy. +// Create or update a snapshot lifecycle policy. 
+// If the policy already exists, this request increments the policy version. +// Only the latest version of a policy is stored. +package putlifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + policyidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + policyid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutLifecycle type alias for index. +type NewPutLifecycle func(policyid string) *PutLifecycle + +// NewPutLifecycleFunc returns a new instance of PutLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { + return func(policyid string) *PutLifecycle { + n := New(tp) + + n._policyid(policyid) + + return n + } +} + +// Create or update a policy. +// Create or update a snapshot lifecycle policy. +// If the policy already exists, this request increments the policy version. +// Only the latest version of a policy is stored. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle +func New(tp elastictransport.Interface) *PutLifecycle { + r := &PutLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutLifecycle) Raw(raw io.Reader) *PutLifecycle { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutLifecycle) Request(req *Request) *PutLifecycle { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
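// Sketch (not part of the generated file): for a policy ID of "daily-snapshots",
// the HttpRequest method below assembles a request equivalent to
//
//	PUT /_slm/policy/daily-snapshots
//	{"name":"<daily-snap-{now/d}>","schedule":"0 30 1 * * ?","repository":"my_repository"}
//
// where the JSON body is the serialised Request from this package's request.go,
// and both Content-Type and Accept are set to the compatible-with=9 media type.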
+func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutLifecycle: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == policyidMask: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("policy") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "policyid", r.policyid) + } + path.WriteString(r.policyid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
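// Illustrative usage, a minimal sketch assuming the typed client built by this
// module's API index exposes the builder as es.Slm.PutLifecycle (the setter
// names are the ones defined later in this file):
//
//	res, err := es.Slm.PutLifecycle("daily-snapshots").
//		Schedule("0 30 1 * * ?").
//		Repository("my_repository").
//		Name("<daily-snap-{now/d}>").
//		Do(ctx)
//	if err == nil && res.Acknowledged {
//		// the policy was stored (or its version was incremented)
//	}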
+func (r PutLifecycle) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.put_lifecycle") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.put_lifecycle") + if reader := instrument.RecordRequestBody(ctx, "slm.put_lifecycle", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.put_lifecycle") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutLifecycle query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putlifecycle.Response +func (r PutLifecycle) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.put_lifecycle") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutLifecycle headers map. +func (r *PutLifecycle) Header(key, value string) *PutLifecycle { + r.headers.Set(key, value) + + return r +} + +// PolicyId The identifier for the snapshot lifecycle policy you want to create or +// update. +// API Name: policyid +func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { + r.paramSet |= policyidMask + r.policyid = policyid + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
+// API name: master_timeout +func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutLifecycle) ErrorTrace(errortrace bool) *PutLifecycle { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutLifecycle) FilterPath(filterpaths ...string) *PutLifecycle { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutLifecycle) Human(human bool) *PutLifecycle { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutLifecycle) Pretty(pretty bool) *PutLifecycle { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Configuration for each snapshot created by the policy. +// API name: config +func (r *PutLifecycle) Config(config types.ConfigurationVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Config = config.ConfigurationCaster() + + return r +} + +// Name automatically assigned to each snapshot created by the policy. Date math +// is supported. To prevent conflicting snapshot names, a UUID is automatically +// appended to each snapshot name. +// API name: name +func (r *PutLifecycle) Name(name string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Name = &name + + return r +} + +// Repository used to store snapshots created by this policy. This repository +// must exist prior to the policy’s creation. You can create a repository using +// the snapshot repository API. +// API name: repository +func (r *PutLifecycle) Repository(repository string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Repository = &repository + + return r +} + +// Retention rules used to retain and delete snapshots created by the policy. 
+// API name: retention +func (r *PutLifecycle) Retention(retention types.RetentionVariant) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Retention = retention.RetentionCaster() + + return r +} + +// Periodic or absolute schedule at which the policy creates snapshots. SLM +// applies schedule changes immediately. +// API name: schedule +func (r *PutLifecycle) Schedule(cronexpression string) *PutLifecycle { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Schedule = &cronexpression + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/request.go new file mode 100644 index 000000000..1085e015c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/request.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putlifecycle + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L89 +type Request struct { + + // Config Configuration for each snapshot created by the policy. + Config *types.Configuration `json:"config,omitempty"` + // Name Name automatically assigned to each snapshot created by the policy. Date math + // is supported. To prevent conflicting snapshot names, a UUID is automatically + // appended to each snapshot name. + Name *string `json:"name,omitempty"` + // Repository Repository used to store snapshots created by this policy. This repository + // must exist prior to the policy’s creation. You can create a repository using + // the snapshot repository API. + Repository *string `json:"repository,omitempty"` + // Retention Retention rules used to retain and delete snapshots created by the policy. + Retention *types.Retention `json:"retention,omitempty"` + // Schedule Periodic or absolute schedule at which the policy creates snapshots. SLM + // applies schedule changes immediately. 
+ Schedule *string `json:"schedule,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putlifecycle request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "config": + if err := dec.Decode(&s.Config); err != nil { + return fmt.Errorf("%s | %w", "Config", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Repository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = &o + + case "retention": + if err := dec.Decode(&s.Retention); err != nil { + return fmt.Errorf("%s | %w", "Retention", err) + } + + case "schedule": + if err := dec.Decode(&s.Schedule); err != nil { + return fmt.Errorf("%s | %w", "Schedule", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/response.go new file mode 100644 index 000000000..1294a8286 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putlifecycle + +// Response holds the response body struct for the package putlifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/response.go new file mode 100644 index 000000000..565a2fcc0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package start + +// Response holds the response body struct for the package start +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/start.go new file mode 100644 index 000000000..7b0aa1105 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/start/start.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. 
+// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. +package start + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Start struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStart type alias for index. +type NewStart func() *Start + +// NewStartFunc returns a new instance of Start with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartFunc(tp elastictransport.Interface) NewStart { + return func() *Start { + n := New(tp) + + return n + } +} + +// Start snapshot lifecycle management. +// Snapshot lifecycle management (SLM) starts automatically when a cluster is +// formed. +// Manually starting SLM is necessary only if it has been stopped using the stop +// SLM API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start +func New(tp elastictransport.Interface) *Start { + r := &Start{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Start) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
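// Illustrative usage, a sketch assuming the typed client routes this package
// through es.Slm.Start:
//
//	res, err := es.Slm.Start().MasterTimeout("30s").Do(ctx)
//	// On success res.Acknowledged is true; failures come back as err.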
+func (r Start) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.start") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.start") + if reader := instrument.RecordRequestBody(ctx, "slm.start", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.start") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Start query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a start.Response +func (r Start) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
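// IsSuccess (below) has three outcomes: (true, nil) for any 2xx status,
// (false, nil) for a 404, and (false, err) for transport failures or any other
// non-2xx status. A hypothetical caller therefore treats ok == false with a
// nil error as "reachable but not found" rather than as a hard failure:
//
//	ok, err := es.Slm.Start().IsSuccess(ctx)
//	if err != nil {
//		// transport error or non-404 HTTP error
//	} else if !ok {
//		// the endpoint answered 404
//	}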
+func (r Start) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Start query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Start headers map. +func (r *Start) Header(key, value string) *Start { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *Start) Timeout(duration string) *Start { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Start) ErrorTrace(errortrace bool) *Start { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Start) FilterPath(filterpaths ...string) *Start { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Start) Human(human bool) *Start { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Start) Pretty(pretty bool) *Start { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/response.go new file mode 100644 index 000000000..5ec0b5893 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stop + +// Response holds the response body struct for the package stop +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/stop.go new file mode 100644 index 000000000..a30d6b4f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop/stop.go @@ -0,0 +1,367 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. +// Use the get snapshot lifecycle management status API to see if SLM is +// running. 
+package stop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStop type alias for index. +type NewStop func() *Stop + +// NewStopFunc returns a new instance of Stop with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopFunc(tp elastictransport.Interface) NewStop { + return func() *Stop { + n := New(tp) + + return n + } +} + +// Stop snapshot lifecycle management. +// Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +// This API is useful when you are performing maintenance on a cluster and need +// to prevent SLM from performing any actions on your data streams or indices. +// Stopping SLM does not stop any snapshots that are in progress. +// You can manually trigger snapshots with the run snapshot lifecycle policy API +// even if SLM is stopped. +// +// The API returns a response as soon as the request is acknowledged, but the +// plugin might continue to run until in-progress operations complete and it can +// be safely stopped. +// Use the get snapshot lifecycle management status API to see if SLM is +// running. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop +func New(tp elastictransport.Interface) *Stop { + r := &Stop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_slm") + path.WriteString("/") + path.WriteString("stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
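// Because stopping is asynchronous (Do only acknowledges the request), a
// caller that must wait for SLM to halt can poll the sibling getstatus
// package, as the comment above suggests. A sketch, assuming the enum package
// exposes a Stopped value:
//
//	for {
//		status, err := es.Slm.GetStatus().Do(ctx)
//		if err != nil || status.OperationMode == lifecycleoperationmode.Stopped {
//			break
//		}
//		time.Sleep(time.Second)
//	}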
+func (r Stop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "slm.stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "slm.stop") + if reader := instrument.RecordRequestBody(ctx, "slm.stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "slm.stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stop.Response +func (r Stop) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
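// When Do (above) gets a non-2xx response it decodes the body into a
// *types.ElasticsearchError and returns it as the error, so the HTTP status
// survives for the caller. A sketch, assuming *types.ElasticsearchError
// satisfies the error interface (as its use as a return value here implies):
//
//	if _, err := es.Slm.Stop().Do(ctx); err != nil {
//		var esErr *types.ElasticsearchError
//		if errors.As(err, &esErr) {
//			log.Printf("slm.stop failed, status %d", esErr.Status)
//		}
//	}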
+func (r Stop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "slm.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stop headers map. +func (r *Stop) Header(key, value string) *Stop { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *Stop) Timeout(duration string) *Stop { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stop) ErrorTrace(errortrace bool) *Stop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stop) FilterPath(filterpaths ...string) *Stop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stop) Human(human bool) *Stop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Stop) Pretty(pretty bool) *Stop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/cleanup_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/cleanup_repository.go new file mode 100644 index 000000000..6fc4a25e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/cleanup_repository.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. +package cleanuprepository + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CleanupRepository struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCleanupRepository type alias for index. +type NewCleanupRepository func(repository string) *CleanupRepository + +// NewCleanupRepositoryFunc returns a new instance of CleanupRepository with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCleanupRepositoryFunc(tp elastictransport.Interface) NewCleanupRepository { + return func(repository string) *CleanupRepository { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Clean up the snapshot repository. +// Trigger the review of the contents of a snapshot repository and delete any +// stale data not referenced by existing snapshots. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository +func New(tp elastictransport.Interface) *CleanupRepository { + r := &CleanupRepository{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CleanupRepository) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_cleanup") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
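// Illustrative usage, a sketch assuming the typed client exposes this builder
// as es.Snapshot.CleanupRepository:
//
//	res, err := es.Snapshot.CleanupRepository("my_repository").Do(ctx)
//	// res.Results carries the cleanup statistics reported by the cluster.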
+func (r CleanupRepository) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.cleanup_repository") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.cleanup_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.cleanup_repository", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.cleanup_repository") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CleanupRepository query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cleanuprepository.Response +func (r CleanupRepository) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.cleanup_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
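// A note on the io.Copy(io.Discard, res.Body) in IsSuccess below: draining the
// response body before closing it lets the underlying net/http transport keep
// the connection alive for reuse instead of discarding it.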
+func (r CleanupRepository) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.cleanup_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the CleanupRepository query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the CleanupRepository headers map. +func (r *CleanupRepository) Header(key, value string) *CleanupRepository { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the snapshot repository to clean up. +// API Name: repository +func (r *CleanupRepository) _repository(repository string) *CleanupRepository { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1` +// API name: master_timeout +func (r *CleanupRepository) MasterTimeout(duration string) *CleanupRepository { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *CleanupRepository) Timeout(duration string) *CleanupRepository { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CleanupRepository) ErrorTrace(errortrace bool) *CleanupRepository { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CleanupRepository) FilterPath(filterpaths ...string) *CleanupRepository { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *CleanupRepository) Human(human bool) *CleanupRepository { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CleanupRepository) Pretty(pretty bool) *CleanupRepository { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/response.go new file mode 100644 index 000000000..e95f9375e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package cleanuprepository + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package cleanuprepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 +type Response struct { + + // Results Statistics for cleanup operations. + Results types.CleanupRepositoryResults `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/clone.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/clone.go new file mode 100644 index 000000000..4561de086 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/clone.go @@ -0,0 +1,426 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Clone a snapshot.
+// Clone part or all of a snapshot into another snapshot in the same repository.
+package clone
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	repositoryMask = iota + 1
+
+	snapshotMask
+
+	targetsnapshotMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Clone struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	repository     string
+	snapshot       string
+	targetsnapshot string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewClone type alias for index.
+type NewClone func(repository, snapshot, targetsnapshot string) *Clone
+
+// NewCloneFunc returns a new instance of Clone with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewCloneFunc(tp elastictransport.Interface) NewClone {
+	return func(repository, snapshot, targetsnapshot string) *Clone {
+		n := New(tp)
+
+		n._repository(repository)
+
+		n._snapshot(snapshot)
+
+		n._targetsnapshot(targetsnapshot)
+
+		return n
+	}
+}
+
+// Clone a snapshot.
+// Clone part or all of a snapshot into another snapshot in the same repository.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone
+func New(tp elastictransport.Interface) *Clone {
+	r := &Clone{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *Clone) Raw(raw io.Reader) *Clone {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *Clone) Request(req *Request) *Clone {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Clone: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask|targetsnapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + path.WriteString("/") + path.WriteString("_clone") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "targetsnapshot", r.targetsnapshot) + } + path.WriteString(r.targetsnapshot) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Clone) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.clone") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.clone") + if reader := instrument.RecordRequestBody(ctx, "snapshot.clone", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.clone") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Clone query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clone.Response +func (r Clone) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.clone") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Clone headers map. +func (r *Clone) Header(key, value string) *Clone { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the snapshot repository that both source and target snapshot +// belong to. +// API Name: repository +func (r *Clone) _repository(repository string) *Clone { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Snapshot The source snapshot name. +// API Name: snapshot +func (r *Clone) _snapshot(snapshot string) *Clone { + r.paramSet |= snapshotMask + r.snapshot = snapshot + + return r +} + +// TargetSnapshot The target snapshot name. 
+// API Name: targetsnapshot +func (r *Clone) _targetsnapshot(targetsnapshot string) *Clone { + r.paramSet |= targetsnapshotMask + r.targetsnapshot = targetsnapshot + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Clone) MasterTimeout(duration string) *Clone { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Clone) ErrorTrace(errortrace bool) *Clone { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Clone) FilterPath(filterpaths ...string) *Clone { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Clone) Human(human bool) *Clone { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Clone) Pretty(pretty bool) *Clone { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// A comma-separated list of indices to include in the snapshot. +// Multi-target syntax is supported. +// API name: indices +func (r *Clone) Indices(indices string) *Clone { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Indices = indices + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/request.go new file mode 100644 index 000000000..d2e70fc4c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/request.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
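Under the same assumed setup as the earlier sketch (a configured *elasticsearch.TypedClient named `es`; all names illustrative), cloning a subset of one snapshot's indices into a new snapshot is a single fluent chain over the clone API above:

	res, err := es.Snapshot.Clone("my-backups", "snap-2024-01-01", "snap-2024-01-01-clone").
		Indices("logs-*,metrics-*"). // one comma-separated string; multi-target syntax is supported
		MasterTimeout("30s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)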
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clone + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package clone +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L70 +type Request struct { + + // Indices A comma-separated list of indices to include in the snapshot. + // Multi-target syntax is supported. + Indices string `json:"indices"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Clone request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/response.go new file mode 100644 index 000000000..4908f85e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clone + +// Response holds the response body struct for the package clone +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/create.go new file mode 100644 index 000000000..938ec3fc9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/create.go @@ -0,0 +1,532 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. +package create + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +const ( + repositoryMask = iota + 1 + + snapshotMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Create struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + repository string + snapshot string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCreate type alias for index. +type NewCreate func(repository, snapshot string) *Create + +// NewCreateFunc returns a new instance of Create with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCreateFunc(tp elastictransport.Interface) NewCreate { + return func(repository, snapshot string) *Create { + n := New(tp) + + n._repository(repository) + + n._snapshot(snapshot) + + return n + } +} + +// Create a snapshot. +// Take a snapshot of a cluster or of data streams and indices. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create +func New(tp elastictransport.Interface) *Create { + r := &Create{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Create) Raw(raw io.Reader) *Create { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Create) Request(req *Request) *Create { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Create: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Create) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "snapshot.create")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "snapshot.create")
+		if reader := instrument.RecordRequestBody(ctx, "snapshot.create", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "snapshot.create")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the Create query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a create.Response
+func (r Create) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "snapshot.create")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header set a key, value pair in the Create headers map.
+func (r *Create) Header(key, value string) *Create {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Repository The name of the repository for the snapshot.
+// API Name: repository
+func (r *Create) _repository(repository string) *Create {
+	r.paramSet |= repositoryMask
+	r.repository = repository
+
+	return r
+}
+
+// Snapshot The name of the snapshot.
+// It supports date math.
+// It must be unique in the repository.
+// API Name: snapshot
+func (r *Create) _snapshot(snapshot string) *Create {
+	r.paramSet |= snapshotMask
+	r.snapshot = snapshot
+
+	return r
+}
+
+// MasterTimeout The period to wait for a connection to the master node.
+// If no response is received before the timeout expires, the request fails and
+// returns an error.
+// API name: master_timeout +func (r *Create) MasterTimeout(duration string) *Create { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request returns a response when the snapshot is complete. +// If `false`, the request returns a response when the snapshot initializes. +// API name: wait_for_completion +func (r *Create) WaitForCompletion(waitforcompletion bool) *Create { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Create) ErrorTrace(errortrace bool) *Create { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Create) FilterPath(filterpaths ...string) *Create { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Create) Human(human bool) *Create { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Create) Pretty(pretty bool) *Create { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Determines how wildcard patterns in the `indices` parameter match data +// streams and indices. +// It supports comma-separated values such as `open,hidden`. +// API name: expand_wildcards +func (r *Create) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ExpandWildcards = expandwildcards + + return r +} + +// The feature states to include in the snapshot. +// Each feature state includes one or more system indices containing related +// data. +// You can view a list of eligible features using the get features API. +// +// If `include_global_state` is `true`, all current feature states are included +// by default. +// If `include_global_state` is `false`, no feature states are included by +// default. +// +// Note that specifying an empty array will result in the default behavior. +// To exclude all feature states, regardless of the `include_global_state` +// value, specify an array with only the value `none` (`["none"]`). +// API name: feature_states +func (r *Create) FeatureStates(featurestates ...string) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + + r.req.FeatureStates = append(r.req.FeatureStates, v) + + } + return r +} + +// If `true`, the request ignores data streams and indices in `indices` that are +// missing or closed. +// If `false`, the request returns an error for any data stream or index that is +// missing or closed. 
+// API name: ignore_unavailable +func (r *Create) IgnoreUnavailable(ignoreunavailable bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IgnoreUnavailable = &ignoreunavailable + + return r +} + +// If `true`, the current cluster state is included in the snapshot. +// The cluster state includes persistent cluster settings, composable index +// templates, legacy index templates, ingest pipelines, and ILM policies. +// It also includes data stored in system indices, such as Watches and task +// records (configurable via `feature_states`). +// API name: include_global_state +func (r *Create) IncludeGlobalState(includeglobalstate bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeGlobalState = &includeglobalstate + + return r +} + +// A comma-separated list of data streams and indices to include in the +// snapshot. +// It supports a multi-target syntax. +// The default is an empty array (`[]`), which includes all regular data streams +// and regular indices. +// To exclude all data streams and indices, use `-*`. +// +// You can't use this parameter to include or exclude system indices or system +// data streams from a snapshot. +// Use `feature_states` instead. +// API name: indices +func (r *Create) Indices(indices ...string) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Indices = indices + + return r +} + +// Arbitrary metadata to the snapshot, such as a record of who took the +// snapshot, why it was taken, or any other useful data. +// It can have any contents but it must be less than 1024 bytes. +// This information is not automatically generated by Elasticsearch. +// API name: metadata +func (r *Create) Metadata(metadata types.MetadataVariant) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// If `true`, it enables you to restore a partial snapshot of indices with +// unavailable shards. +// Only shards that were successfully included in the snapshot will be restored. +// All missing shards will be recreated as empty. +// +// If `false`, the entire restore operation will fail if one or more indices +// included in the snapshot do not have all primary shards available. +// API name: partial +func (r *Create) Partial(partial bool) *Create { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Partial = &partial + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/request.go new file mode 100644 index 000000000..6fa7934d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/request.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
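Continuing the same assumed setup (`es` is a configured *elasticsearch.TypedClient; all names and values illustrative), the create API above exposes its body fields through fluent setters, or accepts raw JSON through the generated FromJSON helper, where `create` is this vendored package imported from github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create:

	res, err := es.Snapshot.Create("my-backups", "snap-2024-01-02").
		WaitForCompletion(true).
		Indices("logs-*").          // variadic; the default empty list snapshots all regular indices
		IncludeGlobalState(false).
		Partial(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if res.Snapshot != nil { // populated when wait_for_completion=true
		fmt.Printf("snapshot info: %+v\n", res.Snapshot)
	}

	// Or build the body from raw JSON via the generated helper and pass it explicitly:
	req, err := create.NewRequest().FromJSON(`{"indices": ["logs-*"], "partial": true}`)
	if err != nil {
		log.Fatal(err)
	}
	res2, err := es.Snapshot.Create("my-backups", "snap-2024-01-03").
		Request(req).
		Do(context.Background())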
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +// Request holds the request body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L126 +type Request struct { + + // ExpandWildcards Determines how wildcard patterns in the `indices` parameter match data + // streams and indices. + // It supports comma-separated values such as `open,hidden`. + ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"` + // FeatureStates The feature states to include in the snapshot. + // Each feature state includes one or more system indices containing related + // data. + // You can view a list of eligible features using the get features API. + // + // If `include_global_state` is `true`, all current feature states are included + // by default. + // If `include_global_state` is `false`, no feature states are included by + // default. + // + // Note that specifying an empty array will result in the default behavior. + // To exclude all feature states, regardless of the `include_global_state` + // value, specify an array with only the value `none` (`["none"]`). + FeatureStates []string `json:"feature_states,omitempty"` + // IgnoreUnavailable If `true`, the request ignores data streams and indices in `indices` that are + // missing or closed. + // If `false`, the request returns an error for any data stream or index that is + // missing or closed. + IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` + // IncludeGlobalState If `true`, the current cluster state is included in the snapshot. + // The cluster state includes persistent cluster settings, composable index + // templates, legacy index templates, ingest pipelines, and ILM policies. + // It also includes data stored in system indices, such as Watches and task + // records (configurable via `feature_states`). + IncludeGlobalState *bool `json:"include_global_state,omitempty"` + // Indices A comma-separated list of data streams and indices to include in the + // snapshot. + // It supports a multi-target syntax. + // The default is an empty array (`[]`), which includes all regular data streams + // and regular indices. + // To exclude all data streams and indices, use `-*`. + // + // You can't use this parameter to include or exclude system indices or system + // data streams from a snapshot. + // Use `feature_states` instead. + Indices []string `json:"indices,omitempty"` + // Metadata Arbitrary metadata to the snapshot, such as a record of who took the + // snapshot, why it was taken, or any other useful data. + // It can have any contents but it must be less than 1024 bytes. 
+ // This information is not automatically generated by Elasticsearch. + Metadata types.Metadata `json:"metadata,omitempty"` + // Partial If `true`, it enables you to restore a partial snapshot of indices with + // unavailable shards. + // Only shards that were successfully included in the snapshot will be restored. + // All missing shards will be recreated as empty. + // + // If `false`, the entire restore operation will fail if one or more indices + // included in the snapshot do not have all primary shards available. + Partial *bool `json:"partial,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Create request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expand_wildcards": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &expandwildcard.ExpandWildcard{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + + s.ExpandWildcards = append(s.ExpandWildcards, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + } + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return fmt.Errorf("%s | %w", "FeatureStates", err) + } + + case "ignore_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnavailable", err) + } + s.IgnoreUnavailable = &value + case bool: + s.IgnoreUnavailable = &v + } + + case "include_global_state": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeGlobalState", err) + } + s.IncludeGlobalState = &value + case bool: + s.IncludeGlobalState = &v + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "partial": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Partial", err) + } + s.Partial = &value + case bool: + s.Partial = &v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/response.go new file mode 
100644 index 000000000..ec6ed1c2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package create + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package create +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 +type Response struct { + + // Accepted Equals `true` if the snapshot was accepted. Present when the request had + // `wait_for_completion` set to `false` + Accepted *bool `json:"accepted,omitempty"` + // Snapshot Snapshot information. Present when the request had `wait_for_completion` set + // to `true` + Snapshot *types.SnapshotInfo `json:"snapshot,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/create_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/create_repository.go new file mode 100644 index 000000000..ed6eac3c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/create_repository.go @@ -0,0 +1,414 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a snapshot repository. +// IMPORTANT: If you are migrating searchable snapshots, the repository name +// must be identical in the source and destination clusters. 
+// To register a snapshot repository, the cluster's global metadata must be
+// writeable.
+// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+// and `cluster.blocks.read_only_allow_delete` settings) that prevent write
+// access.
+//
+// Several options for this API can be specified using a query parameter or a
+// request body parameter.
+// If both parameters are specified, only the query parameter is used.
+package createrepository
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	repositoryMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type CreateRepository struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	repository string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewCreateRepository type alias for index.
+type NewCreateRepository func(repository string) *CreateRepository
+
+// NewCreateRepositoryFunc returns a new instance of CreateRepository with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewCreateRepositoryFunc(tp elastictransport.Interface) NewCreateRepository {
+	return func(repository string) *CreateRepository {
+		n := New(tp)
+
+		n._repository(repository)
+
+		return n
+	}
+}
+
+// Create or update a snapshot repository.
+// IMPORTANT: If you are migrating searchable snapshots, the repository name
+// must be identical in the source and destination clusters.
+// To register a snapshot repository, the cluster's global metadata must be
+// writeable.
+// Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+// and `cluster.blocks.read_only_allow_delete` settings) that prevent write
+// access.
+//
+// Several options for this API can be specified using a query parameter or a
+// request body parameter.
+// If both parameters are specified, only the query parameter is used.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository
+func New(tp elastictransport.Interface) *CreateRepository {
+	r := &CreateRepository{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request
+// If specified Raw takes precedence on Request method.
+func (r *CreateRepository) Raw(raw io.Reader) *CreateRepository {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows to set the request property with the appropriate payload.
+func (r *CreateRepository) Request(req *Request) *CreateRepository {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *CreateRepository) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for CreateRepository: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CreateRepository) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.create_repository") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.create_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.create_repository", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.create_repository") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the CreateRepository query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createrepository.Response +func (r CreateRepository) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.create_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the CreateRepository headers map. +func (r *CreateRepository) Header(key, value string) *CreateRepository { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the snapshot repository to register or update. +// API Name: repository +func (r *CreateRepository) _repository(repository string) *CreateRepository { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
+// API name: master_timeout +func (r *CreateRepository) MasterTimeout(duration string) *CreateRepository { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *CreateRepository) Timeout(duration string) *CreateRepository { + r.values.Set("timeout", duration) + + return r +} + +// Verify If `true`, the request verifies the repository is functional on all master +// and data nodes in the cluster. +// If `false`, this verification is skipped. +// You can also perform this verification with the verify snapshot repository +// API. +// API name: verify +func (r *CreateRepository) Verify(verify bool) *CreateRepository { + r.values.Set("verify", strconv.FormatBool(verify)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *CreateRepository) ErrorTrace(errortrace bool) *CreateRepository { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *CreateRepository) FilterPath(filterpaths ...string) *CreateRepository { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *CreateRepository) Human(human bool) *CreateRepository { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *CreateRepository) Pretty(pretty bool) *CreateRepository { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/request.go new file mode 100644 index 000000000..ca66fdd04 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
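Because this package's Request is a type alias over the repository union (see the request.go alias just below), the simplest illustration supplies the body as raw JSON through Raw. A sketch under the same assumed setup (`es` is a configured *elasticsearch.TypedClient; the repository name and filesystem location are illustrative):

	body := strings.NewReader(`{"type": "fs", "settings": {"location": "/mnt/backups"}}`)
	res, err := es.Snapshot.CreateRepository("my-backups").
		Verify(true). // check the repository on all master and data nodes
		Raw(body).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)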
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createrepository + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package createrepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L79 +type Request = types.Repository diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/response.go new file mode 100644 index 000000000..4f8af286b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package createrepository + +// Response holds the response body struct for the package createrepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/delete.go new file mode 100644 index 000000000..0c6b5ad5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/delete.go @@ -0,0 +1,385 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete snapshots. +package delete + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 + + snapshotMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + snapshot string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDelete type alias for index. +type NewDelete func(repository, snapshot string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(repository, snapshot string) *Delete { + n := New(tp) + + n._repository(repository) + + n._snapshot(snapshot) + + return n + } +} + +// Delete snapshots. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
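+// As an illustration (names hypothetical), deleting snapshot `my_snap` from
+// repository `my_repo` builds the path `/_snapshot/my_repo/my_snap` with the
+// DELETE method.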
+func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Delete) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.delete") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.delete") + if reader := instrument.RecordRequestBody(ctx, "snapshot.delete", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.delete") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Delete query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + 
} + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Delete) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.delete") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Delete query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. +func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository to delete a snapshot from. +// API Name: repository +func (r *Delete) _repository(repository string) *Delete { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Snapshot A comma-separated list of snapshot names to delete. +// It also accepts wildcards (`*`). +// API Name: snapshot +func (r *Delete) _snapshot(snapshot string) *Delete { + r.paramSet |= snapshotMask + r.snapshot = snapshot + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request returns a response when the matching snapshots are all +// deleted. +// If `false`, the request returns a response as soon as the deletes are +// scheduled. +// API name: wait_for_completion +func (r *Delete) WaitForCompletion(waitforcompletion bool) *Delete { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Delete) ErrorTrace(errortrace bool) *Delete { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
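+// As a hypothetical example, `FilterPath("acknowledged")` trims the delete
+// response down to its `acknowledged` field.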
+// API name: filter_path +func (r *Delete) FilterPath(filterpaths ...string) *Delete { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Delete) Human(human bool) *Delete { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Delete) Pretty(pretty bool) *Delete { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/response.go new file mode 100644 index 000000000..85c8b5f41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package delete + +// Response holds the response body struct for the package delete +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/delete_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/delete_repository.go new file mode 100644 index 000000000..e9106441b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/delete_repository.go @@ -0,0 +1,373 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete snapshot repositories. +// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. +package deleterepository + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteRepository struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteRepository type alias for index. +type NewDeleteRepository func(repository string) *DeleteRepository + +// NewDeleteRepositoryFunc returns a new instance of DeleteRepository with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteRepositoryFunc(tp elastictransport.Interface) NewDeleteRepository { + return func(repository string) *DeleteRepository { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Delete snapshot repositories. +// When a repository is unregistered, Elasticsearch removes only the reference +// to the location where the repository is storing the snapshots. +// The snapshots themselves are left untouched and in place. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository +func New(tp elastictransport.Interface) *DeleteRepository { + r := &DeleteRepository{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
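+// As an illustration, unregistering a repository named `my_repo` (a
+// hypothetical name) builds the path `/_snapshot/my_repo` with the DELETE
+// method.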
+func (r *DeleteRepository) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteRepository) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.delete_repository") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.delete_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.delete_repository", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.delete_repository") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteRepository query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deleterepository.Response +func (r DeleteRepository) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.delete_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := 
types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r DeleteRepository) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "snapshot.delete_repository")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteRepository query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteRepository headers map.
+func (r *DeleteRepository) Header(key, value string) *DeleteRepository {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Repository The name of the snapshot repositories to unregister.
+// Wildcard (`*`) patterns are supported.
+// API Name: repository
+func (r *DeleteRepository) _repository(repository string) *DeleteRepository {
+	r.paramSet |= repositoryMask
+	r.repository = repository
+
+	return r
+}
+
+// MasterTimeout The period to wait for the master node.
+// If the master node is not available before the timeout expires, the request
+// fails and returns an error.
+// To indicate that the request should never timeout, set it to `-1`.
+// API name: master_timeout
+func (r *DeleteRepository) MasterTimeout(duration string) *DeleteRepository {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// Timeout The period to wait for a response from all relevant nodes in the cluster
+// after updating the cluster metadata.
+// If no response is received before the timeout expires, the cluster metadata
+// update still applies but the response will indicate that it was not
+// completely acknowledged.
+// To indicate that the request should never timeout, set it to `-1`.
+// API name: timeout
+func (r *DeleteRepository) Timeout(duration string) *DeleteRepository {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteRepository) ErrorTrace(errortrace bool) *DeleteRepository {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
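+// Several filters can be combined, for example
+// `FilterPath("acknowledged", "error")` (an illustrative sketch); the values
+// are joined with commas into a single `filter_path` parameter.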
+// API name: filter_path +func (r *DeleteRepository) FilterPath(filterpaths ...string) *DeleteRepository { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteRepository) Human(human bool) *DeleteRepository { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteRepository) Pretty(pretty bool) *DeleteRepository { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/response.go new file mode 100644 index 000000000..52aa165e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleterepository + +// Response holds the response body struct for the package deleterepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/get.go new file mode 100644 index 000000000..688be15de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/get.go @@ -0,0 +1,542 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get snapshot information. +// +// NOTE: The `after` parameter and `next` field enable you to iterate through +// snapshots with some consistency guarantees regarding concurrent creation or +// deletion of snapshots. +// It is guaranteed that any snapshot that exists at the beginning of the +// iteration and is not concurrently deleted will be seen during the iteration. +// Snapshots concurrently created may be seen during an iteration. +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotstate" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +const ( + repositoryMask = iota + 1 + + snapshotMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + snapshot string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(repository, snapshot string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(repository, snapshot string) *Get { + n := New(tp) + + n._repository(repository) + + n._snapshot(snapshot) + + return n + } +} + +// Get snapshot information. +// +// NOTE: The `after` parameter and `next` field enable you to iterate through +// snapshots with some consistency guarantees regarding concurrent creation or +// deletion of snapshots. +// It is guaranteed that any snapshot that exists at the beginning of the +// iteration and is not concurrently deleted will be seen during the iteration. +// Snapshots concurrently created may be seen during an iteration. 
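+//
+// As a minimal sketch (assuming a configured typed client `es`; the
+// repository name is illustrative), pages can be walked by feeding each
+// response's `next` value back in through `After`:
+//
+//	res, err := es.Snapshot.Get("my_repo", "*").Size(10).Do(ctx)
+//	for err == nil && res.Next != nil {
+//		res, err = es.Snapshot.Get("my_repo", "*").Size(10).After(*res.Next).Do(ctx)
+//	}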
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
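+// The response body is returned unread; callers using Perform directly are
+// responsible for closing it, whereas Do decodes and closes it.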
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.get") + if reader := instrument.RecordRequestBody(ctx, "snapshot.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
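+// The body is drained and closed and only the status code is inspected; a
+// 404 is reported as `(false, nil)` rather than as an error.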
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Repository A comma-separated list of snapshot repository names used to limit the +// request. +// Wildcard (`*`) expressions are supported. +// API Name: repository +func (r *Get) _repository(repository string) *Get { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Snapshot A comma-separated list of snapshot names to retrieve +// Wildcards (`*`) are supported. +// +// * To get information about all snapshots in a registered repository, use a +// wildcard (`*`) or `_all`. +// * To get information about any snapshots that are currently running, use +// `_current`. +// API Name: snapshot +func (r *Get) _snapshot(snapshot string) *Get { + r.paramSet |= snapshotMask + r.snapshot = snapshot + + return r +} + +// After An offset identifier to start pagination from as returned by the next field +// in the response body. +// API name: after +func (r *Get) After(after string) *Get { + r.values.Set("after", after) + + return r +} + +// FromSortValue The value of the current sort column at which to start retrieval. +// It can be a string `snapshot-` or a repository name when sorting by snapshot +// or repository name. +// It can be a millisecond time value or a number when sorting by `index-` or +// shard count. +// API name: from_sort_value +func (r *Get) FromSortValue(fromsortvalue string) *Get { + r.values.Set("from_sort_value", fromsortvalue) + + return r +} + +// IgnoreUnavailable If `false`, the request returns an error for any snapshots that are +// unavailable. +// API name: ignore_unavailable +func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// IndexDetails If `true`, the response includes additional information about each index in +// the snapshot comprising the number of shards in the index, the total size of +// the index in bytes, and the maximum number of segments per shard in the +// index. +// The default is `false`, meaning that this information is omitted. +// API name: index_details +func (r *Get) IndexDetails(indexdetails bool) *Get { + r.values.Set("index_details", strconv.FormatBool(indexdetails)) + + return r +} + +// IndexNames If `true`, the response includes the name of each index in each snapshot. 
+// API name: index_names +func (r *Get) IndexNames(indexnames bool) *Get { + r.values.Set("index_names", strconv.FormatBool(indexnames)) + + return r +} + +// IncludeRepository If `true`, the response includes the repository name in each snapshot. +// API name: include_repository +func (r *Get) IncludeRepository(includerepository bool) *Get { + r.values.Set("include_repository", strconv.FormatBool(includerepository)) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *Get) MasterTimeout(duration string) *Get { + r.values.Set("master_timeout", duration) + + return r +} + +// Order The sort order. +// Valid values are `asc` for ascending and `desc` for descending order. +// The default behavior is ascending order. +// API name: order +func (r *Get) Order(order sortorder.SortOrder) *Get { + r.values.Set("order", order.String()) + + return r +} + +// Offset Numeric offset to start pagination from based on the snapshots matching this +// request. Using a non-zero value for this parameter is mutually exclusive with +// using the after parameter. Defaults to 0. +// API name: offset +func (r *Get) Offset(offset int) *Get { + r.values.Set("offset", strconv.Itoa(offset)) + + return r +} + +// Size The maximum number of snapshots to return. +// The default is 0, which means to return all that match the request without +// limit. +// API name: size +func (r *Get) Size(size int) *Get { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// SlmPolicyFilter Filter snapshots by a comma-separated list of snapshot lifecycle management +// (SLM) policy names that snapshots belong to. +// +// You can use wildcards (`*`) and combinations of wildcards followed by exclude +// patterns starting with `-`. +// For example, the pattern `*,-policy-a-\*` will return all snapshots except +// for those that were created by an SLM policy with a name starting with +// `policy-a-`. +// Note that the wildcard pattern `*` matches all snapshots created by an SLM +// policy but not those snapshots that were not created by an SLM policy. +// To include snapshots that were not created by an SLM policy, you can use the +// special pattern `_none` that will match all snapshots without an SLM policy. +// API name: slm_policy_filter +func (r *Get) SlmPolicyFilter(name string) *Get { + r.values.Set("slm_policy_filter", name) + + return r +} + +// Sort The sort order for the result. +// The default behavior is sorting by snapshot start time stamp. +// API name: sort +func (r *Get) Sort(sort snapshotsort.SnapshotSort) *Get { + r.values.Set("sort", sort.String()) + + return r +} + +// State Only return snapshots with a state found in the given comma-separated list of +// snapshot states. +// The default is all snapshot states. +// API name: state +func (r *Get) State(states ...snapshotstate.SnapshotState) *Get { + tmp := []string{} + for _, item := range states { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("state", strings.Join(tmp, ",")) + + return r +} + +// Verbose If `true`, returns additional information about each snapshot such as the +// version of Elasticsearch which took the snapshot, the start and end times of +// the snapshot, and the number of shards snapshotted. 
+// +// NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, +// `slm_policy_filter`, and `sort` are not supported when you set +// `verbose=false` and the sort order for requests with `verbose=false` is +// undefined. +// API name: verbose +func (r *Get) Verbose(verbose bool) *Get { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/response.go new file mode 100644 index 000000000..0d9475142 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get/response.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/get/SnapshotGetResponse.ts#L25-L47 +type Response struct { + + // Next If the request contained a size limit and there might be more results, a + // `next` field will be added to the response. 
+ // It can be used as the `after` query parameter to fetch additional results. + Next *string `json:"next,omitempty"` + // Remaining The number of remaining snapshots that were not returned due to size limits + // and that can be fetched by additional requests using the `next` field value. + Remaining int `json:"remaining"` + Responses []types.SnapshotResponseItem `json:"responses,omitempty"` + Snapshots []types.SnapshotInfo `json:"snapshots,omitempty"` + // Total The total number of snapshots that match the request when ignoring the size + // limit or `after` query parameter. + Total int `json:"total"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/get_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/get_repository.go new file mode 100644 index 000000000..acada4759 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/get_repository.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get snapshot repository information. +package getrepository + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetRepository struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetRepository type alias for index. +type NewGetRepository func() *GetRepository + +// NewGetRepositoryFunc returns a new instance of GetRepository with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetRepositoryFunc(tp elastictransport.Interface) NewGetRepository { + return func() *GetRepository { + n := New(tp) + + return n + } +} + +// Get snapshot repository information. 
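+// The response is a map keyed by repository name; each entry is decoded into
+// the settings type matching its `type` field (`azure`, `gcs`, `s3`, `fs`,
+// `url`, or `source`).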
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository +func New(tp elastictransport.Interface) *GetRepository { + r := &GetRepository{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetRepository) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_snapshot") + + method = http.MethodGet + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
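+// Unlike Do, no decoding into the Response map takes place here, so the
+// caller owns the response body.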
+func (r GetRepository) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.get_repository") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.get_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.get_repository", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.get_repository") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetRepository query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getrepository.Response +func (r GetRepository) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
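+// A missing repository surfaces as a 404, which is reported as
+// `(false, nil)` rather than as an error.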
+func (r GetRepository) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.get_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetRepository query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetRepository headers map. +func (r *GetRepository) Header(key, value string) *GetRepository { + r.headers.Set(key, value) + + return r +} + +// Repository A comma-separated list of snapshot repository names used to limit the +// request. +// Wildcard (`*`) expressions are supported including combining wildcards with +// exclude patterns starting with `-`. +// +// To get information about all snapshot repositories registered in the cluster, +// omit this parameter or use `*` or `_all`. +// API Name: repository +func (r *GetRepository) Repository(repository string) *GetRepository { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Local If `true`, the request gets information from the local node only. +// If `false`, the request gets information from the master node. +// API name: local +func (r *GetRepository) Local(local bool) *GetRepository { + r.values.Set("local", strconv.FormatBool(local)) + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *GetRepository) MasterTimeout(duration string) *GetRepository { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetRepository) ErrorTrace(errortrace bool) *GetRepository { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetRepository) FilterPath(filterpaths ...string) *GetRepository { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetRepository) Human(human bool) *GetRepository { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". 
Only use +// this option for debugging only. +// API name: pretty +func (r *GetRepository) Pretty(pretty bool) *GetRepository { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/response.go new file mode 100644 index 000000000..f21d43273 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository/response.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getrepository + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getrepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L26 + +type Response map[string]types.Repository + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} + +func (r Response) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + o := make(map[string]any, 0) + dec.Decode(&o) + dec = json.NewDecoder(bytes.NewReader(data)) + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + key := fmt.Sprintf("%s", t) + if target, ok := o[key]; ok { + if t, ok := target.(map[string]any)["type"]; ok { + + switch t { + + case "azure": + oo := types.NewAzureRepository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "gcs": + oo := types.NewGcsRepository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "s3": + oo := types.NewS3Repository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "fs": + oo := types.NewSharedFileSystemRepository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "url": + oo := types.NewReadOnlyUrlRepository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + r[key] = oo + + case "source": + oo := types.NewSourceOnlyRepository() + err := dec.Decode(&oo) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + 
return err + } + r[key] = oo + + } + } + } + + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/repository_analyze.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/repository_analyze.go new file mode 100644 index 000000000..969f0c478 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/repository_analyze.go @@ -0,0 +1,854 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Analyze a snapshot repository. +// +// Performs operations on a snapshot repository in order to check for incorrect +// behaviour. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. +// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. +// This API performs a collection of read and write operations on your +// repository which are designed to detect incorrect behaviour and to measure +// the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. +// Run your first analysis with the default parameter values to check for simple +// problems. +// Some repositories may behave correctly when lightly loaded but incorrectly +// under production-like workloads. +// If the first analysis is successful, run a sequence of increasingly large +// analyses until you encounter a failure or you reach a `blob_count` of at +// least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of +// at least `1tb`, and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. +// Some repositories may behave correctly when accessed by a small number of +// Elasticsearch nodes but incorrectly when accessed concurrently by a +// production-scale cluster. +// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. +// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. 
+// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// Repository analysis triggers conditions that occur only rarely when taking +// snapshots in a production system. +// Snapshotting to unsuitable storage may appear to work correctly most of the +// time despite repository analysis failures. +// However your snapshot data is at risk if you store it in a snapshot +// repository that does not reliably pass repository analysis. +// You can demonstrate that the analysis failure is due to an incompatible +// storage implementation by verifying that Elasticsearch does not detect the +// same problem when analysing the reference implementation of the storage +// protocol you are using. +// For instance, if you are using storage that offers an API which the supplier +// claims to be compatible with AWS S3, verify that repositories in AWS S3 do +// not fail repository analysis. +// This allows you to demonstrate to your storage supplier that a repository +// analysis failure must only be caused by an incompatibility with AWS S3 and +// cannot be attributed to a problem in Elasticsearch. +// Please do not report Elasticsearch issues involving third-party storage +// systems unless you can demonstrate that the same issue exists when analysing +// a repository that uses the reference implementation of the same storage +// protocol. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. +// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. 
Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. +// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. +// You should expect the request parameters and the response format to vary in +// future versions. +// The response exposes implementation details of the analysis which may change +// from version to version. +// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. +// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. +// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. +// The request parameters and response format depend on details of the +// implementation so may also be different in newer versions. +// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. +// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data.
+// If any read returns partial data then the repository does not implement the +// necessary atomicity semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will overwrite the blob while +// its peers are reading it. +// In this case the data read may come from either the original or the +// overwritten blob, but the read operation must not return partial data or a +// mix of data from the two blobs. +// If any of these reads returns partial data or a mix of the two blobs then the +// repository does not implement the necessary atomicity semantics that +// Elasticsearch requires for overwrites. +// +// The executing node will use a variety of different methods to write the blob. +// For instance, where applicable, it will use both single-part and multi-part +// uploads. +// Similarly, the reading nodes will use a variety of different methods to read +// the data back again. +// For instance they may read the entire blob from start to end or may read only +// a subset of the data. +// +// For some blob-level tasks, the executing node will cancel the write before it +// is complete. +// In this case, it still instructs some of the other nodes in the cluster to +// attempt to read the blob but all of these reads must fail to find the blob. +// +// Linearizable registers are special blobs that Elasticsearch manipulates using +// an atomic compare-and-exchange operation. +// This operation ensures correct and strongly-consistent behavior even when the +// blob is accessed by multiple nodes at the same time. +// The detailed implementation of the compare-and-exchange operation on +// linearizable registers varies by repository type. +// Repository analysis verifies that uncontended compare-and-exchange +// operations on a linearizable register blob always succeed. +// Repository analysis also verifies that contended operations either succeed or +// report the contention but do not return incorrect results. +// If an operation fails due to contention, Elasticsearch retries the operation +// until it succeeds. +// Most of the compare-and-exchange operations performed by repository analysis +// atomically increment a counter which is represented as an 8-byte blob. +// Some operations also verify the behavior on small blobs with sizes other than +// 8 bytes. +package repositoryanalyze + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RepositoryAnalyze struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositoryAnalyze type alias for index. +type NewRepositoryAnalyze func(repository string) *RepositoryAnalyze + +// NewRepositoryAnalyzeFunc returns a new instance of RepositoryAnalyze with the provided transport. +// Used in the index of the library, this allows retrieving every API in one place.
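+//
+// A hedged wiring sketch (the transport value tp and the repository name
+// "my_repo" are illustrative assumptions, not part of this file):
+//
+//	analyze := repositoryanalyze.NewRepositoryAnalyzeFunc(tp)
+//	req := analyze("my_repo") // *RepositoryAnalyze with the repository path part set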
+func NewRepositoryAnalyzeFunc(tp elastictransport.Interface) NewRepositoryAnalyze { + return func(repository string) *RepositoryAnalyze { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Analyze a snapshot repository. +// +// Performs operations on a snapshot repository in order to check for incorrect +// behaviour. +// +// There are a large number of third-party storage systems available, not all of +// which are suitable for use as a snapshot repository by Elasticsearch. +// Some storage systems behave incorrectly, or perform poorly, especially when +// accessed concurrently by multiple clients as the nodes of an Elasticsearch +// cluster do. +// This API performs a collection of read and write operations on your +// repository which are designed to detect incorrect behaviour and to measure +// the performance characteristics of your storage system. +// +// The default values for the parameters are deliberately low to reduce the +// impact of running an analysis inadvertently and to provide a sensible +// starting point for your investigations. +// Run your first analysis with the default parameter values to check for simple +// problems. +// Some repositories may behave correctly when lightly loaded but incorrectly +// under production-like workloads. +// If the first analysis is successful, run a sequence of increasingly large +// analyses until you encounter a failure or you reach a `blob_count` of at +// least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of +// at least `1tb`, and a `register_operation_count` of at least `100`. +// Always specify a generous timeout, possibly `1h` or longer, to allow time for +// each analysis to run to completion. +// Some repositories may behave correctly when accessed by a small number of +// Elasticsearch nodes but incorrectly when accessed concurrently by a +// production-scale cluster. +// Perform the analyses using a multi-node cluster of a similar size to your +// production cluster so that it can detect any problems that only arise when +// the repository is accessed by many nodes at once. +// +// If the analysis fails, Elasticsearch detected that your repository behaved +// unexpectedly. +// This usually means you are using a third-party storage system with an +// incorrect or incompatible implementation of the API it claims to support. +// If so, this storage system is not suitable for use as a snapshot repository. +// Repository analysis triggers conditions that occur only rarely when taking +// snapshots in a production system. +// Snapshotting to unsuitable storage may appear to work correctly most of the +// time despite repository analysis failures. +// However your snapshot data is at risk if you store it in a snapshot +// repository that does not reliably pass repository analysis. +// You can demonstrate that the analysis failure is due to an incompatible +// storage implementation by verifying that Elasticsearch does not detect the +// same problem when analysing the reference implementation of the storage +// protocol you are using. +// For instance, if you are using storage that offers an API which the supplier +// claims to be compatible with AWS S3, verify that repositories in AWS S3 do +// not fail repository analysis. +// This allows you to demonstrate to your storage supplier that a repository +// analysis failure must only be caused by an incompatibility with AWS S3 and +// cannot be attributed to a problem in Elasticsearch. 
+// Please do not report Elasticsearch issues involving third-party storage +// systems unless you can demonstrate that the same issue exists when analysing +// a repository that uses the reference implementation of the same storage +// protocol. +// You will need to work with the supplier of your storage system to address the +// incompatibilities that Elasticsearch detects. +// +// If the analysis is successful, the API returns details of the testing +// process, optionally including how long each operation took. +// You can use this information to determine the performance of your storage +// system. +// If any operation fails or returns an incorrect result, the API returns an +// error. +// If the API returns an error, it may not have removed all the data it wrote to +// the repository. +// The error will indicate the location of any leftover data and this path is +// also recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the connection from your client to Elasticsearch is closed while the +// client is waiting for the result of the analysis, the test is cancelled. +// Some clients are configured to close their connection if no response is +// received within a certain timeout. +// An analysis takes a long time to complete so you might need to relax any such +// client-side timeouts. +// On cancellation the analysis attempts to clean up the data it was writing, +// but it may not be able to remove it all. +// The path to the leftover data is recorded in the Elasticsearch logs. +// You should verify that this location has been cleaned up correctly. +// If there is still leftover data at the specified location, you should +// manually remove it. +// +// If the analysis is successful then it detected no incorrect behaviour, but +// this does not mean that correct behaviour is guaranteed. +// The analysis attempts to detect common bugs but it does not offer 100% +// coverage. +// Additionally, it does not test the following: +// +// * Your repository must perform durable writes. Once a blob has been written +// it must remain in place until it is deleted, even after a power loss or +// similar disaster. +// * Your repository must not suffer from silent data corruption. Once a blob +// has been written, its contents must remain unchanged until it is deliberately +// modified or deleted. +// * Your repository must behave correctly even if connectivity from the cluster +// is disrupted. Reads and writes may fail in this case, but they must not +// return incorrect results. +// +// IMPORTANT: An analysis writes a substantial amount of data to your repository +// and then reads it back again. +// This consumes bandwidth on the network between the cluster and the +// repository, and storage space and I/O bandwidth on the repository itself. +// You must ensure this load does not affect other users of these systems. +// Analyses respect the repository settings `max_snapshot_bytes_per_sec` and +// `max_restore_bytes_per_sec` if available and the cluster setting +// `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth +// they consume. +// +// NOTE: This API is intended for exploratory use by humans. +// You should expect the request parameters and the response format to vary in +// future versions. +// The response exposes implementation details of the analysis which may change +// from version to version.
+// +// NOTE: Different versions of Elasticsearch may perform different checks for +// repository compatibility, with newer versions typically being stricter than +// older ones. +// A storage system that passes repository analysis with one version of +// Elasticsearch may fail with a different version. +// This indicates it behaves incorrectly in ways that the former version did not +// detect. +// You must work with the supplier of your storage system to address the +// incompatibilities detected by the repository analysis API in any version of +// Elasticsearch. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// *Implementation details* +// +// NOTE: This section of documentation describes how the repository analysis API +// works in this version of Elasticsearch, but you should expect the +// implementation to vary between versions. +// The request parameters and response format depend on details of the +// implementation so may also be different in newer versions. +// +// The analysis comprises a number of blob-level tasks, as set by the +// `blob_count` parameter and a number of compare-and-exchange operations on +// linearizable registers, as set by the `register_operation_count` parameter. +// These tasks are distributed over the data and master-eligible nodes in the +// cluster for execution. +// +// For most blob-level tasks, the executing node first writes a blob to the +// repository and then instructs some of the other nodes in the cluster to +// attempt to read the data it just wrote. +// The size of the blob is chosen randomly, according to the `max_blob_size` and +// `max_total_data_size` parameters. +// If any of these reads fails then the repository does not implement the +// necessary read-after-write semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will instruct some of its peers +// to attempt to read the data before the writing process completes. +// These reads are permitted to fail, but must not return partial data. +// If any read returns partial data then the repository does not implement the +// necessary atomicity semantics that Elasticsearch requires. +// +// For some blob-level tasks, the executing node will overwrite the blob while +// its peers are reading it. +// In this case the data read may come from either the original or the +// overwritten blob, but the read operation must not return partial data or a +// mix of data from the two blobs. +// If any of these reads returns partial data or a mix of the two blobs then the +// repository does not implement the necessary atomicity semantics that +// Elasticsearch requires for overwrites. +// +// The executing node will use a variety of different methods to write the blob. +// For instance, where applicable, it will use both single-part and multi-part +// uploads. +// Similarly, the reading nodes will use a variety of different methods to read +// the data back again. +// For instance they may read the entire blob from start to end or may read only +// a subset of the data. +// +// For some blob-level tasks, the executing node will cancel the write before it +// is complete. +// In this case, it still instructs some of the other nodes in the cluster to +// attempt to read the blob but all of these reads must fail to find the blob. +// +// Linearizable registers are special blobs that Elasticsearch manipulates using +// an atomic compare-and-exchange operation. 
+// This operation ensures correct and strongly-consistent behavior even when the +// blob is accessed by multiple nodes at the same time. +// The detailed implementation of the compare-and-exchange operation on +// linearizable registers varies by repository type. +// Repository analysis verifies that uncontended compare-and-exchange +// operations on a linearizable register blob always succeed. +// Repository analysis also verifies that contended operations either succeed or +// report the contention but do not return incorrect results. +// If an operation fails due to contention, Elasticsearch retries the operation +// until it succeeds. +// Most of the compare-and-exchange operations performed by repository analysis +// atomically increment a counter which is represented as an 8-byte blob. +// Some operations also verify the behavior on small blobs with sizes other than +// 8 bytes. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze +func New(tp elastictransport.Interface) *RepositoryAnalyze { + r := &RepositoryAnalyze{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RepositoryAnalyze) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_analyze") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response.
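+// Most callers will prefer Do or IsSuccess, which wrap Perform. A hedged
+// sketch of direct use (the typed client value es and its es.Snapshot
+// entry point are assumptions for illustration):
+//
+//	res, err := es.Snapshot.RepositoryAnalyze("my_repo").Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close() // the caller owns the raw *http.Response
+//	}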
+func (r RepositoryAnalyze) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_analyze") + if reader := instrument.RecordRequestBody(ctx, "snapshot.repository_analyze", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_analyze") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RepositoryAnalyze query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handles the response and returns a repositoryanalyze.Response +func (r RepositoryAnalyze) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows you to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow.
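+//
+// A hedged example following the sizing guidance in the package documentation
+// (the typed client value es, the repository name, and the parameter values
+// are illustrative assumptions):
+//
+//	ok, err := es.Snapshot.RepositoryAnalyze("my_repo").
+//		BlobCount(2000).
+//		MaxBlobSize("2gb").
+//		MaxTotalDataSize("1tb").
+//		RegisterOperationCount(100).
+//		Timeout("1h").
+//		IsSuccess(context.Background())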
+func (r RepositoryAnalyze) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_analyze") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RepositoryAnalyze query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RepositoryAnalyze headers map. +func (r *RepositoryAnalyze) Header(key, value string) *RepositoryAnalyze { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository. +// API Name: repository +func (r *RepositoryAnalyze) _repository(repository string) *RepositoryAnalyze { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// BlobCount The total number of blobs to write to the repository during the test. +// For realistic experiments, you should set it to at least `2000`. +// API name: blob_count +func (r *RepositoryAnalyze) BlobCount(blobcount int) *RepositoryAnalyze { + r.values.Set("blob_count", strconv.Itoa(blobcount)) + + return r +} + +// Concurrency The number of operations to run concurrently during the test. +// API name: concurrency +func (r *RepositoryAnalyze) Concurrency(concurrency int) *RepositoryAnalyze { + r.values.Set("concurrency", strconv.Itoa(concurrency)) + + return r +} + +// Detailed Indicates whether to return detailed results, including timing information +// for every operation performed during the analysis. +// If false, it returns only a summary of the analysis. +// API name: detailed +func (r *RepositoryAnalyze) Detailed(detailed bool) *RepositoryAnalyze { + r.values.Set("detailed", strconv.FormatBool(detailed)) + + return r +} + +// EarlyReadNodeCount The number of nodes on which to perform an early read operation while writing +// each blob. +// Early read operations are only rarely performed. +// API name: early_read_node_count +func (r *RepositoryAnalyze) EarlyReadNodeCount(earlyreadnodecount int) *RepositoryAnalyze { + r.values.Set("early_read_node_count", strconv.Itoa(earlyreadnodecount)) + + return r +} + +// MaxBlobSize The maximum size of a blob to be written during the test. +// For realistic experiments, you should set it to at least `2gb`. +// API name: max_blob_size +func (r *RepositoryAnalyze) MaxBlobSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_blob_size", bytesize) + + return r +} + +// MaxTotalDataSize An upper limit on the total size of all the blobs written during the test. +// For realistic experiments, you should set it to at least `1tb`. +// API name: max_total_data_size +func (r *RepositoryAnalyze) MaxTotalDataSize(bytesize string) *RepositoryAnalyze { + r.values.Set("max_total_data_size", bytesize) + + return r +} + +// RareActionProbability The probability of performing a rare action such as an early read, an +// overwrite, or an aborted write on each blob. 
+// API name: rare_action_probability +func (r *RepositoryAnalyze) RareActionProbability(rareactionprobability string) *RepositoryAnalyze { + r.values.Set("rare_action_probability", rareactionprobability) + + return r +} + +// RarelyAbortWrites Indicates whether to rarely cancel writes before they complete. +// API name: rarely_abort_writes +func (r *RepositoryAnalyze) RarelyAbortWrites(rarelyabortwrites bool) *RepositoryAnalyze { + r.values.Set("rarely_abort_writes", strconv.FormatBool(rarelyabortwrites)) + + return r +} + +// ReadNodeCount The number of nodes on which to read a blob after writing. +// API name: read_node_count +func (r *RepositoryAnalyze) ReadNodeCount(readnodecount int) *RepositoryAnalyze { + r.values.Set("read_node_count", strconv.Itoa(readnodecount)) + + return r +} + +// RegisterOperationCount The minimum number of linearizable register operations to perform in total. +// For realistic experiments, you should set it to at least `100`. +// API name: register_operation_count +func (r *RepositoryAnalyze) RegisterOperationCount(registeroperationcount int) *RepositoryAnalyze { + r.values.Set("register_operation_count", strconv.Itoa(registeroperationcount)) + + return r +} + +// Seed The seed for the pseudo-random number generator used to generate the list of +// operations performed during the test. +// To repeat the same set of operations in multiple experiments, use the same +// seed in each experiment. +// Note that the operations are performed concurrently so might not always +// happen in the same order on each run. +// API name: seed +func (r *RepositoryAnalyze) Seed(seed int) *RepositoryAnalyze { + r.values.Set("seed", strconv.Itoa(seed)) + + return r +} + +// Timeout The period of time to wait for the test to complete. +// If no response is received before the timeout expires, the test is cancelled +// and returns an error. +// API name: timeout +func (r *RepositoryAnalyze) Timeout(duration string) *RepositoryAnalyze { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RepositoryAnalyze) ErrorTrace(errortrace bool) *RepositoryAnalyze { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RepositoryAnalyze) FilterPath(filterpaths ...string) *RepositoryAnalyze { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RepositoryAnalyze) Human(human bool) *RepositoryAnalyze { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *RepositoryAnalyze) Pretty(pretty bool) *RepositoryAnalyze { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/response.go new file mode 100644 index 000000000..18110e099 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze/response.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package repositoryanalyze + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package repositoryanalyze +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L24-L108 +type Response struct { + + // BlobCount The number of blobs written to the repository during the test. + BlobCount int `json:"blob_count"` + // BlobPath The path in the repository under which all the blobs were written during the + // test. + BlobPath string `json:"blob_path"` + // Concurrency The number of write operations performed concurrently during the test. + Concurrency int `json:"concurrency"` + // CoordinatingNode The node that coordinated the analysis and performed the final cleanup. + CoordinatingNode types.SnapshotNodeInfo `json:"coordinating_node"` + // DeleteElapsed The time it took to delete all the blobs in the container. + DeleteElapsed types.Duration `json:"delete_elapsed"` + // DeleteElapsedNanos The time it took to delete all the blobs in the container, in nanoseconds. + DeleteElapsedNanos int64 `json:"delete_elapsed_nanos"` + // Details A description of every read and write operation performed during the test. + Details types.DetailsInfo `json:"details"` + // EarlyReadNodeCount The limit on the number of nodes on which early read operations were + // performed after writing each blob. + EarlyReadNodeCount int `json:"early_read_node_count"` + // IssuesDetected A list of correctness issues detected, which is empty if the API succeeded. + // It is included to emphasize that a successful response does not guarantee + // correct behaviour in future. + IssuesDetected []string `json:"issues_detected"` + // ListingElapsed The time it took to retrieve a list of all the blobs in the container. 
+ ListingElapsed types.Duration `json:"listing_elapsed"` + // ListingElapsedNanos The time it took to retrieve a list of all the blobs in the container, in + // nanoseconds. + ListingElapsedNanos int64 `json:"listing_elapsed_nanos"` + // MaxBlobSize The limit on the size of a blob written during the test. + MaxBlobSize types.ByteSize `json:"max_blob_size"` + // MaxBlobSizeBytes The limit, in bytes, on the size of a blob written during the test. + MaxBlobSizeBytes int64 `json:"max_blob_size_bytes"` + // MaxTotalDataSize The limit on the total size of all blobs written during the test. + MaxTotalDataSize types.ByteSize `json:"max_total_data_size"` + // MaxTotalDataSizeBytes The limit, in bytes, on the total size of all blobs written during the test. + MaxTotalDataSizeBytes int64 `json:"max_total_data_size_bytes"` + // RareActionProbability The probability of performing rare actions during the test. + RareActionProbability types.Float64 `json:"rare_action_probability"` + // ReadNodeCount The limit on the number of nodes on which read operations were performed + // after writing each blob. + ReadNodeCount int `json:"read_node_count"` + // Repository The name of the repository that was the subject of the analysis. + Repository string `json:"repository"` + // Seed The seed for the pseudo-random number generator used to generate the + // operations used during the test. + Seed int64 `json:"seed"` + // Summary A collection of statistics that summarize the results of the test. + Summary types.SummaryInfo `json:"summary"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go new file mode 100644 index 000000000..252d77659 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/repository_verify_integrity.go @@ -0,0 +1,550 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Verify the repository integrity. +// Verify the integrity of the contents of a snapshot repository. +// +// This API enables you to perform a comprehensive check of the contents of a +// repository, looking for any anomalies in its data or metadata which might +// prevent you from restoring snapshots from the repository or which might cause +// future snapshot create or delete operations to fail.
+// +// If you suspect the integrity of the contents of one of your snapshot +// repositories, cease all write activity to this repository immediately, set +// its `read_only` option to `true`, and use this API to verify its integrity. +// Until you do so: +// +// * It may not be possible to restore some snapshots from this repository. +// * Searchable snapshots may report errors when searched or may have unassigned +// shards. +// * Taking snapshots into this repository may fail or may appear to succeed but +// have created a snapshot which cannot be restored. +// * Deleting snapshots from this repository may fail or may appear to succeed +// but leave the underlying data on disk. +// * Continuing to write to the repository while it is in an invalid state may +// cause additional damage to its contents. +// +// If the API finds any problems with the integrity of the contents of your +// repository, Elasticsearch will not be able to repair the damage. +// The only way to bring the repository back into a fully working state after +// its contents have been damaged is by restoring its contents from a repository +// backup which was taken before the damage occurred. +// You must also identify what caused the damage and take action to prevent it +// from happening again. +// +// If you cannot restore a repository backup, register a new repository and use +// this for all future snapshot operations. +// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository. +// +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// The default values for the parameters of this API are designed to limit the +// impact of the integrity verification on other activities in your cluster. +// For instance, by default it will only use at most half of the `snapshot_meta` +// threads to verify the integrity of each snapshot, allowing other snapshot +// operations to use the other half of this thread pool. +// If you modify these parameters to speed up the verification process, you risk +// disrupting other snapshot-related operations in your cluster. +// For large repositories, consider setting up a separate single-node +// Elasticsearch cluster just for running the integrity verification API. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions.
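+//
+// A minimal, hedged usage sketch (it assumes a typed client es that exposes
+// this endpoint under es.Snapshot; the repository name and concurrency value
+// are illustrative):
+//
+//	resp, err := es.Snapshot.RepositoryVerifyIntegrity("my_repo").
+//		MetaThreadPoolConcurrency(1).
+//		Do(context.Background())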
+package repositoryverifyintegrity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type RepositoryVerifyIntegrity struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewRepositoryVerifyIntegrity type alias for index. +type NewRepositoryVerifyIntegrity func(repository string) *RepositoryVerifyIntegrity + +// NewRepositoryVerifyIntegrityFunc returns a new instance of RepositoryVerifyIntegrity with the provided transport. +// Used in the index of the library, this allows retrieving every API in one place. +func NewRepositoryVerifyIntegrityFunc(tp elastictransport.Interface) NewRepositoryVerifyIntegrity { + return func(repository string) *RepositoryVerifyIntegrity { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Verify the repository integrity. +// Verify the integrity of the contents of a snapshot repository. +// +// This API enables you to perform a comprehensive check of the contents of a +// repository, looking for any anomalies in its data or metadata which might +// prevent you from restoring snapshots from the repository or which might cause +// future snapshot create or delete operations to fail. +// +// If you suspect the integrity of the contents of one of your snapshot +// repositories, cease all write activity to this repository immediately, set +// its `read_only` option to `true`, and use this API to verify its integrity. +// Until you do so: +// +// * It may not be possible to restore some snapshots from this repository. +// * Searchable snapshots may report errors when searched or may have unassigned +// shards. +// * Taking snapshots into this repository may fail or may appear to succeed but +// have created a snapshot which cannot be restored. +// * Deleting snapshots from this repository may fail or may appear to succeed +// but leave the underlying data on disk. +// * Continuing to write to the repository while it is in an invalid state may +// cause additional damage to its contents. +// +// If the API finds any problems with the integrity of the contents of your +// repository, Elasticsearch will not be able to repair the damage. +// The only way to bring the repository back into a fully working state after +// its contents have been damaged is by restoring its contents from a repository +// backup which was taken before the damage occurred. +// You must also identify what caused the damage and take action to prevent it +// from happening again. +// +// If you cannot restore a repository backup, register a new repository and use +// this for all future snapshot operations. +// In some cases it may be possible to recover some of the contents of a damaged +// repository, either by restoring as many of its snapshots as needed and taking +// new snapshots of the restored data, or by using the reindex API to copy data +// from any searchable snapshots mounted from the damaged repository.
+// +// Avoid all operations which write to the repository while the verify +// repository integrity API is running. +// If something changes the repository contents while an integrity verification +// is running then Elasticsearch may incorrectly report having detected some +// anomalies in its contents due to the concurrent writes. +// It may also incorrectly fail to report some anomalies that the concurrent +// writes prevented it from detecting. +// +// NOTE: This API is intended for exploratory use by humans. You should expect +// the request parameters and the response format to vary in future versions. +// +// NOTE: This API may not work correctly in a mixed-version cluster. +// +// The default values for the parameters of this API are designed to limit the +// impact of the integrity verification on other activities in your cluster. +// For instance, by default it will only use at most half of the `snapshot_meta` +// threads to verify the integrity of each snapshot, allowing other snapshot +// operations to use the other half of this thread pool. +// If you modify these parameters to speed up the verification process, you risk +// disrupting other snapshot-related operations in your cluster. +// For large repositories, consider setting up a separate single-node +// Elasticsearch cluster just for running the integrity verification API. +// +// The response exposes implementation details of the analysis which may change +// from version to version. +// The response body format is therefore not considered stable and may be +// different in newer versions. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity +func New(tp elastictransport.Interface) *RepositoryVerifyIntegrity { + r := &RepositoryVerifyIntegrity{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *RepositoryVerifyIntegrity) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_verify_integrity") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r RepositoryVerifyIntegrity) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.repository_verify_integrity") + if reader := instrument.RecordRequestBody(ctx, "snapshot.repository_verify_integrity", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.repository_verify_integrity") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the RepositoryVerifyIntegrity query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handles the response and returns a repositoryverifyintegrity.Response +func (r RepositoryVerifyIntegrity) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := new(Response) + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return *response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows you to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow.
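+//
+// A hedged example (client construction elided; the es value and repository
+// name are illustrative assumptions):
+//
+//	ok, err := es.Snapshot.RepositoryVerifyIntegrity("my_repo").IsSuccess(ctx)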
+func (r RepositoryVerifyIntegrity) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.repository_verify_integrity") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the RepositoryVerifyIntegrity query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the RepositoryVerifyIntegrity headers map. +func (r *RepositoryVerifyIntegrity) Header(key, value string) *RepositoryVerifyIntegrity { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the snapshot repository. +// API Name: repository +func (r *RepositoryVerifyIntegrity) _repository(repository string) *RepositoryVerifyIntegrity { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// BlobThreadPoolConcurrency If `verify_blob_contents` is `true`, this parameter specifies how many blobs +// to verify at once. +// API name: blob_thread_pool_concurrency +func (r *RepositoryVerifyIntegrity) BlobThreadPoolConcurrency(blobthreadpoolconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("blob_thread_pool_concurrency", strconv.Itoa(blobthreadpoolconcurrency)) + + return r +} + +// IndexSnapshotVerificationConcurrency The maximum number of index snapshots to verify concurrently within each +// index verification. +// API name: index_snapshot_verification_concurrency +func (r *RepositoryVerifyIntegrity) IndexSnapshotVerificationConcurrency(indexsnapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("index_snapshot_verification_concurrency", strconv.Itoa(indexsnapshotverificationconcurrency)) + + return r +} + +// IndexVerificationConcurrency The number of indices to verify concurrently. +// The default behavior is to use the entire `snapshot_meta` thread pool. +// API name: index_verification_concurrency +func (r *RepositoryVerifyIntegrity) IndexVerificationConcurrency(indexverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("index_verification_concurrency", strconv.Itoa(indexverificationconcurrency)) + + return r +} + +// MaxBytesPerSec If `verify_blob_contents` is `true`, this parameter specifies the maximum +// amount of data that Elasticsearch will read from the repository every second. +// API name: max_bytes_per_sec +func (r *RepositoryVerifyIntegrity) MaxBytesPerSec(maxbytespersec string) *RepositoryVerifyIntegrity { + r.values.Set("max_bytes_per_sec", maxbytespersec) + + return r +} + +// MaxFailedShardSnapshots The number of shard snapshot failures to track during integrity verification, +// in order to avoid excessive resource usage. +// If your repository contains more than this number of shard snapshot failures, +// the verification will fail. 
+// API name: max_failed_shard_snapshots +func (r *RepositoryVerifyIntegrity) MaxFailedShardSnapshots(maxfailedshardsnapshots int) *RepositoryVerifyIntegrity { + r.values.Set("max_failed_shard_snapshots", strconv.Itoa(maxfailedshardsnapshots)) + + return r +} + +// MetaThreadPoolConcurrency The maximum number of snapshot metadata operations to run concurrently. +// The default behavior is to use at most half of the `snapshot_meta` thread +// pool at once. +// API name: meta_thread_pool_concurrency +func (r *RepositoryVerifyIntegrity) MetaThreadPoolConcurrency(metathreadpoolconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("meta_thread_pool_concurrency", strconv.Itoa(metathreadpoolconcurrency)) + + return r +} + +// SnapshotVerificationConcurrency The number of snapshots to verify concurrently. +// The default behavior is to use at most half of the `snapshot_meta` thread +// pool at once. +// API name: snapshot_verification_concurrency +func (r *RepositoryVerifyIntegrity) SnapshotVerificationConcurrency(snapshotverificationconcurrency int) *RepositoryVerifyIntegrity { + r.values.Set("snapshot_verification_concurrency", strconv.Itoa(snapshotverificationconcurrency)) + + return r +} + +// VerifyBlobContents Indicates whether to verify the checksum of every data blob in the +// repository. +// If this feature is enabled, Elasticsearch will read the entire repository +// contents, which may be extremely slow and expensive. +// API name: verify_blob_contents +func (r *RepositoryVerifyIntegrity) VerifyBlobContents(verifyblobcontents bool) *RepositoryVerifyIntegrity { + r.values.Set("verify_blob_contents", strconv.FormatBool(verifyblobcontents)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *RepositoryVerifyIntegrity) ErrorTrace(errortrace bool) *RepositoryVerifyIntegrity { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *RepositoryVerifyIntegrity) FilterPath(filterpaths ...string) *RepositoryVerifyIntegrity { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *RepositoryVerifyIntegrity) Human(human bool) *RepositoryVerifyIntegrity { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *RepositoryVerifyIntegrity) Pretty(pretty bool) *RepositoryVerifyIntegrity { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/response.go new file mode 100644 index 000000000..48c6c2ac2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity/response.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package repositoryverifyintegrity + +import ( + "encoding/json" +) + +// Response holds the response body struct for the package repositoryverifyintegrity +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L25 + +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/request.go new file mode 100644 index 000000000..ad20a7d4e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/request.go @@ -0,0 +1,271 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
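// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored diff. The
// Request type defined in this file models the JSON body of the restore API;
// the two forms below build equivalent requests. All names and values are
// hypothetical.
package example

import (
	"github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore"
)

// ptr is a small local helper to take the address of a literal.
func ptr[T any](v T) *T { return &v }

func buildRestoreRequests() (*restore.Request, *restore.Request, error) {
	// Struct form: field names mirror the snake_case JSON keys.
	structForm := &restore.Request{
		Indices:           []string{"logs-*"},
		Partial:           ptr(true),
		RenamePattern:     ptr("(.+)"),
		RenameReplacement: ptr("restored-$1"),
	}

	// JSON form via FromJSON (defined later in this file). Note that the
	// custom UnmarshalJSON also tolerates booleans encoded as strings
	// (e.g. "partial": "true") and a bare string where a list is expected.
	jsonForm, err := restore.NewRequest().FromJSON(`{
		"indices": "logs-*",
		"partial": "true",
		"rename_pattern": "(.+)",
		"rename_replacement": "restored-$1"
	}`)

	return structForm, jsonForm, err
}
// ---------------------------------------------------------------------------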
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package restore + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package restore +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L175 +type Request struct { + + // FeatureStates The feature states to restore. + // If `include_global_state` is `true`, the request restores all feature states + // in the snapshot by default. + // If `include_global_state` is `false`, the request restores no feature states + // by default. + // Note that specifying an empty array will result in the default behavior. + // To restore no feature states, regardless of the `include_global_state` value, + // specify an array containing only the value `none` (`["none"]`). + FeatureStates []string `json:"feature_states,omitempty"` + // IgnoreIndexSettings The index settings to not restore from the snapshot. + // You can't use this option to ignore `index.number_of_shards`. + // + // For data streams, this option applies only to restored backing indices. + // New backing indices are configured using the data stream's matching index + // template. + IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` + // IgnoreUnavailable If `true`, the request ignores any index or data stream in indices that's + // missing from the snapshot. + // If `false`, the request returns an error for any missing index or data + // stream. + IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` + // IncludeAliases If `true`, the request restores aliases for any restored data streams and + // indices. + // If `false`, the request doesn’t restore aliases. + IncludeAliases *bool `json:"include_aliases,omitempty"` + // IncludeGlobalState If `true`, restore the cluster state. The cluster state includes: + // + // * Persistent cluster settings + // * Index templates + // * Legacy index templates + // * Ingest pipelines + // * Index lifecycle management (ILM) policies + // * Stored scripts + // * For snapshots taken after 7.12.0, feature states + // + // If `include_global_state` is `true`, the restore operation merges the legacy + // index templates in your cluster with the templates contained in the snapshot, + // replacing any existing ones whose name matches one in the snapshot. + // It completely removes all persistent settings, non-legacy index templates, + // ingest pipelines, and ILM lifecycle policies that exist in your cluster and + // replaces them with the corresponding items from the snapshot. + // + // Use the `feature_states` parameter to configure how feature states are + // restored. + // + // If `include_global_state` is `true` and a snapshot was created without a + // global state then the restore request will fail. + IncludeGlobalState *bool `json:"include_global_state,omitempty"` + // IndexSettings Index settings to add or change in restored indices, including backing + // indices. + // You can't use this option to change `index.number_of_shards`. + // + // For data streams, this option applies only to restored backing indices. + // New backing indices are configured using the data stream's matching index + // template. 
+ IndexSettings *types.IndexSettings `json:"index_settings,omitempty"` + // Indices A comma-separated list of indices and data streams to restore. + // It supports a multi-target syntax. + // The default behavior is all regular indices and regular data streams in the + // snapshot. + // + // You can't use this parameter to restore system indices or system data + // streams. + // Use `feature_states` instead. + Indices []string `json:"indices,omitempty"` + // Partial If `false`, the entire restore operation will fail if one or more indices + // included in the snapshot do not have all primary shards available. + // + // If true, it allows restoring a partial snapshot of indices with unavailable + // shards. + // Only shards that were successfully included in the snapshot will be restored. + // All missing shards will be recreated as empty. + Partial *bool `json:"partial,omitempty"` + // RenamePattern A rename pattern to apply to restored data streams and indices. + // Data streams and indices matching the rename pattern will be renamed + // according to `rename_replacement`. + // + // The rename pattern is applied as defined by the regular expression that + // supports referencing the original text, according to the `appendReplacement` + // logic. + RenamePattern *string `json:"rename_pattern,omitempty"` + // RenameReplacement The rename replacement string that is used with the `rename_pattern`. + RenameReplacement *string `json:"rename_replacement,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Restore request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return fmt.Errorf("%s | %w", "FeatureStates", err) + } + + case "ignore_index_settings": + if err := dec.Decode(&s.IgnoreIndexSettings); err != nil { + return fmt.Errorf("%s | %w", "IgnoreIndexSettings", err) + } + + case "ignore_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnavailable", err) + } + s.IgnoreUnavailable = &value + case bool: + s.IgnoreUnavailable = &v + } + + case "include_aliases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeAliases", err) + } + s.IncludeAliases = &value + case bool: + s.IncludeAliases = &v + } + + case "include_global_state": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeGlobalState", err) + } + s.IncludeGlobalState = &value + case bool: + s.IncludeGlobalState = &v + } + + case "index_settings": + if err := dec.Decode(&s.IndexSettings); err != nil { + return fmt.Errorf("%s | %w", "IndexSettings", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if 
!bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "partial": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Partial", err) + } + s.Partial = &value + case bool: + s.Partial = &v + } + + case "rename_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RenamePattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RenamePattern = &o + + case "rename_replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RenameReplacement", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RenameReplacement = &o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/response.go new file mode 100644 index 000000000..480787e0c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package restore + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package restore +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L28 +type Response struct { + Accepted *bool `json:"accepted,omitempty"` + Snapshot *types.SnapshotRestore `json:"snapshot,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/restore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/restore.go new file mode 100644 index 000000000..6d826df11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore/restore.go @@ -0,0 +1,653 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Restore a snapshot.
+// Restore a snapshot of a cluster or data streams and indices.
+//
+// You can restore a snapshot only to a running cluster with an elected master
+// node.
+// The snapshot repository must be registered and available to the cluster.
+// The snapshot and cluster versions must be compatible.
+//
+// To restore a snapshot, the cluster's global metadata must be writable. Ensure
+// there aren't any cluster blocks that prevent writes. The restore operation
+// ignores index blocks.
+//
+// Before you restore a data stream, ensure the cluster contains a matching
+// index template with data streams enabled. To check, use the index management
+// feature in Kibana or the get index template API:
+//
+// ```
+// GET
+// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+// ```
+//
+// If no such template exists, you can create one or restore a cluster state
+// that contains one. Without a matching index template, a data stream can't
+// roll over or create backing indices.
+//
+// If your snapshot contains data from App Search or Workplace Search, you must
+// restore the Enterprise Search encryption key before you restore the snapshot.
+package restore
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	repositoryMask = iota + 1
+
+	snapshotMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Restore struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	repository string
+	snapshot   string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewRestore type alias for index.
+type NewRestore func(repository, snapshot string) *Restore
+
+// NewRestoreFunc returns a new instance of Restore with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewRestoreFunc(tp elastictransport.Interface) NewRestore {
+	return func(repository, snapshot string) *Restore {
+		n := New(tp)
+
+		n._repository(repository)
+
+		n._snapshot(snapshot)
+
+		return n
+	}
+}
+
+// Restore a snapshot.
+// Restore a snapshot of a cluster or data streams and indices.
+//
+// You can restore a snapshot only to a running cluster with an elected master
+// node.
+// The snapshot repository must be registered and available to the cluster.
+// The snapshot and cluster versions must be compatible.
+//
+// To restore a snapshot, the cluster's global metadata must be writable. Ensure
+// there aren't any cluster blocks that prevent writes. The restore operation
+// ignores index blocks.
+//
+// Before you restore a data stream, ensure the cluster contains a matching
+// index template with data streams enabled. To check, use the index management
+// feature in Kibana or the get index template API:
+//
+// ```
+// GET
+// _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+// ```
+//
+// If no such template exists, you can create one or restore a cluster state
+// that contains one. Without a matching index template, a data stream can't
+// roll over or create backing indices.
+//
+// If your snapshot contains data from App Search or Workplace Search, you must
+// restore the Enterprise Search encryption key before you restore the snapshot.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore
+func New(tp elastictransport.Interface) *Restore {
+	r := &Restore{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *Restore) Raw(raw io.Reader) *Restore {
+	r.raw = raw
+
+	return r
+}
+
+// Request sets the request property with the appropriate payload.
+func (r *Restore) Request(req *Request) *Restore {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Restore) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Restore: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + path.WriteString("/") + path.WriteString("_restore") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
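// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored diff. It
// drives a full restore through the fluent API defined in this file; the
// repository and snapshot names are hypothetical, and `tp` is any configured
// elastictransport.Interface (for example, the transport of an existing
// go-elasticsearch client).
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore"
)

func restoreSnapshot(tp elastictransport.Interface) error {
	res, err := restore.NewRestoreFunc(tp)("my_backup_repo", "snap-2024-01-01").
		WaitForCompletion(true).          // block until primaries are recovered
		IgnoreUnavailable(true).          // skip indices missing from the snapshot
		IncludeGlobalState(false).        // leave cluster-wide state untouched
		RenamePattern("(.+)").            // restore alongside the originals...
		RenameReplacement("restored-$1"). // ...under a "restored-" prefix
		Do(context.Background())
	if err != nil {
		return err
	}

	if res.Snapshot != nil {
		fmt.Printf("restored snapshot: %+v\n", res.Snapshot)
	}
	return nil
}
// ---------------------------------------------------------------------------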
+func (r Restore) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.restore") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.restore") + if reader := instrument.RecordRequestBody(ctx, "snapshot.restore", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.restore") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Restore query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a restore.Response +func (r Restore) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.restore") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Restore headers map. +func (r *Restore) Header(key, value string) *Restore { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the repository to restore a snapshot from. +// API Name: repository +func (r *Restore) _repository(repository string) *Restore { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// Snapshot The name of the snapshot to restore. +// API Name: snapshot +func (r *Restore) _snapshot(snapshot string) *Restore { + r.paramSet |= snapshotMask + r.snapshot = snapshot + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. 
+// API name: master_timeout +func (r *Restore) MasterTimeout(duration string) *Restore { + r.values.Set("master_timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request returns a response when the restore operation +// completes. +// The operation is complete when it finishes all attempts to recover primary +// shards for restored indices. +// This applies even if one or more of the recovery attempts fail. +// +// If `false`, the request returns a response when the restore operation +// initializes. +// API name: wait_for_completion +func (r *Restore) WaitForCompletion(waitforcompletion bool) *Restore { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Restore) ErrorTrace(errortrace bool) *Restore { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Restore) FilterPath(filterpaths ...string) *Restore { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Restore) Human(human bool) *Restore { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Restore) Pretty(pretty bool) *Restore { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The feature states to restore. +// If `include_global_state` is `true`, the request restores all feature states +// in the snapshot by default. +// If `include_global_state` is `false`, the request restores no feature states +// by default. +// Note that specifying an empty array will result in the default behavior. +// To restore no feature states, regardless of the `include_global_state` value, +// specify an array containing only the value `none` (`["none"]`). +// API name: feature_states +func (r *Restore) FeatureStates(featurestates ...string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range featurestates { + + r.req.FeatureStates = append(r.req.FeatureStates, v) + + } + return r +} + +// The index settings to not restore from the snapshot. +// You can't use this option to ignore `index.number_of_shards`. +// +// For data streams, this option applies only to restored backing indices. +// New backing indices are configured using the data stream's matching index +// template. 
+// API name: ignore_index_settings +func (r *Restore) IgnoreIndexSettings(ignoreindexsettings ...string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range ignoreindexsettings { + + r.req.IgnoreIndexSettings = append(r.req.IgnoreIndexSettings, v) + + } + return r +} + +// If `true`, the request ignores any index or data stream in indices that's +// missing from the snapshot. +// If `false`, the request returns an error for any missing index or data +// stream. +// API name: ignore_unavailable +func (r *Restore) IgnoreUnavailable(ignoreunavailable bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IgnoreUnavailable = &ignoreunavailable + + return r +} + +// If `true`, the request restores aliases for any restored data streams and +// indices. +// If `false`, the request doesn’t restore aliases. +// API name: include_aliases +func (r *Restore) IncludeAliases(includealiases bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeAliases = &includealiases + + return r +} + +// If `true`, restore the cluster state. The cluster state includes: +// +// * Persistent cluster settings +// * Index templates +// * Legacy index templates +// * Ingest pipelines +// * Index lifecycle management (ILM) policies +// * Stored scripts +// * For snapshots taken after 7.12.0, feature states +// +// If `include_global_state` is `true`, the restore operation merges the legacy +// index templates in your cluster with the templates contained in the snapshot, +// replacing any existing ones whose name matches one in the snapshot. +// It completely removes all persistent settings, non-legacy index templates, +// ingest pipelines, and ILM lifecycle policies that exist in your cluster and +// replaces them with the corresponding items from the snapshot. +// +// Use the `feature_states` parameter to configure how feature states are +// restored. +// +// If `include_global_state` is `true` and a snapshot was created without a +// global state then the restore request will fail. +// API name: include_global_state +func (r *Restore) IncludeGlobalState(includeglobalstate bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IncludeGlobalState = &includeglobalstate + + return r +} + +// Index settings to add or change in restored indices, including backing +// indices. +// You can't use this option to change `index.number_of_shards`. +// +// For data streams, this option applies only to restored backing indices. +// New backing indices are configured using the data stream's matching index +// template. +// API name: index_settings +func (r *Restore) IndexSettings(indexsettings types.IndexSettingsVariant) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexSettings = indexsettings.IndexSettingsCaster() + + return r +} + +// A comma-separated list of indices and data streams to restore. +// It supports a multi-target syntax. +// The default behavior is all regular indices and regular data streams in the +// snapshot. +// +// You can't use this parameter to restore system indices or system data +// streams. +// Use `feature_states` instead. 
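// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored diff,
// expanding on the comment above: regular indices and data streams are
// selected with Indices, while system state travels through feature states,
// and passing "none" restores no feature states regardless of
// include_global_state. All names are hypothetical.
package example

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore"
)

func restoreDataOnly(tp elastictransport.Interface) error {
	_, err := restore.NewRestoreFunc(tp)("my_backup_repo", "snap-2024-01-01").
		Indices("logs-*", "metrics-*"). // regular indices and data streams only
		FeatureStates("none").          // explicitly restore no feature states
		IncludeGlobalState(false).
		Do(context.Background())
	return err
}
// ---------------------------------------------------------------------------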
+// API name: indices +func (r *Restore) Indices(indices ...string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Indices = indices + + return r +} + +// If `false`, the entire restore operation will fail if one or more indices +// included in the snapshot do not have all primary shards available. +// +// If true, it allows restoring a partial snapshot of indices with unavailable +// shards. +// Only shards that were successfully included in the snapshot will be restored. +// All missing shards will be recreated as empty. +// API name: partial +func (r *Restore) Partial(partial bool) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Partial = &partial + + return r +} + +// A rename pattern to apply to restored data streams and indices. +// Data streams and indices matching the rename pattern will be renamed +// according to `rename_replacement`. +// +// The rename pattern is applied as defined by the regular expression that +// supports referencing the original text, according to the `appendReplacement` +// logic. +// API name: rename_pattern +func (r *Restore) RenamePattern(renamepattern string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RenamePattern = &renamepattern + + return r +} + +// The rename replacement string that is used with the `rename_pattern`. +// API name: rename_replacement +func (r *Restore) RenameReplacement(renamereplacement string) *Restore { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RenameReplacement = &renamereplacement + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/response.go new file mode 100644 index 000000000..4776f535d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

+package status
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// Response holds the response body struct for the package status
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24
+type Response struct {
+	Snapshots []types.Status `json:"snapshots"`
+}
+
+// NewResponse returns a Response
+func NewResponse() *Response {
+	r := &Response{}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/status.go
new file mode 100644
index 000000000..75b14f38f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status/status.go
@@ -0,0 +1,455 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Get the snapshot status.
+// Get a detailed description of the current state for each shard participating
+// in the snapshot.
+//
+// Note that this API should be used only to obtain detailed shard-level
+// information for ongoing snapshots.
+// If this detail is not needed or you want to obtain information about one or
+// more existing snapshots, use the get snapshot API.
+//
+// If you omit the `<snapshot>` request path parameter, the request retrieves
+// information only for currently running snapshots.
+// This usage is preferred.
+// If needed, you can specify `<repository>` and `<snapshot>` to retrieve
+// information for specific snapshots, even if they're not currently running.
+//
+// WARNING: Using the API to return the status of any snapshots other than
+// currently running snapshots can be expensive.
+// The API requires a read from the repository for each shard in each snapshot.
+// For example, if you have 100 snapshots with 1,000 shards each, an API request
+// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000
+// shards).
+//
+// Depending on the latency of your storage, such requests can take an extremely
+// long time to return results.
+// These requests can also tax machine resources and, when using cloud storage,
+// incur high processing costs.
+package status
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	repositoryMask = iota + 1
+
+	snapshotMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type Status struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	repository string
+	snapshot   string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewStatus type alias for index.
+type NewStatus func() *Status
+
+// NewStatusFunc returns a new instance of Status with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewStatusFunc(tp elastictransport.Interface) NewStatus {
+	return func() *Status {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get the snapshot status.
+// Get a detailed description of the current state for each shard participating
+// in the snapshot.
+//
+// Note that this API should be used only to obtain detailed shard-level
+// information for ongoing snapshots.
+// If this detail is not needed or you want to obtain information about one or
+// more existing snapshots, use the get snapshot API.
+//
+// If you omit the `<snapshot>` request path parameter, the request retrieves
+// information only for currently running snapshots.
+// This usage is preferred.
+// If needed, you can specify `<repository>` and `<snapshot>` to retrieve
+// information for specific snapshots, even if they're not currently running.
+//
+// WARNING: Using the API to return the status of any snapshots other than
+// currently running snapshots can be expensive.
+// The API requires a read from the repository for each shard in each snapshot.
+// For example, if you have 100 snapshots with 1,000 shards each, an API request
+// that includes all snapshots will require 100,000 reads (100 snapshots x 1,000
+// shards).
+//
+// Depending on the latency of your storage, such requests can take an extremely
+// long time to return results.
+// These requests can also tax machine resources and, when using cloud storage,
+// incur high processing costs.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status
+func New(tp elastictransport.Interface) *Status {
+	r := &Status{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *Status) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + case r.paramSet == repositoryMask|snapshotMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "snapshot", r.snapshot) + } + path.WriteString(r.snapshot) + path.WriteString("/") + path.WriteString("_status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
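// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored diff. It
// uses HttpRequest (defined above) to show the three URL paths the builder
// can produce, without performing any I/O. Names are hypothetical.
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status"
)

func showStatusPaths(tp elastictransport.Interface) error {
	newStatus := status.NewStatusFunc(tp)

	for _, s := range []*status.Status{
		newStatus(),                                          // /_snapshot/_status
		newStatus().Repository("my_repo"),                    // /_snapshot/my_repo/_status
		newStatus().Repository("my_repo").Snapshot("snap-1"), // /_snapshot/my_repo/snap-1/_status
	} {
		req, err := s.HttpRequest(context.Background())
		if err != nil {
			return err
		}
		fmt.Println(req.Method, req.URL.Path)
	}
	return nil
}
// ---------------------------------------------------------------------------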
+func (r Status) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.status") + if reader := instrument.RecordRequestBody(ctx, "snapshot.status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Status query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a status.Response +func (r Status) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
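// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored diff. Per
// the package documentation, calling the endpoint with no repository or
// snapshot is the preferred (and cheap) form: it reports only snapshots that
// are currently running. `tp` is any configured elastictransport.Interface.
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status"
)

func printRunningSnapshots(tp elastictransport.Interface) error {
	res, err := status.NewStatusFunc(tp)().Do(context.Background())
	if err != nil {
		return err
	}
	for _, s := range res.Snapshots {
		// types.Status describes per-snapshot, per-shard progress; printing
		// the whole value keeps this sketch independent of its field layout.
		fmt.Printf("running snapshot status: %+v\n", s)
	}
	return nil
}
// ---------------------------------------------------------------------------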
+func (r Status) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "snapshot.status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the Status query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the Status headers map.
+func (r *Status) Header(key, value string) *Status {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Repository The snapshot repository name used to limit the request.
+// It supports wildcards (`*`) if `<snapshot>` isn't specified.
+// API Name: repository
+func (r *Status) Repository(repository string) *Status {
+	r.paramSet |= repositoryMask
+	r.repository = repository
+
+	return r
+}
+
+// Snapshot A comma-separated list of snapshots to retrieve status for.
+// The default is currently running snapshots.
+// Wildcards (`*`) are not supported.
+// API Name: snapshot
+func (r *Status) Snapshot(snapshot string) *Status {
+	r.paramSet |= snapshotMask
+	r.snapshot = snapshot
+
+	return r
+}
+
+// IgnoreUnavailable If `false`, the request returns an error for any snapshots that are
+// unavailable.
+// If `true`, the request ignores snapshots that are unavailable, such as those
+// that are corrupted or temporarily cannot be returned.
+// API name: ignore_unavailable
+func (r *Status) IgnoreUnavailable(ignoreunavailable bool) *Status {
+	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))
+
+	return r
+}
+
+// MasterTimeout The period to wait for the master node.
+// If the master node is not available before the timeout expires, the request
+// fails and returns an error.
+// To indicate that the request should never timeout, set it to `-1`.
+// API name: master_timeout
+func (r *Status) MasterTimeout(duration string) *Status {
+	r.values.Set("master_timeout", duration)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *Status) ErrorTrace(errortrace bool) *Status {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *Status) FilterPath(filterpaths ...string) *Status {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human +func (r *Status) Human(human bool) *Status { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Status) Pretty(pretty bool) *Status { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/response.go new file mode 100644 index 000000000..cd19d3bbb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package verifyrepository + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package verifyrepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L31 +type Response struct { + + // Nodes Information about the nodes connected to the snapshot repository. + // The key is the ID of the node. + Nodes map[string]types.CompactNodeInfo `json:"nodes"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.CompactNodeInfo, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/verify_repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/verify_repository.go new file mode 100644 index 000000000..d5f8414a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository/verify_repository.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. +package verifyrepository + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + repositoryMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type VerifyRepository struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + repository string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewVerifyRepository type alias for index. +type NewVerifyRepository func(repository string) *VerifyRepository + +// NewVerifyRepositoryFunc returns a new instance of VerifyRepository with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewVerifyRepositoryFunc(tp elastictransport.Interface) NewVerifyRepository { + return func(repository string) *VerifyRepository { + n := New(tp) + + n._repository(repository) + + return n + } +} + +// Verify a snapshot repository. +// Check for common misconfigurations in a snapshot repository. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository +func New(tp elastictransport.Interface) *VerifyRepository { + r := &VerifyRepository{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
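+//
+// Every endpoint in this package follows the same three layers: HttpRequest
+// builds the *http.Request without sending it, Perform executes it against
+// the transport, and Do/IsSuccess decode the outcome. An illustrative sketch
+// of inspecting the request before it is sent (`tp` is assumed to be a
+// configured elastictransport.Interface):
+//
+//	v := NewVerifyRepositoryFunc(tp)("my_repo")
+//	req, err := v.HttpRequest(context.Background())
+//	if err == nil {
+//		fmt.Println(req.Method, req.URL.Path) // POST /_snapshot/my_repo/_verify
+//	}
+//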
+func (r *VerifyRepository) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == repositoryMask: + path.WriteString("/") + path.WriteString("_snapshot") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "repository", r.repository) + } + path.WriteString(r.repository) + path.WriteString("/") + path.WriteString("_verify") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r VerifyRepository) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "snapshot.verify_repository") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "snapshot.verify_repository") + if reader := instrument.RecordRequestBody(ctx, "snapshot.verify_repository", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "snapshot.verify_repository") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the VerifyRepository query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a verifyrepository.Response +func (r VerifyRepository) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.verify_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + 
} + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r VerifyRepository) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "snapshot.verify_repository") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the VerifyRepository query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the VerifyRepository headers map. +func (r *VerifyRepository) Header(key, value string) *VerifyRepository { + r.headers.Set(key, value) + + return r +} + +// Repository The name of the snapshot repository to verify. +// API Name: repository +func (r *VerifyRepository) _repository(repository string) *VerifyRepository { + r.paramSet |= repositoryMask + r.repository = repository + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *VerifyRepository) MasterTimeout(duration string) *VerifyRepository { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response from all relevant nodes in the cluster +// after updating the cluster metadata. +// If no response is received before the timeout expires, the cluster metadata +// update still applies but the response will indicate that it was not +// completely acknowledged. +// To indicate that the request should never timeout, set it to `-1`. +// API name: timeout +func (r *VerifyRepository) Timeout(duration string) *VerifyRepository { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *VerifyRepository) ErrorTrace(errortrace bool) *VerifyRepository { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
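+//
+// For the VerifyRepository endpoint as a whole, a minimal illustrative
+// sketch of an end-to-end verification, assuming `es` is a configured
+// *elasticsearch.TypedClient (an assumption, not part of this file):
+//
+//	res, err := es.Snapshot.VerifyRepository("my_repo").Do(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for nodeID := range res.Nodes {
+//		fmt.Println("verified by node:", nodeID)
+//	}
+//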
+// API name: filter_path +func (r *VerifyRepository) FilterPath(filterpaths ...string) *VerifyRepository { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *VerifyRepository) Human(human bool) *VerifyRepository { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *VerifyRepository) Pretty(pretty bool) *VerifyRepository { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/clear_cursor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/clear_cursor.go new file mode 100644 index 000000000..e2bf81d65 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/clear_cursor.go @@ -0,0 +1,348 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Clear an SQL search cursor. +package clearcursor + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ClearCursor struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewClearCursor type alias for index. +type NewClearCursor func() *ClearCursor + +// NewClearCursorFunc returns a new instance of ClearCursor with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewClearCursorFunc(tp elastictransport.Interface) NewClearCursor { + return func() *ClearCursor { + n := New(tp) + + return n + } +} + +// Clear an SQL search cursor. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor +func New(tp elastictransport.Interface) *ClearCursor { + r := &ClearCursor{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ClearCursor) Raw(raw io.Reader) *ClearCursor { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ClearCursor) Request(req *Request) *ClearCursor { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ClearCursor) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ClearCursor: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("close") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ClearCursor) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "sql.clear_cursor") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "sql.clear_cursor") + if reader := instrument.RecordRequestBody(ctx, "sql.clear_cursor", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.clear_cursor") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ClearCursor query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a clearcursor.Response +func (r ClearCursor) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.clear_cursor") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the ClearCursor headers map. +func (r *ClearCursor) Header(key, value string) *ClearCursor { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ClearCursor) ErrorTrace(errortrace bool) *ClearCursor { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
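+//
+// For the ClearCursor endpoint as a whole, a minimal illustrative sketch of
+// closing a cursor returned by an earlier SQL query. Both `es` (a configured
+// *elasticsearch.TypedClient) and `cursor` (the cursor string from a prior
+// response) are assumptions for the example:
+//
+//	res, err := es.Sql.ClearCursor().Cursor(cursor).Do(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("cursor closed:", res.Succeeded)
+//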
+// API name: filter_path +func (r *ClearCursor) FilterPath(filterpaths ...string) *ClearCursor { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ClearCursor) Human(human bool) *ClearCursor { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ClearCursor) Pretty(pretty bool) *ClearCursor { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Cursor to clear. +// API name: cursor +func (r *ClearCursor) Cursor(cursor string) *ClearCursor { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Cursor = cursor + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/request.go new file mode 100644 index 000000000..cef5ed464 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/request.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcursor + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package clearcursor +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L42 +type Request struct { + + // Cursor Cursor to clear. 
+ Cursor string `json:"cursor"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Clearcursor request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/response.go new file mode 100644 index 000000000..048fb673e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package clearcursor + +// Response holds the response body struct for the package clearcursor +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 +type Response struct { + Succeeded bool `json:"succeeded"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/delete_async.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/delete_async.go new file mode 100644 index 000000000..dee2880b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/delete_async.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. +package deleteasync + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteAsync struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteAsync type alias for index. +type NewDeleteAsync func(id string) *DeleteAsync + +// NewDeleteAsyncFunc returns a new instance of DeleteAsync with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteAsyncFunc(tp elastictransport.Interface) NewDeleteAsync { + return func(id string) *DeleteAsync { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete an async SQL search. +// Delete an async SQL search or a stored synchronous SQL search. +// If the search is still running, the API cancels it. +// +// If the Elasticsearch security features are enabled, only the following users +// can use this API to delete a search: +// +// * Users with the `cancel_task` cluster privilege. +// * The user who first submitted the search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async +func New(tp elastictransport.Interface) *DeleteAsync { + r := &DeleteAsync{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteAsync) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_sql")
+		path.WriteString("/")
+		path.WriteString("async")
+		path.WriteString("/")
+		path.WriteString("delete")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodDelete
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r DeleteAsync) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "sql.delete_async")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "sql.delete_async")
+		if reader := instrument.RecordRequestBody(ctx, "sql.delete_async", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "sql.delete_async")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the DeleteAsync query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a deleteasync.Response
+func (r DeleteAsync) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "sql.delete_async")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r DeleteAsync) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "sql.delete_async")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the DeleteAsync query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the DeleteAsync headers map.
+func (r *DeleteAsync) Header(key, value string) *DeleteAsync {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id The identifier for the search.
+// API Name: id
+func (r *DeleteAsync) _id(id string) *DeleteAsync {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *DeleteAsync) ErrorTrace(errortrace bool) *DeleteAsync {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *DeleteAsync) FilterPath(filterpaths ...string) *DeleteAsync {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *DeleteAsync) Human(human bool) *DeleteAsync {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
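+//
+// For the DeleteAsync endpoint as a whole, a minimal illustrative sketch of
+// cancelling an async SQL search by its identifier. Both `es` (a configured
+// *elasticsearch.TypedClient) and the identifier are assumptions:
+//
+//	res, err := es.Sql.DeleteAsync("my_search_id").Do(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("acknowledged:", res.Acknowledged) // always true on success
+//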
+// API name: pretty +func (r *DeleteAsync) Pretty(pretty bool) *DeleteAsync { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/response.go new file mode 100644 index 000000000..3ffdb4eef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deleteasync + +// Response holds the response body struct for the package deleteasync +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/get_async.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/get_async.go new file mode 100644 index 000000000..fdb9c0a88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/get_async.go @@ -0,0 +1,392 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get async SQL search results. +// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. 
+// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. +package getasync + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetAsync struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetAsync type alias for index. +type NewGetAsync func(id string) *GetAsync + +// NewGetAsyncFunc returns a new instance of GetAsync with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetAsyncFunc(tp elastictransport.Interface) NewGetAsync { + return func(id string) *GetAsync { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get async SQL search results. +// Get the current status and available results for an async SQL search or +// stored synchronous SQL search. +// +// If the Elasticsearch security features are enabled, only the user who first +// submitted the SQL search can retrieve the search using this API. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async +func New(tp elastictransport.Interface) *GetAsync { + r := &GetAsync{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetAsync) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("async") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetAsync) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "sql.get_async") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "sql.get_async") + if reader := instrument.RecordRequestBody(ctx, "sql.get_async", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.get_async") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetAsync query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getasync.Response +func (r GetAsync) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.get_async") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetAsync) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.get_async") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetAsync query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetAsync headers map. +func (r *GetAsync) Header(key, value string) *GetAsync { + r.headers.Set(key, value) + + return r +} + +// Id The identifier for the search. +// API Name: id +func (r *GetAsync) _id(id string) *GetAsync { + r.paramSet |= idMask + r.id = id + + return r +} + +// Delimiter The separator for CSV results. +// The API supports this parameter only for CSV responses. +// API name: delimiter +func (r *GetAsync) Delimiter(delimiter string) *GetAsync { + r.values.Set("delimiter", delimiter) + + return r +} + +// Format The format for the response. +// You must specify a format using this parameter or the `Accept` HTTP header. +// If you specify both, the API uses this parameter. +// API name: format +func (r *GetAsync) Format(format string) *GetAsync { + r.values.Set("format", format) + + return r +} + +// KeepAlive The retention period for the search and its results. +// It defaults to the `keep_alive` period for the original SQL search. +// API name: keep_alive +func (r *GetAsync) KeepAlive(duration string) *GetAsync { + r.values.Set("keep_alive", duration) + + return r +} + +// WaitForCompletionTimeout The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. +// API name: wait_for_completion_timeout +func (r *GetAsync) WaitForCompletionTimeout(duration string) *GetAsync { + r.values.Set("wait_for_completion_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetAsync) ErrorTrace(errortrace bool) *GetAsync { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetAsync) FilterPath(filterpaths ...string) *GetAsync { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
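+//
+// For the GetAsync endpoint as a whole, a minimal illustrative sketch of
+// polling an async SQL search for results. Both `es` (a configured
+// *elasticsearch.TypedClient) and `id` (the identifier returned when the
+// search was submitted) are assumptions for the example:
+//
+//	res, err := es.Sql.GetAsync(id).
+//		WaitForCompletionTimeout("2s").
+//		KeepAlive("5m").
+//		Do(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("still running:", res.IsRunning)
+//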
+// API name: human +func (r *GetAsync) Human(human bool) *GetAsync { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetAsync) Pretty(pretty bool) *GetAsync { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/response.go new file mode 100644 index 000000000..2f1b7d4f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync/response.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getasync + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getasync +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 +type Response struct { + + // Columns Column headings for the search results. Each object is a column. + Columns []types.Column `json:"columns,omitempty"` + // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. + Cursor *string `json:"cursor,omitempty"` + // Id Identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. + Id string `json:"id"` + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. + IsPartial bool `json:"is_partial"` + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. + IsRunning bool `json:"is_running"` + // Rows The values for the search results. 
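+	// Each cell is left as json.RawMessage because its Go type depends on the
+	// column, so callers decode per column. An illustrative sketch (the
+	// column shape is an assumption for the example):
+	//
+	//	var v string
+	//	_ = json.Unmarshal(resp.Rows[0][0], &v)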
+ Rows [][]json.RawMessage `json:"rows"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/get_async_status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/get_async_status.go new file mode 100644 index 000000000..c6a755ecf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/get_async_status.go @@ -0,0 +1,350 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. +package getasyncstatus + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetAsyncStatus struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetAsyncStatus type alias for index. +type NewGetAsyncStatus func(id string) *GetAsyncStatus + +// NewGetAsyncStatusFunc returns a new instance of GetAsyncStatus with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetAsyncStatusFunc(tp elastictransport.Interface) NewGetAsyncStatus { + return func(id string) *GetAsyncStatus { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get the async SQL search status. +// Get the current status of an async SQL search or a stored synchronous SQL +// search. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status +func New(tp elastictransport.Interface) *GetAsyncStatus { + r := &GetAsyncStatus{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetAsyncStatus) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_sql")
+		path.WriteString("/")
+		path.WriteString("async")
+		path.WriteString("/")
+		path.WriteString("status")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetAsyncStatus) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "sql.get_async_status")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "sql.get_async_status")
+		if reader := instrument.RecordRequestBody(ctx, "sql.get_async_status", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "sql.get_async_status")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetAsyncStatus query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handle the response and returns a getasyncstatus.Response
+func (r GetAsyncStatus) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "sql.get_async_status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows to run a query with a context and retrieve the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetAsyncStatus) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "sql.get_async_status")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the GetAsyncStatus query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header set a key, value pair in the GetAsyncStatus headers map.
+func (r *GetAsyncStatus) Header(key, value string) *GetAsyncStatus {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id The identifier for the search.
+// API Name: id
+func (r *GetAsyncStatus) _id(id string) *GetAsyncStatus {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *GetAsyncStatus) ErrorTrace(errortrace bool) *GetAsyncStatus {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *GetAsyncStatus) FilterPath(filterpaths ...string) *GetAsyncStatus {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed
+// only by machines.
+// API name: human
+func (r *GetAsyncStatus) Human(human bool) *GetAsyncStatus {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging only.
+// API name: pretty +func (r *GetAsyncStatus) Pretty(pretty bool) *GetAsyncStatus { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/response.go new file mode 100644 index 000000000..5f4250686 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus/response.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getasyncstatus + +// Response holds the response body struct for the package getasyncstatus +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 +type Response struct { + + // CompletionStatus The HTTP status code for the search. + // The API returns this property only for completed searches. + CompletionStatus *uint `json:"completion_status,omitempty"` + // ExpirationTimeInMillis The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will + // delete the search and its results, even if the search is still running. + ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` + // Id The identifier for the search. + Id string `json:"id"` + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + IsPartial bool `json:"is_partial"` + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. + IsRunning bool `json:"is_running"` + // StartTimeInMillis The timestamp, in milliseconds since the Unix epoch, when the search started. + // The API returns this property only for running searches. + StartTimeInMillis int64 `json:"start_time_in_millis"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/query.go new file mode 100644 index 000000000..3f9892077 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/query.go @@ -0,0 +1,596 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get SQL search results. +// Run an SQL request. +package query + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Query struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQuery type alias for index. +type NewQuery func() *Query + +// NewQueryFunc returns a new instance of Query with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryFunc(tp elastictransport.Interface) NewQuery { + return func() *Query { + n := New(tp) + + return n + } +} + +// Get SQL search results. +// Run an SQL request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query +func New(tp elastictransport.Interface) *Query { + r := &Query{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Query) Raw(raw io.Reader) *Query { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Query) Request(req *Request) *Query { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
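A minimal sketch of running an SQL search through this builder via the typed client, assuming `elasticsearch.NewTypedClient`, a placeholder address and index, and that the generated `types.Column` exposes `Name`/`Type` as in the SQL spec:

package main

import (
	"context"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// "library" is a placeholder index; FetchSize caps rows per page.
	res, err := es.Sql.Query().
		Query("SELECT author, name FROM library ORDER BY name LIMIT 10").
		FetchSize(5).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	for _, col := range res.Columns {
		fmt.Printf("%s (%s)\t", col.Name, col.Type)
	}
	fmt.Println()
	for _, row := range res.Rows {
		for _, cell := range row {
			fmt.Printf("%s\t", cell) // each cell is raw JSON
		}
		fmt.Println()
	}

	// A non-nil cursor fetches the next page of results.
	if res.Cursor != nil {
		_, _ = es.Sql.Query().Cursor(*res.Cursor).Do(context.Background())
	}
}
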
+func (r *Query) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Query: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_sql") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Query) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "sql.query") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "sql.query") + if reader := instrument.RecordRequestBody(ctx, "sql.query", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.query") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Query query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a query.Response +func (r Query) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.query") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Query headers map. +func (r *Query) Header(key, value string) *Query { + r.headers.Set(key, value) + + return r +} + +// Format The format for the response. +// You can also specify a format using the `Accept` HTTP header. +// If you specify both this parameter and the `Accept` HTTP header, this +// parameter takes precedence. +// API name: format +func (r *Query) Format(format sqlformat.SqlFormat) *Query { + r.values.Set("format", format.String()) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Query) ErrorTrace(errortrace bool) *Query { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Query) FilterPath(filterpaths ...string) *Query { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Query) Human(human bool) *Query { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Query) Pretty(pretty bool) *Query { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// If `true`, the response has partial results when there are shard request +// timeouts or shard failures. +// If `false`, the API returns an error with no partial results. +// API name: allow_partial_search_results +func (r *Query) AllowPartialSearchResults(allowpartialsearchresults bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.AllowPartialSearchResults = &allowpartialsearchresults + + return r +} + +// The default catalog (cluster) for queries. +// If unspecified, the queries execute on the data in the local cluster only. +// API name: catalog +func (r *Query) Catalog(catalog string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Catalog = &catalog + + return r +} + +// If `true`, the results are in a columnar fashion: one row represents all the +// values of a certain column from the current page of results. 
+// The API supports this parameter only for CBOR, JSON, SMILE, and YAML +// responses. +// API name: columnar +func (r *Query) Columnar(columnar bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Columnar = &columnar + + return r +} + +// The cursor used to retrieve a set of paginated results. +// If you specify a cursor, the API only uses the `columnar` and `time_zone` +// request body parameters. +// It ignores other request body parameters. +// API name: cursor +func (r *Query) Cursor(cursor string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Cursor = &cursor + + return r +} + +// The maximum number of rows (or entries) to return in one response. +// API name: fetch_size +func (r *Query) FetchSize(fetchsize int) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FetchSize = &fetchsize + + return r +} + +// If `false`, the API returns an exception when encountering multiple values +// for a field. +// If `true`, the API is lenient and returns the first value from the array with +// no guarantee of consistent results. +// API name: field_multi_value_leniency +func (r *Query) FieldMultiValueLeniency(fieldmultivalueleniency bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FieldMultiValueLeniency = &fieldmultivalueleniency + + return r +} + +// The Elasticsearch query DSL for additional filtering. +// API name: filter +func (r *Query) Filter(filter types.QueryVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// If `true`, the search can run on frozen indices. +// API name: index_using_frozen +func (r *Query) IndexUsingFrozen(indexusingfrozen bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexUsingFrozen = &indexusingfrozen + + return r +} + +// The retention period for an async or saved synchronous search. +// API name: keep_alive +func (r *Query) KeepAlive(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepAlive = *duration.DurationCaster() + + return r +} + +// If `true`, Elasticsearch stores synchronous searches if you also specify the +// `wait_for_completion_timeout` parameter. +// If `false`, Elasticsearch only stores async searches that don't finish before +// the `wait_for_completion_timeout`. +// API name: keep_on_completion +func (r *Query) KeepOnCompletion(keeponcompletion bool) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.KeepOnCompletion = &keeponcompletion + + return r +} + +// The minimum retention period for the scroll cursor. +// After this time period, a pagination request might fail because the scroll +// cursor is no longer available. +// Subsequent scroll requests prolong the lifetime of the scroll cursor by the +// duration of `page_timeout` in the scroll request. 
+// API name: page_timeout +func (r *Query) PageTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.PageTimeout = *duration.DurationCaster() + + return r +} + +// The values for parameters in the query. +// API name: params +func (r *Query) Params(params ...json.RawMessage) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range params { + + r.req.Params = append(r.req.Params, v) + + } + return r +} + +// The SQL query to run. +// API name: query +func (r *Query) Query(query string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = &query + + return r +} + +// The timeout before the request fails. +// API name: request_timeout +func (r *Query) RequestTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RequestTimeout = *duration.DurationCaster() + + return r +} + +// One or more runtime fields for the search request. +// These fields take precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Query) RuntimeMappings(runtimefields types.RuntimeFieldsVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RuntimeMappings = *runtimefields.RuntimeFieldsCaster() + + return r +} + +// The ISO-8601 time zone ID for the search. +// API name: time_zone +func (r *Query) TimeZone(timezone string) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TimeZone = &timezone + + return r +} + +// The period to wait for complete results. +// It defaults to no timeout, meaning the request waits for complete search +// results. +// If the search doesn't finish within this period, the search becomes async. +// +// To save a synchronous search, you must specify this parameter and the +// `keep_on_completion` parameter. +// API name: wait_for_completion_timeout +func (r *Query) WaitForCompletionTimeout(duration types.DurationVariant) *Query { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.WaitForCompletionTimeout = *duration.DurationCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/request.go new file mode 100644 index 000000000..ad7347e58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/request.go @@ -0,0 +1,299 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package query + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/query/QuerySqlRequest.ts#L27-L151 +type Request struct { + + // AllowPartialSearchResults If `true`, the response has partial results when there are shard request + // timeouts or shard failures. + // If `false`, the API returns an error with no partial results. + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + // Catalog The default catalog (cluster) for queries. + // If unspecified, the queries execute on the data in the local cluster only. + Catalog *string `json:"catalog,omitempty"` + // Columnar If `true`, the results are in a columnar fashion: one row represents all the + // values of a certain column from the current page of results. + // The API supports this parameter only for CBOR, JSON, SMILE, and YAML + // responses. + Columnar *bool `json:"columnar,omitempty"` + // Cursor The cursor used to retrieve a set of paginated results. + // If you specify a cursor, the API only uses the `columnar` and `time_zone` + // request body parameters. + // It ignores other request body parameters. + Cursor *string `json:"cursor,omitempty"` + // FetchSize The maximum number of rows (or entries) to return in one response. + FetchSize *int `json:"fetch_size,omitempty"` + // FieldMultiValueLeniency If `false`, the API returns an exception when encountering multiple values + // for a field. + // If `true`, the API is lenient and returns the first value from the array with + // no guarantee of consistent results. + FieldMultiValueLeniency *bool `json:"field_multi_value_leniency,omitempty"` + // Filter The Elasticsearch query DSL for additional filtering. + Filter *types.Query `json:"filter,omitempty"` + // IndexUsingFrozen If `true`, the search can run on frozen indices. + IndexUsingFrozen *bool `json:"index_using_frozen,omitempty"` + // KeepAlive The retention period for an async or saved synchronous search. + KeepAlive types.Duration `json:"keep_alive,omitempty"` + // KeepOnCompletion If `true`, Elasticsearch stores synchronous searches if you also specify the + // `wait_for_completion_timeout` parameter. + // If `false`, Elasticsearch only stores async searches that don't finish before + // the `wait_for_completion_timeout`. + KeepOnCompletion *bool `json:"keep_on_completion,omitempty"` + // PageTimeout The minimum retention period for the scroll cursor. + // After this time period, a pagination request might fail because the scroll + // cursor is no longer available. + // Subsequent scroll requests prolong the lifetime of the scroll cursor by the + // duration of `page_timeout` in the scroll request. + PageTimeout types.Duration `json:"page_timeout,omitempty"` + // Params The values for parameters in the query. + Params []json.RawMessage `json:"params,omitempty"` + // Query The SQL query to run. + Query *string `json:"query,omitempty"` + // RequestTimeout The timeout before the request fails. 
+ RequestTimeout types.Duration `json:"request_timeout,omitempty"` + // RuntimeMappings One or more runtime fields for the search request. + // These fields take precedence over mapped fields with the same name. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // TimeZone The ISO-8601 time zone ID for the search. + TimeZone *string `json:"time_zone,omitempty"` + // WaitForCompletionTimeout The period to wait for complete results. + // It defaults to no timeout, meaning the request waits for complete search + // results. + // If the search doesn't finish within this period, the search becomes async. + // + // To save a synchronous search, you must specify this parameter and the + // `keep_on_completion` parameter. + WaitForCompletionTimeout types.Duration `json:"wait_for_completion_timeout,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Query request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_partial_search_results": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err) + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + + case "catalog": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Catalog", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Catalog = &o + + case "columnar": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Columnar", err) + } + s.Columnar = &value + case bool: + s.Columnar = &v + } + + case "cursor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cursor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cursor = &o + + case "fetch_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FetchSize", err) + } + s.FetchSize = &value + case float64: + f := int(v) + s.FetchSize = &f + } + + case "field_multi_value_leniency": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FieldMultiValueLeniency", err) + } + s.FieldMultiValueLeniency = &value + case bool: + s.FieldMultiValueLeniency = &v + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "index_using_frozen": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexUsingFrozen", err) + } + s.IndexUsingFrozen = 
&value + case bool: + s.IndexUsingFrozen = &v + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + + case "keep_on_completion": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepOnCompletion", err) + } + s.KeepOnCompletion = &value + case bool: + s.KeepOnCompletion = &v + } + + case "page_timeout": + if err := dec.Decode(&s.PageTimeout); err != nil { + return fmt.Errorf("%s | %w", "PageTimeout", err) + } + + case "params": + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "request_timeout": + if err := dec.Decode(&s.RequestTimeout); err != nil { + return fmt.Errorf("%s | %w", "RequestTimeout", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + case "wait_for_completion_timeout": + if err := dec.Decode(&s.WaitForCompletionTimeout); err != nil { + return fmt.Errorf("%s | %w", "WaitForCompletionTimeout", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/response.go new file mode 100644 index 000000000..66c6f2f0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/query/response.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package query + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package query +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/query/QuerySqlResponse.ts#L23-L60 +type Response struct { + + // Columns Column headings for the search results. Each object is a column. + Columns []types.Column `json:"columns,omitempty"` + // Cursor The cursor for the next set of paginated results. + // For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP + // header. 
+ Cursor *string `json:"cursor,omitempty"` + // Id The identifier for the search. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` + // HTTP header. + Id *string `json:"id,omitempty"` + // IsPartial If `true`, the response does not contain complete search results. + // If `is_partial` is `true` and `is_running` is `true`, the search is still + // running. + // If `is_partial` is `true` but `is_running` is `false`, the results are + // partial due to a failure or timeout. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. + IsPartial *bool `json:"is_partial,omitempty"` + // IsRunning If `true`, the search is still running. + // If `false`, the search has finished. + // This value is returned only for async and saved synchronous searches. + // For CSV, TSV, and TXT responses, this value is returned in the + // `Async-partial` HTTP header. + IsRunning *bool `json:"is_running,omitempty"` + // Rows The values for the search results. + Rows [][]json.RawMessage `json:"rows"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/request.go new file mode 100644 index 000000000..c0562ee2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/request.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package translate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package translate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/translate/TranslateSqlRequest.ts#L25-L65 +type Request struct { + + // FetchSize The maximum number of rows (or entries) to return in one response. + FetchSize *int `json:"fetch_size,omitempty"` + // Filter The Elasticsearch query DSL for additional filtering. + Filter *types.Query `json:"filter,omitempty"` + // Query The SQL query to run. + Query string `json:"query"` + // TimeZone The ISO-8601 time zone ID for the search. 
+ TimeZone *string `json:"time_zone,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Translate request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fetch_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FetchSize", err) + } + s.FetchSize = &value + case float64: + f := int(v) + s.FetchSize = &f + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/response.go new file mode 100644 index 000000000..f388d5c3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/response.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package translate + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package translate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/translate/TranslateSqlResponse.ts#L27-L37 +type Response struct { + Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` + Fields []types.FieldAndFormat `json:"fields,omitempty"` + Query *types.Query `json:"query,omitempty"` + Size *int64 `json:"size,omitempty"` + Sort []types.SortCombinations `json:"sort,omitempty"` + Source_ types.SourceConfig `json:"_source,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregations, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int64(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := types.NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/translate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/translate.go new file mode 100644 index 000000000..329f3a699 --- 
/dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate/translate.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. +package translate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Translate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTranslate type alias for index. +type NewTranslate func() *Translate + +// NewTranslateFunc returns a new instance of Translate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTranslateFunc(tp elastictransport.Interface) NewTranslate { + return func() *Translate { + n := New(tp) + + return n + } +} + +// Translate SQL into Elasticsearch queries. +// Translate an SQL search into a search API request containing Query DSL. +// It accepts the same request body parameters as the SQL search API, excluding +// `cursor`. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate +func New(tp elastictransport.Interface) *Translate { + r := &Translate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Translate) Raw(raw io.Reader) *Translate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
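A minimal sketch of translating SQL into Query DSL through the typed client, assuming `elasticsearch.NewTypedClient` and a placeholder address and index:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// "library" is a placeholder index.
	res, err := es.Sql.Translate().
		Query("SELECT author FROM library GROUP BY author").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The response mirrors a search body: query, size, sort, _source, ...
	body, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(body))
}
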
+func (r *Translate) Request(req *Request) *Translate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Translate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Translate: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_sql") + path.WriteString("/") + path.WriteString("translate") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
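The request body can also be loaded from raw JSON via `FromJSON` and attached with `Request`, rather than the fluent setters; a sketch under the same placeholder-client assumptions:

package main

import (
	"context"
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"}, // placeholder address
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build the request body from a raw JSON document instead of setters.
	req, err := translate.NewRequest().FromJSON(`{"query":"SELECT name FROM library LIMIT 5","fetch_size":5}`)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := es.Sql.Translate().Request(req).Do(context.Background()); err != nil {
		log.Fatal(err)
	}
}
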
+func (r Translate) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "sql.translate") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "sql.translate") + if reader := instrument.RecordRequestBody(ctx, "sql.translate", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "sql.translate") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Translate query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a translate.Response +func (r Translate) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "sql.translate") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the Translate headers map. +func (r *Translate) Header(key, value string) *Translate { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Translate) ErrorTrace(errortrace bool) *Translate { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
+// API name: filter_path +func (r *Translate) FilterPath(filterpaths ...string) *Translate { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Translate) Human(human bool) *Translate { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Translate) Pretty(pretty bool) *Translate { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The maximum number of rows (or entries) to return in one response. +// API name: fetch_size +func (r *Translate) FetchSize(fetchsize int) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.FetchSize = &fetchsize + + return r +} + +// The Elasticsearch query DSL for additional filtering. +// API name: filter +func (r *Translate) Filter(filter types.QueryVariant) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Filter = filter.QueryCaster() + + return r +} + +// The SQL query to run. +// API name: query +func (r *Translate) Query(query string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query + + return r +} + +// The ISO-8601 time zone ID for the search. +// API name: time_zone +func (r *Translate) TimeZone(timezone string) *Translate { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TimeZone = &timezone + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/certificates.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/certificates.go new file mode 100644 index 000000000..40ac83aea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/certificates.go @@ -0,0 +1,379 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get SSL certificates. 
+// +// Get information about the X.509 certificates that are used to encrypt +// communications in the cluster. +// The API returns a list that includes certificates from all TLS contexts +// including: +// +// - Settings for transport and HTTP interfaces +// - TLS settings that are used within authentication realms +// - TLS settings for remote monitoring exporters +// +// The list includes certificates that are used for configuring trust, such as +// those configured in the `xpack.security.transport.ssl.truststore` and +// `xpack.security.transport.ssl.certificate_authorities` settings. +// It also includes certificates that are used for configuring server identity, +// such as `xpack.security.http.ssl.keystore` and +// `xpack.security.http.ssl.certificate settings`. +// +// The list does not include certificates that are sourced from the default SSL +// context of the Java Runtime Environment (JRE), even if those certificates are +// in use within Elasticsearch. +// +// NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the +// API returns all the certificates that are included in the PKCS#11 token +// irrespective of whether these are used in the Elasticsearch TLS +// configuration. +// +// If Elasticsearch is configured to use a keystore or truststore, the API +// output includes all certificates in that store, even though some of the +// certificates might not be in active use within the cluster. +package certificates + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Certificates struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCertificates type alias for index. +type NewCertificates func() *Certificates + +// NewCertificatesFunc returns a new instance of Certificates with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewCertificatesFunc(tp elastictransport.Interface) NewCertificates { + return func() *Certificates { + n := New(tp) + + return n + } +} + +// Get SSL certificates. +// +// Get information about the X.509 certificates that are used to encrypt +// communications in the cluster. +// The API returns a list that includes certificates from all TLS contexts +// including: +// +// - Settings for transport and HTTP interfaces +// - TLS settings that are used within authentication realms +// - TLS settings for remote monitoring exporters +// +// The list includes certificates that are used for configuring trust, such as +// those configured in the `xpack.security.transport.ssl.truststore` and +// `xpack.security.transport.ssl.certificate_authorities` settings. +// It also includes certificates that are used for configuring server identity, +// such as `xpack.security.http.ssl.keystore` and +// `xpack.security.http.ssl.certificate settings`. +// +// The list does not include certificates that are sourced from the default SSL +// context of the Java Runtime Environment (JRE), even if those certificates are +// in use within Elasticsearch. 
+// +// NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the +// API returns all the certificates that are included in the PKCS#11 token +// irrespective of whether these are used in the Elasticsearch TLS +// configuration. +// +// If Elasticsearch is configured to use a keystore or truststore, the API +// output includes all certificates in that store, even though some of the +// certificates might not be in active use within the cluster. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates +func New(tp elastictransport.Interface) *Certificates { + r := &Certificates{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Certificates) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_ssl") + path.WriteString("/") + path.WriteString("certificates") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
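+//
+// Perform returns the raw, undecoded *http.Response and leaves closing the
+// body to the caller; Do is the typed alternative. A minimal sketch, assuming
+// tp is an already configured elastictransport.Interface:
+//
+//	res, err := New(tp).Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close()
+//	}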
+func (r Certificates) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "ssl.certificates") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "ssl.certificates") + if reader := instrument.RecordRequestBody(ctx, "ssl.certificates", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "ssl.certificates") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Certificates query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a certificates.Response +func (r Certificates) Do(providedCtx context.Context) (Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ssl.certificates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
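+//
+// For this API the decoded form is usually more convenient than a bare
+// success check. An illustrative sketch (tp as above, error handling
+// abbreviated):
+//
+//	certs, err := New(tp).Do(context.Background())
+//	if err != nil {
+//		return err
+//	}
+//	for _, cert := range certs {
+//		fmt.Printf("%+v\n", cert)
+//	}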
+func (r Certificates) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "ssl.certificates") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Certificates query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Certificates headers map. +func (r *Certificates) Header(key, value string) *Certificates { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Certificates) ErrorTrace(errortrace bool) *Certificates { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Certificates) FilterPath(filterpaths ...string) *Certificates { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Certificates) Human(human bool) *Certificates { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Certificates) Pretty(pretty bool) *Certificates { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/response.go new file mode 100644 index 000000000..3bfdf33c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package certificates + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package certificates +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L25 + +type Response []types.CertificateInformation + +// NewResponse returns a Response +func NewResponse() Response { + r := Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsdisable/logs_disable.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsdisable/logs_disable.go new file mode 100644 index 000000000..666628168 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsdisable/logs_disable.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Disable the Logs Streams feature for this cluster +package logsdisable + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type LogsDisable struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewLogsDisable type alias for index. +type NewLogsDisable func() *LogsDisable + +// NewLogsDisableFunc returns a new instance of LogsDisable with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
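+//
+// The returned constructor is what the generated client index stores, so one
+// transport can be shared across every endpoint. Illustrative only:
+//
+//	newDisable := NewLogsDisableFunc(tp)
+//	ok, err := newDisable().Do(context.Background())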
+func NewLogsDisableFunc(tp elastictransport.Interface) NewLogsDisable { + return func() *LogsDisable { + n := New(tp) + + return n + } +} + +// Disable the Logs Streams feature for this cluster +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-logs-disable.html +func New(tp elastictransport.Interface) *LogsDisable { + r := &LogsDisable{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *LogsDisable) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_streams") + path.WriteString("/") + path.WriteString("logs") + path.WriteString("/") + path.WriteString("_disable") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r LogsDisable) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "streams.logs_disable") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "streams.logs_disable") + if reader := instrument.RecordRequestBody(ctx, "streams.logs_disable", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.logs_disable") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the LogsDisable query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a logsdisable.Response +func (r LogsDisable) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
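+//
+// Note that a 404 response yields (false, nil) rather than an error, so a
+// false result alone does not distinguish "missing" from "failed". Sketch:
+//
+//	disabled, err := New(tp).IsSuccess(ctx)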
+func (r LogsDisable) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.logs_disable") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the LogsDisable query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the LogsDisable headers map. +func (r *LogsDisable) Header(key, value string) *LogsDisable { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsenable/logs_enable.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsenable/logs_enable.go new file mode 100644 index 000000000..df3c81c7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsenable/logs_enable.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Enable the Logs Streams feature for this cluster +package logsenable + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type LogsEnable struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewLogsEnable type alias for index. +type NewLogsEnable func() *LogsEnable + +// NewLogsEnableFunc returns a new instance of LogsEnable with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewLogsEnableFunc(tp elastictransport.Interface) NewLogsEnable { + return func() *LogsEnable { + n := New(tp) + + return n + } +} + +// Enable the Logs Streams feature for this cluster +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-logs-enable.html +func New(tp elastictransport.Interface) *LogsEnable { + r := &LogsEnable{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *LogsEnable) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_streams") + path.WriteString("/") + path.WriteString("logs") + path.WriteString("/") + path.WriteString("_enable") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r LogsEnable) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "streams.logs_enable") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "streams.logs_enable") + if reader := instrument.RecordRequestBody(ctx, "streams.logs_enable", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.logs_enable") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the LogsEnable query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a logsenable.Response +func (r LogsEnable) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
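+//
+// A hedged sketch that enables logs streams while tagging the request with a
+// custom header (the header value is purely illustrative):
+//
+//	ok, err := New(tp).Header("X-Opaque-Id", "streams-setup").Do(ctx)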
+func (r LogsEnable) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.logs_enable") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the LogsEnable query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the LogsEnable headers map. +func (r *LogsEnable) Header(key, value string) *LogsEnable { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/status/status.go new file mode 100644 index 000000000..61ed64c74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/streams/status/status.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Return the current status of the streams feature for each streams type +package status + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Status struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStatus type alias for index. +type NewStatus func() *Status + +// NewStatusFunc returns a new instance of Status with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewStatusFunc(tp elastictransport.Interface) NewStatus { + return func() *Status { + n := New(tp) + + return n + } +} + +// Return the current status of the streams feature for each streams type +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/streams-status.html +func New(tp elastictransport.Interface) *Status { + r := &Status{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Status) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_streams") + path.WriteString("/") + path.WriteString("status") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Status) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "streams.status") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "streams.status") + if reader := instrument.RecordRequestBody(ctx, "streams.status", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "streams.status") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Status query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a status.Response +func (r Status) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
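+//
+// Because Do for this endpoint only reports success, reading the actual
+// status document requires Perform. Sketch with error handling elided:
+//
+//	res, _ := New(tp).Perform(ctx)
+//	defer res.Body.Close()
+//	body, _ := io.ReadAll(res.Body)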
+func (r Status) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "streams.status") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Status query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Status headers map. +func (r *Status) Header(key, value string) *Status { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/delete_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/delete_synonym.go new file mode 100644 index 000000000..26733a826 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/delete_synonym.go @@ -0,0 +1,392 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. +// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. +// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. 
+// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. +package deletesynonym + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteSynonym type alias for index. +type NewDeleteSynonym func(id string) *DeleteSynonym + +// NewDeleteSynonymFunc returns a new instance of DeleteSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteSynonymFunc(tp elastictransport.Interface) NewDeleteSynonym { + return func(id string) *DeleteSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete a synonym set. +// +// You can only delete a synonyms set that is not in use by any index analyzer. +// +// Synonyms sets can be used in synonym graph token filters and synonym token +// filters. +// These synonym filters can be used as part of search analyzers. +// +// Analyzers need to be loaded when an index is restored (such as when a node +// starts, or the index becomes open). +// Even if the analyzer is not used on any field mapping, it still needs to be +// loaded on the index recovery phase. +// +// If any analyzers cannot be loaded, the index becomes unavailable and the +// cluster status becomes red or yellow as index shards are not available. +// To prevent that, synonyms sets that are used in analyzers can't be deleted. +// A delete request in this case will return a 400 response code. +// +// To remove a synonyms set, you must first remove all indices that contain +// analyzers using it. +// You can migrate an index by creating a new index that does not contain the +// token filter with the synonyms set, and use the reindex API in order to copy +// over the index data. +// Once finished, you can delete the index. +// When the synonyms set is not used in analyzers, you will be able to delete +// it. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym +func New(tp elastictransport.Interface) *DeleteSynonym { + r := &DeleteSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
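+//
+// HttpRequest is useful when the request should be executed by a custom
+// HTTP client instead of the transport. Illustrative sketch (the set name is
+// a placeholder):
+//
+//	req, err := NewDeleteSynonymFunc(tp)("my-synonyms-set").HttpRequest(ctx)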
+func (r *DeleteSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r DeleteSynonym) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.delete_synonym") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.delete_synonym") + if reader := instrument.RecordRequestBody(ctx, "synonyms.delete_synonym", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.delete_synonym") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteSynonym query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletesynonym.Response +func (r DeleteSynonym) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if 
err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteSynonym) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteSynonym query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteSynonym headers map. +func (r *DeleteSynonym) Header(key, value string) *DeleteSynonym { + r.headers.Set(key, value) + + return r +} + +// Id The synonyms set identifier to delete. +// API Name: id +func (r *DeleteSynonym) _id(id string) *DeleteSynonym { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteSynonym) ErrorTrace(errortrace bool) *DeleteSynonym { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteSynonym) FilterPath(filterpaths ...string) *DeleteSynonym { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteSynonym) Human(human bool) *DeleteSynonym { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteSynonym) Pretty(pretty bool) *DeleteSynonym { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/response.go new file mode 100644 index 000000000..1463e0a70 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletesynonym + +// Response holds the response body struct for the package deletesynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go new file mode 100644 index 000000000..9ef6e0491 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. 
+package deletesynonymrule + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + setid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteSynonymRule type alias for index. +type NewDeleteSynonymRule func(setid, ruleid string) *DeleteSynonymRule + +// NewDeleteSynonymRuleFunc returns a new instance of DeleteSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteSynonymRuleFunc(tp elastictransport.Interface) NewDeleteSynonymRule { + return func(setid, ruleid string) *DeleteSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Delete a synonym rule. +// Delete a synonym rule from a synonym set. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule +func New(tp elastictransport.Interface) *DeleteSynonymRule { + r := &DeleteSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "setid", r.setid) + } + path.WriteString(r.setid) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteSynonymRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.delete_synonym_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.delete_synonym_rule") + if reader := instrument.RecordRequestBody(ctx, "synonyms.delete_synonym_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.delete_synonym_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteSynonymRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletesynonymrule.Response +func (r DeleteSynonymRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
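+//
+// A minimal sketch that deletes a rule and reloads the affected analyzers
+// before returning (the set and rule IDs are placeholders):
+//
+//	resp, err := NewDeleteSynonymRuleFunc(tp)("my-set", "rule-1").Refresh(true).Do(ctx)
+//	if err == nil {
+//		fmt.Println(resp.Result)
+//	}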
+func (r DeleteSynonymRule) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.delete_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteSynonymRule query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteSynonymRule headers map. +func (r *DeleteSynonymRule) Header(key, value string) *DeleteSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The ID of the synonym set to update. +// API Name: setid +func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The ID of the synonym rule to delete. +// API Name: ruleid +func (r *DeleteSynonymRule) _ruleid(ruleid string) *DeleteSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// Refresh If `true`, the request will refresh the analyzers with the deleted synonym +// rule and wait for the new synonyms to be available before returning. +// If `false`, analyzers will not be reloaded with the deleted synonym rule +// API name: refresh +func (r *DeleteSynonymRule) Refresh(refresh bool) *DeleteSynonymRule { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteSynonymRule) ErrorTrace(errortrace bool) *DeleteSynonymRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteSynonymRule) FilterPath(filterpaths ...string) *DeleteSynonymRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteSynonymRule) Human(human bool) *DeleteSynonymRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *DeleteSynonymRule) Pretty(pretty bool) *DeleteSynonymRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/response.go new file mode 100644 index 000000000..f335ccc86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletesynonymrule + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package deletesynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L25 +type Response struct { + + // ReloadAnalyzersDetails Updating synonyms in a synonym set can reload the associated analyzers in + // case refresh is set to true. + // This information is the analyzers reloading result. + ReloadAnalyzersDetails *types.ReloadResult `json:"reload_analyzers_details,omitempty"` + // Result The update operation result. + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/get_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/get_synonym.go new file mode 100644 index 000000000..563056820 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/get_synonym.go @@ -0,0 +1,358 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a synonym set. +package getsynonym + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSynonym type alias for index. +type NewGetSynonym func(id string) *GetSynonym + +// NewGetSynonymFunc returns a new instance of GetSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSynonymFunc(tp elastictransport.Interface) NewGetSynonym { + return func(id string) *GetSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Get a synonym set. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym +func New(tp elastictransport.Interface) *GetSynonym { + r := &GetSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSynonym) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.get_synonym") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonym") + if reader := instrument.RecordRequestBody(ctx, "synonyms.get_synonym", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonym") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSynonym query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonym.Response +func (r GetSynonym) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetSynonym) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSynonym query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonym headers map. +func (r *GetSynonym) Header(key, value string) *GetSynonym { + r.headers.Set(key, value) + + return r +} + +// Id The synonyms set identifier to retrieve. +// API Name: id +func (r *GetSynonym) _id(id string) *GetSynonym { + r.paramSet |= idMask + r.id = id + + return r +} + +// From The starting offset for query rules to retrieve. +// API name: from +func (r *GetSynonym) From(from int) *GetSynonym { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size The max number of query rules to retrieve. +// API name: size +func (r *GetSynonym) Size(size int) *GetSynonym { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSynonym) ErrorTrace(errortrace bool) *GetSynonym { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSynonym) FilterPath(filterpaths ...string) *GetSynonym { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSynonym) Human(human bool) *GetSynonym { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSynonym) Pretty(pretty bool) *GetSynonym { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/response.go new file mode 100644 index 000000000..39f92bb6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
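To make the GetSynonym flow concrete, here is a short sketch using only the constructor and builder methods defined above (NewGetSynonymFunc, From, Size, Do). The transport is assumed to be wired as in the earlier sketch, the set name is a placeholder, and Count and SynonymsSet are the response fields defined in the response.go that follows.

package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym"
)

// listRules pages through the rules of one synonyms set. From and Size map
// directly to the query parameters documented above.
func listRules(ctx context.Context, tp elastictransport.Interface) error {
	// "my-synonyms-set" is a hypothetical set ID.
	res, err := getsynonym.NewGetSynonymFunc(tp)("my-synonyms-set").
		From(0).
		Size(100).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("set holds %d rules; fetched %d in this page\n", res.Count, len(res.SynonymsSet))
	return nil
}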
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsynonym + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L34 +type Response struct { + + // Count The total number of synonyms rules that the synonyms set contains. + Count int `json:"count"` + // SynonymsSet Synonym rule details. + SynonymsSet []types.SynonymRuleRead `json:"synonyms_set"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/get_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/get_synonym_rule.go new file mode 100644 index 000000000..ac5d09271 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/get_synonym_rule.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a synonym rule. +// Get a synonym rule from a synonym set. +package getsynonymrule + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + setid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSynonymRule type alias for index. +type NewGetSynonymRule func(setid, ruleid string) *GetSynonymRule + +// NewGetSynonymRuleFunc returns a new instance of GetSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSynonymRuleFunc(tp elastictransport.Interface) NewGetSynonymRule { + return func(setid, ruleid string) *GetSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Get a synonym rule. +// Get a synonym rule from a synonym set. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule +func New(tp elastictransport.Interface) *GetSynonymRule { + r := &GetSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "setid", r.setid) + } + path.WriteString(r.setid) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSynonymRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.get_synonym_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonym_rule") + if reader := instrument.RecordRequestBody(ctx, "synonyms.get_synonym_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonym_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSynonymRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonymrule.Response +func (r GetSynonymRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetSynonymRule) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSynonymRule query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonymRule headers map. +func (r *GetSynonymRule) Header(key, value string) *GetSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The ID of the synonym set to retrieve the synonym rule from. +// API Name: setid +func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The ID of the synonym rule to retrieve. +// API Name: ruleid +func (r *GetSynonymRule) _ruleid(ruleid string) *GetSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSynonymRule) ErrorTrace(errortrace bool) *GetSynonymRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSynonymRule) FilterPath(filterpaths ...string) *GetSynonymRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSynonymRule) Human(human bool) *GetSynonymRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSynonymRule) Pretty(pretty bool) *GetSynonymRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/response.go new file mode 100644 index 000000000..28e3886a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
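The same pattern fetches a single rule. Everything used here is defined above or in the response.go that follows (Id plus the Solr-format Synonyms string); the transport and the IDs are assumed placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule"
)

// showRule retrieves one rule from a set and prints its Solr-format synonyms,
// e.g. "ipod, i-pod => iPod".
func showRule(ctx context.Context, tp elastictransport.Interface) error {
	// Hypothetical set and rule IDs.
	res, err := getsynonymrule.NewGetSynonymRuleFunc(tp)("my-synonyms-set", "rule-1").Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("rule %s: %s\n", res.Id, res.Synonyms)
	return nil
}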
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsynonymrule + +// Response holds the response body struct for the package getsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L25 +type Response struct { + + // Id Synonym Rule identifier + Id string `json:"id"` + // Synonyms Synonyms, in Solr format, that conform the synonym rule. + Synonyms string `json:"synonyms"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go new file mode 100644 index 000000000..abbc4b81e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go @@ -0,0 +1,337 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get all synonym sets. +// Get a summary of all defined synonym sets. +package getsynonymssets + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonymsSets struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetSynonymsSets type alias for index. 
+type NewGetSynonymsSets func() *GetSynonymsSets + +// NewGetSynonymsSetsFunc returns a new instance of GetSynonymsSets with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSynonymsSetsFunc(tp elastictransport.Interface) NewGetSynonymsSets { + return func() *GetSynonymsSets { + n := New(tp) + + return n + } +} + +// Get all synonym sets. +// Get a summary of all defined synonym sets. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym +func New(tp elastictransport.Interface) *GetSynonymsSets { + r := &GetSynonymsSets{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSynonymsSets) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_synonyms") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSynonymsSets) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.get_synonyms_sets") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.get_synonyms_sets") + if reader := instrument.RecordRequestBody(ctx, "synonyms.get_synonyms_sets", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.get_synonyms_sets") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetSynonymsSets query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonymssets.Response +func (r GetSynonymsSets) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonyms_sets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetSynonymsSets) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.get_synonyms_sets") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSynonymsSets query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonymsSets headers map. +func (r *GetSynonymsSets) Header(key, value string) *GetSynonymsSets { + r.headers.Set(key, value) + + return r +} + +// From The starting offset for synonyms sets to retrieve. +// API name: from +func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size The maximum number of synonyms sets to retrieve. +// API name: size +func (r *GetSynonymsSets) Size(size int) *GetSynonymsSets { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSynonymsSets) ErrorTrace(errortrace bool) *GetSynonymsSets { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSynonymsSets) FilterPath(filterpaths ...string) *GetSynonymsSets { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSynonymsSets) Human(human bool) *GetSynonymsSets { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSynonymsSets) Pretty(pretty bool) *GetSynonymsSets { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/response.go new file mode 100644 index 000000000..6b820c4df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsynonymssets + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getsynonymssets +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L34 +type Response struct { + + // Count The total number of synonyms sets defined. + Count int `json:"count"` + // Results The identifier and total number of defined synonym rules for each synonyms + // set. + Results []types.SynonymsSetItem `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/put_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/put_synonym.go new file mode 100644 index 000000000..784456466 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/put_synonym.go @@ -0,0 +1,403 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. +// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. +// +// For practical examples of how to create or update a synonyms set, refer to +// the External documentation. 
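Before the putsynonym endpoint below, a sketch for the getsynonymssets listing completed above. From and Size are the paging parameters shown in its builder, and Count and Results are the fields of its response struct; the transport wiring and the page size are assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets"
)

// listSets prints a paged summary of every synonyms set defined on the cluster.
func listSets(ctx context.Context, tp elastictransport.Interface) error {
	res, err := getsynonymssets.NewGetSynonymsSetsFunc(tp)().
		From(0).
		Size(20). // arbitrary page size for illustration
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d sets defined; %d returned in this page\n", res.Count, len(res.Results))
	return nil
}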
+package putsynonym + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutSynonym type alias for index. +type NewPutSynonym func(id string) *PutSynonym + +// NewPutSynonymFunc returns a new instance of PutSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutSynonymFunc(tp elastictransport.Interface) NewPutSynonym { + return func(id string) *PutSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Create or update a synonym set. +// Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +// If you need to manage more synonym rules, you can create multiple synonym +// sets. +// +// When an existing synonyms set is updated, the search analyzers that use the +// synonyms set are reloaded automatically for all indices. +// This is equivalent to invoking the reload search analyzers API for all +// indices that use the synonyms set. +// +// For practical examples of how to create or update a synonyms set, refer to +// the External documentation. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym +func New(tp elastictransport.Interface) *PutSynonym { + r := &PutSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSynonym) Raw(raw io.Reader) *PutSynonym { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSynonym) Request(req *Request) *PutSynonym { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSynonym: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutSynonym) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.put_synonym") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.put_synonym") + if reader := instrument.RecordRequestBody(ctx, "synonyms.put_synonym", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.put_synonym") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutSynonym query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsynonym.Response +func (r PutSynonym) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.put_synonym") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if 
instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutSynonym headers map. +func (r *PutSynonym) Header(key, value string) *PutSynonym { + r.headers.Set(key, value) + + return r +} + +// Id The ID of the synonyms set to be created or updated. +// API Name: id +func (r *PutSynonym) _id(id string) *PutSynonym { + r.paramSet |= idMask + r.id = id + + return r +} + +// Refresh If `true`, the request will refresh the analyzers with the new synonyms set +// and wait for the new synonyms to be available before returning. +// If `false`, analyzers will not be reloaded with the new synonym set +// API name: refresh +func (r *PutSynonym) Refresh(refresh bool) *PutSynonym { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutSynonym) ErrorTrace(errortrace bool) *PutSynonym { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutSynonym) FilterPath(filterpaths ...string) *PutSynonym { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutSynonym) Human(human bool) *PutSynonym { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutSynonym) Pretty(pretty bool) *PutSynonym { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The synonym rules definitions for the synonyms set. 
+// API name: synonyms_set +func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRuleVariant) *PutSynonym { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + r.req.SynonymsSet = make([]types.SynonymRule, len(synonymssets)) + for i, v := range synonymssets { + r.req.SynonymsSet[i] = *v.SynonymRuleCaster() + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/request.go new file mode 100644 index 000000000..7b67f6f2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/request.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsynonym + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L67 +type Request struct { + + // SynonymsSet The synonym rules definitions for the synonyms set. 
+ SynonymsSet []types.SynonymRule `json:"synonyms_set"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putsynonym request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "synonyms_set": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := types.NewSynonymRule() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + + s.SynonymsSet = append(s.SynonymsSet, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SynonymsSet); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/response.go new file mode 100644 index 000000000..fbf1179e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsynonym + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L23-L36 +type Response struct { + + // ReloadAnalyzersDetails Updating a synonyms set can reload the associated analyzers in case refresh + // is set to true. + // This information is the analyzers reloading result. + ReloadAnalyzersDetails *types.ReloadResult `json:"reload_analyzers_details,omitempty"` + // Result The update operation result. 
+ Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/put_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/put_synonym_rule.go new file mode 100644 index 000000000..5f1e42c62 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/put_synonym_rule.go @@ -0,0 +1,411 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a synonym rule. +// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. +package putsynonymrule + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + setid string + ruleid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutSynonymRule type alias for index. +type NewPutSynonymRule func(setid, ruleid string) *PutSynonymRule + +// NewPutSynonymRuleFunc returns a new instance of PutSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutSynonymRuleFunc(tp elastictransport.Interface) NewPutSynonymRule { + return func(setid, ruleid string) *PutSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Create or update a synonym rule. +// Create or update a synonym rule in a synonym set. +// +// If any of the synonym rules included is invalid, the API returns an error. +// +// When you update a synonym rule, all analyzers using the synonyms set will be +// reloaded automatically to reflect the new rule. 
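The putsynonym pieces above (builder, Request, typed response) combine as in the following sketch. It builds the body with Request.FromJSON, defined in request.go above, so the example does not need to assume the exact field types of types.SynonymRule; the typed SynonymsSet(...) builder method is the alternative. The set name and rules are placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym"
)

// replaceSet creates or fully replaces a synonyms set (up to 10,000 rules per
// set, per the endpoint documentation above).
func replaceSet(ctx context.Context, tp elastictransport.Interface) error {
	req, err := putsynonym.NewRequest().FromJSON(`{
		"synonyms_set": [
			{"id": "rule-1", "synonyms": "hello, hi"},
			{"id": "rule-2", "synonyms": "ipod, i-pod => iPod"}
		]
	}`)
	if err != nil {
		return err
	}

	// Refresh(true) makes the call wait until the search analyzers that use
	// this set have been reloaded.
	res, err := putsynonym.NewPutSynonymFunc(tp)("my-synonyms-set").
		Request(req).
		Refresh(true).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("result:", res.Result)
	return nil
}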
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule +func New(tp elastictransport.Interface) *PutSynonymRule { + r := &PutSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSynonymRule) Raw(raw io.Reader) *PutSynonymRule { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSynonymRule) Request(req *Request) *PutSynonymRule { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSynonymRule: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "setid", r.setid) + } + path.WriteString(r.setid) + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "ruleid", r.ruleid) + } + path.WriteString(r.ruleid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutSynonymRule) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "synonyms.put_synonym_rule") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "synonyms.put_synonym_rule") + if reader := instrument.RecordRequestBody(ctx, "synonyms.put_synonym_rule", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "synonyms.put_synonym_rule") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutSynonymRule query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsynonymrule.Response +func (r PutSynonymRule) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "synonyms.put_synonym_rule") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutSynonymRule headers map. +func (r *PutSynonymRule) Header(key, value string) *PutSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The ID of the synonym set. +// API Name: setid +func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The ID of the synonym rule to be updated or created. +// API Name: ruleid +func (r *PutSynonymRule) _ruleid(ruleid string) *PutSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// Refresh If `true`, the request will refresh the analyzers with the new synonym rule +// and wait for the new synonyms to be available before returning. 
+// If `false`, analyzers will not be reloaded with the new synonym rule +// API name: refresh +func (r *PutSynonymRule) Refresh(refresh bool) *PutSynonymRule { + r.values.Set("refresh", strconv.FormatBool(refresh)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutSynonymRule) ErrorTrace(errortrace bool) *PutSynonymRule { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutSynonymRule) FilterPath(filterpaths ...string) *PutSynonymRule { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutSynonymRule) Human(human bool) *PutSynonymRule { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutSynonymRule) Pretty(pretty bool) *PutSynonymRule { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The synonym rule information definition, which must be in Solr format. +// API name: synonyms +func (r *PutSynonymRule) Synonyms(synonymstring string) *PutSynonymRule { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Synonyms = synonymstring + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/request.go new file mode 100644 index 000000000..05f256bca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/request.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsynonymrule + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package putsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L69 +type Request struct { + + // Synonyms The synonym rule information definition, which must be in Solr format. + Synonyms string `json:"synonyms"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putsynonymrule request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return fmt.Errorf("%s | %w", "Synonyms", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/response.go new file mode 100644 index 000000000..2d6707549 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putsynonymrule + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L25 +type Response struct { + + // ReloadAnalyzersDetails Updating synonyms in a synonym set can reload the associated analyzers in + // case refresh is set to true. + // This information is the analyzers reloading result. + ReloadAnalyzersDetails *types.ReloadResult `json:"reload_analyzers_details,omitempty"` + // Result The update operation result. 
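+	// Typically this is `created` for a rule that did not exist before and
+	// `updated` when an existing rule was overwritten.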
+ Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/cancel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/cancel.go new file mode 100644 index 000000000..5fe0b20b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/cancel.go @@ -0,0 +1,429 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. +// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. +// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. +package cancel + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + taskidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Cancel struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + taskid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewCancel type alias for index. +type NewCancel func() *Cancel + +// NewCancelFunc returns a new instance of Cancel with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
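+//
+// A minimal usage sketch, assuming tp is any elastictransport.Interface;
+// task IDs follow the `node_id:task_number` form shown in the task APIs:
+//
+//	res, err := cancel.NewCancelFunc(tp)().
+//		TaskId("oTUltX4IQMOUUVeiohTt8A:12345").
+//		WaitForCompletion(true).
+//		Do(context.Background())
+//	if err == nil {
+//		fmt.Printf("node failures: %d\n", len(res.NodeFailures))
+//	}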
+func NewCancelFunc(tp elastictransport.Interface) NewCancel { + return func() *Cancel { + n := New(tp) + + return n + } +} + +// Cancel a task. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// A task may continue to run for some time after it has been cancelled because +// it may not be able to safely stop its current activity straight away. +// It is also possible that Elasticsearch must complete its work on other tasks +// before it can process the cancellation. +// The get task information API will continue to list these cancelled tasks +// until they complete. +// The cancelled flag in the response indicates that the cancellation command +// has been processed and the task will stop as soon as possible. +// +// To troubleshoot why a cancelled task does not complete promptly, use the get +// task information API with the `?detailed` parameter to identify the other +// tasks the system is running. +// You can also use the node hot threads API to obtain detailed information +// about the work the system is doing instead of completing the cancelled task. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks +func New(tp elastictransport.Interface) *Cancel { + r := &Cancel{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Cancel) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_tasks") + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPost + case r.paramSet == taskidMask: + path.WriteString("/") + path.WriteString("_tasks") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "taskid", r.taskid) + } + path.WriteString(r.taskid) + path.WriteString("/") + path.WriteString("_cancel") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Cancel) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "tasks.cancel") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "tasks.cancel") + if reader := instrument.RecordRequestBody(ctx, "tasks.cancel", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "tasks.cancel") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Cancel query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a cancel.Response +func (r Cancel) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.cancel") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
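+//
+// Any 2xx status reports true; a 404 reports false without an error, and any
+// other status yields an error. For example:
+//
+//	ok, err := cancel.NewCancelFunc(tp)().
+//		TaskId("oTUltX4IQMOUUVeiohTt8A:12345").
+//		IsSuccess(context.Background())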
+func (r Cancel) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.cancel") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Cancel query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Cancel headers map. +func (r *Cancel) Header(key, value string) *Cancel { + r.headers.Set(key, value) + + return r +} + +// TaskId The task identifier. +// API Name: taskid +func (r *Cancel) TaskId(taskid string) *Cancel { + r.paramSet |= taskidMask + r.taskid = taskid + + return r +} + +// Actions A comma-separated list or wildcard expression of actions that is used to +// limit the request. +// API name: actions +func (r *Cancel) Actions(actions ...string) *Cancel { + tmp := []string{} + for _, item := range actions { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("actions", strings.Join(tmp, ",")) + + return r +} + +// Nodes A comma-separated list of node IDs or names that is used to limit the +// request. +// API name: nodes +func (r *Cancel) Nodes(nodes ...string) *Cancel { + tmp := []string{} + for _, item := range nodes { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("nodes", strings.Join(tmp, ",")) + + return r +} + +// ParentTaskId A parent task ID that is used to limit the tasks. +// API name: parent_task_id +func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { + r.values.Set("parent_task_id", parenttaskid) + + return r +} + +// WaitForCompletion If true, the request blocks until all found tasks are complete. +// API name: wait_for_completion +func (r *Cancel) WaitForCompletion(waitforcompletion bool) *Cancel { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Cancel) ErrorTrace(errortrace bool) *Cancel { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Cancel) FilterPath(filterpaths ...string) *Cancel { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *Cancel) Human(human bool) *Cancel { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Cancel) Pretty(pretty bool) *Cancel { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/response.go new file mode 100644 index 000000000..5077caac2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel/response.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package cancel + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package cancel +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/cancel/CancelTasksResponse.ts#L22-L25 +type Response struct { + NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` + // Nodes Task information grouped by node, if `group_by` was set to `node` (the + // default). + Nodes map[string]types.NodeTasks `json:"nodes,omitempty"` + TaskFailures []types.TaskFailure `json:"task_failures,omitempty"` + // Tasks Either a flat list of tasks if `group_by` was set to `none`, or grouped by + // parents if + // `group_by` was set to `parents`. 
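+	// Concretely, the decoder below produces a map[string]types.ParentTaskInfo
+	// when grouped, or a []types.TaskInfo for a flat list; see UnmarshalJSON.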
+ Tasks types.TaskInfos `json:"tasks,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeTasks, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_failures": + if err := dec.Decode(&s.NodeFailures); err != nil { + return fmt.Errorf("%s | %w", "NodeFailures", err) + } + + case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } + if err := dec.Decode(&s.Nodes); err != nil { + return fmt.Errorf("%s | %w", "Nodes", err) + } + + case "task_failures": + if err := dec.Decode(&s.TaskFailures); err != nil { + return fmt.Errorf("%s | %w", "TaskFailures", err) + } + + case "tasks": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/get.go new file mode 100644 index 000000000..d8d148376 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/get.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get task information. +// Get information about a task currently running in the cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. +package get + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + taskidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + taskid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGet type alias for index. +type NewGet func(taskid string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(taskid string) *Get { + n := New(tp) + + n._taskid(taskid) + + return n + } +} + +// Get task information. +// Get information about a task currently running in the cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// If the task identifier is not found, a 404 response code indicates that there +// are no resources that match the request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == taskidMask: + path.WriteString("/") + path.WriteString("_tasks") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "taskid", r.taskid) + } + path.WriteString(r.taskid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
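+//
+// As elsewhere in this package, Do is the usual entry point; a short sketch,
+// with tp again standing in for any elastictransport.Interface:
+//
+//	res, err := get.NewGetFunc(tp)("oTUltX4IQMOUUVeiohTt8A:12345").
+//		WaitForCompletion(true).
+//		Timeout("30s").
+//		Do(context.Background())
+//	if err == nil && res.Completed {
+//		fmt.Println(res.Task.Action)
+//	}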
+func (r Get) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "tasks.get") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "tasks.get") + if reader := instrument.RecordRequestBody(ctx, "tasks.get", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "tasks.get") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Get query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Get) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.get") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Get query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. +func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// TaskId The task identifier. +// API Name: taskid +func (r *Get) _taskid(taskid string) *Get { + r.paramSet |= taskidMask + r.taskid = taskid + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *Get) Timeout(duration string) *Get { + r.values.Set("timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the task has completed. +// API name: wait_for_completion +func (r *Get) WaitForCompletion(waitforcompletion bool) *Get { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Get) ErrorTrace(errortrace bool) *Get { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Get) FilterPath(filterpaths ...string) *Get { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Get) Human(human bool) *Get { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Get) Pretty(pretty bool) *Get { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/response.go new file mode 100644 index 000000000..ddfa9aa54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package get + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/get/GetTaskResponse.ts#L24-L31 +type Response struct { + Completed bool `json:"completed"` + Error *types.ErrorCause `json:"error,omitempty"` + Response json.RawMessage `json:"response,omitempty"` + Task types.TaskInfo `json:"task"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/list.go new file mode 100644 index 000000000..a5c2d8f32 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/list.go @@ -0,0 +1,528 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// for in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. 
+// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` +// +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. +package list + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/groupby" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type List struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewList type alias for index. +type NewList func() *List + +// NewListFunc returns a new instance of List with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListFunc(tp elastictransport.Interface) NewList { + return func() *List { + n := New(tp) + + return n + } +} + +// Get all tasks. +// Get information about the tasks currently running on one or more nodes in the +// cluster. +// +// WARNING: The task management API is new and should still be considered a beta +// feature. +// The API may change in ways that are not backwards compatible. +// +// **Identifying running tasks** +// +// The `X-Opaque-Id header`, when provided on the HTTP request header, is going +// to be returned as a header in the response as well as in the headers field +// for in the task information. +// This enables you to track certain calls or associate certain tasks with the +// client that started them. 
+// For example: +// +// ``` +// curl -i -H "X-Opaque-Id: 123456" +// "http://localhost:9200/_tasks?group_by=parents" +// ``` +// +// The API returns the following result: +// +// ``` +// HTTP/1.1 200 OK +// X-Opaque-Id: 123456 +// content-type: application/json; charset=UTF-8 +// content-length: 831 +// +// { +// "tasks" : { +// "u5lcZHqcQhu-rUoFaqDphA:45" : { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 45, +// "type" : "transport", +// "action" : "cluster:monitor/tasks/lists", +// "start_time_in_millis" : 1513823752749, +// "running_time_in_nanos" : 293139, +// "cancellable" : false, +// "headers" : { +// "X-Opaque-Id" : "123456" +// }, +// "children" : [ +// { +// "node" : "u5lcZHqcQhu-rUoFaqDphA", +// "id" : 46, +// "type" : "direct", +// "action" : "cluster:monitor/tasks/lists[n]", +// "start_time_in_millis" : 1513823752750, +// "running_time_in_nanos" : 92133, +// "cancellable" : false, +// "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", +// "headers" : { +// "X-Opaque-Id" : "123456" +// } +// } +// ] +// } +// } +// } +// +// ``` +// In this example, `X-Opaque-Id: 123456` is the ID as a part of the response +// header. +// The `X-Opaque-Id` in the task `headers` is the ID for the task that was +// initiated by the REST request. +// The `X-Opaque-Id` in the children `headers` is the child task of the task +// that was initiated by the REST request. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks +func New(tp elastictransport.Interface) *List { + r := &List{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *List) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_tasks") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
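+//
+// A short sketch mirroring the curl example above, with tp standing in for
+// any elastictransport.Interface and groupby.Parents assumed to be the enum
+// value for `parents`:
+//
+//	res, err := list.NewListFunc(tp)().
+//		Detailed(true).
+//		GroupBy(groupby.Parents).
+//		Do(context.Background())
+//	if err == nil {
+//		fmt.Println(res.Tasks)
+//	}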
+func (r List) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "tasks.list") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "tasks.list") + if reader := instrument.RecordRequestBody(ctx, "tasks.list", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "tasks.list") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the List query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a list.Response +func (r List) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r List) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "tasks.list") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the List query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the List headers map. +func (r *List) Header(key, value string) *List { + r.headers.Set(key, value) + + return r +} + +// Actions A comma-separated list or wildcard expression of actions used to limit the +// request. +// For example, you can use `cluser:*` to retrieve all cluster-related tasks. +// API name: actions +func (r *List) Actions(actions ...string) *List { + tmp := []string{} + for _, item := range actions { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("actions", strings.Join(tmp, ",")) + + return r +} + +// Detailed If `true`, the response includes detailed information about the running +// tasks. +// This information is useful to distinguish tasks from each other but is more +// costly to run. +// API name: detailed +func (r *List) Detailed(detailed bool) *List { + r.values.Set("detailed", strconv.FormatBool(detailed)) + + return r +} + +// GroupBy A key that is used to group tasks in the response. +// The task lists can be grouped either by nodes or by parent tasks. +// API name: group_by +func (r *List) GroupBy(groupby groupby.GroupBy) *List { + r.values.Set("group_by", groupby.String()) + + return r +} + +// Nodes A comma-separated list of node IDs or names that is used to limit the +// returned information. +// API name: nodes +func (r *List) Nodes(nodeids ...string) *List { + r.values.Set("nodes", strings.Join(nodeids, ",")) + + return r +} + +// ParentTaskId A parent task identifier that is used to limit returned information. +// To return all tasks, omit this parameter or use a value of `-1`. +// If the parent task is not found, the API does not return a 404 response code. +// API name: parent_task_id +func (r *List) ParentTaskId(id string) *List { + r.values.Set("parent_task_id", id) + + return r +} + +// Timeout The period to wait for each node to respond. +// If a node does not respond before its timeout expires, the response does not +// include its information. +// However, timed out nodes are included in the `node_failures` property. +// API name: timeout +func (r *List) Timeout(duration string) *List { + r.values.Set("timeout", duration) + + return r +} + +// WaitForCompletion If `true`, the request blocks until the operation is complete. +// API name: wait_for_completion +func (r *List) WaitForCompletion(waitforcompletion bool) *List { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. 
+// API name: error_trace +func (r *List) ErrorTrace(errortrace bool) *List { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *List) FilterPath(filterpaths ...string) *List { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *List) Human(human bool) *List { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *List) Pretty(pretty bool) *List { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/response.go new file mode 100644 index 000000000..75e8bb99b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list/response.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package list + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package list +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/list/ListTasksResponse.ts#L22-L25 +type Response struct { + NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` + // Nodes Task information grouped by node, if `group_by` was set to `node` (the + // default). + Nodes map[string]types.NodeTasks `json:"nodes,omitempty"` + TaskFailures []types.TaskFailure `json:"task_failures,omitempty"` + // Tasks Either a flat list of tasks if `group_by` was set to `none`, or grouped by + // parents if + // `group_by` was set to `parents`. 
+ Tasks types.TaskInfos `json:"tasks,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Nodes: make(map[string]types.NodeTasks, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_failures": + if err := dec.Decode(&s.NodeFailures); err != nil { + return fmt.Errorf("%s | %w", "NodeFailures", err) + } + + case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } + if err := dec.Decode(&s.Nodes); err != nil { + return fmt.Errorf("%s | %w", "Nodes", err) + } + + case "task_failures": + if err := dec.Decode(&s.TaskFailures); err != nil { + return fmt.Errorf("%s | %w", "TaskFailures", err) + } + + case "tasks": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + s.Tasks = o + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/find_field_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/find_field_structure.go new file mode 100644 index 000000000..8822373c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/find_field_structure.go @@ -0,0 +1,596 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find the structure of a text field. +// Find the structure of a text field in an Elasticsearch index. +// +// This API provides a starting point for extracting further information from +// log messages already ingested into Elasticsearch. +// For example, if you have ingested data into a very simple index that has just +// `@timestamp` and message fields, you can use this API to see what common +// structure exists in the message field. +// +// The response from the API contains: +// +// * Sample messages. +// * Statistics that reveal the most common values for all fields detected +// within the text and basic numeric statistics for numeric fields. 
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+package findfieldstructure
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type FindFieldStructure struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewFindFieldStructure type alias for index.
+type NewFindFieldStructure func() *FindFieldStructure
+
+// NewFindFieldStructureFunc returns a new instance of FindFieldStructure with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewFindFieldStructureFunc(tp elastictransport.Interface) NewFindFieldStructure {
+	return func() *FindFieldStructure {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Find the structure of a text field.
+// Find the structure of a text field in an Elasticsearch index.
+//
+// This API provides a starting point for extracting further information from
+// log messages already ingested into Elasticsearch.
+// For example, if you have ingested data into a very simple index that has just
+// `@timestamp` and message fields, you can use this API to see what common
+// structure exists in the message field.
+//
+// The response from the API contains:
+//
+// * Sample messages.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure
+func New(tp elastictransport.Interface) *FindFieldStructure {
+	r := &FindFieldStructure{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *FindFieldStructure) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_text_structure")
+		path.WriteString("/")
+		path.WriteString("find_field_structure")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r FindFieldStructure) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "text_structure.find_field_structure")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "text_structure.find_field_structure")
+		if reader := instrument.RecordRequestBody(ctx, "text_structure.find_field_structure", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "text_structure.find_field_structure")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the FindFieldStructure query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a findfieldstructure.Response
+func (r FindFieldStructure) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "text_structure.find_field_structure")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess allows running a query with a context and retrieving the result as a boolean.
+// This only exists for endpoints without a request payload and allows for quick control flow.
+func (r FindFieldStructure) IsSuccess(providedCtx context.Context) (bool, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "text_structure.find_field_structure")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	res, err := r.Perform(ctx)
+
+	if err != nil {
+		return false, err
+	}
+	io.Copy(io.Discard, res.Body)
+	err = res.Body.Close()
+	if err != nil {
+		return false, err
+	}
+
+	if res.StatusCode >= 200 && res.StatusCode < 300 {
+		return true, nil
+	}
+
+	if res.StatusCode != 404 {
+		err := fmt.Errorf("an error happened during the FindFieldStructure query execution, status code: %d", res.StatusCode)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return false, err
+	}
+
+	return false, nil
+}
+
+// Header sets a key, value pair in the FindFieldStructure headers map.
+func (r *FindFieldStructure) Header(key, value string) *FindFieldStructure {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// ColumnNames If `format` is set to `delimited`, you can specify the column names in a
+// comma-separated list.
+// If this parameter is not specified, the structure finder uses the column
+// names from the header row of the text.
+// If the text does not have a header row, columns are named "column1",
+// "column2", "column3", for example.
+// API name: column_names
+func (r *FindFieldStructure) ColumnNames(columnnames string) *FindFieldStructure {
+	r.values.Set("column_names", columnnames)
+
+	return r
+}
+
+// Delimiter If you have set `format` to `delimited`, you can specify the character used
+// to delimit the values in each row.
+// Only a single character is supported; the delimiter cannot have multiple
+// characters.
+// By default, the API considers the following possibilities: comma, tab,
+// semi-colon, and pipe (`|`).
+// In this default scenario, all rows must have the same number of fields for
+// the delimited format to be detected.
+// If you specify a delimiter, up to 10% of the rows can have a different number
+// of columns than the first row.
+// API name: delimiter +func (r *FindFieldStructure) Delimiter(delimiter string) *FindFieldStructure { + r.values.Set("delimiter", delimiter) + + return r +} + +// DocumentsToSample The number of documents to include in the structural analysis. +// The minimum value is 2. +// API name: documents_to_sample +func (r *FindFieldStructure) DocumentsToSample(documentstosample string) *FindFieldStructure { + r.values.Set("documents_to_sample", documentstosample) + + return r +} + +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// This setting primarily has an impact when a whole message Grok pattern such +// as `%{CATALINALOG}` matches the input. +// If the structure finder identifies a common structure but has no idea of the +// meaning then generic field names such as `path`, `ipaddress`, `field1`, and +// `field2` are used in the `grok_pattern` output. +// The intention in that situation is that a user who knows the meanings will +// rename the fields before using them. +// API name: ecs_compatibility +func (r *FindFieldStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindFieldStructure { + r.values.Set("ecs_compatibility", ecscompatibility.String()) + + return r +} + +// Explain If `true`, the response includes a field named `explanation`, which is an +// array of strings that indicate how the structure finder produced its result. +// API name: explain +func (r *FindFieldStructure) Explain(explain bool) *FindFieldStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Field The field that should be analyzed. +// API name: field +func (r *FindFieldStructure) Field(field string) *FindFieldStructure { + r.values.Set("field", field) + + return r +} + +// Format The high level structure of the text. +// By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. +// If the format is set to delimited and the delimiter is not set, however, the +// API tolerates up to 5% of rows that have a different number of columns than +// the first row. +// API name: format +func (r *FindFieldStructure) Format(format formattype.FormatType) *FindFieldStructure { + r.values.Set("format", format.String()) + + return r +} + +// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that +// is used to extract fields from every message in the text. +// The name of the timestamp field in the Grok pattern must match what is +// specified in the `timestamp_field` parameter. +// If that parameter is not specified, the name of the timestamp field in the +// Grok pattern must match "timestamp". +// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. +// API name: grok_pattern +func (r *FindFieldStructure) GrokPattern(grokpattern string) *FindFieldStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// Index The name of the index that contains the analyzed field. +// API name: index +func (r *FindFieldStructure) Index(indexname string) *FindFieldStructure { + r.values.Set("index", indexname) + + return r +} + +// Quote If the format is `delimited`, you can specify the character used to quote the +// values in each row if they contain newlines or the delimiter character. 
+// Only a single character is supported.
+// If this parameter is not specified, the default value is a double quote
+// (`"`).
+// If your delimited text format does not use quoting, a workaround is to set
+// this argument to a character that does not appear anywhere in the sample.
+// API name: quote
+func (r *FindFieldStructure) Quote(quote string) *FindFieldStructure {
+	r.values.Set("quote", quote)
+
+	return r
+}
+
+// ShouldTrimFields If the format is `delimited`, you can specify whether values between
+// delimiters should have whitespace trimmed from them.
+// If this parameter is not specified and the delimiter is pipe (`|`), the
+// default value is true.
+// Otherwise, the default value is `false`.
+// API name: should_trim_fields
+func (r *FindFieldStructure) ShouldTrimFields(shouldtrimfields bool) *FindFieldStructure {
+	r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields))
+
+	return r
+}
+
+// Timeout The maximum amount of time that the structure analysis can take.
+// If the analysis is still running when the timeout expires, it will be
+// stopped.
+// API name: timeout
+func (r *FindFieldStructure) Timeout(duration string) *FindFieldStructure {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// TimestampField The name of the field that contains the primary timestamp of each record in
+// the text.
+// In particular, if the text was ingested into an index, this is the field that
+// would be used to populate the `@timestamp` field.
+//
+// If the format is `semi_structured_text`, this field must match the name of
+// the appropriate extraction in the `grok_pattern`.
+// Therefore, for semi-structured text, it is best not to specify this parameter
+// unless `grok_pattern` is also specified.
+//
+// For structured text, if you specify this parameter, the field must exist
+// within the text.
+//
+// If this parameter is not specified, the structure finder makes a decision
+// about which field (if any) is the primary timestamp field.
+// For structured text, it is not compulsory to have a timestamp in the text.
+// API name: timestamp_field
+func (r *FindFieldStructure) TimestampField(field string) *FindFieldStructure {
+	r.values.Set("timestamp_field", field)
+
+	return r
+}
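+
+// Example: a minimal usage sketch (not part of the generated API; it assumes
+// an elastictransport.Interface value named `transport` configured elsewhere
+// and an existing index/field pair, and uses only methods defined in this
+// file):
+//
+//	res, err := New(transport).
+//		Index("app-logs").
+//		Field("message").
+//		Do(context.Background())
+//	if err != nil {
+//		// handle transport errors or *types.ElasticsearchError
+//	}
+//	_ = res.GrokPattern // e.g. inspect the generated Grok pattern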
+
+// TimestampFormat The Java time format of the timestamp field in the text.
+// Only a subset of Java time format letter groups are supported:
+//
+// * `a`
+// * `d`
+// * `dd`
+// * `EEE`
+// * `EEEE`
+// * `H`
+// * `HH`
+// * `h`
+// * `M`
+// * `MM`
+// * `MMM`
+// * `MMMM`
+// * `mm`
+// * `ss`
+// * `XX`
+// * `XXX`
+// * `yy`
+// * `yyyy`
+// * `zzz`
+//
+// Additionally `S` letter groups (fractional seconds) of length one to nine are
+// supported providing they occur after `ss` and are separated from the `ss` by
+// a period (`.`), comma (`,`), or colon (`:`).
+// Spacing and punctuation are also permitted, with the exception of a question
+// mark (`?`), newline, and carriage return, together with literal text enclosed
+// in single quotes.
+// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+//
+// One valuable use case for this parameter is when the format is
+// semi-structured text, there are multiple timestamp formats in the text, and
+// you know which format corresponds to the primary timestamp, but you do not
+// want to specify the full `grok_pattern`.
+// Another is when the timestamp format is one that the structure finder does
+// not consider by default.
+//
+// If this parameter is not specified, the structure finder chooses the best
+// format from a built-in set.
+//
+// If the special value `null` is specified, the structure finder will not look
+// for a primary timestamp in the text.
+// When the format is semi-structured text, this will result in the structure
+// finder treating the text as single-line messages.
+// API name: timestamp_format
+func (r *FindFieldStructure) TimestampFormat(timestampformat string) *FindFieldStructure {
+	r.values.Set("timestamp_format", timestampformat)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *FindFieldStructure) ErrorTrace(errortrace bool) *FindFieldStructure {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *FindFieldStructure) FilterPath(filterpaths ...string) *FindFieldStructure {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for
+// humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *FindFieldStructure) Human(human bool) *FindFieldStructure {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging.
+// API name: pretty
+func (r *FindFieldStructure) Pretty(pretty bool) *FindFieldStructure {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/response.go
new file mode 100644
index 000000000..c4ef74ba8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure/response.go
@@ -0,0 +1,56 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package findfieldstructure + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findfieldstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/find_field_structure/FindFieldStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/find_message_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/find_message_structure.go new file mode 100644 index 000000000..c8ecc873a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/find_message_structure.go @@ -0,0 +1,604 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find the structure of text messages. +// Find the structure of a list of text messages. +// The messages must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. 
+// Use this API rather than the find text structure API if your input text has
+// already been split up into separate messages by some other process.
+//
+// The response from the API contains:
+//
+// * Sample messages.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+package findmessagestructure
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type FindMessageStructure struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewFindMessageStructure type alias for index.
+type NewFindMessageStructure func() *FindMessageStructure
+
+// NewFindMessageStructureFunc returns a new instance of FindMessageStructure with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewFindMessageStructureFunc(tp elastictransport.Interface) NewFindMessageStructure {
+	return func() *FindMessageStructure {
+		n := New(tp)
+
+		return n
+	}
+}
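+
+// Example: a minimal usage sketch (not part of the generated API; it assumes
+// an elastictransport.Interface value named `transport` configured elsewhere
+// and uses only methods defined in this package):
+//
+//	res, err := New(transport).
+//		Messages(
+//			"[2024-01-22T01:00:00] INFO starting",
+//			"[2024-01-22T01:00:01] INFO started",
+//		).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle transport errors or *types.ElasticsearchError
+//	}
+//	_ = res.SampleStart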
+
+// Find the structure of text messages.
+// Find the structure of a list of text messages.
+// The messages must contain data that is suitable to be ingested into
+// Elasticsearch.
+//
+// This API provides a starting point for ingesting data into Elasticsearch in a
+// format that is suitable for subsequent use with other Elastic Stack
+// functionality.
+// Use this API rather than the find text structure API if your input text has
+// already been split up into separate messages by some other process.
+//
+// The response from the API contains:
+//
+// * Sample messages.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// If the structure finder produces unexpected results, specify the `explain`
+// query parameter and an explanation will appear in the response.
+// It helps determine why the returned structure was chosen.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure
+func New(tp elastictransport.Interface) *FindMessageStructure {
+	r := &FindMessageStructure{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *FindMessageStructure) Raw(raw io.Reader) *FindMessageStructure {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *FindMessageStructure) Request(req *Request) *FindMessageStructure {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *FindMessageStructure) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for FindMessageStructure: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_text_structure")
+		path.WriteString("/")
+		path.WriteString("find_message_structure")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r FindMessageStructure) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if r.spanStarted == false {
+			ctx := instrument.Start(providedCtx, "text_structure.find_message_structure")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "text_structure.find_message_structure")
+		if reader := instrument.RecordRequestBody(ctx, "text_structure.find_message_structure", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "text_structure.find_message_structure")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the FindMessageStructure query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a findmessagestructure.Response
+func (r FindMessageStructure) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "text_structure.find_message_structure")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the FindMessageStructure headers map.
+func (r *FindMessageStructure) Header(key, value string) *FindMessageStructure {
+	r.headers.Set(key, value)
+
+	return r
+}
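+
+// Example: distinguishing a transport failure from an API error returned by
+// Do (a sketch, not generated code; it relies on Do returning the
+// *types.ElasticsearchError built above for non-2xx responses):
+//
+//	res, err := New(transport).Messages("msg one", "msg two").Do(ctx)
+//	if err != nil {
+//		if esErr, ok := err.(*types.ElasticsearchError); ok {
+//			// the cluster answered with an error document
+//			_ = esErr.Status
+//		}
+//		return err
+//	}
+//	_ = res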
+
+// ColumnNames If the format is `delimited`, you can specify the column names in a
+// comma-separated list.
+// If this parameter is not specified, the structure finder uses the column
+// names from the header row of the text.
+// If the text does not have a header row, columns are named "column1",
+// "column2", "column3", for example.
+// API name: column_names
+func (r *FindMessageStructure) ColumnNames(columnnames string) *FindMessageStructure {
+	r.values.Set("column_names", columnnames)
+
+	return r
+}
+
+// Delimiter If the format is `delimited`, you can specify the character used to
+// delimit the values in each row.
+// Only a single character is supported; the delimiter cannot have multiple
+// characters.
+// By default, the API considers the following possibilities: comma, tab,
+// semi-colon, and pipe (`|`).
+// In this default scenario, all rows must have the same number of fields for
+// the delimited format to be detected.
+// If you specify a delimiter, up to 10% of the rows can have a different number
+// of columns than the first row.
+// API name: delimiter
+func (r *FindMessageStructure) Delimiter(delimiter string) *FindMessageStructure {
+	r.values.Set("delimiter", delimiter)
+
+	return r
+}
+
+// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns.
+// Use this parameter to specify whether to use ECS Grok patterns instead of
+// legacy ones when the structure finder creates a Grok pattern.
+// This setting primarily has an impact when a whole message Grok pattern such
+// as `%{CATALINALOG}` matches the input.
+// If the structure finder identifies a common structure but has no idea of the
+// meaning then generic field names such as `path`, `ipaddress`, `field1`, and
+// `field2` are used in the `grok_pattern` output, with the intention that a
+// user who knows the meanings renames these fields before using them.
+// API name: ecs_compatibility
+func (r *FindMessageStructure) EcsCompatibility(ecscompatibility ecscompatibilitytype.EcsCompatibilityType) *FindMessageStructure {
+	r.values.Set("ecs_compatibility", ecscompatibility.String())
+
+	return r
+}
+
+// Explain If this parameter is set to true, the response includes a field named
+// `explanation`, which is an array of strings that indicate how the structure
+// finder produced its result.
+// API name: explain
+func (r *FindMessageStructure) Explain(explain bool) *FindMessageStructure {
+	r.values.Set("explain", strconv.FormatBool(explain))
+
+	return r
+}
+
+// Format The high level structure of the text.
+// By default, the API chooses the format.
+// In this default scenario, all rows must have the same number of fields for a
+// delimited format to be detected.
+// If the format is `delimited` and the delimiter is not set, however, the API
+// tolerates up to 5% of rows that have a different number of columns than the
+// first row.
+// API name: format
+func (r *FindMessageStructure) Format(format formattype.FormatType) *FindMessageStructure {
+	r.values.Set("format", format.String())
+
+	return r
+}
+
+// GrokPattern If the format is `semi_structured_text`, you can specify a Grok pattern that
+// is used to extract fields from every message in the text.
+// The name of the timestamp field in the Grok pattern must match what is
+// specified in the `timestamp_field` parameter.
+// If that parameter is not specified, the name of the timestamp field in the
+// Grok pattern must match "timestamp".
+// If `grok_pattern` is not specified, the structure finder creates a Grok
+// pattern.
+// API name: grok_pattern
+func (r *FindMessageStructure) GrokPattern(grokpattern string) *FindMessageStructure {
+	r.values.Set("grok_pattern", grokpattern)
+
+	return r
+}
+
+// Quote If the format is `delimited`, you can specify the character used to quote the
+// values in each row if they contain newlines or the delimiter character.
+// Only a single character is supported.
+// If this parameter is not specified, the default value is a double quote
+// (`"`).
+// If your delimited text format does not use quoting, a workaround is to set
+// this argument to a character that does not appear anywhere in the sample.
+// API name: quote
+func (r *FindMessageStructure) Quote(quote string) *FindMessageStructure {
+	r.values.Set("quote", quote)
+
+	return r
+}
+
+// ShouldTrimFields If the format is `delimited`, you can specify whether values between
+// delimiters should have whitespace trimmed from them.
+// If this parameter is not specified and the delimiter is pipe (`|`), the
+// default value is true.
+// Otherwise, the default value is `false`.
+// API name: should_trim_fields
+func (r *FindMessageStructure) ShouldTrimFields(shouldtrimfields bool) *FindMessageStructure {
+	r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields))
+
+	return r
+}
+
+// Timeout The maximum amount of time that the structure analysis can take.
+// If the analysis is still running when the timeout expires, it will be
+// stopped.
+// API name: timeout
+func (r *FindMessageStructure) Timeout(duration string) *FindMessageStructure {
+	r.values.Set("timeout", duration)
+
+	return r
+}
+
+// TimestampField The name of the field that contains the primary timestamp of each record in
+// the text.
+// In particular, if the text was ingested into an index, this is the field that
+// would be used to populate the `@timestamp` field.
+//
+// If the format is `semi_structured_text`, this field must match the name of
+// the appropriate extraction in the `grok_pattern`.
+// Therefore, for semi-structured text, it is best not to specify this parameter
+// unless `grok_pattern` is also specified.
+//
+// For structured text, if you specify this parameter, the field must exist
+// within the text.
+//
+// If this parameter is not specified, the structure finder makes a decision
+// about which field (if any) is the primary timestamp field.
+// For structured text, it is not compulsory to have a timestamp in the text.
+// API name: timestamp_field
+func (r *FindMessageStructure) TimestampField(field string) *FindMessageStructure {
+	r.values.Set("timestamp_field", field)
+
+	return r
+}
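+
+// Example: overriding timestamp detection (a sketch, not generated code; the
+// format string is one valid instance of the letter groups documented below):
+//
+//	req := New(transport).
+//		TimestampField("ts").
+//		TimestampFormat("yyyy-MM-dd HH:mm:ss,SSS")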
+
+// TimestampFormat The Java time format of the timestamp field in the text.
+// Only a subset of Java time format letter groups are supported:
+//
+// * `a`
+// * `d`
+// * `dd`
+// * `EEE`
+// * `EEEE`
+// * `H`
+// * `HH`
+// * `h`
+// * `M`
+// * `MM`
+// * `MMM`
+// * `MMMM`
+// * `mm`
+// * `ss`
+// * `XX`
+// * `XXX`
+// * `yy`
+// * `yyyy`
+// * `zzz`
+//
+// Additionally `S` letter groups (fractional seconds) of length one to nine are
+// supported providing they occur after `ss` and are separated from the `ss` by
+// a period (`.`), comma (`,`), or colon (`:`).
+// Spacing and punctuation are also permitted, with the exception of a question
+// mark (`?`), newline, and carriage return, together with literal text enclosed
+// in single quotes.
+// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+//
+// One valuable use case for this parameter is when the format is
+// semi-structured text, there are multiple timestamp formats in the text, and
+// you know which format corresponds to the primary timestamp, but you do not
+// want to specify the full `grok_pattern`.
+// Another is when the timestamp format is one that the structure finder does
+// not consider by default.
+//
+// If this parameter is not specified, the structure finder chooses the best
+// format from a built-in set.
+//
+// If the special value `null` is specified, the structure finder will not look
+// for a primary timestamp in the text.
+// When the format is semi-structured text, this will result in the structure
+// finder treating the text as single-line messages.
+// API name: timestamp_format
+func (r *FindMessageStructure) TimestampFormat(timestampformat string) *FindMessageStructure {
+	r.values.Set("timestamp_format", timestampformat)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *FindMessageStructure) ErrorTrace(errortrace bool) *FindMessageStructure {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *FindMessageStructure) FilterPath(filterpaths ...string) *FindMessageStructure {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for
+// humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the human
+// readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *FindMessageStructure) Human(human bool) *FindMessageStructure {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+// this option for debugging.
+// API name: pretty
+func (r *FindMessageStructure) Pretty(pretty bool) *FindMessageStructure {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The list of messages you want to analyze.
+// API name: messages
+func (r *FindMessageStructure) Messages(messages ...string) *FindMessageStructure {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	for _, v := range messages {
+
+		r.req.Messages = append(r.req.Messages, v)
+
+	}
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/request.go
new file mode 100644
index 000000000..b8ffb9e05
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/request.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package findmessagestructure + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/find_message_structure/FindMessageStructureRequest.ts#L25-L174 +type Request struct { + + // Messages The list of messages you want to analyze. + Messages []string `json:"messages"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Findmessagestructure request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/response.go new file mode 100644 index 000000000..7e9f9a844 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure/response.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package findmessagestructure + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype" +) + +// Response holds the response body struct for the package findmessagestructure +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/find_message_structure/FindMessageStructureResponse.ts#L31-L49 +type Response struct { + Charset string `json:"charset"` + EcsCompatibility *ecscompatibilitytype.EcsCompatibilityType `json:"ecs_compatibility,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format formattype.FormatType `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + SampleStart string `json:"sample_start"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/find_structure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/find_structure.go new file mode 100644 index 000000000..05a43e523 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/find_structure.go @@ -0,0 +1,607 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Find the structure of a text file. +// The text file must contain data that is suitable to be ingested into +// Elasticsearch. +// +// This API provides a starting point for ingesting data into Elasticsearch in a +// format that is suitable for subsequent use with other Elastic Stack +// functionality. 
+// Unlike other Elasticsearch endpoints, the data that is posted to this
+// endpoint does not need to be UTF-8 encoded and in JSON format.
+// It must, however, be text; binary text formats are not currently supported.
+// The size is limited to the Elasticsearch HTTP receive buffer size, which
+// defaults to 100 Mb.
+//
+// The response from the API contains:
+//
+// * A couple of messages from the beginning of the text.
+// * Statistics that reveal the most common values for all fields detected
+// within the text and basic numeric statistics for numeric fields.
+// * Information about the structure of the text, which is useful when you write
+// ingest configurations to index it or similarly formatted text.
+// * Appropriate mappings for an Elasticsearch index, which you could use to
+// ingest the text.
+//
+// All this information can be calculated by the structure finder with no
+// guidance.
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
package findstructure
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type FindStructure struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewFindStructure type alias for index.
+type NewFindStructure func() *FindStructure
+
+// NewFindStructureFunc returns a new instance of FindStructure with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewFindStructureFunc(tp elastictransport.Interface) NewFindStructure {
+	return func() *FindStructure {
+		n := New(tp)
+
+		return n
+	}
+}
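+
+// Example: a minimal usage sketch (not part of the generated API; it assumes
+// an elastictransport.Interface value named `transport` configured elsewhere
+// and feeds NDJSON via Raw, using only methods defined in this package):
+//
+//	ndjson := strings.NewReader(
+//		`{"ts":"2024-01-22T01:00:00Z","msg":"starting"}` + "\n" +
+//			`{"ts":"2024-01-22T01:00:01Z","msg":"started"}` + "\n")
+//	res, err := New(transport).
+//		Raw(ndjson).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle transport errors or *types.ElasticsearchError
+//	}
+//	_ = res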
+// However, you can optionally override some of the decisions about the text
+// structure by specifying one or more query parameters.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure
+func New(tp elastictransport.Interface) *FindStructure {
+	r := &FindStructure{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *FindStructure) Raw(raw io.Reader) *FindStructure {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *FindStructure) Request(req *Request) *FindStructure {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *FindStructure) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		// Serialise each raw message as one NDJSON line.
+		for _, elem := range *r.req {
+			data, err := json.Marshal(elem)
+			if err != nil {
+				return nil, fmt.Errorf("could not serialise request for FindStructure: %w", err)
+			}
+			r.buf.Write(data)
+			r.buf.Write([]byte("\n"))
+		}
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_text_structure")
+		path.WriteString("/")
+		path.WriteString("find_structure")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
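+//
+// A rough usage sketch (tp, an elastictransport.Interface, and the sample
+// NDJSON line are assumptions for illustration, not part of this API):
+//
+//	res, err := findstructure.New(tp).
+//		Format("ndjson").
+//		Raw(strings.NewReader(`{"message":"first log line"}` + "\n")).
+//		Perform(context.Background())
+//	if err == nil {
+//		defer res.Body.Close() // res is the raw *http.Response
+//	}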
+func (r FindStructure) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "text_structure.find_structure") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.find_structure") + if reader := instrument.RecordRequestBody(ctx, "text_structure.find_structure", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.find_structure") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the FindStructure query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a findstructure.Response +func (r FindStructure) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.find_structure") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the FindStructure headers map. +func (r *FindStructure) Header(key, value string) *FindStructure { + r.headers.Set(key, value) + + return r +} + +// Charset The text's character set. +// It must be a character set that is supported by the JVM that Elasticsearch +// uses. +// For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. +// If this parameter is not specified, the structure finder chooses an +// appropriate character set. +// API name: charset +func (r *FindStructure) Charset(charset string) *FindStructure { + r.values.Set("charset", charset) + + return r +} + +// ColumnNames If you have set format to `delimited`, you can specify the column names in a +// comma-separated list. 
+// If this parameter is not specified, the structure finder uses the column
+// names from the header row of the text.
+// If the text does not have a header row, columns are named "column1",
+// "column2", "column3", for example.
+// API name: column_names
+func (r *FindStructure) ColumnNames(columnnames string) *FindStructure {
+	r.values.Set("column_names", columnnames)
+
+	return r
+}
+
+// Delimiter If you have set `format` to `delimited`, you can specify the character used
+// to delimit the values in each row.
+// Only a single character is supported; the delimiter cannot have multiple
+// characters.
+// By default, the API considers the following possibilities: comma, tab,
+// semi-colon, and pipe (`|`).
+// In this default scenario, all rows must have the same number of fields for
+// the delimited format to be detected.
+// If you specify a delimiter, up to 10% of the rows can have a different number
+// of columns than the first row.
+// API name: delimiter
+func (r *FindStructure) Delimiter(delimiter string) *FindStructure {
+	r.values.Set("delimiter", delimiter)
+
+	return r
+}
+
+// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns.
+// Use this parameter to specify whether to use ECS Grok patterns instead of
+// legacy ones when the structure finder creates a Grok pattern.
+// Valid values are `disabled` and `v1`.
+// This setting primarily has an impact when a whole message Grok pattern such
+// as `%{CATALINALOG}` matches the input.
+// If the structure finder identifies a common structure but has no idea of
+// the meaning, then generic field names such as `path`, `ipaddress`, `field1`,
+// and `field2` are used in the `grok_pattern` output, with the intention that a
+// user who knows the meanings can rename these fields before using it.
+// API name: ecs_compatibility
+func (r *FindStructure) EcsCompatibility(ecscompatibility string) *FindStructure {
+	r.values.Set("ecs_compatibility", ecscompatibility)
+
+	return r
+}
+
+// Explain If this parameter is set to `true`, the response includes a field named
+// explanation, which is an array of strings that indicate how the structure
+// finder produced its result.
+// If the structure finder produces unexpected results for some text, use this
+// query parameter to help you determine why the returned structure was chosen.
+// API name: explain
+func (r *FindStructure) Explain(explain bool) *FindStructure {
+	r.values.Set("explain", strconv.FormatBool(explain))
+
+	return r
+}
+
+// Format The high level structure of the text.
+// Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`.
+// By default, the API chooses the format.
+// In this default scenario, all rows must have the same number of fields for a
+// delimited format to be detected.
+// If the format is set to `delimited` and the delimiter is not set, however,
+// the API tolerates up to 5% of rows that have a different number of columns
+// than the first row.
+// API name: format
+func (r *FindStructure) Format(format string) *FindStructure {
+	r.values.Set("format", format)
+
+	return r
+}
+
+// GrokPattern If you have set `format` to `semi_structured_text`, you can specify a Grok
+// pattern that is used to extract fields from every message in the text.
+// The name of the timestamp field in the Grok pattern must match what is
+// specified in the `timestamp_field` parameter.
+// If that parameter is not specified, the name of the timestamp field in the
+// Grok pattern must match "timestamp".
+// If `grok_pattern` is not specified, the structure finder creates a Grok +// pattern. +// API name: grok_pattern +func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// HasHeaderRow If you have set `format` to `delimited`, you can use this parameter to +// indicate whether the column names are in the first row of the text. +// If this parameter is not specified, the structure finder guesses based on the +// similarity of the first row of the text to other rows. +// API name: has_header_row +func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { + r.values.Set("has_header_row", strconv.FormatBool(hasheaderrow)) + + return r +} + +// LineMergeSizeLimit The maximum number of characters in a message when lines are merged to form +// messages while analyzing semi-structured text. +// If you have extremely long messages you may need to increase this, but be +// aware that this may lead to very long processing times if the way to group +// lines into messages is misdetected. +// API name: line_merge_size_limit +func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStructure { + r.values.Set("line_merge_size_limit", linemergesizelimit) + + return r +} + +// LinesToSample The number of lines to include in the structural analysis, starting from the +// beginning of the text. +// The minimum is 2. +// If the value of this parameter is greater than the number of lines in the +// text, the analysis proceeds (as long as there are at least two lines in the +// text) for all of the lines. +// +// NOTE: The number of lines and the variation of the lines affects the speed of +// the analysis. +// For example, if you upload text where the first 1000 lines are all variations +// on the same message, the analysis will find more commonality than would be +// seen with a bigger sample. +// If possible, however, it is more efficient to upload sample text with more +// variety in the first 1000 lines than to request analysis of 100000 lines to +// achieve some variety. +// API name: lines_to_sample +func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { + r.values.Set("lines_to_sample", linestosample) + + return r +} + +// Quote If you have set `format` to `delimited`, you can specify the character used +// to quote the values in each row if they contain newlines or the delimiter +// character. +// Only a single character is supported. +// If this parameter is not specified, the default value is a double quote +// (`"`). +// If your delimited text format does not use quoting, a workaround is to set +// this argument to a character that does not appear anywhere in the sample. +// API name: quote +func (r *FindStructure) Quote(quote string) *FindStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If you have set `format` to `delimited`, you can specify whether values +// between delimiters should have whitespace trimmed from them. +// If this parameter is not specified and the delimiter is pipe (`|`), the +// default value is `true`. +// Otherwise, the default value is `false`. +// API name: should_trim_fields +func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout The maximum amount of time that the structure analysis can take. 
+// If the analysis is still running when the timeout expires then it will be +// stopped. +// API name: timeout +func (r *FindStructure) Timeout(duration string) *FindStructure { + r.values.Set("timeout", duration) + + return r +} + +// TimestampField The name of the field that contains the primary timestamp of each record in +// the text. +// In particular, if the text were ingested into an index, this is the field +// that would be used to populate the `@timestamp` field. +// +// If the `format` is `semi_structured_text`, this field must match the name of +// the appropriate extraction in the `grok_pattern`. +// Therefore, for semi-structured text, it is best not to specify this parameter +// unless `grok_pattern` is also specified. +// +// For structured text, if you specify this parameter, the field must exist +// within the text. +// +// If this parameter is not specified, the structure finder makes a decision +// about which field (if any) is the primary timestamp field. +// For structured text, it is not compulsory to have a timestamp in the text. +// API name: timestamp_field +func (r *FindStructure) TimestampField(field string) *FindStructure { + r.values.Set("timestamp_field", field) + + return r +} + +// TimestampFormat The Java time format of the timestamp field in the text. +// +// Only a subset of Java time format letter groups are supported: +// +// * `a` +// * `d` +// * `dd` +// * `EEE` +// * `EEEE` +// * `H` +// * `HH` +// * `h` +// * `M` +// * `MM` +// * `MMM` +// * `MMMM` +// * `mm` +// * `ss` +// * `XX` +// * `XXX` +// * `yy` +// * `yyyy` +// * `zzz` +// +// Additionally `S` letter groups (fractional seconds) of length one to nine are +// supported providing they occur after `ss` and separated from the `ss` by a +// `.`, `,` or `:`. +// Spacing and punctuation is also permitted with the exception of `?`, newline +// and carriage return, together with literal text enclosed in single quotes. +// For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +// +// One valuable use case for this parameter is when the format is +// semi-structured text, there are multiple timestamp formats in the text, and +// you know which format corresponds to the primary timestamp, but you do not +// want to specify the full `grok_pattern`. +// Another is when the timestamp format is one that the structure finder does +// not consider by default. +// +// If this parameter is not specified, the structure finder chooses the best +// format from a built-in set. +// +// If the special value `null` is specified the structure finder will not look +// for a primary timestamp in the text. +// When the format is semi-structured text this will result in the structure +// finder treating the text as single-line messages. +// API name: timestamp_format +func (r *FindStructure) TimestampFormat(timestampformat string) *FindStructure { + r.values.Set("timestamp_format", timestampformat) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/request.go new file mode 100644 index 000000000..bd66c394b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/request.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package findstructure + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package findstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L208 +type Request = []json.RawMessage + +// NewRequest returns a Request +func NewRequest() *Request { + r := make([]json.RawMessage, 0) + + return &r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/response.go new file mode 100644 index 000000000..4958fc3f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure/response.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package findstructure + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package findstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L97 +type Response struct { + + // Charset The character encoding used to parse the text. + Charset string `json:"charset"` + // ColumnNames If `format` is `delimited`, the `column_names` field lists the column names + // in the order they appear in the sample. 
+ ColumnNames []string `json:"column_names,omitempty"` + Delimiter *string `json:"delimiter,omitempty"` + ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` + Explanation []string `json:"explanation,omitempty"` + // FieldStats The most common values of each field, plus basic numeric statistics for the + // numeric `page_count` field. + // This information may provide clues that the data needs to be cleaned or + // transformed prior to use by other Elastic Stack functionality. + FieldStats map[string]types.FieldStat `json:"field_stats"` + // Format Valid values include `ndjson`, `xml`, `delimited`, and + // `semi_structured_text`. + Format string `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + // HasByteOrderMarker For UTF character encodings, it indicates whether the text begins with a byte + // order marker. + HasByteOrderMarker bool `json:"has_byte_order_marker"` + HasHeaderRow *bool `json:"has_header_row,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + // JavaTimestampFormats The Java time formats recognized in the time fields. + // Elasticsearch mappings and ingest pipelines use this format. + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + // JodaTimestampFormats Information that is used to tell Logstash how to parse timestamps. + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + // Mappings Some suitable mappings for an index into which the data could be ingested. + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + // NeedClientTimezone If a timestamp format is detected that does not include a timezone, + // `need_client_timezone` is `true`. + // The server that parses the text must therefore be told the correct timezone + // by the client. + NeedClientTimezone bool `json:"need_client_timezone"` + // NumLinesAnalyzed The number of lines of the text that were analyzed. + NumLinesAnalyzed int `json:"num_lines_analyzed"` + // NumMessagesAnalyzed The number of distinct messages the lines contained. + // For NDJSON, this value is the same as `num_lines_analyzed`. + // For other text formats, messages can span several lines. + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + Quote *string `json:"quote,omitempty"` + // SampleStart The first two messages in the text verbatim. + // This may help diagnose parse errors or accidental uploads of the wrong text. + SampleStart string `json:"sample_start"` + ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` + // TimestampField The field considered most likely to be the primary timestamp of each + // document. + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/request.go new file mode 100644 index 000000000..7a3cf6618 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/request.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package testgrokpattern + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Request holds the request body struct for the package testgrokpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/test_grok_pattern/TestGrokPatternRequest.ts#L23-L59 +type Request struct { + + // GrokPattern The Grok pattern to run on the text. + GrokPattern string `json:"grok_pattern"` + // Text The lines of text to run the Grok pattern on. + Text []string `json:"text"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Testgrokpattern request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "grok_pattern": + if err := dec.Decode(&s.GrokPattern); err != nil { + return fmt.Errorf("%s | %w", "GrokPattern", err) + } + + case "text": + if err := dec.Decode(&s.Text); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/response.go new file mode 100644 index 000000000..7faeadbb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package testgrokpattern + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package testgrokpattern +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/test_grok_pattern/TestGrokPatternResponse.ts#L22-L26 +type Response struct { + Matches []types.MatchedText `json:"matches"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/test_grok_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/test_grok_pattern.go new file mode 100644 index 000000000..811abcd36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern/test_grok_pattern.go @@ -0,0 +1,380 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Test a Grok pattern. +// Test a Grok pattern on one or more lines of text. +// The API indicates whether the lines match the pattern together with the +// offsets and lengths of the matched substrings. +package testgrokpattern + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type TestGrokPattern struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewTestGrokPattern type alias for index. +type NewTestGrokPattern func() *TestGrokPattern + +// NewTestGrokPatternFunc returns a new instance of TestGrokPattern with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewTestGrokPatternFunc(tp elastictransport.Interface) NewTestGrokPattern { + return func() *TestGrokPattern { + n := New(tp) + + return n + } +} + +// Test a Grok pattern. 
+// Test a Grok pattern on one or more lines of text.
+// The API indicates whether the lines match the pattern together with the
+// offsets and lengths of the matched substrings.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern
+func New(tp elastictransport.Interface) *TestGrokPattern {
+	r := &TestGrokPattern{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a json payload as input which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *TestGrokPattern) Raw(raw io.Reader) *TestGrokPattern {
+	r.raw = raw
+
+	return r
+}
+
+// Request allows setting the request property with the appropriate payload.
+func (r *TestGrokPattern) Request(req *Request) *TestGrokPattern {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *TestGrokPattern) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	if len(r.deferred) > 0 {
+		for _, f := range r.deferred {
+			deferredErr := f(r.req)
+			if deferredErr != nil {
+				return nil, deferredErr
+			}
+		}
+	}
+
+	if r.raw == nil && r.req != nil {
+
+		data, err := json.Marshal(r.req)
+
+		if err != nil {
+			return nil, fmt.Errorf("could not serialise request for TestGrokPattern: %w", err)
+		}
+
+		r.buf.Write(data)
+
+	}
+
+	if r.buf.Len() > 0 {
+		r.raw = r.buf
+	}
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_text_structure")
+		path.WriteString("/")
+		path.WriteString("test_grok_pattern")
+
+		method = http.MethodPost
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
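+//
+// Most callers go through Do, which wraps Perform and decodes the body.
+// A rough sketch (tp and the sample pattern/text are illustrative
+// assumptions):
+//
+//	res, err := testgrokpattern.New(tp).
+//		GrokPattern("%{TIMESTAMP_ISO8601:timestamp} %{GREEDYDATA:message}").
+//		Text("2024-01-01T00:00:00Z service started").
+//		Do(context.Background())
+//	if err == nil {
+//		_ = res.Matches // per-line match results with field offsets
+//	}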
+func (r TestGrokPattern) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "text_structure.test_grok_pattern") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "text_structure.test_grok_pattern") + if reader := instrument.RecordRequestBody(ctx, "text_structure.test_grok_pattern", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "text_structure.test_grok_pattern") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the TestGrokPattern query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a testgrokpattern.Response +func (r TestGrokPattern) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "text_structure.test_grok_pattern") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the TestGrokPattern headers map. +func (r *TestGrokPattern) Header(key, value string) *TestGrokPattern { + r.headers.Set(key, value) + + return r +} + +// EcsCompatibility The mode of compatibility with ECS compliant Grok patterns. +// Use this parameter to specify whether to use ECS Grok patterns instead of +// legacy ones when the structure finder creates a Grok pattern. +// Valid values are `disabled` and `v1`. 
+// API name: ecs_compatibility +func (r *TestGrokPattern) EcsCompatibility(ecscompatibility string) *TestGrokPattern { + r.values.Set("ecs_compatibility", ecscompatibility) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *TestGrokPattern) ErrorTrace(errortrace bool) *TestGrokPattern { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *TestGrokPattern) FilterPath(filterpaths ...string) *TestGrokPattern { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *TestGrokPattern) Human(human bool) *TestGrokPattern { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *TestGrokPattern) Pretty(pretty bool) *TestGrokPattern { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The Grok pattern to run on the text. +// API name: grok_pattern +func (r *TestGrokPattern) GrokPattern(grokpattern string) *TestGrokPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.GrokPattern = grokpattern + + return r +} + +// The lines of text to run the Grok pattern on. +// API name: text +func (r *TestGrokPattern) Text(texts ...string) *TestGrokPattern { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + for _, v := range texts { + + r.req.Text = append(r.req.Text, v) + + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/delete_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/delete_transform.go new file mode 100644 index 000000000..ee88184db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/delete_transform.go @@ -0,0 +1,371 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a transform. +package deletetransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteTransform type alias for index. +type NewDeleteTransform func(transformid string) *DeleteTransform + +// NewDeleteTransformFunc returns a new instance of DeleteTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteTransformFunc(tp elastictransport.Interface) NewDeleteTransform { + return func(transformid string) *DeleteTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Delete a transform. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform +func New(tp elastictransport.Interface) *DeleteTransform { + r := &DeleteTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
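+//
+// A rough usage sketch (tp and the transform ID "my-transform" are
+// illustrative assumptions):
+//
+//	res, err := deletetransform.NewDeleteTransformFunc(tp)("my-transform").
+//		Force(true).
+//		Do(context.Background())
+//	if err == nil {
+//		_ = res.Acknowledged // true on success
+//	}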
+func (r DeleteTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.delete_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.delete_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.delete_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.delete_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletetransform.Response +func (r DeleteTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.delete_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
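+// For example (tp and the transform ID are illustrative assumptions):
+//
+//	ok, err := deletetransform.NewDeleteTransformFunc(tp)("my-transform").
+//		IsSuccess(context.Background())
+//	// ok is true for any 2xx status; a 404 yields false with a nil error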
+func (r DeleteTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.delete_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteTransform headers map. +func (r *DeleteTransform) Header(key, value string) *DeleteTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. +// API Name: transformid +func (r *DeleteTransform) _transformid(transformid string) *DeleteTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Force If this value is false, the transform must be stopped before it can be +// deleted. If true, the transform is +// deleted regardless of its current state. +// API name: force +func (r *DeleteTransform) Force(force bool) *DeleteTransform { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// DeleteDestIndex If this value is true, the destination index is deleted together with the +// transform. If false, the destination +// index will not be deleted +// API name: delete_dest_index +func (r *DeleteTransform) DeleteDestIndex(deletedestindex bool) *DeleteTransform { + r.values.Set("delete_dest_index", strconv.FormatBool(deletedestindex)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *DeleteTransform) Timeout(duration string) *DeleteTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteTransform) ErrorTrace(errortrace bool) *DeleteTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteTransform) FilterPath(filterpaths ...string) *DeleteTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. 
+// API name: human +func (r *DeleteTransform) Human(human bool) *DeleteTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteTransform) Pretty(pretty bool) *DeleteTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/response.go new file mode 100644 index 000000000..9c47b55be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletetransform + +// Response holds the response body struct for the package deletetransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/getnodestats/get_node_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/getnodestats/get_node_stats.go new file mode 100644 index 000000000..a17c3fe8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/getnodestats/get_node_stats.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Retrieves transform usage information for transform nodes. +package getnodestats + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetNodeStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetNodeStats type alias for index. +type NewGetNodeStats func() *GetNodeStats + +// NewGetNodeStatsFunc returns a new instance of GetNodeStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetNodeStatsFunc(tp elastictransport.Interface) NewGetNodeStats { + return func() *GetNodeStats { + n := New(tp) + + return n + } +} + +// Retrieves transform usage information for transform nodes. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html +func New(tp elastictransport.Interface) *GetNodeStats { + r := &GetNodeStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetNodeStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString("_node_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
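+//
+// A rough usage sketch (tp is an assumed elastictransport.Interface):
+//
+//	ok, err := getnodestats.New(tp).Do(context.Background())
+//	// For this endpoint, Do delegates to IsSuccess and only reports success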
+func (r GetNodeStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.get_node_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_node_stats") + if reader := instrument.RecordRequestBody(ctx, "transform.get_node_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_node_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetNodeStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getnodestats.Response +func (r GetNodeStats) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetNodeStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_node_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetNodeStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetNodeStats headers map. +func (r *GetNodeStats) Header(key, value string) *GetNodeStats { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/get_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/get_transform.go new file mode 100644 index 000000000..cee117a64 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/get_transform.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get transforms. +// Get configuration information for transforms. +package gettransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTransform type alias for index. +type NewGetTransform func() *GetTransform + +// NewGetTransformFunc returns a new instance of GetTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetTransformFunc(tp elastictransport.Interface) NewGetTransform { + return func() *GetTransform { + n := New(tp) + + return n + } +} + +// Get transforms. +// Get configuration information for transforms. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform +func New(tp elastictransport.Interface) *GetTransform { + r := &GetTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + + method = http.MethodGet + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_transform") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.get_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.get_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettransform.Response +func (r GetTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + 
+ return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTransform headers map. +func (r *GetTransform) Header(key, value string) *GetTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. It can be a transform identifier or a +// wildcard expression. You can get information for all transforms by using +// `_all`, by specifying `*` as the ``, or by omitting the +// ``. +// API Name: transformid +func (r *GetTransform) TransformId(transformid string) *GetTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no transforms that match. +// 2. Contains the _all string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// If this parameter is false, the request returns a 404 status code when +// there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetTransform) AllowNoMatch(allownomatch bool) *GetTransform { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// From Skips the specified number of transforms. +// API name: from +func (r *GetTransform) From(from int) *GetTransform { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies the maximum number of transforms to obtain. +// API name: size +func (r *GetTransform) Size(size int) *GetTransform { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// ExcludeGenerated Excludes fields that were automatically added when creating the +// transform. This allows the configuration to be in an acceptable format to +// be retrieved and then added to another cluster. 
+// API name: exclude_generated +func (r *GetTransform) ExcludeGenerated(excludegenerated bool) *GetTransform { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTransform) ErrorTrace(errortrace bool) *GetTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTransform) FilterPath(filterpaths ...string) *GetTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetTransform) Human(human bool) *GetTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetTransform) Pretty(pretty bool) *GetTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/response.go new file mode 100644 index 000000000..422c9a9be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
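With the query-parameter setters in place, a typical read path chains them onto New and finishes with Do; the Count and Transforms fields used below are the ones defined in response.go that follows. A sketch under the assumption that tp is an already-configured elastictransport.Interface:

package transforms

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform"
)

// listTransforms pages through transform configurations.
func listTransforms(ctx context.Context, tp elastictransport.Interface) error {
	res, err := gettransform.New(tp).
		TransformId("*").       // wildcard: match every transform
		AllowNoMatch(true).     // an empty match set returns 200, not 404
		From(0).Size(100).      // paging over the configurations
		ExcludeGenerated(true). // keep configs portable across clusters
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("found %d transform(s), %d in this page\n", res.Count, len(res.Transforms))
	return nil
}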
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettransform + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 +type Response struct { + Count int64 `json:"count"` + Transforms []types.TransformSummary `json:"transforms"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/get_transform_stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/get_transform_stats.go new file mode 100644 index 000000000..05591aca4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/get_transform_stats.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get transform stats. +// +// Get usage information for transforms. +package gettransformstats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetTransformStats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetTransformStats type alias for index. +type NewGetTransformStats func(transformid string) *GetTransformStats + +// NewGetTransformStatsFunc returns a new instance of GetTransformStats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetTransformStatsFunc(tp elastictransport.Interface) NewGetTransformStats { + return func(transformid string) *GetTransformStats { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Get transform stats. +// +// Get usage information for transforms. 
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats +func New(tp elastictransport.Interface) *GetTransformStats { + r := &GetTransformStats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetTransformStats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_stats") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetTransformStats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.get_transform_stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.get_transform_stats") + if reader := instrument.RecordRequestBody(ctx, "transform.get_transform_stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.get_transform_stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the GetTransformStats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a gettransformstats.Response +func (r GetTransformStats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetTransformStats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.get_transform_stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetTransformStats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetTransformStats headers map. +func (r *GetTransformStats) Header(key, value string) *GetTransformStats { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. It can be a transform identifier or a +// wildcard expression. You can get information for all transforms by using +// `_all`, by specifying `*` as the ``, or by omitting the +// ``. +// API Name: transformid +func (r *GetTransformStats) _transformid(transformid string) *GetTransformStats { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// AllowNoMatch Specifies what to do when the request: +// +// 1. Contains wildcard expressions and there are no transforms that match. +// 2. Contains the _all string or no identifiers and there are no matches. +// 3. Contains wildcard expressions and there are only partial matches. +// +// If this parameter is false, the request returns a 404 status code when +// there are no matches or only partial matches. +// API name: allow_no_match +func (r *GetTransformStats) AllowNoMatch(allownomatch bool) *GetTransformStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// From Skips the specified number of transforms. +// API name: from +func (r *GetTransformStats) From(from string) *GetTransformStats { + r.values.Set("from", from) + + return r +} + +// Size Specifies the maximum number of transforms to obtain. +// API name: size +func (r *GetTransformStats) Size(size string) *GetTransformStats { + r.values.Set("size", size) + + return r +} + +// Timeout Controls the time to wait for the stats +// API name: timeout +func (r *GetTransformStats) Timeout(duration string) *GetTransformStats { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetTransformStats) ErrorTrace(errortrace bool) *GetTransformStats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetTransformStats) FilterPath(filterpaths ...string) *GetTransformStats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. 
+// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetTransformStats) Human(human bool) *GetTransformStats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetTransformStats) Pretty(pretty bool) *GetTransformStats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/response.go new file mode 100644 index 000000000..d90626ee5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package gettransformstats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package gettransformstats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 +type Response struct { + Count int64 `json:"count"` + Transforms []types.TransformStats `json:"transforms"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/preview_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/preview_transform.go new file mode 100644 index 000000000..2988120be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/preview_transform.go @@ -0,0 +1,514 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
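gettransformstats differs from gettransform in two caller-visible ways: the transform ID is mandatory, so the exported factory takes it up front and the setter is the unexported _transformid; and From/Size are string-valued here rather than ints. A sketch with a hypothetical transform ID, again assuming a configured tp:

package transforms

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats"
)

// showStats fetches runtime statistics for one or more transforms.
func showStats(ctx context.Context, tp elastictransport.Interface, id string) error {
	getStats := gettransformstats.NewGetTransformStatsFunc(tp)
	res, err := getStats(id). // e.g. "my-transform", "*", or "_all"
		AllowNoMatch(true).
		From("0").Size("10"). // string-typed in this endpoint
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("stats returned for %d transform(s)\n", res.Count)
	return nil
}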
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Preview a transform. +// Generates a preview of the results that you will get when you create a +// transform with the same configuration. +// +// It returns a maximum of 100 results. The calculations are based on all the +// current data in the source index. It also +// generates a list of mappings and settings for the destination index. These +// values are determined based on the field +// types of the source index and the transform aggregations. +package previewtransform + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PreviewTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPreviewTransform type alias for index. +type NewPreviewTransform func() *PreviewTransform + +// NewPreviewTransformFunc returns a new instance of PreviewTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPreviewTransformFunc(tp elastictransport.Interface) NewPreviewTransform { + return func() *PreviewTransform { + n := New(tp) + + return n + } +} + +// Preview a transform. +// Generates a preview of the results that you will get when you create a +// transform with the same configuration. +// +// It returns a maximum of 100 results. The calculations are based on all the +// current data in the source index. It also +// generates a list of mappings and settings for the destination index. These +// values are determined based on the field +// types of the source index and the transform aggregations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform +func New(tp elastictransport.Interface) *PreviewTransform { + r := &PreviewTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *PreviewTransform) Raw(raw io.Reader) *PreviewTransform { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PreviewTransform) Request(req *Request) *PreviewTransform { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PreviewTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PreviewTransform: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString("_preview") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PreviewTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.preview_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.preview_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.preview_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.preview_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PreviewTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a previewtransform.Response +func (r PreviewTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.preview_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PreviewTransform headers map. +func (r *PreviewTransform) Header(key, value string) *PreviewTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform to preview. If you specify this path parameter, +// you cannot provide transform +// configuration details in the request body. +// API Name: transformid +func (r *PreviewTransform) TransformId(transformid string) *PreviewTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Timeout Period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. 
+// API name: timeout +func (r *PreviewTransform) Timeout(duration string) *PreviewTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PreviewTransform) ErrorTrace(errortrace bool) *PreviewTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PreviewTransform) FilterPath(filterpaths ...string) *PreviewTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PreviewTransform) Human(human bool) *PreviewTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PreviewTransform) Pretty(pretty bool) *PreviewTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Free text description of the transform. +// API name: description +func (r *PreviewTransform) Description(description string) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The destination for the transform. +// API name: dest +func (r *PreviewTransform) Dest(dest types.TransformDestinationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = dest.TransformDestinationCaster() + + return r +} + +// The interval between checks for changes in the source indices when the +// transform is running continuously. Also determines the retry interval in +// the event of transient failures while the transform is searching or +// indexing. The minimum value is 1s and the maximum is 1h. +// API name: frequency +func (r *PreviewTransform) Frequency(duration types.DurationVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() + + return r +} + +// The latest method transforms the data by finding the latest document for +// each unique key. +// API name: latest +func (r *PreviewTransform) Latest(latest types.LatestVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Latest = latest.LatestCaster() + + return r +} + +// The pivot method transforms the data by aggregating and grouping it. +// These objects define the group by fields and the aggregation to reduce +// the data. 
+// API name: pivot +func (r *PreviewTransform) Pivot(pivot types.PivotVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pivot = pivot.PivotCaster() + + return r +} + +// Defines a retention policy for the transform. Data that meets the defined +// criteria is deleted from the destination index. +// API name: retention_policy +func (r *PreviewTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() + + return r +} + +// Defines optional transform settings. +// API name: settings +func (r *PreviewTransform) Settings(settings types.SettingsVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.SettingsCaster() + + return r +} + +// The source of the data for the transform. +// API name: source +func (r *PreviewTransform) Source(source types.TransformSourceVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = source.TransformSourceCaster() + + return r +} + +// Defines the properties transforms require to run continuously. +// API name: sync +func (r *PreviewTransform) Sync(sync types.SyncContainerVariant) *PreviewTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sync = sync.SyncContainerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/request.go new file mode 100644 index 000000000..e599c32df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/request.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
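Because preview accepts the full transform definition in the body, the quickest smoke test is to ship JSON through Raw, which bypasses the typed Request builder entirely. The index and field names below are illustrative; the Preview slice (up to 100 documents) and GeneratedDestIndex come from response.go further down:

package transforms

import (
	"context"
	"fmt"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform"
)

// previewLatest previews a "latest"-style transform without creating it.
func previewLatest(ctx context.Context, tp elastictransport.Interface) error {
	body := `{
	  "source": { "index": "kibana_sample_data_ecommerce" },
	  "latest": { "unique_key": ["customer_id"], "sort": "order_date" }
	}`
	res, err := previewtransform.New(tp).Raw(strings.NewReader(body)).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("previewed %d document(s)\n", len(res.Preview))
	return nil
}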
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewtransform + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package previewtransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L119 +type Request struct { + + // Description Free text description of the transform. + Description *string `json:"description,omitempty"` + // Dest The destination for the transform. + Dest *types.TransformDestination `json:"dest,omitempty"` + // Frequency The interval between checks for changes in the source indices when the + // transform is running continuously. Also determines the retry interval in + // the event of transient failures while the transform is searching or + // indexing. The minimum value is 1s and the maximum is 1h. + Frequency types.Duration `json:"frequency,omitempty"` + // Latest The latest method transforms the data by finding the latest document for + // each unique key. + Latest *types.Latest `json:"latest,omitempty"` + // Pivot The pivot method transforms the data by aggregating and grouping it. + // These objects define the group by fields and the aggregation to reduce + // the data. + Pivot *types.Pivot `json:"pivot,omitempty"` + // RetentionPolicy Defines a retention policy for the transform. Data that meets the defined + // criteria is deleted from the destination index. + RetentionPolicy *types.RetentionPolicyContainer `json:"retention_policy,omitempty"` + // Settings Defines optional transform settings. + Settings *types.Settings `json:"settings,omitempty"` + // Source The source of the data for the transform. + Source *types.TransformSource `json:"source,omitempty"` + // Sync Defines the properties transforms require to run continuously. 
+ Sync *types.SyncContainer `json:"sync,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Previewtransform request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return fmt.Errorf("%s | %w", "Dest", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "latest": + if err := dec.Decode(&s.Latest); err != nil { + return fmt.Errorf("%s | %w", "Latest", err) + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + + case "retention_policy": + if err := dec.Decode(&s.RetentionPolicy); err != nil { + return fmt.Errorf("%s | %w", "RetentionPolicy", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "sync": + if err := dec.Decode(&s.Sync); err != nil { + return fmt.Errorf("%s | %w", "Sync", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/response.go new file mode 100644 index 000000000..5d1a3b970 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
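FromJSON above (which in turn exercises the custom UnmarshalJSON) gives a middle path between Raw and hand-built typed requests: hydrate a Request from stored JSON, then optionally amend it with the fluent setters before submitting. A sketch, with the cfg string assumed to hold a transform definition like the one in the previous example:

package transforms

import (
	"context"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform"
)

// previewFromJSON hydrates the typed Request from externally stored JSON
// and submits it via Request rather than Raw, so setters such as
// Description can still override individual fields afterwards.
func previewFromJSON(ctx context.Context, tp elastictransport.Interface, cfg string) (*previewtransform.Response, error) {
	req, err := previewtransform.NewRequest().FromJSON(cfg)
	if err != nil {
		return nil, err
	}
	return previewtransform.New(tp).Request(req).Description("loaded from config store").Do(ctx)
}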
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package previewtransform + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package previewtransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 +type Response struct { + GeneratedDestIndex types.IndexState `json:"generated_dest_index"` + Preview []json.RawMessage `json:"preview"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/put_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/put_transform.go new file mode 100644 index 000000000..9bc3ce7b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/put_transform.go @@ -0,0 +1,591 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create a transform. +// Creates a transform. +// +// A transform copies data from source indices, transforms it, and persists it +// into an entity-centric destination index. You can also think of the +// destination index as a two-dimensional tabular data structure (known as +// a data frame). The ID for each document in the data frame is generated from a +// hash of the entity, so there is a +// unique row per entity. +// +// You must choose either the latest or pivot method for your transform; you +// cannot use both in a single transform. If +// you choose to use the pivot method for your transform, the entities are +// defined by the set of `group_by` fields in +// the pivot object. If you choose to use the latest method, the entities are +// defined by the `unique_key` field values +// in the latest object. +// +// You must have `create_index`, `index`, and `read` privileges on the +// destination index and `read` and +// `view_index_metadata` privileges on the source indices. When Elasticsearch +// security features are enabled, the +// transform remembers which roles the user that created it had at the time of +// creation and uses those same roles. If +// those roles do not have the required privileges on the source and destination +// indices, the transform fails when it +// attempts unauthorized operations. +// +// NOTE: You must use Kibana or this API to create a transform. 
Do not add a +// transform directly into any +// `.transform-internal*` indices using the Elasticsearch index API. If +// Elasticsearch security features are enabled, do +// not give users any privileges on `.transform-internal*` indices. If you used +// transforms prior to 7.5, also do not +// give users any privileges on `.data-frame-internal*` indices. +package puttransform + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewPutTransform type alias for index. +type NewPutTransform func(transformid string) *PutTransform + +// NewPutTransformFunc returns a new instance of PutTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutTransformFunc(tp elastictransport.Interface) NewPutTransform { + return func(transformid string) *PutTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Create a transform. +// Creates a transform. +// +// A transform copies data from source indices, transforms it, and persists it +// into an entity-centric destination index. You can also think of the +// destination index as a two-dimensional tabular data structure (known as +// a data frame). The ID for each document in the data frame is generated from a +// hash of the entity, so there is a +// unique row per entity. +// +// You must choose either the latest or pivot method for your transform; you +// cannot use both in a single transform. If +// you choose to use the pivot method for your transform, the entities are +// defined by the set of `group_by` fields in +// the pivot object. If you choose to use the latest method, the entities are +// defined by the `unique_key` field values +// in the latest object. +// +// You must have `create_index`, `index`, and `read` privileges on the +// destination index and `read` and +// `view_index_metadata` privileges on the source indices. When Elasticsearch +// security features are enabled, the +// transform remembers which roles the user that created it had at the time of +// creation and uses those same roles. If +// those roles do not have the required privileges on the source and destination +// indices, the transform fails when it +// attempts unauthorized operations. +// +// NOTE: You must use Kibana or this API to create a transform. Do not add a +// transform directly into any +// `.transform-internal*` indices using the Elasticsearch index API. If +// Elasticsearch security features are enabled, do +// not give users any privileges on `.transform-internal*` indices. If you used +// transforms prior to 7.5, also do not +// give users any privileges on `.data-frame-internal*` indices. 
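Putting the pieces together, creating a transform mirrors the preview call: same body shape, plus the mandatory ID enforced by the factory. The names, intervals, and the sync block below are illustrative assumptions, and DeferValidation(true), defined below, skips the source/destination existence checks at creation time:

package transforms

import (
	"context"
	"strings"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform"
)

// createTransform creates a hypothetical continuous "latest" transform.
// The ID may contain lowercase alphanumerics, hyphens, and underscores,
// up to 64 characters, starting and ending with an alphanumeric.
func createTransform(ctx context.Context, tp elastictransport.Interface) error {
	put := puttransform.NewPutTransformFunc(tp)("ecommerce-customer-latest")
	_, err := put.
		Raw(strings.NewReader(`{
		  "source": { "index": "kibana_sample_data_ecommerce" },
		  "dest": { "index": "ecommerce-customer-latest" },
		  "latest": { "unique_key": ["customer_id"], "sort": "order_date" },
		  "frequency": "5m",
		  "sync": { "time": { "field": "order_date", "delay": "60s" } }
		}`)).
		DeferValidation(true).
		Do(ctx)
	return err
}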
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform +func New(tp elastictransport.Interface) *PutTransform { + r := &PutTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutTransform) Raw(raw io.Reader) *PutTransform { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutTransform) Request(req *Request) *PutTransform { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutTransform: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.put_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.put_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.put_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.put_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the PutTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a puttransform.Response +func (r PutTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.put_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the PutTransform headers map. +func (r *PutTransform) Header(key, value string) *PutTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. This identifier can contain lowercase +// alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. It has a 64 character limit and must start and end +// with alphanumeric characters. +// API Name: transformid +func (r *PutTransform) _transformid(transformid string) *PutTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// DeferValidation When the transform is created, a series of validations occur to ensure its +// success. 
For example, there is a +// check for the existence of the source indices and a check that the +// destination index is not part of the source +// index pattern. You can use this parameter to skip the checks, for example +// when the source index does not exist +// until after the transform is created. The validations are always run when you +// start the transform, however, with +// the exception of privilege checks. +// API name: defer_validation +func (r *PutTransform) DeferValidation(defervalidation bool) *PutTransform { + r.values.Set("defer_validation", strconv.FormatBool(defervalidation)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *PutTransform) Timeout(duration string) *PutTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *PutTransform) ErrorTrace(errortrace bool) *PutTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *PutTransform) FilterPath(filterpaths ...string) *PutTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *PutTransform) Human(human bool) *PutTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *PutTransform) Pretty(pretty bool) *PutTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Free text description of the transform. +// API name: description +func (r *PutTransform) Description(description string) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The destination for the transform. +// API name: dest +func (r *PutTransform) Dest(dest types.TransformDestinationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = *dest.TransformDestinationCaster() + + return r +} + +// The interval between checks for changes in the source indices when the +// transform is running continuously. Also +// determines the retry interval in the event of transient failures while the +// transform is searching or indexing. +// The minimum value is `1s` and the maximum is `1h`. 
+// API name: frequency +func (r *PutTransform) Frequency(duration types.DurationVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() + + return r +} + +// The latest method transforms the data by finding the latest document for each +// unique key. +// API name: latest +func (r *PutTransform) Latest(latest types.LatestVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Latest = latest.LatestCaster() + + return r +} + +// Defines optional transform metadata. +// API name: _meta +func (r *PutTransform) Meta_(metadata types.MetadataVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// The pivot method transforms the data by aggregating and grouping it. These +// objects define the group by fields +// and the aggregation to reduce the data. +// API name: pivot +func (r *PutTransform) Pivot(pivot types.PivotVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Pivot = pivot.PivotCaster() + + return r +} + +// Defines a retention policy for the transform. Data that meets the defined +// criteria is deleted from the +// destination index. +// API name: retention_policy +func (r *PutTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() + + return r +} + +// Defines optional transform settings. +// API name: settings +func (r *PutTransform) Settings(settings types.SettingsVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.SettingsCaster() + + return r +} + +// The source of the data for the transform. +// API name: source +func (r *PutTransform) Source(source types.TransformSourceVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = *source.TransformSourceCaster() + + return r +} + +// Defines the properties transforms require to run continuously. +// API name: sync +func (r *PutTransform) Sync(sync types.SyncContainerVariant) *PutTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sync = sync.SyncContainerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/request.go new file mode 100644 index 000000000..f21878d60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/request.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttransform + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package puttransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/put_transform/PutTransformRequest.ts#L33-L130 +type Request struct { + + // Description Free text description of the transform. + Description *string `json:"description,omitempty"` + // Dest The destination for the transform. + Dest types.TransformDestination `json:"dest"` + // Frequency The interval between checks for changes in the source indices when the + // transform is running continuously. Also + // determines the retry interval in the event of transient failures while the + // transform is searching or indexing. + // The minimum value is `1s` and the maximum is `1h`. + Frequency types.Duration `json:"frequency,omitempty"` + // Latest The latest method transforms the data by finding the latest document for each + // unique key. + Latest *types.Latest `json:"latest,omitempty"` + // Meta_ Defines optional transform metadata. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Pivot The pivot method transforms the data by aggregating and grouping it. These + // objects define the group by fields + // and the aggregation to reduce the data. + Pivot *types.Pivot `json:"pivot,omitempty"` + // RetentionPolicy Defines a retention policy for the transform. Data that meets the defined + // criteria is deleted from the + // destination index. + RetentionPolicy *types.RetentionPolicyContainer `json:"retention_policy,omitempty"` + // Settings Defines optional transform settings. + Settings *types.Settings `json:"settings,omitempty"` + // Source The source of the data for the transform. + Source types.TransformSource `json:"source"` + // Sync Defines the properties transforms require to run continuously. 
+ Sync *types.SyncContainer `json:"sync,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Puttransform request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return fmt.Errorf("%s | %w", "Dest", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "latest": + if err := dec.Decode(&s.Latest); err != nil { + return fmt.Errorf("%s | %w", "Latest", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + + case "retention_policy": + if err := dec.Decode(&s.RetentionPolicy); err != nil { + return fmt.Errorf("%s | %w", "RetentionPolicy", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "sync": + if err := dec.Decode(&s.Sync); err != nil { + return fmt.Errorf("%s | %w", "Sync", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/response.go new file mode 100644 index 000000000..d41a468d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package puttransform + +// Response holds the response body struct for the package puttransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/put_transform/PutTransformResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/reset_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/reset_transform.go new file mode 100644 index 000000000..530276320 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/reset_transform.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Reset a transform. +// +// Before you can reset it, you must stop it; alternatively, use the `force` +// query parameter. +// If the destination index was created by the transform, it is deleted. +package resettransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ResetTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewResetTransform type alias for index. +type NewResetTransform func(transformid string) *ResetTransform + +// NewResetTransformFunc returns a new instance of ResetTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewResetTransformFunc(tp elastictransport.Interface) NewResetTransform { + return func(transformid string) *ResetTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Reset a transform. 
+// +// Before you can reset it, you must stop it; alternatively, use the `force` +// query parameter. +// If the destination index was created by the transform, it is deleted. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform +func New(tp elastictransport.Interface) *ResetTransform { + r := &ResetTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ResetTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_reset") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ResetTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.reset_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.reset_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.reset_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.reset_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ResetTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a resettransform.Response +func (r ResetTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.reset_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
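+//
+// A hypothetical sketch ("es" is an assumed typed client). Force(true) resets
+// the transform even when it has not been stopped first:
+//
+//	ok, err := es.Transform.ResetTransform("my-transform").Force(true).IsSuccess(ctx)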
+func (r ResetTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.reset_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ResetTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ResetTransform headers map. +func (r *ResetTransform) Header(key, value string) *ResetTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. This identifier can contain lowercase +// alphanumeric characters (a-z and 0-9), +// hyphens, and underscores. It has a 64 character limit and must start and end +// with alphanumeric characters. +// API Name: transformid +func (r *ResetTransform) _transformid(transformid string) *ResetTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Force If this value is `true`, the transform is reset regardless of its current +// state. If it's `false`, the transform +// must be stopped before it can be reset. +// API name: force +func (r *ResetTransform) Force(force bool) *ResetTransform { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *ResetTransform) Timeout(duration string) *ResetTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ResetTransform) ErrorTrace(errortrace bool) *ResetTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ResetTransform) FilterPath(filterpaths ...string) *ResetTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ResetTransform) Human(human bool) *ResetTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *ResetTransform) Pretty(pretty bool) *ResetTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/response.go new file mode 100644 index 000000000..54ff1c0bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package resettransform + +// Response holds the response body struct for the package resettransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/response.go new file mode 100644 index 000000000..efb5be55d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package schedulenowtransform + +// Response holds the response body struct for the package schedulenowtransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L24 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/schedule_now_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/schedule_now_transform.go new file mode 100644 index 000000000..8cac877ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform/schedule_now_transform.go @@ -0,0 +1,372 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Schedule a transform to start now. +// +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API +// is called again in the meantime. +package schedulenowtransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ScheduleNowTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewScheduleNowTransform type alias for index. +type NewScheduleNowTransform func(transformid string) *ScheduleNowTransform + +// NewScheduleNowTransformFunc returns a new instance of ScheduleNowTransform with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewScheduleNowTransformFunc(tp elastictransport.Interface) NewScheduleNowTransform { + return func(transformid string) *ScheduleNowTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Schedule a transform to start now. +// +// Instantly run a transform to process data. +// If you run this API, the transform will process the new data instantly, +// without waiting for the configured frequency interval. After the API is +// called, +// the transform will be processed again at `now + frequency` unless the API +// is called again in the meantime. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform +func New(tp elastictransport.Interface) *ScheduleNowTransform { + r := &ScheduleNowTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ScheduleNowTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_schedule_now") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ScheduleNowTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.schedule_now_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.schedule_now_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.schedule_now_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.schedule_now_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ScheduleNowTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a schedulenowtransform.Response +func (r ScheduleNowTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.schedule_now_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
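+//
+// A hypothetical sketch ("es" is an assumed typed client):
+//
+//	ok, err := es.Transform.ScheduleNowTransform("my-transform").Timeout("30s").IsSuccess(ctx)
+//	if err == nil && ok {
+//		// the transform was scheduled to run ahead of its configured frequency
+//	}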
+func (r ScheduleNowTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.schedule_now_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ScheduleNowTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ScheduleNowTransform headers map. +func (r *ScheduleNowTransform) Header(key, value string) *ScheduleNowTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. +// API Name: transformid +func (r *ScheduleNowTransform) _transformid(transformid string) *ScheduleNowTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Timeout Controls the time to wait for the scheduling to take place +// API name: timeout +func (r *ScheduleNowTransform) Timeout(duration string) *ScheduleNowTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ScheduleNowTransform) ErrorTrace(errortrace bool) *ScheduleNowTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ScheduleNowTransform) FilterPath(filterpaths ...string) *ScheduleNowTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ScheduleNowTransform) Human(human bool) *ScheduleNowTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ScheduleNowTransform) Pretty(pretty bool) *ScheduleNowTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/response.go new file mode 100644 index 000000000..363546f08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package starttransform + +// Response holds the response body struct for the package starttransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/start_transform/StartTransformResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/start_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/start_transform.go new file mode 100644 index 000000000..8a7e7951f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform/start_transform.go @@ -0,0 +1,417 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start a transform. +// +// When you start a transform, it creates the destination index if it does not +// already exist. The `number_of_shards` is +// set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot +// transform, it deduces the mapping +// definitions for the destination index from the source indices and the +// transform aggregations. If fields in the +// destination index are derived from scripts (as in the case of +// `scripted_metric` or `bucket_script` aggregations), +// the transform uses dynamic mappings unless an index template exists. 
If it is +// a latest transform, it does not deduce +// mapping definitions; it uses dynamic mappings. To use explicit mappings, +// create the destination index before you +// start the transform. Alternatively, you can create an index template, though +// it does not affect the deduced mappings +// in a pivot transform. +// +// When the transform starts, a series of validations occur to ensure its +// success. If you deferred validation when you +// created the transform, they occur when you start the transform—​with the +// exception of privilege checks. When +// Elasticsearch security features are enabled, the transform remembers which +// roles the user that created it had at the +// time of creation and uses those same roles. If those roles do not have the +// required privileges on the source and +// destination indices, the transform fails when it attempts unauthorized +// operations. +package starttransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StartTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStartTransform type alias for index. +type NewStartTransform func(transformid string) *StartTransform + +// NewStartTransformFunc returns a new instance of StartTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartTransformFunc(tp elastictransport.Interface) NewStartTransform { + return func(transformid string) *StartTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Start a transform. +// +// When you start a transform, it creates the destination index if it does not +// already exist. The `number_of_shards` is +// set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot +// transform, it deduces the mapping +// definitions for the destination index from the source indices and the +// transform aggregations. If fields in the +// destination index are derived from scripts (as in the case of +// `scripted_metric` or `bucket_script` aggregations), +// the transform uses dynamic mappings unless an index template exists. If it is +// a latest transform, it does not deduce +// mapping definitions; it uses dynamic mappings. To use explicit mappings, +// create the destination index before you +// start the transform. Alternatively, you can create an index template, though +// it does not affect the deduced mappings +// in a pivot transform. +// +// When the transform starts, a series of validations occur to ensure its +// success. If you deferred validation when you +// created the transform, they occur when you start the transform—​with the +// exception of privilege checks. When +// Elasticsearch security features are enabled, the transform remembers which +// roles the user that created it had at the +// time of creation and uses those same roles. 
If those roles do not have the +// required privileges on the source and +// destination indices, the transform fails when it attempts unauthorized +// operations. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform +func New(tp elastictransport.Interface) *StartTransform { + r := &StartTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StartTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r StartTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.start_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.start_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.start_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.start_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StartTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a starttransform.Response +func (r StartTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.start_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
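+//
+// A hypothetical sketch ("es" is an assumed typed client). From restricts a
+// continuous transform to entities changed after the given time:
+//
+//	ok, err := es.Transform.StartTransform("my-transform").From("now-30d").IsSuccess(ctx)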
+func (r StartTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.start_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StartTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StartTransform headers map. +func (r *StartTransform) Header(key, value string) *StartTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. +// API Name: transformid +func (r *StartTransform) _transformid(transformid string) *StartTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and returns an error. +// API name: timeout +func (r *StartTransform) Timeout(duration string) *StartTransform { + r.values.Set("timeout", duration) + + return r +} + +// From Restricts the set of transformed entities to those changed after this time. +// Relative times like now-30d are supported. Only applicable for continuous +// transforms. +// API name: from +func (r *StartTransform) From(from string) *StartTransform { + r.values.Set("from", from) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StartTransform) ErrorTrace(errortrace bool) *StartTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StartTransform) FilterPath(filterpaths ...string) *StartTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StartTransform) Human(human bool) *StartTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *StartTransform) Pretty(pretty bool) *StartTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/response.go new file mode 100644 index 000000000..52671b677 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stoptransform + +// Response holds the response body struct for the package stoptransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/stop_transform/StopTransformResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/stop_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/stop_transform.go new file mode 100644 index 000000000..1e1c3639f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform/stop_transform.go @@ -0,0 +1,407 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop transforms. +// Stops one or more transforms. 
+package stoptransform + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type StopTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStopTransform type alias for index. +type NewStopTransform func(transformid string) *StopTransform + +// NewStopTransformFunc returns a new instance of StopTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopTransformFunc(tp elastictransport.Interface) NewStopTransform { + return func(transformid string) *StopTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Stop transforms. +// Stops one or more transforms. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform +func New(tp elastictransport.Interface) *StopTransform { + r := &StopTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *StopTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
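Before Perform is defined below, note that HttpRequest alone is enough to see exactly what the builder produces, which makes the path logic above easy to verify. A small sketch that runs without a live cluster, since no request is performed; tp and the transform id are placeholders (the WaitForCheckpoint setter appears later in this file):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform"
)

func main() {
	var tp elastictransport.Interface // e.g. the Transport of a configured elasticsearch.Client

	req, err := stoptransform.NewStopTransformFunc(tp)("my-transform").
		WaitForCheckpoint(true).
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The builder only fills in path and query; scheme and host are supplied
	// by the transport when the request is actually performed.
	fmt.Println(req.Method, req.URL.Path, req.URL.RawQuery)
	// POST /_transform/my-transform/_stop wait_for_checkpoint=true
}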
+func (r StopTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.stop_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.stop_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.stop_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.stop_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the StopTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stoptransform.Response +func (r StopTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.stop_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
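A sketch of the decoded path through Do, including the typed error that non-2xx responses are turned into by the branch above; the client wiring and the id are illustrative, and Acknowledged comes from the Response struct in response.go:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := stoptransform.NewStopTransformFunc(es.Transport)("my-transform").
		WaitForCompletion(true).
		WaitForCheckpoint(true).
		Timeout("30s").
		Do(context.Background())
	if err != nil {
		// Do decodes non-2xx bodies into *types.ElasticsearchError.
		var esErr *types.ElasticsearchError
		if errors.As(err, &esErr) {
			log.Fatalf("stop failed with status %d", esErr.Status)
		}
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}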
+func (r StopTransform) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.stop_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the StopTransform query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the StopTransform headers map. +func (r *StopTransform) Header(key, value string) *StopTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. To stop multiple transforms, use a +// comma-separated list or a wildcard expression. +// To stop all transforms, use `_all` or `*` as the identifier. +// API Name: transformid +func (r *StopTransform) _transformid(transformid string) *StopTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and +// there are no transforms that match; +// contains the `_all` string or no identifiers and there are no matches; +// contains wildcard expressions and there +// are only partial matches. +// +// If it is true, the API returns a successful acknowledgement message when +// there are no matches. When there are +// only partial matches, the API stops the appropriate transforms. +// +// If it is false, the request returns a 404 status code when there are no +// matches or only partial matches. +// API name: allow_no_match +func (r *StopTransform) AllowNoMatch(allownomatch bool) *StopTransform { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) + + return r +} + +// Force If it is true, the API forcefully stops the transforms. +// API name: force +func (r *StopTransform) Force(force bool) *StopTransform { + r.values.Set("force", strconv.FormatBool(force)) + + return r +} + +// Timeout Period to wait for a response when `wait_for_completion` is `true`. If no +// response is received before the +// timeout expires, the request returns a timeout exception. However, the +// request continues processing and +// eventually moves the transform to a STOPPED state. +// API name: timeout +func (r *StopTransform) Timeout(duration string) *StopTransform { + r.values.Set("timeout", duration) + + return r +} + +// WaitForCheckpoint If it is true, the transform does not completely stop until the current +// checkpoint is completed. If it is false, +// the transform stops as soon as possible. +// API name: wait_for_checkpoint +func (r *StopTransform) WaitForCheckpoint(waitforcheckpoint bool) *StopTransform { + r.values.Set("wait_for_checkpoint", strconv.FormatBool(waitforcheckpoint)) + + return r +} + +// WaitForCompletion If it is true, the API blocks until the indexer state completely stops. If it +// is false, the API returns +// immediately and the indexer is stopped asynchronously in the background. 
+// API name: wait_for_completion +func (r *StopTransform) WaitForCompletion(waitforcompletion bool) *StopTransform { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *StopTransform) ErrorTrace(errortrace bool) *StopTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *StopTransform) FilterPath(filterpaths ...string) *StopTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *StopTransform) Human(human bool) *StopTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *StopTransform) Pretty(pretty bool) *StopTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/request.go new file mode 100644 index 000000000..bc89e1771 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/request.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatetransform + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package updatetransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L113 +type Request struct { + + // Description Free text description of the transform. 
+ Description *string `json:"description,omitempty"` + // Dest The destination for the transform. + Dest *types.TransformDestination `json:"dest,omitempty"` + // Frequency The interval between checks for changes in the source indices when the + // transform is running continuously. Also determines the retry interval in + // the event of transient failures while the transform is searching or + // indexing. The minimum value is 1s and the maximum is 1h. + Frequency types.Duration `json:"frequency,omitempty"` + // Meta_ Defines optional transform metadata. + Meta_ types.Metadata `json:"_meta,omitempty"` + // RetentionPolicy Defines a retention policy for the transform. Data that meets the defined + // criteria is deleted from the destination index. + RetentionPolicy *types.RetentionPolicyContainer `json:"retention_policy,omitempty"` + // Settings Defines optional transform settings. + Settings *types.Settings `json:"settings,omitempty"` + // Source The source of the data for the transform. + Source *types.TransformSource `json:"source,omitempty"` + // Sync Defines the properties transforms require to run continuously. + Sync *types.SyncContainer `json:"sync,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatetransform request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return fmt.Errorf("%s | %w", "Dest", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "retention_policy": + if err := dec.Decode(&s.RetentionPolicy); err != nil { + return fmt.Errorf("%s | %w", "RetentionPolicy", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "sync": + if err := dec.Decode(&s.Sync); err != nil { + return fmt.Errorf("%s | %w", "Sync", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/response.go new file mode 100644 index 000000000..f71dac22a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/response.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatetransform + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package updatetransform +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 +type Response struct { + Authorization *types.TransformAuthorization `json:"authorization,omitempty"` + CreateTime int64 `json:"create_time"` + Description string `json:"description"` + Dest types.ReindexDestination `json:"dest"` + Frequency types.Duration `json:"frequency,omitempty"` + Id string `json:"id"` + Latest *types.Latest `json:"latest,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` + Pivot *types.Pivot `json:"pivot,omitempty"` + RetentionPolicy *types.RetentionPolicyContainer `json:"retention_policy,omitempty"` + Settings types.Settings `json:"settings"` + Source types.ReindexSource `json:"source"` + Sync *types.SyncContainer `json:"sync,omitempty"` + Version string `json:"version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/update_transform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/update_transform.go new file mode 100644 index 000000000..d908ea1bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform/update_transform.go @@ -0,0 +1,507 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update a transform. +// Updates certain properties of a transform. 
+// +// All updated properties except `description` do not take effect until after +// the transform starts the next checkpoint, +// thus there is data consistency in each checkpoint. To use this API, you must +// have `read` and `view_index_metadata` +// privileges for the source indices. You must also have `index` and `read` +// privileges for the destination index. When +// Elasticsearch security features are enabled, the transform remembers which +// roles the user who updated it had at the +// time of update and runs with those privileges. +package updatetransform + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + transformid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateTransform type alias for index. +type NewUpdateTransform func(transformid string) *UpdateTransform + +// NewUpdateTransformFunc returns a new instance of UpdateTransform with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateTransformFunc(tp elastictransport.Interface) NewUpdateTransform { + return func(transformid string) *UpdateTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Update a transform. +// Updates certain properties of a transform. +// +// All updated properties except `description` do not take effect until after +// the transform starts the next checkpoint, +// thus there is data consistency in each checkpoint. To use this API, you must +// have `read` and `view_index_metadata` +// privileges for the source indices. You must also have `index` and `read` +// privileges for the destination index. When +// Elasticsearch security features are enabled, the transform remembers which +// roles the user who updated it had at the +// time of update and runs with those privileges. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform +func New(tp elastictransport.Interface) *UpdateTransform { + r := &UpdateTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateTransform) Raw(raw io.Reader) *UpdateTransform { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateTransform) Request(req *Request) *UpdateTransform { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
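Because Raw takes precedence over Request, a caller can choose between a hand-built reader and the typed struct, and FromJSON (in request.go above) bridges the two. A sketch that only builds the http.Request, so it runs without a cluster; tp and the payload are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/elastic-transport-go/v8/elastictransport"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform"
)

func main() {
	var tp elastictransport.Interface // e.g. the Transport of a configured elasticsearch.Client

	// Load the body from arbitrary JSON rather than building the struct by hand.
	body, err := updatetransform.NewRequest().FromJSON(`{"description":"nightly rollup"}`)
	if err != nil {
		log.Fatal(err)
	}

	req, err := updatetransform.NewUpdateTransformFunc(tp)("my-transform").
		Request(body).
		HttpRequest(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The serialised struct is sent with the compatibility content type;
	// a reader passed via Raw would have taken precedence over Request.
	fmt.Println(req.Method, req.URL.Path, req.Header.Get("Content-Type"))
	// POST /_transform/my-transform/_update application/vnd.elasticsearch+json;compatible-with=9
}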
+func (r *UpdateTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateTransform: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "transformid", r.transformid) + } + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_update") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
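End to end, the fluent setters defined further down (Description, DeferValidation, and friends) populate the same Request that HttpRequest just serialised. A sketch under the usual assumptions of a reachable cluster and an illustrative transform id:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// DeferValidation is useful when the source index does not exist yet.
	res, err := updatetransform.NewUpdateTransformFunc(es.Transport)("my-transform").
		Description("nightly rollup").
		DeferValidation(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated:", res.Id, "version:", res.Version)
}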
+func (r UpdateTransform) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.update_transform") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.update_transform") + if reader := instrument.RecordRequestBody(ctx, "transform.update_transform", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.update_transform") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateTransform query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatetransform.Response +func (r UpdateTransform) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.update_transform") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateTransform headers map. +func (r *UpdateTransform) Header(key, value string) *UpdateTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. +// API Name: transformid +func (r *UpdateTransform) _transformid(transformid string) *UpdateTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// DeferValidation When true, deferrable validations are not run. This behavior may be +// desired if the source index does not exist until after the transform is +// created. 
+// API name: defer_validation +func (r *UpdateTransform) DeferValidation(defervalidation bool) *UpdateTransform { + r.values.Set("defer_validation", strconv.FormatBool(defervalidation)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. +// API name: timeout +func (r *UpdateTransform) Timeout(duration string) *UpdateTransform { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateTransform) ErrorTrace(errortrace bool) *UpdateTransform { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateTransform) FilterPath(filterpaths ...string) *UpdateTransform { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateTransform) Human(human bool) *UpdateTransform { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateTransform) Pretty(pretty bool) *UpdateTransform { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// Free text description of the transform. +// API name: description +func (r *UpdateTransform) Description(description string) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Description = &description + + return r +} + +// The destination for the transform. +// API name: dest +func (r *UpdateTransform) Dest(dest types.TransformDestinationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Dest = dest.TransformDestinationCaster() + + return r +} + +// The interval between checks for changes in the source indices when the +// transform is running continuously. Also determines the retry interval in +// the event of transient failures while the transform is searching or +// indexing. The minimum value is 1s and the maximum is 1h. +// API name: frequency +func (r *UpdateTransform) Frequency(duration types.DurationVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Frequency = *duration.DurationCaster() + + return r +} + +// Defines optional transform metadata. +// API name: _meta +func (r *UpdateTransform) Meta_(metadata types.MetadataVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Meta_ = *metadata.MetadataCaster() + + return r +} + +// Defines a retention policy for the transform. 
Data that meets the defined +// criteria is deleted from the destination index. +// API name: retention_policy +func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RetentionPolicy = retentionpolicy.RetentionPolicyContainerCaster() + + return r +} + +// Defines optional transform settings. +// API name: settings +func (r *UpdateTransform) Settings(settings types.SettingsVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Settings = settings.SettingsCaster() + + return r +} + +// The source of the data for the transform. +// API name: source +func (r *UpdateTransform) Source(source types.TransformSourceVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Source = source.TransformSourceCaster() + + return r +} + +// Defines the properties transforms require to run continuously. +// API name: sync +func (r *UpdateTransform) Sync(sync types.SyncContainerVariant) *UpdateTransform { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Sync = sync.SyncContainerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/response.go new file mode 100644 index 000000000..9eec1e219 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package upgradetransforms + +// Response holds the response body struct for the package upgradetransforms +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 +type Response struct { + + // NeedsUpdate The number of transforms that need to be upgraded. + NeedsUpdate int `json:"needs_update"` + // NoAction The number of transforms that don’t require upgrading. + NoAction int `json:"no_action"` + // Updated The number of transforms that have been upgraded. 
+ Updated int `json:"updated"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/upgrade_transforms.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/upgrade_transforms.go new file mode 100644 index 000000000..be5a92144 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms/upgrade_transforms.go @@ -0,0 +1,391 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. +// This API identifies transforms that have a legacy configuration format and +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. +// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when +// Elasticsearch security features are enabled; the role used to read source +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. +package upgradetransforms + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpgradeTransforms struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpgradeTransforms type alias for index. 
+type NewUpgradeTransforms func() *UpgradeTransforms + +// NewUpgradeTransformsFunc returns a new instance of UpgradeTransforms with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpgradeTransformsFunc(tp elastictransport.Interface) NewUpgradeTransforms { + return func() *UpgradeTransforms { + n := New(tp) + + return n + } +} + +// Upgrade all transforms. +// +// Transforms are compatible across minor versions and between supported major +// versions. +// However, over time, the format of transform configuration information may +// change. +// This API identifies transforms that have a legacy configuration format and +// upgrades them to the latest version. +// It also cleans up the internal data structures that store the transform state +// and checkpoints. +// The upgrade does not affect the source and destination indices. +// The upgrade also does not affect the roles that transforms use when +// Elasticsearch security features are enabled; the role used to read source +// data and write to the destination index remains unchanged. +// +// If a transform upgrade step fails, the upgrade stops and an error is returned +// about the underlying issue. +// Resolve the issue then re-run the process again. +// A summary is returned when the upgrade is finished. +// +// To ensure continuous transforms remain running during a major version upgrade +// of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade +// transforms before upgrading the cluster. +// You may want to perform a recent cluster backup prior to the upgrade. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms +func New(tp elastictransport.Interface) *UpgradeTransforms { + r := &UpgradeTransforms{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpgradeTransforms) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + path.WriteString("_upgrade") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpgradeTransforms) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "transform.upgrade_transforms") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "transform.upgrade_transforms") + if reader := instrument.RecordRequestBody(ctx, "transform.upgrade_transforms", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "transform.upgrade_transforms") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpgradeTransforms query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a upgradetransforms.Response +func (r UpgradeTransforms) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.upgrade_transforms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
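Because the upgrade endpoint takes no request body, a dry run plus the three counters in the Response is usually all a caller needs before committing to the real upgrade. A sketch with illustrative wiring; DryRun is defined just below:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms"
)

func main() {
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{"http://localhost:9200"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// With dry_run=true the API reports what would be upgraded without changing anything.
	res, err := upgradetransforms.NewUpgradeTransformsFunc(es.Transport)().
		DryRun(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("needs_update=%d no_action=%d updated=%d\n",
		res.NeedsUpdate, res.NoAction, res.Updated)
}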
+func (r UpgradeTransforms) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "transform.upgrade_transforms") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the UpgradeTransforms query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the UpgradeTransforms headers map. +func (r *UpgradeTransforms) Header(key, value string) *UpgradeTransforms { + r.headers.Set(key, value) + + return r +} + +// DryRun When true, the request checks for updates but does not run them. +// API name: dry_run +func (r *UpgradeTransforms) DryRun(dryrun bool) *UpgradeTransforms { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// Timeout Period to wait for a response. If no response is received before the timeout +// expires, the request fails and +// returns an error. +// API name: timeout +func (r *UpgradeTransforms) Timeout(duration string) *UpgradeTransforms { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpgradeTransforms) ErrorTrace(errortrace bool) *UpgradeTransforms { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpgradeTransforms) FilterPath(filterpaths ...string) *UpgradeTransforms { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpgradeTransforms) Human(human bool) *UpgradeTransforms { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpgradeTransforms) Pretty(pretty bool) *UpgradeTransforms { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/typedclient.request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/typedclient.request.go new file mode 100644 index 000000000..007396618 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/typedclient.request.go @@ -0,0 +1,28 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package typedapi + +import ( + "context" + "net/http" +) + +// Request defines the TypedApi requests. +type Request interface { + Perform(ctx context.Context) (*http.Response, error) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/access.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/access.go new file mode 100644 index 000000000..a267f0412 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/access.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Access type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Access.ts#L22-L31 +type Access struct { + // Replication A list of indices permission entries for cross-cluster replication. + Replication []ReplicationAccess `json:"replication,omitempty"` + // Search A list of indices permission entries for cross-cluster search. + Search []SearchAccess `json:"search,omitempty"` +} + +// NewAccess returns a Access. +func NewAccess() *Access { + r := &Access{} + + return r +} + +type AccessVariant interface { + AccessCaster() *Access +} + +func (s *Access) AccessCaster() *Access { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgement.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgement.go new file mode 100644 index 000000000..fb085f773 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgement.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Acknowledgement type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/post/types.ts#L20-L23 +type Acknowledgement struct { + License []string `json:"license"` + Message string `json:"message"` +} + +func (s *Acknowledgement) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "license": + if err := dec.Decode(&s.License); err != nil { + return fmt.Errorf("%s | %w", "License", err) + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + } + } + return nil +} + +// NewAcknowledgement returns a Acknowledgement. +func NewAcknowledgement() *Acknowledgement { + r := &Acknowledgement{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgestate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgestate.go new file mode 100644 index 000000000..67aca0f8f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/acknowledgestate.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/acknowledgementoptions" +) + +// AcknowledgeState type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L109-L112 +type AcknowledgeState struct { + State acknowledgementoptions.AcknowledgementOptions `json:"state"` + Timestamp DateTime `json:"timestamp"` +} + +func (s *AcknowledgeState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewAcknowledgeState returns a AcknowledgeState. +func NewAcknowledgeState() *AcknowledgeState { + r := &AcknowledgeState{} + + return r +} + +type AcknowledgeStateVariant interface { + AcknowledgeStateCaster() *AcknowledgeState +} + +func (s *AcknowledgeState) AcknowledgeStateCaster() *AcknowledgeState { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/actionstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/actionstatus.go new file mode 100644 index 000000000..9dfd018eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/actionstatus.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ActionStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L125-L130 +type ActionStatus struct { + Ack AcknowledgeState `json:"ack"` + LastExecution *ExecutionState `json:"last_execution,omitempty"` + LastSuccessfulExecution *ExecutionState `json:"last_successful_execution,omitempty"` + LastThrottle *ThrottleState `json:"last_throttle,omitempty"` +} + +// NewActionStatus returns a ActionStatus. 
+func NewActionStatus() *ActionStatus { + r := &ActionStatus{} + + return r +} + +type ActionStatusVariant interface { + ActionStatusCaster() *ActionStatus +} + +func (s *ActionStatus) ActionStatusCaster() *ActionStatus { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstate.go new file mode 100644 index 000000000..2ec1e7d43 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstate.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ActivationState type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Activation.ts#L24-L27 +type ActivationState struct { + Active bool `json:"active"` + Timestamp DateTime `json:"timestamp"` +} + +func (s *ActivationState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + s.Active = value + case bool: + s.Active = v + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewActivationState returns a ActivationState. +func NewActivationState() *ActivationState { + r := &ActivationState{} + + return r +} + +type ActivationStateVariant interface { + ActivationStateCaster() *ActivationState +} + +func (s *ActivationState) ActivationStateCaster() *ActivationState { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstatus.go new file mode 100644 index 000000000..f66932e81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/activationstatus.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ActivationStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Activation.ts#L29-L33 +type ActivationStatus struct { + Actions WatcherStatusActions `json:"actions"` + State ActivationState `json:"state"` + Version int64 `json:"version"` +} + +func (s *ActivationStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewActivationStatus returns a ActivationStatus. +func NewActivationStatus() *ActivationStatus { + r := &ActivationStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocations.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocations.go new file mode 100644 index 000000000..c9e057542 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocations.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdaptiveAllocations type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L99-L116 +type AdaptiveAllocations struct { + // Enabled Turn on `adaptive_allocations`. + Enabled *bool `json:"enabled,omitempty"` + // MaxNumberOfAllocations The maximum number of allocations to scale to. + // If set, it must be greater than or equal to `min_number_of_allocations`. + MaxNumberOfAllocations *int `json:"max_number_of_allocations,omitempty"` + // MinNumberOfAllocations The minimum number of allocations to scale to. + // If set, it must be greater than or equal to 0. + // If not defined, the deployment scales to 0. + MinNumberOfAllocations *int `json:"min_number_of_allocations,omitempty"` +} + +func (s *AdaptiveAllocations) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "max_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfAllocations", err) + } + s.MaxNumberOfAllocations = &value + case float64: + f := int(v) + s.MaxNumberOfAllocations = &f + } + + case "min_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinNumberOfAllocations", err) + } + s.MinNumberOfAllocations = &value + case float64: + f := int(v) + s.MinNumberOfAllocations = &f + } + + } + } + return nil +} + +// NewAdaptiveAllocations returns a AdaptiveAllocations. +func NewAdaptiveAllocations() *AdaptiveAllocations { + r := &AdaptiveAllocations{} + + return r +} + +type AdaptiveAllocationsVariant interface { + AdaptiveAllocationsCaster() *AdaptiveAllocations +} + +func (s *AdaptiveAllocations) AdaptiveAllocationsCaster() *AdaptiveAllocations { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocationssettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocationssettings.go new file mode 100644 index 000000000..5eaa430f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveallocationssettings.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdaptiveAllocationsSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L109-L125 +type AdaptiveAllocationsSettings struct { + // Enabled If true, adaptive_allocations is enabled + Enabled bool `json:"enabled"` + // MaxNumberOfAllocations Specifies the maximum number of allocations to scale to. + // If set, it must be greater than or equal to min_number_of_allocations. + MaxNumberOfAllocations *int `json:"max_number_of_allocations,omitempty"` + // MinNumberOfAllocations Specifies the minimum number of allocations to scale to. + // If set, it must be greater than or equal to 0. + // If not defined, the deployment scales to 0. + MinNumberOfAllocations *int `json:"min_number_of_allocations,omitempty"` +} + +func (s *AdaptiveAllocationsSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "max_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfAllocations", err) + } + s.MaxNumberOfAllocations = &value + case float64: + f := int(v) + s.MaxNumberOfAllocations = &f + } + + case "min_number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinNumberOfAllocations", err) + } + s.MinNumberOfAllocations = &value + case float64: + f := int(v) + s.MinNumberOfAllocations = &f + } + + } + } + return nil +} + +// NewAdaptiveAllocationsSettings returns a AdaptiveAllocationsSettings. +func NewAdaptiveAllocationsSettings() *AdaptiveAllocationsSettings { + r := &AdaptiveAllocationsSettings{} + + return r +} + +type AdaptiveAllocationsSettingsVariant interface { + AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings +} + +func (s *AdaptiveAllocationsSettings) AdaptiveAllocationsSettingsCaster() *AdaptiveAllocationsSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveselection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveselection.go new file mode 100644 index 000000000..d2ac559ac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adaptiveselection.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdaptiveSelection type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L441-L470 +type AdaptiveSelection struct { + // AvgQueueSize The exponentially weighted moving average queue size of search requests on + // the keyed node. + AvgQueueSize *int64 `json:"avg_queue_size,omitempty"` + // AvgResponseTime The exponentially weighted moving average response time of search requests on + // the keyed node. + AvgResponseTime Duration `json:"avg_response_time,omitempty"` + // AvgResponseTimeNs The exponentially weighted moving average response time, in nanoseconds, of + // search requests on the keyed node. + AvgResponseTimeNs *int64 `json:"avg_response_time_ns,omitempty"` + // AvgServiceTime The exponentially weighted moving average service time of search requests on + // the keyed node. + AvgServiceTime Duration `json:"avg_service_time,omitempty"` + // AvgServiceTimeNs The exponentially weighted moving average service time, in nanoseconds, of + // search requests on the keyed node. + AvgServiceTimeNs *int64 `json:"avg_service_time_ns,omitempty"` + // OutgoingSearches The number of outstanding search requests to the keyed node from the node + // these stats are for. + OutgoingSearches *int64 `json:"outgoing_searches,omitempty"` + // Rank The rank of this node; used for shard selection when routing search requests. 
+ Rank *string `json:"rank,omitempty"` +} + +func (s *AdaptiveSelection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_queue_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvgQueueSize", err) + } + s.AvgQueueSize = &value + case float64: + f := int64(v) + s.AvgQueueSize = &f + } + + case "avg_response_time": + if err := dec.Decode(&s.AvgResponseTime); err != nil { + return fmt.Errorf("%s | %w", "AvgResponseTime", err) + } + + case "avg_response_time_ns": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvgResponseTimeNs", err) + } + s.AvgResponseTimeNs = &value + case float64: + f := int64(v) + s.AvgResponseTimeNs = &f + } + + case "avg_service_time": + if err := dec.Decode(&s.AvgServiceTime); err != nil { + return fmt.Errorf("%s | %w", "AvgServiceTime", err) + } + + case "avg_service_time_ns": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvgServiceTimeNs", err) + } + s.AvgServiceTimeNs = &value + case float64: + f := int64(v) + s.AvgServiceTimeNs = &f + } + + case "outgoing_searches": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OutgoingSearches", err) + } + s.OutgoingSearches = &value + case float64: + f := int64(v) + s.OutgoingSearches = &f + } + + case "rank": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Rank", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rank = &o + + } + } + return nil +} + +// NewAdaptiveSelection returns a AdaptiveSelection. +func NewAdaptiveSelection() *AdaptiveSelection { + r := &AdaptiveSelection{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addaction.go new file mode 100644 index 000000000..3d8f27693 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addaction.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AddAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/types.ts#L41-L95 +type AddAction struct { + // Alias Alias for the action. + // Index alias names support date math. + Alias *string `json:"alias,omitempty"` + // Aliases Aliases for the action. + // Index alias names support date math. + Aliases []string `json:"aliases,omitempty"` + // Filter Query used to limit documents the alias can access. + Filter *Query `json:"filter,omitempty"` + // Index Data stream or index for the action. + // Supports wildcards (`*`). + Index *string `json:"index,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + // Data stream aliases don’t support this parameter. + IndexRouting *string `json:"index_routing,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // IsHidden If `true`, the alias is hidden. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, sets the write index or data stream for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. + MustExist *bool `json:"must_exist,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + // Data stream aliases don’t support this parameter. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. + // Data stream aliases don’t support this parameter. 
+ SearchRouting *string `json:"search_routing,omitempty"` +} + +func (s *AddAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + if err := dec.Decode(&s.Alias); err != nil { + return fmt.Errorf("%s | %w", "Alias", err) + } + + case "aliases": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + s.Aliases = append(s.Aliases, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "index_routing": + if err := dec.Decode(&s.IndexRouting); err != nil { + return fmt.Errorf("%s | %w", "IndexRouting", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "is_hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsHidden", err) + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsWriteIndex", err) + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "must_exist": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MustExist", err) + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "search_routing": + if err := dec.Decode(&s.SearchRouting); err != nil { + return fmt.Errorf("%s | %w", "SearchRouting", err) + } + + } + } + return nil +} + +// NewAddAction returns a AddAction. +func NewAddAction() *AddAction { + r := &AddAction{} + + return r +} + +type AddActionVariant interface { + AddActionCaster() *AddAction +} + +func (s *AddAction) AddActionCaster() *AddAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addindicesblockstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addindicesblockstatus.go new file mode 100644 index 000000000..53d5ad8a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/addindicesblockstatus.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AddIndicesBlockStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 +type AddIndicesBlockStatus struct { + Blocked bool `json:"blocked"` + Name string `json:"name"` +} + +func (s *AddIndicesBlockStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "blocked": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Blocked", err) + } + s.Blocked = value + case bool: + s.Blocked = v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewAddIndicesBlockStatus returns a AddIndicesBlockStatus. +func NewAddIndicesBlockStatus() *AddIndicesBlockStatus { + r := &AddIndicesBlockStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregate.go new file mode 100644 index 000000000..d8315862a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AdjacencyMatrixAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L649-L654 +type AdjacencyMatrixAggregate struct { + Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *AdjacencyMatrixAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]AdjacencyMatrixBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []AdjacencyMatrixBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewAdjacencyMatrixAggregate returns a AdjacencyMatrixAggregate. +func NewAdjacencyMatrixAggregate() *AdjacencyMatrixAggregate { + r := &AdjacencyMatrixAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregation.go new file mode 100644 index 000000000..99cb4f61e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixaggregation.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AdjacencyMatrixAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L60-L70 +type AdjacencyMatrixAggregation struct { + // Filters Filters used to create buckets. + // At least one filter is required. + Filters map[string]Query `json:"filters,omitempty"` + // Separator Separator used to concatenate filter names. Defaults to &. 
+ Separator *string `json:"separator,omitempty"` +} + +func (s *AdjacencyMatrixAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + if s.Filters == nil { + s.Filters = make(map[string]Query, 0) + } + if err := dec.Decode(&s.Filters); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + } + } + return nil +} + +// NewAdjacencyMatrixAggregation returns a AdjacencyMatrixAggregation. +func NewAdjacencyMatrixAggregation() *AdjacencyMatrixAggregation { + r := &AdjacencyMatrixAggregation{ + Filters: make(map[string]Query), + } + + return r +} + +type AdjacencyMatrixAggregationVariant interface { + AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation +} + +func (s *AdjacencyMatrixAggregation) AdjacencyMatrixAggregationCaster() *AdjacencyMatrixAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixbucket.go new file mode 100644 index 000000000..95278bf64 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/adjacencymatrixbucket.go @@ -0,0 +1,645 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// AdjacencyMatrixBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L656-L658 +type AdjacencyMatrixBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` +} + +func (s *AdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := 
NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if 
err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s AdjacencyMatrixBucket) MarshalJSON() ([]byte, error) { + type opt AdjacencyMatrixBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewAdjacencyMatrixBucket returns a AdjacencyMatrixBucket. +func NewAdjacencyMatrixBucket() *AdjacencyMatrixBucket { + r := &AdjacencyMatrixBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregate.go new file mode 100644 index 000000000..72b08b39b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregate.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Aggregate holds the union for the following types: +// +// CardinalityAggregate +// HdrPercentilesAggregate +// HdrPercentileRanksAggregate +// TDigestPercentilesAggregate +// TDigestPercentileRanksAggregate +// PercentilesBucketAggregate +// MedianAbsoluteDeviationAggregate +// MinAggregate +// MaxAggregate +// SumAggregate +// AvgAggregate +// WeightedAvgAggregate +// ValueCountAggregate +// SimpleValueAggregate +// DerivativeAggregate +// BucketMetricValueAggregate +// StatsAggregate +// StatsBucketAggregate +// ExtendedStatsAggregate +// ExtendedStatsBucketAggregate +// GeoBoundsAggregate +// GeoCentroidAggregate +// HistogramAggregate +// DateHistogramAggregate +// AutoDateHistogramAggregate +// VariableWidthHistogramAggregate +// StringTermsAggregate +// LongTermsAggregate +// DoubleTermsAggregate +// UnmappedTermsAggregate +// LongRareTermsAggregate +// StringRareTermsAggregate +// UnmappedRareTermsAggregate +// MultiTermsAggregate +// MissingAggregate +// NestedAggregate +// ReverseNestedAggregate +// GlobalAggregate +// FilterAggregate +// ChildrenAggregate +// ParentAggregate +// SamplerAggregate +// UnmappedSamplerAggregate +// GeoHashGridAggregate +// GeoTileGridAggregate +// GeoHexGridAggregate +// RangeAggregate +// DateRangeAggregate +// GeoDistanceAggregate +// IpRangeAggregate +// IpPrefixAggregate +// FiltersAggregate +// AdjacencyMatrixAggregate +// SignificantLongTermsAggregate +// SignificantStringTermsAggregate +// UnmappedSignificantTermsAggregate +// CompositeAggregate +// FrequentItemSetsAggregate +// TimeSeriesAggregate +// ScriptedMetricAggregate +// TopHitsAggregate +// InferenceAggregate +// StringStatsAggregate +// BoxPlotAggregate +// TopMetricsAggregate +// TTestAggregate +// RateAggregate +// CumulativeCardinalityAggregate +// MatrixStatsAggregate +// GeoLineAggregate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L38-L125 +type Aggregate any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregatemetricdoubleproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregatemetricdoubleproperty.go new file mode 100644 index 000000000..4b6d78d7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregatemetricdoubleproperty.go @@ -0,0 +1,867 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// AggregateMetricDoubleProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/complex.ts#L60-L66 +type AggregateMetricDoubleProperty struct { + DefaultMetric string `json:"default_metric"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Metrics []string `json:"metrics"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_metric": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DefaultMetric", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DefaultMetric = o + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := 
NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", 
err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s AggregateMetricDoubleProperty) MarshalJSON() ([]byte, error) { + type innerAggregateMetricDoubleProperty AggregateMetricDoubleProperty + tmp := innerAggregateMetricDoubleProperty{ + DefaultMetric: s.DefaultMetric, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Meta: s.Meta, + Metrics: s.Metrics, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "aggregate_metric_double" + + return json.Marshal(tmp) +} + +// NewAggregateMetricDoubleProperty returns a AggregateMetricDoubleProperty. +func NewAggregateMetricDoubleProperty() *AggregateMetricDoubleProperty { + r := &AggregateMetricDoubleProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type AggregateMetricDoublePropertyVariant interface { + AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty +} + +func (s *AggregateMetricDoubleProperty) AggregateMetricDoublePropertyCaster() *AggregateMetricDoubleProperty { + return s +} + +func (s *AggregateMetricDoubleProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateorder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateorder.go new file mode 100644 index 000000000..80a150073 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateorder.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
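[Reviewer note] Two behaviors of this generated type are worth seeing end to end: the "fields"/"properties" decoders dispatch on each sub-mapping's "type" key into a concrete Property implementation, and the MarshalJSON override pins Type to "aggregate_metric_double" on the way out even when the struct's Type field was never set. A minimal round-trip sketch under those assumptions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Decoding: "raw" carries "type":"keyword", so the switch in
	// UnmarshalJSON stores it as a concrete *types.KeywordProperty.
	raw := []byte(`{"default_metric":"max","metrics":["min","max"],"fields":{"raw":{"type":"keyword"}}}`)

	var p types.AggregateMetricDoubleProperty
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	_, isKeyword := p.Fields["raw"].(*types.KeywordProperty)
	fmt.Println(isKeyword) // true

	// Encoding: MarshalJSON forces "type":"aggregate_metric_double".
	out, err := json.Marshal(&p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}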
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AggregateOrder holds the union for the following types: +// +// map[string]sortorder.SortOrder +// []map[string]sortorder.SortOrder +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1052-L1054 +type AggregateOrder any + +type AggregateOrderVariant interface { + AggregateOrderCaster() *AggregateOrder +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateoutput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateoutput.go new file mode 100644 index 000000000..f92b34eaa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregateoutput.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AggregateOutput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L101-L106 +type AggregateOutput struct { + Exponent *Weights `json:"exponent,omitempty"` + LogisticRegression *Weights `json:"logistic_regression,omitempty"` + WeightedMode *Weights `json:"weighted_mode,omitempty"` + WeightedSum *Weights `json:"weighted_sum,omitempty"` +} + +// NewAggregateOutput returns a AggregateOutput. +func NewAggregateOutput() *AggregateOutput { + r := &AggregateOutput{} + + return r +} + +type AggregateOutputVariant interface { + AggregateOutputCaster() *AggregateOutput +} + +func (s *AggregateOutput) AggregateOutputCaster() *AggregateOutput { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationbreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationbreakdown.go new file mode 100644 index 000000000..19eb48deb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationbreakdown.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
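[Reviewer note] AggregateOrder is likewise an open union: a single map for one ordering criterion, or a slice of maps where later entries act as tie-breakers. A short sketch, assuming the enums/sortorder package vendored alongside these types:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
)

func main() {
	// Single criterion: order buckets by descending document count.
	var byCount types.AggregateOrder = map[string]sortorder.SortOrder{
		"_count": sortorder.Desc,
	}

	// Multiple criteria: each entry applies in turn as a tie-breaker.
	var tiered types.AggregateOrder = []map[string]sortorder.SortOrder{
		{"_count": sortorder.Desc},
		{"_key": sortorder.Asc},
	}

	a, _ := json.Marshal(byCount)
	b, _ := json.Marshal(tiered)
	fmt.Println(string(a)) // {"_count":"desc"}
	fmt.Println(string(b)) // [{"_count":"desc"},{"_key":"asc"}]
}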
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AggregationBreakdown type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L26-L39 +type AggregationBreakdown struct { + BuildAggregation int64 `json:"build_aggregation"` + BuildAggregationCount int64 `json:"build_aggregation_count"` + BuildLeafCollector int64 `json:"build_leaf_collector"` + BuildLeafCollectorCount int64 `json:"build_leaf_collector_count"` + Collect int64 `json:"collect"` + CollectCount int64 `json:"collect_count"` + Initialize int64 `json:"initialize"` + InitializeCount int64 `json:"initialize_count"` + PostCollection *int64 `json:"post_collection,omitempty"` + PostCollectionCount *int64 `json:"post_collection_count,omitempty"` + Reduce int64 `json:"reduce"` + ReduceCount int64 `json:"reduce_count"` +} + +func (s *AggregationBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_aggregation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildAggregation", err) + } + s.BuildAggregation = value + case float64: + f := int64(v) + s.BuildAggregation = f + } + + case "build_aggregation_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildAggregationCount", err) + } + s.BuildAggregationCount = value + case float64: + f := int64(v) + s.BuildAggregationCount = f + } + + case "build_leaf_collector": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildLeafCollector", err) + } + s.BuildLeafCollector = value + case float64: + f := int64(v) + s.BuildLeafCollector = f + } + + case "build_leaf_collector_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildLeafCollectorCount", err) + } + s.BuildLeafCollectorCount = value + case float64: + f := int64(v) + s.BuildLeafCollectorCount = f + } + + case "collect": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Collect", err) + } + s.Collect = value + case float64: + f := int64(v) + s.Collect = f + } + + case "collect_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err 
!= nil { + return fmt.Errorf("%s | %w", "CollectCount", err) + } + s.CollectCount = value + case float64: + f := int64(v) + s.CollectCount = f + } + + case "initialize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Initialize", err) + } + s.Initialize = value + case float64: + f := int64(v) + s.Initialize = f + } + + case "initialize_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitializeCount", err) + } + s.InitializeCount = value + case float64: + f := int64(v) + s.InitializeCount = f + } + + case "post_collection": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PostCollection", err) + } + s.PostCollection = &value + case float64: + f := int64(v) + s.PostCollection = &f + } + + case "post_collection_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PostCollectionCount", err) + } + s.PostCollectionCount = &value + case float64: + f := int64(v) + s.PostCollectionCount = &f + } + + case "reduce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Reduce", err) + } + s.Reduce = value + case float64: + f := int64(v) + s.Reduce = f + } + + case "reduce_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReduceCount", err) + } + s.ReduceCount = value + case float64: + f := int64(v) + s.ReduceCount = f + } + + } + } + return nil +} + +// NewAggregationBreakdown returns a AggregationBreakdown. +func NewAggregationBreakdown() *AggregationBreakdown { + r := &AggregationBreakdown{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofile.go new file mode 100644 index 000000000..7c0cf301d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofile.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
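[Reviewer note] The decoder above is deliberately lenient: every counter is accepted either as a JSON number or as a quoted numeric string, and both land in the same int64 field. A minimal sketch of that behavior:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "collect" arrives as a number, "collect_count" as a numeric string;
	// the custom UnmarshalJSON normalizes both.
	raw := []byte(`{"collect":1500,"collect_count":"3"}`)

	var b types.AggregationBreakdown
	if err := json.Unmarshal(raw, &b); err != nil {
		panic(err)
	}
	fmt.Println(b.Collect, b.CollectCount) // 1500 3
}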
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AggregationProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L85-L92 +type AggregationProfile struct { + Breakdown AggregationBreakdown `json:"breakdown"` + Children []AggregationProfile `json:"children,omitempty"` + Debug *AggregationProfileDebug `json:"debug,omitempty"` + Description string `json:"description"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *AggregationProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAggregationProfile returns a AggregationProfile. +func NewAggregationProfile() *AggregationProfile { + r := &AggregationProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledebug.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledebug.go new file mode 100644 index 000000000..410f22147 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledebug.go @@ -0,0 +1,556 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
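[Reviewer note] AggregationProfile nests recursively through Children, so a search profile decodes into a tree that can be walked depth-first. A minimal sketch with an illustrative two-node profile:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// walk prints one line per profile node, indenting children to show the
// aggregation tree.
func walk(p types.AggregationProfile, depth int) {
	fmt.Printf("%*s%s (%s): %dns\n", depth*2, "", p.Description, p.Type, p.TimeInNanos)
	for _, c := range p.Children {
		walk(c, depth+1)
	}
}

func main() {
	// Illustrative profile payload; field values are made up.
	raw := []byte(`{
		"type": "NumericTermsAggregator",
		"description": "genres",
		"time_in_nanos": 4280,
		"breakdown": {"collect": 4000, "collect_count": 120},
		"children": [
			{"type": "MaxAggregator", "description": "max_price",
			 "time_in_nanos": 980, "breakdown": {"collect": 900, "collect_count": 120}}
		]
	}`)

	var p types.AggregationProfile
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	walk(p, 0)
}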
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AggregationProfileDebug type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L42-L76 +type AggregationProfileDebug struct { + BruteForceUsed *int `json:"brute_force_used,omitempty"` + BuiltBuckets *int `json:"built_buckets,omitempty"` + CharsFetched *int `json:"chars_fetched,omitempty"` + CollectAnalyzedCount *int `json:"collect_analyzed_count,omitempty"` + CollectAnalyzedNs *int `json:"collect_analyzed_ns,omitempty"` + CollectionStrategy *string `json:"collection_strategy,omitempty"` + DeferredAggregators []string `json:"deferred_aggregators,omitempty"` + Delegate *string `json:"delegate,omitempty"` + DelegateDebug *AggregationProfileDebug `json:"delegate_debug,omitempty"` + DynamicPruningAttempted *int `json:"dynamic_pruning_attempted,omitempty"` + DynamicPruningUsed *int `json:"dynamic_pruning_used,omitempty"` + EmptyCollectorsUsed *int `json:"empty_collectors_used,omitempty"` + ExtractCount *int `json:"extract_count,omitempty"` + ExtractNs *int `json:"extract_ns,omitempty"` + Filters []AggregationProfileDelegateDebugFilter `json:"filters,omitempty"` + HasFilter *bool `json:"has_filter,omitempty"` + MapReducer *string `json:"map_reducer,omitempty"` + NumericCollectorsUsed *int `json:"numeric_collectors_used,omitempty"` + OrdinalsCollectorsOverheadTooHigh *int `json:"ordinals_collectors_overhead_too_high,omitempty"` + OrdinalsCollectorsUsed *int `json:"ordinals_collectors_used,omitempty"` + ResultStrategy *string `json:"result_strategy,omitempty"` + SegmentsCollected *int `json:"segments_collected,omitempty"` + SegmentsCounted *int `json:"segments_counted,omitempty"` + SegmentsWithDeletedDocs *int `json:"segments_with_deleted_docs,omitempty"` + SegmentsWithDocCountField *int `json:"segments_with_doc_count_field,omitempty"` + SegmentsWithMultiValuedOrds *int `json:"segments_with_multi_valued_ords,omitempty"` + SegmentsWithSingleValuedOrds *int `json:"segments_with_single_valued_ords,omitempty"` + SkippedDueToNoData *int `json:"skipped_due_to_no_data,omitempty"` + StringHashingCollectorsUsed *int `json:"string_hashing_collectors_used,omitempty"` + SurvivingBuckets *int `json:"surviving_buckets,omitempty"` + TotalBuckets *int `json:"total_buckets,omitempty"` + ValuesFetched *int `json:"values_fetched,omitempty"` +} + +func (s *AggregationProfileDebug) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "brute_force_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BruteForceUsed", err) + } + s.BruteForceUsed = &value + case float64: + f := int(v) + s.BruteForceUsed = &f + } + + case "built_buckets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BuiltBuckets", err) + } + s.BuiltBuckets = &value + case float64: + f := int(v) + s.BuiltBuckets = &f + } + + case "chars_fetched": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err 
!= nil { + return fmt.Errorf("%s | %w", "CharsFetched", err) + } + s.CharsFetched = &value + case float64: + f := int(v) + s.CharsFetched = &f + } + + case "collect_analyzed_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollectAnalyzedCount", err) + } + s.CollectAnalyzedCount = &value + case float64: + f := int(v) + s.CollectAnalyzedCount = &f + } + + case "collect_analyzed_ns": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollectAnalyzedNs", err) + } + s.CollectAnalyzedNs = &value + case float64: + f := int(v) + s.CollectAnalyzedNs = &f + } + + case "collection_strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CollectionStrategy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CollectionStrategy = &o + + case "deferred_aggregators": + if err := dec.Decode(&s.DeferredAggregators); err != nil { + return fmt.Errorf("%s | %w", "DeferredAggregators", err) + } + + case "delegate": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Delegate", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delegate = &o + + case "delegate_debug": + if err := dec.Decode(&s.DelegateDebug); err != nil { + return fmt.Errorf("%s | %w", "DelegateDebug", err) + } + + case "dynamic_pruning_attempted": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DynamicPruningAttempted", err) + } + s.DynamicPruningAttempted = &value + case float64: + f := int(v) + s.DynamicPruningAttempted = &f + } + + case "dynamic_pruning_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DynamicPruningUsed", err) + } + s.DynamicPruningUsed = &value + case float64: + f := int(v) + s.DynamicPruningUsed = &f + } + + case "empty_collectors_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EmptyCollectorsUsed", err) + } + s.EmptyCollectorsUsed = &value + case float64: + f := int(v) + s.EmptyCollectorsUsed = &f + } + + case "extract_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExtractCount", err) + } + s.ExtractCount = &value + case float64: + f := int(v) + s.ExtractCount = &f + } + + case "extract_ns": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExtractNs", err) + } + s.ExtractNs = &value + case float64: + f := int(v) + s.ExtractNs = &f + } + + case "filters": + if err := dec.Decode(&s.Filters); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + + case "has_filter": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HasFilter", err) + } + s.HasFilter = &value + case bool: + s.HasFilter = &v + } + + 
case "map_reducer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MapReducer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MapReducer = &o + + case "numeric_collectors_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumericCollectorsUsed", err) + } + s.NumericCollectorsUsed = &value + case float64: + f := int(v) + s.NumericCollectorsUsed = &f + } + + case "ordinals_collectors_overhead_too_high": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OrdinalsCollectorsOverheadTooHigh", err) + } + s.OrdinalsCollectorsOverheadTooHigh = &value + case float64: + f := int(v) + s.OrdinalsCollectorsOverheadTooHigh = &f + } + + case "ordinals_collectors_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OrdinalsCollectorsUsed", err) + } + s.OrdinalsCollectorsUsed = &value + case float64: + f := int(v) + s.OrdinalsCollectorsUsed = &f + } + + case "result_strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultStrategy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultStrategy = &o + + case "segments_collected": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsCollected", err) + } + s.SegmentsCollected = &value + case float64: + f := int(v) + s.SegmentsCollected = &f + } + + case "segments_counted": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsCounted", err) + } + s.SegmentsCounted = &value + case float64: + f := int(v) + s.SegmentsCounted = &f + } + + case "segments_with_deleted_docs": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsWithDeletedDocs", err) + } + s.SegmentsWithDeletedDocs = &value + case float64: + f := int(v) + s.SegmentsWithDeletedDocs = &f + } + + case "segments_with_doc_count_field": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsWithDocCountField", err) + } + s.SegmentsWithDocCountField = &value + case float64: + f := int(v) + s.SegmentsWithDocCountField = &f + } + + case "segments_with_multi_valued_ords": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsWithMultiValuedOrds", err) + } + s.SegmentsWithMultiValuedOrds = &value + case float64: + f := int(v) + s.SegmentsWithMultiValuedOrds = &f + } + + case "segments_with_single_valued_ords": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsWithSingleValuedOrds", err) + } + s.SegmentsWithSingleValuedOrds = &value + case float64: + f := int(v) + 
s.SegmentsWithSingleValuedOrds = &f + } + + case "skipped_due_to_no_data": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkippedDueToNoData", err) + } + s.SkippedDueToNoData = &value + case float64: + f := int(v) + s.SkippedDueToNoData = &f + } + + case "string_hashing_collectors_used": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "StringHashingCollectorsUsed", err) + } + s.StringHashingCollectorsUsed = &value + case float64: + f := int(v) + s.StringHashingCollectorsUsed = &f + } + + case "surviving_buckets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SurvivingBuckets", err) + } + s.SurvivingBuckets = &value + case float64: + f := int(v) + s.SurvivingBuckets = &f + } + + case "total_buckets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalBuckets", err) + } + s.TotalBuckets = &value + case float64: + f := int(v) + s.TotalBuckets = &f + } + + case "values_fetched": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ValuesFetched", err) + } + s.ValuesFetched = &value + case float64: + f := int(v) + s.ValuesFetched = &f + } + + } + } + return nil +} + +// NewAggregationProfileDebug returns a AggregationProfileDebug. +func NewAggregationProfileDebug() *AggregationProfileDebug { + r := &AggregationProfileDebug{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledelegatedebugfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledelegatedebugfilter.go new file mode 100644 index 000000000..796f122cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationprofiledelegatedebugfilter.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AggregationProfileDelegateDebugFilter type. 
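[Reviewer note] AggregationProfileDebug is itself recursive through DelegateDebug, so an aggregator that delegates to another reports a chain of debug records. A short sketch that collects each level's collection_strategy; the "remap" and "dense" values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// strategies walks the DelegateDebug chain, gathering collection_strategy
// wherever a level reports one.
func strategies(d *types.AggregationProfileDebug) []string {
	var out []string
	for ; d != nil; d = d.DelegateDebug {
		if d.CollectionStrategy != nil {
			out = append(out, *d.CollectionStrategy)
		}
	}
	return out
}

func main() {
	raw := []byte(`{"collection_strategy":"remap","delegate":"FiltersAggregator","delegate_debug":{"collection_strategy":"dense"}}`)

	var d types.AggregationProfileDebug
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Println(strategies(&d)) // [remap dense]
}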
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L78-L83 +type AggregationProfileDelegateDebugFilter struct { + Query *string `json:"query,omitempty"` + ResultsFromMetadata *int `json:"results_from_metadata,omitempty"` + SegmentsCountedInConstantTime *int `json:"segments_counted_in_constant_time,omitempty"` + SpecializedFor *string `json:"specialized_for,omitempty"` +} + +func (s *AggregationProfileDelegateDebugFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "results_from_metadata": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ResultsFromMetadata", err) + } + s.ResultsFromMetadata = &value + case float64: + f := int(v) + s.ResultsFromMetadata = &f + } + + case "segments_counted_in_constant_time": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SegmentsCountedInConstantTime", err) + } + s.SegmentsCountedInConstantTime = &value + case float64: + f := int(v) + s.SegmentsCountedInConstantTime = &f + } + + case "specialized_for": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SpecializedFor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SpecializedFor = &o + + } + } + return nil +} + +// NewAggregationProfileDelegateDebugFilter returns a AggregationProfileDelegateDebugFilter. +func NewAggregationProfileDelegateDebugFilter() *AggregationProfileDelegateDebugFilter { + r := &AggregationProfileDelegateDebugFilter{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationrange.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationrange.go new file mode 100644 index 000000000..4589a3583 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregationrange.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AggregationRange type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L691-L704 +type AggregationRange struct { + // From Start of the range (inclusive). + From *Float64 `json:"from,omitempty"` + // Key Custom key to return the range with. + Key *string `json:"key,omitempty"` + // To End of the range (exclusive). + To *Float64 `json:"to,omitempty"` +} + +func (s *AggregationRange) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + if err := dec.Decode(&s.From); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + if err := dec.Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + } + } + return nil +} + +// NewAggregationRange returns a AggregationRange. +func NewAggregationRange() *AggregationRange { + r := &AggregationRange{} + + return r +} + +type AggregationRangeVariant interface { + AggregationRangeCaster() *AggregationRange +} + +func (s *AggregationRange) AggregationRangeCaster() *AggregationRange { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregations.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregations.go new file mode 100644 index 000000000..a06d4da29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aggregations.go @@ -0,0 +1,823 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Aggregations type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/AggregationContainer.ts#L107-L533 +type Aggregations struct { + AdditionalAggregationsProperty map[string]json.RawMessage `json:"-"` + // AdjacencyMatrix A bucket aggregation returning a form of adjacency matrix. 
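[Reviewer note] Per the field docs above, From is inclusive and To is exclusive, and both are pointers so a half-open range can omit one end. A minimal sketch of building one bucket for a range aggregation, assuming the package's Float64 alias defined elsewhere in these vendored types:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Bucket covering [0, 100); "cheap" is an illustrative bucket key.
	from, to := types.Float64(0), types.Float64(100)
	key := "cheap"

	r := types.NewAggregationRange()
	r.From, r.To, r.Key = &from, &to, &key

	data, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // e.g. {"from":0,"key":"cheap","to":100}
}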
+ // The request provides a collection of named filter expressions, similar to the + // `filters` aggregation. + // Each bucket in the response represents a non-empty cell in the matrix of + // intersecting filters. + AdjacencyMatrix *AdjacencyMatrixAggregation `json:"adjacency_matrix,omitempty"` + // Aggregations Sub-aggregations for this aggregation. + // Only applies to bucket aggregations. + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // AutoDateHistogram A multi-bucket aggregation similar to the date histogram, except instead of + // providing an interval to use as the width of each bucket, a target number of + // buckets is provided. + AutoDateHistogram *AutoDateHistogramAggregation `json:"auto_date_histogram,omitempty"` + // Avg A single-value metrics aggregation that computes the average of numeric + // values that are extracted from the aggregated documents. + Avg *AverageAggregation `json:"avg,omitempty"` + // AvgBucket A sibling pipeline aggregation which calculates the mean value of a specified + // metric in a sibling aggregation. + // The specified metric must be numeric and the sibling aggregation must be a + // multi-bucket aggregation. + AvgBucket *AverageBucketAggregation `json:"avg_bucket,omitempty"` + // Boxplot A metrics aggregation that computes a box plot of numeric values extracted + // from the aggregated documents. + Boxplot *BoxplotAggregation `json:"boxplot,omitempty"` + // BucketCorrelation A sibling pipeline aggregation which runs a correlation function on the + // configured sibling multi-bucket aggregation. + BucketCorrelation *BucketCorrelationAggregation `json:"bucket_correlation,omitempty"` + // BucketCountKsTest A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov + // test ("K-S test") against a provided distribution and the distribution + // implied by the documents counts in the configured sibling aggregation. + BucketCountKsTest *BucketKsAggregation `json:"bucket_count_ks_test,omitempty"` + // BucketScript A parent pipeline aggregation which runs a script which can perform per + // bucket computations on metrics in the parent multi-bucket aggregation. + BucketScript *BucketScriptAggregation `json:"bucket_script,omitempty"` + // BucketSelector A parent pipeline aggregation which runs a script to determine whether the + // current bucket will be retained in the parent multi-bucket aggregation. + BucketSelector *BucketSelectorAggregation `json:"bucket_selector,omitempty"` + // BucketSort A parent pipeline aggregation which sorts the buckets of its parent + // multi-bucket aggregation. + BucketSort *BucketSortAggregation `json:"bucket_sort,omitempty"` + // Cardinality A single-value metrics aggregation that calculates an approximate count of + // distinct values. + Cardinality *CardinalityAggregation `json:"cardinality,omitempty"` + // CategorizeText A multi-bucket aggregation that groups semi-structured text into buckets. + CategorizeText *CategorizeTextAggregation `json:"categorize_text,omitempty"` + // Children A single bucket aggregation that selects child documents that have the + // specified type, as defined in a `join` field. + Children *ChildrenAggregation `json:"children,omitempty"` + // Composite A multi-bucket aggregation that creates composite buckets from different + // sources. + // Unlike the other multi-bucket aggregations, you can use the `composite` + // aggregation to paginate *all* buckets from a multi-level aggregation + // efficiently. 
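+ // A hedged usage sketch (not part of the generated docs; field and key
+ // names are illustrative, and the `some` pointer helpers are assumed from
+ // this module's typedapi):
+ //
+ //     types.Aggregations{Composite: &types.CompositeAggregation{
+ //         Size: some.Int(100),
+ //         Sources: []map[string]types.CompositeAggregationSource{
+ //             {"by_tag": {Terms: &types.CompositeTermsAggregationSource{Field: some.String("tags")}}},
+ //         },
+ //     }}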
+ Composite *CompositeAggregation `json:"composite,omitempty"`
+ // CumulativeCardinality A parent pipeline aggregation which calculates the cumulative cardinality in
+ // a parent `histogram` or `date_histogram` aggregation.
+ CumulativeCardinality *CumulativeCardinalityAggregation `json:"cumulative_cardinality,omitempty"`
+ // CumulativeSum A parent pipeline aggregation which calculates the cumulative sum of a
+ // specified metric in a parent `histogram` or `date_histogram` aggregation.
+ CumulativeSum *CumulativeSumAggregation `json:"cumulative_sum,omitempty"`
+ // DateHistogram A multi-bucket values source based aggregation that can be applied on date
+ // values or date range values extracted from the documents.
+ // It dynamically builds fixed size (interval) buckets over the values.
+ DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"`
+ // DateRange A multi-bucket value source based aggregation that enables the user to define
+ // a set of date ranges - each representing a bucket.
+ DateRange *DateRangeAggregation `json:"date_range,omitempty"`
+ // Derivative A parent pipeline aggregation which calculates the derivative of a specified
+ // metric in a parent `histogram` or `date_histogram` aggregation.
+ Derivative *DerivativeAggregation `json:"derivative,omitempty"`
+ // DiversifiedSampler A filtering aggregation used to limit any sub aggregations' processing to a
+ // sample of the top-scoring documents.
+ // Similar to the `sampler` aggregation, but adds the ability to limit the
+ // number of matches that share a common value.
+ DiversifiedSampler *DiversifiedSamplerAggregation `json:"diversified_sampler,omitempty"`
+ // ExtendedStats A multi-value metrics aggregation that computes stats over numeric values
+ // extracted from the aggregated documents.
+ ExtendedStats *ExtendedStatsAggregation `json:"extended_stats,omitempty"`
+ // ExtendedStatsBucket A sibling pipeline aggregation which calculates a variety of stats across all
+ // buckets of a specified metric in a sibling aggregation.
+ ExtendedStatsBucket *ExtendedStatsBucketAggregation `json:"extended_stats_bucket,omitempty"`
+ // Filter A single bucket aggregation that narrows the set of documents to those that
+ // match a query.
+ Filter *Query `json:"filter,omitempty"`
+ // Filters A multi-bucket aggregation where each bucket contains the documents that
+ // match a query.
+ Filters *FiltersAggregation `json:"filters,omitempty"`
+ // FrequentItemSets A bucket aggregation which finds frequent item sets, a form of association
+ // rules mining that identifies items that often occur together.
+ FrequentItemSets *FrequentItemSetsAggregation `json:"frequent_item_sets,omitempty"`
+ // GeoBounds A metric aggregation that computes the geographic bounding box containing all
+ // values for a Geopoint or Geoshape field.
+ GeoBounds *GeoBoundsAggregation `json:"geo_bounds,omitempty"`
+ // GeoCentroid A metric aggregation that computes the weighted centroid from all coordinate
+ // values for geo fields.
+ GeoCentroid *GeoCentroidAggregation `json:"geo_centroid,omitempty"`
+ // GeoDistance A multi-bucket aggregation that works on `geo_point` fields.
+ // Evaluates the distance of each document value from an origin point and
+ // determines the buckets it belongs to, based on ranges defined in the request.
+ GeoDistance *GeoDistanceAggregation `json:"geo_distance,omitempty"`
+ // GeoLine Aggregates all `geo_point` values within a bucket into a `LineString` ordered
+ // by the chosen sort field.
+ GeoLine *GeoLineAggregation `json:"geo_line,omitempty"`
+ // GeohashGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values
+ // into buckets that represent a grid.
+ // Each cell is labeled using a geohash which is of user-definable precision.
+ GeohashGrid *GeoHashGridAggregation `json:"geohash_grid,omitempty"`
+ // GeohexGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values
+ // into buckets that represent a grid.
+ // Each cell corresponds to a H3 cell index and is labeled using the H3Index
+ // representation.
+ GeohexGrid *GeohexGridAggregation `json:"geohex_grid,omitempty"`
+ // GeotileGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values
+ // into buckets that represent a grid.
+ // Each cell corresponds to a map tile as used by many online map sites.
+ GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"`
+ // Global Defines a single bucket of all the documents within the search execution
+ // context.
+ // This context is defined by the indices and the document types you’re
+ // searching on, but is not influenced by the search query itself.
+ Global *GlobalAggregation `json:"global,omitempty"`
+ // Histogram A multi-bucket values source based aggregation that can be applied on numeric
+ // values or numeric range values extracted from the documents.
+ // It dynamically builds fixed size (interval) buckets over the values.
+ Histogram *HistogramAggregation `json:"histogram,omitempty"`
+ // Inference A parent pipeline aggregation which loads a pre-trained model and performs
+ // inference on the collated result fields from the parent bucket aggregation.
+ Inference *InferenceAggregation `json:"inference,omitempty"`
+ // IpPrefix A bucket aggregation that groups documents based on the network or
+ // sub-network of an IP address.
+ IpPrefix *IpPrefixAggregation `json:"ip_prefix,omitempty"`
+ // IpRange A multi-bucket value source based aggregation that enables the user to define
+ // a set of IP ranges - each representing a bucket.
+ IpRange *IpRangeAggregation `json:"ip_range,omitempty"`
+ Line *GeoLineAggregation `json:"line,omitempty"`
+ // MatrixStats A numeric aggregation that computes the following statistics over a set of
+ // document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`,
+ // `covariance`, and `correlation`.
+ MatrixStats *MatrixStatsAggregation `json:"matrix_stats,omitempty"`
+ // Max A single-value metrics aggregation that returns the maximum value among the
+ // numeric values extracted from the aggregated documents.
+ Max *MaxAggregation `json:"max,omitempty"`
+ // MaxBucket A sibling pipeline aggregation which identifies the bucket(s) with the
+ // maximum value of a specified metric in a sibling aggregation and outputs both
+ // the value and the key(s) of the bucket(s).
+ MaxBucket *MaxBucketAggregation `json:"max_bucket,omitempty"`
+ // MedianAbsoluteDeviation A single-value aggregation that approximates the median absolute deviation of
+ // its search results.
+ MedianAbsoluteDeviation *MedianAbsoluteDeviationAggregation `json:"median_absolute_deviation,omitempty"`
+ Meta Metadata `json:"meta,omitempty"`
+ // Min A single-value metrics aggregation that returns the minimum value among
+ // numeric values extracted from the aggregated documents.
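+ // For example (a sketch; `price` is an illustrative field name and
+ // `some.String` is assumed from this module's typedapi helpers):
+ //
+ //     types.Aggregations{Min: &types.MinAggregation{Field: some.String("price")}}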
+ Min *MinAggregation `json:"min,omitempty"`
+ // MinBucket A sibling pipeline aggregation which identifies the bucket(s) with the
+ // minimum value of a specified metric in a sibling aggregation and outputs both
+ // the value and the key(s) of the bucket(s).
+ MinBucket *MinBucketAggregation `json:"min_bucket,omitempty"`
+ // Missing A field data based single bucket aggregation that creates a bucket of all
+ // documents in the current document set context that are missing a field value
+ // (effectively, missing a field or having the configured NULL value set).
+ Missing *MissingAggregation `json:"missing,omitempty"`
+ MovingAvg MovingAverageAggregation `json:"moving_avg,omitempty"`
+ // MovingFn Given an ordered series of data, "slides" a window across the data and runs a
+ // custom script on each window of data.
+ // For convenience, a number of common functions are predefined such as `min`,
+ // `max`, and moving averages.
+ MovingFn *MovingFunctionAggregation `json:"moving_fn,omitempty"`
+ // MovingPercentiles Given an ordered series of percentiles, "slides" a window across those
+ // percentiles and computes cumulative percentiles.
+ MovingPercentiles *MovingPercentilesAggregation `json:"moving_percentiles,omitempty"`
+ // MultiTerms A multi-bucket value source based aggregation where buckets are dynamically
+ // built - one per unique set of values.
+ MultiTerms *MultiTermsAggregation `json:"multi_terms,omitempty"`
+ // Nested A special single bucket aggregation that enables aggregating nested
+ // documents.
+ Nested *NestedAggregation `json:"nested,omitempty"`
+ // Normalize A parent pipeline aggregation which calculates the specific
+ // normalized/rescaled value for a specific bucket value.
+ Normalize *NormalizeAggregation `json:"normalize,omitempty"`
+ // Parent A special single bucket aggregation that selects parent documents that have
+ // the specified type, as defined in a `join` field.
+ Parent *ParentAggregation `json:"parent,omitempty"`
+ // PercentileRanks A multi-value metrics aggregation that calculates one or more percentile
+ // ranks over numeric values extracted from the aggregated documents.
+ PercentileRanks *PercentileRanksAggregation `json:"percentile_ranks,omitempty"`
+ // Percentiles A multi-value metrics aggregation that calculates one or more percentiles
+ // over numeric values extracted from the aggregated documents.
+ Percentiles *PercentilesAggregation `json:"percentiles,omitempty"`
+ // PercentilesBucket A sibling pipeline aggregation which calculates percentiles across all buckets
+ // of a specified metric in a sibling aggregation.
+ PercentilesBucket *PercentilesBucketAggregation `json:"percentiles_bucket,omitempty"`
+ // RandomSampler A single bucket aggregation that randomly includes documents in the
+ // aggregated results.
+ // Sampling provides significant speed improvement at the cost of accuracy.
+ RandomSampler *RandomSamplerAggregation `json:"random_sampler,omitempty"`
+ // Range A multi-bucket value source based aggregation that enables the user to define
+ // a set of ranges - each representing a bucket.
+ Range *RangeAggregation `json:"range,omitempty"`
+ // RareTerms A multi-bucket value source based aggregation which finds "rare" terms —
+ // terms that are at the long-tail of the distribution and are not frequent.
+ RareTerms *RareTermsAggregation `json:"rare_terms,omitempty"`
+ // Rate Calculates a rate of documents or a field in each bucket.
+ // Can only be used inside a `date_histogram` or `composite` aggregation.
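+ // A hedged sketch of nesting `rate` under a `date_histogram` (names are
+ // illustrative; `some` and the `calendarinterval` enum package are assumed
+ // from this module's typedapi):
+ //
+ //     types.Aggregations{
+ //         DateHistogram: &types.DateHistogramAggregation{
+ //             Field:            some.String("@timestamp"),
+ //             CalendarInterval: &calendarinterval.Month,
+ //         },
+ //         Aggregations: map[string]types.Aggregations{
+ //             "bytes_per_day": {Rate: &types.RateAggregation{Field: some.String("bytes"), Unit: &calendarinterval.Day}},
+ //         },
+ //     }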
+ Rate *RateAggregation `json:"rate,omitempty"`
+ // ReverseNested A special single bucket aggregation that enables aggregating on parent
+ // documents from nested documents.
+ // Should only be defined inside a `nested` aggregation.
+ ReverseNested *ReverseNestedAggregation `json:"reverse_nested,omitempty"`
+ // Sampler A filtering aggregation used to limit any sub aggregations' processing to a
+ // sample of the top-scoring documents.
+ Sampler *SamplerAggregation `json:"sampler,omitempty"`
+ // ScriptedMetric A metric aggregation that uses scripts to provide a metric output.
+ ScriptedMetric *ScriptedMetricAggregation `json:"scripted_metric,omitempty"`
+ // SerialDiff An aggregation that subtracts values in a time series from themselves at
+ // different time lags or periods.
+ SerialDiff *SerialDifferencingAggregation `json:"serial_diff,omitempty"`
+ // SignificantTerms Returns interesting or unusual occurrences of terms in a set.
+ SignificantTerms *SignificantTermsAggregation `json:"significant_terms,omitempty"`
+ // SignificantText Returns interesting or unusual occurrences of free-text terms in a set.
+ SignificantText *SignificantTextAggregation `json:"significant_text,omitempty"`
+ // Stats A multi-value metrics aggregation that computes stats over numeric values
+ // extracted from the aggregated documents.
+ Stats *StatsAggregation `json:"stats,omitempty"`
+ // StatsBucket A sibling pipeline aggregation which calculates a variety of stats across all
+ // buckets of a specified metric in a sibling aggregation.
+ StatsBucket *StatsBucketAggregation `json:"stats_bucket,omitempty"`
+ // StringStats A multi-value metrics aggregation that computes statistics over string values
+ // extracted from the aggregated documents.
+ StringStats *StringStatsAggregation `json:"string_stats,omitempty"`
+ // Sum A single-value metrics aggregation that sums numeric values that are
+ // extracted from the aggregated documents.
+ Sum *SumAggregation `json:"sum,omitempty"`
+ // SumBucket A sibling pipeline aggregation which calculates the sum of a specified metric
+ // across all buckets in a sibling aggregation.
+ SumBucket *SumBucketAggregation `json:"sum_bucket,omitempty"`
+ // TTest A metrics aggregation that performs a statistical hypothesis test in which
+ // the test statistic follows a Student’s t-distribution under the null
+ // hypothesis on numeric values extracted from the aggregated documents.
+ TTest *TTestAggregation `json:"t_test,omitempty"`
+ // Terms A multi-bucket value source based aggregation where buckets are dynamically
+ // built - one per unique value.
+ Terms *TermsAggregation `json:"terms,omitempty"`
+ // TimeSeries The time series aggregation queries data created using a time series index.
+ // This is typically data such as metrics or other data streams with a time
+ // component, and requires creating an index using the time series mode.
+ TimeSeries *TimeSeriesAggregation `json:"time_series,omitempty"`
+ // TopHits A metric aggregation that returns the top matching documents per bucket.
+ TopHits *TopHitsAggregation `json:"top_hits,omitempty"`
+ // TopMetrics A metric aggregation that selects metrics from the document with the largest
+ // or smallest sort value.
+ TopMetrics *TopMetricsAggregation `json:"top_metrics,omitempty"`
+ // ValueCount A single-value metrics aggregation that counts the number of values that are
+ // extracted from the aggregated documents.
+ ValueCount *ValueCountAggregation `json:"value_count,omitempty"` + // VariableWidthHistogram A multi-bucket aggregation similar to the histogram, except instead of + // providing an interval to use as the width of each bucket, a target number of + // buckets is provided. + VariableWidthHistogram *VariableWidthHistogramAggregation `json:"variable_width_histogram,omitempty"` + // WeightedAvg A single-value metrics aggregation that computes the weighted average of + // numeric values that are extracted from the aggregated documents. + WeightedAvg *WeightedAverageAggregation `json:"weighted_avg,omitempty"` +} + +func (s *Aggregations) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjacency_matrix": + if err := dec.Decode(&s.AdjacencyMatrix); err != nil { + return fmt.Errorf("%s | %w", "AdjacencyMatrix", err) + } + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "auto_date_histogram": + if err := dec.Decode(&s.AutoDateHistogram); err != nil { + return fmt.Errorf("%s | %w", "AutoDateHistogram", err) + } + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + + case "avg_bucket": + if err := dec.Decode(&s.AvgBucket); err != nil { + return fmt.Errorf("%s | %w", "AvgBucket", err) + } + + case "boxplot": + if err := dec.Decode(&s.Boxplot); err != nil { + return fmt.Errorf("%s | %w", "Boxplot", err) + } + + case "bucket_correlation": + if err := dec.Decode(&s.BucketCorrelation); err != nil { + return fmt.Errorf("%s | %w", "BucketCorrelation", err) + } + + case "bucket_count_ks_test": + if err := dec.Decode(&s.BucketCountKsTest); err != nil { + return fmt.Errorf("%s | %w", "BucketCountKsTest", err) + } + + case "bucket_script": + if err := dec.Decode(&s.BucketScript); err != nil { + return fmt.Errorf("%s | %w", "BucketScript", err) + } + + case "bucket_selector": + if err := dec.Decode(&s.BucketSelector); err != nil { + return fmt.Errorf("%s | %w", "BucketSelector", err) + } + + case "bucket_sort": + if err := dec.Decode(&s.BucketSort); err != nil { + return fmt.Errorf("%s | %w", "BucketSort", err) + } + + case "cardinality": + if err := dec.Decode(&s.Cardinality); err != nil { + return fmt.Errorf("%s | %w", "Cardinality", err) + } + + case "categorize_text": + if err := dec.Decode(&s.CategorizeText); err != nil { + return fmt.Errorf("%s | %w", "CategorizeText", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "composite": + if err := dec.Decode(&s.Composite); err != nil { + return fmt.Errorf("%s | %w", "Composite", err) + } + + case "cumulative_cardinality": + if err := dec.Decode(&s.CumulativeCardinality); err != nil { + return fmt.Errorf("%s | %w", "CumulativeCardinality", err) + } + + case "cumulative_sum": + if err := dec.Decode(&s.CumulativeSum); err != nil { + return fmt.Errorf("%s | %w", "CumulativeSum", err) + } + + case "date_histogram": + if err := dec.Decode(&s.DateHistogram); err != nil { + return fmt.Errorf("%s | %w", "DateHistogram", err) + } + + case "date_range": + if err := dec.Decode(&s.DateRange); err != nil { + return fmt.Errorf("%s | %w", "DateRange", err) + } + + case 
"derivative": + if err := dec.Decode(&s.Derivative); err != nil { + return fmt.Errorf("%s | %w", "Derivative", err) + } + + case "diversified_sampler": + if err := dec.Decode(&s.DiversifiedSampler); err != nil { + return fmt.Errorf("%s | %w", "DiversifiedSampler", err) + } + + case "extended_stats": + if err := dec.Decode(&s.ExtendedStats); err != nil { + return fmt.Errorf("%s | %w", "ExtendedStats", err) + } + + case "extended_stats_bucket": + if err := dec.Decode(&s.ExtendedStatsBucket); err != nil { + return fmt.Errorf("%s | %w", "ExtendedStatsBucket", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "filters": + if err := dec.Decode(&s.Filters); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + + case "frequent_item_sets": + if err := dec.Decode(&s.FrequentItemSets); err != nil { + return fmt.Errorf("%s | %w", "FrequentItemSets", err) + } + + case "geo_bounds": + if err := dec.Decode(&s.GeoBounds); err != nil { + return fmt.Errorf("%s | %w", "GeoBounds", err) + } + + case "geo_centroid": + if err := dec.Decode(&s.GeoCentroid); err != nil { + return fmt.Errorf("%s | %w", "GeoCentroid", err) + } + + case "geo_distance": + if err := dec.Decode(&s.GeoDistance); err != nil { + return fmt.Errorf("%s | %w", "GeoDistance", err) + } + + case "geo_line": + if err := dec.Decode(&s.GeoLine); err != nil { + return fmt.Errorf("%s | %w", "GeoLine", err) + } + + case "geohash_grid": + if err := dec.Decode(&s.GeohashGrid); err != nil { + return fmt.Errorf("%s | %w", "GeohashGrid", err) + } + + case "geohex_grid": + if err := dec.Decode(&s.GeohexGrid); err != nil { + return fmt.Errorf("%s | %w", "GeohexGrid", err) + } + + case "geotile_grid": + if err := dec.Decode(&s.GeotileGrid); err != nil { + return fmt.Errorf("%s | %w", "GeotileGrid", err) + } + + case "global": + if err := dec.Decode(&s.Global); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + + case "histogram": + if err := dec.Decode(&s.Histogram); err != nil { + return fmt.Errorf("%s | %w", "Histogram", err) + } + + case "inference": + if err := dec.Decode(&s.Inference); err != nil { + return fmt.Errorf("%s | %w", "Inference", err) + } + + case "ip_prefix": + if err := dec.Decode(&s.IpPrefix); err != nil { + return fmt.Errorf("%s | %w", "IpPrefix", err) + } + + case "ip_range": + if err := dec.Decode(&s.IpRange); err != nil { + return fmt.Errorf("%s | %w", "IpRange", err) + } + + case "line": + if err := dec.Decode(&s.Line); err != nil { + return fmt.Errorf("%s | %w", "Line", err) + } + + case "matrix_stats": + if err := dec.Decode(&s.MatrixStats); err != nil { + return fmt.Errorf("%s | %w", "MatrixStats", err) + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + + case "max_bucket": + if err := dec.Decode(&s.MaxBucket); err != nil { + return fmt.Errorf("%s | %w", "MaxBucket", err) + } + + case "median_absolute_deviation": + if err := dec.Decode(&s.MedianAbsoluteDeviation); err != nil { + return fmt.Errorf("%s | %w", "MedianAbsoluteDeviation", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + + case "min_bucket": + if err := dec.Decode(&s.MinBucket); err != nil { + return fmt.Errorf("%s | %w", "MinBucket", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return 
fmt.Errorf("%s | %w", "Missing", err) + } + + case "moving_avg": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + kind := make(map[string]string, 0) + localDec := json.NewDecoder(source) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["model"] { + + case "linear": + o := NewLinearMovingAverageAggregation() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "linear", err) + } + s.MovingAvg = *o + case "simple": + o := NewSimpleMovingAverageAggregation() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple", err) + } + s.MovingAvg = *o + case "ewma": + o := NewEwmaMovingAverageAggregation() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ewma", err) + } + s.MovingAvg = *o + case "holt": + o := NewHoltMovingAverageAggregation() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "holt", err) + } + s.MovingAvg = *o + case "holt_winters": + o := NewHoltWintersMovingAverageAggregation() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "holt_winters", err) + } + s.MovingAvg = *o + default: + if err := localDec.Decode(&s.MovingAvg); err != nil { + return fmt.Errorf("MovingAvg | %w", err) + } + } + + case "moving_fn": + if err := dec.Decode(&s.MovingFn); err != nil { + return fmt.Errorf("%s | %w", "MovingFn", err) + } + + case "moving_percentiles": + if err := dec.Decode(&s.MovingPercentiles); err != nil { + return fmt.Errorf("%s | %w", "MovingPercentiles", err) + } + + case "multi_terms": + if err := dec.Decode(&s.MultiTerms); err != nil { + return fmt.Errorf("%s | %w", "MultiTerms", err) + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return fmt.Errorf("%s | %w", "Nested", err) + } + + case "normalize": + if err := dec.Decode(&s.Normalize); err != nil { + return fmt.Errorf("%s | %w", "Normalize", err) + } + + case "parent": + if err := dec.Decode(&s.Parent); err != nil { + return fmt.Errorf("%s | %w", "Parent", err) + } + + case "percentile_ranks": + if err := dec.Decode(&s.PercentileRanks); err != nil { + return fmt.Errorf("%s | %w", "PercentileRanks", err) + } + + case "percentiles": + if err := dec.Decode(&s.Percentiles); err != nil { + return fmt.Errorf("%s | %w", "Percentiles", err) + } + + case "percentiles_bucket": + if err := dec.Decode(&s.PercentilesBucket); err != nil { + return fmt.Errorf("%s | %w", "PercentilesBucket", err) + } + + case "random_sampler": + if err := dec.Decode(&s.RandomSampler); err != nil { + return fmt.Errorf("%s | %w", "RandomSampler", err) + } + + case "range": + if err := dec.Decode(&s.Range); err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + + case "rare_terms": + if err := dec.Decode(&s.RareTerms); err != nil { + return fmt.Errorf("%s | %w", "RareTerms", err) + } + + case "rate": + if err := dec.Decode(&s.Rate); err != nil { + return fmt.Errorf("%s | %w", "Rate", err) + } + + case "reverse_nested": + if err := dec.Decode(&s.ReverseNested); err != nil { + return fmt.Errorf("%s | %w", "ReverseNested", err) + } + + case "sampler": + if err := dec.Decode(&s.Sampler); err != nil { + return fmt.Errorf("%s | %w", "Sampler", err) + } + + case "scripted_metric": + if err := dec.Decode(&s.ScriptedMetric); err != nil { + return fmt.Errorf("%s | %w", "ScriptedMetric", err) + } + + case "serial_diff": + if err := dec.Decode(&s.SerialDiff); err != nil { + return fmt.Errorf("%s | %w", "SerialDiff", err) + } + + case 
"significant_terms": + if err := dec.Decode(&s.SignificantTerms); err != nil { + return fmt.Errorf("%s | %w", "SignificantTerms", err) + } + + case "significant_text": + if err := dec.Decode(&s.SignificantText); err != nil { + return fmt.Errorf("%s | %w", "SignificantText", err) + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "stats_bucket": + if err := dec.Decode(&s.StatsBucket); err != nil { + return fmt.Errorf("%s | %w", "StatsBucket", err) + } + + case "string_stats": + if err := dec.Decode(&s.StringStats); err != nil { + return fmt.Errorf("%s | %w", "StringStats", err) + } + + case "sum": + if err := dec.Decode(&s.Sum); err != nil { + return fmt.Errorf("%s | %w", "Sum", err) + } + + case "sum_bucket": + if err := dec.Decode(&s.SumBucket); err != nil { + return fmt.Errorf("%s | %w", "SumBucket", err) + } + + case "t_test": + if err := dec.Decode(&s.TTest); err != nil { + return fmt.Errorf("%s | %w", "TTest", err) + } + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return fmt.Errorf("%s | %w", "Terms", err) + } + + case "time_series": + if err := dec.Decode(&s.TimeSeries); err != nil { + return fmt.Errorf("%s | %w", "TimeSeries", err) + } + + case "top_hits": + if err := dec.Decode(&s.TopHits); err != nil { + return fmt.Errorf("%s | %w", "TopHits", err) + } + + case "top_metrics": + if err := dec.Decode(&s.TopMetrics); err != nil { + return fmt.Errorf("%s | %w", "TopMetrics", err) + } + + case "value_count": + if err := dec.Decode(&s.ValueCount); err != nil { + return fmt.Errorf("%s | %w", "ValueCount", err) + } + + case "variable_width_histogram": + if err := dec.Decode(&s.VariableWidthHistogram); err != nil { + return fmt.Errorf("%s | %w", "VariableWidthHistogram", err) + } + + case "weighted_avg": + if err := dec.Decode(&s.WeightedAvg); err != nil { + return fmt.Errorf("%s | %w", "WeightedAvg", err) + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalAggregationsProperty == nil { + s.AdditionalAggregationsProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalAggregationsProperty", err) + } + s.AdditionalAggregationsProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s Aggregations) MarshalJSON() ([]byte, error) { + type opt Aggregations + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalAggregationsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalAggregationsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewAggregations returns a Aggregations. 
+func NewAggregations() *Aggregations { + r := &Aggregations{ + AdditionalAggregationsProperty: make(map[string]json.RawMessage), + Aggregations: make(map[string]Aggregations), + } + + return r +} + +type AggregationsVariant interface { + AggregationsCaster() *Aggregations +} + +func (s *Aggregations) AggregationsCaster() *Aggregations { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alias.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alias.go new file mode 100644 index 000000000..0fd0286cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alias.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Alias type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/Alias.ts#L23-L53 +type Alias struct { + // Filter Query used to limit documents the alias can access. + Filter *Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + IndexRouting *string `json:"index_routing,omitempty"` + // IsHidden If `true`, the alias is hidden. + // All indices for the alias must have the same `is_hidden` value. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, the index is the write index for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. 
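+ // A hedged sketch of a filtered alias pinned to one search routing value
+ // (names and values are illustrative; `some.String` is assumed from this
+ // module's typedapi helpers):
+ //
+ //     types.Alias{
+ //         Filter:        &types.Query{Term: map[string]types.TermQuery{"user.id": {Value: "kimchy"}}},
+ //         SearchRouting: some.String("1"),
+ //     }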
+ SearchRouting *string `json:"search_routing,omitempty"` +} + +func (s *Alias) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "index_routing": + if err := dec.Decode(&s.IndexRouting); err != nil { + return fmt.Errorf("%s | %w", "IndexRouting", err) + } + + case "is_hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsHidden", err) + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsWriteIndex", err) + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "search_routing": + if err := dec.Decode(&s.SearchRouting); err != nil { + return fmt.Errorf("%s | %w", "SearchRouting", err) + } + + } + } + return nil +} + +// NewAlias returns a Alias. +func NewAlias() *Alias { + r := &Alias{} + + return r +} + +type AliasVariant interface { + AliasCaster() *Alias +} + +func (s *Alias) AliasCaster() *Alias { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasdefinition.go new file mode 100644 index 000000000..79f6a1418 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasdefinition.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AliasDefinition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/AliasDefinition.ts#L22-L54 +type AliasDefinition struct { + // Filter Query used to limit documents the alias can access. + Filter *Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. 
+ IndexRouting *string `json:"index_routing,omitempty"` + // IsHidden If `true`, the alias is hidden. + // All indices for the alias must have the same `is_hidden` value. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, the index is the write index for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. + SearchRouting *string `json:"search_routing,omitempty"` +} + +func (s *AliasDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "index_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexRouting", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexRouting = &o + + case "is_hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsHidden", err) + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsWriteIndex", err) + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing = &o + + case "search_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchRouting", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchRouting = &o + + } + } + return nil +} + +// NewAliasDefinition returns a AliasDefinition. +func NewAliasDefinition() *AliasDefinition { + r := &AliasDefinition{} + + return r +} + +type AliasDefinitionVariant interface { + AliasDefinitionCaster() *AliasDefinition +} + +func (s *AliasDefinition) AliasDefinitionCaster() *AliasDefinition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasesrecord.go new file mode 100644 index 000000000..2649a0508 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/aliasesrecord.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AliasesRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/aliases/types.ts#L22-L53 +type AliasesRecord struct { + // Alias alias name + Alias *string `json:"alias,omitempty"` + // Filter filter + Filter *string `json:"filter,omitempty"` + // Index index alias points to + Index *string `json:"index,omitempty"` + // IsWriteIndex write index + IsWriteIndex *string `json:"is_write_index,omitempty"` + // RoutingIndex index routing + RoutingIndex *string `json:"routing.index,omitempty"` + // RoutingSearch search routing + RoutingSearch *string `json:"routing.search,omitempty"` +} + +func (s *AliasesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias", "a": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Alias", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Alias = &o + + case "filter", "f", "fi": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Filter = &o + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "is_write_index", "w", "isWriteIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IsWriteIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IsWriteIndex = &o + + case "routing.index", "ri", "routingIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RoutingIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RoutingIndex = &o + + case "routing.search", "rs", "routingSearch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RoutingSearch", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RoutingSearch = &o + + } + } + return nil +} + +// NewAliasesRecord returns a AliasesRecord. 
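+//
+// Rows returned by the cat aliases API (`GET _cat/aliases?format=json`)
+// decode into this type via the custom unmarshaler above; a sketch (the
+// JSON literal is illustrative):
+//
+//	var rec AliasesRecord
+//	err := json.Unmarshal([]byte(`{"alias":"logs","index":"logs-000001","is_write_index":"true"}`), &rec)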
+func NewAliasesRecord() *AliasesRecord {
+ r := &AliasesRecord{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudservicesettings.go
new file mode 100644
index 000000000..85b72c75b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudservicesettings.go
@@ -0,0 +1,159 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// AlibabaCloudServiceSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L292-L337
+type AlibabaCloudServiceSettings struct {
+ // ApiKey A valid API key for the AlibabaCloud AI Search API.
+ ApiKey string `json:"api_key"`
+ // Host The name of the host address used for the inference task.
+ // You can find the host address in the API keys section of the documentation.
+ Host string `json:"host"`
+ // RateLimit This setting helps to minimize the number of rate limit errors returned from
+ // AlibabaCloud AI Search.
+ // By default, the `alibabacloud-ai-search` service sets the number of requests
+ // allowed per minute to `1000`.
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"`
+ // ServiceId The name of the model service to use for the inference task.
+ // The following service IDs are available for the `completion` task:
+ //
+ // * `ops-qwen-turbo`
+ // * `qwen-turbo`
+ // * `qwen-plus`
+ // * `qwen-max` / `qwen-max-longcontext`
+ //
+ // The following service ID is available for the `rerank` task:
+ //
+ // * `ops-bge-reranker-larger`
+ //
+ // The following service ID is available for the `sparse_embedding` task:
+ //
+ // * `ops-text-sparse-embedding-001`
+ //
+ // The following service IDs are available for the `text_embedding` task:
+ //
+ // * `ops-text-embedding-001`
+ // * `ops-text-embedding-zh-001`
+ // * `ops-text-embedding-en-001`
+ // * `ops-text-embedding-002`
+ ServiceId string `json:"service_id"`
+ // Workspace The name of the workspace used for the inference task.
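+ // A hedged sketch of a complete settings value (every literal below is a
+ // placeholder, not a working credential or endpoint):
+ //
+ //     AlibabaCloudServiceSettings{
+ //         ApiKey:    "<api-key>",
+ //         Host:      "<host-from-your-workspace>",
+ //         ServiceId: "ops-text-embedding-001",
+ //         Workspace: "default",
+ //     }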
+ Workspace string `json:"workspace"` +} + +func (s *AlibabaCloudServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "host": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "service_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceId = o + + case "workspace": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Workspace", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Workspace = o + + } + } + return nil +} + +// NewAlibabaCloudServiceSettings returns a AlibabaCloudServiceSettings. +func NewAlibabaCloudServiceSettings() *AlibabaCloudServiceSettings { + r := &AlibabaCloudServiceSettings{} + + return r +} + +type AlibabaCloudServiceSettingsVariant interface { + AlibabaCloudServiceSettingsCaster() *AlibabaCloudServiceSettings +} + +func (s *AlibabaCloudServiceSettings) AlibabaCloudServiceSettingsCaster() *AlibabaCloudServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudtasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudtasksettings.go new file mode 100644 index 000000000..b66cb4923 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alibabacloudtasksettings.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AlibabaCloudTaskSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L339-L353 +type AlibabaCloudTaskSettings struct { + // InputType For a `sparse_embedding` or `text_embedding` task, specify the type of input + // passed to the model. + // Valid values are: + // + // * `ingest` for storing document embeddings in a vector database. + // * `search` for storing embeddings of search queries run against a vector + // database to find relevant documents. + InputType *string `json:"input_type,omitempty"` + // ReturnToken For a `sparse_embedding` task, it affects whether the token name will be + // returned in the response. + // It defaults to `false`, which means only the token ID will be returned in the + // response. + ReturnToken *bool `json:"return_token,omitempty"` +} + +func (s *AlibabaCloudTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InputType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InputType = &o + + case "return_token": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnToken", err) + } + s.ReturnToken = &value + case bool: + s.ReturnToken = &v + } + + } + } + return nil +} + +// NewAlibabaCloudTaskSettings returns a AlibabaCloudTaskSettings. +func NewAlibabaCloudTaskSettings() *AlibabaCloudTaskSettings { + r := &AlibabaCloudTaskSettings{} + + return r +} + +type AlibabaCloudTaskSettingsVariant interface { + AlibabaCloudTaskSettingsCaster() *AlibabaCloudTaskSettings +} + +func (s *AlibabaCloudTaskSettings) AlibabaCloudTaskSettingsCaster() *AlibabaCloudTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allfield.go new file mode 100644 index 000000000..578aac383 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allfield.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AllField type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L29-L40 +type AllField struct { + Analyzer string `json:"analyzer"` + Enabled bool `json:"enabled"` + OmitNorms bool `json:"omit_norms"` + SearchAnalyzer string `json:"search_analyzer"` + Similarity string `json:"similarity"` + Store bool `json:"store"` + StoreTermVectorOffsets bool `json:"store_term_vector_offsets"` + StoreTermVectorPayloads bool `json:"store_term_vector_payloads"` + StoreTermVectorPositions bool `json:"store_term_vector_positions"` + StoreTermVectors bool `json:"store_term_vectors"` +} + +func (s *AllField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "omit_norms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OmitNorms", err) + } + s.OmitNorms = value + case bool: + s.OmitNorms = v + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = o + + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = value + case bool: + s.Store = v + } + + case "store_term_vector_offsets": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StoreTermVectorOffsets", err) + } + s.StoreTermVectorOffsets = value + case bool: + s.StoreTermVectorOffsets = v + } + + case "store_term_vector_payloads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StoreTermVectorPayloads", err) + } + s.StoreTermVectorPayloads = value + case bool: + s.StoreTermVectorPayloads = v + } + + case "store_term_vector_positions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StoreTermVectorPositions", err) + } + s.StoreTermVectorPositions = value + case bool: + s.StoreTermVectorPositions = v + } + + case "store_term_vectors": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + 
if err != nil { + return fmt.Errorf("%s | %w", "StoreTermVectors", err) + } + s.StoreTermVectors = value + case bool: + s.StoreTermVectors = v + } + + } + } + return nil +} + +// NewAllField returns a AllField. +func NewAllField() *AllField { + r := &AllField{} + + return r +} + +type AllFieldVariant interface { + AllFieldCaster() *AllField +} + +func (s *AllField) AllFieldCaster() *AllField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocateaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocateaction.go new file mode 100644 index 000000000..d59516c21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocateaction.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AllocateAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L133-L139 +type AllocateAction struct { + Exclude map[string]string `json:"exclude,omitempty"` + Include map[string]string `json:"include,omitempty"` + NumberOfReplicas *int `json:"number_of_replicas,omitempty"` + Require map[string]string `json:"require,omitempty"` + TotalShardsPerNode *int `json:"total_shards_per_node,omitempty"` +} + +func (s *AllocateAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + if s.Exclude == nil { + s.Exclude = make(map[string]string, 0) + } + if err := dec.Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + case "include": + if s.Include == nil { + s.Include = make(map[string]string, 0) + } + if err := dec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + + case "number_of_replicas": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfReplicas", err) + } + s.NumberOfReplicas = &value + case float64: + f := int(v) + s.NumberOfReplicas = &f + } + + case "require": + if s.Require == nil { + s.Require = make(map[string]string, 0) + } + if err := dec.Decode(&s.Require); err != nil { + return fmt.Errorf("%s | %w", "Require", err) + } + + case "total_shards_per_node": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalShardsPerNode", err) + } + s.TotalShardsPerNode = &value + case float64: + f := int(v) + s.TotalShardsPerNode = &f + } + + } + } + return nil +} + +// NewAllocateAction returns a AllocateAction. +func NewAllocateAction() *AllocateAction { + r := &AllocateAction{ + Exclude: make(map[string]string), + Include: make(map[string]string), + Require: make(map[string]string), + } + + return r +} + +type AllocateActionVariant interface { + AllocateActionCaster() *AllocateAction +} + +func (s *AllocateAction) AllocateActionCaster() *AllocateAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationdecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationdecision.go new file mode 100644 index 000000000..70406b468 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationdecision.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
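For context on the type above: AllocateAction models the ILM `allocate` action, which pins shards to nodes by attribute and optionally adjusts the replica count. A minimal sketch of building one with this package (the `box_type` attribute and its value are illustrative, not part of this changeset):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	replicas := 1
	action := types.NewAllocateAction() // Exclude/Include/Require maps come pre-initialized
	action.NumberOfReplicas = &replicas
	action.Require["box_type"] = "warm" // hypothetical node attribute

	body, _ := json.Marshal(action)
	fmt.Println(string(body)) // {"number_of_replicas":1,"require":{"box_type":"warm"}}
}

Because every map field is tagged `omitempty`, the still-empty Exclude and Include maps drop out of the marshalled body.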
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/allocationexplaindecision" +) + +// AllocationDecision type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L27-L31 +type AllocationDecision struct { + Decider string `json:"decider"` + Decision allocationexplaindecision.AllocationExplainDecision `json:"decision"` + Explanation string `json:"explanation"` +} + +func (s *AllocationDecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Decider", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decider = o + + case "decision": + if err := dec.Decode(&s.Decision); err != nil { + return fmt.Errorf("%s | %w", "Decision", err) + } + + case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Explanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = o + + } + } + return nil +} + +// NewAllocationDecision returns a AllocationDecision. +func NewAllocationDecision() *AllocationDecision { + r := &AllocationDecision{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationrecord.go new file mode 100644 index 000000000..eba33c064 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationrecord.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AllocationRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/allocation/types.ts#L25-L99 +type AllocationRecord struct { + // DiskAvail Free disk space available to Elasticsearch. 
+ // Elasticsearch retrieves this metric from the node’s operating system. + // Disk-based shard allocation uses this metric to assign shards to nodes based + // on available disk space. + DiskAvail ByteSize `json:"disk.avail,omitempty"` + // DiskIndices Disk space used by the node’s shards. Does not include disk space for the + // translog or unassigned shards. + // IMPORTANT: This metric double-counts disk space for hard-linked files, such + // as those created when shrinking, splitting, or cloning an index. + DiskIndices ByteSize `json:"disk.indices,omitempty"` + // DiskIndicesForecast Sum of shard size forecasts + DiskIndicesForecast ByteSize `json:"disk.indices.forecast,omitempty"` + // DiskPercent Total percentage of disk space in use. Calculated as `disk.used / + // disk.total`. + DiskPercent Percentage `json:"disk.percent,omitempty"` + // DiskTotal Total disk space for the node, including in-use and available space. + DiskTotal ByteSize `json:"disk.total,omitempty"` + // DiskUsed Total disk space in use. + // Elasticsearch retrieves this metric from the node’s operating system (OS). + // The metric includes disk space for: Elasticsearch, including the translog and + // unassigned shards; the node’s operating system; any other applications or + // files on the node. + // Unlike `disk.indices`, this metric does not double-count disk space for + // hard-linked files. + DiskUsed ByteSize `json:"disk.used,omitempty"` + // Host Network host for the node. Set using the `network.host` setting. + Host *string `json:"host,omitempty"` + // Ip IP address and port for the node. + Ip *string `json:"ip,omitempty"` + // Node Name for the node. Set using the `node.name` setting. + Node *string `json:"node,omitempty"` + // NodeRole Node roles + NodeRole *string `json:"node.role,omitempty"` + // Shards Number of primary and replica shards assigned to the node. 
+	Shards *string `json:"shards,omitempty"`
+	// ShardsUndesired Number of shards that are scheduled to be moved elsewhere in the cluster,
+	// or -1 if an allocator other than the desired-balance allocator is in use.
+	ShardsUndesired *string `json:"shards.undesired,omitempty"`
+	// WriteLoadForecast Sum of index write load forecasts.
+	WriteLoadForecast Stringifieddouble `json:"write_load.forecast,omitempty"`
+}
+
+func (s *AllocationRecord) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "disk.avail", "da", "diskAvail":
+			if err := dec.Decode(&s.DiskAvail); err != nil {
+				return fmt.Errorf("%s | %w", "DiskAvail", err)
+			}
+
+		case "disk.indices", "di", "diskIndices":
+			if err := dec.Decode(&s.DiskIndices); err != nil {
+				return fmt.Errorf("%s | %w", "DiskIndices", err)
+			}
+
+		case "disk.indices.forecast", "dif", "diskIndicesForecast":
+			if err := dec.Decode(&s.DiskIndicesForecast); err != nil {
+				return fmt.Errorf("%s | %w", "DiskIndicesForecast", err)
+			}
+
+		case "disk.percent", "dp", "diskPercent":
+			if err := dec.Decode(&s.DiskPercent); err != nil {
+				return fmt.Errorf("%s | %w", "DiskPercent", err)
+			}
+
+		case "disk.total", "dt", "diskTotal":
+			if err := dec.Decode(&s.DiskTotal); err != nil {
+				return fmt.Errorf("%s | %w", "DiskTotal", err)
+			}
+
+		case "disk.used", "du", "diskUsed":
+			if err := dec.Decode(&s.DiskUsed); err != nil {
+				return fmt.Errorf("%s | %w", "DiskUsed", err)
+			}
+
+		case "host", "h":
+			if err := dec.Decode(&s.Host); err != nil {
+				return fmt.Errorf("%s | %w", "Host", err)
+			}
+
+		case "ip":
+			if err := dec.Decode(&s.Ip); err != nil {
+				return fmt.Errorf("%s | %w", "Ip", err)
+			}
+
+		case "node", "n":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Node", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Node = &o
+
+		case "node.role", "r", "role", "nodeRole":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "NodeRole", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.NodeRole = &o
+
+		case "shards", "s":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Shards", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Shards = &o
+
+		case "shards.undesired":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ShardsUndesired", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ShardsUndesired = &o
+
+		case "write_load.forecast", "wlf", "writeLoadForecast":
+			if err := dec.Decode(&s.WriteLoadForecast); err != nil {
+				return fmt.Errorf("%s | %w", "WriteLoadForecast", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewAllocationRecord returns a AllocationRecord.
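One detail of the decoder above that is easy to miss: each case matches both the long cat-API column name and its short aliases (`da`, `dp`, `n`, `s`, ...), so a row from `GET _cat/allocation?format=json` decodes identically whichever header style the caller requested. A quick sketch, with illustrative values:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "n" is the short alias for "node"; "dp" for "disk.percent".
	row := []byte(`{"n":"es-data-0","dp":"73.5","shards":"42"}`)

	var rec types.AllocationRecord
	if err := json.Unmarshal(row, &rec); err != nil {
		panic(err)
	}
	fmt.Println(*rec.Node, *rec.Shards) // es-data-0 42
}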
+func NewAllocationRecord() *AllocationRecord { + r := &AllocationRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationstore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationstore.go new file mode 100644 index 000000000..0470f80c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/allocationstore.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AllocationStore type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L40-L47 +type AllocationStore struct { + AllocationId string `json:"allocation_id"` + Found bool `json:"found"` + InSync bool `json:"in_sync"` + MatchingSizeInBytes int64 `json:"matching_size_in_bytes"` + MatchingSyncId bool `json:"matching_sync_id"` + StoreException string `json:"store_exception"` +} + +func (s *AllocationStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AllocationId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllocationId = o + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + case "in_sync": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "InSync", err) + } + s.InSync = value + case bool: + s.InSync = v + } + + case "matching_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MatchingSizeInBytes", err) + } + s.MatchingSizeInBytes = value + case float64: + f := int64(v) + s.MatchingSizeInBytes = f + } + + case "matching_sync_id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MatchingSyncId", err) + } + 
s.MatchingSyncId = value + case bool: + s.MatchingSyncId = v + } + + case "store_exception": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StoreException", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StoreException = o + + } + } + return nil +} + +// NewAllocationStore returns a AllocationStore. +func NewAllocationStore() *AllocationStore { + r := &AllocationStore{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alwayscondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alwayscondition.go new file mode 100644 index 000000000..f1c797017 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/alwayscondition.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AlwaysCondition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L26-L26 +type AlwaysCondition struct { +} + +// NewAlwaysCondition returns a AlwaysCondition. +func NewAlwaysCondition() *AlwaysCondition { + r := &AlwaysCondition{} + + return r +} + +type AlwaysConditionVariant interface { + AlwaysConditionCaster() *AlwaysCondition +} + +func (s *AlwaysCondition) AlwaysConditionCaster() *AlwaysCondition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrockservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrockservicesettings.go new file mode 100644 index 000000000..5c83b9de9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrockservicesettings.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// AmazonBedrockServiceSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L366-L408
+type AmazonBedrockServiceSettings struct {
+	// AccessKey A valid AWS access key that has permissions to use Amazon Bedrock and access
+	// to models for inference requests.
+	AccessKey string `json:"access_key"`
+	// Model The base model ID or an ARN to a custom model based on a foundational model.
+	// The base model IDs can be found in the Amazon Bedrock documentation.
+	// Note that the model ID must be available for the provider chosen and your IAM
+	// user must have access to the model.
+	Model string `json:"model"`
+	// Provider The model provider for your deployment.
+	// Note that some providers may support only certain task types.
+	// Supported providers include:
+	//
+	// * `amazontitan` - available for `text_embedding` and `completion` task types
+	// * `anthropic` - available for `completion` task type only
+	// * `ai21labs` - available for `completion` task type only
+	// * `cohere` - available for `text_embedding` and `completion` task types
+	// * `meta` - available for `completion` task type only
+	// * `mistral` - available for `completion` task type only
+	Provider *string `json:"provider,omitempty"`
+	// RateLimit This setting helps to minimize the number of rate limit errors returned from
+	// Amazon Bedrock.
+	RateLimit *RateLimitSetting `json:"rate_limit,omitempty"`
+	// Region The region that your model or ARN is deployed in.
+	// The list of available regions per model can be found in the Amazon Bedrock
+	// documentation.
+	Region string `json:"region"`
+	// SecretKey A valid AWS secret key that is paired with the `access_key`.
+	// For information about creating and managing access and secret keys, refer to
+	// the AWS documentation.
+ SecretKey string `json:"secret_key"` +} + +func (s *AmazonBedrockServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AccessKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AccessKey = o + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = o + + case "provider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Provider", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Provider = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "region": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Region", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Region = o + + case "secret_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SecretKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SecretKey = o + + } + } + return nil +} + +// NewAmazonBedrockServiceSettings returns a AmazonBedrockServiceSettings. +func NewAmazonBedrockServiceSettings() *AmazonBedrockServiceSettings { + r := &AmazonBedrockServiceSettings{} + + return r +} + +type AmazonBedrockServiceSettingsVariant interface { + AmazonBedrockServiceSettingsCaster() *AmazonBedrockServiceSettings +} + +func (s *AmazonBedrockServiceSettings) AmazonBedrockServiceSettingsCaster() *AmazonBedrockServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrocktasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrocktasksettings.go new file mode 100644 index 000000000..53f76ae29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonbedrocktasksettings.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AmazonBedrockTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L410-L434 +type AmazonBedrockTaskSettings struct { + // MaxNewTokens For a `completion` task, it sets the maximum number for the output tokens to + // be generated. + MaxNewTokens *int `json:"max_new_tokens,omitempty"` + // Temperature For a `completion` task, it is a number between 0.0 and 1.0 that controls the + // apparent creativity of the results. + // At temperature 0.0 the model is most deterministic, at temperature 1.0 most + // random. + // It should not be used if `top_p` or `top_k` is specified. + Temperature *float32 `json:"temperature,omitempty"` + // TopK For a `completion` task, it limits samples to the top-K most likely words, + // balancing coherence and variability. + // It is only available for anthropic, cohere, and mistral providers. + // It is an alternative to `temperature`; it should not be used if `temperature` + // is specified. + TopK *float32 `json:"top_k,omitempty"` + // TopP For a `completion` task, it is a number in the range of 0.0 to 1.0, to + // eliminate low-probability tokens. + // Top-p uses nucleus sampling to select top tokens whose sum of likelihoods + // does not exceed a certain value, ensuring both variety and coherence. + // It is an alternative to `temperature`; it should not be used if `temperature` + // is specified. + TopP *float32 `json:"top_p,omitempty"` +} + +func (s *AmazonBedrockTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_new_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNewTokens", err) + } + s.MaxNewTokens = &value + case float64: + f := int(v) + s.MaxNewTokens = &f + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "top_k": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopK", err) + } + f := float32(value) + s.TopK = &f + case float64: + f := float32(v) + s.TopK = &f + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + } + } + return nil +} + +// NewAmazonBedrockTaskSettings returns a AmazonBedrockTaskSettings. 
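Taken together, the two Bedrock types above form the `service_settings` and `task_settings` halves of a `PUT _inference` request body for the `amazonbedrock` service. A hedged sketch of assembling that pair (credentials, region, and model ID are placeholders; per the comments above, `temperature` should not be combined with `top_p` or `top_k`):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	provider := "anthropic"
	temperature := float32(0.2)

	svc := types.NewAmazonBedrockServiceSettings()
	svc.AccessKey = "<aws-access-key>" // placeholder
	svc.SecretKey = "<aws-secret-key>" // placeholder
	svc.Region = "us-east-1"
	svc.Model = "anthropic.claude-3-haiku-20240307-v1:0" // illustrative model ID
	svc.Provider = &provider

	task := types.NewAmazonBedrockTaskSettings()
	task.Temperature = &temperature // leave TopP and TopK unset alongside temperature

	body, _ := json.Marshal(map[string]any{
		"service":          "amazonbedrock",
		"service_settings": svc,
		"task_settings":    task,
	})
	fmt.Println(string(body))
}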
+func NewAmazonBedrockTaskSettings() *AmazonBedrockTaskSettings {
+	r := &AmazonBedrockTaskSettings{}
+
+	return r
+}
+
+type AmazonBedrockTaskSettingsVariant interface {
+	AmazonBedrockTaskSettingsCaster() *AmazonBedrockTaskSettings
+}
+
+func (s *AmazonBedrockTaskSettings) AmazonBedrockTaskSettingsCaster() *AmazonBedrockTaskSettings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakerservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakerservicesettings.go
new file mode 100644
index 000000000..93ec8cf6b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakerservicesettings.go
@@ -0,0 +1,228 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerapi"
+)
+
+// AmazonSageMakerServiceSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L445-L499
+type AmazonSageMakerServiceSettings struct {
+	// AccessKey A valid AWS access key that has permissions to use Amazon SageMaker and
+	// access to models for invoking requests.
+	AccessKey string `json:"access_key"`
+	// Api The API format to use when calling SageMaker.
+	// Elasticsearch will convert the POST _inference request to this data format
+	// when invoking the SageMaker endpoint.
+	Api amazonsagemakerapi.AmazonSageMakerApi `json:"api"`
+	// BatchSize The maximum number of inputs in each batch. This value is used by inference
+	// ingestion pipelines when processing semantic values. It correlates to the
+	// number of times the SageMaker endpoint is invoked (one per batch of input).
+	BatchSize *int `json:"batch_size,omitempty"`
+	// Dimensions The number of dimensions returned by the text embedding models. If this
+	// value is not provided, it is guessed by invoking the endpoint for the
+	// `text_embedding` task.
+	Dimensions *int `json:"dimensions,omitempty"`
+	// EndpointName The name of the SageMaker endpoint.
+	EndpointName string `json:"endpoint_name"`
+	// InferenceComponentName The inference component to directly invoke when calling a multi-component
+	// endpoint.
+	InferenceComponentName *string `json:"inference_component_name,omitempty"`
+	// Region The region that your endpoint or Amazon Resource Name (ARN) is deployed in.
+ // The list of available regions per model can be found in the Amazon SageMaker + // documentation. + Region string `json:"region"` + // SecretKey A valid AWS secret key that is paired with the `access_key`. + // For information about creating and managing access and secret keys, refer to + // the AWS documentation. + SecretKey string `json:"secret_key"` + // TargetContainerHostname The container to directly invoke when calling a multi-container endpoint. + TargetContainerHostname *string `json:"target_container_hostname,omitempty"` + // TargetModel The model ID when calling a multi-model endpoint. + TargetModel *string `json:"target_model,omitempty"` +} + +func (s *AmazonSageMakerServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AccessKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AccessKey = o + + case "api": + if err := dec.Decode(&s.Api); err != nil { + return fmt.Errorf("%s | %w", "Api", err) + } + + case "batch_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BatchSize", err) + } + s.BatchSize = &value + case float64: + f := int(v) + s.BatchSize = &f + } + + case "dimensions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dimensions", err) + } + s.Dimensions = &value + case float64: + f := int(v) + s.Dimensions = &f + } + + case "endpoint_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EndpointName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EndpointName = o + + case "inference_component_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceComponentName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceComponentName = &o + + case "region": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Region", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Region = o + + case "secret_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SecretKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SecretKey = o + + case "target_container_hostname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetContainerHostname", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetContainerHostname = &o + + case "target_model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetModel", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetModel = &o + + } + } + return nil +} + +// NewAmazonSageMakerServiceSettings 
returns a AmazonSageMakerServiceSettings. +func NewAmazonSageMakerServiceSettings() *AmazonSageMakerServiceSettings { + r := &AmazonSageMakerServiceSettings{} + + return r +} + +type AmazonSageMakerServiceSettingsVariant interface { + AmazonSageMakerServiceSettingsCaster() *AmazonSageMakerServiceSettings +} + +func (s *AmazonSageMakerServiceSettings) AmazonSageMakerServiceSettingsCaster() *AmazonSageMakerServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakertasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakertasksettings.go new file mode 100644 index 000000000..34fbcf471 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/amazonsagemakertasksettings.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AmazonSageMakerTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L535-L564 +type AmazonSageMakerTaskSettings struct { + // CustomAttributes The AWS custom attributes passed verbatim through to the model running in the + // SageMaker Endpoint. + // Values will be returned in the `X-elastic-sagemaker-custom-attributes` + // header. + CustomAttributes *string `json:"custom_attributes,omitempty"` + // EnableExplanations The optional JMESPath expression used to override the EnableExplanations + // provided during endpoint creation. + EnableExplanations *string `json:"enable_explanations,omitempty"` + // InferenceId The capture data ID when enabled in the endpoint. + InferenceId *string `json:"inference_id,omitempty"` + // SessionId The stateful session identifier for a new or existing session. + // New sessions will be returned in the `X-elastic-sagemaker-new-session-id` + // header. + // Closed sessions will be returned in the + // `X-elastic-sagemaker-closed-session-id` header. + SessionId *string `json:"session_id,omitempty"` + // TargetVariant Specifies the variant when running with multi-variant Endpoints. 
+ TargetVariant *string `json:"target_variant,omitempty"` +} + +func (s *AmazonSageMakerTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_attributes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CustomAttributes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CustomAttributes = &o + + case "enable_explanations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EnableExplanations", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EnableExplanations = &o + + case "inference_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceId = &o + + case "session_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SessionId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SessionId = &o + + case "target_variant": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetVariant", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetVariant = &o + + } + } + return nil +} + +// NewAmazonSageMakerTaskSettings returns a AmazonSageMakerTaskSettings. +func NewAmazonSageMakerTaskSettings() *AmazonSageMakerTaskSettings { + r := &AmazonSageMakerTaskSettings{} + + return r +} + +type AmazonSageMakerTaskSettingsVariant interface { + AmazonSageMakerTaskSettingsCaster() *AmazonSageMakerTaskSettings +} + +func (s *AmazonSageMakerTaskSettings) AmazonSageMakerTaskSettingsCaster() *AmazonSageMakerTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfig.go new file mode 100644 index 000000000..5e42a1317 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfig.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
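A pattern worth calling out before the next file: every generated UnmarshalJSON in this package decodes scalars leniently, accepting `true` or `"true"`, `42` or `"42"`, because Elasticsearch APIs sometimes serialize numbers and booleans as strings. The recurring switch distills to this standalone sketch (not part of the vendored code):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexBool mirrors the generated pattern: accept a JSON bool or a quoted bool.
func flexBool(raw json.RawMessage) (bool, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return false, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.ParseBool(v)
	case bool:
		return v, nil
	}
	return false, fmt.Errorf("unexpected type %T", tmp)
}

func main() {
	for _, in := range []string{`true`, `"true"`} {
		b, err := flexBool(json.RawMessage(in))
		fmt.Println(b, err) // true <nil> for both encodings
	}
}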
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalysisConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L29-L77 +type AnalysisConfig struct { + // BucketSpan The size of the interval that the analysis is aggregated into, typically + // between `5m` and `1h`. This value should be either a whole number of days or + // equate to a + // whole number of buckets in one day. If the anomaly detection job uses a + // datafeed with aggregations, this value must also be divisible by the interval + // of the date histogram aggregation. + BucketSpan Duration `json:"bucket_span,omitempty"` + // CategorizationAnalyzer If `categorization_field_name` is specified, you can also define the analyzer + // that is used to interpret the categorization field. This property cannot be + // used at the same time as `categorization_filters`. The categorization + // analyzer specifies how the `categorization_field` is interpreted by the + // categorization process. The `categorization_analyzer` field can be specified + // either as a string or as an object. If it is a string, it must refer to a + // built-in analyzer or one added by another plugin. + CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer,omitempty"` + // CategorizationFieldName If this property is specified, the values of the specified field will be + // categorized. The resulting categories must be used in a detector by setting + // `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword + // `mlcategory`. + CategorizationFieldName *string `json:"categorization_field_name,omitempty"` + // CategorizationFilters If `categorization_field_name` is specified, you can also define optional + // filters. This property expects an array of regular expressions. The + // expressions are used to filter out matching sequences from the categorization + // field values. You can use this functionality to fine tune the categorization + // by excluding sequences from consideration when categories are defined. For + // example, you can exclude SQL statements that appear in your log files. This + // property cannot be used at the same time as `categorization_analyzer`. If you + // only want to define simple regular expression filters that are applied prior + // to tokenization, setting this property is the easiest method. If you also + // want to customize the tokenizer or post-tokenization filtering, use the + // `categorization_analyzer` property instead and include the filters as + // pattern_replace character filters. The effect is exactly the same. + CategorizationFilters []string `json:"categorization_filters,omitempty"` + // Detectors Detector configuration objects specify which data fields a job analyzes. They + // also specify which analytical functions are used. You can specify multiple + // detectors for a job. If the detectors array does not contain at least one + // detector, no analysis can occur and an error is returned. + Detectors []Detector `json:"detectors"` + // Influencers A comma separated list of influencer field names. Typically these can be the + // by, over, or partition fields that are used in the detector configuration. 
+ // You might also want to use a field name that is not specifically named in a + // detector, but is available as part of the input data. When you use multiple + // detectors, the use of influencers is recommended as it aggregates results for + // each influencer entity. + Influencers []string `json:"influencers,omitempty"` + // Latency The size of the window in which to expect data that is out of time order. If + // you specify a non-zero value, it must be greater than or equal to one second. + // NOTE: Latency is applicable only when you send data by using the post data + // API. + Latency Duration `json:"latency,omitempty"` + // ModelPruneWindow Advanced configuration option. Affects the pruning of models that have not + // been updated for the given time duration. The value must be set to a multiple + // of the `bucket_span`. If set too low, important information may be removed + // from the model. For jobs created in 8.1 and later, the default value is the + // greater of `30d` or 20 times `bucket_span`. + ModelPruneWindow Duration `json:"model_prune_window,omitempty"` + // MultivariateByFields This functionality is reserved for internal use. It is not supported for use + // in customer environments and is not subject to the support SLA of official GA + // features. If set to `true`, the analysis will automatically find correlations + // between metrics for a given by field value and report anomalies when those + // correlations cease to hold. For example, suppose CPU and memory usage on host + // A is usually highly correlated with the same metrics on host B. Perhaps this + // correlation occurs because they are running a load-balanced application. If + // you enable this property, anomalies will be reported when, for example, CPU + // usage on host A is high and the value of CPU usage on host B is low. That is + // to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU + // of host B. To use the `multivariate_by_fields` property, you must also + // specify `by_field_name` in your detector. + MultivariateByFields *bool `json:"multivariate_by_fields,omitempty"` + // PerPartitionCategorization Settings related to how categorization interacts with partition fields. + PerPartitionCategorization *PerPartitionCategorization `json:"per_partition_categorization,omitempty"` + // SummaryCountFieldName If this property is specified, the data that is fed to the job is expected to + // be pre-summarized. This property value is the name of the field that contains + // the count of raw data points that have been summarized. The same + // `summary_count_field_name` applies to all detectors in the job. NOTE: The + // `summary_count_field_name` property cannot be used with the `metric` + // function. 
+ SummaryCountFieldName *string `json:"summary_count_field_name,omitempty"` +} + +func (s *AnalysisConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "categorization_analyzer": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + categorizationanalyzer_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + + switch t { + + case "char_filter", "filter", "tokenizer": + o := NewCategorizationAnalyzerDefinition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + s.CategorizationAnalyzer = o + break categorizationanalyzer_field + + } + } + if s.CategorizationAnalyzer == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.CategorizationAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + } + + case "categorization_field_name": + if err := dec.Decode(&s.CategorizationFieldName); err != nil { + return fmt.Errorf("%s | %w", "CategorizationFieldName", err) + } + + case "categorization_filters": + if err := dec.Decode(&s.CategorizationFilters); err != nil { + return fmt.Errorf("%s | %w", "CategorizationFilters", err) + } + + case "detectors": + if err := dec.Decode(&s.Detectors); err != nil { + return fmt.Errorf("%s | %w", "Detectors", err) + } + + case "influencers": + if err := dec.Decode(&s.Influencers); err != nil { + return fmt.Errorf("%s | %w", "Influencers", err) + } + + case "latency": + if err := dec.Decode(&s.Latency); err != nil { + return fmt.Errorf("%s | %w", "Latency", err) + } + + case "model_prune_window": + if err := dec.Decode(&s.ModelPruneWindow); err != nil { + return fmt.Errorf("%s | %w", "ModelPruneWindow", err) + } + + case "multivariate_by_fields": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MultivariateByFields", err) + } + s.MultivariateByFields = &value + case bool: + s.MultivariateByFields = &v + } + + case "per_partition_categorization": + if err := dec.Decode(&s.PerPartitionCategorization); err != nil { + return fmt.Errorf("%s | %w", "PerPartitionCategorization", err) + } + + case "summary_count_field_name": + if err := dec.Decode(&s.SummaryCountFieldName); err != nil { + return fmt.Errorf("%s | %w", "SummaryCountFieldName", err) + } + + } + } + return nil +} + +// NewAnalysisConfig returns a AnalysisConfig. 
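The two-pass decode above exists because `categorization_analyzer` is a union: either the name of a built-in analyzer or a full definition object carrying `char_filter`, `filter`, or `tokenizer` keys, which the inner key scan routes to CategorizationAnalyzerDefinition. A sketch of the plain-name form (the JSON body is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"bucket_span":"15m","detectors":[],"categorization_analyzer":"standard"}`)

	var cfg types.AnalysisConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// The union field ends up holding the bare string "standard".
	fmt.Printf("%v %T\n", cfg.CategorizationAnalyzer, cfg.CategorizationAnalyzer)
}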
+func NewAnalysisConfig() *AnalysisConfig { + r := &AnalysisConfig{} + + return r +} + +type AnalysisConfigVariant interface { + AnalysisConfigCaster() *AnalysisConfig +} + +func (s *AnalysisConfig) AnalysisConfigCaster() *AnalysisConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfigread.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfigread.go new file mode 100644 index 000000000..338781044 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysisconfigread.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalysisConfigRead type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L79-L148 +type AnalysisConfigRead struct { + // BucketSpan The size of the interval that the analysis is aggregated into, typically + // between `5m` and `1h`. + BucketSpan Duration `json:"bucket_span"` + // CategorizationAnalyzer If `categorization_field_name` is specified, you can also define the analyzer + // that is used to interpret the categorization field. + // This property cannot be used at the same time as `categorization_filters`. + // The categorization analyzer specifies how the `categorization_field` is + // interpreted by the categorization process. + CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer,omitempty"` + // CategorizationFieldName If this property is specified, the values of the specified field will be + // categorized. + // The resulting categories must be used in a detector by setting + // `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword + // `mlcategory`. + CategorizationFieldName *string `json:"categorization_field_name,omitempty"` + // CategorizationFilters If `categorization_field_name` is specified, you can also define optional + // filters. + // This property expects an array of regular expressions. + // The expressions are used to filter out matching sequences from the + // categorization field values. + CategorizationFilters []string `json:"categorization_filters,omitempty"` + // Detectors An array of detector configuration objects. + // Detector configuration objects specify which data fields a job analyzes. + // They also specify which analytical functions are used. + // You can specify multiple detectors for a job. 
+ Detectors []DetectorRead `json:"detectors"` + // Influencers A comma separated list of influencer field names. + // Typically these can be the by, over, or partition fields that are used in the + // detector configuration. + // You might also want to use a field name that is not specifically named in a + // detector, but is available as part of the input data. + // When you use multiple detectors, the use of influencers is recommended as it + // aggregates results for each influencer entity. + Influencers []string `json:"influencers"` + // Latency The size of the window in which to expect data that is out of time order. + // Defaults to no latency. + // If you specify a non-zero value, it must be greater than or equal to one + // second. + Latency Duration `json:"latency,omitempty"` + // ModelPruneWindow Advanced configuration option. + // Affects the pruning of models that have not been updated for the given time + // duration. + // The value must be set to a multiple of the `bucket_span`. + // If set too low, important information may be removed from the model. + // Typically, set to `30d` or longer. + // If not set, model pruning only occurs if the model memory status reaches the + // soft limit or the hard limit. + // For jobs created in 8.1 and later, the default value is the greater of `30d` + // or 20 times `bucket_span`. + ModelPruneWindow Duration `json:"model_prune_window,omitempty"` + // MultivariateByFields This functionality is reserved for internal use. + // It is not supported for use in customer environments and is not subject to + // the support SLA of official GA features. + // If set to `true`, the analysis will automatically find correlations between + // metrics for a given by field value and report anomalies when those + // correlations cease to hold. + MultivariateByFields *bool `json:"multivariate_by_fields,omitempty"` + // PerPartitionCategorization Settings related to how categorization interacts with partition fields. + PerPartitionCategorization *PerPartitionCategorization `json:"per_partition_categorization,omitempty"` + // SummaryCountFieldName If this property is specified, the data that is fed to the job is expected to + // be pre-summarized. + // This property value is the name of the field that contains the count of raw + // data points that have been summarized. + // The same `summary_count_field_name` applies to all detectors in the job. 
+ SummaryCountFieldName *string `json:"summary_count_field_name,omitempty"` +} + +func (s *AnalysisConfigRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "categorization_analyzer": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + categorizationanalyzer_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + + switch t { + + case "char_filter", "filter", "tokenizer": + o := NewCategorizationAnalyzerDefinition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + s.CategorizationAnalyzer = o + break categorizationanalyzer_field + + } + } + if s.CategorizationAnalyzer == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.CategorizationAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + } + + case "categorization_field_name": + if err := dec.Decode(&s.CategorizationFieldName); err != nil { + return fmt.Errorf("%s | %w", "CategorizationFieldName", err) + } + + case "categorization_filters": + if err := dec.Decode(&s.CategorizationFilters); err != nil { + return fmt.Errorf("%s | %w", "CategorizationFilters", err) + } + + case "detectors": + if err := dec.Decode(&s.Detectors); err != nil { + return fmt.Errorf("%s | %w", "Detectors", err) + } + + case "influencers": + if err := dec.Decode(&s.Influencers); err != nil { + return fmt.Errorf("%s | %w", "Influencers", err) + } + + case "latency": + if err := dec.Decode(&s.Latency); err != nil { + return fmt.Errorf("%s | %w", "Latency", err) + } + + case "model_prune_window": + if err := dec.Decode(&s.ModelPruneWindow); err != nil { + return fmt.Errorf("%s | %w", "ModelPruneWindow", err) + } + + case "multivariate_by_fields": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MultivariateByFields", err) + } + s.MultivariateByFields = &value + case bool: + s.MultivariateByFields = &v + } + + case "per_partition_categorization": + if err := dec.Decode(&s.PerPartitionCategorization); err != nil { + return fmt.Errorf("%s | %w", "PerPartitionCategorization", err) + } + + case "summary_count_field_name": + if err := dec.Decode(&s.SummaryCountFieldName); err != nil { + return fmt.Errorf("%s | %w", "SummaryCountFieldName", err) + } + + } + } + return nil +} + +// NewAnalysisConfigRead returns a AnalysisConfigRead. +func NewAnalysisConfigRead() *AnalysisConfigRead { + r := &AnalysisConfigRead{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysislimits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysislimits.go new file mode 100644 index 000000000..9423b8ea5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysislimits.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalysisLimits type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L161-L172 +type AnalysisLimits struct { + // CategorizationExamplesLimit The maximum number of examples stored per category in memory and in the + // results data store. If you increase this value, more examples are available, + // however it requires that you have more storage available. If you set this + // value to 0, no examples are stored. NOTE: The `categorization_examples_limit` + // applies only to analysis that uses categorization. + CategorizationExamplesLimit *int64 `json:"categorization_examples_limit,omitempty"` + // ModelMemoryLimit The approximate maximum amount of memory resources that are required for + // analytical processing. Once this limit is approached, data pruning becomes + // more aggressive. Upon exceeding this limit, new entities are not modeled. If + // the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and + // less than 1024mb, that value is used instead of the default. The default + // value is relatively small to ensure that high resource usage is a conscious + // decision. If you have jobs that are expected to analyze high cardinality + // fields, you will likely need to use a higher value. If you specify a number + // instead of a string, the units are assumed to be MiB. Specifying a string is + // recommended for clarity. If you specify a byte size unit of `b` or `kb` and + // the number does not equate to a discrete number of megabytes, it is rounded + // down to the closest MiB. The minimum valid value is 1 MiB. If you specify a + // value less than 1 MiB, an error occurs. If you specify a value for the + // `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + // create jobs that have `model_memory_limit` values greater than that setting + // value. 
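+	// For example, the number `512` and the string `"512mb"` both denote 512 MiB.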
+ ModelMemoryLimit ByteSize `json:"model_memory_limit,omitempty"` +} + +func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "categorization_examples_limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CategorizationExamplesLimit", err) + } + s.CategorizationExamplesLimit = &value + case float64: + f := int64(v) + s.CategorizationExamplesLimit = &f + } + + case "model_memory_limit": + if err := dec.Decode(&s.ModelMemoryLimit); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + + } + } + return nil +} + +// NewAnalysisLimits returns a AnalysisLimits. +func NewAnalysisLimits() *AnalysisLimits { + r := &AnalysisLimits{} + + return r +} + +type AnalysisLimitsVariant interface { + AnalysisLimitsCaster() *AnalysisLimits +} + +func (s *AnalysisLimits) AnalysisLimitsCaster() *AnalysisLimits { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysismemorylimit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysismemorylimit.go new file mode 100644 index 000000000..cb408c912 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analysismemorylimit.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalysisMemoryLimit type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L174-L179 +type AnalysisMemoryLimit struct { + // ModelMemoryLimit Limits can be applied for the resources required to hold the mathematical + // models in memory. These limits are approximate and can be set per job. They + // do not control the memory used by other processes, for example the + // Elasticsearch Java processes. 
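+	// Unlike AnalysisLimits.ModelMemoryLimit, this is always a string (for
+	// example, `"1024mb"`).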
+ ModelMemoryLimit string `json:"model_memory_limit"` +} + +func (s *AnalysisMemoryLimit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = o + + } + } + return nil +} + +// NewAnalysisMemoryLimit returns a AnalysisMemoryLimit. +func NewAnalysisMemoryLimit() *AnalysisMemoryLimit { + r := &AnalysisMemoryLimit{} + + return r +} + +type AnalysisMemoryLimitVariant interface { + AnalysisMemoryLimitCaster() *AnalysisMemoryLimit +} + +func (s *AnalysisMemoryLimit) AnalysisMemoryLimitCaster() *AnalysisMemoryLimit { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analytics.go new file mode 100644 index 000000000..f370e9a29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analytics.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Analytics type. 
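+//
+// Its UnmarshalJSON is lenient about scalar types: the boolean fields accept
+// either a JSON boolean or its string form. A behavioural sketch, not extra
+// API surface:
+//
+//	var a Analytics
+//	_ = json.Unmarshal([]byte(`{"available":"true","enabled":false}`), &a)
+//	// a.Available == true, a.Enabled == false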
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L340-L342 +type Analytics struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + Stats AnalyticsStatistics `json:"stats"` +} + +func (s *Analytics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + } + } + return nil +} + +// NewAnalytics returns a Analytics. +func NewAnalytics() *Analytics { + r := &Analytics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticscollection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticscollection.go new file mode 100644 index 000000000..86e8c1421 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticscollection.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AnalyticsCollection type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 +type AnalyticsCollection struct { + // EventDataStream Data stream for the collection. + EventDataStream EventDataStream `json:"event_data_stream"` +} + +// NewAnalyticsCollection returns a AnalyticsCollection. +func NewAnalyticsCollection() *AnalyticsCollection { + r := &AnalyticsCollection{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticsstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticsstatistics.go new file mode 100644 index 000000000..85bd7f74d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyticsstatistics.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalyticsStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L58-L68 +type AnalyticsStatistics struct { + BoxplotUsage int64 `json:"boxplot_usage"` + CumulativeCardinalityUsage int64 `json:"cumulative_cardinality_usage"` + MovingPercentilesUsage int64 `json:"moving_percentiles_usage"` + MultiTermsUsage *int64 `json:"multi_terms_usage,omitempty"` + NormalizeUsage int64 `json:"normalize_usage"` + RateUsage int64 `json:"rate_usage"` + StringStatsUsage int64 `json:"string_stats_usage"` + TTestUsage int64 `json:"t_test_usage"` + TopMetricsUsage int64 `json:"top_metrics_usage"` +} + +func (s *AnalyticsStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boxplot_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BoxplotUsage", err) + } + s.BoxplotUsage = value + case float64: + f := int64(v) + s.BoxplotUsage = f + } + + case "cumulative_cardinality_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CumulativeCardinalityUsage", err) + } + s.CumulativeCardinalityUsage = value + case float64: + f := int64(v) + s.CumulativeCardinalityUsage = f + } + + case "moving_percentiles_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MovingPercentilesUsage", err) + } + s.MovingPercentilesUsage = value + case float64: + f := int64(v) + s.MovingPercentilesUsage = f + } + + case "multi_terms_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MultiTermsUsage", err) + } + s.MultiTermsUsage = &value + case float64: + f := int64(v) + s.MultiTermsUsage = &f + } + + case "normalize_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NormalizeUsage", err) + } + s.NormalizeUsage = value + 
case float64: + f := int64(v) + s.NormalizeUsage = f + } + + case "rate_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RateUsage", err) + } + s.RateUsage = value + case float64: + f := int64(v) + s.RateUsage = f + } + + case "string_stats_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StringStatsUsage", err) + } + s.StringStatsUsage = value + case float64: + f := int64(v) + s.StringStatsUsage = f + } + + case "t_test_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TTestUsage", err) + } + s.TTestUsage = value + case float64: + f := int64(v) + s.TTestUsage = f + } + + case "top_metrics_usage": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TopMetricsUsage", err) + } + s.TopMetricsUsage = value + case float64: + f := int64(v) + s.TopMetricsUsage = f + } + + } + } + return nil +} + +// NewAnalyticsStatistics returns a AnalyticsStatistics. +func NewAnalyticsStatistics() *AnalyticsStatistics { + r := &AnalyticsStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzedetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzedetail.go new file mode 100644 index 000000000..c7515da21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzedetail.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalyzeDetail type. 
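+//
+// AnalyzeDetail is the per-component breakdown returned by the indices
+// analyze API when the request sets `explain` to `true`.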
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L24-L30 +type AnalyzeDetail struct { + Analyzer *AnalyzerDetail `json:"analyzer,omitempty"` + Charfilters []CharFilterDetail `json:"charfilters,omitempty"` + CustomAnalyzer bool `json:"custom_analyzer"` + Tokenfilters []TokenDetail `json:"tokenfilters,omitempty"` + Tokenizer *TokenDetail `json:"tokenizer,omitempty"` +} + +func (s *AnalyzeDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + if err := dec.Decode(&s.Analyzer); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + + case "charfilters": + if err := dec.Decode(&s.Charfilters); err != nil { + return fmt.Errorf("%s | %w", "Charfilters", err) + } + + case "custom_analyzer": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CustomAnalyzer", err) + } + s.CustomAnalyzer = value + case bool: + s.CustomAnalyzer = v + } + + case "tokenfilters": + if err := dec.Decode(&s.Tokenfilters); err != nil { + return fmt.Errorf("%s | %w", "Tokenfilters", err) + } + + case "tokenizer": + if err := dec.Decode(&s.Tokenizer); err != nil { + return fmt.Errorf("%s | %w", "Tokenizer", err) + } + + } + } + return nil +} + +// NewAnalyzeDetail returns a AnalyzeDetail. +func NewAnalyzeDetail() *AnalyzeDetail { + r := &AnalyzeDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzer.go new file mode 100644 index 000000000..62b2b8106 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzer.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Analyzer holds the union for the following types: +// +// CustomAnalyzer +// FingerprintAnalyzer +// KeywordAnalyzer +// NoriAnalyzer +// PatternAnalyzer +// SimpleAnalyzer +// StandardAnalyzer +// StopAnalyzer +// WhitespaceAnalyzer +// IcuAnalyzer +// KuromojiAnalyzer +// SnowballAnalyzer +// ArabicAnalyzer +// ArmenianAnalyzer +// BasqueAnalyzer +// BengaliAnalyzer +// BrazilianAnalyzer +// BulgarianAnalyzer +// CatalanAnalyzer +// ChineseAnalyzer +// CjkAnalyzer +// CzechAnalyzer +// DanishAnalyzer +// DutchAnalyzer +// EnglishAnalyzer +// EstonianAnalyzer +// FinnishAnalyzer +// FrenchAnalyzer +// GalicianAnalyzer +// GermanAnalyzer +// GreekAnalyzer +// HindiAnalyzer +// HungarianAnalyzer +// IndonesianAnalyzer +// IrishAnalyzer +// ItalianAnalyzer +// LatvianAnalyzer +// LithuanianAnalyzer +// NorwegianAnalyzer +// PersianAnalyzer +// PortugueseAnalyzer +// RomanianAnalyzer +// RussianAnalyzer +// SerbianAnalyzer +// SoraniAnalyzer +// SpanishAnalyzer +// SwedishAnalyzer +// TurkishAnalyzer +// ThaiAnalyzer +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L427-L480 +type Analyzer any + +type AnalyzerVariant interface { + AnalyzerCaster() *Analyzer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzerdetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzerdetail.go new file mode 100644 index 000000000..a30035f8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzerdetail.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalyzerDetail type. 
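+//
+// AnalyzerDetail pairs an analyzer name with the tokens it produced during
+// an explain analysis.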
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L32-L35 +type AnalyzerDetail struct { + Name string `json:"name"` + Tokens []ExplainAnalyzeToken `json:"tokens"` +} + +func (s *AnalyzerDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return fmt.Errorf("%s | %w", "Tokens", err) + } + + } + } + return nil +} + +// NewAnalyzerDetail returns a AnalyzerDetail. +func NewAnalyzerDetail() *AnalyzerDetail { + r := &AnalyzerDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzetoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzetoken.go new file mode 100644 index 000000000..ebb183b20 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/analyzetoken.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnalyzeToken type. 
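+//
+// Note that `positionLength` is serialized in camelCase on the wire, unlike
+// the type's other snake_case fields.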
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L37-L44 +type AnalyzeToken struct { + EndOffset int64 `json:"end_offset"` + Position int64 `json:"position"` + PositionLength *int64 `json:"positionLength,omitempty"` + StartOffset int64 `json:"start_offset"` + Token string `json:"token"` + Type string `json:"type"` +} + +func (s *AnalyzeToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_offset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EndOffset", err) + } + s.EndOffset = value + case float64: + f := int64(v) + s.EndOffset = f + } + + case "position": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Position", err) + } + s.Position = value + case float64: + f := int64(v) + s.Position = f + } + + case "positionLength": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PositionLength", err) + } + s.PositionLength = &value + case float64: + f := int64(v) + s.PositionLength = &f + } + + case "start_offset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StartOffset", err) + } + s.StartOffset = value + case float64: + f := int64(v) + s.StartOffset = f + } + + case "token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Token", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Token = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAnalyzeToken returns a AnalyzeToken. +func NewAnalyzeToken() *AnalyzeToken { + r := &AnalyzeToken{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomaly.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomaly.go new file mode 100644 index 000000000..ae23a1fff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomaly.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Anomaly type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Anomaly.ts#L24-L121 +type Anomaly struct { + // Actual The actual value for the bucket. + Actual []Float64 `json:"actual,omitempty"` + // AnomalyScoreExplanation Information about the factors impacting the initial anomaly score. + AnomalyScoreExplanation *AnomalyExplanation `json:"anomaly_score_explanation,omitempty"` + // BucketSpan The length of the bucket in seconds. This value matches the `bucket_span` + // that is specified in the job. + BucketSpan int64 `json:"bucket_span"` + // ByFieldName The field used to split the data. In particular, this property is used for + // analyzing the splits with respect to their own history. It is used for + // finding unusual values in the context of the split. + ByFieldName *string `json:"by_field_name,omitempty"` + // ByFieldValue The value of `by_field_name`. + ByFieldValue *string `json:"by_field_value,omitempty"` + // Causes For population analysis, an over field must be specified in the detector. + // This property contains an array of anomaly records that are the causes for + // the anomaly that has been identified for the over field. This sub-resource + // contains the most anomalous records for the `over_field_name`. For + // scalability reasons, a maximum of the 10 most significant causes of the + // anomaly are returned. As part of the core analytical modeling, these + // low-level anomaly records are aggregated for their parent over field record. + // The `causes` resource contains similar elements to the record resource, + // namely `actual`, `typical`, `geo_results.actual_point`, + // `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability + // and scores are not applicable to causes. + Causes []AnomalyCause `json:"causes,omitempty"` + // DetectorIndex A unique identifier for the detector. + DetectorIndex int `json:"detector_index"` + // FieldName Certain functions require a field to operate on, for example, `sum()`. For + // those functions, this value is the name of the field to be analyzed. + FieldName *string `json:"field_name,omitempty"` + // Function The function in which the anomaly occurs, as specified in the detector + // configuration. For example, `max`. + Function *string `json:"function,omitempty"` + // FunctionDescription The description of the function in which the anomaly occurs, as specified in + // the detector configuration. + FunctionDescription *string `json:"function_description,omitempty"` + // GeoResults If the detector function is `lat_long`, this object contains comma delimited + // strings for the latitude and longitude of the actual and typical values. + GeoResults *GeoResults `json:"geo_results,omitempty"` + // Influencers If influencers were specified in the detector configuration, this array + // contains influencers that contributed to or were to blame for an anomaly. + Influencers []Influence `json:"influencers,omitempty"` + // InitialRecordScore A normalized score between 0-100, which is based on the probability of the + // anomalousness of this record. 
This is the initial value that was calculated + // at the time the bucket was processed. + InitialRecordScore Float64 `json:"initial_record_score"` + // IsInterim If true, this is an interim result. In other words, the results are + // calculated based on partial input data. + IsInterim bool `json:"is_interim"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // OverFieldName The field used to split the data. In particular, this property is used for + // analyzing the splits with respect to the history of all splits. It is used + // for finding unusual values in the population of all splits. + OverFieldName *string `json:"over_field_name,omitempty"` + // OverFieldValue The value of `over_field_name`. + OverFieldValue *string `json:"over_field_value,omitempty"` + // PartitionFieldName The field used to segment the analysis. When you use this property, you have + // completely independent baselines for each value of this field. + PartitionFieldName *string `json:"partition_field_name,omitempty"` + // PartitionFieldValue The value of `partition_field_name`. + PartitionFieldValue *string `json:"partition_field_value,omitempty"` + // Probability The probability of the individual anomaly occurring, in the range 0 to 1. For + // example, `0.0000772031`. This value can be held to a high precision of over + // 300 decimal places, so the `record_score` is provided as a human-readable and + // friendly interpretation of this. + Probability Float64 `json:"probability"` + // RecordScore A normalized score between 0-100, which is based on the probability of the + // anomalousness of this record. Unlike `initial_record_score`, this value will + // be updated by a re-normalization process as new data is analyzed. + RecordScore Float64 `json:"record_score"` + // ResultType Internal. This is always set to `record`. + ResultType string `json:"result_type"` + // Timestamp The start time of the bucket for which these results were calculated. + Timestamp int64 `json:"timestamp"` + // Typical The typical value for the bucket, according to analytical modeling. 
+ Typical []Float64 `json:"typical,omitempty"` +} + +func (s *Anomaly) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual": + if err := dec.Decode(&s.Actual); err != nil { + return fmt.Errorf("%s | %w", "Actual", err) + } + + case "anomaly_score_explanation": + if err := dec.Decode(&s.AnomalyScoreExplanation); err != nil { + return fmt.Errorf("%s | %w", "AnomalyScoreExplanation", err) + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "by_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ByFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldName = &o + + case "by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ByFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldValue = &o + + case "causes": + if err := dec.Decode(&s.Causes); err != nil { + return fmt.Errorf("%s | %w", "Causes", err) + } + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = value + case float64: + f := int(v) + s.DetectorIndex = f + } + + case "field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FieldName = &o + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = &o + + case "function_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FunctionDescription", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FunctionDescription = &o + + case "geo_results": + if err := dec.Decode(&s.GeoResults); err != nil { + return fmt.Errorf("%s | %w", "GeoResults", err) + } + + case "influencers": + if err := dec.Decode(&s.Influencers); err != nil { + return fmt.Errorf("%s | %w", "Influencers", err) + } + + case "initial_record_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitialRecordScore", err) + } + f := Float64(value) + s.InitialRecordScore = f + case float64: + f := Float64(v) + s.InitialRecordScore = f + } + + case "is_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsInterim", err) + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.JobId = o + + case "over_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OverFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldName = &o + + case "over_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OverFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldValue = &o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = &o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldValue = &o + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "record_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RecordScore", err) + } + f := Float64(value) + s.RecordScore = f + case float64: + f := Float64(v) + s.RecordScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "typical": + if err := dec.Decode(&s.Typical); err != nil { + return fmt.Errorf("%s | %w", "Typical", err) + } + + } + } + return nil +} + +// NewAnomaly returns a Anomaly. +func NewAnomaly() *Anomaly { + r := &Anomaly{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalycause.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalycause.go new file mode 100644 index 000000000..bde01c1e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalycause.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnomalyCause type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Anomaly.ts#L123-L139 +type AnomalyCause struct { + Actual []Float64 `json:"actual,omitempty"` + ByFieldName *string `json:"by_field_name,omitempty"` + ByFieldValue *string `json:"by_field_value,omitempty"` + CorrelatedByFieldValue *string `json:"correlated_by_field_value,omitempty"` + FieldName *string `json:"field_name,omitempty"` + Function *string `json:"function,omitempty"` + FunctionDescription *string `json:"function_description,omitempty"` + GeoResults *GeoResults `json:"geo_results,omitempty"` + Influencers []Influence `json:"influencers,omitempty"` + OverFieldName *string `json:"over_field_name,omitempty"` + OverFieldValue *string `json:"over_field_value,omitempty"` + PartitionFieldName *string `json:"partition_field_name,omitempty"` + PartitionFieldValue *string `json:"partition_field_value,omitempty"` + Probability Float64 `json:"probability"` + Typical []Float64 `json:"typical,omitempty"` +} + +func (s *AnomalyCause) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual": + if err := dec.Decode(&s.Actual); err != nil { + return fmt.Errorf("%s | %w", "Actual", err) + } + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return fmt.Errorf("%s | %w", "ByFieldName", err) + } + + case "by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ByFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldValue = &o + + case "correlated_by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CorrelatedByFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CorrelatedByFieldValue = &o + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return fmt.Errorf("%s | %w", "FieldName", err) + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = &o + + case "function_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FunctionDescription", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FunctionDescription = &o + + case "geo_results": + if err := dec.Decode(&s.GeoResults); err != nil { + return fmt.Errorf("%s | %w", "GeoResults", err) + } + + case "influencers": + if err := dec.Decode(&s.Influencers); err != nil { + return fmt.Errorf("%s | %w", "Influencers", err) + } + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { + return fmt.Errorf("%s | %w", "OverFieldName", err) + } + + case "over_field_value": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OverFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldValue = &o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = &o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldValue = &o + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "typical": + if err := dec.Decode(&s.Typical); err != nil { + return fmt.Errorf("%s | %w", "Typical", err) + } + + } + } + return nil +} + +// NewAnomalyCause returns a AnomalyCause. +func NewAnomalyCause() *AnomalyCause { + r := &AnomalyCause{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalydetectors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalydetectors.go new file mode 100644 index 000000000..0c5d4e034 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalydetectors.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnomalyDetectors type. 
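+//
+// The `categorization_analyzer` property is a union: either the name of a
+// built-in analyzer or an object with `char_filter`, `filter`, and/or
+// `tokenizer` keys. UnmarshalJSON sniffs the raw message for one of those
+// keys and, if found, decodes a CategorizationAnalyzerDefinition; otherwise
+// it decodes the value as-is (typically an analyzer name).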
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/types.ts#L46-L52 +type AnomalyDetectors struct { + CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer"` + CategorizationExamplesLimit int `json:"categorization_examples_limit"` + DailyModelSnapshotRetentionAfterDays int `json:"daily_model_snapshot_retention_after_days"` + ModelMemoryLimit string `json:"model_memory_limit"` + ModelSnapshotRetentionDays int `json:"model_snapshot_retention_days"` +} + +func (s *AnomalyDetectors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "categorization_analyzer": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + categorizationanalyzer_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + + switch t { + + case "char_filter", "filter", "tokenizer": + o := NewCategorizationAnalyzerDefinition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + s.CategorizationAnalyzer = o + break categorizationanalyzer_field + + } + } + if s.CategorizationAnalyzer == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.CategorizationAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + } + + case "categorization_examples_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CategorizationExamplesLimit", err) + } + s.CategorizationExamplesLimit = value + case float64: + f := int(v) + s.CategorizationExamplesLimit = f + } + + case "daily_model_snapshot_retention_after_days": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err) + } + s.DailyModelSnapshotRetentionAfterDays = value + case float64: + f := int(v) + s.DailyModelSnapshotRetentionAfterDays = f + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = o + + case "model_snapshot_retention_days": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = value + case float64: + f := int(v) + s.ModelSnapshotRetentionDays = f + } + + } + } + return nil +} + +// NewAnomalyDetectors returns a AnomalyDetectors. 
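+// The zero value is typically filled in by decoding an ML info API response,
+// for example (`raw` standing in for the JSON payload):
+//
+//	d := NewAnomalyDetectors()
+//	if err := json.Unmarshal(raw, d); err != nil {
+//		// handle the decode error
+//	}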
+func NewAnomalyDetectors() *AnomalyDetectors { + r := &AnomalyDetectors{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalyexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalyexplanation.go new file mode 100644 index 000000000..ba6833834 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anomalyexplanation.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnomalyExplanation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Anomaly.ts#L157-L198 +type AnomalyExplanation struct { + // AnomalyCharacteristicsImpact Impact from the duration and magnitude of the detected anomaly relative to + // the historical average. + AnomalyCharacteristicsImpact *int `json:"anomaly_characteristics_impact,omitempty"` + // AnomalyLength Length of the detected anomaly in the number of buckets. + AnomalyLength *int `json:"anomaly_length,omitempty"` + // AnomalyType Type of the detected anomaly: `spike` or `dip`. + AnomalyType *string `json:"anomaly_type,omitempty"` + // HighVariancePenalty Indicates reduction of anomaly score for the bucket with large confidence + // intervals. If a bucket has large confidence intervals, the score is reduced. + HighVariancePenalty *bool `json:"high_variance_penalty,omitempty"` + // IncompleteBucketPenalty If the bucket contains fewer samples than expected, the score is reduced. + IncompleteBucketPenalty *bool `json:"incomplete_bucket_penalty,omitempty"` + // LowerConfidenceBound Lower bound of the 95% confidence interval. + LowerConfidenceBound *Float64 `json:"lower_confidence_bound,omitempty"` + // MultiBucketImpact Impact of the deviation between actual and typical values in the past 12 + // buckets. + MultiBucketImpact *int `json:"multi_bucket_impact,omitempty"` + // SingleBucketImpact Impact of the deviation between actual and typical values in the current + // bucket. + SingleBucketImpact *int `json:"single_bucket_impact,omitempty"` + // TypicalValue Typical (expected) value for this bucket. + TypicalValue *Float64 `json:"typical_value,omitempty"` + // UpperConfidenceBound Upper bound of the 95% confidence interval. 
+ UpperConfidenceBound *Float64 `json:"upper_confidence_bound,omitempty"` +} + +func (s *AnomalyExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_characteristics_impact": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyCharacteristicsImpact", err) + } + s.AnomalyCharacteristicsImpact = &value + case float64: + f := int(v) + s.AnomalyCharacteristicsImpact = &f + } + + case "anomaly_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyLength", err) + } + s.AnomalyLength = &value + case float64: + f := int(v) + s.AnomalyLength = &f + } + + case "anomaly_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AnomalyType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AnomalyType = &o + + case "high_variance_penalty": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HighVariancePenalty", err) + } + s.HighVariancePenalty = &value + case bool: + s.HighVariancePenalty = &v + } + + case "incomplete_bucket_penalty": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncompleteBucketPenalty", err) + } + s.IncompleteBucketPenalty = &value + case bool: + s.IncompleteBucketPenalty = &v + } + + case "lower_confidence_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LowerConfidenceBound", err) + } + f := Float64(value) + s.LowerConfidenceBound = &f + case float64: + f := Float64(v) + s.LowerConfidenceBound = &f + } + + case "multi_bucket_impact": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MultiBucketImpact", err) + } + s.MultiBucketImpact = &value + case float64: + f := int(v) + s.MultiBucketImpact = &f + } + + case "single_bucket_impact": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SingleBucketImpact", err) + } + s.SingleBucketImpact = &value + case float64: + f := int(v) + s.SingleBucketImpact = &f + } + + case "typical_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TypicalValue", err) + } + f := Float64(value) + s.TypicalValue = &f + case float64: + f := Float64(v) + s.TypicalValue = &f + } + + case "upper_confidence_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UpperConfidenceBound", err) + } + f := Float64(value) + s.UpperConfidenceBound = &f + case float64: + f := Float64(v) + s.UpperConfidenceBound = &f + } + + } + } + return nil +} + +// 
NewAnomalyExplanation returns a AnomalyExplanation. +func NewAnomalyExplanation() *AnomalyExplanation { + r := &AnomalyExplanation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropicservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropicservicesettings.go new file mode 100644 index 000000000..424a42794 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropicservicesettings.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnthropicServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L588-L604 +type AnthropicServiceSettings struct { + // ApiKey A valid API key for the Anthropic API. + ApiKey string `json:"api_key"` + // ModelId The name of the model to use for the inference task. + // Refer to the Anthropic documentation for the list of supported models. + ModelId string `json:"model_id"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Anthropic. + // By default, the `anthropic` service sets the number of requests allowed per + // minute to 50. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` +} + +func (s *AnthropicServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + } + } + return nil +} + +// NewAnthropicServiceSettings returns a AnthropicServiceSettings. 
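+// The API treats `api_key` and `model_id` as required, so callers are
+// expected to set ApiKey and ModelId before use.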
+func NewAnthropicServiceSettings() *AnthropicServiceSettings { + r := &AnthropicServiceSettings{} + + return r +} + +type AnthropicServiceSettingsVariant interface { + AnthropicServiceSettingsCaster() *AnthropicServiceSettings +} + +func (s *AnthropicServiceSettings) AnthropicServiceSettingsCaster() *AnthropicServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropictasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropictasksettings.go new file mode 100644 index 000000000..096981877 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/anthropictasksettings.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AnthropicTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L606-L631 +type AnthropicTaskSettings struct { + // MaxTokens For a `completion` task, it is the maximum number of tokens to generate + // before stopping. + MaxTokens int `json:"max_tokens"` + // Temperature For a `completion` task, it is the amount of randomness injected into the + // response. + // For more details about the supported range, refer to Anthropic documentation. + Temperature *float32 `json:"temperature,omitempty"` + // TopK For a `completion` task, it specifies to only sample from the top K options + // for each subsequent token. + // It is recommended for advanced use cases only. + // You usually only need to use `temperature`. + TopK *int `json:"top_k,omitempty"` + // TopP For a `completion` task, it specifies to use Anthropic's nucleus sampling. + // In nucleus sampling, Anthropic computes the cumulative distribution over all + // the options for each subsequent token in decreasing probability order and + // cuts it off once it reaches the specified probability. + // You should either alter `temperature` or `top_p`, but not both. + // It is recommended for advanced use cases only. + // You usually only need to use `temperature`. 
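+	// Like `temperature`, the value may be sent as a JSON number or a numeric
+	// string; UnmarshalJSON below accepts both forms.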
+ TopP *float32 `json:"top_p,omitempty"` +} + +func (s *AnthropicTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokens", err) + } + s.MaxTokens = value + case float64: + f := int(v) + s.MaxTokens = f + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "top_k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopK", err) + } + s.TopK = &value + case float64: + f := int(v) + s.TopK = &f + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + } + } + return nil +} + +// NewAnthropicTaskSettings returns a AnthropicTaskSettings. +func NewAnthropicTaskSettings() *AnthropicTaskSettings { + r := &AnthropicTaskSettings{} + + return r +} + +type AnthropicTaskSettingsVariant interface { + AnthropicTaskSettingsCaster() *AnthropicTaskSettings +} + +func (s *AnthropicTaskSettings) AnthropicTaskSettingsCaster() *AnthropicTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikey.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikey.go new file mode 100644 index 000000000..d3ae354a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikey.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeytype" +) + +// ApiKey type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/ApiKey.ts#L27-L113 +type ApiKey struct { + // Access The access granted to cross-cluster API keys. + // The access is composed of permissions for cross cluster search and cross + // cluster replication. + // At least one of them must be specified. + // When specified, the new access assignment fully replaces the previously + // assigned access. + Access *Access `json:"access,omitempty"` + // Creation Creation time for the API key in milliseconds. + Creation int64 `json:"creation"` + // Expiration Expiration time for the API key in milliseconds. + Expiration *int64 `json:"expiration,omitempty"` + // Id Id for the API key + Id string `json:"id"` + // Invalidated Invalidation status for the API key. + // If the key has been invalidated, it has a value of `true`. Otherwise, it is + // `false`. + Invalidated bool `json:"invalidated"` + // Invalidation If the key has been invalidated, invalidation time in milliseconds. + Invalidation *int64 `json:"invalidation,omitempty"` + // LimitedBy The owner user’s permissions associated with the API key. + // It is a point-in-time snapshot captured at creation and subsequent updates. + // An API key’s effective permissions are an intersection of its assigned + // privileges and the owner user’s permissions. + LimitedBy []map[string]RoleDescriptor `json:"limited_by,omitempty"` + // Metadata Metadata of the API key + Metadata Metadata `json:"metadata"` + // Name Name of the API key. + Name string `json:"name"` + // ProfileUid The profile uid for the API key owner principal, if requested and if it + // exists + ProfileUid *string `json:"profile_uid,omitempty"` + // Realm Realm name of the principal for which this API key was created. + Realm string `json:"realm"` + // RealmType Realm type of the principal for which this API key was created + RealmType *string `json:"realm_type,omitempty"` + // RoleDescriptors The role descriptors assigned to this API key when it was created or last + // updated. + // An empty role descriptor means the API key inherits the owner user’s + // permissions. + RoleDescriptors map[string]RoleDescriptor `json:"role_descriptors,omitempty"` + // Sort_ Sorting values when using the `sort` parameter with the + // `security.query_api_keys` API. + Sort_ []FieldValue `json:"_sort,omitempty"` + // Type The type of the API key (e.g. `rest` or `cross_cluster`). 
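+	// See the enums/apikeytype package for the defined values.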
+ Type apikeytype.ApiKeyType `json:"type"` + // Username Principal for which this API key was created + Username string `json:"username"` +} + +func (s *ApiKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "access": + if err := dec.Decode(&s.Access); err != nil { + return fmt.Errorf("%s | %w", "Access", err) + } + + case "creation": + if err := dec.Decode(&s.Creation); err != nil { + return fmt.Errorf("%s | %w", "Creation", err) + } + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return fmt.Errorf("%s | %w", "Expiration", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "invalidated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Invalidated", err) + } + s.Invalidated = value + case bool: + s.Invalidated = v + } + + case "invalidation": + if err := dec.Decode(&s.Invalidation); err != nil { + return fmt.Errorf("%s | %w", "Invalidation", err) + } + + case "limited_by": + if err := dec.Decode(&s.LimitedBy); err != nil { + return fmt.Errorf("%s | %w", "LimitedBy", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "profile_uid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProfileUid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProfileUid = &o + + case "realm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Realm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Realm = o + + case "realm_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RealmType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RealmType = &o + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return fmt.Errorf("%s | %w", "RoleDescriptors", err) + } + + case "_sort": + if err := dec.Decode(&s.Sort_); err != nil { + return fmt.Errorf("%s | %w", "Sort_", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewApiKey returns a ApiKey. +func NewApiKey() *ApiKey { + r := &ApiKey{ + RoleDescriptors: make(map[string]RoleDescriptor), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregate.go new file mode 100644 index 000000000..9a165d2f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregate.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ApiKeyAggregate holds the union for the following types: +// +// CardinalityAggregate +// ValueCountAggregate +// StringTermsAggregate +// LongTermsAggregate +// DoubleTermsAggregate +// UnmappedTermsAggregate +// MultiTermsAggregate +// MissingAggregate +// FilterAggregate +// FiltersAggregate +// RangeAggregate +// DateRangeAggregate +// CompositeAggregate +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/types.ts#L122-L139 +type ApiKeyAggregate any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregationcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregationcontainer.go new file mode 100644 index 000000000..e759df9aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyaggregationcontainer.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ApiKeyAggregationContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/types.ts#L63-L120 +type ApiKeyAggregationContainer struct { + AdditionalApiKeyAggregationContainerProperty map[string]json.RawMessage `json:"-"` + // Aggregations Sub-aggregations for this aggregation. + // Only applies to bucket aggregations. 
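+	// Both the `aggregations` key and its `aggs` alias are accepted when
+	// unmarshalling.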
+ Aggregations map[string]ApiKeyAggregationContainer `json:"aggregations,omitempty"` + // Cardinality A single-value metrics aggregation that calculates an approximate count of + // distinct values. + Cardinality *CardinalityAggregation `json:"cardinality,omitempty"` + // Composite A multi-bucket aggregation that creates composite buckets from different + // sources. + // Unlike the other multi-bucket aggregations, you can use the `composite` + // aggregation to paginate *all* buckets from a multi-level aggregation + // efficiently. + Composite *CompositeAggregation `json:"composite,omitempty"` + // DateRange A multi-bucket value source based aggregation that enables the user to define + // a set of date ranges - each representing a bucket. + DateRange *DateRangeAggregation `json:"date_range,omitempty"` + // Filter A single bucket aggregation that narrows the set of documents to those that + // match a query. + Filter *ApiKeyQueryContainer `json:"filter,omitempty"` + // Filters A multi-bucket aggregation where each bucket contains the documents that + // match a query. + Filters *ApiKeyFiltersAggregation `json:"filters,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Missing *MissingAggregation `json:"missing,omitempty"` + // Range A multi-bucket value source based aggregation that enables the user to define + // a set of ranges - each representing a bucket. + Range *RangeAggregation `json:"range,omitempty"` + // Terms A multi-bucket value source based aggregation where buckets are dynamically + // built - one per unique value. + Terms *TermsAggregation `json:"terms,omitempty"` + // ValueCount A single-value metrics aggregation that counts the number of values that are + // extracted from the aggregated documents. + ValueCount *ValueCountAggregation `json:"value_count,omitempty"` +} + +func (s *ApiKeyAggregationContainer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]ApiKeyAggregationContainer, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "cardinality": + if err := dec.Decode(&s.Cardinality); err != nil { + return fmt.Errorf("%s | %w", "Cardinality", err) + } + + case "composite": + if err := dec.Decode(&s.Composite); err != nil { + return fmt.Errorf("%s | %w", "Composite", err) + } + + case "date_range": + if err := dec.Decode(&s.DateRange); err != nil { + return fmt.Errorf("%s | %w", "DateRange", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "filters": + if err := dec.Decode(&s.Filters); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "range": + if err := dec.Decode(&s.Range); err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return fmt.Errorf("%s | %w", "Terms", err) + } + + case "value_count": + if err := dec.Decode(&s.ValueCount); err != nil { + return fmt.Errorf("%s | %w", "ValueCount", err) + } + + default: + + if key, ok := 
t.(string); ok {
+				if s.AdditionalApiKeyAggregationContainerProperty == nil {
+					s.AdditionalApiKeyAggregationContainerProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalApiKeyAggregationContainerProperty", err)
+				}
+				s.AdditionalApiKeyAggregationContainerProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ApiKeyAggregationContainer) MarshalJSON() ([]byte, error) {
+	type opt ApiKeyAggregationContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalApiKeyAggregationContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalApiKeyAggregationContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewApiKeyAggregationContainer returns a ApiKeyAggregationContainer.
+func NewApiKeyAggregationContainer() *ApiKeyAggregationContainer {
+	r := &ApiKeyAggregationContainer{
+		AdditionalApiKeyAggregationContainerProperty: make(map[string]json.RawMessage),
+		Aggregations: make(map[string]ApiKeyAggregationContainer),
+	}
+
+	return r
+}
+
+type ApiKeyAggregationContainerVariant interface {
+	ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer
+}
+
+func (s *ApiKeyAggregationContainer) ApiKeyAggregationContainerCaster() *ApiKeyAggregationContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyauthorization.go
new file mode 100644
index 000000000..06aa4f5e2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyauthorization.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ApiKeyAuthorization type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Authorization.ts#L20-L29
+type ApiKeyAuthorization struct {
+	// Id The identifier for the API key.
+	Id string `json:"id"`
+	// Name The name of the API key.
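+	// As with Id, quoted values are unquoted when possible and otherwise kept
+	// verbatim by UnmarshalJSON.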
+ Name string `json:"name"` +} + +func (s *ApiKeyAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewApiKeyAuthorization returns a ApiKeyAuthorization. +func NewApiKeyAuthorization() *ApiKeyAuthorization { + r := &ApiKeyAuthorization{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyfiltersaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyfiltersaggregation.go new file mode 100644 index 000000000..2ed1bfd66 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyfiltersaggregation.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ApiKeyFiltersAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/types.ts#L207-L227 +type ApiKeyFiltersAggregation struct { + // Filters Collection of queries from which to build buckets. + Filters BucketsApiKeyQueryContainer `json:"filters,omitempty"` + // Keyed By default, the named filters aggregation returns the buckets as an object. + // Set to `false` to return the buckets as an array of objects. + Keyed *bool `json:"keyed,omitempty"` + // OtherBucket Set to `true` to add a bucket to the response which will contain all + // documents that do not match any of the given filters. + OtherBucket *bool `json:"other_bucket,omitempty"` + // OtherBucketKey The key with which the other bucket is returned. 
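+	// When `other_bucket` is enabled and no key is set, Elasticsearch uses
+	// `_other_` as the default key.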
+ OtherBucketKey *string `json:"other_bucket_key,omitempty"` +} + +func (s *ApiKeyFiltersAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]ApiKeyQueryContainer, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + s.Filters = o + case '[': + o := []ApiKeyQueryContainer{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + s.Filters = o + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "other_bucket": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OtherBucket", err) + } + s.OtherBucket = &value + case bool: + s.OtherBucket = &v + } + + case "other_bucket_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OtherBucketKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OtherBucketKey = &o + + } + } + return nil +} + +// NewApiKeyFiltersAggregation returns a ApiKeyFiltersAggregation. +func NewApiKeyFiltersAggregation() *ApiKeyFiltersAggregation { + r := &ApiKeyFiltersAggregation{} + + return r +} + +type ApiKeyFiltersAggregationVariant interface { + ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation +} + +func (s *ApiKeyFiltersAggregation) ApiKeyFiltersAggregationCaster() *ApiKeyFiltersAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyquerycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyquerycontainer.go new file mode 100644 index 000000000..dba61f75e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apikeyquerycontainer.go @@ -0,0 +1,230 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ApiKeyQueryContainer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_api_keys/types.ts#L141-L205 +type ApiKeyQueryContainer struct { + AdditionalApiKeyQueryContainerProperty map[string]json.RawMessage `json:"-"` + // Bool Matches documents matching boolean combinations of other queries. + Bool *BoolQuery `json:"bool,omitempty"` + // Exists Returns documents that contain an indexed value for a field. + Exists *ExistsQuery `json:"exists,omitempty"` + // Ids Returns documents based on their IDs. + // This query uses document IDs stored in the `_id` field. + Ids *IdsQuery `json:"ids,omitempty"` + // Match Returns documents that match a provided text, number, date or boolean value. + // The provided text is analyzed before matching. + Match map[string]MatchQuery `json:"match,omitempty"` + // MatchAll Matches all documents, giving them all a `_score` of 1.0. + MatchAll *MatchAllQuery `json:"match_all,omitempty"` + // Prefix Returns documents that contain a specific prefix in a provided field. + Prefix map[string]PrefixQuery `json:"prefix,omitempty"` + // Range Returns documents that contain terms within a provided range. + Range map[string]RangeQuery `json:"range,omitempty"` + // SimpleQueryString Returns documents based on a provided query string, using a parser with a + // limited but fault-tolerant syntax. + SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` + // Term Returns documents that contain an exact term in a provided field. + // To return a document, the query term must exactly match the queried field's + // value, including whitespace and capitalization. + Term map[string]TermQuery `json:"term,omitempty"` + // Terms Returns documents that contain one or more exact terms in a provided field. + // To return a document, one or more terms must exactly match a field value, + // including whitespace and capitalization. + Terms *TermsQuery `json:"terms,omitempty"` + // Wildcard Returns documents that contain terms matching a wildcard pattern. 
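+	// Keyed by field name, like the `prefix` and `term` clauses above.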
+	Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"`
+}
+
+func (s *ApiKeyQueryContainer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bool":
+			if err := dec.Decode(&s.Bool); err != nil {
+				return fmt.Errorf("%s | %w", "Bool", err)
+			}
+
+		case "exists":
+			if err := dec.Decode(&s.Exists); err != nil {
+				return fmt.Errorf("%s | %w", "Exists", err)
+			}
+
+		case "ids":
+			if err := dec.Decode(&s.Ids); err != nil {
+				return fmt.Errorf("%s | %w", "Ids", err)
+			}
+
+		case "match":
+			if s.Match == nil {
+				s.Match = make(map[string]MatchQuery, 0)
+			}
+			if err := dec.Decode(&s.Match); err != nil {
+				return fmt.Errorf("%s | %w", "Match", err)
+			}
+
+		case "match_all":
+			if err := dec.Decode(&s.MatchAll); err != nil {
+				return fmt.Errorf("%s | %w", "MatchAll", err)
+			}
+
+		case "prefix":
+			if s.Prefix == nil {
+				s.Prefix = make(map[string]PrefixQuery, 0)
+			}
+			if err := dec.Decode(&s.Prefix); err != nil {
+				return fmt.Errorf("%s | %w", "Prefix", err)
+			}
+
+		case "range":
+			if s.Range == nil {
+				s.Range = make(map[string]RangeQuery, 0)
+			}
+			messages := make(map[string]json.RawMessage)
+			err := dec.Decode(&messages)
+			if err != nil {
+				return fmt.Errorf("%s | %w", "Range", err)
+			}
+
+			untyped := NewUntypedRangeQuery()
+			for key, message := range messages {
+				err := json.Unmarshal(message, &untyped)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Range", err)
+				}
+				s.Range[key] = untyped
+			}
+
+		case "simple_query_string":
+			if err := dec.Decode(&s.SimpleQueryString); err != nil {
+				return fmt.Errorf("%s | %w", "SimpleQueryString", err)
+			}
+
+		case "term":
+			if s.Term == nil {
+				s.Term = make(map[string]TermQuery, 0)
+			}
+			if err := dec.Decode(&s.Term); err != nil {
+				return fmt.Errorf("%s | %w", "Term", err)
+			}
+
+		case "terms":
+			if err := dec.Decode(&s.Terms); err != nil {
+				return fmt.Errorf("%s | %w", "Terms", err)
+			}
+
+		case "wildcard":
+			if s.Wildcard == nil {
+				s.Wildcard = make(map[string]WildcardQuery, 0)
+			}
+			if err := dec.Decode(&s.Wildcard); err != nil {
+				return fmt.Errorf("%s | %w", "Wildcard", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalApiKeyQueryContainerProperty == nil {
+					s.AdditionalApiKeyQueryContainerProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalApiKeyQueryContainerProperty", err)
+				}
+				s.AdditionalApiKeyQueryContainerProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ApiKeyQueryContainer) MarshalJSON() ([]byte, error) {
+	type opt ApiKeyQueryContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalApiKeyQueryContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalApiKeyQueryContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewApiKeyQueryContainer returns a ApiKeyQueryContainer.
+func NewApiKeyQueryContainer() *ApiKeyQueryContainer { + r := &ApiKeyQueryContainer{ + AdditionalApiKeyQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), + } + + return r +} + +type ApiKeyQueryContainerVariant interface { + ApiKeyQueryContainerCaster() *ApiKeyQueryContainer +} + +func (s *ApiKeyQueryContainer) ApiKeyQueryContainerCaster() *ApiKeyQueryContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apostrophetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apostrophetokenfilter.go new file mode 100644 index 000000000..cf87cb214 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/apostrophetokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ApostropheTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L451-L453 +type ApostropheTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ApostropheTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ApostropheTokenFilter) MarshalJSON() ([]byte, error) { + type innerApostropheTokenFilter ApostropheTokenFilter + tmp := innerApostropheTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "apostrophe" + + return json.Marshal(tmp) +} + +// NewApostropheTokenFilter returns a ApostropheTokenFilter. 
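+// MarshalJSON pins the `type` field to "apostrophe", so callers normally only
+// set Version when needed.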
+func NewApostropheTokenFilter() *ApostropheTokenFilter { + r := &ApostropheTokenFilter{} + + return r +} + +type ApostropheTokenFilterVariant interface { + ApostropheTokenFilterCaster() *ApostropheTokenFilter +} + +func (s *ApostropheTokenFilter) ApostropheTokenFilterCaster() *ApostropheTokenFilter { + return s +} + +func (s *ApostropheTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/appendprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/appendprocessor.go new file mode 100644 index 000000000..6e282db4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/appendprocessor.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AppendProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L329-L344 +type AppendProcessor struct { + // AllowDuplicates If `false`, the processor does not append values already present in the + // field. + AllowDuplicates *bool `json:"allow_duplicates,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be appended to. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // Value The value to be appended. Supports template snippets. 
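+	// A bare scalar is accepted as well as an array; UnmarshalJSON wraps the
+	// scalar form in a single-element slice.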
+ Value []json.RawMessage `json:"value"` +} + +func (s *AppendProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_duplicates": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowDuplicates", err) + } + s.AllowDuplicates = &value + case bool: + s.AllowDuplicates = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "value": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(json.RawMessage) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + s.Value = append(s.Value, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + } + + } + } + return nil +} + +// NewAppendProcessor returns a AppendProcessor. +func NewAppendProcessor() *AppendProcessor { + r := &AppendProcessor{} + + return r +} + +type AppendProcessorVariant interface { + AppendProcessorCaster() *AppendProcessor +} + +func (s *AppendProcessor) AppendProcessorCaster() *AppendProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationglobaluserprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationglobaluserprivileges.go new file mode 100644 index 000000000..db2bf7ccb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationglobaluserprivileges.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// ApplicationGlobalUserPrivileges type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L436-L438
+type ApplicationGlobalUserPrivileges struct {
+	Manage ManageUserPrivileges `json:"manage"`
+}
+
+// NewApplicationGlobalUserPrivileges returns a ApplicationGlobalUserPrivileges.
+func NewApplicationGlobalUserPrivileges() *ApplicationGlobalUserPrivileges {
+	r := &ApplicationGlobalUserPrivileges{}
+
+	return r
+}
+
+type ApplicationGlobalUserPrivilegesVariant interface {
+	ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges
+}
+
+func (s *ApplicationGlobalUserPrivileges) ApplicationGlobalUserPrivilegesCaster() *ApplicationGlobalUserPrivileges {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivileges.go
new file mode 100644
index 000000000..54a70b598
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivileges.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ApplicationPrivileges type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L27-L40
+type ApplicationPrivileges struct {
+	// Application The name of the application to which this entry applies.
+	Application string `json:"application"`
+	// Privileges A list of strings, where each element is the name of an application privilege
+	// or action.
+	Privileges []string `json:"privileges"`
+	// Resources A list of resources to which the privileges are applied.
+ Resources []string `json:"resources"` +} + +func (s *ApplicationPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Application", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = o + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "resources": + if err := dec.Decode(&s.Resources); err != nil { + return fmt.Errorf("%s | %w", "Resources", err) + } + + } + } + return nil +} + +// NewApplicationPrivileges returns a ApplicationPrivileges. +func NewApplicationPrivileges() *ApplicationPrivileges { + r := &ApplicationPrivileges{} + + return r +} + +type ApplicationPrivilegesVariant interface { + ApplicationPrivilegesCaster() *ApplicationPrivileges +} + +func (s *ApplicationPrivileges) ApplicationPrivilegesCaster() *ApplicationPrivileges { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivilegescheck.go new file mode 100644 index 000000000..16bf6b368 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationprivilegescheck.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ApplicationPrivilegesCheck type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/types.ts#L24-L32 +type ApplicationPrivilegesCheck struct { + // Application The name of the application. + Application string `json:"application"` + // Privileges A list of the privileges that you want to check for the specified resources. + // It may be either application privilege names or the names of actions that are + // granted by those privileges + Privileges []string `json:"privileges"` + // Resources A list of resource names against which the privileges should be checked. 
+ Resources []string `json:"resources"` +} + +func (s *ApplicationPrivilegesCheck) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Application", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = o + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "resources": + if err := dec.Decode(&s.Resources); err != nil { + return fmt.Errorf("%s | %w", "Resources", err) + } + + } + } + return nil +} + +// NewApplicationPrivilegesCheck returns a ApplicationPrivilegesCheck. +func NewApplicationPrivilegesCheck() *ApplicationPrivilegesCheck { + r := &ApplicationPrivilegesCheck{} + + return r +} + +type ApplicationPrivilegesCheckVariant interface { + ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck +} + +func (s *ApplicationPrivilegesCheck) ApplicationPrivilegesCheckCaster() *ApplicationPrivilegesCheck { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationsprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationsprivileges.go new file mode 100644 index 000000000..a7da8252c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/applicationsprivileges.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ApplicationsPrivileges type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/types.ts#L47-L47 +type ApplicationsPrivileges map[string]ResourcePrivileges diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicanalyzer.go new file mode 100644 index 000000000..de49bee8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ArabicAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L72-L77 +type ArabicAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ArabicAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ArabicAnalyzer) MarshalJSON() ([]byte, error) { + type innerArabicAnalyzer ArabicAnalyzer + tmp := innerArabicAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "arabic" + + return json.Marshal(tmp) +} + +// NewArabicAnalyzer returns a ArabicAnalyzer. +func NewArabicAnalyzer() *ArabicAnalyzer { + r := &ArabicAnalyzer{} + + return r +} + +type ArabicAnalyzerVariant interface { + ArabicAnalyzerCaster() *ArabicAnalyzer +} + +func (s *ArabicAnalyzer) ArabicAnalyzerCaster() *ArabicAnalyzer { + return s +} + +func (s *ArabicAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicnormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicnormalizationtokenfilter.go new file mode 100644 index 000000000..c7549a723 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicnormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ArabicNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L455-L457 +type ArabicNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ArabicNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ArabicNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerArabicNormalizationTokenFilter ArabicNormalizationTokenFilter + tmp := innerArabicNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "arabic_normalization" + + return json.Marshal(tmp) +} + +// NewArabicNormalizationTokenFilter returns a ArabicNormalizationTokenFilter. +func NewArabicNormalizationTokenFilter() *ArabicNormalizationTokenFilter { + r := &ArabicNormalizationTokenFilter{} + + return r +} + +type ArabicNormalizationTokenFilterVariant interface { + ArabicNormalizationTokenFilterCaster() *ArabicNormalizationTokenFilter +} + +func (s *ArabicNormalizationTokenFilter) ArabicNormalizationTokenFilterCaster() *ArabicNormalizationTokenFilter { + return s +} + +func (s *ArabicNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicstemtokenfilter.go new file mode 100644 index 000000000..c95fc7529 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arabicstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ArabicStemTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L547-L549 +type ArabicStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ArabicStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ArabicStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerArabicStemTokenFilter ArabicStemTokenFilter + tmp := innerArabicStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "arabic_stem" + + return json.Marshal(tmp) +} + +// NewArabicStemTokenFilter returns a ArabicStemTokenFilter. +func NewArabicStemTokenFilter() *ArabicStemTokenFilter { + r := &ArabicStemTokenFilter{} + + return r +} + +type ArabicStemTokenFilterVariant interface { + ArabicStemTokenFilterCaster() *ArabicStemTokenFilter +} + +func (s *ArabicStemTokenFilter) ArabicStemTokenFilterCaster() *ArabicStemTokenFilter { + return s +} + +func (s *ArabicStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/archive.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/archive.go new file mode 100644 index 000000000..68d837f5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/archive.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
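All three Arabic analysis components above share the generated discriminator pattern: MarshalJSON overwrites Type with the literal name ("arabic", "arabic_normalization", "arabic_stem"), so callers never set it by hand. A sketch of the effect on the stem filter:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// The zero value is enough: MarshalJSON pins Type to the literal
	// discriminator before encoding, and the nil Version is omitted.
	f := types.NewArabicStemTokenFilter()
	out, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"type":"arabic_stem"}
}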
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Archive type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L45-L47 +type Archive struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + IndicesCount int64 `json:"indices_count"` +} + +func (s *Archive) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesCount", err) + } + s.IndicesCount = value + case float64: + f := int64(v) + s.IndicesCount = f + } + + } + } + return nil +} + +// NewArchive returns a Archive. +func NewArchive() *Archive { + r := &Archive{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/armeniananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/armeniananalyzer.go new file mode 100644 index 000000000..0144bebad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/armeniananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ArmenianAnalyzer type. 
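The Archive decoder above shows the lenient scalar handling used throughout these usage-statistics types: booleans and counters decode from native JSON scalars or from their stringified forms. A sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both spellings decode to the same struct: the generated code probes
	// each scalar as `any` and falls back to strconv for string forms.
	for _, doc := range []string{
		`{"available":true,"enabled":true,"indices_count":12}`,
		`{"available":"true","enabled":"true","indices_count":"12"}`,
	} {
		a := types.NewArchive()
		if err := json.Unmarshal([]byte(doc), a); err != nil {
			panic(err)
		}
		fmt.Println(a.Available, a.Enabled, a.IndicesCount) // true true 12
	}
}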
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L79-L84 +type ArmenianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ArmenianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ArmenianAnalyzer) MarshalJSON() ([]byte, error) { + type innerArmenianAnalyzer ArmenianAnalyzer + tmp := innerArmenianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "armenian" + + return json.Marshal(tmp) +} + +// NewArmenianAnalyzer returns a ArmenianAnalyzer. +func NewArmenianAnalyzer() *ArmenianAnalyzer { + r := &ArmenianAnalyzer{} + + return r +} + +type ArmenianAnalyzerVariant interface { + ArmenianAnalyzerCaster() *ArmenianAnalyzer +} + +func (s *ArmenianAnalyzer) ArmenianAnalyzerCaster() *ArmenianAnalyzer { + return s +} + +func (s *ArmenianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycomparecondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycomparecondition.go new file mode 100644 index 000000000..5b19f7fad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycomparecondition.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
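The `type innerArmenianAnalyzer ArmenianAnalyzer` alias inside MarshalJSON is the standard Go device for pinning a field during encoding: the local type copies the fields but not the methods, so the nested json.Marshal call cannot re-enter MarshalJSON. A stand-alone illustration of the idiom (the Doc type and its fields are invented for the example, not part of the library):

package main

import (
	"encoding/json"
	"fmt"
)

// Doc is a stand-in for a generated type with a pinned discriminator.
type Doc struct {
	Type string `json:"type,omitempty"`
	Name string `json:"name,omitempty"`
}

func (d Doc) MarshalJSON() ([]byte, error) {
	// innerDoc has Doc's fields but NOT its methods, so json.Marshal below
	// uses the default encoder instead of recursing into this MarshalJSON.
	type innerDoc Doc
	tmp := innerDoc(d)
	tmp.Type = "doc" // pin the literal, exactly as the generated analyzers do
	return json.Marshal(tmp)
}

func main() {
	out, _ := json.Marshal(Doc{Name: "example"})
	fmt.Println(string(out)) // {"type":"doc","name":"example"}
}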
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop" +) + +// ArrayCompareCondition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L33-L40 +type ArrayCompareCondition struct { + ArrayCompareCondition map[conditionop.ConditionOp]ArrayCompareOpParams `json:"-"` + Path string `json:"path"` +} + +func (s *ArrayCompareCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + default: + + if key, ok := t.(string); ok { + if s.ArrayCompareCondition == nil { + s.ArrayCompareCondition = make(map[conditionop.ConditionOp]ArrayCompareOpParams, 0) + } + raw := NewArrayCompareOpParams() + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "ArrayCompareCondition", err) + } + enum := conditionop.ConditionOp{} + err := enum.UnmarshalText([]byte(key)) + if err != nil { + return fmt.Errorf("cannot unmarshal enum conditionop.ConditionOp: %w", err) + } + s.ArrayCompareCondition[enum] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s ArrayCompareCondition) MarshalJSON() ([]byte, error) { + type opt ArrayCompareCondition + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.ArrayCompareCondition { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "ArrayCompareCondition") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewArrayCompareCondition returns a ArrayCompareCondition. +func NewArrayCompareCondition() *ArrayCompareCondition { + r := &ArrayCompareCondition{ + ArrayCompareCondition: make(map[conditionop.ConditionOp]ArrayCompareOpParams), + } + + return r +} + +type ArrayCompareConditionVariant interface { + ArrayCompareConditionCaster() *ArrayCompareCondition +} + +func (s *ArrayCompareCondition) ArrayCompareConditionCaster() *ArrayCompareCondition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycompareopparams.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycompareopparams.go new file mode 100644 index 000000000..5039db3b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraycompareopparams.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/quantifier" +) + +// ArrayCompareOpParams type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L28-L31 +type ArrayCompareOpParams struct { + Quantifier quantifier.Quantifier `json:"quantifier"` + Value FieldValue `json:"value"` +} + +func (s *ArrayCompareOpParams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "quantifier": + if err := dec.Decode(&s.Quantifier); err != nil { + return fmt.Errorf("%s | %w", "Quantifier", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewArrayCompareOpParams returns a ArrayCompareOpParams. +func NewArrayCompareOpParams() *ArrayCompareOpParams { + r := &ArrayCompareOpParams{} + + return r +} + +type ArrayCompareOpParamsVariant interface { + ArrayCompareOpParamsCaster() *ArrayCompareOpParams +} + +func (s *ArrayCompareOpParams) ArrayCompareOpParamsCaster() *ArrayCompareOpParams { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraypercentilesitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraypercentilesitem.go new file mode 100644 index 000000000..a36b3408a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/arraypercentilesitem.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
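ArrayCompareCondition keeps its comparison operators out of the struct: unknown keys are parsed as conditionop.ConditionOp values into the embedded map, and MarshalJSON flattens them back beside `path`. A decoding sketch; the operator spelling "gte" and quantifier "all" are taken from the watcher condition vocabulary and should be treated as assumptions here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "gte" is not a struct field: the default branch of UnmarshalJSON
	// parses it as a ConditionOp enum and files the params under it.
	data := []byte(`{"path":"ctx.payload.hits.total","gte":{"quantifier":"all","value":10}}`)

	c := types.NewArrayCompareCondition()
	if err := json.Unmarshal(data, c); err != nil {
		panic(err)
	}
	fmt.Println(c.Path, len(c.ArrayCompareCondition)) // ctx.payload.hits.total 1

	// MarshalJSON inlines the operator map again, restoring the wire shape.
	out, _ := json.Marshal(c)
	fmt.Println(string(out))
}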
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ArrayPercentilesItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L162-L166 +type ArrayPercentilesItem struct { + Key string `json:"key"` + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *ArrayPercentilesItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewArrayPercentilesItem returns a ArrayPercentilesItem. +func NewArrayPercentilesItem() *ArrayPercentilesItem { + r := &ArrayPercentilesItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asciifoldingtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asciifoldingtokenfilter.go new file mode 100644 index 000000000..4ecb1df93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asciifoldingtokenfilter.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AsciiFoldingTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L213-L217 +type AsciiFoldingTokenFilter struct { + // PreserveOriginal If `true`, emit both original tokens and folded tokens. Defaults to `false`. 
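ArrayPercentilesItem is the element type for non-keyed percentile responses; `value_as_string` carries the formatted rendering when the target field defines one. A decoding sketch (the sample values are invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "value" is the numeric percentile; "value_as_string" is the optional
	// human-readable form, for example for duration or date fields.
	data := []byte(`{"key":"99.0","value":1250.5,"value_as_string":"1.25s"}`)

	item := types.NewArrayPercentilesItem()
	if err := json.Unmarshal(data, item); err != nil {
		panic(err)
	}
	fmt.Println(item.Key, *item.Value, *item.ValueAsString) // 99.0 1250.5 1.25s
}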
+ PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *AsciiFoldingTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s AsciiFoldingTokenFilter) MarshalJSON() ([]byte, error) { + type innerAsciiFoldingTokenFilter AsciiFoldingTokenFilter + tmp := innerAsciiFoldingTokenFilter{ + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "asciifolding" + + return json.Marshal(tmp) +} + +// NewAsciiFoldingTokenFilter returns a AsciiFoldingTokenFilter. +func NewAsciiFoldingTokenFilter() *AsciiFoldingTokenFilter { + r := &AsciiFoldingTokenFilter{} + + return r +} + +type AsciiFoldingTokenFilterVariant interface { + AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter +} + +func (s *AsciiFoldingTokenFilter) AsciiFoldingTokenFilterCaster() *AsciiFoldingTokenFilter { + return s +} + +func (s *AsciiFoldingTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asyncsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asyncsearch.go new file mode 100644 index 000000000..197c7566d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/asyncsearch.go @@ -0,0 +1,798 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// AsyncSearch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/async_search/_types/AsyncSearch.ts#L30-L56 +type AsyncSearch struct { + // Aggregations Partial aggregations results, coming from the shards that have already + // completed running the query. 
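Because Elasticsearch may stringify scalars, `preserve_original` is modelled as a Stringified<boolean> in the spec. A sketch, under the assumption that the generated Stringifiedboolean union accepts both the native and the quoted form, as its name and the surrounding decoders suggest:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both spellings of the flag are expected to decode.
	for _, doc := range []string{
		`{"type":"asciifolding","preserve_original":true}`,
		`{"type":"asciifolding","preserve_original":"true"}`,
	} {
		f := types.NewAsciiFoldingTokenFilter()
		if err := json.Unmarshal([]byte(doc), f); err != nil {
			panic(err)
		}
		fmt.Printf("%v\n", f.PreserveOriginal)
	}
}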
+ Aggregations map[string]Aggregate `json:"aggregations,omitempty"` + Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits HitsMetadata `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + // NumReducePhases Indicates how many reductions of the results have been performed. + // If this number increases compared to the last retrieved results for a get + // asynch search request, you can expect additional results included in the + // search response. + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ Indicates how many shards have run the query. + // Note that in order for shard results to be included in the search response, + // they need to be reduced first. + Shards_ ShardStatistics `json:"_shards"` + Suggest map[string][]Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +func (s *AsyncSearch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return fmt.Errorf("%s | %w", "Clusters_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + f := Float64(value) + s.MaxScore = &f + case float64: + f := Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumReducePhases", err) + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return fmt.Errorf("%s | %w", "PitId", err) + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return fmt.Errorf("%s | %w", "ScrollId_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + switch elems[0] { + + case "completion": + o := NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminatedEarly", err) + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} + +// NewAsyncSearch returns a AsyncSearch. +func NewAsyncSearch() *AsyncSearch { + r := &AsyncSearch{ + Aggregations: make(map[string]Aggregate), + Fields: make(map[string]json.RawMessage), + Suggest: make(map[string][]Suggest), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/attachmentprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/attachmentprocessor.go new file mode 100644 index 000000000..cc9fd4e23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/attachmentprocessor.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AttachmentProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L346-L387 +type AttachmentProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to get the base64 encoded field from. 
+ Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and field does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // IndexedChars The number of chars being used for extraction to prevent huge fields. + // Use `-1` for no limit. + IndexedChars *int64 `json:"indexed_chars,omitempty"` + // IndexedCharsField Field name from which you can overwrite the number of chars being used for + // extraction. + IndexedCharsField *string `json:"indexed_chars_field,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Array of properties to select to be stored. + // Can be `content`, `title`, `name`, `author`, `keywords`, `date`, + // `content_type`, `content_length`, `language`. + Properties []string `json:"properties,omitempty"` + // RemoveBinary If true, the binary field will be removed from the document + RemoveBinary *bool `json:"remove_binary,omitempty"` + // ResourceName Field containing the name of the resource to decode. + // If specified, the processor passes this resource name to the underlying Tika + // library to enable Resource Name Based Detection. + ResourceName *string `json:"resource_name,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the attachment information. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *AttachmentProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "indexed_chars": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedChars", err) + } + s.IndexedChars = &value + case float64: + f := int64(v) + s.IndexedChars = &f + } + + case "indexed_chars_field": + if err := dec.Decode(&s.IndexedCharsField); err != nil { + return fmt.Errorf("%s | %w", "IndexedCharsField", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + 
return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "remove_binary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveBinary", err) + } + s.RemoveBinary = &value + case bool: + s.RemoveBinary = &v + } + + case "resource_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResourceName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResourceName = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewAttachmentProcessor returns a AttachmentProcessor. +func NewAttachmentProcessor() *AttachmentProcessor { + r := &AttachmentProcessor{} + + return r +} + +type AttachmentProcessorVariant interface { + AttachmentProcessorCaster() *AttachmentProcessor +} + +func (s *AttachmentProcessor) AttachmentProcessorCaster() *AttachmentProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/audit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/audit.go new file mode 100644 index 000000000..eb86d8c53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/audit.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Audit type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L70-L72 +type Audit struct { + Enabled bool `json:"enabled"` + Outputs []string `json:"outputs,omitempty"` +} + +func (s *Audit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "outputs": + if err := dec.Decode(&s.Outputs); err != nil { + return fmt.Errorf("%s | %w", "Outputs", err) + } + + } + } + return nil +} + +// NewAudit returns a Audit. +func NewAudit() *Audit { + r := &Audit{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateapikey.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateapikey.go new file mode 100644 index 000000000..6b9e9be69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateapikey.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AuthenticateApiKey type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/authenticate/SecurityAuthenticateResponse.ts#L44-L47 +type AuthenticateApiKey struct { + Id string `json:"id"` + Name *string `json:"name,omitempty"` +} + +func (s *AuthenticateApiKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewAuthenticateApiKey returns a AuthenticateApiKey. 
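+//
+// Normally populated by decoding a security authenticate response; a
+// hand-built sketch with hypothetical values:
+//
+//	k := NewAuthenticateApiKey()
+//	k.Id = "VuaCfGcBCdbkQm-e5aOx" // hypothetical API key id
+//	name := "my-api-key"          // hypothetical key name
+//	k.Name = &name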
+func NewAuthenticateApiKey() *AuthenticateApiKey { + r := &AuthenticateApiKey{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateduser.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateduser.go new file mode 100644 index 000000000..9a2cf55a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticateduser.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticatedUser type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/types.ts#L60-L65 +type AuthenticatedUser struct { + AuthenticationProvider *AuthenticationProvider `json:"authentication_provider,omitempty"` + AuthenticationRealm UserRealm `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm UserRealm `json:"lookup_realm"` + Metadata Metadata `json:"metadata"` + ProfileUid *string `json:"profile_uid,omitempty"` + Roles []string `json:"roles"` + Username string `json:"username"` +} + +func (s *AuthenticatedUser) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "authentication_provider": + if err := dec.Decode(&s.AuthenticationProvider); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationProvider", err) + } + + case "authentication_realm": + if err := dec.Decode(&s.AuthenticationRealm); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationRealm", err) + } + + case "authentication_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthenticationType = o + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | 
%w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + + case "lookup_realm": + if err := dec.Decode(&s.LookupRealm); err != nil { + return fmt.Errorf("%s | %w", "LookupRealm", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "profile_uid": + if err := dec.Decode(&s.ProfileUid); err != nil { + return fmt.Errorf("%s | %w", "ProfileUid", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewAuthenticatedUser returns a AuthenticatedUser. +func NewAuthenticatedUser() *AuthenticatedUser { + r := &AuthenticatedUser{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticatetoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticatetoken.go new file mode 100644 index 000000000..bd1a8273d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticatetoken.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticateToken type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/authenticate/types.ts#L22-L29 +type AuthenticateToken struct { + Name string `json:"name"` + Type *string `json:"type,omitempty"` +} + +func (s *AuthenticateToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewAuthenticateToken returns a AuthenticateToken. 
+func NewAuthenticateToken() *AuthenticateToken { + r := &AuthenticateToken{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authentication.go new file mode 100644 index 000000000..4ba9f3d58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authentication.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Authentication type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L43-L55 +type Authentication struct { + ApiKey map[string]string `json:"api_key,omitempty"` + AuthenticationRealm AuthenticationRealm `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + LookupRealm AuthenticationRealm `json:"lookup_realm"` + Metadata Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token map[string]string `json:"token,omitempty"` + Username string `json:"username"` +} + +func (s *Authentication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if s.ApiKey == nil { + s.ApiKey = make(map[string]string, 0) + } + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "authentication_realm": + if err := dec.Decode(&s.AuthenticationRealm); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationRealm", err) + } + + case "authentication_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AuthenticationType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthenticationType = o + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return 
fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = &o + + case "lookup_realm": + if err := dec.Decode(&s.LookupRealm); err != nil { + return fmt.Errorf("%s | %w", "LookupRealm", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "token": + if s.Token == nil { + s.Token = make(map[string]string, 0) + } + if err := dec.Decode(&s.Token); err != nil { + return fmt.Errorf("%s | %w", "Token", err) + } + + case "username": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Username = o + + } + } + return nil +} + +// NewAuthentication returns a Authentication. +func NewAuthentication() *Authentication { + r := &Authentication{ + ApiKey: make(map[string]string), + Token: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationprovider.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationprovider.go new file mode 100644 index 000000000..b629d7746 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationprovider.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticationProvider type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/types.ts#L55-L58 +type AuthenticationProvider struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *AuthenticationProvider) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAuthenticationProvider returns a AuthenticationProvider. +func NewAuthenticationProvider() *AuthenticationProvider { + r := &AuthenticationProvider{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationrealm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationrealm.go new file mode 100644 index 000000000..4b3083824 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/authenticationrealm.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AuthenticationRealm type. 
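+//
+// A hand-built sketch with hypothetical values (normally decoded from a
+// delegate_pki response):
+//
+//	r := NewAuthenticationRealm()
+//	r.Name = "pki1" // hypothetical realm name
+//	r.Type = "pki"  // hypothetical realm type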
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delegate_pki/SecurityDelegatePkiResponse.ts#L57-L61 +type AuthenticationRealm struct { + Domain *string `json:"domain,omitempty"` + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *AuthenticationRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "domain": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Domain", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Domain = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewAuthenticationRealm returns a AuthenticationRealm. +func NewAuthenticationRealm() *AuthenticationRealm { + r := &AuthenticationRealm{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregate.go new file mode 100644 index 000000000..4e020cf0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregate.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AutoDateHistogramAggregate type. 
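+//
+// Buckets is a union: as the UnmarshalJSON below shows, it decodes to
+// either map[string]DateHistogramBucket (keyed responses) or
+// []DateHistogramBucket, so consumers typically type-switch:
+//
+//	switch b := agg.Buckets.(type) { // agg: a decoded *AutoDateHistogramAggregate
+//	case []DateHistogramBucket:
+//		_ = len(b)
+//	case map[string]DateHistogramBucket:
+//		_ = len(b)
+//	}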
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L393-L400 +type AutoDateHistogramAggregate struct { + Buckets BucketsDateHistogramBucket `json:"buckets"` + Interval string `json:"interval"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *AutoDateHistogramAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]DateHistogramBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []DateHistogramBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewAutoDateHistogramAggregate returns a AutoDateHistogramAggregate. +func NewAutoDateHistogramAggregate() *AutoDateHistogramAggregate { + r := &AutoDateHistogramAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregation.go new file mode 100644 index 000000000..68fb5f9b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autodatehistogramaggregation.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/minimuminterval" +) + +// AutoDateHistogramAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L72-L110 +type AutoDateHistogramAggregation struct { + // Buckets The target number of buckets. + Buckets *int `json:"buckets,omitempty"` + // Field The field on which to run the aggregation. 
+ Field *string `json:"field,omitempty"` + // Format The date format used to format `key_as_string` in the response. + // If no `format` is specified, the first date format specified in the field + // mapping is used. + Format *string `json:"format,omitempty"` + // MinimumInterval The minimum rounding interval. + // This can make the collection process more efficient, as the aggregation will + // not attempt to round at any interval lower than `minimum_interval`. + MinimumInterval *minimuminterval.MinimumInterval `json:"minimum_interval,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing DateTime `json:"missing,omitempty"` + // Offset Time zone specified as a ISO 8601 UTC offset. + Offset *string `json:"offset,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Script *Script `json:"script,omitempty"` + // TimeZone Time zone ID. + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *AutoDateHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = &value + case float64: + f := int(v) + s.Buckets = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "minimum_interval": + if err := dec.Decode(&s.MinimumInterval); err != nil { + return fmt.Errorf("%s | %w", "MinimumInterval", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "offset": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Offset = &o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewAutoDateHistogramAggregation returns a AutoDateHistogramAggregation. 
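+//
+// A minimal request-side sketch (hypothetical field name and bucket count):
+//
+//	agg := NewAutoDateHistogramAggregation()
+//	field := "@timestamp" // hypothetical date field
+//	n := 10               // target number of buckets
+//	agg.Field = &field
+//	agg.Buckets = &n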
+func NewAutoDateHistogramAggregation() *AutoDateHistogramAggregation { + r := &AutoDateHistogramAggregation{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type AutoDateHistogramAggregationVariant interface { + AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation +} + +func (s *AutoDateHistogramAggregation) AutoDateHistogramAggregationCaster() *AutoDateHistogramAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowedcluster.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowedcluster.go new file mode 100644 index 000000000..cf4df00dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowedcluster.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AutoFollowedCluster type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/stats/types.ts.ts#L26-L30 +type AutoFollowedCluster struct { + ClusterName string `json:"cluster_name"` + LastSeenMetadataVersion int64 `json:"last_seen_metadata_version"` + TimeSinceLastCheckMillis int64 `json:"time_since_last_check_millis"` +} + +func (s *AutoFollowedCluster) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_name": + if err := dec.Decode(&s.ClusterName); err != nil { + return fmt.Errorf("%s | %w", "ClusterName", err) + } + + case "last_seen_metadata_version": + if err := dec.Decode(&s.LastSeenMetadataVersion); err != nil { + return fmt.Errorf("%s | %w", "LastSeenMetadataVersion", err) + } + + case "time_since_last_check_millis": + if err := dec.Decode(&s.TimeSinceLastCheckMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceLastCheckMillis", err) + } + + } + } + return nil +} + +// NewAutoFollowedCluster returns a AutoFollowedCluster. +func NewAutoFollowedCluster() *AutoFollowedCluster { + r := &AutoFollowedCluster{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpattern.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpattern.go new file mode 100644 index 000000000..2b88bd1c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpattern.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AutoFollowPattern type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 +type AutoFollowPattern struct { + Name string `json:"name"` + Pattern AutoFollowPatternSummary `json:"pattern"` +} + +func (s *AutoFollowPattern) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "pattern": + if err := dec.Decode(&s.Pattern); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + + } + } + return nil +} + +// NewAutoFollowPattern returns a AutoFollowPattern. +func NewAutoFollowPattern() *AutoFollowPattern { + r := &AutoFollowPattern{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpatternsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpatternsummary.go new file mode 100644 index 000000000..5543a389e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowpatternsummary.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AutoFollowPatternSummary type. 
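+//
+// A hand-built sketch with hypothetical values (normally decoded from a
+// get_auto_follow_pattern response):
+//
+//	p := NewAutoFollowPatternSummary()
+//	p.Active = true
+//	p.RemoteCluster = "leader-cluster"                           // hypothetical
+//	p.LeaderIndexPatterns = []string{"logs-*"}                   // hypothetical
+//	p.LeaderIndexExclusionPatterns = []string{"logs-internal-*"} // hypothetical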
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 +type AutoFollowPatternSummary struct { + Active bool `json:"active"` + // FollowIndexPattern The name of follower index. + FollowIndexPattern *string `json:"follow_index_pattern,omitempty"` + // LeaderIndexExclusionPatterns An array of simple index patterns that can be used to exclude indices from + // being auto-followed. + LeaderIndexExclusionPatterns []string `json:"leader_index_exclusion_patterns"` + // LeaderIndexPatterns An array of simple index patterns to match against indices in the remote + // cluster specified by the remote_cluster field. + LeaderIndexPatterns []string `json:"leader_index_patterns"` + // MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. + MaxOutstandingReadRequests int `json:"max_outstanding_read_requests"` + // RemoteCluster The remote cluster containing the leader indices to match against. + RemoteCluster string `json:"remote_cluster"` +} + +func (s *AutoFollowPatternSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + s.Active = value + case bool: + s.Active = v + } + + case "follow_index_pattern": + if err := dec.Decode(&s.FollowIndexPattern); err != nil { + return fmt.Errorf("%s | %w", "FollowIndexPattern", err) + } + + case "leader_index_exclusion_patterns": + if err := dec.Decode(&s.LeaderIndexExclusionPatterns); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndexExclusionPatterns", err) + } + + case "leader_index_patterns": + if err := dec.Decode(&s.LeaderIndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndexPatterns", err) + } + + case "max_outstanding_read_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) + } + s.MaxOutstandingReadRequests = value + case float64: + f := int(v) + s.MaxOutstandingReadRequests = f + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + } + } + return nil +} + +// NewAutoFollowPatternSummary returns a AutoFollowPatternSummary. +func NewAutoFollowPatternSummary() *AutoFollowPatternSummary { + r := &AutoFollowPatternSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowstats.go new file mode 100644 index 000000000..7b2e04b02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autofollowstats.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AutoFollowStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/stats/types.ts.ts#L32-L47 +type AutoFollowStats struct { + AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` + // NumberOfFailedFollowIndices The number of indices that the auto-follow coordinator failed to + // automatically follow. + // The causes of recent failures are captured in the logs of the elected master + // node and in the `auto_follow_stats.recent_auto_follow_errors` field. + NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` + // NumberOfFailedRemoteClusterStateRequests The number of times that the auto-follow coordinator failed to retrieve the + // cluster state from a remote cluster registered in a collection of auto-follow + // patterns. + NumberOfFailedRemoteClusterStateRequests int64 `json:"number_of_failed_remote_cluster_state_requests"` + // NumberOfSuccessfulFollowIndices The number of indices that the auto-follow coordinator successfully followed. + NumberOfSuccessfulFollowIndices int64 `json:"number_of_successful_follow_indices"` + // RecentAutoFollowErrors An array of objects representing failures by the auto-follow coordinator. 
+ RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` +} + +func (s *AutoFollowStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_followed_clusters": + if err := dec.Decode(&s.AutoFollowedClusters); err != nil { + return fmt.Errorf("%s | %w", "AutoFollowedClusters", err) + } + + case "number_of_failed_follow_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfFailedFollowIndices", err) + } + s.NumberOfFailedFollowIndices = value + case float64: + f := int64(v) + s.NumberOfFailedFollowIndices = f + } + + case "number_of_failed_remote_cluster_state_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfFailedRemoteClusterStateRequests", err) + } + s.NumberOfFailedRemoteClusterStateRequests = value + case float64: + f := int64(v) + s.NumberOfFailedRemoteClusterStateRequests = f + } + + case "number_of_successful_follow_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfSuccessfulFollowIndices", err) + } + s.NumberOfSuccessfulFollowIndices = value + case float64: + f := int64(v) + s.NumberOfSuccessfulFollowIndices = f + } + + case "recent_auto_follow_errors": + if err := dec.Decode(&s.RecentAutoFollowErrors); err != nil { + return fmt.Errorf("%s | %w", "RecentAutoFollowErrors", err) + } + + } + } + return nil +} + +// NewAutoFollowStats returns a AutoFollowStats. +func NewAutoFollowStats() *AutoFollowStats { + r := &AutoFollowStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingcapacity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingcapacity.go new file mode 100644 index 000000000..9aeb3ec00 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingcapacity.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AutoscalingCapacity type. 
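+//
+// Node and Total are AutoscalingResources values (memory and storage,
+// typically reported in bytes); a sketch with hypothetical numbers:
+//
+//	c := NewAutoscalingCapacity()
+//	c.Node = AutoscalingResources{Memory: 2147483648, Storage: 8589934592}
+//	c.Total = AutoscalingResources{Memory: 4294967296, Storage: 17179869184}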
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 +type AutoscalingCapacity struct { + Node AutoscalingResources `json:"node"` + Total AutoscalingResources `json:"total"` +} + +// NewAutoscalingCapacity returns a AutoscalingCapacity. +func NewAutoscalingCapacity() *AutoscalingCapacity { + r := &AutoscalingCapacity{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdecider.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdecider.go new file mode 100644 index 000000000..9c591e03d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdecider.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AutoscalingDecider type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 +type AutoscalingDecider struct { + ReasonDetails json.RawMessage `json:"reason_details,omitempty"` + ReasonSummary *string `json:"reason_summary,omitempty"` + RequiredCapacity AutoscalingCapacity `json:"required_capacity"` +} + +func (s *AutoscalingDecider) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason_details": + if err := dec.Decode(&s.ReasonDetails); err != nil { + return fmt.Errorf("%s | %w", "ReasonDetails", err) + } + + case "reason_summary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ReasonSummary", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ReasonSummary = &o + + case "required_capacity": + if err := dec.Decode(&s.RequiredCapacity); err != nil { + return fmt.Errorf("%s | %w", "RequiredCapacity", err) + } + + } + } + return nil +} + +// NewAutoscalingDecider returns a AutoscalingDecider. 
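+//
+// Decoded from get_autoscaling_capacity responses; a minimal sketch with a
+// hypothetical reason summary:
+//
+//	d := NewAutoscalingDecider()
+//	summary := "not enough storage" // hypothetical
+//	d.ReasonSummary = &summary
+//	d.RequiredCapacity = *NewAutoscalingCapacity()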
+func NewAutoscalingDecider() *AutoscalingDecider { + r := &AutoscalingDecider{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdeciders.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdeciders.go new file mode 100644 index 000000000..3bc696a77 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingdeciders.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// AutoscalingDeciders type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 +type AutoscalingDeciders struct { + CurrentCapacity AutoscalingCapacity `json:"current_capacity"` + CurrentNodes []AutoscalingNode `json:"current_nodes"` + Deciders map[string]AutoscalingDecider `json:"deciders"` + RequiredCapacity AutoscalingCapacity `json:"required_capacity"` +} + +// NewAutoscalingDeciders returns a AutoscalingDeciders. +func NewAutoscalingDeciders() *AutoscalingDeciders { + r := &AutoscalingDeciders{ + Deciders: make(map[string]AutoscalingDecider), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingnode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingnode.go new file mode 100644 index 000000000..f76d97f92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingnode.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AutoscalingNode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 +type AutoscalingNode struct { + Name string `json:"name"` +} + +func (s *AutoscalingNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewAutoscalingNode returns a AutoscalingNode. +func NewAutoscalingNode() *AutoscalingNode { + r := &AutoscalingNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingpolicy.go new file mode 100644 index 000000000..a3f8e7a2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingpolicy.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// AutoscalingPolicy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L30 +type AutoscalingPolicy struct { + // Deciders Decider settings. + Deciders map[string]json.RawMessage `json:"deciders"` + Roles []string `json:"roles"` +} + +// NewAutoscalingPolicy returns a AutoscalingPolicy. +func NewAutoscalingPolicy() *AutoscalingPolicy { + r := &AutoscalingPolicy{ + Deciders: make(map[string]json.RawMessage), + } + + return r +} + +type AutoscalingPolicyVariant interface { + AutoscalingPolicyCaster() *AutoscalingPolicy +} + +func (s *AutoscalingPolicy) AutoscalingPolicyCaster() *AutoscalingPolicy { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingresources.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingresources.go new file mode 100644 index 000000000..c36e0d937 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/autoscalingresources.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AutoscalingResources type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 +type AutoscalingResources struct { + Memory int `json:"memory"` + Storage int `json:"storage"` +} + +func (s *AutoscalingResources) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Memory", err) + } + s.Memory = value + case float64: + f := int(v) + s.Memory = f + } + + case "storage": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Storage", err) + } + s.Storage = value + case float64: + f := int(v) + s.Storage = f + } + + } + } + return nil +} + +// NewAutoscalingResources returns a AutoscalingResources. +func NewAutoscalingResources() *AutoscalingResources { + r := &AutoscalingResources{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averageaggregation.go new file mode 100644 index 000000000..327149200 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averageaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L55-L55 +type AverageAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *AverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewAverageAggregation returns a AverageAggregation. +func NewAverageAggregation() *AverageAggregation { + r := &AverageAggregation{} + + return r +} + +type AverageAggregationVariant interface { + AverageAggregationCaster() *AverageAggregation +} + +func (s *AverageAggregation) AverageAggregationCaster() *AverageAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averagebucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averagebucketaggregation.go new file mode 100644 index 000000000..445333649 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/averagebucketaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// AverageBucketAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L78-L81 +type AverageBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *AverageBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewAverageBucketAggregation returns a AverageBucketAggregation. +func NewAverageBucketAggregation() *AverageBucketAggregation { + r := &AverageBucketAggregation{} + + return r +} + +type AverageBucketAggregationVariant interface { + AverageBucketAggregationCaster() *AverageBucketAggregation +} + +func (s *AverageBucketAggregation) AverageBucketAggregationCaster() *AverageBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/avgaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/avgaggregate.go new file mode 100644 index 000000000..b8c856cd7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/avgaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AvgAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L218-L222 +type AvgAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *AvgAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewAvgAggregate returns a AvgAggregate. +func NewAvgAggregate() *AvgAggregate { + r := &AvgAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudioservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudioservicesettings.go new file mode 100644 index 000000000..9b08c1723 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudioservicesettings.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AzureAiStudioServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L641-L683 +type AzureAiStudioServiceSettings struct { + // ApiKey A valid API key of your Azure AI Studio model deployment. + // This key can be found on the overview page for your deployment in the + // management section of your Azure AI Studio account. 
+ // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // EndpointType The type of endpoint that is available for deployment through Azure AI + // Studio: `token` or `realtime`. + // The `token` endpoint type is for "pay as you go" endpoints that are billed + // per token. + // The `realtime` endpoint type is for "real-time" endpoints that are billed per + // hour of usage. + EndpointType string `json:"endpoint_type"` + // Provider The model provider for your deployment. + // Note that some providers may support only certain task types. + // Supported providers include: + // + // * `cohere` - available for `text_embedding` and `completion` task types + // * `databricks` - available for `completion` task type only + // * `meta` - available for `completion` task type only + // * `microsoft_phi` - available for `completion` task type only + // * `mistral` - available for `completion` task type only + // * `openai` - available for `text_embedding` and `completion` task types + Provider string `json:"provider"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Azure AI Studio. + // By default, the `azureaistudio` service sets the number of requests allowed + // per minute to 240. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Target The target URL of your Azure AI Studio model deployment. + // This can be found on the overview page for your deployment in the management + // section of your Azure AI Studio account. + Target string `json:"target"` +} + +func (s *AzureAiStudioServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "endpoint_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EndpointType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EndpointType = o + + case "provider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Provider", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Provider = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "target": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Target", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Target = o + + } + } + return nil +} + +// NewAzureAiStudioServiceSettings returns a AzureAiStudioServiceSettings. 
+func NewAzureAiStudioServiceSettings() *AzureAiStudioServiceSettings { + r := &AzureAiStudioServiceSettings{} + + return r +} + +type AzureAiStudioServiceSettingsVariant interface { + AzureAiStudioServiceSettingsCaster() *AzureAiStudioServiceSettings +} + +func (s *AzureAiStudioServiceSettings) AzureAiStudioServiceSettingsCaster() *AzureAiStudioServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudiotasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudiotasksettings.go new file mode 100644 index 000000000..7869c49ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureaistudiotasksettings.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AzureAiStudioTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L685-L713 +type AzureAiStudioTaskSettings struct { + // DoSample For a `completion` task, instruct the inference process to perform sampling. + // It has no effect unless `temperature` or `top_p` is specified. + DoSample *float32 `json:"do_sample,omitempty"` + // MaxNewTokens For a `completion` task, provide a hint for the maximum number of output + // tokens to be generated. + MaxNewTokens *int `json:"max_new_tokens,omitempty"` + // Temperature For a `completion` task, control the apparent creativity of generated + // completions with a sampling temperature. + // It must be a number in the range of 0.0 to 2.0. + // It should not be used if `top_p` is specified. + Temperature *float32 `json:"temperature,omitempty"` + // TopP For a `completion` task, make the model consider the results of the tokens + // with nucleus sampling probability. + // It is an alternative value to `temperature` and must be a number in the range + // of 0.0 to 2.0. + // It should not be used if `temperature` is specified. + TopP *float32 `json:"top_p,omitempty"` + // User For a `text_embedding` task, specify the user issuing the request. + // This information can be used for abuse detection. 
+ User *string `json:"user,omitempty"` +} + +func (s *AzureAiStudioTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "do_sample": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "DoSample", err) + } + f := float32(value) + s.DoSample = &f + case float64: + f := float32(v) + s.DoSample = &f + } + + case "max_new_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNewTokens", err) + } + s.MaxNewTokens = &value + case float64: + f := int(v) + s.MaxNewTokens = &f + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + case "user": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.User = &o + + } + } + return nil +} + +// NewAzureAiStudioTaskSettings returns a AzureAiStudioTaskSettings. +func NewAzureAiStudioTaskSettings() *AzureAiStudioTaskSettings { + r := &AzureAiStudioTaskSettings{} + + return r +} + +type AzureAiStudioTaskSettingsVariant interface { + AzureAiStudioTaskSettingsCaster() *AzureAiStudioTaskSettings +} + +func (s *AzureAiStudioTaskSettings) AzureAiStudioTaskSettingsCaster() *AzureAiStudioTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaiservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaiservicesettings.go new file mode 100644 index 000000000..544b0ee44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaiservicesettings.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AzureOpenAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L724-L769 +type AzureOpenAIServiceSettings struct { + // ApiKey A valid API key for your Azure OpenAI account. + // You must specify either `api_key` or `entra_id`. + // If you do not provide either or you provide both, you will receive an error + // when you try to create your model. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey *string `json:"api_key,omitempty"` + // ApiVersion The Azure API version ID to use. + // It is recommended to use the latest supported non-preview version. + ApiVersion string `json:"api_version"` + // DeploymentId The deployment name of your deployed models. + // Your Azure OpenAI deployments can be found through the Azure OpenAI Studio + // portal that is linked to your subscription. + DeploymentId string `json:"deployment_id"` + // EntraId A valid Microsoft Entra token. + // You must specify either `api_key` or `entra_id`. + // If you do not provide either or you provide both, you will receive an error + // when you try to create your model. + EntraId *string `json:"entra_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Azure. + // The `azureopenai` service sets a default number of requests allowed per + // minute depending on the task type. + // For `text_embedding`, it is set to `1440`. + // For `completion`, it is set to `120`. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // ResourceName The name of your Azure OpenAI resource. + // You can find this from the list of resources in the Azure Portal for your + // subscription.
+ ResourceName string `json:"resource_name"` +} + +func (s *AzureOpenAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = &o + + case "api_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiVersion = o + + case "deployment_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DeploymentId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeploymentId = o + + case "entra_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EntraId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EntraId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "resource_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResourceName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResourceName = o + + } + } + return nil +} + +// NewAzureOpenAIServiceSettings returns a AzureOpenAIServiceSettings. +func NewAzureOpenAIServiceSettings() *AzureOpenAIServiceSettings { + r := &AzureOpenAIServiceSettings{} + + return r +} + +type AzureOpenAIServiceSettingsVariant interface { + AzureOpenAIServiceSettingsCaster() *AzureOpenAIServiceSettings +} + +func (s *AzureOpenAIServiceSettings) AzureOpenAIServiceSettingsCaster() *AzureOpenAIServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaitasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaitasksettings.go new file mode 100644 index 000000000..9a3c6946f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azureopenaitasksettings.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AzureOpenAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L771-L777 +type AzureOpenAITaskSettings struct { + // User For a `completion` or `text_embedding` task, specify the user issuing the + // request. + // This information can be used for abuse detection. + User *string `json:"user,omitempty"` +} + +func (s *AzureOpenAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "user": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.User = &o + + } + } + return nil +} + +// NewAzureOpenAITaskSettings returns a AzureOpenAITaskSettings. +func NewAzureOpenAITaskSettings() *AzureOpenAITaskSettings { + r := &AzureOpenAITaskSettings{} + + return r +} + +type AzureOpenAITaskSettingsVariant interface { + AzureOpenAITaskSettingsCaster() *AzureOpenAITaskSettings +} + +func (s *AzureOpenAITaskSettings) AzureOpenAITaskSettingsCaster() *AzureOpenAITaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepository.go new file mode 100644 index 000000000..be262954d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepository.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// AzureRepository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L40-L50 +type AzureRepository struct { + // Settings The repository settings. + Settings *AzureRepositorySettings `json:"settings,omitempty"` + // Type The Azure repository type. 
+ Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *AzureRepository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s AzureRepository) MarshalJSON() ([]byte, error) { + type innerAzureRepository AzureRepository + tmp := innerAzureRepository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "azure" + + return json.Marshal(tmp) +} + +// NewAzureRepository returns a AzureRepository. +func NewAzureRepository() *AzureRepository { + r := &AzureRepository{} + + return r +} + +type AzureRepositoryVariant interface { + AzureRepositoryCaster() *AzureRepository +} + +func (s *AzureRepository) AzureRepositoryCaster() *AzureRepository { + return s +} + +func (s *AzureRepository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepositorysettings.go new file mode 100644 index 000000000..6aa6eadaf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/azurerepositorysettings.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// AzureRepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L145-L196 +type AzureRepositorySettings struct { + // BasePath The path to the repository data within the container. + // It defaults to the root directory. + // + // NOTE: Don't set `base_path` when configuring a snapshot repository for + // Elastic Cloud Enterprise. + // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments can share the same bucket. 
+ BasePath *string `json:"base_path,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, `500B`. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Client The name of the Azure repository client to use. + Client *string `json:"client,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // Container The Azure container. + Container *string `json:"container,omitempty"` + // DeleteObjectsMaxSize The maximum batch size, between 1 and 256, used for `BlobBatch` requests. + // Defaults to 256 which is the maximum number supported by the Azure blob batch + // API. + DeleteObjectsMaxSize *int `json:"delete_objects_max_size,omitempty"` + // LocationMode Either `primary_only` or `secondary_only`. + // Note that if you set it to `secondary_only`, it will force `readonly` to + // `true`. + LocationMode *string `json:"location_mode,omitempty"` + // MaxConcurrentBatchDeletes The maximum number of concurrent batch delete requests that will be submitted + // for any individual bulk delete with `BlobBatch`. + // Note that the effective number of concurrent deletes is further limited by + // the Azure client connection and event loop thread limits. + // Defaults to 10, minimum is 1, maximum is 100. + MaxConcurrentBatchDeletes *int `json:"max_concurrent_batch_deletes,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. + MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository.
+ Readonly *bool `json:"readonly,omitempty"` +} + +func (s *AzureRepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "base_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BasePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BasePath = &o + + case "chunk_size": + if err := dec.Decode(&s.ChunkSize); err != nil { + return fmt.Errorf("%s | %w", "ChunkSize", err) + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Client", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "container": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Container", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Container = &o + + case "delete_objects_max_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DeleteObjectsMaxSize", err) + } + s.DeleteObjectsMaxSize = &value + case float64: + f := int(v) + s.DeleteObjectsMaxSize = &f + } + + case "location_mode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LocationMode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LocationMode = &o + + case "max_concurrent_batch_deletes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxConcurrentBatchDeletes", err) + } + s.MaxConcurrentBatchDeletes = &value + case float64: + f := int(v) + s.MaxConcurrentBatchDeletes = &f + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "readonly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Readonly", err) + } + s.Readonly = &value + case bool: + s.Readonly = &v + } + + } + } + return nil +} + +// NewAzureRepositorySettings returns a AzureRepositorySettings. 
+func NewAzureRepositorySettings() *AzureRepositorySettings { + r := &AzureRepositorySettings{} + + return r +} + +type AzureRepositorySettingsVariant interface { + AzureRepositorySettingsCaster() *AzureRepositorySettings +} + +func (s *AzureRepositorySettings) AzureRepositorySettingsCaster() *AzureRepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/base.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/base.go new file mode 100644 index 000000000..3c1db531b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/base.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Base type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L27-L30 +type Base struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` +} + +func (s *Base) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewBase returns a Base. +func NewBase() *Base { + r := &Base{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/basqueanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/basqueanalyzer.go new file mode 100644 index 000000000..c0b5894ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/basqueanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BasqueAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L86-L91 +type BasqueAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BasqueAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BasqueAnalyzer) MarshalJSON() ([]byte, error) { + type innerBasqueAnalyzer BasqueAnalyzer + tmp := innerBasqueAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "basque" + + return json.Marshal(tmp) +} + +// NewBasqueAnalyzer returns a BasqueAnalyzer. +func NewBasqueAnalyzer() *BasqueAnalyzer { + r := &BasqueAnalyzer{} + + return r +} + +type BasqueAnalyzerVariant interface { + BasqueAnalyzerCaster() *BasqueAnalyzer +} + +func (s *BasqueAnalyzer) BasqueAnalyzerCaster() *BasqueAnalyzer { + return s +} + +func (s *BasqueAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalianalyzer.go new file mode 100644 index 000000000..92c4e8f75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalianalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BengaliAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L93-L98 +type BengaliAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BengaliAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BengaliAnalyzer) MarshalJSON() ([]byte, error) { + type innerBengaliAnalyzer BengaliAnalyzer + tmp := innerBengaliAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "bengali" + + return json.Marshal(tmp) +} + +// NewBengaliAnalyzer returns a BengaliAnalyzer. +func NewBengaliAnalyzer() *BengaliAnalyzer { + r := &BengaliAnalyzer{} + + return r +} + +type BengaliAnalyzerVariant interface { + BengaliAnalyzerCaster() *BengaliAnalyzer +} + +func (s *BengaliAnalyzer) BengaliAnalyzerCaster() *BengaliAnalyzer { + return s +} + +func (s *BengaliAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalinormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalinormalizationtokenfilter.go new file mode 100644 index 000000000..6d45a6ccf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bengalinormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// BengaliNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L490-L492 +type BengaliNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *BengaliNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BengaliNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerBengaliNormalizationTokenFilter BengaliNormalizationTokenFilter + tmp := innerBengaliNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "bengali_normalization" + + return json.Marshal(tmp) +} + +// NewBengaliNormalizationTokenFilter returns a BengaliNormalizationTokenFilter. +func NewBengaliNormalizationTokenFilter() *BengaliNormalizationTokenFilter { + r := &BengaliNormalizationTokenFilter{} + + return r +} + +type BengaliNormalizationTokenFilterVariant interface { + BengaliNormalizationTokenFilterCaster() *BengaliNormalizationTokenFilter +} + +func (s *BengaliNormalizationTokenFilter) BengaliNormalizationTokenFilterCaster() *BengaliNormalizationTokenFilter { + return s +} + +func (s *BengaliNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/binaryproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/binaryproperty.go new file mode 100644 index 000000000..52bbc64b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/binaryproperty.go @@ -0,0 +1,872 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// BinaryProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L56-L58 +type BinaryProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BinaryProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := 
NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": 
+ oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BinaryProperty) MarshalJSON() ([]byte, error) { + type innerBinaryProperty BinaryProperty + tmp := innerBinaryProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "binary" + + return json.Marshal(tmp) +} + +// NewBinaryProperty returns a BinaryProperty. +func NewBinaryProperty() *BinaryProperty { + r := &BinaryProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type BinaryPropertyVariant interface { + BinaryPropertyCaster() *BinaryProperty +} + +func (s *BinaryProperty) BinaryPropertyCaster() *BinaryProperty { + return s +} + +func (s *BinaryProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/blobdetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/blobdetails.go new file mode 100644 index 000000000..ef869305d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/blobdetails.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BlobDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L250-L284 +type BlobDetails struct { + // Name The name of the blob. + Name string `json:"name"` + // Overwritten Indicates whether the blob was overwritten while the read operations were + // ongoing. + // + // /** + Overwritten bool `json:"overwritten"` + ReadEarly bool `json:"read_early"` + // ReadEnd The position, in bytes, at which read operations completed. + ReadEnd int64 `json:"read_end"` + // ReadStart The position, in bytes, at which read operations started. + ReadStart int64 `json:"read_start"` + // Reads A description of every read operation performed on the blob. + Reads ReadBlobDetails `json:"reads"` + // Size The size of the blob. + Size ByteSize `json:"size"` + // SizeBytes The size of the blob in bytes. + SizeBytes int64 `json:"size_bytes"` +} + +func (s *BlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "overwritten": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overwritten", err) + } + s.Overwritten = value + case bool: + s.Overwritten = v + } + + case "read_early": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEarly", err) + } + s.ReadEarly = value + case bool: + s.ReadEarly = v + } + + case "read_end": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadEnd", err) + } + s.ReadEnd = value + case float64: + f := int64(v) + s.ReadEnd = f + } + + case "read_start": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadStart", err) + } + s.ReadStart = value + case float64: + f := int64(v) + s.ReadStart = f + } + + case "reads": + if err := dec.Decode(&s.Reads); err != nil { + return fmt.Errorf("%s | %w", "Reads", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeBytes", err) + } + s.SizeBytes = value + case float64: + f := int64(v) + s.SizeBytes = f + } + + } + } + return nil +} + +// NewBlobDetails returns a BlobDetails. 
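Worth pausing on a pattern shared by the variant types in this patch that carry a `type` discriminator (BengaliNormalizationTokenFilter, BinaryProperty, BooleanProperty, and the like): each overrides MarshalJSON to pin its literal tag, so the wire form always carries the correct `type` even when the caller never sets it, and any caller-supplied value is overwritten. A minimal sketch, assuming the vendored package is importable at the canonical path shown in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewBinaryProperty() // constructor leaves Type empty
	out, err := json.Marshal(p)    // MarshalJSON substitutes the "binary" literal
	if err != nil {
		panic(err)
	}
	// Prints {"type":"binary"} — the empty Fields/Meta/Properties maps are
	// dropped by omitempty, and the type tag is injected unconditionally.
	fmt.Println(string(out))
}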
+func NewBlobDetails() *BlobDetails { + r := &BlobDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/body.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/body.go new file mode 100644 index 000000000..74deee527 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/body.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Body type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/list_queries/ListQueriesResponse.ts#L24-L30 +type Body struct { + Id int64 `json:"id"` + Node string `json:"node"` + Query string `json:"query"` + RunningTimeNanos int64 `json:"running_time_nanos"` + StartTimeMillis int64 `json:"start_time_millis"` +} + +func (s *Body) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "running_time_nanos": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RunningTimeNanos", err) + } + s.RunningTimeNanos = value + case float64: + f := int64(v) + s.RunningTimeNanos = f + } + + case "start_time_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + s.StartTimeMillis = value + case float64: + f := int64(v) + s.StartTimeMillis = f + } + + } + } + return nil +} + +// NewBody returns a Body. 
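The decoders in these files are deliberately lenient about scalar encodings: boolean and integer fields accept either the native JSON value or a quoted string, falling back to strconv when a string arrives — presumably to tolerate responses and clients that quote large integers. A short sketch of that behavior using the Body type above (the ES|QL query string is illustrative data only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	quoted, plain := types.NewBody(), types.NewBody()
	// Both encodings decode to the same int64: the quoted form goes through
	// strconv.ParseInt, the plain form through the float64 branch.
	_ = json.Unmarshal([]byte(`{"id": "42", "query": "FROM logs"}`), quoted)
	_ = json.Unmarshal([]byte(`{"id": 42, "query": "FROM logs"}`), plain)
	fmt.Println(quoted.Id == plain.Id, quoted.Query) // true FROM logs
}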
+func NewBody() *Body { + r := &Body{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/booleanproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/booleanproperty.go new file mode 100644 index 000000000..fc26f6216 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/booleanproperty.go @@ -0,0 +1,978 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// BooleanProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L60-L75 +type BooleanProperty struct { + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fielddata *NumericFielddata `json:"fielddata,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *bool `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. 
+ TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BooleanProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fielddata": + if err := dec.Decode(&s.Fielddata); err != nil { + return fmt.Errorf("%s | %w", "Fielddata", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + s.NullValue = &value + case bool: + s.NullValue = &v + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BooleanProperty) MarshalJSON() ([]byte, error) { + type innerBooleanProperty BooleanProperty + tmp := innerBooleanProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "boolean" + + return json.Marshal(tmp) +} + +// NewBooleanProperty returns a BooleanProperty. +func NewBooleanProperty() *BooleanProperty { + r := &BooleanProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type BooleanPropertyVariant interface { + BooleanPropertyCaster() *BooleanProperty +} + +func (s *BooleanProperty) BooleanPropertyCaster() *BooleanProperty { + return s +} + +func (s *BooleanProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boolquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boolquery.go new file mode 100644 index 000000000..414b25c74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boolquery.go @@ -0,0 +1,189 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BoolQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L29-L56 +type BoolQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Filter The clause (query) must appear in matching documents. + // However, unlike `must`, the score of the query will be ignored. + Filter []Query `json:"filter,omitempty"` + // MinimumShouldMatch Specifies the number or percentage of `should` clauses returned documents + // must match. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Must The clause (query) must appear in matching documents and will contribute to + // the score. + Must []Query `json:"must,omitempty"` + // MustNot The clause (query) must not appear in the matching documents. + // Because scoring is ignored, a score of `0` is returned for all documents. + MustNot []Query `json:"must_not,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Should The clause (query) should appear in the matching document. 
+ Should []Query `json:"should,omitempty"` +} + +func (s *BoolQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "must": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Must", err) + } + + s.Must = append(s.Must, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Must); err != nil { + return fmt.Errorf("%s | %w", "Must", err) + } + } + + case "must_not": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MustNot", err) + } + + s.MustNot = append(s.MustNot, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MustNot); err != nil { + return fmt.Errorf("%s | %w", "MustNot", err) + } + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "should": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Should", err) + } + + s.Should = append(s.Should, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Should); err != nil { + return fmt.Errorf("%s | %w", "Should", err) + } + } + + } + } + return nil +} + +// NewBoolQuery returns a BoolQuery. +func NewBoolQuery() *BoolQuery { + r := &BoolQuery{} + + return r +} + +type BoolQueryVariant interface { + BoolQueryCaster() *BoolQuery +} + +func (s *BoolQuery) BoolQueryCaster() *BoolQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boostingquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boostingquery.go new file mode 100644 index 000000000..b1be9f4f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boostingquery.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BoostingQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L58-L74 +type BoostingQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Negative Query used to decrease the relevance score of matching documents. + Negative Query `json:"negative"` + // NegativeBoost Floating point number between 0 and 1.0 used to decrease the relevance scores + // of documents matching the `negative` query. + NegativeBoost Float64 `json:"negative_boost"` + // Positive Any returned documents must match this query. + Positive Query `json:"positive"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *BoostingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "negative": + if err := dec.Decode(&s.Negative); err != nil { + return fmt.Errorf("%s | %w", "Negative", err) + } + + case "negative_boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NegativeBoost", err) + } + f := Float64(value) + s.NegativeBoost = f + case float64: + f := Float64(v) + s.NegativeBoost = f + } + + case "positive": + if err := dec.Decode(&s.Positive); err != nil { + return fmt.Errorf("%s | %w", "Positive", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewBoostingQuery returns a BoostingQuery. 
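+//
+// Illustrative use (an editorial sketch; the positive and negative
+// sub-queries are hypothetical placeholders):
+//
+//	q := NewBoostingQuery()
+//	q.Positive = *NewQuery() // documents must match this clause
+//	q.Negative = *NewQuery() // matches here are demoted, not excluded
+//	q.NegativeBoost = 0.5    // demoted documents keep half their score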
+func NewBoostingQuery() *BoostingQuery { + r := &BoostingQuery{} + + return r +} + +type BoostingQueryVariant interface { + BoostingQueryCaster() *BoostingQuery +} + +func (s *BoostingQuery) BoostingQueryCaster() *BoostingQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregate.go new file mode 100644 index 000000000..3006a1fae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregate.go @@ -0,0 +1,279 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BoxPlotAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L808-L827 +type BoxPlotAggregate struct { + Lower Float64 `json:"lower"` + LowerAsString *string `json:"lower_as_string,omitempty"` + Max Float64 `json:"max"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min Float64 `json:"min"` + MinAsString *string `json:"min_as_string,omitempty"` + Q1 Float64 `json:"q1"` + Q1AsString *string `json:"q1_as_string,omitempty"` + Q2 Float64 `json:"q2"` + Q2AsString *string `json:"q2_as_string,omitempty"` + Q3 Float64 `json:"q3"` + Q3AsString *string `json:"q3_as_string,omitempty"` + Upper Float64 `json:"upper"` + UpperAsString *string `json:"upper_as_string,omitempty"` +} + +func (s *BoxPlotAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lower", err) + } + f := Float64(value) + s.Lower = f + case float64: + f := Float64(v) + s.Lower = f + } + + case "lower_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LowerAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerAsString = &o + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + f := Float64(value) + s.Max = f + 
case float64: + f := Float64(v) + s.Max = f + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "q1": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Q1", err) + } + f := Float64(value) + s.Q1 = f + case float64: + f := Float64(v) + s.Q1 = f + } + + case "q1_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Q1AsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q1AsString = &o + + case "q2": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Q2", err) + } + f := Float64(value) + s.Q2 = f + case float64: + f := Float64(v) + s.Q2 = f + } + + case "q2_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Q2AsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q2AsString = &o + + case "q3": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Q3", err) + } + f := Float64(value) + s.Q3 = f + case float64: + f := Float64(v) + s.Q3 = f + } + + case "q3_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Q3AsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q3AsString = &o + + case "upper": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Upper", err) + } + f := Float64(value) + s.Upper = f + case float64: + f := Float64(v) + s.Upper = f + } + + case "upper_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UpperAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperAsString = &o + + } + } + return nil +} + +// NewBoxPlotAggregate returns a BoxPlotAggregate. 
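+//
+// Editorial note: the UnmarshalJSON above tolerates statistics arriving
+// either as JSON numbers or as quoted strings, so a response fragment such
+// as the following (a hypothetical example) decodes cleanly:
+//
+//	var a BoxPlotAggregate
+//	_ = json.Unmarshal([]byte(`{"min":"1.0","q1":1.5,"q2":"2.0","q3":2.5,"max":3.0,"lower":1.0,"upper":3.0}`), &a)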
+func NewBoxPlotAggregate() *BoxPlotAggregate { + r := &BoxPlotAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregation.go new file mode 100644 index 000000000..756408d07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/boxplotaggregation.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint" +) + +// BoxplotAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L57-L68 +type BoxplotAggregation struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. + Compression *Float64 `json:"compression,omitempty"` + // ExecutionHint The default implementation of TDigest is optimized for performance, scaling + // to millions or even billions of sample values while maintaining acceptable + // accuracy levels (close to 1% relative error for millions of samples in some + // cases). + // To use an implementation optimized for accuracy, set this parameter to + // high_accuracy instead. + ExecutionHint *tdigestexecutionhint.TDigestExecutionHint `json:"execution_hint,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
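+	// For example (an editorial sketch of the request body this struct
+	// models, normally nested under a "boxplot" key in an aggregation):
+	//
+	//	{ "field": "load_time", "compression": 200, "missing": 0 }
+	//
+	// Per the Compression doc above, compression 200 caps the TDigest at
+	// 20 * 200 = 4000 nodes.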
+ Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *BoxplotAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Compression", err) + } + f := Float64(value) + s.Compression = &f + case float64: + f := Float64(v) + s.Compression = &f + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewBoxplotAggregation returns a BoxplotAggregation. +func NewBoxplotAggregation() *BoxplotAggregation { + r := &BoxplotAggregation{} + + return r +} + +type BoxplotAggregationVariant interface { + BoxplotAggregationCaster() *BoxplotAggregation +} + +func (s *BoxplotAggregation) BoxplotAggregationCaster() *BoxplotAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/braziliananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/braziliananalyzer.go new file mode 100644 index 000000000..a1eb2ae97 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/braziliananalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BrazilianAnalyzer type. 
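+//
+// It models an analyzer definition such as the following editorial sketch
+// (stopwords may also be a single preset string such as "_brazilian_"):
+//
+//	{ "type": "brazilian", "stopwords": ["de", "a", "o"] }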
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L100-L104 +type BrazilianAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BrazilianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BrazilianAnalyzer) MarshalJSON() ([]byte, error) { + type innerBrazilianAnalyzer BrazilianAnalyzer + tmp := innerBrazilianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "brazilian" + + return json.Marshal(tmp) +} + +// NewBrazilianAnalyzer returns a BrazilianAnalyzer. +func NewBrazilianAnalyzer() *BrazilianAnalyzer { + r := &BrazilianAnalyzer{} + + return r +} + +type BrazilianAnalyzerVariant interface { + BrazilianAnalyzerCaster() *BrazilianAnalyzer +} + +func (s *BrazilianAnalyzer) BrazilianAnalyzerCaster() *BrazilianAnalyzer { + return s +} + +func (s *BrazilianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/brazilianstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/brazilianstemtokenfilter.go new file mode 100644 index 000000000..589076b68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/brazilianstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// BrazilianStemTokenFilter type. 
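+//
+// Editorial note: as with the analyzer types in this package, the
+// MarshalJSON override below always emits the literal discriminator:
+//
+//	b, _ := json.Marshal(BrazilianStemTokenFilter{}) // {"type":"brazilian_stem"}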
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L494-L496 +type BrazilianStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *BrazilianStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BrazilianStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerBrazilianStemTokenFilter BrazilianStemTokenFilter + tmp := innerBrazilianStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "brazilian_stem" + + return json.Marshal(tmp) +} + +// NewBrazilianStemTokenFilter returns a BrazilianStemTokenFilter. +func NewBrazilianStemTokenFilter() *BrazilianStemTokenFilter { + r := &BrazilianStemTokenFilter{} + + return r +} + +type BrazilianStemTokenFilterVariant interface { + BrazilianStemTokenFilterCaster() *BrazilianStemTokenFilter +} + +func (s *BrazilianStemTokenFilter) BrazilianStemTokenFilterCaster() *BrazilianStemTokenFilter { + return s +} + +func (s *BrazilianStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/breaker.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/breaker.go new file mode 100644 index 000000000..edf7d8880 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/breaker.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Breaker type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L472-L497 +type Breaker struct { + // EstimatedSize Estimated memory used for the operation. + EstimatedSize *string `json:"estimated_size,omitempty"` + // EstimatedSizeInBytes Estimated memory used, in bytes, for the operation. 
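+	// (Editorial worked example: per the Overhead doc below, the breaker
+	// charges estimate * overhead, so with overhead 1.03 a raw estimate of
+	// 100,000,000 bytes counts as 103,000,000 bytes against
+	// limit_size_in_bytes before the breaker trips.)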
+ EstimatedSizeInBytes *int64 `json:"estimated_size_in_bytes,omitempty"` + // LimitSize Memory limit for the circuit breaker. + LimitSize *string `json:"limit_size,omitempty"` + // LimitSizeInBytes Memory limit, in bytes, for the circuit breaker. + LimitSizeInBytes *int64 `json:"limit_size_in_bytes,omitempty"` + // Overhead A constant that all estimates for the circuit breaker are multiplied with to + // calculate a final estimate. + Overhead *float32 `json:"overhead,omitempty"` + // Tripped Total number of times the circuit breaker has been triggered and prevented an + // out of memory error. + Tripped *float32 `json:"tripped,omitempty"` +} + +func (s *Breaker) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "estimated_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EstimatedSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EstimatedSize = &o + + case "estimated_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EstimatedSizeInBytes", err) + } + s.EstimatedSizeInBytes = &value + case float64: + f := int64(v) + s.EstimatedSizeInBytes = &f + } + + case "limit_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LimitSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LimitSize = &o + + case "limit_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LimitSizeInBytes", err) + } + s.LimitSizeInBytes = &value + case float64: + f := int64(v) + s.LimitSizeInBytes = &f + } + + case "overhead": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Overhead", err) + } + f := float32(value) + s.Overhead = &f + case float64: + f := float32(v) + s.Overhead = &f + } + + case "tripped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Tripped", err) + } + f := float32(value) + s.Tripped = &f + case float64: + f := float32(v) + s.Tripped = &f + } + + } + } + return nil +} + +// NewBreaker returns a Breaker. +func NewBreaker() *Breaker { + r := &Breaker{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationaggregation.go new file mode 100644 index 000000000..b10e22d69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationaggregation.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// BucketCorrelationAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L139-L146 +type BucketCorrelationAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Function The correlation function to execute. + Function BucketCorrelationFunction `json:"function"` +} + +func (s *BucketCorrelationAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + } + } + return nil +} + +// NewBucketCorrelationAggregation returns a BucketCorrelationAggregation. +func NewBucketCorrelationAggregation() *BucketCorrelationAggregation { + r := &BucketCorrelationAggregation{} + + return r +} + +type BucketCorrelationAggregationVariant interface { + BucketCorrelationAggregationCaster() *BucketCorrelationAggregation +} + +func (s *BucketCorrelationAggregation) BucketCorrelationAggregationCaster() *BucketCorrelationAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunction.go new file mode 100644 index 000000000..16f336648 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunction.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketCorrelationFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L148-L153 +type BucketCorrelationFunction struct { + // CountCorrelation The configuration to calculate a count correlation. This function is designed + // for determining the correlation of a term value and a given metric. + CountCorrelation BucketCorrelationFunctionCountCorrelation `json:"count_correlation"` +} + +// NewBucketCorrelationFunction returns a BucketCorrelationFunction. +func NewBucketCorrelationFunction() *BucketCorrelationFunction { + r := &BucketCorrelationFunction{} + + return r +} + +type BucketCorrelationFunctionVariant interface { + BucketCorrelationFunctionCaster() *BucketCorrelationFunction +} + +func (s *BucketCorrelationFunction) BucketCorrelationFunctionCaster() *BucketCorrelationFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelation.go new file mode 100644 index 000000000..d5d6ebd4b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelation.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketCorrelationFunctionCountCorrelation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L155-L158 +type BucketCorrelationFunctionCountCorrelation struct { + // Indicator The indicator with which to correlate the configured `bucket_path` values. + Indicator BucketCorrelationFunctionCountCorrelationIndicator `json:"indicator"` +} + +// NewBucketCorrelationFunctionCountCorrelation returns a BucketCorrelationFunctionCountCorrelation. 
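+//
+// Illustrative nesting (an editorial sketch; the counts are hypothetical):
+//
+//	f := NewBucketCorrelationFunction()
+//	f.CountCorrelation = BucketCorrelationFunctionCountCorrelation{
+//		Indicator: BucketCorrelationFunctionCountCorrelationIndicator{
+//			DocCount:     200,
+//			Expectations: []Float64{10, 20, 30},
+//		},
+//	}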
+func NewBucketCorrelationFunctionCountCorrelation() *BucketCorrelationFunctionCountCorrelation { + r := &BucketCorrelationFunctionCountCorrelation{} + + return r +} + +type BucketCorrelationFunctionCountCorrelationVariant interface { + BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation +} + +func (s *BucketCorrelationFunctionCountCorrelation) BucketCorrelationFunctionCountCorrelationCaster() *BucketCorrelationFunctionCountCorrelation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go new file mode 100644 index 000000000..b69a3905c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BucketCorrelationFunctionCountCorrelationIndicator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L160-L178 +type BucketCorrelationFunctionCountCorrelationIndicator struct { + // DocCount The total number of documents that initially created the expectations. It’s + // required to be greater + // than or equal to the sum of all values in the buckets_path as this is the + // originating superset of data + // to which the term values are correlated. + DocCount int `json:"doc_count"` + // Expectations An array of numbers with which to correlate the configured `bucket_path` + // values. + // The length of this value must always equal the number of buckets returned by + // the `bucket_path`. + Expectations []Float64 `json:"expectations"` + // Fractions An array of fractions to use when averaging and calculating variance. This + // should be used if + // the pre-calculated data and the buckets_path have known gaps. The length of + // fractions, if provided, + // must equal expectations. 
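+	// (Editorial example: expectations [10, 10, 10] may be paired with
+	// fractions [0.5, 0.25, 0.25]; the two slices must be equally long.)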
+ Fractions []Float64 `json:"fractions,omitempty"` +} + +func (s *BucketCorrelationFunctionCountCorrelationIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int(v) + s.DocCount = f + } + + case "expectations": + if err := dec.Decode(&s.Expectations); err != nil { + return fmt.Errorf("%s | %w", "Expectations", err) + } + + case "fractions": + if err := dec.Decode(&s.Fractions); err != nil { + return fmt.Errorf("%s | %w", "Fractions", err) + } + + } + } + return nil +} + +// NewBucketCorrelationFunctionCountCorrelationIndicator returns a BucketCorrelationFunctionCountCorrelationIndicator. +func NewBucketCorrelationFunctionCountCorrelationIndicator() *BucketCorrelationFunctionCountCorrelationIndicator { + r := &BucketCorrelationFunctionCountCorrelationIndicator{} + + return r +} + +type BucketCorrelationFunctionCountCorrelationIndicatorVariant interface { + BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator +} + +func (s *BucketCorrelationFunctionCountCorrelationIndicator) BucketCorrelationFunctionCountCorrelationIndicatorCaster() *BucketCorrelationFunctionCountCorrelationIndicator { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketinfluencer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketinfluencer.go new file mode 100644 index 000000000..fc69a1260 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketinfluencer.go @@ -0,0 +1,210 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BucketInfluencer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Bucket.ts#L79-L127 +type BucketInfluencer struct { + // AnomalyScore A normalized score between 0-100, which is calculated for each bucket + // influencer. This score might be updated as + // newer data is analyzed. + AnomalyScore Float64 `json:"anomaly_score"` + // BucketSpan The length of the bucket in seconds. 
This value matches the bucket span that + // is specified in the job. + BucketSpan int64 `json:"bucket_span"` + // InfluencerFieldName The field name of the influencer. + InfluencerFieldName string `json:"influencer_field_name"` + // InitialAnomalyScore The score between 0-100 for each bucket influencer. This score is the initial + // value that was calculated at the + // time the bucket was processed. + InitialAnomalyScore Float64 `json:"initial_anomaly_score"` + // IsInterim If true, this is an interim result. In other words, the results are + // calculated based on partial input data. + IsInterim bool `json:"is_interim"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // Probability The probability that the bucket has this behavior, in the range 0 to 1. This + // value can be held to a high precision + // of over 300 decimal places, so the `anomaly_score` is provided as a + // human-readable and friendly interpretation of + // this. + Probability Float64 `json:"probability"` + // RawAnomalyScore Internal. + RawAnomalyScore Float64 `json:"raw_anomaly_score"` + // ResultType Internal. This value is always set to `bucket_influencer`. + ResultType string `json:"result_type"` + // Timestamp The start time of the bucket for which these results were calculated. + Timestamp int64 `json:"timestamp"` + // TimestampString The start time of the bucket for which these results were calculated. + TimestampString DateTime `json:"timestamp_string,omitempty"` +} + +func (s *BucketInfluencer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyScore", err) + } + f := Float64(value) + s.AnomalyScore = f + case float64: + f := Float64(v) + s.AnomalyScore = f + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "influencer_field_name": + if err := dec.Decode(&s.InfluencerFieldName); err != nil { + return fmt.Errorf("%s | %w", "InfluencerFieldName", err) + } + + case "initial_anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitialAnomalyScore", err) + } + f := Float64(value) + s.InitialAnomalyScore = f + case float64: + f := Float64(v) + s.InitialAnomalyScore = f + } + + case "is_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsInterim", err) + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "raw_anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err 
!= nil { + return fmt.Errorf("%s | %w", "RawAnomalyScore", err) + } + f := Float64(value) + s.RawAnomalyScore = f + case float64: + f := Float64(v) + s.RawAnomalyScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return fmt.Errorf("%s | %w", "TimestampString", err) + } + + } + } + return nil +} + +// NewBucketInfluencer returns a BucketInfluencer. +func NewBucketInfluencer() *BucketInfluencer { + r := &BucketInfluencer{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketksaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketksaggregation.go new file mode 100644 index 000000000..85d261bb6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketksaggregation.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BucketKsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L103-L137 +type BucketKsAggregation struct { + // Alternative A list of string values indicating which K-S test alternative to calculate. + // The valid values + // are: "greater", "less", "two_sided". This parameter is key for determining + // the K-S statistic used + // when calculating the K-S test. Default value is all possible alternative + // hypotheses. + Alternative []string `json:"alternative,omitempty"` + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Fractions A list of doubles indicating the distribution of the samples with which to + // compare to the `buckets_path` results. + // In typical usage this is the overall proportion of documents in each bucket, + // which is compared with the actual + // document proportions in each bucket from the sibling aggregation counts. 
The + // default is to assume that overall + // documents are uniformly distributed on these buckets, which they would be if + // one used equal percentiles of a + // metric to define the bucket end points. + Fractions []Float64 `json:"fractions,omitempty"` + // SamplingMethod Indicates the sampling methodology when calculating the K-S test. Note, this + // is sampling of the returned values. + // This determines the cumulative distribution function (CDF) points used + // comparing the two samples. Default is + // `upper_tail`, which emphasizes the upper end of the CDF points. Valid options + // are: `upper_tail`, `uniform`, + // and `lower_tail`. + SamplingMethod *string `json:"sampling_method,omitempty"` +} + +func (s *BucketKsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alternative": + if err := dec.Decode(&s.Alternative); err != nil { + return fmt.Errorf("%s | %w", "Alternative", err) + } + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "fractions": + if err := dec.Decode(&s.Fractions); err != nil { + return fmt.Errorf("%s | %w", "Fractions", err) + } + + case "sampling_method": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SamplingMethod", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SamplingMethod = &o + + } + } + return nil +} + +// NewBucketKsAggregation returns a BucketKsAggregation. +func NewBucketKsAggregation() *BucketKsAggregation { + r := &BucketKsAggregation{} + + return r +} + +type BucketKsAggregationVariant interface { + BucketKsAggregationCaster() *BucketKsAggregation +} + +func (s *BucketKsAggregation) BucketKsAggregationCaster() *BucketKsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketmetricvalueaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketmetricvalueaggregate.go new file mode 100644 index 000000000..3936ad7c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketmetricvalueaggregate.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BucketMetricValueAggregate type. 
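+//
+// It models single-metric pipeline output such as a max_bucket result, for
+// example (an editorial sketch):
+//
+//	{ "value": 763.0, "value_as_string": "763.0", "keys": ["2024-01-03"] }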
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L250-L253 +type BucketMetricValueAggregate struct { + Keys []string `json:"keys"` + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *BucketMetricValueAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keys": + if err := dec.Decode(&s.Keys); err != nil { + return fmt.Errorf("%s | %w", "Keys", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewBucketMetricValueAggregate returns a BucketMetricValueAggregate. +func NewBucketMetricValueAggregate() *BucketMetricValueAggregate { + r := &BucketMetricValueAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsadjacencymatrixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsadjacencymatrixbucket.go new file mode 100644 index 000000000..431ff39a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsadjacencymatrixbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsAdjacencyMatrixBucket holds the union for the following types: +// +// map[string]AdjacencyMatrixBucket +// []AdjacencyMatrixBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsAdjacencyMatrixBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsapikeyquerycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsapikeyquerycontainer.go new file mode 100644 index 000000000..5e40c30fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsapikeyquerycontainer.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsApiKeyQueryContainer holds the union for the following types: +// +// map[string]ApiKeyQueryContainer +// []ApiKeyQueryContainer +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsApiKeyQueryContainer any + +type BucketsApiKeyQueryContainerVariant interface { + BucketsApiKeyQueryContainerCaster() *BucketsApiKeyQueryContainer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscompositebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscompositebucket.go new file mode 100644 index 000000000..e787c357d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscompositebucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsCompositeBucket holds the union for the following types: +// +// map[string]CompositeBucket +// []CompositeBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsCompositeBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscriptaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscriptaggregation.go new file mode 100644 index 000000000..74040521c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketscriptaggregation.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// BucketScriptAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L83-L91 +type BucketScriptAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Script The script to run for this aggregation. 
+ Script *Script `json:"script,omitempty"` +} + +func (s *BucketScriptAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewBucketScriptAggregation returns a BucketScriptAggregation. +func NewBucketScriptAggregation() *BucketScriptAggregation { + r := &BucketScriptAggregation{} + + return r +} + +type BucketScriptAggregationVariant interface { + BucketScriptAggregationCaster() *BucketScriptAggregation +} + +func (s *BucketScriptAggregation) BucketScriptAggregationCaster() *BucketScriptAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdatehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdatehistogrambucket.go new file mode 100644 index 000000000..749519d8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdatehistogrambucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsDateHistogramBucket holds the union for the following types: +// +// map[string]DateHistogramBucket +// []DateHistogramBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsDateHistogramBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdoubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdoubletermsbucket.go new file mode 100644 index 000000000..384d78382 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsdoubletermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsDoubleTermsBucket holds the union for the following types: +// +// map[string]DoubleTermsBucket +// []DoubleTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsDoubleTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketselectoraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketselectoraggregation.go new file mode 100644 index 000000000..14d07c7ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketselectoraggregation.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// BucketSelectorAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L93-L101 +type BucketSelectorAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Script The script to run for this aggregation. 
+ Script *Script `json:"script,omitempty"` +} + +func (s *BucketSelectorAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewBucketSelectorAggregation returns a BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + r := &BucketSelectorAggregation{} + + return r +} + +type BucketSelectorAggregationVariant interface { + BucketSelectorAggregationCaster() *BucketSelectorAggregation +} + +func (s *BucketSelectorAggregation) BucketSelectorAggregationCaster() *BucketSelectorAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfiltersbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfiltersbucket.go new file mode 100644 index 000000000..c440eed52 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfiltersbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsFiltersBucket holds the union for the following types: +// +// map[string]FiltersBucket +// []FiltersBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsFiltersBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfrequentitemsetsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfrequentitemsetsbucket.go new file mode 100644 index 000000000..0ed7aafa8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsfrequentitemsetsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
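BucketScriptAggregation and BucketSelectorAggregation (both above) share a wire shape: buckets_path names sibling metrics, and the script computes a numeric value (bucket_script) or a keep/drop boolean (bucket_selector). A decoding sketch with an illustrative payload; the aggregation names are invented for the example, and note that the untyped BucketsPath union surfaces a JSON object as map[string]any after a plain decode.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{
		"buckets_path": {"totalSales": "total_sales", "tShirtSales": "t-shirts>sales"},
		"gap_policy": "skip",
		"script": {"source": "params.tShirtSales / params.totalSales * 100"}
	}`)

	var agg types.BucketScriptAggregation
	if err := json.Unmarshal(raw, &agg); err != nil {
		panic(err)
	}

	// BucketsPath is an untyped union; the object form lands as map[string]any.
	if paths, ok := agg.BucketsPath.(map[string]any); ok {
		fmt.Println("named paths:", paths)
	}
	fmt.Println("gap policy set:", agg.GapPolicy != nil, "script set:", agg.Script != nil)
}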
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsFrequentItemSetsBucket holds the union for the following types: +// +// map[string]FrequentItemSetsBucket +// []FrequentItemSetsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsFrequentItemSetsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohashgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohashgridbucket.go new file mode 100644 index 000000000..49a8baa87 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohashgridbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsGeoHashGridBucket holds the union for the following types: +// +// map[string]GeoHashGridBucket +// []GeoHashGridBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsGeoHashGridBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohexgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohexgridbucket.go new file mode 100644 index 000000000..614c430bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeohexgridbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsGeoHexGridBucket holds the union for the following types: +// +// map[string]GeoHexGridBucket +// []GeoHexGridBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsGeoHexGridBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeotilegridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeotilegridbucket.go new file mode 100644 index 000000000..c8ec99d78 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsgeotilegridbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsGeoTileGridBucket holds the union for the following types: +// +// map[string]GeoTileGridBucket +// []GeoTileGridBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsGeoTileGridBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketshistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketshistogrambucket.go new file mode 100644 index 000000000..ec2c542ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketshistogrambucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsHistogramBucket holds the union for the following types: +// +// map[string]HistogramBucket +// []HistogramBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsHistogramBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsipprefixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsipprefixbucket.go new file mode 100644 index 000000000..0ea52705b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsipprefixbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsIpPrefixBucket holds the union for the following types: +// +// map[string]IpPrefixBucket +// []IpPrefixBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsIpPrefixBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsiprangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsiprangebucket.go new file mode 100644 index 000000000..63deffabc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsiprangebucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
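For a concrete view of the two shapes these unions document, here is the histogram variant populated both ways. This is a sketch assuming the generated HistogramBucket type elsewhere in this package exposes Key and DocCount fields; the values are illustrative, and the map form is what a request with "keyed": true produces.

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// The same bucket in the two shapes the union documents.
var keyedBuckets types.BucketsHistogramBucket = map[string]types.HistogramBucket{
	"0.0": {Key: 0, DocCount: 12},
}

var plainBuckets types.BucketsHistogramBucket = []types.HistogramBucket{
	{Key: 0, DocCount: 12},
}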
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsIpRangeBucket holds the union for the following types: +// +// map[string]IpRangeBucket +// []IpRangeBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsIpRangeBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongraretermsbucket.go new file mode 100644 index 000000000..286d5e7eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongraretermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsLongRareTermsBucket holds the union for the following types: +// +// map[string]LongRareTermsBucket +// []LongRareTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsLongRareTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongtermsbucket.go new file mode 100644 index 000000000..dee4f3acb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketslongtermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsLongTermsBucket holds the union for the following types: +// +// map[string]LongTermsBucket +// []LongTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsLongTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsmultitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsmultitermsbucket.go new file mode 100644 index 000000000..5c283375a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsmultitermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsMultiTermsBucket holds the union for the following types: +// +// map[string]MultiTermsBucket +// []MultiTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsMultiTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsortaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsortaggregation.go new file mode 100644 index 000000000..c587aec08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsortaggregation.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// BucketSortAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L180-L204 +type BucketSortAggregation struct { + // From Buckets in positions prior to `from` will be truncated. + From *int `json:"from,omitempty"` + // GapPolicy The policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Size The number of buckets to return. + // Defaults to all buckets of the parent aggregation. + Size *int `json:"size,omitempty"` + // Sort The list of fields to sort on. + Sort []SortCombinations `json:"sort,omitempty"` +} + +func (s *BucketSortAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + } + } + return nil +} + +// NewBucketSortAggregation returns a BucketSortAggregation. 
+func NewBucketSortAggregation() *BucketSortAggregation { + r := &BucketSortAggregation{} + + return r +} + +type BucketSortAggregationVariant interface { + BucketSortAggregationCaster() *BucketSortAggregation +} + +func (s *BucketSortAggregation) BucketSortAggregationCaster() *BucketSortAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketspath.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketspath.go new file mode 100644 index 000000000..2f175fa3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketspath.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsPath holds the union for the following types: +// +// string +// []string +// map[string]string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L53-L59 +type BucketsPath any + +type BucketsPathVariant interface { + BucketsPathCaster() *BucketsPath +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsquery.go new file mode 100644 index 000000000..fa4ca1d79 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsquery.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
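BucketSortAggregation (above) truncates and reorders its parent's buckets, and BucketsPath is the three-way union used throughout the pipeline aggregations. A small construction sketch; "total_sales" and "sales_deriv" are illustrative sibling-aggregation names, not anything defined by this package.

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// newTopThreeSort keeps each parent bucket list to its top three entries,
// sorted on the illustrative sibling path "total_sales".
func newTopThreeSort() *types.BucketSortAggregation {
	size := 3
	agg := types.NewBucketSortAggregation()
	agg.Size = &size
	// SortCombinations is itself a union; the plain string form names a path.
	agg.Sort = []types.SortCombinations{"total_sales"}
	return agg
}

// bucketsPathShapes shows the three shapes the BucketsPath union documents.
func bucketsPathShapes() []types.BucketsPath {
	return []types.BucketsPath{
		"total_sales",                             // single path
		[]string{"total_sales", "sales_deriv"},    // multiple paths
		map[string]string{"total": "total_sales"}, // named paths, for scripts
	}
}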
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsQuery holds the union for the following types: +// +// map[string]Query +// []Query +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsQuery any + +type BucketsQueryVariant interface { + BucketsQueryCaster() *BucketsQuery +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsrangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsrangebucket.go new file mode 100644 index 000000000..5dc406477 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsrangebucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsRangeBucket holds the union for the following types: +// +// map[string]RangeBucket +// []RangeBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsRangeBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantlongtermsbucket.go new file mode 100644 index 000000000..ceb2f680a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantlongtermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
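The *Variant interfaces, such as BucketsQueryVariant above, are the contract by which the generated request builders accept anything that can cast itself into a union field. A toy implementation, purely to illustrate that contract; the namedQueries type is invented for this sketch.

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// namedQueries is a hypothetical caller-side type that can stand in
// wherever a BucketsQuery union is accepted.
type namedQueries map[string]types.Query

// BucketsQueryCaster satisfies the generated BucketsQueryVariant interface.
func (n namedQueries) BucketsQueryCaster() *types.BucketsQuery {
	m := map[string]types.Query(n)
	var u types.BucketsQuery = m
	return &u
}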
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsSignificantLongTermsBucket holds the union for the following types: +// +// map[string]SignificantLongTermsBucket +// []SignificantLongTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsSignificantLongTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantstringtermsbucket.go new file mode 100644 index 000000000..f04349356 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketssignificantstringtermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsSignificantStringTermsBucket holds the union for the following types: +// +// map[string]SignificantStringTermsBucket +// []SignificantStringTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsSignificantStringTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringraretermsbucket.go new file mode 100644 index 000000000..ae46524d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringraretermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsStringRareTermsBucket holds the union for the following types: +// +// map[string]StringRareTermsBucket +// []StringRareTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsStringRareTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringtermsbucket.go new file mode 100644 index 000000000..fd306c9eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsstringtermsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsStringTermsBucket holds the union for the following types: +// +// map[string]StringTermsBucket +// []StringTermsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsStringTermsBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketstimeseriesbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketstimeseriesbucket.go new file mode 100644 index 000000000..4821f77d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketstimeseriesbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsTimeSeriesBucket holds the union for the following types: +// +// map[string]TimeSeriesBucket +// []TimeSeriesBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsTimeSeriesBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsummary.go new file mode 100644 index 000000000..2155c8064 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsummary.go @@ -0,0 +1,200 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BucketSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Bucket.ts#L30-L77 +type BucketSummary struct { + // AnomalyScore The maximum anomaly score, between 0-100, for any of the bucket influencers. + // This is an overall, rate-limited + // score for the job. All the anomaly records in the bucket contribute to this + // score. This value might be updated as + // new data is analyzed. + AnomalyScore Float64 `json:"anomaly_score"` + BucketInfluencers []BucketInfluencer `json:"bucket_influencers"` + // BucketSpan The length of the bucket in seconds. This value matches the bucket span that + // is specified in the job. + BucketSpan int64 `json:"bucket_span"` + // EventCount The number of input data records processed in this bucket. + EventCount int64 `json:"event_count"` + // InitialAnomalyScore The maximum anomaly score for any of the bucket influencers. This is the + // initial value that was calculated at the + // time the bucket was processed. + InitialAnomalyScore Float64 `json:"initial_anomaly_score"` + // IsInterim If true, this is an interim result. In other words, the results are + // calculated based on partial input data. + IsInterim bool `json:"is_interim"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // ProcessingTimeMs The amount of time, in milliseconds, that it took to analyze the bucket + // contents and calculate results. + ProcessingTimeMs int64 `json:"processing_time_ms"` + // ResultType Internal. This value is always set to bucket. 
+ ResultType string `json:"result_type"` + // Timestamp The start time of the bucket. This timestamp uniquely identifies the bucket. + // Events that occur exactly at the + // timestamp of the bucket are included in the results for the bucket. + Timestamp int64 `json:"timestamp"` + // TimestampString The start time of the bucket. This timestamp uniquely identifies the bucket. + // Events that occur exactly at the + // timestamp of the bucket are included in the results for the bucket. + TimestampString DateTime `json:"timestamp_string,omitempty"` +} + +func (s *BucketSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyScore", err) + } + f := Float64(value) + s.AnomalyScore = f + case float64: + f := Float64(v) + s.AnomalyScore = f + } + + case "bucket_influencers": + if err := dec.Decode(&s.BucketInfluencers); err != nil { + return fmt.Errorf("%s | %w", "BucketInfluencers", err) + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "event_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EventCount", err) + } + s.EventCount = value + case float64: + f := int64(v) + s.EventCount = f + } + + case "initial_anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitialAnomalyScore", err) + } + f := Float64(value) + s.InitialAnomalyScore = f + case float64: + f := Float64(v) + s.InitialAnomalyScore = f + } + + case "is_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsInterim", err) + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "processing_time_ms": + if err := dec.Decode(&s.ProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "ProcessingTimeMs", err) + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return fmt.Errorf("%s | %w", "TimestampString", err) + } + + } + } + return nil +} + +// NewBucketSummary returns a BucketSummary. 
+func NewBucketSummary() *BucketSummary { + r := &BucketSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvariablewidthhistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvariablewidthhistogrambucket.go new file mode 100644 index 000000000..dbc3f1d40 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvariablewidthhistogrambucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsVariableWidthHistogramBucket holds the union for the following types: +// +// map[string]VariableWidthHistogramBucket +// []VariableWidthHistogramBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsVariableWidthHistogramBucket any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvoid.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvoid.go new file mode 100644 index 000000000..d839ca8ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bucketsvoid.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
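BucketSummary (above) uses the same lenient token loop as the other decoders in this vendor drop: several numeric fields accept either JSON numbers or quoted strings. A decode sketch with an illustrative ML bucket payload; the quoted anomaly_score deliberately exercises the string branch.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{
		"anomaly_score": "91.2",
		"bucket_span": 3600,
		"event_count": 420,
		"initial_anomaly_score": 85.0,
		"is_interim": false,
		"job_id": "my-job",
		"processing_time_ms": 12,
		"result_type": "bucket",
		"timestamp": 1712000000000
	}`)

	var b types.BucketSummary
	if err := json.Unmarshal(raw, &b); err != nil {
		panic(err)
	}
	fmt.Printf("job %s: score %.1f (interim=%v)\n", b.JobId, float64(b.AnomalyScore), b.IsInterim)
}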
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// BucketsVoid holds the union for the following types: +// +// map[string]any +// []any +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L346-L355 +type BucketsVoid any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/buildinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/buildinformation.go new file mode 100644 index 000000000..7acfd62cd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/buildinformation.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BuildInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/types.ts#L24-L27 +type BuildInformation struct { + Date DateTime `json:"date"` + Hash string `json:"hash"` +} + +func (s *BuildInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "date": + if err := dec.Decode(&s.Date); err != nil { + return fmt.Errorf("%s | %w", "Date", err) + } + + case "hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Hash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hash = o + + } + } + return nil +} + +// NewBuildInformation returns a BuildInformation. +func NewBuildInformation() *BuildInformation { + r := &BuildInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulgariananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulgariananalyzer.go new file mode 100644 index 000000000..98f8330af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulgariananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BulgarianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L106-L111 +type BulgarianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *BulgarianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s BulgarianAnalyzer) MarshalJSON() ([]byte, error) { + type innerBulgarianAnalyzer BulgarianAnalyzer + tmp := innerBulgarianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "bulgarian" + + return json.Marshal(tmp) +} + +// NewBulgarianAnalyzer returns a BulgarianAnalyzer. +func NewBulgarianAnalyzer() *BulgarianAnalyzer { + r := &BulgarianAnalyzer{} + + return r +} + +type BulgarianAnalyzerVariant interface { + BulgarianAnalyzerCaster() *BulgarianAnalyzer +} + +func (s *BulgarianAnalyzer) BulgarianAnalyzerCaster() *BulgarianAnalyzer { + return s +} + +func (s *BulgarianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkerror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkerror.go new file mode 100644 index 000000000..dc2a62625 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkerror.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BulkError type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Bulk.ts#L24-L33 +type BulkError struct { + // Count The number of errors + Count int `json:"count"` + // Details Details about the errors, keyed by role name + Details map[string]ErrorCause `json:"details"` +} + +func (s *BulkError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "details": + if s.Details == nil { + s.Details = make(map[string]ErrorCause, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + } + } + return nil +} + +// NewBulkError returns a BulkError. +func NewBulkError() *BulkError { + r := &BulkError{ + Details: make(map[string]ErrorCause), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkindexbyscrollfailure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkindexbyscrollfailure.go new file mode 100644 index 000000000..11caa6376 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkindexbyscrollfailure.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
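
The custom `UnmarshalJSON` on `BulkError` above shows a pattern repeated throughout these generated types: scalar fields are decoded through an `any` plus a type switch, so numbers that arrive as JSON strings still parse instead of failing as they would with stock `encoding/json` into an `int`. A quick sketch against the vendored type:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both payloads decode to Count == 3: the generated decoder routes the
	// value through an `any` and falls back to strconv.Atoi for strings.
	for _, payload := range []string{`{"count": 3}`, `{"count": "3"}`} {
		var e types.BulkError
		if err := json.Unmarshal([]byte(payload), &e); err != nil {
			panic(err)
		}
		fmt.Println(e.Count)
	}
}
```
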
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BulkIndexByScrollFailure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Errors.ts#L60-L65 +type BulkIndexByScrollFailure struct { + Cause ErrorCause `json:"cause"` + Id string `json:"id"` + Index string `json:"index"` + Status int `json:"status"` +} + +func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cause": + if err := dec.Decode(&s.Cause); err != nil { + return fmt.Errorf("%s | %w", "Cause", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "status": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + } + } + return nil +} + +// NewBulkIndexByScrollFailure returns a BulkIndexByScrollFailure. +func NewBulkIndexByScrollFailure() *BulkIndexByScrollFailure { + r := &BulkIndexByScrollFailure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkstats.go new file mode 100644 index 000000000..74ebfdc7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bulkstats.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BulkStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L71-L81 +type BulkStats struct { + AvgSize ByteSize `json:"avg_size,omitempty"` + AvgSizeInBytes int64 `json:"avg_size_in_bytes"` + AvgTime Duration `json:"avg_time,omitempty"` + AvgTimeInMillis int64 `json:"avg_time_in_millis"` + TotalOperations int64 `json:"total_operations"` + TotalSize ByteSize `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *BulkStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_size": + if err := dec.Decode(&s.AvgSize); err != nil { + return fmt.Errorf("%s | %w", "AvgSize", err) + } + + case "avg_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvgSizeInBytes", err) + } + s.AvgSizeInBytes = value + case float64: + f := int64(v) + s.AvgSizeInBytes = f + } + + case "avg_time": + if err := dec.Decode(&s.AvgTime); err != nil { + return fmt.Errorf("%s | %w", "AvgTime", err) + } + + case "avg_time_in_millis": + if err := dec.Decode(&s.AvgTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "AvgTimeInMillis", err) + } + + case "total_operations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalOperations", err) + } + s.TotalOperations = value + case float64: + f := int64(v) + s.TotalOperations = f + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewBulkStats returns a BulkStats. +func NewBulkStats() *BulkStats { + r := &BulkStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytenumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytenumberproperty.go new file mode 100644 index 000000000..4635801dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytenumberproperty.go @@ -0,0 +1,988 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
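
`BulkStats` above carries each measurement twice: an optional human-readable union (`avg_size`, `total_time`, typed `ByteSize`/`Duration`) and a numeric twin without `omitempty` (`*_in_bytes`, `*_in_millis`). For arithmetic the numeric fields are the safe choice; a small sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	s := types.NewBulkStats()
	s.TotalOperations = 120
	s.TotalTimeInMillis = 6000

	// Average time per bulk operation, computed from the numeric twins.
	per := time.Duration(s.TotalTimeInMillis/s.TotalOperations) * time.Millisecond
	fmt.Println("avg per op:", per) // 50ms
}
```
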
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// ByteNumberProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L187-L190 +type ByteNumberProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *byte `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + // TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. 
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { + type innerByteNumberProperty ByteNumberProperty + tmp := innerByteNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "byte" + + return json.Marshal(tmp) +} + +// NewByteNumberProperty returns a ByteNumberProperty. +func NewByteNumberProperty() *ByteNumberProperty { + r := &ByteNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ByteNumberPropertyVariant interface { + ByteNumberPropertyCaster() *ByteNumberProperty +} + +func (s *ByteNumberProperty) ByteNumberPropertyCaster() *ByteNumberProperty { + return s +} + +func (s *ByteNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesize.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesize.go new file mode 100644 index 000000000..842904e53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesize.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ByteSize holds the union for the following types: +// +// int64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L90-L91 +type ByteSize any + +type ByteSizeVariant interface { + ByteSizeCaster() *ByteSize +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesprocessor.go new file mode 100644 index 000000000..318c02c43 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/bytesprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// BytesProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L556-L572 +type BytesProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to convert. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *BytesProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewBytesProcessor returns a BytesProcessor. +func NewBytesProcessor() *BytesProcessor { + r := &BytesProcessor{} + + return r +} + +type BytesProcessorVariant interface { + BytesProcessorCaster() *BytesProcessor +} + +func (s *BytesProcessor) BytesProcessorCaster() *BytesProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachequeries.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachequeries.go new file mode 100644 index 000000000..e216124cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachequeries.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
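
The large `type` switches in `ByteNumberProperty.UnmarshalJSON` above (duplicated for `fields` and `properties`, and repeated in every other property type) are how the generated code rebuilds concrete Go types from the `type` discriminator in a mapping, defaulting to `object` when the key is absent and falling back to a generic `Property` for unknown kinds; `MarshalJSON` pins the literal back on the way out. A round-trip sketch, assuming the sibling `KeywordProperty` type from the same package follows the same marshalling pattern:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewByteNumberProperty()
	p.Fields["raw"] = types.NewKeywordProperty() // a multi-field, as in a mapping

	data, err := json.Marshal(p) // MarshalJSON pins "type": "byte"
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var out types.ByteNumberProperty
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	// The "type" discriminator routed the sub-field back to a concrete type.
	fmt.Printf("%T\n", out.Fields["raw"]) // *types.KeywordProperty
}
```
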
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CacheQueries type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L434-L436 +type CacheQueries struct { + Enabled bool `json:"enabled"` +} + +func (s *CacheQueries) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewCacheQueries returns a CacheQueries. +func NewCacheQueries() *CacheQueries { + r := &CacheQueries{} + + return r +} + +type CacheQueriesVariant interface { + CacheQueriesCaster() *CacheQueries +} + +func (s *CacheQueries) CacheQueriesCaster() *CacheQueries { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachestats.go new file mode 100644 index 000000000..665b81ed5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cachestats.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CacheStats type. 
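
`CacheQueries` above maps the `index.queries.cache.enabled` index setting. Settings APIs frequently hand values back as strings, which is why the decoder accepts `"true"` as well as `true`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both forms decode: the generated decoder applies strconv.ParseBool
	// when the JSON value arrives as a string.
	for _, payload := range []string{`{"enabled": true}`, `{"enabled": "true"}`} {
		var c types.CacheQueries
		if err := json.Unmarshal([]byte(payload), &c); err != nil {
			panic(err)
		}
		fmt.Println(c.Enabled)
	}
}
```
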
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/stats/types.ts#L38-L50 +type CacheStats struct { + Count int `json:"count"` + Evictions int `json:"evictions"` + Hits int `json:"hits"` + HitsTimeInMillis int64 `json:"hits_time_in_millis"` + Misses int `json:"misses"` + MissesTimeInMillis int64 `json:"misses_time_in_millis"` + NodeId string `json:"node_id"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *CacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "evictions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = value + case float64: + f := int(v) + s.Evictions = f + } + + case "hits": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + s.Hits = value + case float64: + f := int(v) + s.Hits = f + } + + case "hits_time_in_millis": + if err := dec.Decode(&s.HitsTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "HitsTimeInMillis", err) + } + + case "misses": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Misses", err) + } + s.Misses = value + case float64: + f := int(v) + s.Misses = f + } + + case "misses_time_in_millis": + if err := dec.Decode(&s.MissesTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "MissesTimeInMillis", err) + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewCacheStats returns a CacheStats. +func NewCacheStats() *CacheStats { + r := &CacheStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendar.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendar.go new file mode 100644 index 000000000..e08e299d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendar.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
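
`CacheStats` above mirrors one node's enrich-cache counters. Deriving a hit rate is a matter of combining `hits` and `misses`; a sketch:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// hitRate derives a cache hit ratio from one node's enrich cache counters.
func hitRate(s *types.CacheStats) float64 {
	total := s.Hits + s.Misses
	if total == 0 {
		return 0
	}
	return float64(s.Hits) / float64(total)
}

func main() {
	s := types.NewCacheStats()
	s.Hits, s.Misses = 90, 10
	fmt.Printf("hit rate: %.0f%%\n", hitRate(s)*100) // 90%
}
```
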
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Calendar type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_calendars/types.ts#L22-L29 +type Calendar struct { + // CalendarId A string that uniquely identifies a calendar. + CalendarId string `json:"calendar_id"` + // Description A description of the calendar. + Description *string `json:"description,omitempty"` + // JobIds An array of anomaly detection job identifiers. + JobIds []string `json:"job_ids"` +} + +func (s *Calendar) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return fmt.Errorf("%s | %w", "CalendarId", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + if err := dec.Decode(&s.JobIds); err != nil { + return fmt.Errorf("%s | %w", "JobIds", err) + } + + } + } + return nil +} + +// NewCalendar returns a Calendar. +func NewCalendar() *Calendar { + r := &Calendar{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendarevent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendarevent.go new file mode 100644 index 000000000..00574ebe7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/calendarevent.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CalendarEvent type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/CalendarEvent.ts#L24-L44 +type CalendarEvent struct { + // CalendarId A string that uniquely identifies a calendar. + CalendarId *string `json:"calendar_id,omitempty"` + // Description A description of the scheduled event. + Description string `json:"description"` + // EndTime The timestamp for the end of the scheduled event in milliseconds since the + // epoch or ISO 8601 format. + EndTime DateTime `json:"end_time"` + EventId *string `json:"event_id,omitempty"` + // ForceTimeShift Shift time by this many seconds. For example adjust time for daylight savings + // changes + ForceTimeShift *int `json:"force_time_shift,omitempty"` + // SkipModelUpdate When true the model will not be updated for this calendar period. + SkipModelUpdate *bool `json:"skip_model_update,omitempty"` + // SkipResult When true the model will not create results for this calendar period. + SkipResult *bool `json:"skip_result,omitempty"` + // StartTime The timestamp for the beginning of the scheduled event in milliseconds since + // the epoch or ISO 8601 format. + StartTime DateTime `json:"start_time"` +} + +func (s *CalendarEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return fmt.Errorf("%s | %w", "CalendarId", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "end_time": + if err := dec.Decode(&s.EndTime); err != nil { + return fmt.Errorf("%s | %w", "EndTime", err) + } + + case "event_id": + if err := dec.Decode(&s.EventId); err != nil { + return fmt.Errorf("%s | %w", "EventId", err) + } + + case "force_time_shift": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForceTimeShift", err) + } + s.ForceTimeShift = &value + case float64: + f := int(v) + s.ForceTimeShift = &f + } + + case "skip_model_update": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipModelUpdate", err) + } + s.SkipModelUpdate = &value + case bool: + s.SkipModelUpdate = &v + } + + case "skip_result": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipResult", err) + } + s.SkipResult = &value + case bool: + s.SkipResult = &v + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + } + } + return nil +} + +// NewCalendarEvent returns a CalendarEvent. 
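CalendarEvent applies the same leniency to booleans: `skip_result` and `skip_model_update` may arrive as `true` or `"true"`, and the decoder falls back to `strconv.ParseBool` for the quoted form. A self-contained sketch of that fallback, with a hypothetical `flexBool` standing in for the vendored fields:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexBool accepts a JSON bool or a quoted bool, as the generated
// CalendarEvent decoder does for skip_result and skip_model_update.
type flexBool bool

func (b *flexBool) UnmarshalJSON(data []byte) error {
	var tmp any
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	switch v := tmp.(type) {
	case bool:
		*b = flexBool(v)
	case string:
		parsed, err := strconv.ParseBool(v)
		if err != nil {
			return err
		}
		*b = flexBool(parsed)
	}
	return nil
}

func main() {
	var x, y flexBool
	_ = json.Unmarshal([]byte(`true`), &x)
	_ = json.Unmarshal([]byte(`"true"`), &y)
	fmt.Println(x, y) // true true
}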
+func NewCalendarEvent() *CalendarEvent { + r := &CalendarEvent{} + + return r +} + +type CalendarEventVariant interface { + CalendarEventCaster() *CalendarEvent +} + +func (s *CalendarEvent) CalendarEventCaster() *CalendarEvent { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregate.go new file mode 100644 index 000000000..eebec5424 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregate.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CardinalityAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L140-L143 +type CardinalityAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Value int64 `json:"value"` +} + +func (s *CardinalityAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + } + } + return nil +} + +// NewCardinalityAggregate returns a CardinalityAggregate. +func NewCardinalityAggregate() *CardinalityAggregate { + r := &CardinalityAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregation.go new file mode 100644 index 000000000..09fa23e69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cardinalityaggregation.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cardinalityexecutionmode" +) + +// CardinalityAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L93-L105 +type CardinalityAggregation struct { + // ExecutionHint Mechanism by which cardinality aggregations is run. + ExecutionHint *cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // PrecisionThreshold A unique count below which counts are expected to be close to accurate. + // This allows to trade memory for accuracy. + PrecisionThreshold *int `json:"precision_threshold,omitempty"` + Rehash *bool `json:"rehash,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *CardinalityAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "precision_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrecisionThreshold", err) + } + s.PrecisionThreshold = &value + case float64: + f := int(v) + s.PrecisionThreshold = &f + } + + case "rehash": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Rehash", err) + } + s.Rehash = &value + case bool: + s.Rehash = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewCardinalityAggregation returns a CardinalityAggregation. 
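Since `precision_threshold` trades memory for accuracy, a typical request pins it explicitly. A sketch of building and serializing the aggregation with the vendored types (import path taken from this diff; the generic `ptr` helper is ours, and the expected JSON reflects the default marshaller):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// ptr is a small helper for the pointer-typed optional fields.
func ptr[T any](v T) *T { return &v }

func main() {
	agg := types.CardinalityAggregation{
		Field:              ptr("user.id"),
		PrecisionThreshold: ptr(40000),
	}
	body, _ := json.Marshal(map[string]any{
		"unique_users": map[string]any{"cardinality": agg},
	})
	// Expected shape:
	// {"unique_users":{"cardinality":{"field":"user.id","precision_threshold":40000}}}
	fmt.Println(string(body))
}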
+func NewCardinalityAggregation() *CardinalityAggregation { + r := &CardinalityAggregation{} + + return r +} + +type CardinalityAggregationVariant interface { + CardinalityAggregationCaster() *CardinalityAggregation +} + +func (s *CardinalityAggregation) CardinalityAggregationCaster() *CardinalityAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catalananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catalananalyzer.go new file mode 100644 index 000000000..0cf465bb2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catalananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CatalanAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L113-L118 +type CatalanAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CatalanAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CatalanAnalyzer) MarshalJSON() ([]byte, error) { + type innerCatalanAnalyzer CatalanAnalyzer + tmp := innerCatalanAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "catalan" + + return json.Marshal(tmp) +} + +// NewCatalanAnalyzer returns a CatalanAnalyzer. 
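The MarshalJSON override above is why the discriminator can never be lost: it copies the struct into an alias type and hard-codes `Type` to `"catalan"` before encoding. A quick sketch of the effect (same import assumption as above); even a zero-value analyzer serializes with its type tag:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.NewCatalanAnalyzer() // Type is never set by the caller
	out, _ := json.Marshal(a)
	fmt.Println(string(out)) // {"type":"catalan"}, injected by MarshalJSON
}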
+func NewCatalanAnalyzer() *CatalanAnalyzer { + r := &CatalanAnalyzer{} + + return r +} + +type CatalanAnalyzerVariant interface { + CatalanAnalyzerCaster() *CatalanAnalyzer +} + +func (s *CatalanAnalyzer) CatalanAnalyzerCaster() *CatalanAnalyzer { + return s +} + +func (s *CatalanAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catanonalydetectorcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catanonalydetectorcolumns.go new file mode 100644 index 000000000..e57a9b680 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catanonalydetectorcolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn" +) + +// CatAnonalyDetectorColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L402-L404 +type CatAnonalyDetectorColumns []catanomalydetectorcolumn.CatAnomalyDetectorColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catcomponenttemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catcomponenttemplate.go new file mode 100644 index 000000000..54152babc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catcomponenttemplate.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CatComponentTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/component_templates/types.ts#L20-L28 +type CatComponentTemplate struct { + AliasCount string `json:"alias_count"` + IncludedIn string `json:"included_in"` + MappingCount string `json:"mapping_count"` + MetadataCount string `json:"metadata_count"` + Name string `json:"name"` + SettingsCount string `json:"settings_count"` + Version *string `json:"version,omitempty"` +} + +func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AliasCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AliasCount = o + + case "included_in": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IncludedIn", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncludedIn = o + + case "mapping_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MappingCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MappingCount = o + + case "metadata_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MetadataCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MetadataCount = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "settings_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SettingsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SettingsCount = o + + case "version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Version = &o + + } + } + return nil +} + +// NewCatComponentTemplate returns a CatComponentTemplate. +func NewCatComponentTemplate() *CatComponentTemplate { + r := &CatComponentTemplate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdatafeedcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdatafeedcolumns.go new file mode 100644 index 000000000..b22d335ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdatafeedcolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn" +) + +// CatDatafeedColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1284-L1284 +type CatDatafeedColumns []catdatafeedcolumn.CatDatafeedColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdfacolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdfacolumns.go new file mode 100644 index 000000000..b0acc5a86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catdfacolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn" +) + +// CatDfaColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1283-L1283 +type CatDfaColumns []catdfacolumn.CatDfaColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzer.go new file mode 100644 index 000000000..4d8dd82de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzer.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CategorizationAnalyzer holds the union for the following types: +// +// string +// CategorizationAnalyzerDefinition +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L181-L182 +type CategorizationAnalyzer any + +type CategorizationAnalyzerVariant interface { + CategorizationAnalyzerCaster() *CategorizationAnalyzer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzerdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzerdefinition.go new file mode 100644 index 000000000..49ee8cfc0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizationanalyzerdefinition.go @@ -0,0 +1,747 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CategorizationAnalyzerDefinition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L184-L198 +type CategorizationAnalyzerDefinition struct { + // CharFilter One or more character filters. In addition to the built-in character filters, + // other plugins can provide more character filters. If this property is not + // specified, no character filters are applied prior to categorization. If you + // are customizing some other aspect of the analyzer and you need to achieve the + // equivalent of `categorization_filters` (which are not permitted when some + // other aspect of the analyzer is customized), add them here as pattern replace + // character filters. + CharFilter []CharFilter `json:"char_filter,omitempty"` + // Filter One or more token filters. In addition to the built-in token filters, other + // plugins can provide more token filters. If this property is not specified, no + // token filters are applied prior to categorization. 
+ Filter []TokenFilter `json:"filter,omitempty"` + // Tokenizer The name or definition of the tokenizer to use after character filters are + // applied. This property is compulsory if `categorization_analyzer` is + // specified as an object. Machine learning provides a tokenizer called + // `ml_standard` that tokenizes in a way that has been determined to produce + // good categorization results on a variety of log file formats for logs in + // English. If you want to use that tokenizer but change the character or token + // filters, specify "tokenizer": "ml_standard" in your + // `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is + // available, which tokenizes in the same way as the non-customizable tokenizer + // in old versions of the product (before 6.2). `ml_classic` was the default + // categorization tokenizer in versions 6.2 to 7.13, so if you need + // categorization identical to the default for jobs created in these versions, + // specify "tokenizer": "ml_classic" in your `categorization_analyzer`. + Tokenizer Tokenizer `json:"tokenizer,omitempty"` +} + +func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "char_filter": + + buf := []json.RawMessage{} + dec.Decode(&buf) + for _, rawMsg := range buf { + + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + kind := make(map[string]string, 0) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "html_strip": + o := NewHtmlStripCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "html_strip", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "mapping": + o := NewMappingCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "mapping", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "pattern_replace": + o := NewPatternReplaceCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_replace", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "icu_normalizer": + o := NewIcuNormalizationCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_normalizer", err) + } + s.CharFilter = append(s.CharFilter, *o) + case "kuromoji_iteration_mark": + o := NewKuromojiIterationMarkCharFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_iteration_mark", err) + } + s.CharFilter = append(s.CharFilter, *o) + default: + o := new(any) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter = append(s.CharFilter, *o) + } + } + + case "filter": + + buf := []json.RawMessage{} + dec.Decode(&buf) + for _, rawMsg := range buf { + + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + kind := make(map[string]string, 0) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "apostrophe": + o := NewApostropheTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "apostrophe", err) + } + s.Filter = append(s.Filter, *o) + case "arabic_stem": + o := NewArabicStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "arabic_stem", err) + } + s.Filter = append(s.Filter, *o) + case 
"arabic_normalization": + o := NewArabicNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "arabic_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "asciifolding": + o := NewAsciiFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "asciifolding", err) + } + s.Filter = append(s.Filter, *o) + case "bengali_normalization": + o := NewBengaliNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "bengali_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "brazilian_stem": + o := NewBrazilianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "brazilian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "cjk_bigram": + o := NewCjkBigramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "cjk_bigram", err) + } + s.Filter = append(s.Filter, *o) + case "cjk_width": + o := NewCjkWidthTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "cjk_width", err) + } + s.Filter = append(s.Filter, *o) + case "classic": + o := NewClassicTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) + } + s.Filter = append(s.Filter, *o) + case "common_grams": + o := NewCommonGramsTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "common_grams", err) + } + s.Filter = append(s.Filter, *o) + case "condition": + o := NewConditionTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "condition", err) + } + s.Filter = append(s.Filter, *o) + case "czech_stem": + o := NewCzechStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "czech_stem", err) + } + s.Filter = append(s.Filter, *o) + case "decimal_digit": + o := NewDecimalDigitTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "decimal_digit", err) + } + s.Filter = append(s.Filter, *o) + case "delimited_payload": + o := NewDelimitedPayloadTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "delimited_payload", err) + } + s.Filter = append(s.Filter, *o) + case "dutch_stem": + o := NewDutchStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "dutch_stem", err) + } + s.Filter = append(s.Filter, *o) + case "edge_ngram": + o := NewEdgeNGramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "edge_ngram", err) + } + s.Filter = append(s.Filter, *o) + case "elision": + o := NewElisionTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "elision", err) + } + s.Filter = append(s.Filter, *o) + case "fingerprint": + o := NewFingerprintTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "fingerprint", err) + } + s.Filter = append(s.Filter, *o) + case "flatten_graph": + o := NewFlattenGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "flatten_graph", err) + } + s.Filter = append(s.Filter, *o) + case "french_stem": + o := NewFrenchStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "french_stem", err) + } + s.Filter = append(s.Filter, *o) + case "german_normalization": + o := 
NewGermanNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "german_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "german_stem": + o := NewGermanStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "german_stem", err) + } + s.Filter = append(s.Filter, *o) + case "hindi_normalization": + o := NewHindiNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hindi_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "hunspell": + o := NewHunspellTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hunspell", err) + } + s.Filter = append(s.Filter, *o) + case "hyphenation_decompounder": + o := NewHyphenationDecompounderTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "hyphenation_decompounder", err) + } + s.Filter = append(s.Filter, *o) + case "indic_normalization": + o := NewIndicNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "indic_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "keep_types": + o := NewKeepTypesTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keep_types", err) + } + s.Filter = append(s.Filter, *o) + case "keep": + o := NewKeepWordsTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keep", err) + } + s.Filter = append(s.Filter, *o) + case "keyword_marker": + o := NewKeywordMarkerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword_marker", err) + } + s.Filter = append(s.Filter, *o) + case "keyword_repeat": + o := NewKeywordRepeatTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword_repeat", err) + } + s.Filter = append(s.Filter, *o) + case "kstem": + o := NewKStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kstem", err) + } + s.Filter = append(s.Filter, *o) + case "length": + o := NewLengthTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "length", err) + } + s.Filter = append(s.Filter, *o) + case "limit": + o := NewLimitTokenCountTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "limit", err) + } + s.Filter = append(s.Filter, *o) + case "lowercase": + o := NewLowercaseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "lowercase", err) + } + s.Filter = append(s.Filter, *o) + case "min_hash": + o := NewMinHashTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "min_hash", err) + } + s.Filter = append(s.Filter, *o) + case "multiplexer": + o := NewMultiplexerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "multiplexer", err) + } + s.Filter = append(s.Filter, *o) + case "ngram": + o := NewNGramTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ngram", err) + } + s.Filter = append(s.Filter, *o) + case "nori_part_of_speech": + o := NewNoriPartOfSpeechTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "nori_part_of_speech", err) + } + s.Filter = append(s.Filter, *o) + case "pattern_capture": + o := NewPatternCaptureTokenFilter() + if err := 
localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_capture", err) + } + s.Filter = append(s.Filter, *o) + case "pattern_replace": + o := NewPatternReplaceTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern_replace", err) + } + s.Filter = append(s.Filter, *o) + case "persian_normalization": + o := NewPersianNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "persian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "persian_stem": + o := NewPersianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "persian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "porter_stem": + o := NewPorterStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "porter_stem", err) + } + s.Filter = append(s.Filter, *o) + case "predicate_token_filter": + o := NewPredicateTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "predicate_token_filter", err) + } + s.Filter = append(s.Filter, *o) + case "remove_duplicates": + o := NewRemoveDuplicatesTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "remove_duplicates", err) + } + s.Filter = append(s.Filter, *o) + case "reverse": + o := NewReverseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "reverse", err) + } + s.Filter = append(s.Filter, *o) + case "russian_stem": + o := NewRussianStemTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "russian_stem", err) + } + s.Filter = append(s.Filter, *o) + case "scandinavian_folding": + o := NewScandinavianFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "scandinavian_folding", err) + } + s.Filter = append(s.Filter, *o) + case "scandinavian_normalization": + o := NewScandinavianNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "scandinavian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "serbian_normalization": + o := NewSerbianNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "serbian_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "shingle": + o := NewShingleTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "shingle", err) + } + s.Filter = append(s.Filter, *o) + case "snowball": + o := NewSnowballTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "snowball", err) + } + s.Filter = append(s.Filter, *o) + case "sorani_normalization": + o := NewSoraniNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "sorani_normalization", err) + } + s.Filter = append(s.Filter, *o) + case "stemmer_override": + o := NewStemmerOverrideTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stemmer_override", err) + } + s.Filter = append(s.Filter, *o) + case "stemmer": + o := NewStemmerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stemmer", err) + } + s.Filter = append(s.Filter, *o) + case "stop": + o := NewStopTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "stop", err) + } + s.Filter = append(s.Filter, *o) + case 
"synonym_graph": + o := NewSynonymGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "synonym_graph", err) + } + s.Filter = append(s.Filter, *o) + case "synonym": + o := NewSynonymTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "synonym", err) + } + s.Filter = append(s.Filter, *o) + case "trim": + o := NewTrimTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "trim", err) + } + s.Filter = append(s.Filter, *o) + case "truncate": + o := NewTruncateTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "truncate", err) + } + s.Filter = append(s.Filter, *o) + case "unique": + o := NewUniqueTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "unique", err) + } + s.Filter = append(s.Filter, *o) + case "uppercase": + o := NewUppercaseTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "uppercase", err) + } + s.Filter = append(s.Filter, *o) + case "word_delimiter_graph": + o := NewWordDelimiterGraphTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "word_delimiter_graph", err) + } + s.Filter = append(s.Filter, *o) + case "word_delimiter": + o := NewWordDelimiterTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "word_delimiter", err) + } + s.Filter = append(s.Filter, *o) + case "ja_stop": + o := NewJaStopTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ja_stop", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_stemmer": + o := NewKuromojiStemmerTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_stemmer", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_readingform": + o := NewKuromojiReadingFormTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_readingform", err) + } + s.Filter = append(s.Filter, *o) + case "kuromoji_part_of_speech": + o := NewKuromojiPartOfSpeechTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_part_of_speech", err) + } + s.Filter = append(s.Filter, *o) + case "icu_collation": + o := NewIcuCollationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_collation", err) + } + s.Filter = append(s.Filter, *o) + case "icu_folding": + o := NewIcuFoldingTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_folding", err) + } + s.Filter = append(s.Filter, *o) + case "icu_normalizer": + o := NewIcuNormalizationTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_normalizer", err) + } + s.Filter = append(s.Filter, *o) + case "icu_transform": + o := NewIcuTransformTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_transform", err) + } + s.Filter = append(s.Filter, *o) + case "phonetic": + o := NewPhoneticTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "phonetic", err) + } + s.Filter = append(s.Filter, *o) + case "dictionary_decompounder": + o := NewDictionaryDecompounderTokenFilter() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "dictionary_decompounder", err) + } + s.Filter = append(s.Filter, *o) + default: + o := 
new(any) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter = append(s.Filter, *o) + } + } + + case "tokenizer": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + kind := make(map[string]string, 0) + localDec := json.NewDecoder(source) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + + switch kind["type"] { + + case "char_group": + o := NewCharGroupTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "char_group", err) + } + s.Tokenizer = *o + case "classic": + o := NewClassicTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "classic", err) + } + s.Tokenizer = *o + case "edge_ngram": + o := NewEdgeNGramTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "edge_ngram", err) + } + s.Tokenizer = *o + case "keyword": + o := NewKeywordTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword", err) + } + s.Tokenizer = *o + case "letter": + o := NewLetterTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "letter", err) + } + s.Tokenizer = *o + case "lowercase": + o := NewLowercaseTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "lowercase", err) + } + s.Tokenizer = *o + case "ngram": + o := NewNGramTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ngram", err) + } + s.Tokenizer = *o + case "path_hierarchy": + o := NewPathHierarchyTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "path_hierarchy", err) + } + s.Tokenizer = *o + case "pattern": + o := NewPatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "pattern", err) + } + s.Tokenizer = *o + case "simple_pattern": + o := NewSimplePatternTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern", err) + } + s.Tokenizer = *o + case "simple_pattern_split": + o := NewSimplePatternSplitTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "simple_pattern_split", err) + } + s.Tokenizer = *o + case "standard": + o := NewStandardTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "standard", err) + } + s.Tokenizer = *o + case "thai": + o := NewThaiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "thai", err) + } + s.Tokenizer = *o + case "uax_url_email": + o := NewUaxEmailUrlTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "uax_url_email", err) + } + s.Tokenizer = *o + case "whitespace": + o := NewWhitespaceTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "whitespace", err) + } + s.Tokenizer = *o + case "icu_tokenizer": + o := NewIcuTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_tokenizer", err) + } + s.Tokenizer = *o + case "kuromoji_tokenizer": + o := NewKuromojiTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "kuromoji_tokenizer", err) + } + s.Tokenizer = *o + case "nori_tokenizer": + o := NewNoriTokenizer() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "nori_tokenizer", err) + } + s.Tokenizer = *o + default: + if err := localDec.Decode(&s.Tokenizer); err 
!= nil { + return fmt.Errorf("Tokenizer | %w", err) + } + } + + } + } + return nil +} + +// NewCategorizationAnalyzerDefinition returns a CategorizationAnalyzerDefinition. +func NewCategorizationAnalyzerDefinition() *CategorizationAnalyzerDefinition { + r := &CategorizationAnalyzerDefinition{} + + return r +} + +type CategorizationAnalyzerDefinitionVariant interface { + CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition +} + +func (s *CategorizationAnalyzerDefinition) CategorizationAnalyzerDefinitionCaster() *CategorizationAnalyzerDefinition { + return s +} + +func (s *CategorizationAnalyzerDefinition) CategorizationAnalyzerCaster() *CategorizationAnalyzer { + o := CategorizationAnalyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextaggregation.go new file mode 100644 index 000000000..d09a8fcc1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextaggregation.go @@ -0,0 +1,279 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CategorizeTextAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1117-L1183 +type CategorizeTextAggregation struct { + // CategorizationAnalyzer The categorization analyzer specifies how the text is analyzed and tokenized + // before being categorized. + // The syntax is very similar to that used to define the analyzer in the analyze + // API. This property + // cannot be used at the same time as `categorization_filters`. + CategorizationAnalyzer CategorizeTextAnalyzer `json:"categorization_analyzer,omitempty"` + // CategorizationFilters This property expects an array of regular expressions. The expressions are + // used to filter out matching + // sequences from the categorization field values. You can use this + // functionality to fine tune the categorization + // by excluding sequences from consideration when categories are defined. For + // example, you can exclude SQL + // statements that appear in your log files. This property cannot be used at the + // same time as categorization_analyzer. + // If you only want to define simple regular expression filters that are applied + // prior to tokenization, setting + // this property is the easiest method. 
If you also want to customize the + // tokenizer or post-tokenization filtering, + // use the categorization_analyzer property instead and include the filters as + // pattern_replace character filters. + CategorizationFilters []string `json:"categorization_filters,omitempty"` + // Field The semi-structured text field to categorize. + Field string `json:"field"` + // MaxMatchedTokens The maximum number of token positions to match on before attempting to merge + // categories. Larger + // values will use more memory and create narrower categories. Max allowed value + // is 100. + MaxMatchedTokens *int `json:"max_matched_tokens,omitempty"` + // MaxUniqueTokens The maximum number of unique tokens at any position up to max_matched_tokens. + // Must be larger than 1. + // Smaller values use less memory and create fewer categories. Larger values + // will use more memory and + // create narrower categories. Max allowed value is 100. + MaxUniqueTokens *int `json:"max_unique_tokens,omitempty"` + // MinDocCount The minimum number of documents in a bucket to be returned to the results. + MinDocCount *int `json:"min_doc_count,omitempty"` + // ShardMinDocCount The minimum number of documents in a bucket to be returned from the shard + // before merging. + ShardMinDocCount *int `json:"shard_min_doc_count,omitempty"` + // ShardSize The number of categorization buckets to return from each shard before merging + // all the results. + ShardSize *int `json:"shard_size,omitempty"` + // SimilarityThreshold The minimum percentage of tokens that must match for text to be added to the + // category bucket. Must + // be between 1 and 100. The larger the value the narrower the categories. + // Larger values will increase memory + // usage and create narrower categories. + SimilarityThreshold *int `json:"similarity_threshold,omitempty"` + // Size The number of buckets to return. 
+ Size *int `json:"size,omitempty"` +} + +func (s *CategorizeTextAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "categorization_analyzer": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + categorizationanalyzer_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + + switch t { + + case "char_filter", "filter", "tokenizer": + o := NewCustomCategorizeTextAnalyzer() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + s.CategorizationAnalyzer = o + break categorizationanalyzer_field + + } + } + if s.CategorizationAnalyzer == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.CategorizationAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "CategorizationAnalyzer", err) + } + } + + case "categorization_filters": + if err := dec.Decode(&s.CategorizationFilters); err != nil { + return fmt.Errorf("%s | %w", "CategorizationFilters", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "max_matched_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxMatchedTokens", err) + } + s.MaxMatchedTokens = &value + case float64: + f := int(v) + s.MaxMatchedTokens = &f + } + + case "max_unique_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxUniqueTokens", err) + } + s.MaxUniqueTokens = &value + case float64: + f := int(v) + s.MaxUniqueTokens = &f + } + + case "min_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f + } + + case "shard_min_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardMinDocCount", err) + } + s.ShardMinDocCount = &value + case float64: + f := int(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "similarity_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SimilarityThreshold", err) + } + s.SimilarityThreshold = &value + case float64: + f := int(v) + s.SimilarityThreshold = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | 
%w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewCategorizeTextAggregation returns a CategorizeTextAggregation. +func NewCategorizeTextAggregation() *CategorizeTextAggregation { + r := &CategorizeTextAggregation{} + + return r +} + +type CategorizeTextAggregationVariant interface { + CategorizeTextAggregationCaster() *CategorizeTextAggregation +} + +func (s *CategorizeTextAggregation) CategorizeTextAggregationCaster() *CategorizeTextAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextanalyzer.go new file mode 100644 index 000000000..83d3808dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/categorizetextanalyzer.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CategorizeTextAnalyzer holds the union for the following types: +// +// string +// CustomCategorizeTextAnalyzer +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1185-L1188 +type CategorizeTextAnalyzer any + +type CategorizeTextAnalyzerVariant interface { + CategorizeTextAnalyzerCaster() *CategorizeTextAnalyzer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/category.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/category.go new file mode 100644 index 000000000..dc4e2eddb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/category.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Category type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Category.ts#L23-L49 +type Category struct { + // CategoryId A unique identifier for the category. category_id is unique at the job level, + // even when per-partition categorization is enabled. + CategoryId uint64 `json:"category_id"` + // Examples A list of examples of actual values that matched the category. + Examples []string `json:"examples"` + // GrokPattern [experimental] A Grok pattern that could be used in Logstash or an ingest + // pipeline to extract fields from messages that match the category. This field + // is experimental and may be changed or removed in a future release. The Grok + // patterns that are found are not optimal, but are often a good starting point + // for manual tweaking. + GrokPattern *string `json:"grok_pattern,omitempty"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // MaxMatchingLength The maximum length of the fields that matched the category. The value is + // increased by 10% to enable matching for similar fields that have not been + // analyzed. + MaxMatchingLength uint64 `json:"max_matching_length"` + Mlcategory string `json:"mlcategory"` + // NumMatches The number of messages that have been matched by this category. This is only + // guaranteed to have the latest accurate count after a job _flush or _close + NumMatches *int64 `json:"num_matches,omitempty"` + P *string `json:"p,omitempty"` + // PartitionFieldName If per-partition categorization is enabled, this property identifies the + // field used to segment the categorization. It is not present when + // per-partition categorization is disabled. + PartitionFieldName *string `json:"partition_field_name,omitempty"` + // PartitionFieldValue If per-partition categorization is enabled, this property identifies the + // value of the partition_field_name for the category. It is not present when + // per-partition categorization is disabled. + PartitionFieldValue *string `json:"partition_field_value,omitempty"` + // PreferredToCategories A list of category_id entries that this current category encompasses. Any new + // message that is processed by the categorizer will match against this category + // and not any of the categories in this list. This is only guaranteed to have + // the latest accurate list of categories after a job _flush or _close + PreferredToCategories []string `json:"preferred_to_categories,omitempty"` + // Regex A regular expression that is used to search for values that match the + // category. + Regex string `json:"regex"` + ResultType string `json:"result_type"` + // Terms A space separated list of the common tokens that are matched in values of the + // category. 
+ Terms string `json:"terms"` +} + +func (s *Category) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "category_id": + if err := dec.Decode(&s.CategoryId); err != nil { + return fmt.Errorf("%s | %w", "CategoryId", err) + } + + case "examples": + if err := dec.Decode(&s.Examples); err != nil { + return fmt.Errorf("%s | %w", "Examples", err) + } + + case "grok_pattern": + if err := dec.Decode(&s.GrokPattern); err != nil { + return fmt.Errorf("%s | %w", "GrokPattern", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "max_matching_length": + if err := dec.Decode(&s.MaxMatchingLength); err != nil { + return fmt.Errorf("%s | %w", "MaxMatchingLength", err) + } + + case "mlcategory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Mlcategory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mlcategory = o + + case "num_matches": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumMatches", err) + } + s.NumMatches = &value + case float64: + f := int64(v) + s.NumMatches = &f + } + + case "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "P", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.P = &o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = &o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldValue = &o + + case "preferred_to_categories": + if err := dec.Decode(&s.PreferredToCategories); err != nil { + return fmt.Errorf("%s | %w", "PreferredToCategories", err) + } + + case "regex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Regex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Regex = o + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "terms": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Terms", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Terms = o + + } + } + return nil +} + +// NewCategory returns a Category. 
+func NewCategory() *Category { + r := &Category{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catnodecolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catnodecolumns.go new file mode 100644 index 000000000..300e39aeb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catnodecolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn" +) + +// CatNodeColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1285-L1285 +type CatNodeColumns []catnodecolumn.CatNodeColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catrecoverycolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catrecoverycolumns.go new file mode 100644 index 000000000..057dac56f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catrecoverycolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn" +) + +// CatRecoveryColumns type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1286-L1286 +type CatRecoveryColumns []catrecoverycolumn.CatRecoveryColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsegmentscolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsegmentscolumns.go new file mode 100644 index 000000000..e1fdb9cbc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsegmentscolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn" +) + +// CatSegmentsColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1287-L1287 +type CatSegmentsColumns []catsegmentscolumn.CatSegmentsColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catshardcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catshardcolumns.go new file mode 100644 index 000000000..588b00a7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catshardcolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn" +) + +// CatShardColumns type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1950-L1950 +type CatShardColumns []catshardcolumn.CatShardColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsnapshotscolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsnapshotscolumns.go new file mode 100644 index 000000000..053fe27c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catsnapshotscolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn" +) + +// CatSnapshotsColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1288-L1288 +type CatSnapshotsColumns []catsnapshotscolumn.CatSnapshotsColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catthreadpoolcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catthreadpoolcolumns.go new file mode 100644 index 000000000..d4d4d20b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/catthreadpoolcolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn" +) + +// CatThreadPoolColumns type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L2053-L2053 +type CatThreadPoolColumns []catthreadpoolcolumn.CatThreadPoolColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattrainedmodelscolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattrainedmodelscolumns.go new file mode 100644 index 000000000..062c2c40a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattrainedmodelscolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn" +) + +// CatTrainedModelsColumns type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1365-L1367 +type CatTrainedModelsColumns []cattrainedmodelscolumn.CatTrainedModelsColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattransformcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattransformcolumns.go new file mode 100644 index 000000000..4f833ba1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cattransformcolumns.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn" +) + +// CatTransformColumns type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1574-L1574 +type CatTransformColumns []cattransformcolumn.CatTransformColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccr.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccr.go new file mode 100644 index 000000000..b264b840a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccr.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Ccr type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L344-L347 +type Ccr struct { + AutoFollowPatternsCount int `json:"auto_follow_patterns_count"` + Available bool `json:"available"` + Enabled bool `json:"enabled"` + FollowerIndicesCount int `json:"follower_indices_count"` +} + +func (s *Ccr) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_follow_patterns_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoFollowPatternsCount", err) + } + s.AutoFollowPatternsCount = value + case float64: + f := int(v) + s.AutoFollowPatternsCount = f + } + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "follower_indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FollowerIndicesCount", err) + } + s.FollowerIndicesCount = value + case float64: + f := int(v) + s.FollowerIndicesCount = f + } + + } + } + return nil +} + +// NewCcr returns a Ccr. 
+func NewCcr() *Ccr { + r := &Ccr{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccrshardstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccrshardstats.go new file mode 100644 index 000000000..29cd54ae6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccrshardstats.go @@ -0,0 +1,453 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CcrShardStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/_types/FollowIndexStats.ts#L37-L109 +type CcrShardStats struct { + // BytesRead The total of transferred bytes read from the leader. + // This is only an estimate and does not account for compression if enabled. + BytesRead int64 `json:"bytes_read"` + // FailedReadRequests The number of failed reads. + FailedReadRequests int64 `json:"failed_read_requests"` + // FailedWriteRequests The number of failed bulk write requests on the follower. + FailedWriteRequests int64 `json:"failed_write_requests"` + FatalException *ErrorCause `json:"fatal_exception,omitempty"` + // FollowerAliasesVersion The index aliases version the follower is synced up to. + FollowerAliasesVersion int64 `json:"follower_aliases_version"` + // FollowerGlobalCheckpoint The current global checkpoint on the follower. + // The difference between the `leader_global_checkpoint` and the + // `follower_global_checkpoint` is an indication of how much the follower is + // lagging the leader. + FollowerGlobalCheckpoint int64 `json:"follower_global_checkpoint"` + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // FollowerMappingVersion The mapping version the follower is synced up to. + FollowerMappingVersion int64 `json:"follower_mapping_version"` + // FollowerMaxSeqNo The current maximum sequence number on the follower. + FollowerMaxSeqNo int64 `json:"follower_max_seq_no"` + // FollowerSettingsVersion The index settings version the follower is synced up to. + FollowerSettingsVersion int64 `json:"follower_settings_version"` + // LastRequestedSeqNo The starting sequence number of the last batch of operations requested from + // the leader. + LastRequestedSeqNo int64 `json:"last_requested_seq_no"` + // LeaderGlobalCheckpoint The current global checkpoint on the leader known to the follower task. 
+ LeaderGlobalCheckpoint int64 `json:"leader_global_checkpoint"` + // LeaderIndex The name of the index in the leader cluster being followed. + LeaderIndex string `json:"leader_index"` + // LeaderMaxSeqNo The current maximum sequence number on the leader known to the follower task. + LeaderMaxSeqNo int64 `json:"leader_max_seq_no"` + // OperationsRead The total number of operations read from the leader. + OperationsRead int64 `json:"operations_read"` + // OperationsWritten The number of operations written on the follower. + OperationsWritten int64 `json:"operations_written"` + // OutstandingReadRequests The number of active read requests from the follower. + OutstandingReadRequests int `json:"outstanding_read_requests"` + // OutstandingWriteRequests The number of active bulk write requests on the follower. + OutstandingWriteRequests int `json:"outstanding_write_requests"` + // ReadExceptions An array of objects representing failed reads. + ReadExceptions []ReadException `json:"read_exceptions"` + // RemoteCluster The remote cluster containing the leader index. + RemoteCluster string `json:"remote_cluster"` + // ShardId The numerical shard ID, with values from 0 to one less than the number of + // replicas. + ShardId int `json:"shard_id"` + // SuccessfulReadRequests The number of successful fetches. + SuccessfulReadRequests int64 `json:"successful_read_requests"` + // SuccessfulWriteRequests The number of bulk write requests run on the follower. + SuccessfulWriteRequests int64 `json:"successful_write_requests"` + TimeSinceLastRead Duration `json:"time_since_last_read,omitempty"` + // TimeSinceLastReadMillis The number of milliseconds since a read request was sent to the leader. + // When the follower is caught up to the leader, this number will increase up to + // the configured `read_poll_timeout` at which point another read request will + // be sent to the leader. + TimeSinceLastReadMillis int64 `json:"time_since_last_read_millis"` + TotalReadRemoteExecTime Duration `json:"total_read_remote_exec_time,omitempty"` + // TotalReadRemoteExecTimeMillis The total time reads spent running on the remote cluster. + TotalReadRemoteExecTimeMillis int64 `json:"total_read_remote_exec_time_millis"` + TotalReadTime Duration `json:"total_read_time,omitempty"` + // TotalReadTimeMillis The total time reads were outstanding, measured from the time a read was sent + // to the leader to the time a reply was returned to the follower. + TotalReadTimeMillis int64 `json:"total_read_time_millis"` + TotalWriteTime Duration `json:"total_write_time,omitempty"` + // TotalWriteTimeMillis The total time spent writing on the follower. + TotalWriteTimeMillis int64 `json:"total_write_time_millis"` + // WriteBufferOperationCount The number of write operations queued on the follower. + WriteBufferOperationCount int64 `json:"write_buffer_operation_count"` + // WriteBufferSizeInBytes The total number of bytes of operations currently queued for writing. 
+ WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` +} + +func (s *CcrShardStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes_read": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BytesRead", err) + } + s.BytesRead = value + case float64: + f := int64(v) + s.BytesRead = f + } + + case "failed_read_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FailedReadRequests", err) + } + s.FailedReadRequests = value + case float64: + f := int64(v) + s.FailedReadRequests = f + } + + case "failed_write_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FailedWriteRequests", err) + } + s.FailedWriteRequests = value + case float64: + f := int64(v) + s.FailedWriteRequests = f + } + + case "fatal_exception": + if err := dec.Decode(&s.FatalException); err != nil { + return fmt.Errorf("%s | %w", "FatalException", err) + } + + case "follower_aliases_version": + if err := dec.Decode(&s.FollowerAliasesVersion); err != nil { + return fmt.Errorf("%s | %w", "FollowerAliasesVersion", err) + } + + case "follower_global_checkpoint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FollowerGlobalCheckpoint", err) + } + s.FollowerGlobalCheckpoint = value + case float64: + f := int64(v) + s.FollowerGlobalCheckpoint = f + } + + case "follower_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FollowerIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FollowerIndex = o + + case "follower_mapping_version": + if err := dec.Decode(&s.FollowerMappingVersion); err != nil { + return fmt.Errorf("%s | %w", "FollowerMappingVersion", err) + } + + case "follower_max_seq_no": + if err := dec.Decode(&s.FollowerMaxSeqNo); err != nil { + return fmt.Errorf("%s | %w", "FollowerMaxSeqNo", err) + } + + case "follower_settings_version": + if err := dec.Decode(&s.FollowerSettingsVersion); err != nil { + return fmt.Errorf("%s | %w", "FollowerSettingsVersion", err) + } + + case "last_requested_seq_no": + if err := dec.Decode(&s.LastRequestedSeqNo); err != nil { + return fmt.Errorf("%s | %w", "LastRequestedSeqNo", err) + } + + case "leader_global_checkpoint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LeaderGlobalCheckpoint", err) + } + s.LeaderGlobalCheckpoint = value + case float64: + f := int64(v) + s.LeaderGlobalCheckpoint = f + } + + case "leader_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LeaderIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LeaderIndex = o + + case "leader_max_seq_no": + if err := dec.Decode(&s.LeaderMaxSeqNo); err != nil { + return fmt.Errorf("%s | %w", 
"LeaderMaxSeqNo", err) + } + + case "operations_read": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OperationsRead", err) + } + s.OperationsRead = value + case float64: + f := int64(v) + s.OperationsRead = f + } + + case "operations_written": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OperationsWritten", err) + } + s.OperationsWritten = value + case float64: + f := int64(v) + s.OperationsWritten = f + } + + case "outstanding_read_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutstandingReadRequests", err) + } + s.OutstandingReadRequests = value + case float64: + f := int(v) + s.OutstandingReadRequests = f + } + + case "outstanding_write_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutstandingWriteRequests", err) + } + s.OutstandingWriteRequests = value + case float64: + f := int(v) + s.OutstandingWriteRequests = f + } + + case "read_exceptions": + if err := dec.Decode(&s.ReadExceptions); err != nil { + return fmt.Errorf("%s | %w", "ReadExceptions", err) + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + case "shard_id": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardId", err) + } + s.ShardId = value + case float64: + f := int(v) + s.ShardId = f + } + + case "successful_read_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SuccessfulReadRequests", err) + } + s.SuccessfulReadRequests = value + case float64: + f := int64(v) + s.SuccessfulReadRequests = f + } + + case "successful_write_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SuccessfulWriteRequests", err) + } + s.SuccessfulWriteRequests = value + case float64: + f := int64(v) + s.SuccessfulWriteRequests = f + } + + case "time_since_last_read": + if err := dec.Decode(&s.TimeSinceLastRead); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceLastRead", err) + } + + case "time_since_last_read_millis": + if err := dec.Decode(&s.TimeSinceLastReadMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceLastReadMillis", err) + } + + case "total_read_remote_exec_time": + if err := dec.Decode(&s.TotalReadRemoteExecTime); err != nil { + return fmt.Errorf("%s | %w", "TotalReadRemoteExecTime", err) + } + + case "total_read_remote_exec_time_millis": + if err := dec.Decode(&s.TotalReadRemoteExecTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalReadRemoteExecTimeMillis", err) + } + + case "total_read_time": + if err := dec.Decode(&s.TotalReadTime); err != nil { + return fmt.Errorf("%s | %w", "TotalReadTime", err) + } + + case 
"total_read_time_millis": + if err := dec.Decode(&s.TotalReadTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalReadTimeMillis", err) + } + + case "total_write_time": + if err := dec.Decode(&s.TotalWriteTime); err != nil { + return fmt.Errorf("%s | %w", "TotalWriteTime", err) + } + + case "total_write_time_millis": + if err := dec.Decode(&s.TotalWriteTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalWriteTimeMillis", err) + } + + case "write_buffer_operation_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "WriteBufferOperationCount", err) + } + s.WriteBufferOperationCount = value + case float64: + f := int64(v) + s.WriteBufferOperationCount = f + } + + case "write_buffer_size_in_bytes": + if err := dec.Decode(&s.WriteBufferSizeInBytes); err != nil { + return fmt.Errorf("%s | %w", "WriteBufferSizeInBytes", err) + } + + } + } + return nil +} + +// NewCcrShardStats returns a CcrShardStats. +func NewCcrShardStats() *CcrShardStats { + r := &CcrShardStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsstats.go new file mode 100644 index 000000000..a8db81083 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsstats.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CCSStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L769-L784 +type CCSStats struct { + // Clusters Contains remote cluster settings and metrics collected from them. + // The keys are cluster names, and the values are per-cluster data. + // Only present if `include_remotes` option is set to true. + Clusters map[string]RemoteClusterInfo `json:"clusters,omitempty"` + // Esql_ Information about ES|QL cross-cluster query usage. + Esql_ *CCSUsageStats `json:"_esql,omitempty"` + // Search_ Information about cross-cluster search usage. + Search_ CCSUsageStats `json:"_search"` +} + +// NewCCSStats returns a CCSStats. 
+func NewCCSStats() *CCSStats { + r := &CCSStats{ + Clusters: make(map[string]RemoteClusterInfo), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusageclusterstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusageclusterstats.go new file mode 100644 index 000000000..1a825f1e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusageclusterstats.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CCSUsageClusterStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L855-L862 +type CCSUsageClusterStats struct { + // Skipped The total number of cross-cluster search requests for which this cluster was + // skipped. + Skipped int `json:"skipped"` + // Took Statistics about the time taken to execute requests against this cluster. + Took CCSUsageTimeValue `json:"took"` + // Total The total number of successful (not skipped) cross-cluster search requests + // that were executed against this cluster. This may include requests where + // partial results were returned, but not requests in which the cluster has been + // skipped entirely. + Total int `json:"total"` +} + +func (s *CCSUsageClusterStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "skipped": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skipped", err) + } + s.Skipped = value + case float64: + f := int(v) + s.Skipped = f + } + + case "took": + if err := dec.Decode(&s.Took); err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewCCSUsageClusterStats returns a CCSUsageClusterStats. 
+func NewCCSUsageClusterStats() *CCSUsageClusterStats { + r := &CCSUsageClusterStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagestats.go new file mode 100644 index 000000000..69b0a2997 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagestats.go @@ -0,0 +1,236 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CCSUsageStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L819-L844 +type CCSUsageStats struct { + // Clients Statistics about the clients that executed cross-cluster search requests. The + // keys are the names of the clients, and the values are the number of requests + // that were executed by that client. Only known clients (such as `kibana` or + // `elasticsearch`) are counted. + Clients map[string]int `json:"clients"` + // Clusters Statistics about the clusters that were queried in cross-cluster search + // requests. The keys are cluster names, and the values are per-cluster + // telemetry data. This also includes the local cluster itself, which uses the + // name `(local)`. + Clusters map[string]CCSUsageClusterStats `json:"clusters"` + // FailureReasons Statistics about the reasons for cross-cluster search request failures. The + // keys are the failure reason names and the values are the number of requests + // that failed for that reason. + FailureReasons map[string]int `json:"failure_reasons"` + // Features The keys are the names of the search feature, and the values are the number + // of requests that used that feature. Single request can use more than one + // feature (e.g. both `async` and `wildcard`). + Features map[string]int `json:"features"` + // RemotesPerSearchAvg The average number of remote clusters that were queried in a single + // cross-cluster search request. + RemotesPerSearchAvg Float64 `json:"remotes_per_search_avg"` + // RemotesPerSearchMax The maximum number of remote clusters that were queried in a single + // cross-cluster search request. + RemotesPerSearchMax int `json:"remotes_per_search_max"` + // Skipped The total number of cross-cluster search requests (successful or failed) that + // had at least one remote cluster skipped. 
+ Skipped int `json:"skipped"` + // Success The total number of cross-cluster search requests that have been successfully + // executed by the cluster. + Success int `json:"success"` + // Took Statistics about the time taken to execute cross-cluster search requests. + Took CCSUsageTimeValue `json:"took"` + // TookMrtFalse Statistics about the time taken to execute cross-cluster search requests for + // which the `ccs_minimize_roundtrips` setting was set to `false`. + TookMrtFalse *CCSUsageTimeValue `json:"took_mrt_false,omitempty"` + // TookMrtTrue Statistics about the time taken to execute cross-cluster search requests for + // which the `ccs_minimize_roundtrips` setting was set to `true`. + TookMrtTrue *CCSUsageTimeValue `json:"took_mrt_true,omitempty"` + // Total The total number of cross-cluster search requests that have been executed by + // the cluster. + Total int `json:"total"` +} + +func (s *CCSUsageStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clients": + if s.Clients == nil { + s.Clients = make(map[string]int, 0) + } + if err := dec.Decode(&s.Clients); err != nil { + return fmt.Errorf("%s | %w", "Clients", err) + } + + case "clusters": + if s.Clusters == nil { + s.Clusters = make(map[string]CCSUsageClusterStats, 0) + } + if err := dec.Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + case "failure_reasons": + if s.FailureReasons == nil { + s.FailureReasons = make(map[string]int, 0) + } + if err := dec.Decode(&s.FailureReasons); err != nil { + return fmt.Errorf("%s | %w", "FailureReasons", err) + } + + case "features": + if s.Features == nil { + s.Features = make(map[string]int, 0) + } + if err := dec.Decode(&s.Features); err != nil { + return fmt.Errorf("%s | %w", "Features", err) + } + + case "remotes_per_search_avg": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RemotesPerSearchAvg", err) + } + f := Float64(value) + s.RemotesPerSearchAvg = f + case float64: + f := Float64(v) + s.RemotesPerSearchAvg = f + } + + case "remotes_per_search_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemotesPerSearchMax", err) + } + s.RemotesPerSearchMax = value + case float64: + f := int(v) + s.RemotesPerSearchMax = f + } + + case "skipped": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skipped", err) + } + s.Skipped = value + case float64: + f := int(v) + s.Skipped = f + } + + case "success": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Success", err) + } + s.Success = value + case float64: + f := int(v) + s.Success = f + } + + case "took": + if err := dec.Decode(&s.Took); err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + + case "took_mrt_false": + if err := dec.Decode(&s.TookMrtFalse); err != nil { + return fmt.Errorf("%s | %w", "TookMrtFalse", err) + } + + case "took_mrt_true": + if err := dec.Decode(&s.TookMrtTrue); err != nil { + return fmt.Errorf("%s | %w", "TookMrtTrue", err) + } + + case "total": + + 
var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewCCSUsageStats returns a CCSUsageStats. +func NewCCSUsageStats() *CCSUsageStats { + r := &CCSUsageStats{ + Clients: make(map[string]int), + Clusters: make(map[string]CCSUsageClusterStats), + FailureReasons: make(map[string]int), + Features: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagetimevalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagetimevalue.go new file mode 100644 index 000000000..f9f8eead4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ccsusagetimevalue.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CCSUsageTimeValue type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L846-L853 +type CCSUsageTimeValue struct { + // Avg The average time taken to execute a request, in milliseconds. + Avg int64 `json:"avg"` + // Max The maximum time taken to execute a request, in milliseconds. + Max int64 `json:"max"` + // P90 The 90th percentile of the time taken to execute requests, in milliseconds. + P90 int64 `json:"p90"` +} + +func (s *CCSUsageTimeValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + + case "p90": + if err := dec.Decode(&s.P90); err != nil { + return fmt.Errorf("%s | %w", "P90", err) + } + + } + } + return nil +} + +// NewCCSUsageTimeValue returns a CCSUsageTimeValue. 
+func NewCCSUsageTimeValue() *CCSUsageTimeValue { + r := &CCSUsageTimeValue{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/certificateinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/certificateinformation.go new file mode 100644 index 000000000..99665f1d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/certificateinformation.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CertificateInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ssl/certificates/types.ts#L22-L57 +type CertificateInformation struct { + // Alias If the path refers to a container file (a jks keystore, or a PKCS#12 file), + // it is the alias of the certificate. + // Otherwise, it is null. + Alias *string `json:"alias,omitempty"` + // Expiry The ISO formatted date of the certificate's expiry (not-after) date. + Expiry DateTime `json:"expiry"` + // Format The format of the file. + // Valid values include `jks`, `PKCS12`, and `PEM`. + Format string `json:"format"` + // HasPrivateKey Indicates whether Elasticsearch has access to the private key for this + // certificate. + HasPrivateKey bool `json:"has_private_key"` + // Issuer The Distinguished Name of the certificate's issuer. + Issuer *string `json:"issuer,omitempty"` + // Path The path to the certificate, as configured in the `elasticsearch.yml` file. + Path string `json:"path"` + // SerialNumber The hexadecimal representation of the certificate's serial number. + SerialNumber string `json:"serial_number"` + // SubjectDn The Distinguished Name of the certificate's subject. 
+ SubjectDn string `json:"subject_dn"` +} + +func (s *CertificateInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Alias", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Alias = &o + + case "expiry": + if err := dec.Decode(&s.Expiry); err != nil { + return fmt.Errorf("%s | %w", "Expiry", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = o + + case "has_private_key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HasPrivateKey", err) + } + s.HasPrivateKey = value + case bool: + s.HasPrivateKey = v + } + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Issuer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = &o + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "serial_number": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SerialNumber", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SerialNumber = o + + case "subject_dn": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SubjectDn", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SubjectDn = o + + } + } + return nil +} + +// NewCertificateInformation returns a CertificateInformation. +func NewCertificateInformation() *CertificateInformation { + r := &CertificateInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroup.go new file mode 100644 index 000000000..b1cfc8637 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroup.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Cgroup type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L499-L512 +type Cgroup struct { + // Cpu Contains statistics about `cpu` control group for the node. + Cpu *CgroupCpu `json:"cpu,omitempty"` + // Cpuacct Contains statistics about `cpuacct` control group for the node. + Cpuacct *CpuAcct `json:"cpuacct,omitempty"` + // Memory Contains statistics about the memory control group for the node. + Memory *CgroupMemory `json:"memory,omitempty"` +} + +// NewCgroup returns a Cgroup. +func NewCgroup() *Cgroup { + r := &Cgroup{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpu.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpu.go new file mode 100644 index 000000000..0d4b26184 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpu.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CgroupCpu type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L525-L542 +type CgroupCpu struct { + // CfsPeriodMicros The period of time, in microseconds, for how regularly all tasks in the same + // cgroup as the Elasticsearch process should have their access to CPU resources + // reallocated. + CfsPeriodMicros *int `json:"cfs_period_micros,omitempty"` + // CfsQuotaMicros The total amount of time, in microseconds, for which all tasks in the same + // cgroup as the Elasticsearch process can run during one period + // `cfs_period_micros`. + CfsQuotaMicros *int `json:"cfs_quota_micros,omitempty"` + // ControlGroup The `cpu` control group to which the Elasticsearch process belongs. + ControlGroup *string `json:"control_group,omitempty"` + // Stat Contains CPU statistics for the node. 
+ Stat *CgroupCpuStat `json:"stat,omitempty"` +} + +func (s *CgroupCpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cfs_period_micros": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CfsPeriodMicros", err) + } + s.CfsPeriodMicros = &value + case float64: + f := int(v) + s.CfsPeriodMicros = &f + } + + case "cfs_quota_micros": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CfsQuotaMicros", err) + } + s.CfsQuotaMicros = &value + case float64: + f := int(v) + s.CfsQuotaMicros = &f + } + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ControlGroup", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "stat": + if err := dec.Decode(&s.Stat); err != nil { + return fmt.Errorf("%s | %w", "Stat", err) + } + + } + } + return nil +} + +// NewCgroupCpu returns a CgroupCpu. +func NewCgroupCpu() *CgroupCpu { + r := &CgroupCpu{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpustat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpustat.go new file mode 100644 index 000000000..685659f75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupcpustat.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CgroupCpuStat type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L544-L557 +type CgroupCpuStat struct { + // NumberOfElapsedPeriods The number of reporting periods (as specified by `cfs_period_micros`) that + // have elapsed. + NumberOfElapsedPeriods *int64 `json:"number_of_elapsed_periods,omitempty"` + // NumberOfTimesThrottled The number of times all tasks in the same cgroup as the Elasticsearch process + // have been throttled. 
+ NumberOfTimesThrottled *int64 `json:"number_of_times_throttled,omitempty"` + // TimeThrottledNanos The total amount of time, in nanoseconds, for which all tasks in the same + // cgroup as the Elasticsearch process have been throttled. + TimeThrottledNanos *int64 `json:"time_throttled_nanos,omitempty"` +} + +func (s *CgroupCpuStat) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_elapsed_periods": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfElapsedPeriods", err) + } + s.NumberOfElapsedPeriods = &value + case float64: + f := int64(v) + s.NumberOfElapsedPeriods = &f + } + + case "number_of_times_throttled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfTimesThrottled", err) + } + s.NumberOfTimesThrottled = &value + case float64: + f := int64(v) + s.NumberOfTimesThrottled = &f + } + + case "time_throttled_nanos": + if err := dec.Decode(&s.TimeThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeThrottledNanos", err) + } + + } + } + return nil +} + +// NewCgroupCpuStat returns a CgroupCpuStat. +func NewCgroupCpuStat() *CgroupCpuStat { + r := &CgroupCpuStat{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupmemory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupmemory.go new file mode 100644 index 000000000..8595571f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cgroupmemory.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CgroupMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L559-L575 +type CgroupMemory struct { + // ControlGroup The `memory` control group to which the Elasticsearch process belongs. + ControlGroup *string `json:"control_group,omitempty"` + // LimitInBytes The maximum amount of user memory (including file cache) allowed for all + // tasks in the same cgroup as the Elasticsearch process. 
+ // This value can be too big to store in a `long`, so is returned as a string so + // that the value returned can exactly match what the underlying operating + // system interface returns. + // Any value that is too large to parse into a `long` almost certainly means no + // limit has been set for the cgroup. + LimitInBytes *string `json:"limit_in_bytes,omitempty"` + // UsageInBytes The total current memory usage by processes in the cgroup, in bytes, by all + // tasks in the same cgroup as the Elasticsearch process. + // This value is stored as a string for consistency with `limit_in_bytes`. + UsageInBytes *string `json:"usage_in_bytes,omitempty"` +} + +func (s *CgroupMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ControlGroup", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "limit_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LimitInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LimitInBytes = &o + + case "usage_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UsageInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UsageInBytes = &o + + } + } + return nil +} + +// NewCgroupMemory returns a CgroupMemory. +func NewCgroupMemory() *CgroupMemory { + r := &CgroupMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chaininput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chaininput.go new file mode 100644 index 000000000..34bd7725f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chaininput.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ChainInput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L35-L37 +type ChainInput struct { + Inputs []map[string]WatcherInput `json:"inputs"` +} + +// NewChainInput returns a ChainInput. 
+func NewChainInput() *ChainInput { + r := &ChainInput{} + + return r +} + +type ChainInputVariant interface { + ChainInputCaster() *ChainInput +} + +func (s *ChainInput) ChainInputCaster() *ChainInput { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilter.go new file mode 100644 index 000000000..d31916a7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilter.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CharFilter holds the union for the following types: +// +// string +// CharFilterDefinition +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/char_filters.ts#L28-L33 +type CharFilter any + +type CharFilterVariant interface { + CharFilterCaster() *CharFilter +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdefinition.go new file mode 100644 index 000000000..acbc4bbc5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdefinition.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CharFilterDefinition holds the union for the following types: +// +// HtmlStripCharFilter +// MappingCharFilter +// PatternReplaceCharFilter +// IcuNormalizationCharFilter +// KuromojiIterationMarkCharFilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/char_filters.ts#L35-L44 +type CharFilterDefinition any + +type CharFilterDefinitionVariant interface { + CharFilterDefinitionCaster() *CharFilterDefinition +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdetail.go new file mode 100644 index 000000000..843f9f650 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfilterdetail.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CharFilterDetail type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L46-L49 +type CharFilterDetail struct { + FilteredText []string `json:"filtered_text"` + Name string `json:"name"` +} + +func (s *CharFilterDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filtered_text": + if err := dec.Decode(&s.FilteredText); err != nil { + return fmt.Errorf("%s | %w", "FilteredText", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewCharFilterDetail returns a CharFilterDetail. +func NewCharFilterDetail() *CharFilterDetail { + r := &CharFilterDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfiltertypes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfiltertypes.go new file mode 100644 index 000000000..1db27d6bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/charfiltertypes.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CharFilterTypes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L315-L352 +type CharFilterTypes struct { + // AnalyzerTypes Contains statistics about analyzer types used in selected nodes. + AnalyzerTypes []FieldTypes `json:"analyzer_types"` + // BuiltInAnalyzers Contains statistics about built-in analyzers used in selected nodes. + BuiltInAnalyzers []FieldTypes `json:"built_in_analyzers"` + // BuiltInCharFilters Contains statistics about built-in character filters used in selected nodes. + BuiltInCharFilters []FieldTypes `json:"built_in_char_filters"` + // BuiltInFilters Contains statistics about built-in token filters used in selected nodes. + BuiltInFilters []FieldTypes `json:"built_in_filters"` + // BuiltInTokenizers Contains statistics about built-in tokenizers used in selected nodes. + BuiltInTokenizers []FieldTypes `json:"built_in_tokenizers"` + // CharFilterTypes Contains statistics about character filter types used in selected nodes. + CharFilterTypes []FieldTypes `json:"char_filter_types"` + // FilterTypes Contains statistics about token filter types used in selected nodes. + FilterTypes []FieldTypes `json:"filter_types"` + // Synonyms Contains statistics about synonyms types used in selected nodes. + Synonyms map[string]SynonymsStats `json:"synonyms"` + // TokenizerTypes Contains statistics about tokenizer types used in selected nodes. + TokenizerTypes []FieldTypes `json:"tokenizer_types"` +} + +// NewCharFilterTypes returns a CharFilterTypes. +func NewCharFilterTypes() *CharFilterTypes { + r := &CharFilterTypes{ + Synonyms: make(map[string]SynonymsStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chargrouptokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chargrouptokenizer.go new file mode 100644 index 000000000..5d5fd9ff0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chargrouptokenizer.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CharGroupTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L31-L38 +type CharGroupTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + TokenizeOnChars []string `json:"tokenize_on_chars"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *CharGroupTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "tokenize_on_chars": + if err := dec.Decode(&s.TokenizeOnChars); err != nil { + return fmt.Errorf("%s | %w", "TokenizeOnChars", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CharGroupTokenizer) MarshalJSON() ([]byte, error) { + type innerCharGroupTokenizer CharGroupTokenizer + tmp := innerCharGroupTokenizer{ + MaxTokenLength: s.MaxTokenLength, + TokenizeOnChars: s.TokenizeOnChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "char_group" + + return json.Marshal(tmp) +} + +// NewCharGroupTokenizer returns a CharGroupTokenizer. +func NewCharGroupTokenizer() *CharGroupTokenizer { + r := &CharGroupTokenizer{} + + return r +} + +type CharGroupTokenizerVariant interface { + CharGroupTokenizerCaster() *CharGroupTokenizer +} + +func (s *CharGroupTokenizer) CharGroupTokenizerCaster() *CharGroupTokenizer { + return s +} + +func (s *CharGroupTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointing.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointing.go new file mode 100644 index 000000000..2210c85bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointing.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Checkpointing type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L102-L110 +type Checkpointing struct { + ChangesLastDetectedAt *int64 `json:"changes_last_detected_at,omitempty"` + ChangesLastDetectedAtString DateTime `json:"changes_last_detected_at_string,omitempty"` + Last CheckpointStats `json:"last"` + LastSearchTime *int64 `json:"last_search_time,omitempty"` + LastSearchTimeString DateTime `json:"last_search_time_string,omitempty"` + Next *CheckpointStats `json:"next,omitempty"` + OperationsBehind *int64 `json:"operations_behind,omitempty"` +} + +func (s *Checkpointing) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "changes_last_detected_at": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ChangesLastDetectedAt", err) + } + s.ChangesLastDetectedAt = &value + case float64: + f := int64(v) + s.ChangesLastDetectedAt = &f + } + + case "changes_last_detected_at_string": + if err := dec.Decode(&s.ChangesLastDetectedAtString); err != nil { + return fmt.Errorf("%s | %w", "ChangesLastDetectedAtString", err) + } + + case "last": + if err := dec.Decode(&s.Last); err != nil { + return fmt.Errorf("%s | %w", "Last", err) + } + + case "last_search_time": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastSearchTime", err) + } + s.LastSearchTime = &value + case float64: + f := int64(v) + s.LastSearchTime = &f + } + + case "last_search_time_string": + if err := dec.Decode(&s.LastSearchTimeString); err != nil { + return fmt.Errorf("%s | %w", "LastSearchTimeString", err) + } + + case "next": + if err := dec.Decode(&s.Next); err != nil { + return fmt.Errorf("%s | %w", "Next", err) + } + + case "operations_behind": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OperationsBehind", err) + } + s.OperationsBehind = &value + case float64: + f := int64(v) + s.OperationsBehind = &f + } + + } + } + return nil +} + +// NewCheckpointing returns a Checkpointing. 
+func NewCheckpointing() *Checkpointing { + r := &Checkpointing{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointstats.go new file mode 100644 index 000000000..380dddc5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/checkpointstats.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CheckpointStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L93-L100 +type CheckpointStats struct { + Checkpoint int64 `json:"checkpoint"` + CheckpointProgress *TransformProgress `json:"checkpoint_progress,omitempty"` + TimeUpperBound DateTime `json:"time_upper_bound,omitempty"` + TimeUpperBoundMillis *int64 `json:"time_upper_bound_millis,omitempty"` + Timestamp DateTime `json:"timestamp,omitempty"` + TimestampMillis *int64 `json:"timestamp_millis,omitempty"` +} + +func (s *CheckpointStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "checkpoint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Checkpoint", err) + } + s.Checkpoint = value + case float64: + f := int64(v) + s.Checkpoint = f + } + + case "checkpoint_progress": + if err := dec.Decode(&s.CheckpointProgress); err != nil { + return fmt.Errorf("%s | %w", "CheckpointProgress", err) + } + + case "time_upper_bound": + if err := dec.Decode(&s.TimeUpperBound); err != nil { + return fmt.Errorf("%s | %w", "TimeUpperBound", err) + } + + case "time_upper_bound_millis": + if err := dec.Decode(&s.TimeUpperBoundMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeUpperBoundMillis", err) + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "timestamp_millis": + if err := dec.Decode(&s.TimestampMillis); err != nil { + return fmt.Errorf("%s | %w", "TimestampMillis", err) + } + + } + } + return nil +} + +// NewCheckpointStats returns a CheckpointStats. 
+func NewCheckpointStats() *CheckpointStats { + r := &CheckpointStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregate.go new file mode 100644 index 000000000..f9c43770f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ChildrenAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L890-L894 +type ChildrenAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *ChildrenAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o 
:= NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); 
err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s ChildrenAggregate) MarshalJSON() ([]byte, error) { + type opt ChildrenAggregate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewChildrenAggregate returns a ChildrenAggregate. +func NewChildrenAggregate() *ChildrenAggregate { + r := &ChildrenAggregate{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregation.go new file mode 100644 index 000000000..23dc8628c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/childrenaggregation.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ChildrenAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L121-L126 +type ChildrenAggregation struct { + // Type The child type that should be selected. + Type *string `json:"type,omitempty"` +} + +func (s *ChildrenAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewChildrenAggregation returns a ChildrenAggregation. +func NewChildrenAggregation() *ChildrenAggregation { + r := &ChildrenAggregation{} + + return r +} + +type ChildrenAggregationVariant interface { + ChildrenAggregationCaster() *ChildrenAggregation +} + +func (s *ChildrenAggregation) ChildrenAggregationCaster() *ChildrenAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chineseanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chineseanalyzer.go new file mode 100644 index 000000000..d9485a6c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chineseanalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ChineseAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L120-L124 +type ChineseAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ChineseAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ChineseAnalyzer) MarshalJSON() ([]byte, error) { + type innerChineseAnalyzer ChineseAnalyzer + tmp := innerChineseAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "chinese" + + return json.Marshal(tmp) +} + +// NewChineseAnalyzer returns a ChineseAnalyzer. +func NewChineseAnalyzer() *ChineseAnalyzer { + r := &ChineseAnalyzer{} + + return r +} + +type ChineseAnalyzerVariant interface { + ChineseAnalyzerCaster() *ChineseAnalyzer +} + +func (s *ChineseAnalyzer) ChineseAnalyzerCaster() *ChineseAnalyzer { + return s +} + +func (s *ChineseAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chisquareheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chisquareheuristic.go new file mode 100644 index 000000000..8d412cf0e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chisquareheuristic.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ChiSquareHeuristic type. 
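Note that ChineseAnalyzer.MarshalJSON above pins the "type" discriminator to "chinese" regardless of what the struct holds, so even a zero value serializes with the literal. A minimal sketch of that behavior, using the vendored package from this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	out, err := json.Marshal(types.NewChineseAnalyzer())
	if err != nil {
		panic(err)
	}
	// The literal is injected even though Type was never set.
	fmt.Println(string(out)) // {"type":"chinese"}
}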
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L782-L791 +type ChiSquareHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. + BackgroundIsSuperset bool `json:"background_is_superset"` + // IncludeNegatives Set to `false` to filter out the terms that appear less often in the subset + // than in documents outside the subset. + IncludeNegatives bool `json:"include_negatives"` +} + +func (s *ChiSquareHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackgroundIsSuperset", err) + } + s.BackgroundIsSuperset = value + case bool: + s.BackgroundIsSuperset = v + } + + case "include_negatives": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeNegatives", err) + } + s.IncludeNegatives = value + case bool: + s.IncludeNegatives = v + } + + } + } + return nil +} + +// NewChiSquareHeuristic returns a ChiSquareHeuristic. +func NewChiSquareHeuristic() *ChiSquareHeuristic { + r := &ChiSquareHeuristic{} + + return r +} + +type ChiSquareHeuristicVariant interface { + ChiSquareHeuristicCaster() *ChiSquareHeuristic +} + +func (s *ChiSquareHeuristic) ChiSquareHeuristicCaster() *ChiSquareHeuristic { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingconfig.go new file mode 100644 index 000000000..fa2caace4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingconfig.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/chunkingmode" +) + +// ChunkingConfig type. 
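The ChiSquareHeuristic decoder above accepts booleans encoded either natively or as JSON strings, a leniency that recurs throughout these generated UnmarshalJSON methods. A minimal sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	h := types.NewChiSquareHeuristic()
	// background_is_superset arrives as a quoted string here; strconv.ParseBool handles it.
	raw := []byte(`{"background_is_superset":"true","include_negatives":false}`)
	if err := json.Unmarshal(raw, h); err != nil {
		panic(err)
	}
	fmt.Println(h.BackgroundIsSuperset, h.IncludeNegatives) // true false
}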
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L251-L264 +type ChunkingConfig struct { + // Mode If the mode is `auto`, the chunk size is dynamically calculated; + // this is the recommended value when the datafeed does not use aggregations. + // If the mode is `manual`, chunking is applied according to the specified + // `time_span`; + // use this mode when the datafeed uses aggregations. If the mode is `off`, no + // chunking is applied. + Mode chunkingmode.ChunkingMode `json:"mode"` + // TimeSpan The time span that each search will be querying. This setting is applicable + // only when the `mode` is set to `manual`. + TimeSpan Duration `json:"time_span,omitempty"` +} + +func (s *ChunkingConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "time_span": + if err := dec.Decode(&s.TimeSpan); err != nil { + return fmt.Errorf("%s | %w", "TimeSpan", err) + } + + } + } + return nil +} + +// NewChunkingConfig returns a ChunkingConfig. +func NewChunkingConfig() *ChunkingConfig { + r := &ChunkingConfig{} + + return r +} + +type ChunkingConfigVariant interface { + ChunkingConfigCaster() *ChunkingConfig +} + +func (s *ChunkingConfig) ChunkingConfigCaster() *ChunkingConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingsettings.go new file mode 100644 index 000000000..9d552ccec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/chunkingsettings.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ChunkingSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/ChunkingSettings.ts#L24-L32 +type ChunkingSettings struct { + // MaxChunkSize The maximum size of a chunk in words. + // This value cannot be higher than `300` or lower than `20` (for `sentence` + // strategy) or `10` (for `word` strategy). + MaxChunkSize int `json:"max_chunk_size"` + // Overlap The number of overlapping words for chunks. 
+ // It is applicable only to a `word` chunking strategy. + // This value cannot be higher than half the `max_chunk_size` value. + Overlap *int `json:"overlap,omitempty"` + // SentenceOverlap The number of overlapping sentences for chunks. + // It is applicable only for a `sentence` chunking strategy. + // It can be either `1` or `0`. + SentenceOverlap *int `json:"sentence_overlap,omitempty"` + // Strategy The chunking strategy: `sentence` or `word`. + Strategy string `json:"strategy"` +} + +func (s *ChunkingSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_chunk_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChunkSize", err) + } + s.MaxChunkSize = value + case float64: + f := int(v) + s.MaxChunkSize = f + } + + case "overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overlap", err) + } + s.Overlap = &value + case float64: + f := int(v) + s.Overlap = &f + } + + case "sentence_overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SentenceOverlap", err) + } + s.SentenceOverlap = &value + case float64: + f := int(v) + s.SentenceOverlap = &f + } + + case "strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Strategy = o + + } + } + return nil +} + +// NewChunkingSettings returns a ChunkingSettings. +func NewChunkingSettings() *ChunkingSettings { + r := &ChunkingSettings{} + + return r +} + +type ChunkingSettingsVariant interface { + ChunkingSettingsCaster() *ChunkingSettings +} + +func (s *ChunkingSettings) ChunkingSettingsCaster() *ChunkingSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/circleprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/circleprocessor.go new file mode 100644 index 000000000..d7db6dae9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/circleprocessor.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
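ChunkingConfig above and ChunkingSettings here apply the same leniency to numeric fields, accepting either JSON numbers or quoted numbers. A minimal sketch with ChunkingSettings:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	cs := types.NewChunkingSettings()
	// max_chunk_size decodes from either a JSON number or a quoted number.
	raw := []byte(`{"strategy":"sentence","max_chunk_size":"250","sentence_overlap":1}`)
	if err := json.Unmarshal(raw, cs); err != nil {
		panic(err)
	}
	fmt.Println(cs.Strategy, cs.MaxChunkSize, *cs.SentenceOverlap) // sentence 250 1
}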
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shapetype" +) + +// CircleProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L574-L597 +type CircleProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // ErrorDistance The difference between the resulting inscribed distance from center to side + // and the circle’s radius (measured in meters for `geo_shape`, unit-less for + // `shape`). + ErrorDistance Float64 `json:"error_distance"` + // Field The field to interpret as a circle. Either a string in WKT format or a map + // for GeoJSON. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // ShapeType Which field mapping type is to be used when processing the circle: + // `geo_shape` or `shape`. + ShapeType shapetype.ShapeType `json:"shape_type"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the polygon shape to + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *CircleProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "error_distance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ErrorDistance", err) + } + f := Float64(value) + s.ErrorDistance = f + case float64: + f := Float64(v) + s.ErrorDistance = f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "shape_type": + if err := dec.Decode(&s.ShapeType); err != nil { + return fmt.Errorf("%s | %w", "ShapeType", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewCircleProcessor returns a CircleProcessor. +func NewCircleProcessor() *CircleProcessor { + r := &CircleProcessor{} + + return r +} + +type CircleProcessorVariant interface { + CircleProcessorCaster() *CircleProcessor +} + +func (s *CircleProcessor) CircleProcessorCaster() *CircleProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkanalyzer.go new file mode 100644 index 000000000..adcb95f32 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkanalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
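The CircleProcessor type above decodes a processor definition as it would appear in an ingest pipeline. A minimal sketch; the field names and values are illustrative, and it assumes the generated shapetype enum decodes from and prints its string form, as the other generated enums in this package do:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewCircleProcessor()
	raw := []byte(`{"field":"circle","error_distance":28.0,"shape_type":"geo_shape","ignore_missing":true}`)
	if err := json.Unmarshal(raw, p); err != nil {
		panic(err)
	}
	fmt.Println(p.Field, p.ErrorDistance, p.ShapeType, *p.IgnoreMissing) // circle 28 geo_shape true
}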
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CjkAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L126-L130 +type CjkAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CjkAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CjkAnalyzer) MarshalJSON() ([]byte, error) { + type innerCjkAnalyzer CjkAnalyzer + tmp := innerCjkAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "cjk" + + return json.Marshal(tmp) +} + +// NewCjkAnalyzer returns a CjkAnalyzer. +func NewCjkAnalyzer() *CjkAnalyzer { + r := &CjkAnalyzer{} + + return r +} + +type CjkAnalyzerVariant interface { + CjkAnalyzerCaster() *CjkAnalyzer +} + +func (s *CjkAnalyzer) CjkAnalyzerCaster() *CjkAnalyzer { + return s +} + +func (s *CjkAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkbigramtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkbigramtokenfilter.go new file mode 100644 index 000000000..fe4a40b1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkbigramtokenfilter.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
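Each of these generated files closes with a *Variant interface and *Caster methods; they are the bridge from a concrete type into the union types used elsewhere in the typed API. A minimal sketch with the CjkAnalyzer above, assuming the Analyzer union marshals through the concrete value it wraps:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	cjk := types.NewCjkAnalyzer()

	// AnalyzerCaster wraps the concrete analyzer as the Analyzer union,
	// e.g. for placing it in an index's analysis.analyzer settings map.
	a := *cjk.AnalyzerCaster()

	out, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"type":"cjk"}
}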
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cjkbigramignoredscript" +) + +// CjkBigramTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L466-L472 +type CjkBigramTokenFilter struct { + // IgnoredScripts Array of character scripts for which to disable bigrams. + IgnoredScripts []cjkbigramignoredscript.CjkBigramIgnoredScript `json:"ignored_scripts,omitempty"` + // OutputUnigrams If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK + // character is output in unigram form when it has no adjacent characters. + // Defaults to `false`. + OutputUnigrams *bool `json:"output_unigrams,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *CjkBigramTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignored_scripts": + if err := dec.Decode(&s.IgnoredScripts); err != nil { + return fmt.Errorf("%s | %w", "IgnoredScripts", err) + } + + case "output_unigrams": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutputUnigrams", err) + } + s.OutputUnigrams = &value + case bool: + s.OutputUnigrams = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CjkBigramTokenFilter) MarshalJSON() ([]byte, error) { + type innerCjkBigramTokenFilter CjkBigramTokenFilter + tmp := innerCjkBigramTokenFilter{ + IgnoredScripts: s.IgnoredScripts, + OutputUnigrams: s.OutputUnigrams, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "cjk_bigram" + + return json.Marshal(tmp) +} + +// NewCjkBigramTokenFilter returns a CjkBigramTokenFilter. 
+func NewCjkBigramTokenFilter() *CjkBigramTokenFilter { + r := &CjkBigramTokenFilter{} + + return r +} + +type CjkBigramTokenFilterVariant interface { + CjkBigramTokenFilterCaster() *CjkBigramTokenFilter +} + +func (s *CjkBigramTokenFilter) CjkBigramTokenFilterCaster() *CjkBigramTokenFilter { + return s +} + +func (s *CjkBigramTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkwidthtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkwidthtokenfilter.go new file mode 100644 index 000000000..d5562cb3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cjkwidthtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CjkWidthTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L474-L476 +type CjkWidthTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *CjkWidthTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CjkWidthTokenFilter) MarshalJSON() ([]byte, error) { + type innerCjkWidthTokenFilter CjkWidthTokenFilter + tmp := innerCjkWidthTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "cjk_width" + + return json.Marshal(tmp) +} + +// NewCjkWidthTokenFilter returns a CjkWidthTokenFilter. 
+func NewCjkWidthTokenFilter() *CjkWidthTokenFilter { + r := &CjkWidthTokenFilter{} + + return r +} + +type CjkWidthTokenFilterVariant interface { + CjkWidthTokenFilterCaster() *CjkWidthTokenFilter +} + +func (s *CjkWidthTokenFilter) CjkWidthTokenFilterCaster() *CjkWidthTokenFilter { + return s +} + +func (s *CjkWidthTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenfilter.go new file mode 100644 index 000000000..18fb74af6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ClassicTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L478-L480 +type ClassicTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ClassicTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClassicTokenFilter) MarshalJSON() ([]byte, error) { + type innerClassicTokenFilter ClassicTokenFilter + tmp := innerClassicTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "classic" + + return json.Marshal(tmp) +} + +// NewClassicTokenFilter returns a ClassicTokenFilter. 
+func NewClassicTokenFilter() *ClassicTokenFilter { + r := &ClassicTokenFilter{} + + return r +} + +type ClassicTokenFilterVariant interface { + ClassicTokenFilterCaster() *ClassicTokenFilter +} + +func (s *ClassicTokenFilter) ClassicTokenFilterCaster() *ClassicTokenFilter { + return s +} + +func (s *ClassicTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenizer.go new file mode 100644 index 000000000..10fc50c9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classictokenizer.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClassicTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L40-L46 +type ClassicTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ClassicTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClassicTokenizer) MarshalJSON() ([]byte, error) { + type innerClassicTokenizer ClassicTokenizer + tmp := innerClassicTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "classic" + + return json.Marshal(tmp) +} + +// NewClassicTokenizer returns a ClassicTokenizer. 
+func NewClassicTokenizer() *ClassicTokenizer { + r := &ClassicTokenizer{} + + return r +} + +type ClassicTokenizerVariant interface { + ClassicTokenizerCaster() *ClassicTokenizer +} + +func (s *ClassicTokenizer) ClassicTokenizerCaster() *ClassicTokenizer { + return s +} + +func (s *ClassicTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classificationinferenceoptions.go new file mode 100644 index 000000000..50e6f716c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/classificationinferenceoptions.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClassificationInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L118-L133 +type ClassificationInferenceOptions struct { + // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // PredictionFieldType Specifies the type of the predicted field to write. Acceptable values are: + // string, number, boolean. When boolean is provided 1.0 is transformed to true + // and 0.0 to false. + PredictionFieldType *string `json:"prediction_field_type,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // TopClassesResultsField Specifies the field to which the top classes are written. Defaults to + // top_classes. 
+ TopClassesResultsField *string `json:"top_classes_results_field,omitempty"` +} + +func (s *ClassificationInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PredictionFieldType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictionFieldType = &o + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "top_classes_results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TopClassesResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TopClassesResultsField = &o + + } + } + return nil +} + +// NewClassificationInferenceOptions returns a ClassificationInferenceOptions. +func NewClassificationInferenceOptions() *ClassificationInferenceOptions { + r := &ClassificationInferenceOptions{} + + return r +} + +type ClassificationInferenceOptionsVariant interface { + ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions +} + +func (s *ClassificationInferenceOptions) ClassificationInferenceOptionsCaster() *ClassificationInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cleanuprepositoryresults.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cleanuprepositoryresults.go new file mode 100644 index 000000000..ffa386723 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cleanuprepositoryresults.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
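ClassificationInferenceOptions above is all optional pointer fields, so a config marshals to only what was explicitly set. A minimal sketch; the results field name is a hypothetical example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	topN := 3
	field := "predicted_label" // hypothetical results field name
	opts := types.NewClassificationInferenceOptions()
	opts.NumTopClasses = &topN
	opts.ResultsField = &field

	out, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"num_top_classes":3,"results_field":"predicted_label"}
}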
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CleanupRepositoryResults type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L37 +type CleanupRepositoryResults struct { + // DeletedBlobs The number of binary large objects (blobs) removed from the snapshot + // repository during cleanup operations. + // A non-zero value indicates that unreferenced blobs were found and + // subsequently cleaned up. + DeletedBlobs int64 `json:"deleted_blobs"` + // DeletedBytes The number of bytes freed by cleanup operations. + DeletedBytes int64 `json:"deleted_bytes"` +} + +func (s *CleanupRepositoryResults) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deleted_blobs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedBlobs", err) + } + s.DeletedBlobs = value + case float64: + f := int64(v) + s.DeletedBlobs = f + } + + case "deleted_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedBytes", err) + } + s.DeletedBytes = value + case float64: + f := int64(v) + s.DeletedBytes = f + } + + } + } + return nil +} + +// NewCleanupRepositoryResults returns a CleanupRepositoryResults. +func NewCleanupRepositoryResults() *CleanupRepositoryResults { + r := &CleanupRepositoryResults{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/client.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/client.go new file mode 100644 index 000000000..73b61909d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/client.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Client type. 
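CleanupRepositoryResults above carries the two counters from the snapshot cleanup-repository response; as with the other decoders in this diff, the int64 fields accept quoted numbers as well as plain ones. A minimal sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	res := types.NewCleanupRepositoryResults()
	raw := []byte(`{"deleted_blobs":4,"deleted_bytes":"20480"}`)
	if err := json.Unmarshal(raw, res); err != nil {
		panic(err)
	}
	fmt.Printf("freed %d bytes across %d blobs\n", res.DeletedBytes, res.DeletedBlobs) // freed 20480 bytes across 4 blobs
}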
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L722-L769 +type Client struct { + // Agent Reported agent for the HTTP client. + // If unavailable, this property is not included in the response. + Agent *string `json:"agent,omitempty"` + // ClosedTimeMillis Time at which the client closed the connection if the connection is closed. + ClosedTimeMillis *int64 `json:"closed_time_millis,omitempty"` + // Id Unique ID for the HTTP client. + Id *int64 `json:"id,omitempty"` + // LastRequestTimeMillis Time of the most recent request from this client. + LastRequestTimeMillis *int64 `json:"last_request_time_millis,omitempty"` + // LastUri The URI of the client’s most recent request. + LastUri *string `json:"last_uri,omitempty"` + // LocalAddress Local address for the HTTP connection. + LocalAddress *string `json:"local_address,omitempty"` + // OpenedTimeMillis Time at which the client opened the connection. + OpenedTimeMillis *int64 `json:"opened_time_millis,omitempty"` + // RemoteAddress Remote address for the HTTP connection. + RemoteAddress *string `json:"remote_address,omitempty"` + // RequestCount Number of requests from this client. + RequestCount *int64 `json:"request_count,omitempty"` + // RequestSizeBytes Cumulative size in bytes of all requests from this client. + RequestSizeBytes *int64 `json:"request_size_bytes,omitempty"` + // XOpaqueId Value from the client’s `x-opaque-id` HTTP header. + // If unavailable, this property is not included in the response. + XOpaqueId *string `json:"x_opaque_id,omitempty"` +} + +func (s *Client) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Agent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agent = &o + + case "closed_time_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ClosedTimeMillis", err) + } + s.ClosedTimeMillis = &value + case float64: + f := int64(v) + s.ClosedTimeMillis = &f + } + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = &value + case float64: + f := int64(v) + s.Id = &f + } + + case "last_request_time_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastRequestTimeMillis", err) + } + s.LastRequestTimeMillis = &value + case float64: + f := int64(v) + s.LastRequestTimeMillis = &f + } + + case "last_uri": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastUri", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastUri = &o + + case "local_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LocalAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LocalAddress = &o + 
+ case "opened_time_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OpenedTimeMillis", err) + } + s.OpenedTimeMillis = &value + case float64: + f := int64(v) + s.OpenedTimeMillis = &f + } + + case "remote_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RemoteAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteAddress = &o + + case "request_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RequestCount", err) + } + s.RequestCount = &value + case float64: + f := int64(v) + s.RequestCount = &f + } + + case "request_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RequestSizeBytes", err) + } + s.RequestSizeBytes = &value + case float64: + f := int64(v) + s.RequestSizeBytes = &f + } + + case "x_opaque_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "XOpaqueId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XOpaqueId = &o + + } + } + return nil +} + +// NewClient returns a Client. +func NewClient() *Client { + r := &Client{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeindexresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeindexresult.go new file mode 100644 index 000000000..55a05654c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeindexresult.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CloseIndexResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/close/CloseIndexResponse.ts#L32-L35 +type CloseIndexResult struct { + Closed bool `json:"closed"` + Shards map[string]CloseShardResult `json:"shards,omitempty"` +} + +func (s *CloseIndexResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "closed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Closed", err) + } + s.Closed = value + case bool: + s.Closed = v + } + + case "shards": + if s.Shards == nil { + s.Shards = make(map[string]CloseShardResult, 0) + } + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + } + } + return nil +} + +// NewCloseIndexResult returns a CloseIndexResult. +func NewCloseIndexResult() *CloseIndexResult { + r := &CloseIndexResult{ + Shards: make(map[string]CloseShardResult), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeshardresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeshardresult.go new file mode 100644 index 000000000..11fd137c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/closeshardresult.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CloseShardResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/close/CloseIndexResponse.ts#L37-L39 +type CloseShardResult struct { + Failures []ShardFailure `json:"failures"` +} + +// NewCloseShardResult returns a CloseShardResult. +func NewCloseShardResult() *CloseShardResult { + r := &CloseShardResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterappliedstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterappliedstats.go new file mode 100644 index 000000000..d0ef45764 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterappliedstats.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterAppliedStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L223-L225 +type ClusterAppliedStats struct { + Recordings []Recording `json:"recordings,omitempty"` +} + +// NewClusterAppliedStats returns a ClusterAppliedStats. +func NewClusterAppliedStats() *ClusterAppliedStats { + r := &ClusterAppliedStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustercomponenttemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustercomponenttemplate.go new file mode 100644 index 000000000..4fffa1379 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustercomponenttemplate.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ClusterComponentTemplate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/_types/ComponentTemplate.ts#L28-L31 +type ClusterComponentTemplate struct { + ComponentTemplate ComponentTemplateNode `json:"component_template"` + Name string `json:"name"` +} + +func (s *ClusterComponentTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "component_template": + if err := dec.Decode(&s.ComponentTemplate); err != nil { + return fmt.Errorf("%s | %w", "ComponentTemplate", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewClusterComponentTemplate returns a ClusterComponentTemplate. +func NewClusterComponentTemplate() *ClusterComponentTemplate { + r := &ClusterComponentTemplate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterdetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterdetails.go new file mode 100644 index 000000000..8bfcfe410 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterdetails.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clustersearchstatus" +) + +// ClusterDetails type. 
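// ----------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored diff.
// ClusterComponentTemplate (defined above) is the per-entry shape returned
// when listing component templates; the sketch decodes a single entry and
// assumes nothing beyond the vendored package path. The payload is made up
// for the example.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"name":"logs-settings","component_template":{"template":{}}}`)

	var ct types.ClusterComponentTemplate
	if err := json.Unmarshal(raw, &ct); err != nil {
		panic(err)
	}
	fmt.Println(ct.Name) // prints: logs-settings
}
// ----------------------------------------------------------------------------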
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L45-L52 +type ClusterDetails struct { + Failures []ShardFailure `json:"failures,omitempty"` + Indices string `json:"indices"` + Shards_ *ShardStatistics `json:"_shards,omitempty"` + Status clustersearchstatus.ClusterSearchStatus `json:"status"` + TimedOut bool `json:"timed_out"` + Took *int64 `json:"took,omitempty"` +} + +func (s *ClusterDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return fmt.Errorf("%s | %w", "Failures", err) + } + + case "indices": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Indices = o + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimedOut", err) + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + if err := dec.Decode(&s.Took); err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + + } + } + return nil +} + +// NewClusterDetails returns a ClusterDetails. +func NewClusterDetails() *ClusterDetails { + r := &ClusterDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterfilesystem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterfilesystem.go new file mode 100644 index 000000000..1cdfcbbac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterfilesystem.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterFileSystem type. 
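// ----------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored diff.
// ClusterDetails (defined above) is the per-cluster entry found under
// `_clusters.details` in cross-cluster search responses; its `status` field
// should decode through the vendored clustersearchstatus enum, which accepts
// the wire value as text. The sample JSON is made up for the example.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"indices":"logs-*","status":"successful","timed_out":false,"took":18}`)

	var d types.ClusterDetails
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	// Took is a *int64, so it is non-nil only when the field was present.
	fmt.Println(d.Indices, d.Status, d.TimedOut, *d.Took) // logs-* successful false 18
}
// ----------------------------------------------------------------------------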
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L35-L75 +type ClusterFileSystem struct { + // Available Total number of bytes available to JVM in file stores across all selected + // nodes. + // Depending on operating system or process-level restrictions, this number may + // be less than `nodes.fs.free_in_byes`. + // This is the actual amount of free disk space the selected Elasticsearch nodes + // can use. + Available ByteSize `json:"available,omitempty"` + // AvailableInBytes Total number of bytes available to JVM in file stores across all selected + // nodes. + // Depending on operating system or process-level restrictions, this number may + // be less than `nodes.fs.free_in_byes`. + // This is the actual amount of free disk space the selected Elasticsearch nodes + // can use. + AvailableInBytes *int64 `json:"available_in_bytes,omitempty"` + FloodStageFreeSpace ByteSize `json:"flood_stage_free_space,omitempty"` + FloodStageFreeSpaceInBytes *int64 `json:"flood_stage_free_space_in_bytes,omitempty"` + // Free Total number of unallocated bytes in file stores across all selected nodes. + Free ByteSize `json:"free,omitempty"` + // FreeInBytes Total number, in bytes, of unallocated bytes in file stores across all + // selected nodes. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + FrozenFloodStageFreeSpace ByteSize `json:"frozen_flood_stage_free_space,omitempty"` + FrozenFloodStageFreeSpaceInBytes *int64 `json:"frozen_flood_stage_free_space_in_bytes,omitempty"` + HighWatermarkFreeSpace ByteSize `json:"high_watermark_free_space,omitempty"` + HighWatermarkFreeSpaceInBytes *int64 `json:"high_watermark_free_space_in_bytes,omitempty"` + LowWatermarkFreeSpace ByteSize `json:"low_watermark_free_space,omitempty"` + LowWatermarkFreeSpaceInBytes *int64 `json:"low_watermark_free_space_in_bytes,omitempty"` + Mount *string `json:"mount,omitempty"` + Path *string `json:"path,omitempty"` + // Total Total size of all file stores across all selected nodes. + Total ByteSize `json:"total,omitempty"` + // TotalInBytes Total size, in bytes, of all file stores across all selected nodes. 
+ TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + Type *string `json:"type,omitempty"` +} + +func (s *ClusterFileSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + if err := dec.Decode(&s.Available); err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + + case "available_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvailableInBytes", err) + } + s.AvailableInBytes = &value + case float64: + f := int64(v) + s.AvailableInBytes = &f + } + + case "flood_stage_free_space": + if err := dec.Decode(&s.FloodStageFreeSpace); err != nil { + return fmt.Errorf("%s | %w", "FloodStageFreeSpace", err) + } + + case "flood_stage_free_space_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FloodStageFreeSpaceInBytes", err) + } + s.FloodStageFreeSpaceInBytes = &value + case float64: + f := int64(v) + s.FloodStageFreeSpaceInBytes = &f + } + + case "free": + if err := dec.Decode(&s.Free); err != nil { + return fmt.Errorf("%s | %w", "Free", err) + } + + case "free_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeInBytes", err) + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "frozen_flood_stage_free_space": + if err := dec.Decode(&s.FrozenFloodStageFreeSpace); err != nil { + return fmt.Errorf("%s | %w", "FrozenFloodStageFreeSpace", err) + } + + case "frozen_flood_stage_free_space_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FrozenFloodStageFreeSpaceInBytes", err) + } + s.FrozenFloodStageFreeSpaceInBytes = &value + case float64: + f := int64(v) + s.FrozenFloodStageFreeSpaceInBytes = &f + } + + case "high_watermark_free_space": + if err := dec.Decode(&s.HighWatermarkFreeSpace); err != nil { + return fmt.Errorf("%s | %w", "HighWatermarkFreeSpace", err) + } + + case "high_watermark_free_space_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HighWatermarkFreeSpaceInBytes", err) + } + s.HighWatermarkFreeSpaceInBytes = &value + case float64: + f := int64(v) + s.HighWatermarkFreeSpaceInBytes = &f + } + + case "low_watermark_free_space": + if err := dec.Decode(&s.LowWatermarkFreeSpace); err != nil { + return fmt.Errorf("%s | %w", "LowWatermarkFreeSpace", err) + } + + case "low_watermark_free_space_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LowWatermarkFreeSpaceInBytes", err) + } + s.LowWatermarkFreeSpaceInBytes = &value + case float64: + f := int64(v) + s.LowWatermarkFreeSpaceInBytes = &f + } + + case "mount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Mount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + 
if err != nil { + o = string(tmp[:]) + } + s.Mount = &o + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewClusterFileSystem returns a ClusterFileSystem. +func NewClusterFileSystem() *ClusterFileSystem { + r := &ClusterFileSystem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindexingpressure.go new file mode 100644 index 000000000..f4790da28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindexingpressure.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterIndexingPressure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L765-L767 +type ClusterIndexingPressure struct { + Memory NodesIndexingPressureMemory `json:"memory"` +} + +// NewClusterIndexingPressure returns a ClusterIndexingPressure. +func NewClusterIndexingPressure() *ClusterIndexingPressure { + r := &ClusterIndexingPressure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindices.go new file mode 100644 index 000000000..9dd2b63ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindices.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterIndices type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L100-L147 +type ClusterIndices struct { + // Analysis Contains statistics about analyzers and analyzer components used in selected + // nodes. + Analysis *CharFilterTypes `json:"analysis,omitempty"` + // Completion Contains statistics about memory used for completion in selected nodes. + Completion CompletionStats `json:"completion"` + // Count Total number of indices with shards assigned to selected nodes. + Count int64 `json:"count"` + // DenseVector Contains statistics about indexed dense vector + DenseVector DenseVectorStats `json:"dense_vector"` + // Docs Contains counts for documents in selected nodes. + Docs DocStats `json:"docs"` + // Fielddata Contains statistics about the field data cache of selected nodes. + Fielddata FielddataStats `json:"fielddata"` + // Mappings Contains statistics about field mappings in selected nodes. + Mappings *FieldTypesMappings `json:"mappings,omitempty"` + // QueryCache Contains statistics about the query cache of selected nodes. + QueryCache QueryCacheStats `json:"query_cache"` + // Search Holds a snapshot of the search usage statistics. + // Used to hold the stats for a single node that's part of a + // ClusterStatsNodeResponse, as well as to + // accumulate stats for the entire cluster and return them as part of the + // ClusterStatsResponse. + Search SearchUsageStats `json:"search"` + // Segments Contains statistics about segments in selected nodes. + Segments SegmentsStats `json:"segments"` + // Shards Contains statistics about indices with shards assigned to selected nodes. + Shards ClusterIndicesShards `json:"shards"` + // SparseVector Contains statistics about indexed sparse vector + SparseVector SparseVectorStats `json:"sparse_vector"` + // Store Contains statistics about the size of shards assigned to selected nodes. + Store StoreStats `json:"store"` + // Versions Contains statistics about analyzers and analyzer components used in selected + // nodes. 
+ Versions []IndicesVersions `json:"versions,omitempty"` +} + +func (s *ClusterIndices) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return fmt.Errorf("%s | %w", "Analysis", err) + } + + case "completion": + if err := dec.Decode(&s.Completion); err != nil { + return fmt.Errorf("%s | %w", "Completion", err) + } + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "dense_vector": + if err := dec.Decode(&s.DenseVector); err != nil { + return fmt.Errorf("%s | %w", "DenseVector", err) + } + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + + case "fielddata": + if err := dec.Decode(&s.Fielddata); err != nil { + return fmt.Errorf("%s | %w", "Fielddata", err) + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "query_cache": + if err := dec.Decode(&s.QueryCache); err != nil { + return fmt.Errorf("%s | %w", "QueryCache", err) + } + + case "search": + if err := dec.Decode(&s.Search); err != nil { + return fmt.Errorf("%s | %w", "Search", err) + } + + case "segments": + if err := dec.Decode(&s.Segments); err != nil { + return fmt.Errorf("%s | %w", "Segments", err) + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "sparse_vector": + if err := dec.Decode(&s.SparseVector); err != nil { + return fmt.Errorf("%s | %w", "SparseVector", err) + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + + case "versions": + if err := dec.Decode(&s.Versions); err != nil { + return fmt.Errorf("%s | %w", "Versions", err) + } + + } + } + return nil +} + +// NewClusterIndices returns a ClusterIndices. +func NewClusterIndices() *ClusterIndices { + r := &ClusterIndices{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshards.go new file mode 100644 index 000000000..1fac4b3bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshards.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
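// ----------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored diff.
// ClusterIndices (completed above) is the `indices` section of a cluster
// stats response. As in the other generated decoders in this package, numeric
// fields tolerate string-encoded values: the token-level switch routes a
// string through strconv.ParseInt and a JSON number through float64. The
// sample JSON is made up for the example.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "count" arrives string-encoded here; the generated decoder still
	// yields an int64. Absent sections simply keep their zero values.
	raw := []byte(`{"count":"42"}`)

	var idx types.ClusterIndices
	if err := json.Unmarshal(raw, &idx); err != nil {
		panic(err)
	}
	fmt.Println(idx.Count) // prints: 42
}
// ----------------------------------------------------------------------------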
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterIndicesShards type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L86-L98 +type ClusterIndicesShards struct { + // Index Contains statistics about shards assigned to selected nodes. + Index *ClusterIndicesShardsIndex `json:"index,omitempty"` + // Primaries Number of primary shards assigned to selected nodes. + Primaries *Float64 `json:"primaries,omitempty"` + // Replication Ratio of replica shards to primary shards across all selected nodes. + Replication *Float64 `json:"replication,omitempty"` + // Total Total number of shards assigned to selected nodes. + Total *Float64 `json:"total,omitempty"` +} + +func (s *ClusterIndicesShards) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Primaries", err) + } + f := Float64(value) + s.Primaries = &f + case float64: + f := Float64(v) + s.Primaries = &f + } + + case "replication": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Replication", err) + } + f := Float64(value) + s.Replication = &f + case float64: + f := Float64(v) + s.Replication = &f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + f := Float64(value) + s.Total = &f + case float64: + f := Float64(v) + s.Total = &f + } + + } + } + return nil +} + +// NewClusterIndicesShards returns a ClusterIndicesShards. +func NewClusterIndicesShards() *ClusterIndicesShards { + r := &ClusterIndicesShards{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshardsindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshardsindex.go new file mode 100644 index 000000000..b14b518f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterindicesshardsindex.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterIndicesShardsIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L77-L84 +type ClusterIndicesShardsIndex struct { + // Primaries Contains statistics about the number of primary shards assigned to selected + // nodes. + Primaries ClusterShardMetrics `json:"primaries"` + // Replication Contains statistics about the number of replication shards assigned to + // selected nodes. + Replication ClusterShardMetrics `json:"replication"` + // Shards Contains statistics about the number of shards assigned to selected nodes. + Shards ClusterShardMetrics `json:"shards"` +} + +// NewClusterIndicesShardsIndex returns a ClusterIndicesShardsIndex. +func NewClusterIndicesShardsIndex() *ClusterIndicesShardsIndex { + r := &ClusterIndicesShardsIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfo.go new file mode 100644 index 000000000..25b3f9321 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfo.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L49-L55 +type ClusterInfo struct { + Nodes map[string]NodeDiskUsage `json:"nodes"` + ReservedSizes []ReservedSize `json:"reserved_sizes"` + ShardDataSetSizes map[string]string `json:"shard_data_set_sizes,omitempty"` + ShardPaths map[string]string `json:"shard_paths"` + ShardSizes map[string]int64 `json:"shard_sizes"` +} + +// NewClusterInfo returns a ClusterInfo. 
+func NewClusterInfo() *ClusterInfo { + r := &ClusterInfo{ + Nodes: make(map[string]NodeDiskUsage), + ShardDataSetSizes: make(map[string]string), + ShardPaths: make(map[string]string), + ShardSizes: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfotargets.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfotargets.go new file mode 100644 index 000000000..1220a4617 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterinfotargets.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterinfotarget" +) + +// ClusterInfoTargets type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L388-L388 +type ClusterInfoTargets []clusterinfotarget.ClusterInfoTarget diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteringest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteringest.go new file mode 100644 index 000000000..229ea020c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteringest.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterIngest type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L367-L370 +type ClusterIngest struct { + NumberOfPipelines int `json:"number_of_pipelines"` + ProcessorStats map[string]ClusterProcessor `json:"processor_stats"` +} + +func (s *ClusterIngest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_pipelines": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfPipelines", err) + } + s.NumberOfPipelines = value + case float64: + f := int(v) + s.NumberOfPipelines = f + } + + case "processor_stats": + if s.ProcessorStats == nil { + s.ProcessorStats = make(map[string]ClusterProcessor, 0) + } + if err := dec.Decode(&s.ProcessorStats); err != nil { + return fmt.Errorf("%s | %w", "ProcessorStats", err) + } + + } + } + return nil +} + +// NewClusterIngest returns a ClusterIngest. +func NewClusterIngest() *ClusterIngest { + r := &ClusterIngest{ + ProcessorStats: make(map[string]ClusterProcessor), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvm.go new file mode 100644 index 000000000..e7a29f3aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvm.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterJvm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L372-L393 +type ClusterJvm struct { + // MaxUptime Uptime duration since JVM last started. + MaxUptime Duration `json:"max_uptime,omitempty"` + // MaxUptimeInMillis Uptime duration, in milliseconds, since JVM last started. + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + // Mem Contains statistics about memory used by selected nodes. + Mem ClusterJvmMemory `json:"mem"` + // Threads Number of active threads in use by JVM across all selected nodes. + Threads int64 `json:"threads"` + // Versions Contains statistics about the JVM versions used by selected nodes. 
+ Versions []ClusterJvmVersion `json:"versions"` +} + +func (s *ClusterJvm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_uptime": + if err := dec.Decode(&s.MaxUptime); err != nil { + return fmt.Errorf("%s | %w", "MaxUptime", err) + } + + case "max_uptime_in_millis": + if err := dec.Decode(&s.MaxUptimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "MaxUptimeInMillis", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "threads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Threads", err) + } + s.Threads = value + case float64: + f := int64(v) + s.Threads = f + } + + case "versions": + if err := dec.Decode(&s.Versions); err != nil { + return fmt.Errorf("%s | %w", "Versions", err) + } + + } + } + return nil +} + +// NewClusterJvm returns a ClusterJvm. +func NewClusterJvm() *ClusterJvm { + r := &ClusterJvm{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmmemory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmmemory.go new file mode 100644 index 000000000..f3f93937e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmmemory.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterJvmMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L395-L412 +type ClusterJvmMemory struct { + // HeapMax Maximum amount of memory available for use by the heap across all selected + // nodes. + HeapMax ByteSize `json:"heap_max,omitempty"` + // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap across all + // selected nodes. + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + // HeapUsed Memory currently in use by the heap across all selected nodes. + HeapUsed ByteSize `json:"heap_used,omitempty"` + // HeapUsedInBytes Memory, in bytes, currently in use by the heap across all selected nodes. 
+ HeapUsedInBytes int64 `json:"heap_used_in_bytes"` +} + +func (s *ClusterJvmMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return fmt.Errorf("%s | %w", "HeapMax", err) + } + + case "heap_max_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapMaxInBytes", err) + } + s.HeapMaxInBytes = value + case float64: + f := int64(v) + s.HeapMaxInBytes = f + } + + case "heap_used": + if err := dec.Decode(&s.HeapUsed); err != nil { + return fmt.Errorf("%s | %w", "HeapUsed", err) + } + + case "heap_used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapUsedInBytes", err) + } + s.HeapUsedInBytes = value + case float64: + f := int64(v) + s.HeapUsedInBytes = f + } + + } + } + return nil +} + +// NewClusterJvmMemory returns a ClusterJvmMemory. +func NewClusterJvmMemory() *ClusterJvmMemory { + r := &ClusterJvmMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmversion.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmversion.go new file mode 100644 index 000000000..40c3270f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterjvmversion.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterJvmVersion type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L414-L444 +type ClusterJvmVersion struct { + // BundledJdk Always `true`. All distributions come with a bundled Java Development Kit + // (JDK). + BundledJdk bool `json:"bundled_jdk"` + // Count Total number of selected nodes using JVM. + Count int `json:"count"` + // UsingBundledJdk If `true`, a bundled JDK is in use by JVM. + UsingBundledJdk bool `json:"using_bundled_jdk"` + // Version Version of JVM used by one or more selected nodes. + Version string `json:"version"` + // VmName Name of the JVM. + VmName string `json:"vm_name"` + // VmVendor Vendor of the JVM. 
+ VmVendor string `json:"vm_vendor"` + // VmVersion Full version number of JVM. + // The full version number includes a plus sign (+) followed by the build + // number. + VmVersion string `json:"vm_version"` +} + +func (s *ClusterJvmVersion) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bundled_jdk": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BundledJdk", err) + } + s.BundledJdk = value + case bool: + s.BundledJdk = v + } + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "using_bundled_jdk": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UsingBundledJdk", err) + } + s.UsingBundledJdk = value + case bool: + s.UsingBundledJdk = v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "vm_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VmName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmName = o + + case "vm_vendor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VmVendor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmVendor = o + + case "vm_version": + if err := dec.Decode(&s.VmVersion); err != nil { + return fmt.Errorf("%s | %w", "VmVersion", err) + } + + } + } + return nil +} + +// NewClusterJvmVersion returns a ClusterJvmVersion. +func NewClusterJvmVersion() *ClusterJvmVersion { + r := &ClusterJvmVersion{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternetworktypes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternetworktypes.go new file mode 100644 index 000000000..59d19d702 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternetworktypes.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterNetworkTypes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L446-L455 +type ClusterNetworkTypes struct { + // HttpTypes Contains statistics about the HTTP network types used by selected nodes. + HttpTypes map[string]int `json:"http_types"` + // TransportTypes Contains statistics about the transport network types used by selected nodes. + TransportTypes map[string]int `json:"transport_types"` +} + +// NewClusterNetworkTypes returns a ClusterNetworkTypes. +func NewClusterNetworkTypes() *ClusterNetworkTypes { + r := &ClusterNetworkTypes{ + HttpTypes: make(map[string]int), + TransportTypes: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternode.go new file mode 100644 index 000000000..8a4838d19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternode.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ClusterNode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/ClusterNode.ts#L22-L24 +type ClusterNode struct { + Name string `json:"name"` +} + +func (s *ClusterNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewClusterNode returns a ClusterNode. +func NewClusterNode() *ClusterNode { + r := &ClusterNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodecount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodecount.go new file mode 100644 index 000000000..83d0b4651 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodecount.go @@ -0,0 +1,335 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterNodeCount type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L457-L478 +type ClusterNodeCount struct { + CoordinatingOnly *int `json:"coordinating_only,omitempty"` + Data *int `json:"data,omitempty"` + DataCold *int `json:"data_cold,omitempty"` + DataContent *int `json:"data_content,omitempty"` + DataFrozen *int `json:"data_frozen,omitempty"` + DataHot *int `json:"data_hot,omitempty"` + DataWarm *int `json:"data_warm,omitempty"` + Index *int `json:"index,omitempty"` + Ingest *int `json:"ingest,omitempty"` + Master *int `json:"master,omitempty"` + Ml *int `json:"ml,omitempty"` + RemoteClusterClient *int `json:"remote_cluster_client,omitempty"` + Search *int `json:"search,omitempty"` + Total int `json:"total"` + Transform *int `json:"transform,omitempty"` + VotingOnly *int `json:"voting_only,omitempty"` +} + +func (s *ClusterNodeCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coordinating_only": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CoordinatingOnly", err) + } + s.CoordinatingOnly = &value + case float64: + f := int(v) + s.CoordinatingOnly = &f + } + + case "data": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + s.Data = &value + case float64: + f := int(v) + s.Data = &f + } + + case "data_cold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataCold", err) + } + s.DataCold = &value + case float64: + f := int(v) + s.DataCold = &f + } + + case "data_content": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataContent", err) + } + s.DataContent = &value + case float64: + f := int(v) + s.DataContent = &f + } + + case "data_frozen": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataFrozen", err) + } + s.DataFrozen = &value + case float64: + f := int(v) + s.DataFrozen = &f + } + + case "data_hot": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: 
+ value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataHot", err) + } + s.DataHot = &value + case float64: + f := int(v) + s.DataHot = &f + } + + case "data_warm": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataWarm", err) + } + s.DataWarm = &value + case float64: + f := int(v) + s.DataWarm = &f + } + + case "index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case float64: + f := int(v) + s.Index = &f + } + + case "ingest": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Ingest", err) + } + s.Ingest = &value + case float64: + f := int(v) + s.Ingest = &f + } + + case "master": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Master", err) + } + s.Master = &value + case float64: + f := int(v) + s.Master = &f + } + + case "ml": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Ml", err) + } + s.Ml = &value + case float64: + f := int(v) + s.Ml = &f + } + + case "remote_cluster_client": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoteClusterClient", err) + } + s.RemoteClusterClient = &value + case float64: + f := int(v) + s.RemoteClusterClient = &f + } + + case "search": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Search", err) + } + s.Search = &value + case float64: + f := int(v) + s.Search = &f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + case "transform": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Transform", err) + } + s.Transform = &value + case float64: + f := int(v) + s.Transform = &f + } + + case "voting_only": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "VotingOnly", err) + } + s.VotingOnly = &value + case float64: + f := int(v) + s.VotingOnly = &f + } + + } + } + return nil +} + +// NewClusterNodeCount returns a ClusterNodeCount. +func NewClusterNodeCount() *ClusterNodeCount { + r := &ClusterNodeCount{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodes.go new file mode 100644 index 000000000..8181e2a15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusternodes.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
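// ----------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored diff. In
// ClusterNodeCount (completed above) only `total` is required; every per-role
// counter is a *int, so a nil pointer distinguishes "role not reported" from
// a genuine zero. The sample JSON is made up for the example.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"total":3,"master":3,"data":3}`)

	var c types.ClusterNodeCount
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	if c.Master != nil {
		fmt.Println("master-eligible:", *c.Master) // prints: master-eligible: 3
	}
	fmt.Println("voting_only reported:", c.VotingOnly != nil) // prints: voting_only reported: false
}
// ----------------------------------------------------------------------------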
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterNodes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L480-L513 +type ClusterNodes struct { + // Count Contains counts for nodes selected by the request’s node filters. + Count ClusterNodeCount `json:"count"` + // DiscoveryTypes Contains statistics about the discovery types used by selected nodes. + DiscoveryTypes map[string]int `json:"discovery_types"` + // Fs Contains statistics about file stores by selected nodes. + Fs ClusterFileSystem `json:"fs"` + IndexingPressure ClusterIndexingPressure `json:"indexing_pressure"` + Ingest ClusterIngest `json:"ingest"` + // Jvm Contains statistics about the Java Virtual Machines (JVMs) used by selected + // nodes. + Jvm ClusterJvm `json:"jvm"` + // NetworkTypes Contains statistics about the transport and HTTP networks used by selected + // nodes. + NetworkTypes ClusterNetworkTypes `json:"network_types"` + // Os Contains statistics about the operating systems used by selected nodes. + Os ClusterOperatingSystem `json:"os"` + // PackagingTypes Contains statistics about Elasticsearch distributions installed on selected + // nodes. + PackagingTypes []NodePackagingType `json:"packaging_types"` + // Plugins Contains statistics about installed plugins and modules by selected nodes. + // If no plugins or modules are installed, this array is empty. + Plugins []PluginStats `json:"plugins"` + // Process Contains statistics about processes used by selected nodes. + Process ClusterProcess `json:"process"` + // Versions Array of Elasticsearch versions used on selected nodes. + Versions []string `json:"versions"` +} + +// NewClusterNodes returns a ClusterNodes. +func NewClusterNodes() *ClusterNodes { + r := &ClusterNodes{ + DiscoveryTypes: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystem.go new file mode 100644 index 000000000..d63e75b43 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystem.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
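// ----------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the vendored diff. The
// generated New* constructors pre-allocate map-typed fields (for example
// ClusterNodes.DiscoveryTypes, initialized by NewClusterNodes above); writing
// to a nil map panics at runtime, so prefer the constructor over a zero-value
// literal when building these structs by hand.
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	n := types.NewClusterNodes() // DiscoveryTypes already allocated
	n.DiscoveryTypes["zen"]++    // safe; would panic on a zero-value ClusterNodes
	fmt.Println(n.DiscoveryTypes) // prints: map[zen:1]
}
// ----------------------------------------------------------------------------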
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterOperatingSystem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L526-L553 +type ClusterOperatingSystem struct { + // AllocatedProcessors Number of processors used to calculate thread pool size across all selected + // nodes. + // This number can be set with the processors setting of a node and defaults to + // the number of processors reported by the operating system. + // In both cases, this number will never be larger than 32. + AllocatedProcessors int `json:"allocated_processors"` + // Architectures Contains statistics about processor architectures (for example, x86_64 or + // aarch64) used by selected nodes. + Architectures []ClusterOperatingSystemArchitecture `json:"architectures,omitempty"` + // AvailableProcessors Number of processors available to JVM across all selected nodes. + AvailableProcessors int `json:"available_processors"` + // Mem Contains statistics about memory used by selected nodes. + Mem OperatingSystemMemoryInfo `json:"mem"` + // Names Contains statistics about operating systems used by selected nodes. + Names []ClusterOperatingSystemName `json:"names"` + // PrettyNames Contains statistics about operating systems used by selected nodes. + PrettyNames []ClusterOperatingSystemPrettyName `json:"pretty_names"` +} + +func (s *ClusterOperatingSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocated_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllocatedProcessors", err) + } + s.AllocatedProcessors = value + case float64: + f := int(v) + s.AllocatedProcessors = f + } + + case "architectures": + if err := dec.Decode(&s.Architectures); err != nil { + return fmt.Errorf("%s | %w", "Architectures", err) + } + + case "available_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AvailableProcessors", err) + } + s.AvailableProcessors = value + case float64: + f := int(v) + s.AvailableProcessors = f + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "names": + if err := dec.Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + case "pretty_names": + if err := dec.Decode(&s.PrettyNames); err != nil { + return fmt.Errorf("%s | %w", "PrettyNames", err) + } + + } + } + return nil +} + +// NewClusterOperatingSystem returns a ClusterOperatingSystem. 
+func NewClusterOperatingSystem() *ClusterOperatingSystem { + r := &ClusterOperatingSystem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemarchitecture.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemarchitecture.go new file mode 100644 index 000000000..a5e6d9aae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemarchitecture.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterOperatingSystemArchitecture type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L515-L524 +type ClusterOperatingSystemArchitecture struct { + // Arch Name of an architecture used by one or more selected nodes. + Arch string `json:"arch"` + // Count Number of selected nodes using the architecture. + Count int `json:"count"` +} + +func (s *ClusterOperatingSystemArchitecture) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "arch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Arch", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arch = o + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + } + } + return nil +} + +// NewClusterOperatingSystemArchitecture returns a ClusterOperatingSystemArchitecture. +func NewClusterOperatingSystemArchitecture() *ClusterOperatingSystemArchitecture { + r := &ClusterOperatingSystemArchitecture{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemname.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemname.go new file mode 100644 index 000000000..c797e165c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemname.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterOperatingSystemName type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L555-L564 +type ClusterOperatingSystemName struct { + // Count Number of selected nodes using the operating system. + Count int `json:"count"` + // Name Name of an operating system used by one or more selected nodes. + Name string `json:"name"` +} + +func (s *ClusterOperatingSystemName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewClusterOperatingSystemName returns a ClusterOperatingSystemName. +func NewClusterOperatingSystemName() *ClusterOperatingSystemName { + r := &ClusterOperatingSystemName{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemprettyname.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemprettyname.go new file mode 100644 index 000000000..9c4a50e15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusteroperatingsystemprettyname.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterOperatingSystemPrettyName type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L566-L575 +type ClusterOperatingSystemPrettyName struct { + // Count Number of selected nodes using the operating system. + Count int `json:"count"` + // PrettyName Human-readable name of an operating system used by one or more selected + // nodes. + PrettyName string `json:"pretty_name"` +} + +func (s *ClusterOperatingSystemPrettyName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "pretty_name": + if err := dec.Decode(&s.PrettyName); err != nil { + return fmt.Errorf("%s | %w", "PrettyName", err) + } + + } + } + return nil +} + +// NewClusterOperatingSystemPrettyName returns a ClusterOperatingSystemPrettyName. +func NewClusterOperatingSystemPrettyName() *ClusterOperatingSystemPrettyName { + r := &ClusterOperatingSystemPrettyName{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocess.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocess.go new file mode 100644 index 000000000..8c5e03307 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocess.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterProcess type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L577-L586 +type ClusterProcess struct { + // Cpu Contains statistics about CPU used by selected nodes. + Cpu ClusterProcessCpu `json:"cpu"` + // OpenFileDescriptors Contains statistics about open file descriptors in selected nodes. + OpenFileDescriptors ClusterProcessOpenFileDescriptors `json:"open_file_descriptors"` +} + +// NewClusterProcess returns a ClusterProcess. 
+func NewClusterProcess() *ClusterProcess { + r := &ClusterProcess{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocesscpu.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocesscpu.go new file mode 100644 index 000000000..eb7a27866 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocesscpu.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterProcessCpu type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L588-L594 +type ClusterProcessCpu struct { + // Percent Percentage of CPU used across all selected nodes. + // Returns `-1` if not supported. + Percent int `json:"percent"` +} + +func (s *ClusterProcessCpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Percent", err) + } + s.Percent = value + case float64: + f := int(v) + s.Percent = f + } + + } + } + return nil +} + +// NewClusterProcessCpu returns a ClusterProcessCpu. +func NewClusterProcessCpu() *ClusterProcessCpu { + r := &ClusterProcessCpu{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessopenfiledescriptors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessopenfiledescriptors.go new file mode 100644 index 000000000..483d1b7f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessopenfiledescriptors.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterProcessOpenFileDescriptors type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L596-L612 +type ClusterProcessOpenFileDescriptors struct { + // Avg Average number of concurrently open file descriptors. + // Returns `-1` if not supported. + Avg int64 `json:"avg"` + // Max Maximum number of concurrently open file descriptors allowed across all + // selected nodes. + // Returns `-1` if not supported. + Max int64 `json:"max"` + // Min Minimum number of concurrently open file descriptors across all selected + // nodes. + // Returns -1 if not supported. + Min int64 `json:"min"` +} + +func (s *ClusterProcessOpenFileDescriptors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + s.Avg = value + case float64: + f := int64(v) + s.Avg = f + } + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + s.Max = value + case float64: + f := int64(v) + s.Max = f + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + s.Min = value + case float64: + f := int64(v) + s.Min = f + } + + } + } + return nil +} + +// NewClusterProcessOpenFileDescriptors returns a ClusterProcessOpenFileDescriptors. +func NewClusterProcessOpenFileDescriptors() *ClusterProcessOpenFileDescriptors { + r := &ClusterProcessOpenFileDescriptors{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessor.go new file mode 100644 index 000000000..41e42ea13 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterprocessor.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L614-L620 +type ClusterProcessor struct { + Count int64 `json:"count"` + Current int64 `json:"current"` + Failed int64 `json:"failed"` + Time Duration `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis"` +} + +func (s *ClusterProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + } + } + return nil +} + +// NewClusterProcessor returns a ClusterProcessor. +func NewClusterProcessor() *ClusterProcessor { + r := &ClusterProcessor{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteinfo.go new file mode 100644 index 000000000..1f7fe86e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteinfo.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterRemoteInfo holds the union for the following types: +// +// ClusterRemoteSniffInfo +// ClusterRemoteProxyInfo +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30 +type ClusterRemoteInfo any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteproxyinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteproxyinfo.go new file mode 100644 index 000000000..6c232c126 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremoteproxyinfo.go @@ -0,0 +1,216 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterRemoteProxyInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L58-L83 +type ClusterRemoteProxyInfo struct { + // ClusterCredentials This field is present and has a value of `::es_redacted::` only when the + // remote cluster is configured with the API key based model. Otherwise, the + // field is not present. + ClusterCredentials *string `json:"cluster_credentials,omitempty"` + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. 
+ InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxProxySocketConnections The maximum number of socket connections to the remote cluster when proxy + // mode is configured. + MaxProxySocketConnections int `json:"max_proxy_socket_connections"` + // Mode The connection mode for the remote cluster. + Mode string `json:"mode,omitempty"` + // NumProxySocketsConnected The number of open socket connections to the remote cluster when proxy mode + // is configured. + NumProxySocketsConnected int `json:"num_proxy_sockets_connected"` + // ProxyAddress The address for remote connections when proxy mode is configured. + ProxyAddress string `json:"proxy_address"` + ServerName string `json:"server_name"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` +} + +func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_credentials": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClusterCredentials", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterCredentials = &o + + case "connected": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Connected", err) + } + s.Connected = value + case bool: + s.Connected = v + } + + case "initial_connect_timeout": + if err := dec.Decode(&s.InitialConnectTimeout); err != nil { + return fmt.Errorf("%s | %w", "InitialConnectTimeout", err) + } + + case "max_proxy_socket_connections": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxProxySocketConnections", err) + } + s.MaxProxySocketConnections = value + case float64: + f := int(v) + s.MaxProxySocketConnections = f + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "num_proxy_sockets_connected": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumProxySocketsConnected", err) + } + s.NumProxySocketsConnected = value + case float64: + f := int(v) + s.NumProxySocketsConnected = f + } + + case "proxy_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProxyAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProxyAddress = o + + case "server_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServerName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServerName = o + + case "skip_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipUnavailable", err) + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + } + } + return nil +} 
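
Every generated UnmarshalJSON in these vendored files repeats the same tolerant-decoding shape: walk the object with a json.Decoder token loop, and accept each scalar field either in its native JSON type or as a quoted string, falling back to the matching strconv parser (Atoi, ParseInt, ParseFloat, ParseBool). The MarshalJSON overrides, such as the one just below, use the complementary trick: convert to a local type alias that strips the methods, pin the literal "mode" discriminator, and marshal the alias so the override does not recurse. A minimal, self-contained sketch of both patterns, using hypothetical counts and proxyInfo types rather than any of the vendored ones:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strconv"
)

// counts is a hypothetical stand-in for the generated stats types.
type counts struct {
	Total int `json:"total"`
}

// UnmarshalJSON accepts {"total": 42} as well as {"total": "42"},
// mirroring the lenient pattern used throughout the generated code.
func (s *counts) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		if t == "total" {
			var tmp any
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string: // quoted number: fall back to strconv
				value, err := strconv.Atoi(v)
				if err != nil {
					return fmt.Errorf("%s | %w", "Total", err)
				}
				s.Total = value
			case float64: // plain JSON number
				s.Total = int(v)
			}
		}
	}
	return nil
}

// proxyInfo is a hypothetical stand-in for ClusterRemoteProxyInfo.
type proxyInfo struct {
	Mode         string `json:"mode,omitempty"`
	ProxyAddress string `json:"proxy_address"`
}

// MarshalJSON pins the discriminator: the inner alias carries the same
// fields and tags but no methods, so json.Marshal(tmp) does not
// re-enter this override.
func (s proxyInfo) MarshalJSON() ([]byte, error) {
	type innerProxyInfo proxyInfo
	tmp := innerProxyInfo(s)
	tmp.Mode = "proxy"
	return json.Marshal(tmp)
}

func main() {
	var a, b counts
	_ = json.Unmarshal([]byte(`{"total": 42}`), &a)
	_ = json.Unmarshal([]byte(`{"total": "42"}`), &b)
	fmt.Println(a.Total, b.Total) // 42 42

	out, _ := json.Marshal(proxyInfo{ProxyAddress: "1.2.3.4:9400"})
	fmt.Println(string(out)) // {"mode":"proxy","proxy_address":"1.2.3.4:9400"}
}

The same shape scales to the int64, Float64, and bool fields seen in the surrounding files, and the alias trick is exactly how ClusterRemoteProxyInfo and ClusterRemoteSniffInfo below guarantee their "proxy"/"sniff" mode values on output.
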
+ +// MarshalJSON override marshalling to include literal value +func (s ClusterRemoteProxyInfo) MarshalJSON() ([]byte, error) { + type innerClusterRemoteProxyInfo ClusterRemoteProxyInfo + tmp := innerClusterRemoteProxyInfo{ + ClusterCredentials: s.ClusterCredentials, + Connected: s.Connected, + InitialConnectTimeout: s.InitialConnectTimeout, + MaxProxySocketConnections: s.MaxProxySocketConnections, + Mode: s.Mode, + NumProxySocketsConnected: s.NumProxySocketsConnected, + ProxyAddress: s.ProxyAddress, + ServerName: s.ServerName, + SkipUnavailable: s.SkipUnavailable, + } + + tmp.Mode = "proxy" + + return json.Marshal(tmp) +} + +// NewClusterRemoteProxyInfo returns a ClusterRemoteProxyInfo. +func NewClusterRemoteProxyInfo() *ClusterRemoteProxyInfo { + r := &ClusterRemoteProxyInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremotesniffinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremotesniffinfo.go new file mode 100644 index 000000000..5f23d3798 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterremotesniffinfo.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterRemoteSniffInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L32-L56 +type ClusterRemoteSniffInfo struct { + // Connected If it is `true`, there is at least one open connection to the remote cluster. + // If it is `false`, it means that the cluster no longer has an open connection + // to the remote cluster. + // It does not necessarily mean that the remote cluster is down or unavailable, + // just that at some point a connection was lost. + Connected bool `json:"connected"` + // InitialConnectTimeout The initial connect timeout for remote cluster connections. + InitialConnectTimeout Duration `json:"initial_connect_timeout"` + // MaxConnectionsPerCluster The maximum number of connections maintained for the remote cluster when + // sniff mode is configured. + MaxConnectionsPerCluster int `json:"max_connections_per_cluster"` + // Mode The connection mode for the remote cluster. + Mode string `json:"mode,omitempty"` + // NumNodesConnected The number of connected nodes in the remote cluster when sniff mode is + // configured. 
+ NumNodesConnected int64 `json:"num_nodes_connected"` + // Seeds The initial seed transport addresses of the remote cluster when sniff mode is + // configured. + Seeds []string `json:"seeds"` + // SkipUnavailable If `true`, cross-cluster search skips the remote cluster when its nodes are + // unavailable during the search and ignores errors returned by the remote + // cluster. + SkipUnavailable bool `json:"skip_unavailable"` +} + +func (s *ClusterRemoteSniffInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connected": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Connected", err) + } + s.Connected = value + case bool: + s.Connected = v + } + + case "initial_connect_timeout": + if err := dec.Decode(&s.InitialConnectTimeout); err != nil { + return fmt.Errorf("%s | %w", "InitialConnectTimeout", err) + } + + case "max_connections_per_cluster": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxConnectionsPerCluster", err) + } + s.MaxConnectionsPerCluster = value + case float64: + f := int(v) + s.MaxConnectionsPerCluster = f + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "num_nodes_connected": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumNodesConnected", err) + } + s.NumNodesConnected = value + case float64: + f := int64(v) + s.NumNodesConnected = f + } + + case "seeds": + if err := dec.Decode(&s.Seeds); err != nil { + return fmt.Errorf("%s | %w", "Seeds", err) + } + + case "skip_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipUnavailable", err) + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClusterRemoteSniffInfo) MarshalJSON() ([]byte, error) { + type innerClusterRemoteSniffInfo ClusterRemoteSniffInfo + tmp := innerClusterRemoteSniffInfo{ + Connected: s.Connected, + InitialConnectTimeout: s.InitialConnectTimeout, + MaxConnectionsPerCluster: s.MaxConnectionsPerCluster, + Mode: s.Mode, + NumNodesConnected: s.NumNodesConnected, + Seeds: s.Seeds, + SkipUnavailable: s.SkipUnavailable, + } + + tmp.Mode = "sniff" + + return json.Marshal(tmp) +} + +// NewClusterRemoteSniffInfo returns a ClusterRemoteSniffInfo. +func NewClusterRemoteSniffInfo() *ClusterRemoteSniffInfo { + r := &ClusterRemoteSniffInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterruntimefieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterruntimefieldtypes.go new file mode 100644 index 000000000..03f220d08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterruntimefieldtypes.go @@ -0,0 +1,298 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterRuntimeFieldTypes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L256-L313 +type ClusterRuntimeFieldTypes struct { + // CharsMax Maximum number of characters for a single runtime field script. + CharsMax int `json:"chars_max"` + // CharsTotal Total number of characters for the scripts that define the current runtime + // field data type. + CharsTotal int `json:"chars_total"` + // Count Number of runtime fields mapped to the field data type in selected nodes. + Count int `json:"count"` + // DocMax Maximum number of accesses to doc_values for a single runtime field script + DocMax int `json:"doc_max"` + // DocTotal Total number of accesses to doc_values for the scripts that define the + // current runtime field data type. + DocTotal int `json:"doc_total"` + // IndexCount Number of indices containing a mapping of the runtime field data type in + // selected nodes. + IndexCount int `json:"index_count"` + // Lang Script languages used for the runtime fields scripts. + Lang []string `json:"lang"` + // LinesMax Maximum number of lines for a single runtime field script. + LinesMax int `json:"lines_max"` + // LinesTotal Total number of lines for the scripts that define the current runtime field + // data type. + LinesTotal int `json:"lines_total"` + // Name Field data type used in selected nodes. + Name string `json:"name"` + // ScriptlessCount Number of runtime fields that don’t declare a script. + ScriptlessCount int `json:"scriptless_count"` + // ShadowedCount Number of runtime fields that shadow an indexed field. + ShadowedCount int `json:"shadowed_count"` + // SourceMax Maximum number of accesses to _source for a single runtime field script. + SourceMax int `json:"source_max"` + // SourceTotal Total number of accesses to _source for the scripts that define the current + // runtime field data type. 
+ SourceTotal int `json:"source_total"` +} + +func (s *ClusterRuntimeFieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chars_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CharsMax", err) + } + s.CharsMax = value + case float64: + f := int(v) + s.CharsMax = f + } + + case "chars_total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CharsTotal", err) + } + s.CharsTotal = value + case float64: + f := int(v) + s.CharsTotal = f + } + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "doc_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocMax", err) + } + s.DocMax = value + case float64: + f := int(v) + s.DocMax = f + } + + case "doc_total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocTotal", err) + } + s.DocTotal = value + case float64: + f := int(v) + s.DocTotal = f + } + + case "index_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "lines_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LinesMax", err) + } + s.LinesMax = value + case float64: + f := int(v) + s.LinesMax = f + } + + case "lines_total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LinesTotal", err) + } + s.LinesTotal = value + case float64: + f := int(v) + s.LinesTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "scriptless_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptlessCount", err) + } + s.ScriptlessCount = value + case float64: + f := int(v) + s.ScriptlessCount = f + } + + case "shadowed_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShadowedCount", err) + } + s.ShadowedCount = value + case float64: + f := int(v) + s.ShadowedCount = f + } + + case "source_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SourceMax", err) + } + s.SourceMax = value + case float64: + f := int(v) + 
s.SourceMax = f + } + + case "source_total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SourceTotal", err) + } + s.SourceTotal = value + case float64: + f := int(v) + s.SourceTotal = f + } + + } + } + return nil +} + +// NewClusterRuntimeFieldTypes returns a ClusterRuntimeFieldTypes. +func NewClusterRuntimeFieldTypes() *ClusterRuntimeFieldTypes { + r := &ClusterRuntimeFieldTypes{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustershardmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustershardmetrics.go new file mode 100644 index 000000000..571ca374d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustershardmetrics.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterShardMetrics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L622-L635 +type ClusterShardMetrics struct { + // Avg Mean number of shards in an index, counting only shards assigned to selected + // nodes. + Avg Float64 `json:"avg"` + // Max Maximum number of shards in an index, counting only shards assigned to + // selected nodes. + Max Float64 `json:"max"` + // Min Minimum number of shards in an index, counting only shards assigned to + // selected nodes. 
+ Min Float64 `json:"min"` +} + +func (s *ClusterShardMetrics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + } + } + return nil +} + +// NewClusterShardMetrics returns a ClusterShardMetrics. +func NewClusterShardMetrics() *ClusterShardMetrics { + r := &ClusterShardMetrics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustersnapshotstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustersnapshotstats.go new file mode 100644 index 000000000..709db8549 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clustersnapshotstats.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ClusterSnapshotStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L637-L640 +type ClusterSnapshotStats struct { + CurrentCounts SnapshotCurrentCounts `json:"current_counts"` + Repositories map[string]PerRepositoryStats `json:"repositories"` +} + +// NewClusterSnapshotStats returns a ClusterSnapshotStats. 
+func NewClusterSnapshotStats() *ClusterSnapshotStats { + r := &ClusterSnapshotStats{ + Repositories: make(map[string]PerRepositoryStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatequeue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatequeue.go new file mode 100644 index 000000000..ceb7bb76b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatequeue.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterStateQueue type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L250-L263 +type ClusterStateQueue struct { + // Committed Number of committed cluster states in queue. + Committed *int64 `json:"committed,omitempty"` + // Pending Number of pending cluster states in queue. + Pending *int64 `json:"pending,omitempty"` + // Total Total number of cluster states in queue. + Total *int64 `json:"total,omitempty"` +} + +func (s *ClusterStateQueue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "committed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Committed", err) + } + s.Committed = &value + case float64: + f := int64(v) + s.Committed = &f + } + + case "pending": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Pending", err) + } + s.Pending = &value + case float64: + f := int64(v) + s.Pending = &f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = &value + case float64: + f := int64(v) + s.Total = &f + } + + } + } + return nil +} + +// NewClusterStateQueue returns a ClusterStateQueue. 
+func NewClusterStateQueue() *ClusterStateQueue { + r := &ClusterStateQueue{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstateupdate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstateupdate.go new file mode 100644 index 000000000..693c0d959 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstateupdate.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterStateUpdate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L280-L345 +type ClusterStateUpdate struct { + // CommitTime The cumulative amount of time spent waiting for a successful cluster state + // update to commit, which measures the time from the start of each publication + // until a majority of the master-eligible nodes have written the state to disk + // and confirmed the write to the elected master. + CommitTime Duration `json:"commit_time,omitempty"` + // CommitTimeMillis The cumulative amount of time, in milliseconds, spent waiting for a + // successful cluster state update to commit, which measures the time from the + // start of each publication until a majority of the master-eligible nodes have + // written the state to disk and confirmed the write to the elected master. + CommitTimeMillis *int64 `json:"commit_time_millis,omitempty"` + // CompletionTime The cumulative amount of time spent waiting for a successful cluster state + // update to complete, which measures the time from the start of each + // publication until all the other nodes have notified the elected master that + // they have applied the cluster state. + CompletionTime Duration `json:"completion_time,omitempty"` + // CompletionTimeMillis The cumulative amount of time, in milliseconds, spent waiting for a + // successful cluster state update to complete, which measures the time from the + // start of each publication until all the other nodes have notified the elected + // master that they have applied the cluster state. + CompletionTimeMillis *int64 `json:"completion_time_millis,omitempty"` + // ComputationTime The cumulative amount of time spent computing no-op cluster state updates + // since the node started. 
+ ComputationTime Duration `json:"computation_time,omitempty"` + // ComputationTimeMillis The cumulative amount of time, in milliseconds, spent computing no-op cluster + // state updates since the node started. + ComputationTimeMillis *int64 `json:"computation_time_millis,omitempty"` + // ContextConstructionTime The cumulative amount of time spent constructing a publication context since + // the node started for publications that ultimately succeeded. + // This statistic includes the time spent computing the difference between the + // current and new cluster state preparing a serialized representation of this + // difference. + ContextConstructionTime Duration `json:"context_construction_time,omitempty"` + // ContextConstructionTimeMillis The cumulative amount of time, in milliseconds, spent constructing a + // publication context since the node started for publications that ultimately + // succeeded. + // This statistic includes the time spent computing the difference between the + // current and new cluster state preparing a serialized representation of this + // difference. + ContextConstructionTimeMillis *int64 `json:"context_construction_time_millis,omitempty"` + // Count The number of cluster state update attempts that did not change the cluster + // state since the node started. + Count int64 `json:"count"` + // MasterApplyTime The cumulative amount of time spent successfully applying cluster state + // updates on the elected master since the node started. + MasterApplyTime Duration `json:"master_apply_time,omitempty"` + // MasterApplyTimeMillis The cumulative amount of time, in milliseconds, spent successfully applying + // cluster state updates on the elected master since the node started. + MasterApplyTimeMillis *int64 `json:"master_apply_time_millis,omitempty"` + // NotificationTime The cumulative amount of time spent notifying listeners of a no-op cluster + // state update since the node started. + NotificationTime Duration `json:"notification_time,omitempty"` + // NotificationTimeMillis The cumulative amount of time, in milliseconds, spent notifying listeners of + // a no-op cluster state update since the node started. + NotificationTimeMillis *int64 `json:"notification_time_millis,omitempty"` + // PublicationTime The cumulative amount of time spent publishing cluster state updates which + // ultimately succeeded, which includes everything from the start of the + // publication (just after the computation of the new cluster state) until the + // publication has finished and the master node is ready to start processing the + // next state update. + // This includes the time measured by `context_construction_time`, + // `commit_time`, `completion_time` and `master_apply_time`. + PublicationTime Duration `json:"publication_time,omitempty"` + // PublicationTimeMillis The cumulative amount of time, in milliseconds, spent publishing cluster + // state updates which ultimately succeeded, which includes everything from the + // start of the publication (just after the computation of the new cluster + // state) until the publication has finished and the master node is ready to + // start processing the next state update. + // This includes the time measured by `context_construction_time`, + // `commit_time`, `completion_time` and `master_apply_time`. 
+ PublicationTimeMillis *int64 `json:"publication_time_millis,omitempty"` +} + +func (s *ClusterStateUpdate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "commit_time": + if err := dec.Decode(&s.CommitTime); err != nil { + return fmt.Errorf("%s | %w", "CommitTime", err) + } + + case "commit_time_millis": + if err := dec.Decode(&s.CommitTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "CommitTimeMillis", err) + } + + case "completion_time": + if err := dec.Decode(&s.CompletionTime); err != nil { + return fmt.Errorf("%s | %w", "CompletionTime", err) + } + + case "completion_time_millis": + if err := dec.Decode(&s.CompletionTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "CompletionTimeMillis", err) + } + + case "computation_time": + if err := dec.Decode(&s.ComputationTime); err != nil { + return fmt.Errorf("%s | %w", "ComputationTime", err) + } + + case "computation_time_millis": + if err := dec.Decode(&s.ComputationTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "ComputationTimeMillis", err) + } + + case "context_construction_time": + if err := dec.Decode(&s.ContextConstructionTime); err != nil { + return fmt.Errorf("%s | %w", "ContextConstructionTime", err) + } + + case "context_construction_time_millis": + if err := dec.Decode(&s.ContextConstructionTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "ContextConstructionTimeMillis", err) + } + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "master_apply_time": + if err := dec.Decode(&s.MasterApplyTime); err != nil { + return fmt.Errorf("%s | %w", "MasterApplyTime", err) + } + + case "master_apply_time_millis": + if err := dec.Decode(&s.MasterApplyTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "MasterApplyTimeMillis", err) + } + + case "notification_time": + if err := dec.Decode(&s.NotificationTime); err != nil { + return fmt.Errorf("%s | %w", "NotificationTime", err) + } + + case "notification_time_millis": + if err := dec.Decode(&s.NotificationTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "NotificationTimeMillis", err) + } + + case "publication_time": + if err := dec.Decode(&s.PublicationTime); err != nil { + return fmt.Errorf("%s | %w", "PublicationTime", err) + } + + case "publication_time_millis": + if err := dec.Decode(&s.PublicationTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "PublicationTimeMillis", err) + } + + } + } + return nil +} + +// NewClusterStateUpdate returns a ClusterStateUpdate. +func NewClusterStateUpdate() *ClusterStateUpdate { + r := &ClusterStateUpdate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatistics.go new file mode 100644 index 000000000..6f8d60ac0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/clusterstatistics.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ClusterStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L27-L35 +type ClusterStatistics struct { + Details map[string]ClusterDetails `json:"details,omitempty"` + Failed int `json:"failed"` + Partial int `json:"partial"` + Running int `json:"running"` + Skipped int `json:"skipped"` + Successful int `json:"successful"` + Total int `json:"total"` +} + +func (s *ClusterStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if s.Details == nil { + s.Details = make(map[string]ClusterDetails, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "failed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int(v) + s.Failed = f + } + + case "partial": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Partial", err) + } + s.Partial = value + case float64: + f := int(v) + s.Partial = f + } + + case "running": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Running", err) + } + s.Running = value + case float64: + f := int(v) + s.Running = f + } + + case "skipped": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skipped", err) + } + s.Skipped = value + case float64: + f := int(v) + s.Skipped = f + } + + case "successful": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successful", err) + } + s.Successful = value + case float64: + f := int(v) + s.Successful = f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewClusterStatistics returns a ClusterStatistics. 
+func NewClusterStatistics() *ClusterStatistics { + r := &ClusterStatistics{ + Details: make(map[string]ClusterDetails), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cohereservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cohereservicesettings.go new file mode 100644 index 000000000..7ce0eacff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cohereservicesettings.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereembeddingtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheresimilaritytype" +) + +// CohereServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L788-L831 +type CohereServiceSettings struct { + // ApiKey A valid API key for your Cohere account. + // You can find or create your Cohere API keys on the Cohere API key settings + // page. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // EmbeddingType For a `text_embedding` task, the types of embeddings you want to get back. + // Use `binary` for binary embeddings, which are encoded as bytes with signed + // int8 precision. + // Use `bit` for binary embeddings, which are encoded as bytes with signed int8 + // precision (this is a synonym of `binary`). + // Use `byte` for signed int8 embeddings (this is a synonym of `int8`). + // Use `float` for the default float embeddings. + // Use `int8` for signed int8 embeddings. + EmbeddingType *cohereembeddingtype.CohereEmbeddingType `json:"embedding_type,omitempty"` + // ModelId For a `completion`, `rerank`, or `text_embedding` task, the name of the model + // to use for the inference task. + // + // * For the available `completion` models, refer to the [Cohere command + // docs](https://docs.cohere.com/docs/models#command). + // * For the available `rerank` models, refer to the [Cohere rerank + // docs](https://docs.cohere.com/reference/rerank-1). 
+ // * For the available `text_embedding` models, refer to [Cohere embed + // docs](https://docs.cohere.com/reference/embed). + // + // The default value for a text embedding task is `embed-english-v2.0`. + ModelId *string `json:"model_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Cohere. + // By default, the `cohere` service sets the number of requests allowed per + // minute to 10000. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Similarity The similarity measure. + // If the `embedding_type` is `float`, the default value is `dot_product`. + // If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. + Similarity *coheresimilaritytype.CohereSimilarityType `json:"similarity,omitempty"` +} + +func (s *CohereServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "embedding_type": + if err := dec.Decode(&s.EmbeddingType); err != nil { + return fmt.Errorf("%s | %w", "EmbeddingType", err) + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "similarity": + if err := dec.Decode(&s.Similarity); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + + } + } + return nil +} + +// NewCohereServiceSettings returns a CohereServiceSettings. +func NewCohereServiceSettings() *CohereServiceSettings { + r := &CohereServiceSettings{} + + return r +} + +type CohereServiceSettingsVariant interface { + CohereServiceSettingsCaster() *CohereServiceSettings +} + +func (s *CohereServiceSettings) CohereServiceSettingsCaster() *CohereServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coheretasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coheretasksettings.go new file mode 100644 index 000000000..d274a8efc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coheretasksettings.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
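For the CohereServiceSettings type completed above, a small construction sketch (illustrative values; assumes the vendored `types` package is imported). As the field docs note, the API key is effectively write-only after endpoint creation:

    // Build service settings for a Cohere inference endpoint.
    svc := types.NewCohereServiceSettings()
    svc.ApiKey = "COHERE_API_KEY_PLACEHOLDER" // placeholder; not retrievable later via the get inference endpoint API
    model := "embed-english-v2.0"             // the documented default for text_embedding tasks
    svc.ModelId = &model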
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereinputtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheretruncatetype" +) + +// CohereTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L870-L902 +type CohereTaskSettings struct { + // InputType For a `text_embedding` task, the type of input passed to the model. + // Valid values are: + // + // * `classification`: Use it for embeddings passed through a text classifier. + // * `clustering`: Use it for the embeddings run through a clustering algorithm. + // * `ingest`: Use it for storing document embeddings in a vector database. + // * `search`: Use it for storing embeddings of search queries run against a + // vector database to find relevant documents. + // + // IMPORTANT: The `input_type` field is required when using embedding models + // `v3` and higher. + InputType *cohereinputtype.CohereInputType `json:"input_type,omitempty"` + // ReturnDocuments For a `rerank` task, return doc text within the results. + ReturnDocuments *bool `json:"return_documents,omitempty"` + // TopN For a `rerank` task, the number of most relevant documents to return. + // It defaults to the number of the documents. + // If this inference endpoint is used in a `text_similarity_reranker` retriever + // query and `top_n` is set, it must be greater than or equal to + // `rank_window_size` in the query. + TopN *int `json:"top_n,omitempty"` + // Truncate For a `text_embedding` task, the method to handle inputs longer than the + // maximum token length. + // Valid values are: + // + // * `END`: When the input exceeds the maximum input token length, the end of + // the input is discarded. + // * `NONE`: When the input exceeds the maximum input token length, an error is + // returned. + // * `START`: When the input exceeds the maximum input token length, the start + // of the input is discarded. + Truncate *coheretruncatetype.CohereTruncateType `json:"truncate,omitempty"` +} + +func (s *CohereTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input_type": + if err := dec.Decode(&s.InputType); err != nil { + return fmt.Errorf("%s | %w", "InputType", err) + } + + case "return_documents": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnDocuments", err) + } + s.ReturnDocuments = &value + case bool: + s.ReturnDocuments = &v + } + + case "top_n": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopN", err) + } + s.TopN = &value + case float64: + f := int(v) + s.TopN = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return fmt.Errorf("%s | %w", "Truncate", err) + } + + } + } + return nil +} + +// NewCohereTaskSettings returns a CohereTaskSettings. 
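CohereTaskSettings carries the per-request knobs; a hedged sketch for a `rerank` task using the constructor defined just below (values illustrative, assuming the vendored `types` package):

    task := types.NewCohereTaskSettings()
    returnDocs := true
    topN := 3
    task.ReturnDocuments = &returnDocs // rerank: echo document text back in the results
    task.TopN = &topN                  // rerank: cap on most-relevant documents returned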
+func NewCohereTaskSettings() *CohereTaskSettings { + r := &CohereTaskSettings{} + + return r +} + +type CohereTaskSettingsVariant interface { + CohereTaskSettingsCaster() *CohereTaskSettings +} + +func (s *CohereTaskSettings) CohereTaskSettingsCaster() *CohereTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/collector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/collector.go new file mode 100644 index 000000000..32b3f55e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/collector.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Collector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L94-L99 +type Collector struct { + Children []Collector `json:"children,omitempty"` + Name string `json:"name"` + Reason string `json:"reason"` + TimeInNanos int64 `json:"time_in_nanos"` +} + +func (s *Collector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + } + } + return nil +} + +// NewCollector returns a Collector. +func NewCollector() *Collector { + r := &Collector{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/column.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/column.go new file mode 100644 index 000000000..cd707d190 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/column.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Column type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/_types/types.ts#L23-L26 +type Column struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *Column) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewColumn returns a Column. +func NewColumn() *Column { + r := &Column{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/combinedfieldsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/combinedfieldsquery.go new file mode 100644 index 000000000..6b9f1f223 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/combinedfieldsquery.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
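Column, defined above, is the per-column header entry of an SQL search response; decoding one is a one-liner (sample JSON is illustrative, `encoding/json` and the vendored `types` package assumed imported):

    var col types.Column
    if err := json.Unmarshal([]byte(`{"name":"author","type":"text"}`), &col); err != nil {
        // handle malformed column metadata
    }
    // col.Name == "author", col.Type == "text"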
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldsoperator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldszeroterms" +) + +// CombinedFieldsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L472-L506 +type CombinedFieldsQuery struct { + // AutoGenerateSynonymsPhraseQuery If true, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fields List of fields to search. Field wildcard patterns are allowed. Only `text` + // fields are supported, and they must all have the same search `analyzer`. + Fields []string `json:"fields"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + Operator *combinedfieldsoperator.CombinedFieldsOperator `json:"operator,omitempty"` + // Query Text to search for in the provided `fields`. + // The `combined_fields` query analyzes the provided text before performing a + // search. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the analyzer removes all + // tokens, such as when using a `stop` filter. 
+ ZeroTermsQuery *combinedfieldszeroterms.CombinedFieldsZeroTerms `json:"zero_terms_query,omitempty"` +} + +func (s *CombinedFieldsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_generate_synonyms_phrase_query": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoGenerateSynonymsPhraseQuery", err) + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return fmt.Errorf("%s | %w", "Operator", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return fmt.Errorf("%s | %w", "ZeroTermsQuery", err) + } + + } + } + return nil +} + +// NewCombinedFieldsQuery returns a CombinedFieldsQuery. +func NewCombinedFieldsQuery() *CombinedFieldsQuery { + r := &CombinedFieldsQuery{} + + return r +} + +type CombinedFieldsQueryVariant interface { + CombinedFieldsQueryCaster() *CombinedFieldsQuery +} + +func (s *CombinedFieldsQuery) CombinedFieldsQueryCaster() *CombinedFieldsQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/command.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/command.go new file mode 100644 index 000000000..5bb0083fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/command.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
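Assembling the CombinedFieldsQuery defined above takes only the two required members, `fields` and `query`; everything else is an optional pointer. A sketch with illustrative field names:

    q := types.NewCombinedFieldsQuery()
    q.Fields = []string{"title", "abstract", "body"} // text fields sharing one search analyzer
    q.Query = "database systems"
    boost := float32(1.2)
    q.Boost = &boost // optional relevance tweak; the default is 1.0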
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Command type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L22-L43 +type Command struct { + // AllocateEmptyPrimary Allocate an empty primary shard to a node. Accepts the index and shard for + // index name and shard number, and node to allocate the shard to. Using this + // command leads to a complete loss of all data that was indexed into this + // shard, if it was previously started. If a node which has a copy of the data + // rejoins the cluster later on, that data will be deleted. To ensure that these + // implications are well-understood, this command requires the flag + // accept_data_loss to be explicitly set to true. + AllocateEmptyPrimary *CommandAllocatePrimaryAction `json:"allocate_empty_primary,omitempty"` + // AllocateReplica Allocate an unassigned replica shard to a node. Accepts index and shard for + // index name and shard number, and node to allocate the shard to. Takes + // allocation deciders into account. + AllocateReplica *CommandAllocateReplicaAction `json:"allocate_replica,omitempty"` + // AllocateStalePrimary Allocate a primary shard to a node that holds a stale copy. Accepts the index + // and shard for index name and shard number, and node to allocate the shard to. + // Using this command may lead to data loss for the provided shard id. If a node + // which has the good copy of the data rejoins the cluster later on, that data + // will be deleted or overwritten with the data of the stale copy that was + // forcefully allocated with this command. To ensure that these implications are + // well-understood, this command requires the flag accept_data_loss to be + // explicitly set to true. + AllocateStalePrimary *CommandAllocatePrimaryAction `json:"allocate_stale_primary,omitempty"` + // Cancel Cancel allocation of a shard (or recovery). Accepts index and shard for index + // name and shard number, and node for the node to cancel the shard allocation + // on. This can be used to force resynchronization of existing replicas from the + // primary shard by cancelling them and allowing them to be reinitialized + // through the standard recovery process. By default only replica shard + // allocations can be cancelled. If it is necessary to cancel the allocation of + // a primary shard then the allow_primary flag must also be included in the + // request. + Cancel *CommandCancelAction `json:"cancel,omitempty"` + // Move Move a started shard from one node to another node. Accepts index and shard + // for index name and shard number, from_node for the node to move the shard + // from, and to_node for the node to move the shard to. + Move *CommandMoveAction `json:"move,omitempty"` +} + +// NewCommand returns a Command. 
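The Command container above mirrors the cluster reroute commands, one action per command. A hedged sketch (index and node names are made up) using the constructor below and the action types generated later in this file set:

    // Move a started shard between nodes.
    move := types.NewCommand()
    move.Move = &types.CommandMoveAction{
        Index:    "test",
        Shard:    0,
        FromNode: "node1",
        ToNode:   "node2",
    }

    // Force-allocating a stale primary can lose data, so accept_data_loss
    // must be explicitly true, as the field docs above spell out.
    stale := types.NewCommand()
    stale.AllocateStalePrimary = &types.CommandAllocatePrimaryAction{
        Index:          "test",
        Shard:          0,
        Node:           "node3",
        AcceptDataLoss: true,
    }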
+func NewCommand() *Command { + r := &Command{} + + return r +} + +type CommandVariant interface { + CommandCaster() *Command +} + +func (s *Command) CommandCaster() *Command { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocateprimaryaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocateprimaryaction.go new file mode 100644 index 000000000..7b284385d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocateprimaryaction.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommandAllocatePrimaryAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L78-L84 +type CommandAllocatePrimaryAction struct { + // AcceptDataLoss If a node which has a copy of the data rejoins the cluster later on, that + // data will be deleted. To ensure that these implications are well-understood, + // this command requires the flag accept_data_loss to be explicitly set to true + AcceptDataLoss bool `json:"accept_data_loss"` + Index string `json:"index"` + Node string `json:"node"` + Shard int `json:"shard"` +} + +func (s *CommandAllocatePrimaryAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "accept_data_loss": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AcceptDataLoss", err) + } + s.AcceptDataLoss = value + case bool: + s.AcceptDataLoss = v + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + +// NewCommandAllocatePrimaryAction returns a CommandAllocatePrimaryAction. 
+func NewCommandAllocatePrimaryAction() *CommandAllocatePrimaryAction { + r := &CommandAllocatePrimaryAction{} + + return r +} + +type CommandAllocatePrimaryActionVariant interface { + CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction +} + +func (s *CommandAllocatePrimaryAction) CommandAllocatePrimaryActionCaster() *CommandAllocatePrimaryAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocatereplicaaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocatereplicaaction.go new file mode 100644 index 000000000..58c125701 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandallocatereplicaaction.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommandAllocateReplicaAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L69-L76 +type CommandAllocateReplicaAction struct { + Index string `json:"index"` + Node string `json:"node"` + Shard int `json:"shard"` +} + +func (s *CommandAllocateReplicaAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + +// NewCommandAllocateReplicaAction returns a CommandAllocateReplicaAction. 
+func NewCommandAllocateReplicaAction() *CommandAllocateReplicaAction { + r := &CommandAllocateReplicaAction{} + + return r +} + +type CommandAllocateReplicaActionVariant interface { + CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction +} + +func (s *CommandAllocateReplicaAction) CommandAllocateReplicaActionCaster() *CommandAllocateReplicaAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandcancelaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandcancelaction.go new file mode 100644 index 000000000..f44fcd78c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandcancelaction.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommandCancelAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L45-L50 +type CommandCancelAction struct { + AllowPrimary *bool `json:"allow_primary,omitempty"` + Index string `json:"index"` + Node string `json:"node"` + Shard int `json:"shard"` +} + +func (s *CommandCancelAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPrimary", err) + } + s.AllowPrimary = &value + case bool: + s.AllowPrimary = &v + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + +// NewCommandCancelAction returns a CommandCancelAction. 
+func NewCommandCancelAction() *CommandCancelAction { + r := &CommandCancelAction{} + + return r +} + +type CommandCancelActionVariant interface { + CommandCancelActionCaster() *CommandCancelAction +} + +func (s *CommandCancelAction) CommandCancelActionCaster() *CommandCancelAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandmoveaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandmoveaction.go new file mode 100644 index 000000000..3a32525df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commandmoveaction.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommandMoveAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L60-L67 +type CommandMoveAction struct { + // FromNode The node to move the shard from + FromNode string `json:"from_node"` + Index string `json:"index"` + Shard int `json:"shard"` + // ToNode The node to move the shard to + ToNode string `json:"to_node"` +} + +func (s *CommandMoveAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FromNode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FromNode = o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "to_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ToNode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ToNode = o + + } + } + return nil +} + +// NewCommandMoveAction returns a CommandMoveAction. 
+func NewCommandMoveAction() *CommandMoveAction { + r := &CommandMoveAction{} + + return r +} + +type CommandMoveActionVariant interface { + CommandMoveActionCaster() *CommandMoveAction +} + +func (s *CommandMoveAction) CommandMoveActionCaster() *CommandMoveAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commongramstokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commongramstokenfilter.go new file mode 100644 index 000000000..ac51ab64c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commongramstokenfilter.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommonGramsTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L219-L235 +type CommonGramsTokenFilter struct { + // CommonWords A list of tokens. The filter generates bigrams for these tokens. + // Either this or the `common_words_path` parameter is required. + CommonWords []string `json:"common_words,omitempty"` + // CommonWordsPath Path to a file containing a list of tokens. The filter generates bigrams for + // these tokens. + // This path must be absolute or relative to the `config` location. The file + // must be UTF-8 encoded. Each token in the file must be separated by a line + // break. + // Either this or the `common_words` parameter is required. + CommonWordsPath *string `json:"common_words_path,omitempty"` + // IgnoreCase If `true`, matches for common words matching are case-insensitive. Defaults + // to `false`. + IgnoreCase *bool `json:"ignore_case,omitempty"` + // QueryMode If `true`, the filter excludes the following tokens from the output: + // - Unigrams for common words + // - Unigrams for terms followed by common words + // Defaults to `false`. We recommend enabling this parameter for search + // analyzers. 
+ QueryMode *bool `json:"query_mode,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *CommonGramsTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "common_words": + if err := dec.Decode(&s.CommonWords); err != nil { + return fmt.Errorf("%s | %w", "CommonWords", err) + } + + case "common_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CommonWordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CommonWordsPath = &o + + case "ignore_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreCase", err) + } + s.IgnoreCase = &value + case bool: + s.IgnoreCase = &v + } + + case "query_mode": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "QueryMode", err) + } + s.QueryMode = &value + case bool: + s.QueryMode = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CommonGramsTokenFilter) MarshalJSON() ([]byte, error) { + type innerCommonGramsTokenFilter CommonGramsTokenFilter + tmp := innerCommonGramsTokenFilter{ + CommonWords: s.CommonWords, + CommonWordsPath: s.CommonWordsPath, + IgnoreCase: s.IgnoreCase, + QueryMode: s.QueryMode, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "common_grams" + + return json.Marshal(tmp) +} + +// NewCommonGramsTokenFilter returns a CommonGramsTokenFilter. +func NewCommonGramsTokenFilter() *CommonGramsTokenFilter { + r := &CommonGramsTokenFilter{} + + return r +} + +type CommonGramsTokenFilterVariant interface { + CommonGramsTokenFilterCaster() *CommonGramsTokenFilter +} + +func (s *CommonGramsTokenFilter) CommonGramsTokenFilterCaster() *CommonGramsTokenFilter { + return s +} + +func (s *CommonGramsTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commontermsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commontermsquery.go new file mode 100644 index 000000000..e06c46361 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/commontermsquery.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
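One practical effect of the MarshalJSON override above: a CommonGramsTokenFilter built in code always serializes with its `type` discriminator pinned to "common_grams" (word list illustrative):

    f := types.NewCommonGramsTokenFilter()
    f.CommonWords = []string{"a", "is", "the"}
    b, _ := json.Marshal(f) // error elided in this sketch
    // string(b) == `{"common_words":["a","is","the"],"type":"common_grams"}`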
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +// CommonTermsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L34-L44 +type CommonTermsQuery struct { + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` + HighFreqOperator *operator.Operator `json:"high_freq_operator,omitempty"` + LowFreqOperator *operator.Operator `json:"low_freq_operator,omitempty"` + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *CommonTermsQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "cutoff_frequency": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CutoffFrequency", err) + } + f := Float64(value) + s.CutoffFrequency = &f + case float64: + f := Float64(v) + s.CutoffFrequency = &f + } + + case "high_freq_operator": + if err := dec.Decode(&s.HighFreqOperator); err != nil { + return fmt.Errorf("%s | %w", "HighFreqOperator", err) + } + + case "low_freq_operator": + if err := dec.Decode(&s.LowFreqOperator); err != nil { + return fmt.Errorf("%s | %w", "LowFreqOperator", err) + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewCommonTermsQuery returns a CommonTermsQuery. +func NewCommonTermsQuery() *CommonTermsQuery { + r := &CommonTermsQuery{} + + return r +} + +type CommonTermsQueryVariant interface { + CommonTermsQueryCaster() *CommonTermsQuery +} + +func (s *CommonTermsQuery) CommonTermsQueryCaster() *CommonTermsQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/communityidprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/communityidprocessor.go new file mode 100644 index 000000000..9c9fed162 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/communityidprocessor.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
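The decoder above starts with a shortcut: when the payload is not a JSON object, it is treated as the bare query string. A sketch of that short form:

    var q types.CommonTermsQuery
    if err := json.Unmarshal([]byte(`"quick brown fox"`), &q); err != nil {
        // handle decode error
    }
    // q.Query == "quick brown fox"; all other members stay zero-valued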
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CommunityIDProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L599-L660 +type CommunityIDProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DestinationIp Field containing the destination IP address. + DestinationIp *string `json:"destination_ip,omitempty"` + // DestinationPort Field containing the destination port. + DestinationPort *string `json:"destination_port,omitempty"` + // IanaNumber Field containing the IANA number. + IanaNumber *string `json:"iana_number,omitempty"` + // IcmpCode Field containing the ICMP code. + IcmpCode *string `json:"icmp_code,omitempty"` + // IcmpType Field containing the ICMP type. + IcmpType *string `json:"icmp_type,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Seed Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The + // seed can prevent hash collisions between network domains, such as a staging + // and production network that use the same addressing scheme. + Seed *int `json:"seed,omitempty"` + // SourceIp Field containing the source IP address. + SourceIp *string `json:"source_ip,omitempty"` + // SourcePort Field containing the source port. + SourcePort *string `json:"source_port,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the community ID. + TargetField *string `json:"target_field,omitempty"` + // Transport Field containing the transport protocol name or number. Used only when the + // iana_number field is not present. 
The following protocol names are currently + // supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, + // udp + Transport *string `json:"transport,omitempty"` +} + +func (s *CommunityIDProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "destination_ip": + if err := dec.Decode(&s.DestinationIp); err != nil { + return fmt.Errorf("%s | %w", "DestinationIp", err) + } + + case "destination_port": + if err := dec.Decode(&s.DestinationPort); err != nil { + return fmt.Errorf("%s | %w", "DestinationPort", err) + } + + case "iana_number": + if err := dec.Decode(&s.IanaNumber); err != nil { + return fmt.Errorf("%s | %w", "IanaNumber", err) + } + + case "icmp_code": + if err := dec.Decode(&s.IcmpCode); err != nil { + return fmt.Errorf("%s | %w", "IcmpCode", err) + } + + case "icmp_type": + if err := dec.Decode(&s.IcmpType); err != nil { + return fmt.Errorf("%s | %w", "IcmpType", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Seed", err) + } + s.Seed = &value + case float64: + f := int(v) + s.Seed = &f + } + + case "source_ip": + if err := dec.Decode(&s.SourceIp); err != nil { + return fmt.Errorf("%s | %w", "SourceIp", err) + } + + case "source_port": + if err := dec.Decode(&s.SourcePort); err != nil { + return fmt.Errorf("%s | %w", "SourcePort", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + + } + } + return nil +} + +// NewCommunityIDProcessor returns a CommunityIDProcessor. 
+func NewCommunityIDProcessor() *CommunityIDProcessor { + r := &CommunityIDProcessor{} + + return r +} + +type CommunityIDProcessorVariant interface { + CommunityIDProcessorCaster() *CommunityIDProcessor +} + +func (s *CommunityIDProcessor) CommunityIDProcessorCaster() *CommunityIDProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compactnodeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compactnodeinfo.go new file mode 100644 index 000000000..25667e342 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compactnodeinfo.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CompactNodeInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L33-L40 +type CompactNodeInfo struct { + // Name A human-readable name for the node. + // You can set this name using the `node.name` property in `elasticsearch.yml`. + // The default value is the machine's hostname. + Name string `json:"name"` +} + +func (s *CompactNodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewCompactNodeInfo returns a CompactNodeInfo. +func NewCompactNodeInfo() *CompactNodeInfo { + r := &CompactNodeInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completioncontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completioncontext.go new file mode 100644 index 000000000..fbd42508b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completioncontext.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionContext type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L236-L265 +type CompletionContext struct { + // Boost The factor by which the score of the suggestion should be boosted. + // The score is computed by multiplying the boost with the suggestion weight. + Boost *Float64 `json:"boost,omitempty"` + // Context The value of the category to filter/boost on. + Context Context `json:"context"` + // Neighbours An array of precision values at which neighboring geohashes should be taken + // into account. + // Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash + // precision (`1`..`12`). + // Defaults to generating neighbors for index time precision level. + Neighbours []GeoHashPrecision `json:"neighbours,omitempty"` + // Precision The precision of the geohash to encode the query geo point. + // Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw + // geohash precision (`1`..`12`). + // Defaults to index time precision level. + Precision GeoHashPrecision `json:"precision,omitempty"` + // Prefix Whether the category value should be treated as a prefix or not. + Prefix *bool `json:"prefix,omitempty"` +} + +func (s *CompletionContext) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Context) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "context": + if err := dec.Decode(&s.Context); err != nil { + return fmt.Errorf("%s | %w", "Context", err) + } + + case "neighbours": + if err := dec.Decode(&s.Neighbours); err != nil { + return fmt.Errorf("%s | %w", "Neighbours", err) + } + + case "precision": + if err := dec.Decode(&s.Precision); err != nil { + return fmt.Errorf("%s | %w", "Precision", err) + } + + case "prefix": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + s.Prefix = &value + case bool: + s.Prefix = &v + } + + } + } + return nil +} + +// NewCompletionContext returns a CompletionContext. 
+func NewCompletionContext() *CompletionContext { + r := &CompletionContext{} + + return r +} + +type CompletionContextVariant interface { + CompletionContextCaster() *CompletionContext +} + +func (s *CompletionContext) CompletionContextCaster() *CompletionContext { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionproperty.go new file mode 100644 index 000000000..7506b67ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionproperty.go @@ -0,0 +1,957 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// CompletionProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L33-L41 +type CompletionProperty struct { + Analyzer *string `json:"analyzer,omitempty"` + Contexts []SuggestContext `json:"contexts,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + MaxInputLength *int `json:"max_input_length,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + PreservePositionIncrements *bool `json:"preserve_position_increments,omitempty"` + PreserveSeparators *bool `json:"preserve_separators,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CompletionProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "contexts": + if err := dec.Decode(&s.Contexts); err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": 
+ oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo 
:= NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "max_input_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInputLength", err) + } + s.MaxInputLength = &value + case float64: + f := int(v) + s.MaxInputLength = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "preserve_position_increments": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PreservePositionIncrements", err) + } + s.PreservePositionIncrements = &value + case bool: + s.PreservePositionIncrements = &v + } + + case "preserve_separators": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PreserveSeparators", err) + } + s.PreserveSeparators = &value + case bool: + s.PreserveSeparators = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := 
NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := 
NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CompletionProperty) MarshalJSON() ([]byte, error) { + type innerCompletionProperty CompletionProperty + tmp := innerCompletionProperty{ + Analyzer: s.Analyzer, + Contexts: s.Contexts, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + MaxInputLength: s.MaxInputLength, + Meta: s.Meta, + PreservePositionIncrements: s.PreservePositionIncrements, + PreserveSeparators: s.PreserveSeparators, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "completion" + + return json.Marshal(tmp) +} + +// NewCompletionProperty returns a CompletionProperty. +func NewCompletionProperty() *CompletionProperty { + r := &CompletionProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type CompletionPropertyVariant interface { + CompletionPropertyCaster() *CompletionProperty +} + +func (s *CompletionProperty) CompletionPropertyCaster() *CompletionProperty { + return s +} + +func (s *CompletionProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionresult.go new file mode 100644 index 000000000..797de8ad6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionresult.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L77-L82 +type CompletionResult struct { + Result string `json:"result"` +} + +func (s *CompletionResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "result": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Result", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Result = o + + } + } + return nil +} + +// NewCompletionResult returns a CompletionResult. +func NewCompletionResult() *CompletionResult { + r := &CompletionResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionstats.go new file mode 100644 index 000000000..f6d437f14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionstats.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L83-L93 +type CompletionStats struct { + Fields map[string]FieldSizeUsage `json:"fields,omitempty"` + // Size Total amount of memory used for completion across all shards assigned to + // selected nodes. + Size ByteSize `json:"size,omitempty"` + // SizeInBytes Total amount, in bytes, of memory used for completion across all shards + // assigned to selected nodes. + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *CompletionStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]FieldSizeUsage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewCompletionStats returns a CompletionStats. +func NewCompletionStats() *CompletionStats { + r := &CompletionStats{ + Fields: make(map[string]FieldSizeUsage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggest.go new file mode 100644 index 000000000..e342d240a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggest.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionSuggest type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L49-L56 +type CompletionSuggest struct { + Length int `json:"length"` + Offset int `json:"offset"` + Options []CompletionSuggestOption `json:"options"` + Text string `json:"text"` +} + +func (s *CompletionSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewCompletionSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewCompletionSuggest returns a CompletionSuggest. +func NewCompletionSuggest() *CompletionSuggest { + r := &CompletionSuggest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggester.go new file mode 100644 index 000000000..f505ff293 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggester.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionSuggester type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L164-L182 +type CompletionSuggester struct { + // Analyzer The analyzer to analyze the suggest text with. + // Defaults to the search analyzer of the suggest field. + Analyzer *string `json:"analyzer,omitempty"` + // Contexts A value, geo point object, or a geo hash string to filter or boost the + // suggestion on. + Contexts map[string][]CompletionContext `json:"contexts,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + // Fuzzy Enables fuzziness, meaning you can have a typo in your search and still get + // results back. + Fuzzy *SuggestFuzziness `json:"fuzzy,omitempty"` + // Regex A regex query that expresses a prefix as a regular expression. + Regex *RegexOptions `json:"regex,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // SkipDuplicates Whether duplicate suggestions should be filtered out. + SkipDuplicates *bool `json:"skip_duplicates,omitempty"` +} + +func (s *CompletionSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "contexts": + if s.Contexts == nil { + s.Contexts = make(map[string][]CompletionContext, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := NewCompletionContext() + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + s.Contexts[key] = append(s.Contexts[key], *o) + default: + o := []CompletionContext{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + s.Contexts[key] = o + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "fuzzy": + if err := dec.Decode(&s.Fuzzy); err != nil { + return fmt.Errorf("%s | %w", "Fuzzy", err) + } + + case "regex": + if err := dec.Decode(&s.Regex); err != nil { + return fmt.Errorf("%s | %w", "Regex", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "skip_duplicates": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipDuplicates", err) + } + s.SkipDuplicates = &value + case bool: + s.SkipDuplicates = &v + } + + } + } + return nil +} + +// NewCompletionSuggester returns a CompletionSuggester. 
+func NewCompletionSuggester() *CompletionSuggester { + r := &CompletionSuggester{ + Contexts: make(map[string][]CompletionContext), + } + + return r +} + +type CompletionSuggesterVariant interface { + CompletionSuggesterCaster() *CompletionSuggester +} + +func (s *CompletionSuggester) CompletionSuggesterCaster() *CompletionSuggester { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggestoption.go new file mode 100644 index 000000000..b2aa8f4b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completionsuggestoption.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionSuggestOption type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L74-L85 +type CompletionSuggestOption struct { + CollateMatch *bool `json:"collate_match,omitempty"` + Contexts map[string][]Context `json:"contexts,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Id_ *string `json:"_id,omitempty"` + Index_ *string `json:"_index,omitempty"` + Routing_ *string `json:"_routing,omitempty"` + Score *Float64 `json:"score,omitempty"` + Score_ *Float64 `json:"_score,omitempty"` + Source_ json.RawMessage `json:"_source,omitempty"` + Text string `json:"text"` +} + +func (s *CompletionSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollateMatch", err) + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "contexts": + if s.Contexts == nil { + s.Contexts = make(map[string][]Context, 0) + } + if err := dec.Decode(&s.Contexts); err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id_ = &o + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return fmt.Errorf("%s | %w", "Routing_", err) + } + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = &f + case float64: + f := Float64(v) + s.Score = &f + } + + case "_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score_", err) + } + f := Float64(value) + s.Score_ = &f + case float64: + f := Float64(v) + s.Score_ = &f + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewCompletionSuggestOption returns a CompletionSuggestOption. 
+func NewCompletionSuggestOption() *CompletionSuggestOption { + r := &CompletionSuggestOption{ + Contexts: make(map[string][]Context), + Fields: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontool.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontool.go new file mode 100644 index 000000000..1218d1e05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontool.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionTool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L278-L290 +type CompletionTool struct { + // Function The function definition. + Function CompletionToolFunction `json:"function"` + // Type The type of tool. + Type string `json:"type"` +} + +func (s *CompletionTool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionTool returns a CompletionTool. +func NewCompletionTool() *CompletionTool { + r := &CompletionTool{} + + return r +} + +type CompletionToolVariant interface { + CompletionToolCaster() *CompletionTool +} + +func (s *CompletionTool) CompletionToolCaster() *CompletionTool { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoice.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoice.go new file mode 100644 index 000000000..08faa0aea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoice.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoice type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L241-L253 +type CompletionToolChoice struct { + // Function The tool choice function. + Function CompletionToolChoiceFunction `json:"function"` + // Type The type of the tool. + Type string `json:"type"` +} + +func (s *CompletionToolChoice) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewCompletionToolChoice returns a CompletionToolChoice. +func NewCompletionToolChoice() *CompletionToolChoice { + r := &CompletionToolChoice{} + + return r +} + +type CompletionToolChoiceVariant interface { + CompletionToolChoiceCaster() *CompletionToolChoice +} + +func (s *CompletionToolChoice) CompletionToolChoiceCaster() *CompletionToolChoice { + return s +} + +func (s *CompletionToolChoice) CompletionToolTypeCaster() *CompletionToolType { + o := CompletionToolType(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoicefunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoicefunction.go new file mode 100644 index 000000000..eaf31e345 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolchoicefunction.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolChoiceFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L230-L239 +type CompletionToolChoiceFunction struct { + // Name The name of the function to call. + Name string `json:"name"` +} + +func (s *CompletionToolChoiceFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewCompletionToolChoiceFunction returns a CompletionToolChoiceFunction. +func NewCompletionToolChoiceFunction() *CompletionToolChoiceFunction { + r := &CompletionToolChoiceFunction{} + + return r +} + +type CompletionToolChoiceFunctionVariant interface { + CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction +} + +func (s *CompletionToolChoiceFunction) CompletionToolChoiceFunctionCaster() *CompletionToolChoiceFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolfunction.go new file mode 100644 index 000000000..869a60a79 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontoolfunction.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompletionToolFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L255-L276 +type CompletionToolFunction struct { + // Description A description of what the function does. + // This is used by the model to choose when and how to call the function. 
+ Description *string `json:"description,omitempty"` + // Name The name of the function. + Name string `json:"name"` + // Parameters The parameters the functional accepts. This should be formatted as a JSON + // object. + Parameters json.RawMessage `json:"parameters,omitempty"` + // Strict Whether to enable schema adherence when generating the function call. + Strict *bool `json:"strict,omitempty"` +} + +func (s *CompletionToolFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return fmt.Errorf("%s | %w", "Parameters", err) + } + + case "strict": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Strict", err) + } + s.Strict = &value + case bool: + s.Strict = &v + } + + } + } + return nil +} + +// NewCompletionToolFunction returns a CompletionToolFunction. +func NewCompletionToolFunction() *CompletionToolFunction { + r := &CompletionToolFunction{} + + return r +} + +type CompletionToolFunctionVariant interface { + CompletionToolFunctionCaster() *CompletionToolFunction +} + +func (s *CompletionToolFunction) CompletionToolFunctionCaster() *CompletionToolFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontooltype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontooltype.go new file mode 100644 index 000000000..f33ff018a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/completiontooltype.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CompletionToolType holds the union for the following types: +// +// string +// CompletionToolChoice +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L118-L121 +type CompletionToolType any + +type CompletionToolTypeVariant interface { + CompletionToolTypeCaster() *CompletionToolType +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatenode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatenode.go new file mode 100644 index 000000000..be5868b5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatenode.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ComponentTemplateNode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/_types/ComponentTemplate.ts#L33-L42 +type ComponentTemplateNode struct { + Deprecated *bool `json:"deprecated,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Template ComponentTemplateSummary `json:"template"` + Version *int64 `json:"version,omitempty"` +} + +func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewComponentTemplateNode returns a ComponentTemplateNode. 
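
The cluster metadata types follow the same token-loop pattern. A small decode sketch for the ComponentTemplateNode defined above, using an invented, pared-down template body:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Illustrative component-template JSON: "deprecated" exercises the
	// string-or-bool branch, "version" the int64 decode, and "_meta" the
	// free-form Metadata field.
	raw := []byte(`{"version":2,"deprecated":false,"_meta":{"owner":"search-team"},"template":{}}`)

	var node types.ComponentTemplateNode
	if err := json.Unmarshal(raw, &node); err != nil {
		panic(err)
	}
	fmt.Println(*node.Version, *node.Deprecated) // 2 false
}
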
+func NewComponentTemplateNode() *ComponentTemplateNode { + r := &ComponentTemplateNode{} + + return r +} + +type ComponentTemplateNodeVariant interface { + ComponentTemplateNodeCaster() *ComponentTemplateNode +} + +func (s *ComponentTemplateNode) ComponentTemplateNodeCaster() *ComponentTemplateNode { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatesummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatesummary.go new file mode 100644 index 000000000..bdb1ff4ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/componenttemplatesummary.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ComponentTemplateSummary type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/_types/ComponentTemplate.ts#L44-L61 +type ComponentTemplateSummary struct { + Aliases map[string]AliasDefinition `json:"aliases,omitempty"` + DataStreamOptions *DataStreamOptionsTemplate `json:"data_stream_options,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + Mappings *TypeMapping `json:"mappings,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Settings map[string]IndexSettings `json:"settings,omitempty"` + Version *int64 `json:"version,omitempty"` +} + +func (s *ComponentTemplateSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]AliasDefinition, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "data_stream_options": + if err := dec.Decode(&s.DataStreamOptions); err != nil { + return fmt.Errorf("%s | %w", "DataStreamOptions", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "settings": + if s.Settings == nil { + s.Settings = make(map[string]IndexSettings, 0) + } + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewComponentTemplateSummary returns a ComponentTemplateSummary. +func NewComponentTemplateSummary() *ComponentTemplateSummary { + r := &ComponentTemplateSummary{ + Aliases: make(map[string]AliasDefinition), + Settings: make(map[string]IndexSettings), + } + + return r +} + +type ComponentTemplateSummaryVariant interface { + ComponentTemplateSummaryCaster() *ComponentTemplateSummary +} + +func (s *ComponentTemplateSummary) ComponentTemplateSummaryCaster() *ComponentTemplateSummary { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregate.go new file mode 100644 index 000000000..09bdaed50 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregate.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CompositeAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L700-L705 +type CompositeAggregate struct { + AfterKey CompositeAggregateKey `json:"after_key,omitempty"` + Buckets BucketsCompositeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *CompositeAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after_key": + if err := dec.Decode(&s.AfterKey); err != nil { + return fmt.Errorf("%s | %w", "AfterKey", err) + } + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]CompositeBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []CompositeBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewCompositeAggregate returns a CompositeAggregate. +func NewCompositeAggregate() *CompositeAggregate { + r := &CompositeAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregatekey.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregatekey.go new file mode 100644 index 000000000..f7b03fd4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregatekey.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CompositeAggregateKey type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L128-L128 +type CompositeAggregateKey map[string]FieldValue + +type CompositeAggregateKeyVariant interface { + CompositeAggregateKeyCaster() *CompositeAggregateKey +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregation.go new file mode 100644 index 000000000..54465bd2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CompositeAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L130-L149 +type CompositeAggregation struct { + // After When paginating, use the `after_key` value returned in the previous response + // to retrieve the next page. + After CompositeAggregateKey `json:"after,omitempty"` + // Size The number of composite buckets that should be returned. + Size *int `json:"size,omitempty"` + // Sources The value sources used to build composite buckets. + // Keys are returned in the order of the `sources` definition. + Sources []map[string]CompositeAggregationSource `json:"sources,omitempty"` +} + +func (s *CompositeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return fmt.Errorf("%s | %w", "After", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sources": + if err := dec.Decode(&s.Sources); err != nil { + return fmt.Errorf("%s | %w", "Sources", err) + } + + } + } + return nil +} + +// NewCompositeAggregation returns a CompositeAggregation. 
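
As the After field's comment notes, composite aggregations paginate by feeding each response's after_key into the next request. A sketch of that pattern under assumed names (the package name, by_host, and host.name are invented; CompositeTermsAggregation is the terms source type vendored alongside these files):

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// nextPage builds one page of a composite aggregation, resuming from the
// after_key returned by the previous response (pass nil for the first page).
func nextPage(after types.CompositeAggregateKey) *types.CompositeAggregation {
	size := 100
	field := "host.name" // illustrative source field
	return &types.CompositeAggregation{
		Size:  &size,
		After: after,
		Sources: []map[string]types.CompositeAggregationSource{
			{"by_host": {Terms: &types.CompositeTermsAggregation{Field: &field}}},
		},
	}
}

On the response side, the matching bucket decodes into the CompositeAggregate defined earlier in this diff; its AfterKey field, empty once results are exhausted, is what gets passed back in.
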
+func NewCompositeAggregation() *CompositeAggregation { + r := &CompositeAggregation{} + + return r +} + +type CompositeAggregationVariant interface { + CompositeAggregationCaster() *CompositeAggregation +} + +func (s *CompositeAggregation) CompositeAggregationCaster() *CompositeAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregationsource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregationsource.go new file mode 100644 index 000000000..df0970055 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositeaggregationsource.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// CompositeAggregationSource type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L151-L168 +type CompositeAggregationSource struct { + // DateHistogram A date histogram aggregation. + DateHistogram *CompositeDateHistogramAggregation `json:"date_histogram,omitempty"` + // GeotileGrid A geotile grid aggregation. + GeotileGrid *CompositeGeoTileGridAggregation `json:"geotile_grid,omitempty"` + // Histogram A histogram aggregation. + Histogram *CompositeHistogramAggregation `json:"histogram,omitempty"` + // Terms A terms aggregation. + Terms *CompositeTermsAggregation `json:"terms,omitempty"` +} + +// NewCompositeAggregationSource returns a CompositeAggregationSource. +func NewCompositeAggregationSource() *CompositeAggregationSource { + r := &CompositeAggregationSource{} + + return r +} + +type CompositeAggregationSourceVariant interface { + CompositeAggregationSourceCaster() *CompositeAggregationSource +} + +func (s *CompositeAggregationSource) CompositeAggregationSourceCaster() *CompositeAggregationSource { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositebucket.go new file mode 100644 index 000000000..d347bf9b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositebucket.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// CompositeBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L707-L709 +type CompositeBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key CompositeAggregateKey `json:"key"` +} + +func (s *CompositeBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() 
+ if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s CompositeBucket) MarshalJSON() ([]byte, error) { + type opt CompositeBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewCompositeBucket returns a CompositeBucket. +func NewCompositeBucket() *CompositeBucket { + r := &CompositeBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositedatehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositedatehistogramaggregation.go new file mode 100644 index 000000000..58e2b8eae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositedatehistogramaggregation.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype" +) + +// CompositeDateHistogramAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L187-L195 +type CompositeDateHistogramAggregation struct { + // CalendarInterval Either `calendar_interval` or `fixed_interval` must be present + CalendarInterval *string `json:"calendar_interval,omitempty"` + // Field Either `field` or `script` must be present + Field *string `json:"field,omitempty"` + // FixedInterval Either `calendar_interval` or `fixed_interval` must be present + FixedInterval *string `json:"fixed_interval,omitempty"` + Format *string `json:"format,omitempty"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Offset Duration `json:"offset,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + // Script Either `field` or `script` must be present + Script *Script `json:"script,omitempty"` + TimeZone *string `json:"time_zone,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` +} + +func (s *CompositeDateHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return fmt.Errorf("%s | %w", "CalendarInterval", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return fmt.Errorf("%s | %w", "FixedInterval", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing_bucket": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MissingBucket", err) + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return fmt.Errorf("%s | %w", "MissingOrder", err) + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return fmt.Errorf("%s | %w", "ValueType", err) + } + + } + } + return nil +} + +// NewCompositeDateHistogramAggregation 
returns a CompositeDateHistogramAggregation. +func NewCompositeDateHistogramAggregation() *CompositeDateHistogramAggregation { + r := &CompositeDateHistogramAggregation{} + + return r +} + +type CompositeDateHistogramAggregationVariant interface { + CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation +} + +func (s *CompositeDateHistogramAggregation) CompositeDateHistogramAggregationCaster() *CompositeDateHistogramAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositegeotilegridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositegeotilegridaggregation.go new file mode 100644 index 000000000..3b6362de1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositegeotilegridaggregation.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype" +) + +// CompositeGeoTileGridAggregation type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L197-L200
+type CompositeGeoTileGridAggregation struct {
+	Bounds GeoBounds `json:"bounds,omitempty"`
+	// Field Either `field` or `script` must be present
+	Field *string `json:"field,omitempty"`
+	MissingBucket *bool `json:"missing_bucket,omitempty"`
+	MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"`
+	Order *sortorder.SortOrder `json:"order,omitempty"`
+	Precision *int `json:"precision,omitempty"`
+	// Script Either `field` or `script` must be present
+	Script *Script `json:"script,omitempty"`
+	ValueType *valuetype.ValueType `json:"value_type,omitempty"`
+}
+
+func (s *CompositeGeoTileGridAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bounds":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Bounds", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		bounds_field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+
+				switch t {
+
+				case "bottom", "left", "right", "top":
+					o := NewCoordsGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_right", "top_left":
+					o := NewTopLeftBottomRightGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_left", "top_right":
+					o := NewTopRightBottomLeftGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "wkt":
+					o := NewWktGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				}
+			}
+			if s.Bounds == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Bounds); err != nil {
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "missing_bucket":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MissingBucket", err)
+				}
+				s.MissingBucket = &value
+			case bool:
+				s.MissingBucket = &v
+			}
+
+		case "missing_order":
+			if err := dec.Decode(&s.MissingOrder); err != nil {
+				return fmt.Errorf("%s | %w", "MissingOrder", err)
+			}
+
+		case "order":
+			if err := dec.Decode(&s.Order); err != nil {
+				return fmt.Errorf("%s | %w", "Order", err)
+			}
+
+		case "precision":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Precision", err)
+				}
+				s.Precision = &value
+			case float64:
+				f := int(v)
+				s.Precision = &f
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		case "value_type":
+			if err := dec.Decode(&s.ValueType); err != nil {
+				return fmt.Errorf("%s | %w", "ValueType", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewCompositeGeoTileGridAggregation returns a CompositeGeoTileGridAggregation.
+func NewCompositeGeoTileGridAggregation() *CompositeGeoTileGridAggregation {
+	r := &CompositeGeoTileGridAggregation{}
+
+	return r
+}
+
+type CompositeGeoTileGridAggregationVariant interface {
+	CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation
+}
+
+func (s *CompositeGeoTileGridAggregation) CompositeGeoTileGridAggregationCaster() *CompositeGeoTileGridAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositehistogramaggregation.go
new file mode 100644
index 000000000..12361e453
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositehistogramaggregation.go
@@ -0,0 +1,139 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype"
+)
+
+// CompositeHistogramAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L183-L185
+type CompositeHistogramAggregation struct {
+	// Field Either `field` or `script` must be present
+	Field *string `json:"field,omitempty"`
+	Interval Float64 `json:"interval"`
+	MissingBucket *bool `json:"missing_bucket,omitempty"`
+	MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"`
+	Order *sortorder.SortOrder `json:"order,omitempty"`
+	// Script Either `field` or `script` must be present
+	Script *Script `json:"script,omitempty"`
+	ValueType *valuetype.ValueType `json:"value_type,omitempty"`
+}
+
+func (s *CompositeHistogramAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "interval":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Interval", err)
+				}
+				f := Float64(value)
+				s.Interval = f
+			case float64:
+				f := Float64(v)
+				s.Interval = f
+			}
+
+		case "missing_bucket":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MissingBucket", err)
+				}
+				s.MissingBucket = &value
+			case bool:
+				s.MissingBucket = &v
+			}
+
+		case "missing_order":
+			if err := dec.Decode(&s.MissingOrder); err != nil {
+				return fmt.Errorf("%s | %w", "MissingOrder", err)
+			}
+
+		case "order":
+			if err := dec.Decode(&s.Order); err != nil {
+				return fmt.Errorf("%s | %w", "Order", err)
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		case "value_type":
+			if err := dec.Decode(&s.ValueType); err != nil {
+				return fmt.Errorf("%s | %w", "ValueType", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewCompositeHistogramAggregation returns a CompositeHistogramAggregation.
+func NewCompositeHistogramAggregation() *CompositeHistogramAggregation {
+	r := &CompositeHistogramAggregation{}
+
+	return r
+}
+
+type CompositeHistogramAggregationVariant interface {
+	CompositeHistogramAggregationCaster() *CompositeHistogramAggregation
+}
+
+func (s *CompositeHistogramAggregation) CompositeHistogramAggregationCaster() *CompositeHistogramAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositesubfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositesubfield.go
new file mode 100644
index 000000000..3525a4003
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositesubfield.go
@@ -0,0 +1,47 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype"
+)
+
+// CompositeSubField type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/RuntimeFields.ts#L52-L54
+type CompositeSubField struct {
+	Type runtimefieldtype.RuntimeFieldType `json:"type"`
+}
+
+// NewCompositeSubField returns a CompositeSubField.
+func NewCompositeSubField() *CompositeSubField {
+	r := &CompositeSubField{}
+
+	return r
+}
+
+type CompositeSubFieldVariant interface {
+	CompositeSubFieldCaster() *CompositeSubField
+}
+
+func (s *CompositeSubField) CompositeSubFieldCaster() *CompositeSubField {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositetermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositetermsaggregation.go
new file mode 100644
index 000000000..deb51840a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/compositetermsaggregation.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype"
+)
+
+// CompositeTermsAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L181-L181
+type CompositeTermsAggregation struct {
+	// Field Either `field` or `script` must be present
+	Field *string `json:"field,omitempty"`
+	MissingBucket *bool `json:"missing_bucket,omitempty"`
+	MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"`
+	Order *sortorder.SortOrder `json:"order,omitempty"`
+	// Script Either `field` or `script` must be present
+	Script *Script `json:"script,omitempty"`
+	ValueType *valuetype.ValueType `json:"value_type,omitempty"`
+}
+
+func (s *CompositeTermsAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "missing_bucket":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MissingBucket", err)
+				}
+				s.MissingBucket = &value
+			case bool:
+				s.MissingBucket = &v
+			}
+
+		case "missing_order":
+			if err := dec.Decode(&s.MissingOrder); err != nil {
+				return fmt.Errorf("%s | %w", "MissingOrder", err)
+			}
+
+		case "order":
+			if err := dec.Decode(&s.Order); err != nil {
+				return fmt.Errorf("%s | %w", "Order", err)
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		case "value_type":
+			if err := dec.Decode(&s.ValueType); err != nil {
+				return fmt.Errorf("%s | %w", "ValueType", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewCompositeTermsAggregation returns a CompositeTermsAggregation.
+func NewCompositeTermsAggregation() *CompositeTermsAggregation {
+	r := &CompositeTermsAggregation{}
+
+	return r
+}
+
+type CompositeTermsAggregationVariant interface {
+	CompositeTermsAggregationCaster() *CompositeTermsAggregation
+}
+
+func (s *CompositeTermsAggregation) CompositeTermsAggregationCaster() *CompositeTermsAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/conditiontokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/conditiontokenfilter.go
new file mode 100644
index 000000000..6d0e87777
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/conditiontokenfilter.go
@@ -0,0 +1,119 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
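Worth flagging for review: ConditionTokenFilter (defined just below) is one of the union members whose generated MarshalJSON pins the `type` discriminator to "condition", so callers never set it by hand. A small sketch of that round-trip; the filter list is invented for illustration and the printed shape is approximate:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		f := types.NewConditionTokenFilter()
		f.Filter = []string{"lowercase"} // apply lowercase only when the predicate matches
		b, _ := json.Marshal(f)
		// Prints roughly {"filter":["lowercase"],"script":{},"type":"condition"};
		// the "type" field comes from the generated MarshalJSON override.
		fmt.Println(string(b))
	}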
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ConditionTokenFilter type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L237-L243
+type ConditionTokenFilter struct {
+	// Filter Array of token filters. If a token matches the predicate script in the
+	// `script` parameter, these filters are applied to the token in the order
+	// provided.
+	Filter []string `json:"filter"`
+	// Script Predicate script used to apply token filters. If a token matches this script,
+	// the filters in the `filter` parameter are applied to the token.
+	Script Script `json:"script"`
+	Type string `json:"type,omitempty"`
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *ConditionTokenFilter) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "filter":
+			if err := dec.Decode(&s.Filter); err != nil {
+				return fmt.Errorf("%s | %w", "Filter", err)
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s ConditionTokenFilter) MarshalJSON() ([]byte, error) {
+	type innerConditionTokenFilter ConditionTokenFilter
+	tmp := innerConditionTokenFilter{
+		Filter: s.Filter,
+		Script: s.Script,
+		Type: s.Type,
+		Version: s.Version,
+	}
+
+	tmp.Type = "condition"
+
+	return json.Marshal(tmp)
+}
+
+// NewConditionTokenFilter returns a ConditionTokenFilter.
+func NewConditionTokenFilter() *ConditionTokenFilter {
+	r := &ConditionTokenFilter{}
+
+	return r
+}
+
+type ConditionTokenFilterVariant interface {
+	ConditionTokenFilterCaster() *ConditionTokenFilter
+}
+
+func (s *ConditionTokenFilter) ConditionTokenFilterCaster() *ConditionTokenFilter {
+	return s
+}
+
+func (s *ConditionTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition {
+	o := TokenFilterDefinition(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/configuration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/configuration.go
new file mode 100644
index 000000000..bc92da832
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/configuration.go
@@ -0,0 +1,168 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Configuration type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L109-L139
+type Configuration struct {
+	// FeatureStates A list of feature states to be included in this snapshot. A list of features
+	// available for inclusion in the snapshot and their descriptions can be
+	// retrieved using the get features API.
+	// Each feature state includes one or more system indices containing data
+	// necessary for the function of that feature. Providing an empty array will
+	// include no feature states in the snapshot, regardless of the value of
+	// include_global_state. By default, all available feature states will be
+	// included in the snapshot if include_global_state is true, or no feature
+	// states if include_global_state is false.
+	FeatureStates []string `json:"feature_states,omitempty"`
+	// IgnoreUnavailable If false, the snapshot fails if any data stream or index in indices is
+	// missing or closed. If true, the snapshot ignores missing or closed data
+	// streams and indices.
+	IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"`
+	// IncludeGlobalState If true, the current global state is included in the snapshot.
+	IncludeGlobalState *bool `json:"include_global_state,omitempty"`
+	// Indices A comma-separated list of data streams and indices to include in the
+	// snapshot. Multi-index syntax is supported.
+	// By default, a snapshot includes all data streams and indices in the cluster.
+	// If this argument is provided, the snapshot only includes the specified data
+	// streams and clusters.
+	Indices []string `json:"indices,omitempty"`
+	// Metadata Attaches arbitrary metadata to the snapshot, such as a record of who took the
+	// snapshot, why it was taken, or any other useful data. Metadata must be less
+	// than 1024 bytes.
+	Metadata Metadata `json:"metadata,omitempty"`
+	// Partial If false, the entire snapshot will fail if one or more indices included in
+	// the snapshot do not have all primary shards available.
+	Partial *bool `json:"partial,omitempty"`
+}
+
+func (s *Configuration) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "feature_states":
+			if err := dec.Decode(&s.FeatureStates); err != nil {
+				return fmt.Errorf("%s | %w", "FeatureStates", err)
+			}
+
+		case "ignore_unavailable":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreUnavailable", err)
+				}
+				s.IgnoreUnavailable = &value
+			case bool:
+				s.IgnoreUnavailable = &v
+			}
+
+		case "include_global_state":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IncludeGlobalState", err)
+				}
+				s.IncludeGlobalState = &value
+			case bool:
+				s.IncludeGlobalState = &v
+			}
+
+		case "indices":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+
+				s.Indices = append(s.Indices, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+			}
+
+		case "metadata":
+			if err := dec.Decode(&s.Metadata); err != nil {
+				return fmt.Errorf("%s | %w", "Metadata", err)
+			}
+
+		case "partial":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Partial", err)
+				}
+				s.Partial = &value
+			case bool:
+				s.Partial = &v
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConfiguration returns a Configuration.
+func NewConfiguration() *Configuration {
+	r := &Configuration{}
+
+	return r
+}
+
+type ConfigurationVariant interface {
+	ConfigurationCaster() *Configuration
+}
+
+func (s *Configuration) ConfigurationCaster() *Configuration {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixitem.go
new file mode 100644
index 000000000..0fd0b1680
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixitem.go
@@ -0,0 +1,109 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ConfusionMatrixItem type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L125-L130
+type ConfusionMatrixItem struct {
+	ActualClass string `json:"actual_class"`
+	ActualClassDocCount int `json:"actual_class_doc_count"`
+	OtherPredictedClassDocCount int `json:"other_predicted_class_doc_count"`
+	PredictedClasses []ConfusionMatrixPrediction `json:"predicted_classes"`
+}
+
+func (s *ConfusionMatrixItem) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "actual_class":
+			if err := dec.Decode(&s.ActualClass); err != nil {
+				return fmt.Errorf("%s | %w", "ActualClass", err)
+			}
+
+		case "actual_class_doc_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ActualClassDocCount", err)
+				}
+				s.ActualClassDocCount = value
+			case float64:
+				f := int(v)
+				s.ActualClassDocCount = f
+			}
+
+		case "other_predicted_class_doc_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "OtherPredictedClassDocCount", err)
+				}
+				s.OtherPredictedClassDocCount = value
+			case float64:
+				f := int(v)
+				s.OtherPredictedClassDocCount = f
+			}
+
+		case "predicted_classes":
+			if err := dec.Decode(&s.PredictedClasses); err != nil {
+				return fmt.Errorf("%s | %w", "PredictedClasses", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConfusionMatrixItem returns a ConfusionMatrixItem.
+func NewConfusionMatrixItem() *ConfusionMatrixItem {
+	r := &ConfusionMatrixItem{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixprediction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixprediction.go
new file mode 100644
index 000000000..ab4b7975d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixprediction.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ConfusionMatrixPrediction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L132-L135
+type ConfusionMatrixPrediction struct {
+	Count int `json:"count"`
+	PredictedClass string `json:"predicted_class"`
+}
+
+func (s *ConfusionMatrixPrediction) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int(v)
+				s.Count = f
+			}
+
+		case "predicted_class":
+			if err := dec.Decode(&s.PredictedClass); err != nil {
+				return fmt.Errorf("%s | %w", "PredictedClass", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConfusionMatrixPrediction returns a ConfusionMatrixPrediction.
+func NewConfusionMatrixPrediction() *ConfusionMatrixPrediction {
+	r := &ConfusionMatrixPrediction{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixthreshold.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixthreshold.go
new file mode 100644
index 000000000..c4a830f64
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/confusionmatrixthreshold.go
@@ -0,0 +1,135 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ConfusionMatrixThreshold type.
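The fn/fp/tn/tp counters on ConfusionMatrixThreshold (defined just below) are plain ints, so the usual classification metrics fall out directly. A self-contained sketch; the metric formulas are the standard definitions, not something this package provides, and the sample counts are invented:

	package main

	import (
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	// precisionRecall derives the usual binary-classification metrics from the
	// tp/fp/tn/fn counters carried by ConfusionMatrixThreshold.
	func precisionRecall(m types.ConfusionMatrixThreshold) (precision, recall, accuracy float64) {
		tp, fp := float64(m.TruePositive), float64(m.FalsePositive)
		tn, fn := float64(m.TrueNegative), float64(m.FalseNegative)
		if tp+fp > 0 {
			precision = tp / (tp + fp)
		}
		if tp+fn > 0 {
			recall = tp / (tp + fn)
		}
		if total := tp + fp + tn + fn; total > 0 {
			accuracy = (tp + tn) / total
		}
		return
	}

	func main() {
		m := types.ConfusionMatrixThreshold{TruePositive: 90, FalsePositive: 10, TrueNegative: 85, FalseNegative: 15}
		p, r, a := precisionRecall(m)
		fmt.Printf("precision=%.2f recall=%.2f accuracy=%.2f\n", p, r, a) // precision=0.90 recall=0.86 accuracy=0.88
	}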
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L137-L158
+type ConfusionMatrixThreshold struct {
+	// FalseNegative False Negative
+	FalseNegative int `json:"fn"`
+	// FalsePositive False Positive
+	FalsePositive int `json:"fp"`
+	// TrueNegative True Negative
+	TrueNegative int `json:"tn"`
+	// TruePositive True Positive
+	TruePositive int `json:"tp"`
+}
+
+func (s *ConfusionMatrixThreshold) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "fn":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FalseNegative", err)
+				}
+				s.FalseNegative = value
+			case float64:
+				f := int(v)
+				s.FalseNegative = f
+			}
+
+		case "fp":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FalsePositive", err)
+				}
+				s.FalsePositive = value
+			case float64:
+				f := int(v)
+				s.FalsePositive = f
+			}
+
+		case "tn":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TrueNegative", err)
+				}
+				s.TrueNegative = value
+			case float64:
+				f := int(v)
+				s.TrueNegative = f
+			}
+
+		case "tp":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TruePositive", err)
+				}
+				s.TruePositive = value
+			case float64:
+				f := int(v)
+				s.TruePositive = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConfusionMatrixThreshold returns a ConfusionMatrixThreshold.
+func NewConfusionMatrixThreshold() *ConfusionMatrixThreshold {
+	r := &ConfusionMatrixThreshold{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connection.go
new file mode 100644
index 000000000..ca91ba338
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connection.go
@@ -0,0 +1,128 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Connection type.
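One pattern worth calling out once, since it repeats across every UnmarshalJSON body in this batch: numeric fields tolerate both JSON numbers and string-encoded numbers, falling back to strconv parsing. A small sketch against the Connection type defined just below (input values invented):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		// doc_count arrives string-encoded, the rest as numbers; both decode,
		// because the generated UnmarshalJSON falls back to strconv.ParseInt.
		raw := []byte(`{"doc_count":"42","source":0,"target":1,"weight":1.5}`)
		var c types.Connection
		if err := json.Unmarshal(raw, &c); err != nil {
			panic(err)
		}
		fmt.Println(c.DocCount, c.Source, c.Target, c.Weight) // 42 0 1 1.5
	}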
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/Connection.ts#L22-L27
+type Connection struct {
+	DocCount int64 `json:"doc_count"`
+	Source int64 `json:"source"`
+	Target int64 `json:"target"`
+	Weight Float64 `json:"weight"`
+}
+
+func (s *Connection) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCount", err)
+				}
+				s.DocCount = value
+			case float64:
+				f := int64(v)
+				s.DocCount = f
+			}
+
+		case "source":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Source", err)
+				}
+				s.Source = value
+			case float64:
+				f := int64(v)
+				s.Source = f
+			}
+
+		case "target":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Target", err)
+				}
+				s.Target = value
+			case float64:
+				f := int64(v)
+				s.Target = f
+			}
+
+		case "weight":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Weight", err)
+				}
+				f := Float64(value)
+				s.Weight = f
+			case float64:
+				f := Float64(v)
+				s.Weight = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConnection returns a Connection.
+func NewConnection() *Connection {
+	r := &Connection{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connector.go
new file mode 100644
index 000000000..a837f3aab
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connector.go
@@ -0,0 +1,362 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus"
+)
+
+// Connector type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L252-L284
+type Connector struct {
+	ApiKeyId *string `json:"api_key_id,omitempty"`
+	ApiKeySecretId *string `json:"api_key_secret_id,omitempty"`
+	Configuration ConnectorConfiguration `json:"configuration"`
+	CustomScheduling ConnectorCustomScheduling `json:"custom_scheduling"`
+	Deleted bool `json:"deleted"`
+	Description *string `json:"description,omitempty"`
+	Error *string `json:"error,omitempty"`
+	Features *ConnectorFeatures `json:"features,omitempty"`
+	Filtering []FilteringConfig `json:"filtering"`
+	Id *string `json:"id,omitempty"`
+	IndexName *string `json:"index_name,omitempty"`
+	IsNative bool `json:"is_native"`
+	Language *string `json:"language,omitempty"`
+	LastAccessControlSyncError *string `json:"last_access_control_sync_error,omitempty"`
+	LastAccessControlSyncScheduledAt DateTime `json:"last_access_control_sync_scheduled_at,omitempty"`
+	LastAccessControlSyncStatus *syncstatus.SyncStatus `json:"last_access_control_sync_status,omitempty"`
+	LastDeletedDocumentCount *int64 `json:"last_deleted_document_count,omitempty"`
+	LastIncrementalSyncScheduledAt DateTime `json:"last_incremental_sync_scheduled_at,omitempty"`
+	LastIndexedDocumentCount *int64 `json:"last_indexed_document_count,omitempty"`
+	LastSeen DateTime `json:"last_seen,omitempty"`
+	LastSyncError *string `json:"last_sync_error,omitempty"`
+	LastSyncScheduledAt DateTime `json:"last_sync_scheduled_at,omitempty"`
+	LastSyncStatus *syncstatus.SyncStatus `json:"last_sync_status,omitempty"`
+	LastSynced DateTime `json:"last_synced,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Pipeline *IngestPipelineParams `json:"pipeline,omitempty"`
+	Scheduling SchedulingConfiguration `json:"scheduling"`
+	ServiceType *string `json:"service_type,omitempty"`
+	Status connectorstatus.ConnectorStatus `json:"status"`
+	SyncCursor json.RawMessage `json:"sync_cursor,omitempty"`
+	SyncNow bool `json:"sync_now"`
+}
+
+func (s *Connector) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "api_key_id":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ApiKeyId", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ApiKeyId = &o
+
+		case "api_key_secret_id":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ApiKeySecretId", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ApiKeySecretId = &o
+
+		case "configuration":
+			if err := dec.Decode(&s.Configuration); err != nil {
+				return fmt.Errorf("%s | %w", "Configuration", err)
+			}
+
+		case "custom_scheduling":
+			if err := dec.Decode(&s.CustomScheduling); err != nil {
+				return fmt.Errorf("%s | %w", "CustomScheduling", err)
+			}
+
+		case "deleted":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Deleted", err)
+				}
+				s.Deleted = value
+			case bool:
+				s.Deleted = v
+			}
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "error":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Error", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Error = &o
+
+		case "features":
+			if err := dec.Decode(&s.Features); err != nil {
+				return fmt.Errorf("%s | %w", "Features", err)
+			}
+
+		case "filtering":
+			if err := dec.Decode(&s.Filtering); err != nil {
+				return fmt.Errorf("%s | %w", "Filtering", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "index_name":
+			if err := dec.Decode(&s.IndexName); err != nil {
+				return fmt.Errorf("%s | %w", "IndexName", err)
+			}
+
+		case "is_native":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsNative", err)
+				}
+				s.IsNative = value
+			case bool:
+				s.IsNative = v
+			}
+
+		case "language":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Language", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Language = &o
+
+		case "last_access_control_sync_error":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "LastAccessControlSyncError", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.LastAccessControlSyncError = &o
+
+		case "last_access_control_sync_scheduled_at":
+			if err := dec.Decode(&s.LastAccessControlSyncScheduledAt); err != nil {
+				return fmt.Errorf("%s | %w", "LastAccessControlSyncScheduledAt", err)
+			}
+
+		case "last_access_control_sync_status":
+			if err := dec.Decode(&s.LastAccessControlSyncStatus); err != nil {
+				return fmt.Errorf("%s | %w", "LastAccessControlSyncStatus", err)
+			}
+
+		case "last_deleted_document_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LastDeletedDocumentCount", err)
+				}
+				s.LastDeletedDocumentCount = &value
+			case float64:
+				f := int64(v)
+				s.LastDeletedDocumentCount = &f
+			}
+
+		case "last_incremental_sync_scheduled_at":
+			if err := dec.Decode(&s.LastIncrementalSyncScheduledAt); err != nil {
+				return fmt.Errorf("%s | %w", "LastIncrementalSyncScheduledAt", err)
+			}
+
+		case "last_indexed_document_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LastIndexedDocumentCount", err)
+				}
+				s.LastIndexedDocumentCount = &value
+			case float64:
+				f := int64(v)
+				s.LastIndexedDocumentCount = &f
+			}
+
+		case "last_seen":
+			if err := dec.Decode(&s.LastSeen); err != nil {
+				return fmt.Errorf("%s | %w", "LastSeen", err)
+			}
+
+		case "last_sync_error":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "LastSyncError", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.LastSyncError = &o
+
+		case "last_sync_scheduled_at":
+			if err := dec.Decode(&s.LastSyncScheduledAt); err != nil {
+				return fmt.Errorf("%s | %w", "LastSyncScheduledAt", err)
+			}
+
+		case "last_sync_status":
+			if err := dec.Decode(&s.LastSyncStatus); err != nil {
+				return fmt.Errorf("%s | %w", "LastSyncStatus", err)
+			}
+
+		case "last_synced":
+			if err := dec.Decode(&s.LastSynced); err != nil {
+				return fmt.Errorf("%s | %w", "LastSynced", err)
+			}
+
+		case "name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Name = &o
+
+		case "pipeline":
+			if err := dec.Decode(&s.Pipeline); err != nil {
+				return fmt.Errorf("%s | %w", "Pipeline", err)
+			}
+
+		case "scheduling":
+			if err := dec.Decode(&s.Scheduling); err != nil {
+				return fmt.Errorf("%s | %w", "Scheduling", err)
+			}
+
+		case "service_type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ServiceType", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ServiceType = &o
+
+		case "status":
+			if err := dec.Decode(&s.Status); err != nil {
+				return fmt.Errorf("%s | %w", "Status", err)
+			}
+
+		case "sync_cursor":
+			if err := dec.Decode(&s.SyncCursor); err != nil {
+				return fmt.Errorf("%s | %w", "SyncCursor", err)
+			}
+
+		case "sync_now":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SyncNow", err)
+				}
+				s.SyncNow = value
+			case bool:
+				s.SyncNow = v
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConnector returns a Connector.
+func NewConnector() *Connector {
+	r := &Connector{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfigproperties.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfigproperties.go
new file mode 100644
index 000000000..a2830cf2b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfigproperties.go
@@ -0,0 +1,269 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorfieldtype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/displaytype"
+)
+
+// ConnectorConfigProperties type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L83-L99
+type ConnectorConfigProperties struct {
+	Category *string `json:"category,omitempty"`
+	DefaultValue ScalarValue `json:"default_value"`
+	DependsOn []Dependency `json:"depends_on"`
+	Display displaytype.DisplayType `json:"display"`
+	Label string `json:"label"`
+	Options []SelectOption `json:"options"`
+	Order *int `json:"order,omitempty"`
+	Placeholder *string `json:"placeholder,omitempty"`
+	Required bool `json:"required"`
+	Sensitive bool `json:"sensitive"`
+	Tooltip *string `json:"tooltip,omitempty"`
+	Type *connectorfieldtype.ConnectorFieldType `json:"type,omitempty"`
+	UiRestrictions []string `json:"ui_restrictions,omitempty"`
+	Validations []Validation `json:"validations,omitempty"`
+	Value json.RawMessage `json:"value,omitempty"`
+}
+
+func (s *ConnectorConfigProperties) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "category":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Category", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Category = &o
+
+		case "default_value":
+			if err := dec.Decode(&s.DefaultValue); err != nil {
+				return fmt.Errorf("%s | %w", "DefaultValue", err)
+			}
+
+		case "depends_on":
+			if err := dec.Decode(&s.DependsOn); err != nil {
+				return fmt.Errorf("%s | %w", "DependsOn", err)
+			}
+
+		case "display":
+			if err := dec.Decode(&s.Display); err != nil {
+				return fmt.Errorf("%s | %w", "Display", err)
+			}
+
+		case "label":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Label", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Label = o
+
+		case "options":
+			if err := dec.Decode(&s.Options); err != nil {
+				return fmt.Errorf("%s | %w", "Options", err)
+			}
+
+		case "order":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Order", err)
+				}
+				s.Order = &value
+			case float64:
+				f := int(v)
+				s.Order = &f
+			}
+
+		case "placeholder":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Placeholder", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Placeholder = &o
+
+		case "required":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Required", err)
+				}
+				s.Required = value
+			case bool:
+				s.Required = v
+			}
+
+		case "sensitive":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Sensitive", err)
+				}
+				s.Sensitive = value
+			case bool:
+				s.Sensitive = v
+			}
+
+		case "tooltip":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Tooltip", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Tooltip = &o
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "ui_restrictions":
+			if err := dec.Decode(&s.UiRestrictions); err != nil {
+				return fmt.Errorf("%s | %w", "UiRestrictions", err)
+			}
+
+		case "validations":
+
+			buf := []json.RawMessage{}
+			dec.Decode(&buf)
+			for _, rawMsg := range buf {
+
+				source := bytes.NewReader(rawMsg)
+				localDec := json.NewDecoder(source)
+				kind := make(map[string]string, 0)
+				localDec.Decode(&kind)
+				source.Seek(0, io.SeekStart)
+
+				switch kind["type"] {
+
+				case "less_than":
+					o := NewLessThanValidation()
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "less_than", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				case "greater_than":
+					o := NewGreaterThanValidation()
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "greater_than", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				case "list_type":
+					o := NewListTypeValidation()
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "list_type", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				case "included_in":
+					o := NewIncludedInValidation()
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "included_in", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				case "regex":
+					o := NewRegexValidation()
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "regex", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				default:
+					o := new(any)
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("Validations | %w", err)
+					}
+					s.Validations = append(s.Validations, *o)
+				}
+			}
+
+		case "value":
+			if err := dec.Decode(&s.Value); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewConnectorConfigProperties returns a ConnectorConfigProperties.
+func NewConnectorConfigProperties() *ConnectorConfigProperties {
+	r := &ConnectorConfigProperties{}
+
+	return r
+}
+
+type ConnectorConfigPropertiesVariant interface {
+	ConnectorConfigPropertiesCaster() *ConnectorConfigProperties
+}
+
+func (s *ConnectorConfigProperties) ConnectorConfigPropertiesCaster() *ConnectorConfigProperties {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfiguration.go
new file mode 100644
index 000000000..cca98d302
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorconfiguration.go
@@ -0,0 +1,30 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// ConnectorConfiguration type alias.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L101-L104
+type ConnectorConfiguration map[string]ConnectorConfigProperties
+
+type ConnectorConfigurationVariant interface {
+	ConnectorConfigurationCaster() *ConnectorConfiguration
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorcustomscheduling.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorcustomscheduling.go
new file mode 100644
index 000000000..00633dad9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorcustomscheduling.go
@@ -0,0 +1,26 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// ConnectorCustomScheduling type alias.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L128-L128
+type ConnectorCustomScheduling map[string]CustomScheduling
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorfeatures.go
new file mode 100644
index 000000000..d39ecbd47
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorfeatures.go
@@ -0,0 +1,49 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// ConnectorFeatures type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L230-L244
+type ConnectorFeatures struct {
+	// DocumentLevelSecurity Indicates whether document-level security is enabled.
+	DocumentLevelSecurity *FeatureEnabled `json:"document_level_security,omitempty"`
+	// IncrementalSync Indicates whether incremental syncs are enabled.
+	IncrementalSync *FeatureEnabled `json:"incremental_sync,omitempty"`
+	// NativeConnectorApiKeys Indicates whether managed connector API keys are enabled.
+	NativeConnectorApiKeys *FeatureEnabled `json:"native_connector_api_keys,omitempty"`
+	SyncRules *SyncRulesFeature `json:"sync_rules,omitempty"`
+}
+
+// NewConnectorFeatures returns a ConnectorFeatures.
+func NewConnectorFeatures() *ConnectorFeatures {
+	r := &ConnectorFeatures{}
+
+	return r
+}
+
+type ConnectorFeaturesVariant interface {
+	ConnectorFeaturesCaster() *ConnectorFeatures
+}
+
+func (s *ConnectorFeatures) ConnectorFeaturesCaster() *ConnectorFeatures {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorscheduling.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorscheduling.go
new file mode 100644
index 000000000..697104597
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorscheduling.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ConnectorScheduling type.
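ConnectorScheduling (defined just below) is the small enabled-plus-crontab pair used by the connector scheduling APIs. A minimal construction sketch; the cron expression is illustrative only:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		s := types.NewConnectorScheduling()
		s.Enabled = true
		s.Interval = "0 0 0 * * ?" // crontab syntax, per the Interval field's doc comment
		b, _ := json.Marshal(s)
		fmt.Println(string(b)) // {"enabled":true,"interval":"0 0 0 * * ?"}
	}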
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L106-L110 +type ConnectorScheduling struct { + Enabled bool `json:"enabled"` + // Interval The interval is expressed using the crontab syntax + Interval string `json:"interval"` +} + +func (s *ConnectorScheduling) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "interval": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Interval = o + + } + } + return nil +} + +// NewConnectorScheduling returns a ConnectorScheduling. +func NewConnectorScheduling() *ConnectorScheduling { + r := &ConnectorScheduling{} + + return r +} + +type ConnectorSchedulingVariant interface { + ConnectorSchedulingCaster() *ConnectorScheduling +} + +func (s *ConnectorScheduling) ConnectorSchedulingCaster() *ConnectorScheduling { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorsyncjob.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorsyncjob.go new file mode 100644 index 000000000..a6b111f57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/connectorsyncjob.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus" +) + +// ConnectorSyncJob type. 
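
The `ConnectorScheduling` decoder above is deliberately tolerant: `enabled` may arrive as a JSON boolean or as the string `"true"`/`"false"` (falling back to `strconv.ParseBool`), and `interval` is unquoted when possible. A self-contained sketch of that round trip, assuming only the vendored package; illustrative, not part of the diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        // "enabled" arrives as a string here; the custom UnmarshalJSON
        // falls back to strconv.ParseBool on the string path.
        raw := []byte(`{"enabled":"true","interval":"0 0 * * *"}`)
        var s types.ConnectorScheduling
        if err := json.Unmarshal(raw, &s); err != nil {
            panic(err)
        }
        fmt.Println(s.Enabled, s.Interval) // true 0 0 * * *
    }
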
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/SyncJob.ts#L53-L72 +type ConnectorSyncJob struct { + CancelationRequestedAt DateTime `json:"cancelation_requested_at,omitempty"` + CanceledAt DateTime `json:"canceled_at,omitempty"` + CompletedAt DateTime `json:"completed_at,omitempty"` + Connector SyncJobConnectorReference `json:"connector"` + CreatedAt DateTime `json:"created_at"` + DeletedDocumentCount int64 `json:"deleted_document_count"` + Error *string `json:"error,omitempty"` + Id string `json:"id"` + IndexedDocumentCount int64 `json:"indexed_document_count"` + IndexedDocumentVolume int64 `json:"indexed_document_volume"` + JobType syncjobtype.SyncJobType `json:"job_type"` + LastSeen DateTime `json:"last_seen,omitempty"` + Metadata map[string]json.RawMessage `json:"metadata"` + StartedAt DateTime `json:"started_at,omitempty"` + Status syncstatus.SyncStatus `json:"status"` + TotalDocumentCount int64 `json:"total_document_count"` + TriggerMethod syncjobtriggermethod.SyncJobTriggerMethod `json:"trigger_method"` + WorkerHostname *string `json:"worker_hostname,omitempty"` +} + +func (s *ConnectorSyncJob) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cancelation_requested_at": + if err := dec.Decode(&s.CancelationRequestedAt); err != nil { + return fmt.Errorf("%s | %w", "CancelationRequestedAt", err) + } + + case "canceled_at": + if err := dec.Decode(&s.CanceledAt); err != nil { + return fmt.Errorf("%s | %w", "CanceledAt", err) + } + + case "completed_at": + if err := dec.Decode(&s.CompletedAt); err != nil { + return fmt.Errorf("%s | %w", "CompletedAt", err) + } + + case "connector": + if err := dec.Decode(&s.Connector); err != nil { + return fmt.Errorf("%s | %w", "Connector", err) + } + + case "created_at": + if err := dec.Decode(&s.CreatedAt); err != nil { + return fmt.Errorf("%s | %w", "CreatedAt", err) + } + + case "deleted_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocumentCount", err) + } + s.DeletedDocumentCount = value + case float64: + f := int64(v) + s.DeletedDocumentCount = f + } + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "indexed_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentCount", err) + } + s.IndexedDocumentCount = value + case float64: + f := int64(v) + s.IndexedDocumentCount = f + } + + case "indexed_document_volume": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentVolume", err) + } + s.IndexedDocumentVolume = value + case float64: + f := int64(v) + s.IndexedDocumentVolume = f + } + + case "job_type": + if err := dec.Decode(&s.JobType); err != nil { + return 
fmt.Errorf("%s | %w", "JobType", err) + } + + case "last_seen": + if err := dec.Decode(&s.LastSeen); err != nil { + return fmt.Errorf("%s | %w", "LastSeen", err) + } + + case "metadata": + if s.Metadata == nil { + s.Metadata = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "started_at": + if err := dec.Decode(&s.StartedAt); err != nil { + return fmt.Errorf("%s | %w", "StartedAt", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "total_document_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocumentCount", err) + } + s.TotalDocumentCount = value + case float64: + f := int64(v) + s.TotalDocumentCount = f + } + + case "trigger_method": + if err := dec.Decode(&s.TriggerMethod); err != nil { + return fmt.Errorf("%s | %w", "TriggerMethod", err) + } + + case "worker_hostname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WorkerHostname", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WorkerHostname = &o + + } + } + return nil +} + +// NewConnectorSyncJob returns a ConnectorSyncJob. +func NewConnectorSyncJob() *ConnectorSyncJob { + r := &ConnectorSyncJob{ + Metadata: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantkeywordproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantkeywordproperty.go new file mode 100644 index 000000000..9bad7eadf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantkeywordproperty.go @@ -0,0 +1,829 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// ConstantKeywordProperty type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L50-L53 +type ConstantKeywordProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != 
nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { + type innerConstantKeywordProperty ConstantKeywordProperty + tmp := innerConstantKeywordProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + Value: s.Value, + } + + tmp.Type = "constant_keyword" + + return json.Marshal(tmp) +} + +// NewConstantKeywordProperty returns a ConstantKeywordProperty. 
+func NewConstantKeywordProperty() *ConstantKeywordProperty { + r := &ConstantKeywordProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ConstantKeywordPropertyVariant interface { + ConstantKeywordPropertyCaster() *ConstantKeywordProperty +} + +func (s *ConstantKeywordProperty) ConstantKeywordPropertyCaster() *ConstantKeywordProperty { + return s +} + +func (s *ConstantKeywordProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantscorequery.go new file mode 100644 index 000000000..11aabad1b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/constantscorequery.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ConstantScoreQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L76-L86 +type ConstantScoreQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Filter Filter query you wish to run. Any returned documents must match this query. + // Filter queries do not calculate relevance scores. + // To speed up performance, Elasticsearch automatically caches frequently used + // filter queries. 
+ Filter Query `json:"filter"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *ConstantScoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewConstantScoreQuery returns a ConstantScoreQuery. +func NewConstantScoreQuery() *ConstantScoreQuery { + r := &ConstantScoreQuery{} + + return r +} + +type ConstantScoreQueryVariant interface { + ConstantScoreQueryCaster() *ConstantScoreQuery +} + +func (s *ConstantScoreQuery) ConstantScoreQueryCaster() *ConstantScoreQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contentobject.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contentobject.go new file mode 100644 index 000000000..a5dea12d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contentobject.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ContentObject type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L123-L135 +type ContentObject struct { + // Text The text content. + Text string `json:"text"` + // Type The type of content. 
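
Since filter queries skip relevance scoring, `constant_score` is the usual way to pin a fixed score onto a cached filter. A sketch of building and serializing one; the `ConstantScore` and `MatchAll` members of `types.Query` and the `NewMatchAllQuery` constructor are assumed from the same generated package, and this is illustrative rather than part of the diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        boost := float32(1.2)
        q := types.Query{
            ConstantScore: &types.ConstantScoreQuery{
                Boost:  &boost,
                Filter: types.Query{MatchAll: types.NewMatchAllQuery()},
            },
        }
        b, err := json.Marshal(q)
        if err != nil {
            panic(err)
        }
        // Roughly: {"constant_score":{"boost":1.2,"filter":{"match_all":{}}}}
        fmt.Println(string(b))
    }
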
+ Type string `json:"type"` +} + +func (s *ContentObject) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewContentObject returns a ContentObject. +func NewContentObject() *ContentObject { + r := &ContentObject{} + + return r +} + +type ContentObjectVariant interface { + ContentObjectCaster() *ContentObject +} + +func (s *ContentObject) ContentObjectCaster() *ContentObject { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/context.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/context.go new file mode 100644 index 000000000..7ee2e9a13 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/context.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Context holds the union for the following types: +// +// string +// GeoLocation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L229-L234 +type Context any + +type ContextVariant interface { + ContextCaster() *Context +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethod.go new file mode 100644 index 000000000..40527f591 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethod.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ContextMethod type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_context/types.ts#L27-L31 +type ContextMethod struct { + Name string `json:"name"` + Params []ContextMethodParam `json:"params"` + ReturnType string `json:"return_type"` +} + +func (s *ContextMethod) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "params": + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "return_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ReturnType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ReturnType = o + + } + } + return nil +} + +// NewContextMethod returns a ContextMethod. +func NewContextMethod() *ContextMethod { + r := &ContextMethod{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethodparam.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethodparam.go new file mode 100644 index 000000000..003a97578 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/contextmethodparam.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ContextMethodParam type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_context/types.ts#L33-L36 +type ContextMethodParam struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *ContextMethodParam) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewContextMethodParam returns a ContextMethodParam. +func NewContextMethodParam() *ContextMethodParam { + r := &ContextMethodParam{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/convertprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/convertprocessor.go new file mode 100644 index 000000000..9056bcbc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/convertprocessor.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype" +) + +// ConvertProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L673-L693 +type ConvertProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field whose value is to be converted. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. 
+ // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the `field` is updated in-place. + TargetField *string `json:"target_field,omitempty"` + // Type The type to convert the existing value to. + Type converttype.ConvertType `json:"type"` +} + +func (s *ConvertProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewConvertProcessor returns a ConvertProcessor. +func NewConvertProcessor() *ConvertProcessor { + r := &ConvertProcessor{} + + return r +} + +type ConvertProcessorVariant interface { + ConvertProcessorCaster() *ConvertProcessor +} + +func (s *ConvertProcessor) ConvertProcessorCaster() *ConvertProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordinatorstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordinatorstats.go new file mode 100644 index 000000000..4ef7d3ca8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordinatorstats.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
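
`ConvertProcessor` mirrors the JSON configuration of the ingest `convert` processor. A sketch of declaring one and rendering the JSON an ingest pipeline would carry; the enum value `converttype.Integer` is assumed from the vendored enums package, and the snippet is illustrative, not part of the diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype"
    )

    func main() {
        target := "bytes_int"
        ignoreMissing := true
        p := types.ConvertProcessor{
            Field:         "bytes",
            TargetField:   &target,        // written next to "bytes" rather than in-place
            Type:          converttype.Integer,
            IgnoreMissing: &ignoreMissing, // quietly skip docs without the field
        }
        b, err := json.Marshal(p)
        if err != nil {
            panic(err)
        }
        // Roughly: {"field":"bytes","ignore_missing":true,"target_field":"bytes_int","type":"integer"}
        fmt.Println(string(b))
    }
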
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CoordinatorStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/stats/types.ts#L30-L36 +type CoordinatorStats struct { + ExecutedSearchesTotal int64 `json:"executed_searches_total"` + NodeId string `json:"node_id"` + QueueSize int `json:"queue_size"` + RemoteRequestsCurrent int `json:"remote_requests_current"` + RemoteRequestsTotal int64 `json:"remote_requests_total"` +} + +func (s *CoordinatorStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executed_searches_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ExecutedSearchesTotal", err) + } + s.ExecutedSearchesTotal = value + case float64: + f := int64(v) + s.ExecutedSearchesTotal = f + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "queue_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "QueueSize", err) + } + s.QueueSize = value + case float64: + f := int(v) + s.QueueSize = f + } + + case "remote_requests_current": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoteRequestsCurrent", err) + } + s.RemoteRequestsCurrent = value + case float64: + f := int(v) + s.RemoteRequestsCurrent = f + } + + case "remote_requests_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RemoteRequestsTotal", err) + } + s.RemoteRequestsTotal = value + case float64: + f := int64(v) + s.RemoteRequestsTotal = f + } + + } + } + return nil +} + +// NewCoordinatorStats returns a CoordinatorStats. +func NewCoordinatorStats() *CoordinatorStats { + r := &CoordinatorStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordsgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordsgeobounds.go new file mode 100644 index 000000000..4342bed2f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/coordsgeobounds.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CoordsGeoBounds type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L154-L159 +type CoordsGeoBounds struct { + Bottom Float64 `json:"bottom"` + Left Float64 `json:"left"` + Right Float64 `json:"right"` + Top Float64 `json:"top"` +} + +func (s *CoordsGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Bottom", err) + } + f := Float64(value) + s.Bottom = f + case float64: + f := Float64(v) + s.Bottom = f + } + + case "left": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Left", err) + } + f := Float64(value) + s.Left = f + case float64: + f := Float64(v) + s.Left = f + } + + case "right": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Right", err) + } + f := Float64(value) + s.Right = f + case float64: + f := Float64(v) + s.Right = f + } + + case "top": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Top", err) + } + f := Float64(value) + s.Top = f + case float64: + f := Float64(v) + s.Top = f + } + + } + } + return nil +} + +// NewCoordsGeoBounds returns a CoordsGeoBounds. +func NewCoordsGeoBounds() *CoordsGeoBounds { + r := &CoordsGeoBounds{} + + return r +} + +type CoordsGeoBoundsVariant interface { + CoordsGeoBoundsCaster() *CoordsGeoBounds +} + +func (s *CoordsGeoBounds) CoordsGeoBoundsCaster() *CoordsGeoBounds { + return s +} + +func (s *CoordsGeoBounds) GeoBoundsCaster() *GeoBounds { + o := GeoBounds(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countedkeywordproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countedkeywordproperty.go new file mode 100644 index 000000000..f33ff14b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countedkeywordproperty.go @@ -0,0 +1,838 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// CountedKeywordProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L55-L62 +type CountedKeywordProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CountedKeywordProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := 
NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := 
NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CountedKeywordProperty) MarshalJSON() ([]byte, error) { + type innerCountedKeywordProperty CountedKeywordProperty + tmp := innerCountedKeywordProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "counted_keyword" + + return json.Marshal(tmp) +} + +// NewCountedKeywordProperty returns a CountedKeywordProperty. +func NewCountedKeywordProperty() *CountedKeywordProperty { + r := &CountedKeywordProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type CountedKeywordPropertyVariant interface { + CountedKeywordPropertyCaster() *CountedKeywordProperty +} + +func (s *CountedKeywordProperty) CountedKeywordPropertyCaster() *CountedKeywordProperty { + return s +} + +func (s *CountedKeywordProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/counter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/counter.go new file mode 100644 index 000000000..e2c409a78 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/counter.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Counter type. 
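The MarshalJSON override at the end of countedkeywordproperty.go unconditionally pins the type tag, so even a zero value round-trips with it. A short sketch, reusing the imports from the sketch above:

func exampleCountedKeyword() {
	p := types.NewCountedKeywordProperty()
	out, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	// Empty maps and nil pointers are dropped by omitempty, but the override
	// sets Type on the inner struct before marshalling.
	fmt.Println(string(out)) // {"type":"counted_keyword"}
}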
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L32-L35
+type Counter struct {
+	Active int64 `json:"active"`
+	Total  int64 `json:"total"`
+}
+
+func (s *Counter) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "active":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Active", err)
+				}
+				s.Active = value
+			case float64:
+				f := int64(v)
+				s.Active = f
+			}
+
+		case "total":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Total", err)
+				}
+				s.Total = value
+			case float64:
+				f := int64(v)
+				s.Total = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewCounter returns a Counter.
+func NewCounter() *Counter {
+	r := &Counter{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countrecord.go
new file mode 100644
index 000000000..b03a8840b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/countrecord.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// CountRecord type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/count/types.ts#L23-L39 +type CountRecord struct { + // Count the document count + Count *string `json:"count,omitempty"` + // Epoch seconds since 1970-01-01 00:00:00 + Epoch StringifiedEpochTimeUnitSeconds `json:"epoch,omitempty"` + // Timestamp time in HH:MM:SS + Timestamp *string `json:"timestamp,omitempty"` +} + +func (s *CountRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count", "dc", "docs.count", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Count = &o + + case "epoch", "t", "time": + if err := dec.Decode(&s.Epoch); err != nil { + return fmt.Errorf("%s | %w", "Epoch", err) + } + + case "timestamp", "ts", "hms", "hhmmss": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewCountRecord returns a CountRecord. +func NewCountRecord() *CountRecord { + r := &CountRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpu.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpu.go new file mode 100644 index 000000000..5746cf560 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpu.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Cpu type. 
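CountRecord's decoder accepts the cat API's short column aliases alongside the long names, so "dc" and "hms" populate the same fields as "count" and "timestamp". A sketch with an invented payload, reusing the imports from the first sketch:

func exampleCountRecord() {
	r := types.NewCountRecord()
	if err := json.Unmarshal([]byte(`{"dc":"120","hms":"12:00:01"}`), r); err != nil {
		panic(err)
	}
	fmt.Println(*r.Count, *r.Timestamp) // 120 12:00:01
}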
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L577-L586 +type Cpu struct { + LoadAverage map[string]Float64 `json:"load_average,omitempty"` + Percent *int `json:"percent,omitempty"` + Sys Duration `json:"sys,omitempty"` + SysInMillis *int64 `json:"sys_in_millis,omitempty"` + Total Duration `json:"total,omitempty"` + TotalInMillis *int64 `json:"total_in_millis,omitempty"` + User Duration `json:"user,omitempty"` + UserInMillis *int64 `json:"user_in_millis,omitempty"` +} + +func (s *Cpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "load_average": + if s.LoadAverage == nil { + s.LoadAverage = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.LoadAverage); err != nil { + return fmt.Errorf("%s | %w", "LoadAverage", err) + } + + case "percent": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Percent", err) + } + s.Percent = &value + case float64: + f := int(v) + s.Percent = &f + } + + case "sys": + if err := dec.Decode(&s.Sys); err != nil { + return fmt.Errorf("%s | %w", "Sys", err) + } + + case "sys_in_millis": + if err := dec.Decode(&s.SysInMillis); err != nil { + return fmt.Errorf("%s | %w", "SysInMillis", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "total_in_millis": + if err := dec.Decode(&s.TotalInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalInMillis", err) + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + + case "user_in_millis": + if err := dec.Decode(&s.UserInMillis); err != nil { + return fmt.Errorf("%s | %w", "UserInMillis", err) + } + + } + } + return nil +} + +// NewCpu returns a Cpu. +func NewCpu() *Cpu { + r := &Cpu{ + LoadAverage: make(map[string]Float64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpuacct.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpuacct.go new file mode 100644 index 000000000..33858a0ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cpuacct.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
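Cpu mixes the strconv fallback (percent) with plain map decoding into the load_average map that its constructor pre-allocates. A sketch with invented values:

func exampleCpu() {
	c := types.NewCpu()
	if err := json.Unmarshal([]byte(`{"percent":"12","load_average":{"1m":0.42,"5m":0.37}}`), c); err != nil {
		panic(err)
	}
	fmt.Println(*c.Percent, c.LoadAverage["1m"]) // 12 0.42
}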
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CpuAcct type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L514-L523 +type CpuAcct struct { + // ControlGroup The `cpuacct` control group to which the Elasticsearch process belongs. + ControlGroup *string `json:"control_group,omitempty"` + // UsageNanos The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup + // as the Elasticsearch process. + UsageNanos *int64 `json:"usage_nanos,omitempty"` +} + +func (s *CpuAcct) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ControlGroup", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "usage_nanos": + if err := dec.Decode(&s.UsageNanos); err != nil { + return fmt.Errorf("%s | %w", "UsageNanos", err) + } + + } + } + return nil +} + +// NewCpuAcct returns a CpuAcct. +func NewCpuAcct() *CpuAcct { + r := &CpuAcct{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createdstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createdstatus.go new file mode 100644 index 000000000..f31b36021 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createdstatus.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CreatedStatus type. 
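In CpuAcct, only control_group gets the quoted-string treatment; usage_nanos is decoded directly into the *int64, so it must arrive as a JSON number. A sketch with an invented cgroup path and counter:

func exampleCpuAcct() {
	a := types.NewCpuAcct()
	if err := json.Unmarshal([]byte(`{"control_group":"/elasticsearch","usage_nanos":250000000}`), a); err != nil {
		panic(err)
	}
	fmt.Println(*a.ControlGroup, *a.UsageNanos) // /elasticsearch 250000000
}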
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/CreatedStatus.ts#L20-L22 +type CreatedStatus struct { + Created bool `json:"created"` +} + +func (s *CreatedStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Created", err) + } + s.Created = value + case bool: + s.Created = v + } + + } + } + return nil +} + +// NewCreatedStatus returns a CreatedStatus. +func NewCreatedStatus() *CreatedStatus { + r := &CreatedStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createfrom.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createfrom.go new file mode 100644 index 000000000..fe6f7ccf3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createfrom.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CreateFrom type. 
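CreatedStatus applies the same leniency to booleans: the generated decoder routes quoted values through strconv.ParseBool. A short sketch:

func exampleCreatedStatus() {
	st := types.NewCreatedStatus()
	if err := json.Unmarshal([]byte(`{"created":"true"}`), st); err != nil {
		panic(err)
	}
	fmt.Println(st.Created) // true
}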
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/create_from/MigrateCreateFromRequest.ts#L46-L60 +type CreateFrom struct { + // MappingsOverride Mappings overrides to be applied to the destination index (optional) + MappingsOverride *TypeMapping `json:"mappings_override,omitempty"` + // RemoveIndexBlocks If index blocks should be removed when creating destination index (optional) + RemoveIndexBlocks *bool `json:"remove_index_blocks,omitempty"` + // SettingsOverride Settings overrides to be applied to the destination index (optional) + SettingsOverride *IndexSettings `json:"settings_override,omitempty"` +} + +func (s *CreateFrom) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mappings_override": + if err := dec.Decode(&s.MappingsOverride); err != nil { + return fmt.Errorf("%s | %w", "MappingsOverride", err) + } + + case "remove_index_blocks": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveIndexBlocks", err) + } + s.RemoveIndexBlocks = &value + case bool: + s.RemoveIndexBlocks = &v + } + + case "settings_override": + if err := dec.Decode(&s.SettingsOverride); err != nil { + return fmt.Errorf("%s | %w", "SettingsOverride", err) + } + + } + } + return nil +} + +// NewCreateFrom returns a CreateFrom. +func NewCreateFrom() *CreateFrom { + r := &CreateFrom{} + + return r +} + +type CreateFromVariant interface { + CreateFromCaster() *CreateFrom +} + +func (s *CreateFrom) CreateFromCaster() *CreateFrom { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createoperation.go new file mode 100644 index 000000000..ff2412752 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/createoperation.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// CreateOperation type. 
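All of CreateFrom's fields are optional pointers, so a marshalled request body carries only what was set explicitly. A marshalling sketch (the flag value is invented):

func exampleCreateFrom() {
	keepBlocks := false
	cf := types.NewCreateFrom()
	cf.RemoveIndexBlocks = &keepBlocks
	body, err := json.Marshal(cf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"remove_index_blocks":false}
}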
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L140-L140 +type CreateOperation struct { + // DynamicTemplates A map from the full name of fields to the name of dynamic templates. + // It defaults to an empty map. + // If a name matches a dynamic template, that template will be applied + // regardless of other match predicates defined in the template. + // If a field is already defined in the mapping, then this parameter won't be + // used. + DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ The name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Pipeline The ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, setting the value to + // `_none` turns off the default ingest pipeline for this request. + // If a final pipeline is configured, it will always run regardless of the value + // of this parameter. + Pipeline *string `json:"pipeline,omitempty"` + // RequireAlias If `true`, the request's actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // Routing A custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *CreateOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic_templates": + if s.DynamicTemplates == nil { + s.DynamicTemplates = make(map[string]string, 0) + } + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return fmt.Errorf("%s | %w", "DynamicTemplates", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "if_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IfPrimaryTerm", err) + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return fmt.Errorf("%s | %w", "IfSeqNo", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "require_alias": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequireAlias", err) + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } 
+ + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewCreateOperation returns a CreateOperation. +func NewCreateOperation() *CreateOperation { + r := &CreateOperation{ + DynamicTemplates: make(map[string]string), + } + + return r +} + +type CreateOperationVariant interface { + CreateOperationCaster() *CreateOperation +} + +func (s *CreateOperation) CreateOperationCaster() *CreateOperation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/csvprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/csvprocessor.go new file mode 100644 index 000000000..982d95e92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/csvprocessor.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CsvProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L695-L728 +type CsvProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // EmptyValue Value used to fill empty fields. + // Empty fields are skipped if this is not provided. + // An empty field is one with no value (2 consecutive separators) or empty + // quotes (`""`). + EmptyValue json.RawMessage `json:"empty_value,omitempty"` + // Field The field to extract data from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Quote Quote used in CSV, has to be single character string. + Quote *string `json:"quote,omitempty"` + // Separator Separator used in CSV, has to be single character string. + Separator *string `json:"separator,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetFields The array of fields to assign extracted values to. 
+ TargetFields []string `json:"target_fields"` + // Trim Trim whitespaces in unquoted fields. + Trim *bool `json:"trim,omitempty"` +} + +func (s *CsvProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "empty_value": + if err := dec.Decode(&s.EmptyValue); err != nil { + return fmt.Errorf("%s | %w", "EmptyValue", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "quote": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Quote", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Quote = &o + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "TargetFields", err) + } + + s.TargetFields = append(s.TargetFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.TargetFields); err != nil { + return fmt.Errorf("%s | %w", "TargetFields", err) + } + } + + case "trim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Trim", err) + } + s.Trim = &value + case bool: + s.Trim = &v + } + + } + } + return nil +} + +// NewCsvProcessor returns a CsvProcessor. 
+func NewCsvProcessor() *CsvProcessor { + r := &CsvProcessor{} + + return r +} + +type CsvProcessorVariant interface { + CsvProcessorCaster() *CsvProcessor +} + +func (s *CsvProcessor) CsvProcessorCaster() *CsvProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregate.go new file mode 100644 index 000000000..c27b1a99a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregate.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CumulativeCardinalityAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L858-L866 +type CumulativeCardinalityAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Value int64 `json:"value"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *CumulativeCardinalityAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewCumulativeCardinalityAggregate returns a CumulativeCardinalityAggregate. 
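csvprocessor.go's target_fields handling above is worth noting: the decoder inspects the raw message for a leading '[' and promotes a bare string to a one-element slice. A sketch with invented payloads:

func exampleCsvProcessor() {
	single := types.NewCsvProcessor()
	if err := json.Unmarshal([]byte(`{"field":"message","target_fields":"user"}`), single); err != nil {
		panic(err)
	}
	many := types.NewCsvProcessor()
	if err := json.Unmarshal([]byte(`{"field":"message","target_fields":["user","action"]}`), many); err != nil {
		panic(err)
	}
	fmt.Println(single.TargetFields, many.TargetFields) // [user] [user action]
}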
+func NewCumulativeCardinalityAggregate() *CumulativeCardinalityAggregate { + r := &CumulativeCardinalityAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregation.go new file mode 100644 index 000000000..37a4e2b49 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativecardinalityaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// CumulativeCardinalityAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L206-L209 +type CumulativeCardinalityAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *CumulativeCardinalityAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewCumulativeCardinalityAggregation returns a CumulativeCardinalityAggregation. 
+func NewCumulativeCardinalityAggregation() *CumulativeCardinalityAggregation { + r := &CumulativeCardinalityAggregation{} + + return r +} + +type CumulativeCardinalityAggregationVariant interface { + CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation +} + +func (s *CumulativeCardinalityAggregation) CumulativeCardinalityAggregationCaster() *CumulativeCardinalityAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativesumaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativesumaggregation.go new file mode 100644 index 000000000..36045fa12 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/cumulativesumaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// CumulativeSumAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L211-L214 +type CumulativeSumAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *CumulativeSumAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewCumulativeSumAggregation returns a CumulativeSumAggregation. 
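For the request side, a sketch of filling the pipeline aggregation above from Go rather than raw JSON; "total_users" is a hypothetical sibling cardinality aggregation, not a name taken from this diff, and the snippet assumes BucketsPath accepts a plain string as in the vendored union type.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy"
)

func main() {
	agg := types.NewCumulativeCardinalityAggregation()

	// buckets_path points at the metric to accumulate; "total_users" is a
	// hypothetical sibling cardinality aggregation defined elsewhere.
	agg.BucketsPath = "total_users"

	gp := gappolicy.Skip
	agg.GapPolicy = &gp

	out, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	// Prints {"buckets_path":"total_users","gap_policy":"skip"}.
	fmt.Println(string(out))
}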
+func NewCumulativeSumAggregation() *CumulativeSumAggregation { + r := &CumulativeSumAggregation{} + + return r +} + +type CumulativeSumAggregationVariant interface { + CumulativeSumAggregationCaster() *CumulativeSumAggregation +} + +func (s *CumulativeSumAggregation) CumulativeSumAggregationCaster() *CumulativeSumAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/currentnode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/currentnode.go new file mode 100644 index 000000000..dc792dc60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/currentnode.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// CurrentNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L79-L90 +type CurrentNode struct { + Attributes map[string]string `json:"attributes"` + Id string `json:"id"` + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles"` + TransportAddress string `json:"transport_address"` + WeightRanking int `json:"weight_ranking"` +} + +func (s *CurrentNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "weight_ranking": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "WeightRanking", err) + } + s.WeightRanking = value + case float64: + f := int(v) + s.WeightRanking = f + } + + } + } + return nil +} + +// NewCurrentNode returns a CurrentNode. +func NewCurrentNode() *CurrentNode { + r := &CurrentNode{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customanalyzer.go new file mode 100644 index 000000000..356f06890 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customanalyzer.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CustomAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L28-L35 +type CustomAnalyzer struct { + CharFilter []string `json:"char_filter,omitempty"` + Filter []string `json:"filter,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + PositionOffsetGap *int `json:"position_offset_gap,omitempty"` + Tokenizer string `json:"tokenizer"` + Type string `json:"type,omitempty"` +} + +func (s *CustomAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "char_filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CharFilter", err) + } + + s.CharFilter = append(s.CharFilter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CharFilter); err != nil { + return fmt.Errorf("%s | %w", "CharFilter", err) + } + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "position_increment_gap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositionIncrementGap", err) + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f + } + + case "position_offset_gap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositionOffsetGap", err) + } + s.PositionOffsetGap = &value + case float64: + f := int(v) + s.PositionOffsetGap = &f + } + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tokenizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CustomAnalyzer) MarshalJSON() ([]byte, error) { + type innerCustomAnalyzer CustomAnalyzer + tmp := innerCustomAnalyzer{ + CharFilter: s.CharFilter, + Filter: s.Filter, + PositionIncrementGap: s.PositionIncrementGap, + PositionOffsetGap: s.PositionOffsetGap, + Tokenizer: s.Tokenizer, + Type: s.Type, + } + + tmp.Type = "custom" + + return json.Marshal(tmp) +} + +// NewCustomAnalyzer returns a CustomAnalyzer. 
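A short sketch of the MarshalJSON override above, which pins the `type` discriminator to "custom" regardless of what the struct holds (only the vendored types package is assumed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.NewCustomAnalyzer()
	a.Tokenizer = "standard"
	a.Filter = []string{"lowercase"}

	// The override forces the discriminator, so this prints:
	// {"filter":["lowercase"],"tokenizer":"standard","type":"custom"}
	out, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}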
+func NewCustomAnalyzer() *CustomAnalyzer { + r := &CustomAnalyzer{} + + return r +} + +type CustomAnalyzerVariant interface { + CustomAnalyzerCaster() *CustomAnalyzer +} + +func (s *CustomAnalyzer) CustomAnalyzerCaster() *CustomAnalyzer { + return s +} + +func (s *CustomAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customcategorizetextanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customcategorizetextanalyzer.go new file mode 100644 index 000000000..182e8eb79 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customcategorizetextanalyzer.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CustomCategorizeTextAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1190-L1194 +type CustomCategorizeTextAnalyzer struct { + CharFilter []string `json:"char_filter,omitempty"` + Filter []string `json:"filter,omitempty"` + Tokenizer *string `json:"tokenizer,omitempty"` +} + +func (s *CustomCategorizeTextAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "char_filter": + if err := dec.Decode(&s.CharFilter); err != nil { + return fmt.Errorf("%s | %w", "CharFilter", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tokenizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + } + } + return nil +} + +// NewCustomCategorizeTextAnalyzer returns a CustomCategorizeTextAnalyzer. 
+func NewCustomCategorizeTextAnalyzer() *CustomCategorizeTextAnalyzer { + r := &CustomCategorizeTextAnalyzer{} + + return r +} + +type CustomCategorizeTextAnalyzerVariant interface { + CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer +} + +func (s *CustomCategorizeTextAnalyzer) CustomCategorizeTextAnalyzerCaster() *CustomCategorizeTextAnalyzer { + return s +} + +func (s *CustomCategorizeTextAnalyzer) CategorizeTextAnalyzerCaster() *CategorizeTextAnalyzer { + o := CategorizeTextAnalyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customnormalizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customnormalizer.go new file mode 100644 index 000000000..d835d44eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customnormalizer.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// CustomNormalizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/normalizers.ts#L30-L34 +type CustomNormalizer struct { + CharFilter []string `json:"char_filter,omitempty"` + Filter []string `json:"filter,omitempty"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s CustomNormalizer) MarshalJSON() ([]byte, error) { + type innerCustomNormalizer CustomNormalizer + tmp := innerCustomNormalizer{ + CharFilter: s.CharFilter, + Filter: s.Filter, + Type: s.Type, + } + + tmp.Type = "custom" + + return json.Marshal(tmp) +} + +// NewCustomNormalizer returns a CustomNormalizer. +func NewCustomNormalizer() *CustomNormalizer { + r := &CustomNormalizer{} + + return r +} + +type CustomNormalizerVariant interface { + CustomNormalizerCaster() *CustomNormalizer +} + +func (s *CustomNormalizer) CustomNormalizerCaster() *CustomNormalizer { + return s +} + +func (s *CustomNormalizer) NormalizerCaster() *Normalizer { + o := Normalizer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customrequestparams.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customrequestparams.go new file mode 100644 index 000000000..02b2ccfb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customrequestparams.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CustomRequestParams type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L974-L985 +type CustomRequestParams struct { + // Content The body structure of the request. It requires passing in the string-escaped + // result of the JSON format HTTP request body. + // For example: + // ``` + // "request": "{\"input\":${input}}" + // ``` + // > info + // > The content string needs to be a single line except when using the Kibana + // console. + Content string `json:"content"` +} + +func (s *CustomRequestParams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Content = o + + } + } + return nil +} + +// NewCustomRequestParams returns a CustomRequestParams. +func NewCustomRequestParams() *CustomRequestParams { + r := &CustomRequestParams{} + + return r +} + +type CustomRequestParamsVariant interface { + CustomRequestParamsCaster() *CustomRequestParams +} + +func (s *CustomRequestParams) CustomRequestParamsCaster() *CustomRequestParams { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customresponseparams.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customresponseparams.go new file mode 100644 index 000000000..8a95e52a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customresponseparams.go @@ -0,0 +1,182 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// CustomResponseParams type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L987-L1125 +type CustomResponseParams struct { + // JsonParser Specifies the JSON parser that is used to parse the response from the custom + // service. + // Different task types require different json_parser parameters. + // For example: + // ``` + // # text_embedding + // # For a response like this: + // + // { + // "object": "list", + // "data": [ + // { + // "object": "embedding", + // "index": 0, + // "embedding": [ + // 0.014539449, + // -0.015288644 + // ] + // } + // ], + // "model": "text-embedding-ada-002-v2", + // "usage": { + // "prompt_tokens": 8, + // "total_tokens": 8 + // } + // } + // + // # the json_parser definition should look like this: + // + // "response":{ + // "json_parser":{ + // "text_embeddings":"$.data[*].embedding[*]" + // } + // } + // + // # sparse_embedding + // # For a response like this: + // + // { + // "request_id": "75C50B5B-E79E-4930-****-F48DBB392231", + // "latency": 22, + // "usage": { + // "token_count": 11 + // }, + // "result": { + // "sparse_embeddings": [ + // { + // "index": 0, + // "embedding": [ + // { + // "token_id": 6, + // "weight": 0.101 + // }, + // { + // "token_id": 163040, + // "weight": 0.28417 + // } + // ] + // } + // ] + // } + // } + // + // # the json_parser definition should look like this: + // + // "response":{ + // "json_parser":{ + // "token_path":"$.result.sparse_embeddings[*].embedding[*].token_id", + // "weight_path":"$.result.sparse_embeddings[*].embedding[*].weight" + // } + // } + // + // # rerank + // # For a response like this: + // + // { + // "results": [ + // { + // "index": 3, + // "relevance_score": 0.999071, + // "document": "abc" + // }, + // { + // "index": 4, + // "relevance_score": 0.7867867, + // "document": "123" + // }, + // { + // "index": 0, + // "relevance_score": 0.32713068, + // "document": "super" + // } + // ], + // } + // + // # the json_parser definition should look like this: + // + // "response":{ + // "json_parser":{ + // "reranked_index":"$.result.scores[*].index", // optional + // "relevance_score":"$.result.scores[*].score", + // "document_text":"xxx" // optional + // } + // } + // + // # completion + // # For a response like this: + // + // { + // "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT", + // "object": "chat.completion", + // "created": 1741569952, + // "model": "gpt-4.1-2025-04-14", + // "choices": [ + // { + // "index": 0, + // "message": { + // "role": "assistant", + // "content": "Hello! How can I assist you today?", + // "refusal": null, + // "annotations": [] + // }, + // "logprobs": null, + // "finish_reason": "stop" + // } + // ] + // } + // + // # the json_parser definition should look like this: + // + // "response":{ + // "json_parser":{ + // "completion_result":"$.choices[*].message.content" + // } + // } + JsonParser json.RawMessage `json:"json_parser,omitempty"` +} + +// NewCustomResponseParams returns a CustomResponseParams. 
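Taken together, a hedged sketch of how CustomRequestParams and CustomResponseParams pair up for a hypothetical text_embedding service; the JSONPath mirrors the doc comment above and is not a verified server contract.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	req := types.NewCustomRequestParams()
	// The ${input} placeholder is substituted by the service; the body
	// itself must be the string-escaped form of the JSON request.
	req.Content = `{"input":${input}}`

	resp := types.NewCustomResponseParams()
	// JsonParser is free-form JSON; this path follows the text_embedding
	// example in the doc comment above.
	resp.JsonParser = json.RawMessage(`{"text_embeddings":"$.data[*].embedding[*]"}`)

	out, err := json.Marshal(struct {
		Request  *types.CustomRequestParams  `json:"request"`
		Response *types.CustomResponseParams `json:"response"`
	}{req, resp})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}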
+func NewCustomResponseParams() *CustomResponseParams { + r := &CustomResponseParams{} + + return r +} + +type CustomResponseParamsVariant interface { + CustomResponseParamsCaster() *CustomResponseParams +} + +func (s *CustomResponseParams) CustomResponseParamsCaster() *CustomResponseParams { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customscheduling.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customscheduling.go new file mode 100644 index 000000000..d5c2f617e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customscheduling.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CustomScheduling type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L120-L126 +type CustomScheduling struct { + ConfigurationOverrides CustomSchedulingConfigurationOverrides `json:"configuration_overrides"` + Enabled bool `json:"enabled"` + Interval string `json:"interval"` + LastSynced DateTime `json:"last_synced,omitempty"` + Name string `json:"name"` +} + +func (s *CustomScheduling) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "configuration_overrides": + if err := dec.Decode(&s.ConfigurationOverrides); err != nil { + return fmt.Errorf("%s | %w", "ConfigurationOverrides", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "interval": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Interval = o + + case "last_synced": + if err := dec.Decode(&s.LastSynced); err != nil { + return fmt.Errorf("%s | %w", "LastSynced", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewCustomScheduling returns a 
CustomScheduling. +func NewCustomScheduling() *CustomScheduling { + r := &CustomScheduling{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customschedulingconfigurationoverrides.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customschedulingconfigurationoverrides.go new file mode 100644 index 000000000..c2ef54995 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customschedulingconfigurationoverrides.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CustomSchedulingConfigurationOverrides type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L112-L118 +type CustomSchedulingConfigurationOverrides struct { + DomainAllowlist []string `json:"domain_allowlist,omitempty"` + MaxCrawlDepth *int `json:"max_crawl_depth,omitempty"` + SeedUrls []string `json:"seed_urls,omitempty"` + SitemapDiscoveryDisabled *bool `json:"sitemap_discovery_disabled,omitempty"` + SitemapUrls []string `json:"sitemap_urls,omitempty"` +} + +func (s *CustomSchedulingConfigurationOverrides) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "domain_allowlist": + if err := dec.Decode(&s.DomainAllowlist); err != nil { + return fmt.Errorf("%s | %w", "DomainAllowlist", err) + } + + case "max_crawl_depth": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxCrawlDepth", err) + } + s.MaxCrawlDepth = &value + case float64: + f := int(v) + s.MaxCrawlDepth = &f + } + + case "seed_urls": + if err := dec.Decode(&s.SeedUrls); err != nil { + return fmt.Errorf("%s | %w", "SeedUrls", err) + } + + case "sitemap_discovery_disabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SitemapDiscoveryDisabled", err) + } + s.SitemapDiscoveryDisabled = &value + case bool: + s.SitemapDiscoveryDisabled = &v + } + + case "sitemap_urls": + if err := dec.Decode(&s.SitemapUrls); err != nil { + return fmt.Errorf("%s | %w", "SitemapUrls", err) + } + + } + } + return nil +} + +// NewCustomSchedulingConfigurationOverrides returns a 
CustomSchedulingConfigurationOverrides.
+func NewCustomSchedulingConfigurationOverrides() *CustomSchedulingConfigurationOverrides {
+	r := &CustomSchedulingConfigurationOverrides{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customservicesettings.go
new file mode 100644
index 000000000..bdaff22da
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customservicesettings.go
@@ -0,0 +1,182 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// CustomServiceSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L904-L972
+type CustomServiceSettings struct {
+	// Headers Specifies the HTTPS header parameters – such as `Authentication` or
+	// `Content-Type` – that are required to access the custom service.
+	// For example:
+	// ```
+	//
+	// "headers":{
+	// "Authorization": "Bearer ${api_key}",
+	// "Content-Type": "application/json;charset=utf-8"
+	// }
+	//
+	// ```
+	Headers json.RawMessage `json:"headers,omitempty"`
+	// InputType Specifies the input type translation values that are used to replace the
+	// `${input_type}` template in the request body.
+	// For example:
+	// ```
+	//
+	// "input_type": {
+	// "translation": {
+	// "ingest": "do_ingest",
+	// "search": "do_search"
+	// },
+	// "default": "a_default"
+	// },
+	//
+	// ```
+	// If the subsequent inference requests come from a search context, the `search`
+	// key will be used and the template will be replaced with `do_search`.
+	// If it comes from the ingest context `do_ingest` is used. If it's a different
+	// context that is not specified, the default value will be used. If no default
+	// is specified an empty string is used.
+	// `translation` can be:
+	// * `classification`
+	// * `clustering`
+	// * `ingest`
+	// * `search`
+	InputType json.RawMessage `json:"input_type,omitempty"`
+	// QueryParameters Specifies the query parameters as a list of tuples. The arrays inside the
+	// `query_parameters` must have two items, a key and a value.
+	// For example:
+	// ```
+	// "query_parameters":[
+	//
+	// ["param_key", "some_value"],
+	// ["param_key", "another_value"],
+	// ["other_key", "other_value"]
+	//
+	// ]
+	// ```
+	// If the base url is `https://www.elastic.co` it results in:
+	// `https://www.elastic.co?param_key=some_value&param_key=another_value&other_key=other_value`.
+	QueryParameters json.RawMessage `json:"query_parameters,omitempty"`
+	// Request The request configuration object.
+	Request CustomRequestParams `json:"request"`
+	// Response The response configuration object.
+	Response CustomResponseParams `json:"response"`
+	// SecretParameters Specifies secret parameters, like `api_key` or `api_token`, that are required
+	// to access the custom service.
+	// For example:
+	// ```
+	//
+	// "secret_parameters":{
+	// "api_key":""
+	// }
+	//
+	// ```
+	SecretParameters json.RawMessage `json:"secret_parameters,omitempty"`
+	// Url The URL endpoint to use for the requests.
+	Url *string `json:"url,omitempty"`
+}
+
+func (s *CustomServiceSettings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "headers":
+			if err := dec.Decode(&s.Headers); err != nil {
+				return fmt.Errorf("%s | %w", "Headers", err)
+			}
+
+		case "input_type":
+			if err := dec.Decode(&s.InputType); err != nil {
+				return fmt.Errorf("%s | %w", "InputType", err)
+			}
+
+		case "query_parameters":
+			if err := dec.Decode(&s.QueryParameters); err != nil {
+				return fmt.Errorf("%s | %w", "QueryParameters", err)
+			}
+
+		case "request":
+			if err := dec.Decode(&s.Request); err != nil {
+				return fmt.Errorf("%s | %w", "Request", err)
+			}
+
+		case "response":
+			if err := dec.Decode(&s.Response); err != nil {
+				return fmt.Errorf("%s | %w", "Response", err)
+			}
+
+		case "secret_parameters":
+			if err := dec.Decode(&s.SecretParameters); err != nil {
+				return fmt.Errorf("%s | %w", "SecretParameters", err)
+			}
+
+		case "url":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Url", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Url = &o
+
+		}
+	}
+	return nil
+}
+
+// NewCustomServiceSettings returns a CustomServiceSettings.
+func NewCustomServiceSettings() *CustomServiceSettings {
+	r := &CustomServiceSettings{}
+
+	return r
+}
+
+type CustomServiceSettingsVariant interface {
+	CustomServiceSettingsCaster() *CustomServiceSettings
+}
+
+func (s *CustomServiceSettings) CustomServiceSettingsCaster() *CustomServiceSettings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customtasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customtasksettings.go
new file mode 100644
index 000000000..9413e1c70
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/customtasksettings.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
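Assembling the full settings object, a sketch in which the endpoint URL, the `${api_key}` secret, and the JSONPath are placeholders for illustration rather than a tested configuration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	s := types.NewCustomServiceSettings()

	url := "https://example.invalid/v1/embeddings" // hypothetical endpoint
	s.Url = &url

	// Headers and secret parameters are free-form JSON; ${api_key} refers
	// to the secret declared alongside it.
	s.Headers = json.RawMessage(`{"Authorization":"Bearer ${api_key}","Content-Type":"application/json;charset=utf-8"}`)
	s.SecretParameters = json.RawMessage(`{"api_key":"REDACTED"}`)

	// Query parameters are a list of two-item [key, value] tuples.
	s.QueryParameters = json.RawMessage(`[["param_key","some_value"],["other_key","other_value"]]`)

	s.Request = types.CustomRequestParams{Content: `{"input":${input}}`}
	s.Response = types.CustomResponseParams{
		JsonParser: json.RawMessage(`{"text_embeddings":"$.data[*].embedding[*]"}`),
	}

	out, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}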
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// CustomTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1138-L1152 +type CustomTaskSettings struct { + // Parameters Specifies parameters that are required to run the custom service. The + // parameters depend on the model your custom service uses. + // For example: + // ``` + // + // "task_settings":{ + // "parameters":{ + // "input_type":"query", + // "return_token":true + // } + // } + // + // ``` + Parameters json.RawMessage `json:"parameters,omitempty"` +} + +// NewCustomTaskSettings returns a CustomTaskSettings. +func NewCustomTaskSettings() *CustomTaskSettings { + r := &CustomTaskSettings{} + + return r +} + +type CustomTaskSettingsVariant interface { + CustomTaskSettingsCaster() *CustomTaskSettings +} + +func (s *CustomTaskSettings) CustomTaskSettingsCaster() *CustomTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechanalyzer.go new file mode 100644 index 000000000..7281038c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// CzechAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L132-L137 +type CzechAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *CzechAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CzechAnalyzer) MarshalJSON() ([]byte, error) { + type innerCzechAnalyzer CzechAnalyzer + tmp := innerCzechAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "czech" + + return json.Marshal(tmp) +} + +// NewCzechAnalyzer returns a CzechAnalyzer. +func NewCzechAnalyzer() *CzechAnalyzer { + r := &CzechAnalyzer{} + + return r +} + +type CzechAnalyzerVariant interface { + CzechAnalyzerCaster() *CzechAnalyzer +} + +func (s *CzechAnalyzer) CzechAnalyzerCaster() *CzechAnalyzer { + return s +} + +func (s *CzechAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechstemtokenfilter.go new file mode 100644 index 000000000..296ac8e79 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/czechstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// CzechStemTokenFilter type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L551-L553 +type CzechStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *CzechStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CzechStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerCzechStemTokenFilter CzechStemTokenFilter + tmp := innerCzechStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "czech_stem" + + return json.Marshal(tmp) +} + +// NewCzechStemTokenFilter returns a CzechStemTokenFilter. +func NewCzechStemTokenFilter() *CzechStemTokenFilter { + r := &CzechStemTokenFilter{} + + return r +} + +type CzechStemTokenFilterVariant interface { + CzechStemTokenFilterCaster() *CzechStemTokenFilter +} + +func (s *CzechStemTokenFilter) CzechStemTokenFilterCaster() *CzechStemTokenFilter { + return s +} + +func (s *CzechStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dailyschedule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dailyschedule.go new file mode 100644 index 000000000..770eef619 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dailyschedule.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DailySchedule type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L33-L35 +type DailySchedule struct { + At []ScheduleTimeOfDay `json:"at"` +} + +func (s *DailySchedule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "at": + if err := dec.Decode(&s.At); err != nil { + return fmt.Errorf("%s | %w", "At", err) + } + + } + } + return nil +} + +// NewDailySchedule returns a DailySchedule. +func NewDailySchedule() *DailySchedule { + r := &DailySchedule{} + + return r +} + +type DailyScheduleVariant interface { + DailyScheduleCaster() *DailySchedule +} + +func (s *DailySchedule) DailyScheduleCaster() *DailySchedule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danglingindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danglingindex.go new file mode 100644 index 000000000..29ca10b93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danglingindex.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DanglingIndex type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 +type DanglingIndex struct { + CreationDateMillis int64 `json:"creation_date_millis"` + IndexName string `json:"index_name"` + IndexUuid string `json:"index_uuid"` + NodeIds []string `json:"node_ids"` +} + +func (s *DanglingIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "creation_date_millis": + if err := dec.Decode(&s.CreationDateMillis); err != nil { + return fmt.Errorf("%s | %w", "CreationDateMillis", err) + } + + case "index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexName = o + + case "index_uuid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexUuid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexUuid = o + + case "node_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "NodeIds", err) + } + + s.NodeIds = append(s.NodeIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NodeIds); err != nil { + return fmt.Errorf("%s | %w", "NodeIds", err) + } + } + + } + } + return nil +} + +// NewDanglingIndex returns a DanglingIndex. +func NewDanglingIndex() *DanglingIndex { + r := &DanglingIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danishanalyzer.go new file mode 100644 index 000000000..dbe99dbf8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/danishanalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DanishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L139-L143 +type DanishAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DanishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DanishAnalyzer) MarshalJSON() ([]byte, error) { + type innerDanishAnalyzer DanishAnalyzer + tmp := innerDanishAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "danish" + + return json.Marshal(tmp) +} + +// NewDanishAnalyzer returns a DanishAnalyzer. +func NewDanishAnalyzer() *DanishAnalyzer { + r := &DanishAnalyzer{} + + return r +} + +type DanishAnalyzerVariant interface { + DanishAnalyzerCaster() *DanishAnalyzer +} + +func (s *DanishAnalyzer) DanishAnalyzerCaster() *DanishAnalyzer { + return s +} + +func (s *DanishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfiguration.go new file mode 100644 index 000000000..e885d0601 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfiguration.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DatabaseConfiguration type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L22-L37
+type DatabaseConfiguration struct {
+	AdditionalDatabaseConfigurationProperty map[string]json.RawMessage `json:"-"`
+	Ipinfo                                  *Ipinfo                    `json:"ipinfo,omitempty"`
+	Maxmind                                 *Maxmind                   `json:"maxmind,omitempty"`
+	// Name The provider-assigned name of the IP geolocation database to download.
+	Name string `json:"name"`
+}
+
+func (s *DatabaseConfiguration) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "ipinfo":
+			if err := dec.Decode(&s.Ipinfo); err != nil {
+				return fmt.Errorf("%s | %w", "Ipinfo", err)
+			}
+
+		case "maxmind":
+			if err := dec.Decode(&s.Maxmind); err != nil {
+				return fmt.Errorf("%s | %w", "Maxmind", err)
+			}
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalDatabaseConfigurationProperty == nil {
+					s.AdditionalDatabaseConfigurationProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationProperty", err)
+				}
+				s.AdditionalDatabaseConfigurationProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DatabaseConfiguration) MarshalJSON() ([]byte, error) {
+	type opt DatabaseConfiguration
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalDatabaseConfigurationProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalDatabaseConfigurationProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDatabaseConfiguration returns a DatabaseConfiguration.
+func NewDatabaseConfiguration() *DatabaseConfiguration {
+	r := &DatabaseConfiguration{
+		AdditionalDatabaseConfigurationProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type DatabaseConfigurationVariant interface {
+	DatabaseConfigurationCaster() *DatabaseConfiguration
+}
+
+func (s *DatabaseConfiguration) DatabaseConfigurationCaster() *DatabaseConfiguration {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfigurationfull.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfigurationfull.go
new file mode 100644
index 000000000..9452e1dea
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/databaseconfigurationfull.go
@@ -0,0 +1,138 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// DatabaseConfigurationFull type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L39-L53
+type DatabaseConfigurationFull struct {
+	AdditionalDatabaseConfigurationFullProperty map[string]json.RawMessage `json:"-"`
+	Ipinfo                                      *Ipinfo                    `json:"ipinfo,omitempty"`
+	Local                                       *Local                     `json:"local,omitempty"`
+	Maxmind                                     *Maxmind                   `json:"maxmind,omitempty"`
+	// Name The provider-assigned name of the IP geolocation database to download.
+	Name string `json:"name"`
+	Web  *Web   `json:"web,omitempty"`
+}
+
+func (s *DatabaseConfigurationFull) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "ipinfo":
+			if err := dec.Decode(&s.Ipinfo); err != nil {
+				return fmt.Errorf("%s | %w", "Ipinfo", err)
+			}
+
+		case "local":
+			if err := dec.Decode(&s.Local); err != nil {
+				return fmt.Errorf("%s | %w", "Local", err)
+			}
+
+		case "maxmind":
+			if err := dec.Decode(&s.Maxmind); err != nil {
+				return fmt.Errorf("%s | %w", "Maxmind", err)
+			}
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		case "web":
+			if err := dec.Decode(&s.Web); err != nil {
+				return fmt.Errorf("%s | %w", "Web", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalDatabaseConfigurationFullProperty == nil {
+					s.AdditionalDatabaseConfigurationFullProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalDatabaseConfigurationFullProperty", err)
+				}
+				s.AdditionalDatabaseConfigurationFullProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DatabaseConfigurationFull) MarshalJSON() ([]byte, error) {
+	type opt DatabaseConfigurationFull
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalDatabaseConfigurationFullProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalDatabaseConfigurationFullProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDatabaseConfigurationFull returns a DatabaseConfigurationFull.
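+//
+// A round-trip sketch (illustrative, not generated): keys with no matching
+// struct field are parked in the additional-properties map on decode and
+// inlined again by MarshalJSON, so they survive a decode/encode cycle:
+//
+//	var c DatabaseConfigurationFull
+//	_ = json.Unmarshal([]byte(`{"name":"my-db","custom_key":42}`), &c)
+//	// c.AdditionalDatabaseConfigurationFullProperty["custom_key"] == json.RawMessage("42")
+//	out, _ := json.Marshal(c) // out contains both "name" and "custom_key"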
+func NewDatabaseConfigurationFull() *DatabaseConfigurationFull {
+	r := &DatabaseConfigurationFull{
+		AdditionalDatabaseConfigurationFullProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datacounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datacounts.go
new file mode 100644
index 000000000..f9439171a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datacounts.go
@@ -0,0 +1,357 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataCounts type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L352-L372
+type DataCounts struct {
+	BucketCount                 int64  `json:"bucket_count"`
+	EarliestRecordTimestamp     *int64 `json:"earliest_record_timestamp,omitempty"`
+	EmptyBucketCount            int64  `json:"empty_bucket_count"`
+	InputBytes                  int64  `json:"input_bytes"`
+	InputFieldCount             int64  `json:"input_field_count"`
+	InputRecordCount            int64  `json:"input_record_count"`
+	InvalidDateCount            int64  `json:"invalid_date_count"`
+	JobId                       string `json:"job_id"`
+	LastDataTime                *int64 `json:"last_data_time,omitempty"`
+	LatestBucketTimestamp       *int64 `json:"latest_bucket_timestamp,omitempty"`
+	LatestEmptyBucketTimestamp  *int64 `json:"latest_empty_bucket_timestamp,omitempty"`
+	LatestRecordTimestamp       *int64 `json:"latest_record_timestamp,omitempty"`
+	LatestSparseBucketTimestamp *int64 `json:"latest_sparse_bucket_timestamp,omitempty"`
+	LogTime                     *int64 `json:"log_time,omitempty"`
+	MissingFieldCount           int64  `json:"missing_field_count"`
+	OutOfOrderTimestampCount    int64  `json:"out_of_order_timestamp_count"`
+	ProcessedFieldCount         int64  `json:"processed_field_count"`
+	ProcessedRecordCount        int64  `json:"processed_record_count"`
+	SparseBucketCount           int64  `json:"sparse_bucket_count"`
+}
+
+func (s *DataCounts) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bucket_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "BucketCount", err)
+				}
+				s.BucketCount = value
+			case float64:
+				f := int64(v)
+				s.BucketCount = f
+			}
+
+		case "earliest_record_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "EarliestRecordTimestamp", err)
+				}
+				s.EarliestRecordTimestamp = &value
+			case float64:
+				f := int64(v)
+				s.EarliestRecordTimestamp = &f
+			}
+
+		case "empty_bucket_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "EmptyBucketCount", err)
+				}
+				s.EmptyBucketCount = value
+			case float64:
+				f := int64(v)
+				s.EmptyBucketCount = f
+			}
+
+		case "input_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InputBytes", err)
+				}
+				s.InputBytes = value
+			case float64:
+				f := int64(v)
+				s.InputBytes = f
+			}
+
+		case "input_field_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InputFieldCount", err)
+				}
+				s.InputFieldCount = value
+			case float64:
+				f := int64(v)
+				s.InputFieldCount = f
+			}
+
+		case "input_record_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InputRecordCount", err)
+				}
+				s.InputRecordCount = value
+			case float64:
+				f := int64(v)
+				s.InputRecordCount = f
+			}
+
+		case "invalid_date_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InvalidDateCount", err)
+				}
+				s.InvalidDateCount = value
+			case float64:
+				f := int64(v)
+				s.InvalidDateCount = f
+			}
+
+		case "job_id":
+			if err := dec.Decode(&s.JobId); err != nil {
+				return fmt.Errorf("%s | %w", "JobId", err)
+			}
+
+		case "last_data_time":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LastDataTime", err)
+				}
+				s.LastDataTime = &value
+			case float64:
+				f := int64(v)
+				s.LastDataTime = &f
+			}
+
+		case "latest_bucket_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LatestBucketTimestamp", err)
+				}
+				s.LatestBucketTimestamp = &value
+			case float64:
+				f := int64(v)
+				s.LatestBucketTimestamp = &f
+			}
+
+		case "latest_empty_bucket_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LatestEmptyBucketTimestamp", err)
+				}
+				s.LatestEmptyBucketTimestamp = &value
+			case float64:
+				f := int64(v)
+				s.LatestEmptyBucketTimestamp = &f
+			}
+
+		case "latest_record_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LatestRecordTimestamp", err)
+				}
+				s.LatestRecordTimestamp = &value
+			case float64:
+				f := int64(v)
+				s.LatestRecordTimestamp = &f
+			}
+
+		case "latest_sparse_bucket_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LatestSparseBucketTimestamp", err)
+				}
+				s.LatestSparseBucketTimestamp = &value
+			case float64:
+				f := int64(v)
+				s.LatestSparseBucketTimestamp = &f
+			}
+
+		case "log_time":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LogTime", err)
+				}
+				s.LogTime = &value
+			case float64:
+				f := int64(v)
+				s.LogTime = &f
+			}
+
+		case "missing_field_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MissingFieldCount", err)
+				}
+				s.MissingFieldCount = value
+			case float64:
+				f := int64(v)
+				s.MissingFieldCount = f
+			}
+
+		case "out_of_order_timestamp_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "OutOfOrderTimestampCount", err)
+				}
+				s.OutOfOrderTimestampCount = value
+			case float64:
+				f := int64(v)
+				s.OutOfOrderTimestampCount = f
+			}
+
+		case "processed_field_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ProcessedFieldCount", err)
+				}
+				s.ProcessedFieldCount = value
+			case float64:
+				f := int64(v)
+				s.ProcessedFieldCount = f
+			}
+
+		case "processed_record_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ProcessedRecordCount", err)
+				}
+				s.ProcessedRecordCount = value
+			case float64:
+				f := int64(v)
+				s.ProcessedRecordCount = f
+			}
+
+		case "sparse_bucket_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SparseBucketCount", err)
+				}
+				s.SparseBucketCount = value
+			case float64:
+				f := int64(v)
+				s.SparseBucketCount = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataCounts returns a DataCounts.
+func NewDataCounts() *DataCounts {
+	r := &DataCounts{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datadescription.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datadescription.go
new file mode 100644
index 000000000..aa3255972
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datadescription.go
@@ -0,0 +1,127 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
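+//
+// A decoding sketch for the DataCounts type above (illustrative, not
+// generated): the lenient case clauses accept each counter as a JSON number
+// or as a quoted string:
+//
+//	var dc DataCounts
+//	_ = json.Unmarshal([]byte(`{"bucket_count":"42","input_bytes":1024}`), &dc)
+//	// dc.BucketCount == 42 and dc.InputBytes == 1024 either way
+//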
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataDescription type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L374-L390
+type DataDescription struct {
+	FieldDelimiter *string `json:"field_delimiter,omitempty"`
+	// Format Only JSON format is supported at this time.
+	Format *string `json:"format,omitempty"`
+	// TimeField The name of the field that contains the timestamp.
+	TimeField *string `json:"time_field,omitempty"`
+	// TimeFormat The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The
+	// value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan
+	// 1970). The value `epoch_ms` indicates that time is measured in milliseconds
+	// since the epoch. The `epoch` and `epoch_ms` time formats accept either
+	// integer or real values. Custom patterns must conform to the Java
+	// DateTimeFormatter class. When you use date-time formatting patterns, it is
+	// recommended that you provide the full date, time and time zone. For example:
+	// `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient
+	// to produce a complete timestamp, job creation fails.
+	TimeFormat *string `json:"time_format,omitempty"`
+}
+
+func (s *DataDescription) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field_delimiter":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "FieldDelimiter", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.FieldDelimiter = &o
+
+		case "format":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Format", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Format = &o
+
+		case "time_field":
+			if err := dec.Decode(&s.TimeField); err != nil {
+				return fmt.Errorf("%s | %w", "TimeField", err)
+			}
+
+		case "time_format":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "TimeFormat", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.TimeFormat = &o
+
+		}
+	}
+	return nil
+}
+
+// NewDataDescription returns a DataDescription.
+func NewDataDescription() *DataDescription {
+	r := &DataDescription{}
+
+	return r
+}
+
+type DataDescriptionVariant interface {
+	DataDescriptionCaster() *DataDescription
+}
+
+func (s *DataDescription) DataDescriptionCaster() *DataDescription {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataemailattachment.go
new file mode 100644
index 000000000..b81f3cd17
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataemailattachment.go
@@ -0,0 +1,47 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataattachmentformat"
+)
+
+// DataEmailAttachment type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L234-L236
+type DataEmailAttachment struct {
+	Format *dataattachmentformat.DataAttachmentFormat `json:"format,omitempty"`
+}
+
+// NewDataEmailAttachment returns a DataEmailAttachment.
+func NewDataEmailAttachment() *DataEmailAttachment {
+	r := &DataEmailAttachment{}
+
+	return r
+}
+
+type DataEmailAttachmentVariant interface {
+	DataEmailAttachmentCaster() *DataEmailAttachment
+}
+
+func (s *DataEmailAttachment) DataEmailAttachmentCaster() *DataEmailAttachment {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedauthorization.go
new file mode 100644
index 000000000..280d936ca
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedauthorization.go
@@ -0,0 +1,94 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DatafeedAuthorization type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Authorization.ts#L31-L43
+type DatafeedAuthorization struct {
+	// ApiKey If an API key was used for the most recent update to the datafeed, its name
+	// and identifier are listed in the response.
+	ApiKey *ApiKeyAuthorization `json:"api_key,omitempty"`
+	// Roles If a user ID was used for the most recent update to the datafeed, its roles
+	// at the time of the update are listed in the response.
+ Roles []string `json:"roles,omitempty"` + // ServiceAccount If a service account was used for the most recent update to the datafeed, the + // account name is listed in the response. + ServiceAccount *string `json:"service_account,omitempty"` +} + +func (s *DatafeedAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceAccount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + +// NewDatafeedAuthorization returns a DatafeedAuthorization. +func NewDatafeedAuthorization() *DatafeedAuthorization { + r := &DatafeedAuthorization{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedconfig.go new file mode 100644 index 000000000..ee694b870 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedconfig.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DatafeedConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L63-L120 +type DatafeedConfig struct { + // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations + // is limited and should be used only with low cardinality data. + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // ChunkingConfig Datafeeds might be required to search over long time periods, for several + // months or years. This search is split into time chunks in order to ensure the + // load on Elasticsearch is managed. Chunking configuration controls how the + // size of these time chunks are calculated and is an advanced configuration + // option. 
+ ChunkingConfig *ChunkingConfig `json:"chunking_config,omitempty"` + // DatafeedId A numerical character string that uniquely identifies the datafeed. This + // identifier can contain lowercase alphanumeric characters (a-z and 0-9), + // hyphens, and underscores. It must start and end with alphanumeric characters. + // The default value is the job identifier. + DatafeedId *string `json:"datafeed_id,omitempty"` + // DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the + // window. The datafeed can optionally search over indices that have already + // been read in an effort to determine whether any data has subsequently been + // added to the index. If missing data is found, it is a good indication that + // the `query_delay` option is set too low and the data is being indexed after + // the datafeed has passed that moment in time. This check runs only on + // real-time datafeeds. + DelayedDataCheckConfig *DelayedDataCheckConfig `json:"delayed_data_check_config,omitempty"` + // Frequency The interval at which scheduled queries are made while the datafeed runs in + // real time. The default value is either the bucket span for short bucket + // spans, or, for longer bucket spans, a sensible fraction of the bucket span. + // For example: `150s`. When `frequency` is shorter than the bucket span, + // interim results for the last (partial) bucket are written then eventually + // overwritten by the full bucket results. If the datafeed uses aggregations, + // this value must be divisible by the interval of the date histogram + // aggregation. + Frequency Duration `json:"frequency,omitempty"` + // Indices An array of index names. Wildcards are supported. If any indices are in + // remote clusters, the machine learning nodes must have the + // `remote_cluster_client` role. + Indices []string `json:"indices,omitempty"` + // IndicesOptions Specifies index expansion options that are used during search. + IndicesOptions *IndicesOptions `json:"indices_options,omitempty"` + JobId *string `json:"job_id,omitempty"` + // MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial + // training period) then it will automatically stop itself and close its + // associated job after this many real-time searches that return no documents. + // In other words, it will stop after `frequency` times `max_empty_searches` of + // real-time operation. If not set then a datafeed with no end time that sees no + // data will remain started until it is explicitly stopped. + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + // Query The Elasticsearch query domain-specific language (DSL). This value + // corresponds to the query object in an Elasticsearch search POST body. All the + // options that are supported by Elasticsearch can be used, as this object is + // passed verbatim to Elasticsearch. + Query *Query `json:"query,omitempty"` + // QueryDelay The number of seconds behind real time that data is queried. For example, if + // data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 + // a.m., set this property to 120 seconds. The default value is randomly + // selected between `60s` and `120s`. This randomness improves the query + // performance when there are multiple jobs running on the same node. + QueryDelay Duration `json:"query_delay,omitempty"` + // RuntimeMappings Specifies runtime fields for the datafeed search. 
+	RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"`
+	// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields
+	// to the datafeed. The detector configuration objects in a job can contain
+	// functions that use these script fields.
+	ScriptFields map[string]ScriptField `json:"script_fields,omitempty"`
+	// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed
+	// does not use aggregations. The maximum value is the value of
+	// `index.max_result_window`, which is 10,000 by default.
+	ScrollSize *int `json:"scroll_size,omitempty"`
+}
+
+func (s *DatafeedConfig) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "aggregations", "aggs":
+			if s.Aggregations == nil {
+				s.Aggregations = make(map[string]Aggregations, 0)
+			}
+			if err := dec.Decode(&s.Aggregations); err != nil {
+				return fmt.Errorf("%s | %w", "Aggregations", err)
+			}
+
+		case "chunking_config":
+			if err := dec.Decode(&s.ChunkingConfig); err != nil {
+				return fmt.Errorf("%s | %w", "ChunkingConfig", err)
+			}
+
+		case "datafeed_id":
+			if err := dec.Decode(&s.DatafeedId); err != nil {
+				return fmt.Errorf("%s | %w", "DatafeedId", err)
+			}
+
+		case "delayed_data_check_config":
+			if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil {
+				return fmt.Errorf("%s | %w", "DelayedDataCheckConfig", err)
+			}
+
+		case "frequency":
+			if err := dec.Decode(&s.Frequency); err != nil {
+				return fmt.Errorf("%s | %w", "Frequency", err)
+			}
+
+		case "indices", "indexes":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+
+				s.Indices = append(s.Indices, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+			}
+
+		case "indices_options":
+			if err := dec.Decode(&s.IndicesOptions); err != nil {
+				return fmt.Errorf("%s | %w", "IndicesOptions", err)
+			}
+
+		case "job_id":
+			if err := dec.Decode(&s.JobId); err != nil {
+				return fmt.Errorf("%s | %w", "JobId", err)
+			}
+
+		case "max_empty_searches":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxEmptySearches", err)
+				}
+				s.MaxEmptySearches = &value
+			case float64:
+				f := int(v)
+				s.MaxEmptySearches = &f
+			}
+
+		case "query":
+			if err := dec.Decode(&s.Query); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+
+		case "query_delay":
+			if err := dec.Decode(&s.QueryDelay); err != nil {
+				return fmt.Errorf("%s | %w", "QueryDelay", err)
+			}
+
+		case "runtime_mappings":
+			if err := dec.Decode(&s.RuntimeMappings); err != nil {
+				return fmt.Errorf("%s | %w", "RuntimeMappings", err)
+			}
+
+		case "script_fields":
+			if s.ScriptFields == nil {
+				s.ScriptFields = make(map[string]ScriptField, 0)
+			}
+			if err := dec.Decode(&s.ScriptFields); err != nil {
+				return fmt.Errorf("%s | %w", "ScriptFields", err)
+			}
+
+		case "scroll_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ScrollSize", err)
+				}
+				s.ScrollSize = &value
+			case float64:
+				f := int(v)
+				s.ScrollSize = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDatafeedConfig returns a DatafeedConfig.
+func NewDatafeedConfig() *DatafeedConfig {
+	r := &DatafeedConfig{
+		Aggregations: make(map[string]Aggregations),
+		ScriptFields: make(map[string]ScriptField),
+	}
+
+	return r
+}
+
+type DatafeedConfigVariant interface {
+	DatafeedConfigCaster() *DatafeedConfig
+}
+
+func (s *DatafeedConfig) DatafeedConfigCaster() *DatafeedConfig {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedrunningstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedrunningstate.go
new file mode 100644
index 000000000..7782286e2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedrunningstate.go
@@ -0,0 +1,106 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DatafeedRunningState type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L210-L224
+type DatafeedRunningState struct {
+	// RealTimeConfigured Indicates if the datafeed is "real-time"; meaning that the datafeed has no
+	// configured `end` time.
+	RealTimeConfigured bool `json:"real_time_configured"`
+	// RealTimeRunning Indicates whether the datafeed has finished running on the available past
+	// data.
+	// For datafeeds without a configured `end` time, this means that the datafeed
+	// is now running on "real-time" data.
+	RealTimeRunning bool `json:"real_time_running"`
+	// SearchInterval Provides the latest time interval the datafeed has searched.
+ SearchInterval *RunningStateSearchInterval `json:"search_interval,omitempty"` +} + +func (s *DatafeedRunningState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "real_time_configured": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RealTimeConfigured", err) + } + s.RealTimeConfigured = value + case bool: + s.RealTimeConfigured = v + } + + case "real_time_running": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RealTimeRunning", err) + } + s.RealTimeRunning = value + case bool: + s.RealTimeRunning = v + } + + case "search_interval": + if err := dec.Decode(&s.SearchInterval); err != nil { + return fmt.Errorf("%s | %w", "SearchInterval", err) + } + + } + } + return nil +} + +// NewDatafeedRunningState returns a DatafeedRunningState. +func NewDatafeedRunningState() *DatafeedRunningState { + r := &DatafeedRunningState{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeeds.go new file mode 100644 index 000000000..37d05c9a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeeds.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Datafeeds type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/types.ts#L42-L44 +type Datafeeds struct { + ScrollSize int `json:"scroll_size"` +} + +func (s *Datafeeds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scroll_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScrollSize", err) + } + s.ScrollSize = value + case float64: + f := int(v) + s.ScrollSize = f + } + + } + } + return nil +} + +// NewDatafeeds returns a Datafeeds. 
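+//
+// A decoding sketch (illustrative, not generated): the custom UnmarshalJSON
+// above lets the limit survive whether Elasticsearch reports it as a number
+// or as a string:
+//
+//	var d Datafeeds
+//	_ = json.Unmarshal([]byte(`{"scroll_size":"1000"}`), &d)
+//	// d.ScrollSize == 1000, exactly as for {"scroll_size":1000}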
+func NewDatafeeds() *Datafeeds { + r := &Datafeeds{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedsrecord.go new file mode 100644 index 000000000..61fadb589 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedsrecord.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate" +) + +// DatafeedsRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_datafeeds/types.ts#L22-L87 +type DatafeedsRecord struct { + // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a + // node. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // BucketsCount The number of buckets processed. + BucketsCount *string `json:"buckets.count,omitempty"` + // Id The datafeed identifier. + Id *string `json:"id,omitempty"` + // NodeAddress The network address of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. + NodeAddress *string `json:"node.address,omitempty"` + // NodeEphemeralId The ephemeral identifier of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. + NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"` + // NodeId The unique identifier of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. + NodeId *string `json:"node.id,omitempty"` + // NodeName The name of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. + NodeName *string `json:"node.name,omitempty"` + // SearchBucketAvg The average search time per bucket, in milliseconds. + SearchBucketAvg *string `json:"search.bucket_avg,omitempty"` + // SearchCount The number of searches run by the datafeed. + SearchCount *string `json:"search.count,omitempty"` + // SearchExpAvgHour The exponential average search time per hour, in milliseconds. + SearchExpAvgHour *string `json:"search.exp_avg_hour,omitempty"` + // SearchTime The total time the datafeed spent searching, in milliseconds. 
+	SearchTime *string `json:"search.time,omitempty"`
+	// State The status of the datafeed.
+	State *datafeedstate.DatafeedState `json:"state,omitempty"`
+}
+
+func (s *DatafeedsRecord) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "assignment_explanation", "ae":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AssignmentExplanation", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AssignmentExplanation = &o
+
+		case "buckets.count", "bc", "bucketsCount":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "BucketsCount", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.BucketsCount = &o
+
+		case "id":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Id = &o
+
+		case "node.address", "na", "nodeAddress":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "NodeAddress", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.NodeAddress = &o
+
+		case "node.ephemeral_id", "ne", "nodeEphemeralId":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "NodeEphemeralId", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.NodeEphemeralId = &o
+
+		case "node.id", "ni", "nodeId":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "NodeId", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.NodeId = &o
+
+		case "node.name", "nn", "nodeName":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "NodeName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.NodeName = &o
+
+		case "search.bucket_avg", "sba", "searchBucketAvg":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SearchBucketAvg", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SearchBucketAvg = &o
+
+		case "search.count", "sc", "searchCount":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SearchCount", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SearchCount = &o
+
+		case "search.exp_avg_hour", "seah", "searchExpAvgHour":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SearchExpAvgHour", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SearchExpAvgHour = &o
+
+		case "search.time", "st", "searchTime":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SearchTime", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SearchTime = &o
+
+		case "state", "s":
+			if err := dec.Decode(&s.State); err != nil {
+				return fmt.Errorf("%s | %w", "State", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDatafeedsRecord returns a DatafeedsRecord.
+func NewDatafeedsRecord() *DatafeedsRecord {
+	r := &DatafeedsRecord{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedstats.go
new file mode 100644
index 000000000..cec150a28
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedstats.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate"
+)
+
+// DatafeedStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L143-L172
+type DatafeedStats struct {
+	// AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a
+	// node.
+	AssignmentExplanation *string `json:"assignment_explanation,omitempty"`
+	// DatafeedId A numerical character string that uniquely identifies the datafeed.
+	// This identifier can contain lowercase alphanumeric characters (a-z and 0-9),
+	// hyphens, and underscores.
+	// It must start and end with alphanumeric characters.
+	DatafeedId string `json:"datafeed_id"`
+	// Node For started datafeeds only, this information pertains to the node upon which
+	// the datafeed is started.
+	Node *DiscoveryNodeCompact `json:"node,omitempty"`
+	// RunningState An object containing the running state for this datafeed.
+	// It is only provided if the datafeed is started.
+	RunningState *DatafeedRunningState `json:"running_state,omitempty"`
+	// State The status of the datafeed, which can be one of the following values:
+	// `starting`, `started`, `stopping`, `stopped`.
+	State datafeedstate.DatafeedState `json:"state"`
+	// TimingStats An object that provides statistical information about timing aspect of this
+	// datafeed.
+ TimingStats *DatafeedTimingStats `json:"timing_stats,omitempty"` +} + +func (s *DatafeedStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "datafeed_id": + if err := dec.Decode(&s.DatafeedId); err != nil { + return fmt.Errorf("%s | %w", "DatafeedId", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "running_state": + if err := dec.Decode(&s.RunningState); err != nil { + return fmt.Errorf("%s | %w", "RunningState", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return fmt.Errorf("%s | %w", "TimingStats", err) + } + + } + } + return nil +} + +// NewDatafeedStats returns a DatafeedStats. +func NewDatafeedStats() *DatafeedStats { + r := &DatafeedStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedtimingstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedtimingstats.go new file mode 100644 index 000000000..b20515fe3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datafeedtimingstats.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DatafeedTimingStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L174-L202 +type DatafeedTimingStats struct { + // AverageSearchTimePerBucketMs The average search time per bucket, in milliseconds. + AverageSearchTimePerBucketMs Float64 `json:"average_search_time_per_bucket_ms,omitempty"` + // BucketCount The number of buckets processed. + BucketCount int64 `json:"bucket_count"` + ExponentialAverageCalculationContext *ExponentialAverageCalculationContext `json:"exponential_average_calculation_context,omitempty"` + // ExponentialAverageSearchTimePerHourMs The exponential average search time per hour, in milliseconds. 
+ ExponentialAverageSearchTimePerHourMs Float64 `json:"exponential_average_search_time_per_hour_ms"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // SearchCount The number of searches run by the datafeed. + SearchCount int64 `json:"search_count"` + // TotalSearchTimeMs The total time the datafeed spent searching, in milliseconds. + TotalSearchTimeMs Float64 `json:"total_search_time_ms"` +} + +func (s *DatafeedTimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_search_time_per_bucket_ms": + if err := dec.Decode(&s.AverageSearchTimePerBucketMs); err != nil { + return fmt.Errorf("%s | %w", "AverageSearchTimePerBucketMs", err) + } + + case "bucket_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BucketCount", err) + } + s.BucketCount = value + case float64: + f := int64(v) + s.BucketCount = f + } + + case "exponential_average_calculation_context": + if err := dec.Decode(&s.ExponentialAverageCalculationContext); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageCalculationContext", err) + } + + case "exponential_average_search_time_per_hour_ms": + if err := dec.Decode(&s.ExponentialAverageSearchTimePerHourMs); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageSearchTimePerHourMs", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "search_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SearchCount", err) + } + s.SearchCount = value + case float64: + f := int64(v) + s.SearchCount = f + } + + case "total_search_time_ms": + if err := dec.Decode(&s.TotalSearchTimeMs); err != nil { + return fmt.Errorf("%s | %w", "TotalSearchTimeMs", err) + } + + } + } + return nil +} + +// NewDatafeedTimingStats returns a DatafeedTimingStats. +func NewDatafeedTimingStats() *DatafeedTimingStats { + r := &DatafeedTimingStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisanalyzedfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisanalyzedfields.go new file mode 100644 index 000000000..d885c2450 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisanalyzedfields.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// DataframeAnalysisAnalyzedFields type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L238-L244
+type DataframeAnalysisAnalyzedFields struct {
+ // Excludes An array of strings that defines the fields that will be excluded from the
+ // analysis. You do not need to add fields with unsupported data types to
+ // excludes, these fields are excluded from the analysis automatically.
+ Excludes []string `json:"excludes,omitempty"`
+ // Includes An array of strings that defines the fields that will be included in the
+ // analysis.
+ Includes []string `json:"includes,omitempty"`
+}
+
+func (s *DataframeAnalysisAnalyzedFields) UnmarshalJSON(data []byte) error {
+
+ if !bytes.HasPrefix(data, []byte(`{`)) {
+ var item string
+ err := json.NewDecoder(bytes.NewReader(data)).Decode(&item)
+ if err != nil {
+ return err
+ }
+ s.Includes = append(s.Includes, item)
+ return nil
+ }
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "excludes":
+ if err := dec.Decode(&s.Excludes); err != nil {
+ return fmt.Errorf("%s | %w", "Excludes", err)
+ }
+
+ case "includes":
+ if err := dec.Decode(&s.Includes); err != nil {
+ return fmt.Errorf("%s | %w", "Includes", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewDataframeAnalysisAnalyzedFields returns a DataframeAnalysisAnalyzedFields.
+func NewDataframeAnalysisAnalyzedFields() *DataframeAnalysisAnalyzedFields {
+ r := &DataframeAnalysisAnalyzedFields{}
+
+ return r
+}
+
+type DataframeAnalysisAnalyzedFieldsVariant interface {
+ DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields
+}
+
+func (s *DataframeAnalysisAnalyzedFields) DataframeAnalysisAnalyzedFieldsCaster() *DataframeAnalysisAnalyzedFields {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisclassification.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisclassification.go
new file mode 100644
index 000000000..fb3a76f90
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisclassification.go
@@ -0,0 +1,470 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
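DataframeAnalysisAnalyzedFields above accepts two wire shapes, a bare string (treated as a single include) and an object with includes/excludes, dispatched by checking whether the payload starts with `{`. A self-contained sketch of that dispatch using a simplified local type rather than the vendored one:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// analyzedFields mirrors the two accepted shapes of "analyzed_fields".
type analyzedFields struct {
	Includes []string `json:"includes,omitempty"`
	Excludes []string `json:"excludes,omitempty"`
}

func (a *analyzedFields) UnmarshalJSON(data []byte) error {
	// A bare JSON string is shorthand for a single include.
	if !bytes.HasPrefix(bytes.TrimSpace(data), []byte(`{`)) {
		var item string
		if err := json.Unmarshal(data, &item); err != nil {
			return err
		}
		a.Includes = append(a.Includes, item)
		return nil
	}
	type plain analyzedFields // alias avoids recursing into UnmarshalJSON
	return json.Unmarshal(data, (*plain)(a))
}

func main() {
	var short, full analyzedFields
	_ = json.Unmarshal([]byte(`"field_a"`), &short)
	_ = json.Unmarshal([]byte(`{"includes":["a"],"excludes":["b"]}`), &full)
	fmt.Printf("%+v %+v\n", short, full)
}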
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisClassification type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L227-L236 +type DataframeAnalysisClassification struct { + // Alpha Advanced configuration option. Machine learning uses loss guided tree + // growing, which means that the decision trees grow where the regularized loss + // decreases most quickly. This parameter affects loss calculations by acting as + // a multiplier of the tree depth. Higher alpha values result in shallower trees + // and faster training times. By default, this value is calculated during + // hyperparameter optimization. It must be greater than or equal to zero. + Alpha *Float64 `json:"alpha,omitempty"` + ClassAssignmentObjective *string `json:"class_assignment_objective,omitempty"` + // DependentVariable Defines which field of the document is to be predicted. It must match one of + // the fields in the index being used to train. If this field is missing from a + // document, then that document will not be used for training, but a prediction + // with the trained model will be generated for it. It is also known as + // continuous target variable. + // For classification analysis, the data type of the field must be numeric + // (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or + // `boolean`. There must be no more than 30 different values in this field. + // For regression analysis, the data type of the field must be numeric. + DependentVariable string `json:"dependent_variable"` + // DownsampleFactor Advanced configuration option. Controls the fraction of data that is used to + // compute the derivatives of the loss function for tree training. A small value + // results in the use of a small fraction of the data. If this value is set to + // be less than 1, accuracy typically improves. However, too small a value may + // result in poor convergence for the ensemble and so require more trees. By + // default, this value is calculated during hyperparameter optimization. It must + // be greater than zero and less than or equal to 1. + DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` + // EarlyStoppingEnabled Advanced configuration option. Specifies whether the training process should + // finish if it is not finding any better performing models. If disabled, the + // training process can take significantly longer and the chance of finding a + // better performing model is unremarkable. + EarlyStoppingEnabled *bool `json:"early_stopping_enabled,omitempty"` + // Eta Advanced configuration option. The shrinkage applied to the weights. Smaller + // values result in larger forests which have a better generalization error. + // However, larger forests cause slower training. By default, this value is + // calculated during hyperparameter optimization. It must be a value between + // 0.001 and 1. + Eta *Float64 `json:"eta,omitempty"` + // EtaGrowthRatePerTree Advanced configuration option. Specifies the rate at which `eta` increases + // for each new tree that is added to the forest. For example, a rate of 1.05 + // increases `eta` by 5% for each extra tree. By default, this value is + // calculated during hyperparameter optimization. It must be between 0.5 and 2. 
+ EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` + // FeatureBagFraction Advanced configuration option. Defines the fraction of features that will be + // used when selecting a random bag for each candidate split. By default, this + // value is calculated during hyperparameter optimization. + FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` + // FeatureProcessors Advanced configuration option. A collection of feature preprocessors that + // modify one or more included fields. The analysis uses the resulting one or + // more features instead of the original document field. However, these features + // are ephemeral; they are not stored in the destination index. Multiple + // `feature_processors` entries can refer to the same document fields. Automatic + // categorical feature encoding still occurs for the fields that are unprocessed + // by a custom processor or that have categorical values. Use this property only + // if you want to override the automatic feature encoding of the specified + // fields. + FeatureProcessors []DataframeAnalysisFeatureProcessor `json:"feature_processors,omitempty"` + // Gamma Advanced configuration option. Regularization parameter to prevent + // overfitting on the training data set. Multiplies a linear penalty associated + // with the size of individual trees in the forest. A high gamma value causes + // training to prefer small trees. A small gamma value results in larger + // individual trees and slower training. By default, this value is calculated + // during hyperparameter optimization. It must be a nonnegative value. + Gamma *Float64 `json:"gamma,omitempty"` + // Lambda Advanced configuration option. Regularization parameter to prevent + // overfitting on the training data set. Multiplies an L2 regularization term + // which applies to leaf weights of the individual trees in the forest. A high + // lambda value causes training to favor small leaf weights. This behavior makes + // the prediction function smoother at the expense of potentially not being able + // to capture relevant relationships between the features and the dependent + // variable. A small lambda value results in large individual trees and slower + // training. By default, this value is calculated during hyperparameter + // optimization. It must be a nonnegative value. + Lambda *Float64 `json:"lambda,omitempty"` + // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. A multiplier responsible for determining the + // maximum number of hyperparameter optimization steps in the Bayesian + // optimization procedure. The maximum number of steps is determined based on + // the number of undefined hyperparameters times the maximum optimization rounds + // per hyperparameter. By default, this value is calculated during + // hyperparameter optimization. + MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` + // MaxTrees Advanced configuration option. Defines the maximum number of decision trees + // in the forest. The maximum value is 2000. By default, this value is + // calculated during hyperparameter optimization. + MaxTrees *int `json:"max_trees,omitempty"` + // NumTopClasses Defines the number of categories for which the predicted probabilities are + // reported. It must be non-negative or -1. 
If it is -1 or greater than the + // total number of categories, probabilities are reported for all categories; if + // you have a large number of categories, there could be a significant effect on + // the size of your destination index. NOTE: To use the AUC ROC evaluation + // method, `num_top_classes` must be set to -1 or a value greater than or equal + // to the total number of categories. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // NumTopFeatureImportanceValues Advanced configuration option. Specifies the maximum number of feature + // importance values per document to return. By default, no feature importance + // calculation occurs. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // PredictionFieldName Defines the name of the prediction field in the results. Defaults to + // `_prediction`. + PredictionFieldName *string `json:"prediction_field_name,omitempty"` + // RandomizeSeed Defines the seed for the random generator that is used to pick training data. + // By default, it is randomly generated. Set it to a specific value to use the + // same training data each time you start a job (assuming other related + // parameters such as `source` and `analyzed_fields` are the same). + RandomizeSeed *Float64 `json:"randomize_seed,omitempty"` + // SoftTreeDepthLimit Advanced configuration option. Machine learning uses loss guided tree + // growing, which means that the decision trees grow where the regularized loss + // decreases most quickly. This soft limit combines with the + // `soft_tree_depth_tolerance` to penalize trees that exceed the specified + // depth; the regularized loss increases quickly beyond this depth. By default, + // this value is calculated during hyperparameter optimization. It must be + // greater than or equal to 0. + SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` + // SoftTreeDepthTolerance Advanced configuration option. This option controls how quickly the + // regularized loss increases when the tree depth exceeds + // `soft_tree_depth_limit`. By default, this value is calculated during + // hyperparameter optimization. It must be greater than or equal to 0.01. + SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` + // TrainingPercent Defines what percentage of the eligible documents that will be used for + // training. Documents that are ignored by the analysis (for example those that + // contain arrays with more than one value) won’t be included in the calculation + // for used percentage. 
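Tying several of the documented options together, the analysis section of a classification job body might look like the following sketch; field values are illustrative, with num_top_classes set to -1 so probabilities are reported for every category, per the note above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical analysis section of a data frame analytics job.
	analysis := map[string]any{
		"classification": map[string]any{
			"dependent_variable": "label", // field to predict
			"num_top_classes":    -1,      // report probabilities for all categories
			"training_percent":   75,      // 75% of eligible docs used for training
		},
	}
	body, _ := json.MarshalIndent(analysis, "", "  ")
	fmt.Println(string(body))
}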
+ TrainingPercent Percentage `json:"training_percent,omitempty"` +} + +func (s *DataframeAnalysisClassification) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "class_assignment_objective": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClassAssignmentObjective", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassAssignmentObjective = &o + + case "dependent_variable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DependentVariable", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DependentVariable = o + + case "downsample_factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DownsampleFactor", err) + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case "early_stopping_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EarlyStoppingEnabled", err) + } + s.EarlyStoppingEnabled = &value + case bool: + s.EarlyStoppingEnabled = &v + } + + case "eta": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Eta", err) + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EtaGrowthRatePerTree", err) + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FeatureBagFraction", err) + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "feature_processors": + if err := dec.Decode(&s.FeatureProcessors); err != nil { + return fmt.Errorf("%s | %w", "FeatureProcessors", err) + } + + case "gamma": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Gamma", err) + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lambda", err) + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) 
+ s.Lambda = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOptimizationRoundsPerHyperparameter", err) + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees", "maximum_number_trees": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTrees", err) + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_name": + if err := dec.Decode(&s.PredictionFieldName); err != nil { + return fmt.Errorf("%s | %w", "PredictionFieldName", err) + } + + case "randomize_seed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RandomizeSeed", err) + } + f := Float64(value) + s.RandomizeSeed = &f + case float64: + f := Float64(v) + s.RandomizeSeed = &f + } + + case "soft_tree_depth_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthLimit", err) + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthTolerance", err) + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + case "training_percent": + if err := dec.Decode(&s.TrainingPercent); err != nil { + return fmt.Errorf("%s | %w", "TrainingPercent", err) + } + + } + } + return nil +} + +// NewDataframeAnalysisClassification returns a DataframeAnalysisClassification. 
+func NewDataframeAnalysisClassification() *DataframeAnalysisClassification { + r := &DataframeAnalysisClassification{} + + return r +} + +type DataframeAnalysisClassificationVariant interface { + DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification +} + +func (s *DataframeAnalysisClassification) DataframeAnalysisClassificationCaster() *DataframeAnalysisClassification { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysiscontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysiscontainer.go new file mode 100644 index 000000000..3541a1a74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysiscontainer.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// DataframeAnalysisContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L84-L101 +type DataframeAnalysisContainer struct { + AdditionalDataframeAnalysisContainerProperty map[string]json.RawMessage `json:"-"` + // Classification The configuration information necessary to perform classification. + Classification *DataframeAnalysisClassification `json:"classification,omitempty"` + // OutlierDetection The configuration information necessary to perform outlier detection. NOTE: + // Advanced parameters are for fine-tuning classification analysis. They are set + // automatically by hyperparameter optimization to give the minimum validation + // error. It is highly recommended to use the default values unless you fully + // understand the function of these parameters. + OutlierDetection *DataframeAnalysisOutlierDetection `json:"outlier_detection,omitempty"` + // Regression The configuration information necessary to perform regression. NOTE: Advanced + // parameters are for fine-tuning regression analysis. They are set + // automatically by hyperparameter optimization to give the minimum validation + // error. It is highly recommended to use the default values unless you fully + // understand the function of these parameters. 
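The container types in this file carry an additional-properties map that the MarshalJSON override below inlines into the output object: marshal through a type alias (so the custom method is not re-entered), unmarshal into a map, then merge the extra keys. A minimal standalone version of that round trip, with an illustrative type:

package main

import (
	"encoding/json"
	"fmt"
)

type container struct {
	Known      string                     `json:"known,omitempty"`
	Additional map[string]json.RawMessage `json:"-"`
}

func (c container) MarshalJSON() ([]byte, error) {
	type plain container // alias drops the MarshalJSON method
	tmp := map[string]any{}
	data, err := json.Marshal(plain(c))
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	for k, v := range c.Additional { // inline the extra keys
		tmp[k] = v
	}
	return json.Marshal(tmp)
}

func main() {
	c := container{Known: "x", Additional: map[string]json.RawMessage{"extra": json.RawMessage(`1`)}}
	out, _ := json.Marshal(c)
	fmt.Println(string(out)) // {"extra":1,"known":"x"}
}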
+ Regression *DataframeAnalysisRegression `json:"regression,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DataframeAnalysisContainer) MarshalJSON() ([]byte, error) {
+ type opt DataframeAnalysisContainer
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalDataframeAnalysisContainerProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalDataframeAnalysisContainerProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewDataframeAnalysisContainer returns a DataframeAnalysisContainer.
+func NewDataframeAnalysisContainer() *DataframeAnalysisContainer {
+ r := &DataframeAnalysisContainer{
+ AdditionalDataframeAnalysisContainerProperty: make(map[string]json.RawMessage),
+ }
+
+ return r
+}
+
+type DataframeAnalysisContainerVariant interface {
+ DataframeAnalysisContainerCaster() *DataframeAnalysisContainer
+}
+
+func (s *DataframeAnalysisContainer) DataframeAnalysisContainerCaster() *DataframeAnalysisContainer {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessor.go
new file mode 100644
index 000000000..e00d32531
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessor.go
@@ -0,0 +1,94 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// DataframeAnalysisFeatureProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L246-L258
+type DataframeAnalysisFeatureProcessor struct {
+ AdditionalDataframeAnalysisFeatureProcessorProperty map[string]json.RawMessage `json:"-"`
+ // FrequencyEncoding The configuration information necessary to perform frequency encoding.
+ FrequencyEncoding *DataframeAnalysisFeatureProcessorFrequencyEncoding `json:"frequency_encoding,omitempty"`
+ // MultiEncoding The configuration information necessary to perform multi encoding. It allows
+ // multiple processors to be chained together. 
This way the output of a
+ // processor can then be passed to another as an input.
+ MultiEncoding *DataframeAnalysisFeatureProcessorMultiEncoding `json:"multi_encoding,omitempty"`
+ // NGramEncoding The configuration information necessary to perform n-gram encoding. Features
+ // created by this encoder have the following name format:
+ // <feature_prefix>.<ngram><string position>. For example, if the feature_prefix
+ // is f, the feature name for the second unigram in a string is f.11.
+ NGramEncoding *DataframeAnalysisFeatureProcessorNGramEncoding `json:"n_gram_encoding,omitempty"`
+ // OneHotEncoding The configuration information necessary to perform one hot encoding.
+ OneHotEncoding *DataframeAnalysisFeatureProcessorOneHotEncoding `json:"one_hot_encoding,omitempty"`
+ // TargetMeanEncoding The configuration information necessary to perform target mean encoding.
+ TargetMeanEncoding *DataframeAnalysisFeatureProcessorTargetMeanEncoding `json:"target_mean_encoding,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DataframeAnalysisFeatureProcessor) MarshalJSON() ([]byte, error) {
+ type opt DataframeAnalysisFeatureProcessor
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalDataframeAnalysisFeatureProcessorProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalDataframeAnalysisFeatureProcessorProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewDataframeAnalysisFeatureProcessor returns a DataframeAnalysisFeatureProcessor.
+func NewDataframeAnalysisFeatureProcessor() *DataframeAnalysisFeatureProcessor {
+ r := &DataframeAnalysisFeatureProcessor{
+ AdditionalDataframeAnalysisFeatureProcessorProperty: make(map[string]json.RawMessage),
+ }
+
+ return r
+}
+
+type DataframeAnalysisFeatureProcessorVariant interface {
+ DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor
+}
+
+func (s *DataframeAnalysisFeatureProcessor) DataframeAnalysisFeatureProcessorCaster() *DataframeAnalysisFeatureProcessor {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go
new file mode 100644
index 000000000..8f028793e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go
@@ -0,0 +1,96 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataframeAnalysisFeatureProcessorFrequencyEncoding type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L260-L267 +type DataframeAnalysisFeatureProcessorFrequencyEncoding struct { + // FeatureName The resulting feature name. + FeatureName string `json:"feature_name"` + Field string `json:"field"` + // FrequencyMap The resulting frequency map for the field value. If the field value is + // missing from the frequency_map, the resulting value is 0. + FrequencyMap map[string]Float64 `json:"frequency_map"` +} + +func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "frequency_map": + if s.FrequencyMap == nil { + s.FrequencyMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.FrequencyMap); err != nil { + return fmt.Errorf("%s | %w", "FrequencyMap", err) + } + + } + } + return nil +} + +// NewDataframeAnalysisFeatureProcessorFrequencyEncoding returns a DataframeAnalysisFeatureProcessorFrequencyEncoding. +func NewDataframeAnalysisFeatureProcessorFrequencyEncoding() *DataframeAnalysisFeatureProcessorFrequencyEncoding { + r := &DataframeAnalysisFeatureProcessorFrequencyEncoding{ + FrequencyMap: make(map[string]Float64), + } + + return r +} + +type DataframeAnalysisFeatureProcessorFrequencyEncodingVariant interface { + DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding +} + +func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) DataframeAnalysisFeatureProcessorFrequencyEncodingCaster() *DataframeAnalysisFeatureProcessorFrequencyEncoding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go new file mode 100644 index 000000000..d4f816e83 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
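The frequency_map semantics documented above (field values missing from the map score 0) line up with ordinary Go map behavior, where absent keys yield the zero value. A small illustration with an assumed map:

package main

import "fmt"

func main() {
	// Hypothetical frequency map for a "day_of_week" field.
	frequencyMap := map[string]float64{
		"monday":  0.8,
		"tuesday": 0.2,
	}
	// Absent keys yield the zero value, matching the documented
	// "the resulting value is 0" behavior for unmapped field values.
	fmt.Println(frequencyMap["monday"], frequencyMap["sunday"]) // 0.8 0
}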
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DataframeAnalysisFeatureProcessorMultiEncoding type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L269-L272 +type DataframeAnalysisFeatureProcessorMultiEncoding struct { + // Processors The ordered array of custom processors to execute. Must be more than 1. + Processors []int `json:"processors"` +} + +// NewDataframeAnalysisFeatureProcessorMultiEncoding returns a DataframeAnalysisFeatureProcessorMultiEncoding. +func NewDataframeAnalysisFeatureProcessorMultiEncoding() *DataframeAnalysisFeatureProcessorMultiEncoding { + r := &DataframeAnalysisFeatureProcessorMultiEncoding{} + + return r +} + +type DataframeAnalysisFeatureProcessorMultiEncodingVariant interface { + DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding +} + +func (s *DataframeAnalysisFeatureProcessorMultiEncoding) DataframeAnalysisFeatureProcessorMultiEncodingCaster() *DataframeAnalysisFeatureProcessorMultiEncoding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go new file mode 100644 index 000000000..efce01ea6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisFeatureProcessorNGramEncoding type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L274-L286 +type DataframeAnalysisFeatureProcessorNGramEncoding struct { + Custom *bool `json:"custom,omitempty"` + // FeaturePrefix The feature name prefix. Defaults to ngram__. + FeaturePrefix *string `json:"feature_prefix,omitempty"` + // Field The name of the text field to encode. + Field string `json:"field"` + // Length Specifies the length of the n-gram substring. Defaults to 50. Must be greater + // than 0. + Length *int `json:"length,omitempty"` + // NGrams Specifies which n-grams to gather. It’s an array of integer values where the + // minimum value is 1, and a maximum value is 5. + NGrams []int `json:"n_grams"` + // Start Specifies the zero-indexed start of the n-gram substring. Negative values are + // allowed for encoding n-grams of string suffixes. Defaults to 0. + Start *int `json:"start,omitempty"` +} + +func (s *DataframeAnalysisFeatureProcessorNGramEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Custom", err) + } + s.Custom = &value + case bool: + s.Custom = &v + } + + case "feature_prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeaturePrefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeaturePrefix = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = &value + case float64: + f := int(v) + s.Length = &f + } + + case "n_grams": + if err := dec.Decode(&s.NGrams); err != nil { + return fmt.Errorf("%s | %w", "NGrams", err) + } + + case "start": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + s.Start = &value + case float64: + f := int(v) + s.Start = &f + } + + } + } + return nil +} + +// NewDataframeAnalysisFeatureProcessorNGramEncoding returns a DataframeAnalysisFeatureProcessorNGramEncoding. 
+func NewDataframeAnalysisFeatureProcessorNGramEncoding() *DataframeAnalysisFeatureProcessorNGramEncoding { + r := &DataframeAnalysisFeatureProcessorNGramEncoding{} + + return r +} + +type DataframeAnalysisFeatureProcessorNGramEncodingVariant interface { + DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding +} + +func (s *DataframeAnalysisFeatureProcessorNGramEncoding) DataframeAnalysisFeatureProcessorNGramEncodingCaster() *DataframeAnalysisFeatureProcessorNGramEncoding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go new file mode 100644 index 000000000..b93fc8f8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisFeatureProcessorOneHotEncoding type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L288-L293 +type DataframeAnalysisFeatureProcessorOneHotEncoding struct { + // Field The name of the field to encode. + Field string `json:"field"` + // HotMap The one hot map mapping the field value with the column name. + HotMap string `json:"hot_map"` +} + +func (s *DataframeAnalysisFeatureProcessorOneHotEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "hot_map": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HotMap", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HotMap = o + + } + } + return nil +} + +// NewDataframeAnalysisFeatureProcessorOneHotEncoding returns a DataframeAnalysisFeatureProcessorOneHotEncoding. 
+func NewDataframeAnalysisFeatureProcessorOneHotEncoding() *DataframeAnalysisFeatureProcessorOneHotEncoding { + r := &DataframeAnalysisFeatureProcessorOneHotEncoding{} + + return r +} + +type DataframeAnalysisFeatureProcessorOneHotEncodingVariant interface { + DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding +} + +func (s *DataframeAnalysisFeatureProcessorOneHotEncoding) DataframeAnalysisFeatureProcessorOneHotEncodingCaster() *DataframeAnalysisFeatureProcessorOneHotEncoding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go new file mode 100644 index 000000000..8ec95f215 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisFeatureProcessorTargetMeanEncoding type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L295-L304 +type DataframeAnalysisFeatureProcessorTargetMeanEncoding struct { + // DefaultValue The default value if field value is not found in the target_map. + DefaultValue int `json:"default_value"` + // FeatureName The resulting feature name. + FeatureName string `json:"feature_name"` + // Field The name of the field to encode. + Field string `json:"field"` + // TargetMap The field value to target mean transition map. 
+ TargetMap map[string]json.RawMessage `json:"target_map"` +} + +func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DefaultValue", err) + } + s.DefaultValue = value + case float64: + f := int(v) + s.DefaultValue = f + } + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "target_map": + if s.TargetMap == nil { + s.TargetMap = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.TargetMap); err != nil { + return fmt.Errorf("%s | %w", "TargetMap", err) + } + + } + } + return nil +} + +// NewDataframeAnalysisFeatureProcessorTargetMeanEncoding returns a DataframeAnalysisFeatureProcessorTargetMeanEncoding. +func NewDataframeAnalysisFeatureProcessorTargetMeanEncoding() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { + r := &DataframeAnalysisFeatureProcessorTargetMeanEncoding{ + TargetMap: make(map[string]json.RawMessage), + } + + return r +} + +type DataframeAnalysisFeatureProcessorTargetMeanEncodingVariant interface { + DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) DataframeAnalysisFeatureProcessorTargetMeanEncodingCaster() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisoutlierdetection.go new file mode 100644 index 000000000..f5781b0f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisoutlierdetection.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisOutlierDetection type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L103-L132 +type DataframeAnalysisOutlierDetection struct { + // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. + ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` + // FeatureInfluenceThreshold The minimum outlier score that a document needs to have in order to calculate + // its feature influence score. Value range: 0-1. + FeatureInfluenceThreshold *Float64 `json:"feature_influence_threshold,omitempty"` + // Method The method that outlier detection uses. Available methods are `lof`, `ldof`, + // `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is + // ensemble, which means that outlier detection uses an ensemble of different + // methods and normalises and combines their individual outlier scores to obtain + // the overall outlier score. + Method *string `json:"method,omitempty"` + // NNeighbors Defines the value for how many nearest neighbors each method of outlier + // detection uses to calculate its outlier score. When the value is not set, + // different values are used for different ensemble members. This default + // behavior helps improve the diversity in the ensemble; only override it if you + // are confident that the value you choose is appropriate for the data set. + NNeighbors *int `json:"n_neighbors,omitempty"` + // OutlierFraction The proportion of the data set that is assumed to be outlying prior to + // outlier detection. For example, 0.05 means it is assumed that 5% of values + // are real outliers and 95% are inliers. + OutlierFraction *Float64 `json:"outlier_fraction,omitempty"` + // StandardizationEnabled If true, the following operation is performed on the columns before computing + // outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. 
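The standardization formula quoted above is an ordinary z-score. A sketch of what it does to a column of values; the sample standard deviation (n-1 divisor) is an assumption here, since the docs do not name the estimator:

package main

import (
	"fmt"
	"math"
)

// standardize applies (x_i - mean(x)) / sd(x) to every value.
func standardize(xs []float64) []float64 {
	n := float64(len(xs))
	var mean float64
	for _, x := range xs {
		mean += x / n
	}
	var variance float64
	for _, x := range xs {
		variance += (x - mean) * (x - mean) / (n - 1)
	}
	sd := math.Sqrt(variance)
	out := make([]float64, len(xs))
	for i, x := range xs {
		out[i] = (x - mean) / sd
	}
	return out
}

func main() {
	fmt.Println(standardize([]float64{1, 2, 3, 4})) // zero mean, unit spread
}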
+ StandardizationEnabled *bool `json:"standardization_enabled,omitempty"` +} + +func (s *DataframeAnalysisOutlierDetection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compute_feature_influence": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeFeatureInfluence", err) + } + s.ComputeFeatureInfluence = &value + case bool: + s.ComputeFeatureInfluence = &v + } + + case "feature_influence_threshold": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FeatureInfluenceThreshold", err) + } + f := Float64(value) + s.FeatureInfluenceThreshold = &f + case float64: + f := Float64(v) + s.FeatureInfluenceThreshold = &f + } + + case "method": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Method = &o + + case "n_neighbors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NNeighbors", err) + } + s.NNeighbors = &value + case float64: + f := int(v) + s.NNeighbors = &f + } + + case "outlier_fraction": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OutlierFraction", err) + } + f := Float64(value) + s.OutlierFraction = &f + case float64: + f := Float64(v) + s.OutlierFraction = &f + } + + case "standardization_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StandardizationEnabled", err) + } + s.StandardizationEnabled = &value + case bool: + s.StandardizationEnabled = &v + } + + } + } + return nil +} + +// NewDataframeAnalysisOutlierDetection returns a DataframeAnalysisOutlierDetection. +func NewDataframeAnalysisOutlierDetection() *DataframeAnalysisOutlierDetection { + r := &DataframeAnalysisOutlierDetection{} + + return r +} + +type DataframeAnalysisOutlierDetectionVariant interface { + DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection +} + +func (s *DataframeAnalysisOutlierDetection) DataframeAnalysisOutlierDetectionCaster() *DataframeAnalysisOutlierDetection { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisregression.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisregression.go new file mode 100644 index 000000000..b4fe1ea5f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalysisregression.go @@ -0,0 +1,467 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalysisRegression type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L215-L225 +type DataframeAnalysisRegression struct { + // Alpha Advanced configuration option. Machine learning uses loss guided tree + // growing, which means that the decision trees grow where the regularized loss + // decreases most quickly. This parameter affects loss calculations by acting as + // a multiplier of the tree depth. Higher alpha values result in shallower trees + // and faster training times. By default, this value is calculated during + // hyperparameter optimization. It must be greater than or equal to zero. + Alpha *Float64 `json:"alpha,omitempty"` + // DependentVariable Defines which field of the document is to be predicted. It must match one of + // the fields in the index being used to train. If this field is missing from a + // document, then that document will not be used for training, but a prediction + // with the trained model will be generated for it. It is also known as + // continuous target variable. + // For classification analysis, the data type of the field must be numeric + // (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or + // `boolean`. There must be no more than 30 different values in this field. + // For regression analysis, the data type of the field must be numeric. + DependentVariable string `json:"dependent_variable"` + // DownsampleFactor Advanced configuration option. Controls the fraction of data that is used to + // compute the derivatives of the loss function for tree training. A small value + // results in the use of a small fraction of the data. If this value is set to + // be less than 1, accuracy typically improves. However, too small a value may + // result in poor convergence for the ensemble and so require more trees. By + // default, this value is calculated during hyperparameter optimization. It must + // be greater than zero and less than or equal to 1. + DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` + // EarlyStoppingEnabled Advanced configuration option. Specifies whether the training process should + // finish if it is not finding any better performing models. If disabled, the + // training process can take significantly longer and the chance of finding a + // better performing model is unremarkable. + EarlyStoppingEnabled *bool `json:"early_stopping_enabled,omitempty"` + // Eta Advanced configuration option. The shrinkage applied to the weights. Smaller + // values result in larger forests which have a better generalization error. + // However, larger forests cause slower training. By default, this value is + // calculated during hyperparameter optimization. It must be a value between + // 0.001 and 1. 
+ Eta *Float64 `json:"eta,omitempty"` + // EtaGrowthRatePerTree Advanced configuration option. Specifies the rate at which `eta` increases + // for each new tree that is added to the forest. For example, a rate of 1.05 + // increases `eta` by 5% for each extra tree. By default, this value is + // calculated during hyperparameter optimization. It must be between 0.5 and 2. + EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` + // FeatureBagFraction Advanced configuration option. Defines the fraction of features that will be + // used when selecting a random bag for each candidate split. By default, this + // value is calculated during hyperparameter optimization. + FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` + // FeatureProcessors Advanced configuration option. A collection of feature preprocessors that + // modify one or more included fields. The analysis uses the resulting one or + // more features instead of the original document field. However, these features + // are ephemeral; they are not stored in the destination index. Multiple + // `feature_processors` entries can refer to the same document fields. Automatic + // categorical feature encoding still occurs for the fields that are unprocessed + // by a custom processor or that have categorical values. Use this property only + // if you want to override the automatic feature encoding of the specified + // fields. + FeatureProcessors []DataframeAnalysisFeatureProcessor `json:"feature_processors,omitempty"` + // Gamma Advanced configuration option. Regularization parameter to prevent + // overfitting on the training data set. Multiplies a linear penalty associated + // with the size of individual trees in the forest. A high gamma value causes + // training to prefer small trees. A small gamma value results in larger + // individual trees and slower training. By default, this value is calculated + // during hyperparameter optimization. It must be a nonnegative value. + Gamma *Float64 `json:"gamma,omitempty"` + // Lambda Advanced configuration option. Regularization parameter to prevent + // overfitting on the training data set. Multiplies an L2 regularization term + // which applies to leaf weights of the individual trees in the forest. A high + // lambda value causes training to favor small leaf weights. This behavior makes + // the prediction function smoother at the expense of potentially not being able + // to capture relevant relationships between the features and the dependent + // variable. A small lambda value results in large individual trees and slower + // training. By default, this value is calculated during hyperparameter + // optimization. It must be a nonnegative value. + Lambda *Float64 `json:"lambda,omitempty"` + // LossFunction The loss function used during regression. Available options are `mse` (mean + // squared error), `msle` (mean squared logarithmic error), `huber` + // (Pseudo-Huber loss). + LossFunction *string `json:"loss_function,omitempty"` + // LossFunctionParameter A positive number that is used as a parameter to the `loss_function`. + LossFunctionParameter *Float64 `json:"loss_function_parameter,omitempty"` + // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. A multiplier responsible for determining the + // maximum number of hyperparameter optimization steps in the Bayesian + // optimization procedure. 
The maximum number of steps is determined based on + // the number of undefined hyperparameters times the maximum optimization rounds + // per hyperparameter. By default, this value is calculated during + // hyperparameter optimization. + MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` + // MaxTrees Advanced configuration option. Defines the maximum number of decision trees + // in the forest. The maximum value is 2000. By default, this value is + // calculated during hyperparameter optimization. + MaxTrees *int `json:"max_trees,omitempty"` + // NumTopFeatureImportanceValues Advanced configuration option. Specifies the maximum number of feature + // importance values per document to return. By default, no feature importance + // calculation occurs. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // PredictionFieldName Defines the name of the prediction field in the results. Defaults to + // `<dependent_variable>_prediction`. + PredictionFieldName *string `json:"prediction_field_name,omitempty"` + // RandomizeSeed Defines the seed for the random generator that is used to pick training data. + // By default, it is randomly generated. Set it to a specific value to use the + // same training data each time you start a job (assuming other related + // parameters such as `source` and `analyzed_fields` are the same). + RandomizeSeed *Float64 `json:"randomize_seed,omitempty"` + // SoftTreeDepthLimit Advanced configuration option. Machine learning uses loss guided tree + // growing, which means that the decision trees grow where the regularized loss + // decreases most quickly. This soft limit combines with the + // `soft_tree_depth_tolerance` to penalize trees that exceed the specified + // depth; the regularized loss increases quickly beyond this depth. By default, + // this value is calculated during hyperparameter optimization. It must be + // greater than or equal to 0. + SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` + // SoftTreeDepthTolerance Advanced configuration option. This option controls how quickly the + // regularized loss increases when the tree depth exceeds + // `soft_tree_depth_limit`. By default, this value is calculated during + // hyperparameter optimization. It must be greater than or equal to 0.01. + SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` + // TrainingPercent Defines what percentage of the eligible documents will be used for + // training. Documents that are ignored by the analysis (for example those that + // contain arrays with more than one value) won’t be included in the calculation + // for used percentage. 
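+ // For example, a value of 80 (illustrative) trains on roughly 80% of the + // eligible documents and leaves the remainder available for testing.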
+ TrainingPercent Percentage `json:"training_percent,omitempty"` +} + +func (s *DataframeAnalysisRegression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "dependent_variable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DependentVariable", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DependentVariable = o + + case "downsample_factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DownsampleFactor", err) + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case "early_stopping_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EarlyStoppingEnabled", err) + } + s.EarlyStoppingEnabled = &value + case bool: + s.EarlyStoppingEnabled = &v + } + + case "eta": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Eta", err) + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EtaGrowthRatePerTree", err) + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FeatureBagFraction", err) + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "feature_processors": + if err := dec.Decode(&s.FeatureProcessors); err != nil { + return fmt.Errorf("%s | %w", "FeatureProcessors", err) + } + + case "gamma": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Gamma", err) + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lambda", err) + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "loss_function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LossFunction", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LossFunction = &o + + case 
"loss_function_parameter": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LossFunctionParameter", err) + } + f := Float64(value) + s.LossFunctionParameter = &f + case float64: + f := Float64(v) + s.LossFunctionParameter = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOptimizationRoundsPerHyperparameter", err) + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees", "maximum_number_trees": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTrees", err) + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_name": + if err := dec.Decode(&s.PredictionFieldName); err != nil { + return fmt.Errorf("%s | %w", "PredictionFieldName", err) + } + + case "randomize_seed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RandomizeSeed", err) + } + f := Float64(value) + s.RandomizeSeed = &f + case float64: + f := Float64(v) + s.RandomizeSeed = &f + } + + case "soft_tree_depth_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthLimit", err) + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthTolerance", err) + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + case "training_percent": + if err := dec.Decode(&s.TrainingPercent); err != nil { + return fmt.Errorf("%s | %w", "TrainingPercent", err) + } + + } + } + return nil +} + +// NewDataframeAnalysisRegression returns a DataframeAnalysisRegression. 
+func NewDataframeAnalysisRegression() *DataframeAnalysisRegression { + r := &DataframeAnalysisRegression{} + + return r +} + +type DataframeAnalysisRegressionVariant interface { + DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression +} + +func (s *DataframeAnalysisRegression) DataframeAnalysisRegressionCaster() *DataframeAnalysisRegression { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalytics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalytics.go new file mode 100644 index 000000000..8665a37bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalytics.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataframestate" +) + +// DataframeAnalytics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L325-L345 +type DataframeAnalytics struct { + // AnalysisStats An object containing information about the analysis job. + AnalysisStats *DataframeAnalyticsStatsContainer `json:"analysis_stats,omitempty"` + // AssignmentExplanation For running jobs only, contains messages relating to the selection of a node + // to run the job. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // DataCounts An object that provides counts for the quantity of documents skipped, used in + // training, or available for testing. + DataCounts DataframeAnalyticsStatsDataCounts `json:"data_counts"` + // Id The unique identifier of the data frame analytics job. + Id string `json:"id"` + // MemoryUsage An object describing memory usage of the analytics. It is present only after + // the job is started and memory usage is reported. + MemoryUsage DataframeAnalyticsStatsMemoryUsage `json:"memory_usage"` + // Node Contains properties for the node that runs the job. This information is + // available only for running jobs. + Node *NodeAttributes `json:"node,omitempty"` + // Progress The progress report of the data frame analytics job by phase. + Progress []DataframeAnalyticsStatsProgress `json:"progress"` + // State The status of the data frame analytics job, which can be one of the following + // values: failed, started, starting, stopping, stopped. 
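+ // In a stats response this appears as, for example, `"state": "started"` + // (illustrative; the full set of values is listed above).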
+ State dataframestate.DataframeState `json:"state"` +} + +func (s *DataframeAnalytics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis_stats": + if err := dec.Decode(&s.AnalysisStats); err != nil { + return fmt.Errorf("%s | %w", "AnalysisStats", err) + } + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "data_counts": + if err := dec.Decode(&s.DataCounts); err != nil { + return fmt.Errorf("%s | %w", "DataCounts", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "memory_usage": + if err := dec.Decode(&s.MemoryUsage); err != nil { + return fmt.Errorf("%s | %w", "MemoryUsage", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "progress": + if err := dec.Decode(&s.Progress); err != nil { + return fmt.Errorf("%s | %w", "Progress", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + } + } + return nil +} + +// NewDataframeAnalytics returns a DataframeAnalytics. +func NewDataframeAnalytics() *DataframeAnalytics { + r := &DataframeAnalytics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsauthorization.go new file mode 100644 index 000000000..58ec74315 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsauthorization.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsAuthorization type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Authorization.ts#L45-L57 +type DataframeAnalyticsAuthorization struct { + // ApiKey If an API key was used for the most recent update to the job, its name and + // identifier are listed in the response. 
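+ // Typically only the member matching the credential used for the most recent + // update (API key, user roles, or service account) is populated.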
+ ApiKey *ApiKeyAuthorization `json:"api_key,omitempty"` + // Roles If a user ID was used for the most recent update to the job, its roles at the + // time of the update are listed in the response. + Roles []string `json:"roles,omitempty"` + // ServiceAccount If a service account was used for the most recent update to the job, the + // account name is listed in the response. + ServiceAccount *string `json:"service_account,omitempty"` +} + +func (s *DataframeAnalyticsAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceAccount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + +// NewDataframeAnalyticsAuthorization returns a DataframeAnalyticsAuthorization. +func NewDataframeAnalyticsAuthorization() *DataframeAnalyticsAuthorization { + r := &DataframeAnalyticsAuthorization{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsdestination.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsdestination.go new file mode 100644 index 000000000..0bef3db46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsdestination.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataframeAnalyticsDestination type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L77-L82 +type DataframeAnalyticsDestination struct { + // Index Defines the destination index to store the results of the data frame + // analytics job. + Index string `json:"index"` + // ResultsField Defines the name of the field in which to store the results of the analysis. + // Defaults to `ml`. 
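+ // For example (illustrative index name): `{"index": "house-prices-dest", + // "results_field": "ml"}`.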
+ ResultsField *string `json:"results_field,omitempty"` +} + +func (s *DataframeAnalyticsDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + + } + } + return nil +} + +// NewDataframeAnalyticsDestination returns a DataframeAnalyticsDestination. +func NewDataframeAnalyticsDestination() *DataframeAnalyticsDestination { + r := &DataframeAnalyticsDestination{} + + return r +} + +type DataframeAnalyticsDestinationVariant interface { + DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination +} + +func (s *DataframeAnalyticsDestination) DataframeAnalyticsDestinationCaster() *DataframeAnalyticsDestination { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsfieldselection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsfieldselection.go new file mode 100644 index 000000000..47fb05d8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsfieldselection.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsFieldSelection type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L55-L68 +type DataframeAnalyticsFieldSelection struct { + // FeatureType The feature type of this field for the analysis. May be categorical or + // numerical. + FeatureType *string `json:"feature_type,omitempty"` + // IsIncluded Whether the field is selected to be included in the analysis. + IsIncluded bool `json:"is_included"` + // IsRequired Whether the field is required. + IsRequired bool `json:"is_required"` + // MappingTypes The mapping types of the field. + MappingTypes []string `json:"mapping_types"` + // Name The field name. + Name string `json:"name"` + // Reason The reason a field is not selected to be included in the analysis. 
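+ // A selected numeric field might be reported as, for example (illustrative + // values): `{"name": "price", "mapping_types": ["integer"], "is_included": + // true, "is_required": false, "feature_type": "numerical"}`.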
+ Reason *string `json:"reason,omitempty"` +} + +func (s *DataframeAnalyticsFieldSelection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureType = &o + + case "is_included": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsIncluded", err) + } + s.IsIncluded = value + case bool: + s.IsIncluded = v + } + + case "is_required": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsRequired", err) + } + s.IsRequired = value + case bool: + s.IsRequired = v + } + + case "mapping_types": + if err := dec.Decode(&s.MappingTypes); err != nil { + return fmt.Errorf("%s | %w", "MappingTypes", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + } + } + return nil +} + +// NewDataframeAnalyticsFieldSelection returns a DataframeAnalyticsFieldSelection. +func NewDataframeAnalyticsFieldSelection() *DataframeAnalyticsFieldSelection { + r := &DataframeAnalyticsFieldSelection{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsmemoryestimation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsmemoryestimation.go new file mode 100644 index 000000000..6a2bd4f96 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsmemoryestimation.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsMemoryEstimation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L70-L75 +type DataframeAnalyticsMemoryEstimation struct { + // ExpectedMemoryWithDisk Estimated memory usage under the assumption that overflowing to disk is + // allowed during data frame analytics. expected_memory_with_disk is usually + // smaller than expected_memory_without_disk as using disk allows limiting the + // main memory needed to perform data frame analytics. + ExpectedMemoryWithDisk string `json:"expected_memory_with_disk"` + // ExpectedMemoryWithoutDisk Estimated memory usage under the assumption that the whole data frame + // analytics should happen in memory (i.e. without overflowing to disk). + ExpectedMemoryWithoutDisk string `json:"expected_memory_without_disk"` +} + +func (s *DataframeAnalyticsMemoryEstimation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expected_memory_with_disk": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExpectedMemoryWithDisk", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExpectedMemoryWithDisk = o + + case "expected_memory_without_disk": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExpectedMemoryWithoutDisk", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExpectedMemoryWithoutDisk = o + + } + } + return nil +} + +// NewDataframeAnalyticsMemoryEstimation returns a DataframeAnalyticsMemoryEstimation. +func NewDataframeAnalyticsMemoryEstimation() *DataframeAnalyticsMemoryEstimation { + r := &DataframeAnalyticsMemoryEstimation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsrecord.go new file mode 100644 index 000000000..d744775e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsrecord.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataFrameAnalyticsRecord type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 +type DataFrameAnalyticsRecord struct { + // AssignmentExplanation Messages related to the selection of a node. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // CreateTime The time when the job was created. + CreateTime *string `json:"create_time,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // DestIndex The name of the destination index. + DestIndex *string `json:"dest_index,omitempty"` + // FailureReason Messages about the reason why the job failed. + FailureReason *string `json:"failure_reason,omitempty"` + // Id The identifier for the job. + Id *string `json:"id,omitempty"` + // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for the + // job. + ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + // NodeAddress The network address of the assigned node. + NodeAddress *string `json:"node.address,omitempty"` + // NodeEphemeralId The ephemeral identifier of the assigned node. + NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"` + // NodeId The unique identifier of the assigned node. + NodeId *string `json:"node.id,omitempty"` + // NodeName The name of the assigned node. + NodeName *string `json:"node.name,omitempty"` + // Progress The progress report for the job by phase. + Progress *string `json:"progress,omitempty"` + // SourceIndex The name of the source index. + SourceIndex *string `json:"source_index,omitempty"` + // State The current status of the job. + State *string `json:"state,omitempty"` + // Type The type of analysis that the job performs. + Type *string `json:"type,omitempty"` + // Version The version of Elasticsearch when the job was created. 
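+ // The UnmarshalJSON below also accepts the abbreviated and camelCase column + // aliases used by the cat API (for example `v` or `version` for this field), + // mirroring the alternatives listed in its case clauses.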
+ Version *string `json:"version,omitempty"` +} + +func (s *DataFrameAnalyticsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation", "ae", "assignmentExplanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "create_time", "ct", "createTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreateTime = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest_index", "di", "destIndex": + if err := dec.Decode(&s.DestIndex); err != nil { + return fmt.Errorf("%s | %w", "DestIndex", err) + } + + case "failure_reason", "fr", "failureReason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FailureReason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FailureReason = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "model_memory_limit", "mml", "modelMemoryLimit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "node.address", "na", "nodeAddress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeAddress = &o + + case "node.ephemeral_id", "ne", "nodeEphemeralId": + if err := dec.Decode(&s.NodeEphemeralId); err != nil { + return fmt.Errorf("%s | %w", "NodeEphemeralId", err) + } + + case "node.id", "ni", "nodeId": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "node.name", "nn", "nodeName": + if err := dec.Decode(&s.NodeName); err != nil { + return fmt.Errorf("%s | %w", "NodeName", err) + } + + case "progress", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Progress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Progress = &o + + case "source_index", "si", "sourceIndex": + if err := dec.Decode(&s.SourceIndex); err != nil { + return fmt.Errorf("%s | %w", "SourceIndex", err) + } + + case "state", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", 
err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewDataFrameAnalyticsRecord returns a DataFrameAnalyticsRecord. +func NewDataFrameAnalyticsRecord() *DataFrameAnalyticsRecord { + r := &DataFrameAnalyticsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssource.go new file mode 100644 index 000000000..10910dcb6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssource.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataframeAnalyticsSource type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L39-L53 +type DataframeAnalyticsSource struct { + // Index Index or indices on which to perform the analysis. It can be a single index + // or index pattern as well as an array of indices or patterns. NOTE: If your + // source indices contain documents with the same IDs, only the document that is + // indexed last appears in the destination index. + Index []string `json:"index"` + // Query The Elasticsearch query domain-specific language (DSL). This value + // corresponds to the query object in an Elasticsearch search POST body. All the + // options that are supported by Elasticsearch can be used, as this object is + // passed verbatim to Elasticsearch. By default, this property has the following + // value: {"match_all": {}}. + Query *Query `json:"query,omitempty"` + // RuntimeMappings Definitions of runtime fields that will become part of the mapping of the + // destination index. + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + // Source_ Specify `includes` and/or `excludes` patterns to select which fields will be + // present in the destination. Fields that are excluded cannot be included in + // the analysis. 
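+ // For example (illustrative patterns; shape follows the `includes`/`excludes` + // description above): `{"includes": ["price", "*_count"], "excludes": + // ["raw_*"]}`.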
+ Source_ *DataframeAnalysisAnalyzedFields `json:"_source,omitempty"` +} + +func (s *DataframeAnalyticsSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + } + } + return nil +} + +// NewDataframeAnalyticsSource returns a DataframeAnalyticsSource. +func NewDataframeAnalyticsSource() *DataframeAnalyticsSource { + r := &DataframeAnalyticsSource{} + + return r +} + +type DataframeAnalyticsSourceVariant interface { + DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource +} + +func (s *DataframeAnalyticsSource) DataframeAnalyticsSourceCaster() *DataframeAnalyticsSource { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatscontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatscontainer.go new file mode 100644 index 000000000..7573696f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatscontainer.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// DataframeAnalyticsStatsContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L374-L382 +type DataframeAnalyticsStatsContainer struct { + AdditionalDataframeAnalyticsStatsContainerProperty map[string]json.RawMessage `json:"-"` + // ClassificationStats An object containing information about the classification analysis job. 
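+ // At most one of the *_stats members is expected, matching the job's analysis + // type; entries in the additional-properties map above are inlined alongside + // them by MarshalJSON below.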
+ ClassificationStats *DataframeAnalyticsStatsHyperparameters `json:"classification_stats,omitempty"` + // OutlierDetectionStats An object containing information about the outlier detection job. + OutlierDetectionStats *DataframeAnalyticsStatsOutlierDetection `json:"outlier_detection_stats,omitempty"` + // RegressionStats An object containing information about the regression analysis. + RegressionStats *DataframeAnalyticsStatsHyperparameters `json:"regression_stats,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s DataframeAnalyticsStatsContainer) MarshalJSON() ([]byte, error) { + type opt DataframeAnalyticsStatsContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalDataframeAnalyticsStatsContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDataframeAnalyticsStatsContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDataframeAnalyticsStatsContainer returns a DataframeAnalyticsStatsContainer. +func NewDataframeAnalyticsStatsContainer() *DataframeAnalyticsStatsContainer { + r := &DataframeAnalyticsStatsContainer{ + AdditionalDataframeAnalyticsStatsContainerProperty: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsdatacounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsdatacounts.go new file mode 100644 index 000000000..82f118327 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsdatacounts.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsStatsDataCounts type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L365-L372 +type DataframeAnalyticsStatsDataCounts struct { + // SkippedDocsCount The number of documents that are skipped during the analysis because they + // contained values that are not supported by the analysis. 
For example, outlier + // detection does not support missing fields so it skips documents with missing + // fields. Likewise, all types of analysis skip documents that contain arrays + // with more than one element. + SkippedDocsCount int `json:"skipped_docs_count"` + // TestDocsCount The number of documents that are not used for training the model and can be + // used for testing. + TestDocsCount int `json:"test_docs_count"` + // TrainingDocsCount The number of documents that are used for training the model. + TrainingDocsCount int `json:"training_docs_count"` +} + +func (s *DataframeAnalyticsStatsDataCounts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "skipped_docs_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkippedDocsCount", err) + } + s.SkippedDocsCount = value + case float64: + f := int(v) + s.SkippedDocsCount = f + } + + case "test_docs_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TestDocsCount", err) + } + s.TestDocsCount = value + case float64: + f := int(v) + s.TestDocsCount = f + } + + case "training_docs_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrainingDocsCount", err) + } + s.TrainingDocsCount = value + case float64: + f := int(v) + s.TrainingDocsCount = f + } + + } + } + return nil +} + +// NewDataframeAnalyticsStatsDataCounts returns a DataframeAnalyticsStatsDataCounts. +func NewDataframeAnalyticsStatsDataCounts() *DataframeAnalyticsStatsDataCounts { + r := &DataframeAnalyticsStatsDataCounts{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatshyperparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatshyperparameters.go new file mode 100644 index 000000000..5e37d4a1c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatshyperparameters.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsStatsHyperparameters type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L384-L403 +type DataframeAnalyticsStatsHyperparameters struct { + // Hyperparameters An object containing the parameters of the classification analysis job. + Hyperparameters Hyperparameters `json:"hyperparameters"` + // Iteration The number of iterations on the analysis. + Iteration int `json:"iteration"` + // Timestamp The timestamp when the statistics were reported in milliseconds since the + // epoch. + Timestamp int64 `json:"timestamp"` + // TimingStats An object containing time statistics about the data frame analytics job. + TimingStats TimingStats `json:"timing_stats"` + // ValidationLoss An object containing information about validation loss. + ValidationLoss ValidationLoss `json:"validation_loss"` +} + +func (s *DataframeAnalyticsStatsHyperparameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hyperparameters": + if err := dec.Decode(&s.Hyperparameters); err != nil { + return fmt.Errorf("%s | %w", "Hyperparameters", err) + } + + case "iteration": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Iteration", err) + } + s.Iteration = value + case float64: + f := int(v) + s.Iteration = f + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return fmt.Errorf("%s | %w", "TimingStats", err) + } + + case "validation_loss": + if err := dec.Decode(&s.ValidationLoss); err != nil { + return fmt.Errorf("%s | %w", "ValidationLoss", err) + } + + } + } + return nil +} + +// NewDataframeAnalyticsStatsHyperparameters returns a DataframeAnalyticsStatsHyperparameters. +func NewDataframeAnalyticsStatsHyperparameters() *DataframeAnalyticsStatsHyperparameters { + r := &DataframeAnalyticsStatsHyperparameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsmemoryusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsmemoryusage.go new file mode 100644 index 000000000..86e78f756 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsmemoryusage.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeAnalyticsStatsMemoryUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L354-L363 +type DataframeAnalyticsStatsMemoryUsage struct { + // MemoryReestimateBytes This value is present when the status is hard_limit and it is a new estimate + // of how much memory the job needs. + MemoryReestimateBytes *int64 `json:"memory_reestimate_bytes,omitempty"` + // PeakUsageBytes The number of bytes used at the highest peak of memory usage. + PeakUsageBytes int64 `json:"peak_usage_bytes"` + // Status The memory usage status. + Status string `json:"status"` + // Timestamp The timestamp when memory usage was calculated. + Timestamp *int64 `json:"timestamp,omitempty"` +} + +func (s *DataframeAnalyticsStatsMemoryUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_reestimate_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemoryReestimateBytes", err) + } + s.MemoryReestimateBytes = &value + case float64: + f := int64(v) + s.MemoryReestimateBytes = &f + } + + case "peak_usage_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakUsageBytes", err) + } + s.PeakUsageBytes = value + case float64: + f := int64(v) + s.PeakUsageBytes = f + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewDataframeAnalyticsStatsMemoryUsage returns a DataframeAnalyticsStatsMemoryUsage. +func NewDataframeAnalyticsStatsMemoryUsage() *DataframeAnalyticsStatsMemoryUsage { + r := &DataframeAnalyticsStatsMemoryUsage{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsoutlierdetection.go new file mode 100644 index 000000000..5e75dedd4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsoutlierdetection.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// DataframeAnalyticsStatsOutlierDetection type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L405-L418
+type DataframeAnalyticsStatsOutlierDetection struct {
+	// Parameters The list of job parameters specified by the user or determined by algorithmic
+	// heuristics.
+	Parameters OutlierDetectionParameters `json:"parameters"`
+	// Timestamp The timestamp when the statistics were reported in milliseconds since the
+	// epoch.
+	Timestamp int64 `json:"timestamp"`
+	// TimingStats An object containing time statistics about the data frame analytics job.
+	TimingStats TimingStats `json:"timing_stats"`
+}
+
+func (s *DataframeAnalyticsStatsOutlierDetection) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "parameters":
+			if err := dec.Decode(&s.Parameters); err != nil {
+				return fmt.Errorf("%s | %w", "Parameters", err)
+			}
+
+		case "timestamp":
+			if err := dec.Decode(&s.Timestamp); err != nil {
+				return fmt.Errorf("%s | %w", "Timestamp", err)
+			}
+
+		case "timing_stats":
+			if err := dec.Decode(&s.TimingStats); err != nil {
+				return fmt.Errorf("%s | %w", "TimingStats", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeAnalyticsStatsOutlierDetection returns a DataframeAnalyticsStatsOutlierDetection.
+func NewDataframeAnalyticsStatsOutlierDetection() *DataframeAnalyticsStatsOutlierDetection {
+	r := &DataframeAnalyticsStatsOutlierDetection{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsprogress.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsprogress.go
new file mode 100644
index 000000000..1051e0498
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticsstatsprogress.go
@@ -0,0 +1,96 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeAnalyticsStatsProgress type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L347-L352
+type DataframeAnalyticsStatsProgress struct {
+	// Phase Defines the phase of the data frame analytics job.
+	Phase string `json:"phase"`
+	// ProgressPercent The progress that the data frame analytics job has made expressed in
+	// percentage.
+	ProgressPercent int `json:"progress_percent"`
+}
+
+func (s *DataframeAnalyticsStatsProgress) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "phase":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Phase", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Phase = o
+
+		case "progress_percent":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ProgressPercent", err)
+				}
+				s.ProgressPercent = value
+			case float64:
+				f := int(v)
+				s.ProgressPercent = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeAnalyticsStatsProgress returns a DataframeAnalyticsStatsProgress.
+func NewDataframeAnalyticsStatsProgress() *DataframeAnalyticsStatsProgress {
+	r := &DataframeAnalyticsStatsProgress{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssummary.go
new file mode 100644
index 000000000..56fbeb621
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeanalyticssummary.go
@@ -0,0 +1,178 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeAnalyticsSummary type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L306-L323
+type DataframeAnalyticsSummary struct {
+	AllowLazyStart *bool `json:"allow_lazy_start,omitempty"`
+	Analysis DataframeAnalysisContainer `json:"analysis"`
+	AnalyzedFields *DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"`
+	// Authorization The security privileges that the job uses to run its queries. If Elastic
+	// Stack security features were disabled at the time of the most recent update
+	// to the job, this property is omitted.
+	Authorization *DataframeAnalyticsAuthorization `json:"authorization,omitempty"`
+	CreateTime *int64 `json:"create_time,omitempty"`
+	Description *string `json:"description,omitempty"`
+	Dest DataframeAnalyticsDestination `json:"dest"`
+	Id string `json:"id"`
+	MaxNumThreads *int `json:"max_num_threads,omitempty"`
+	Meta_ Metadata `json:"_meta,omitempty"`
+	ModelMemoryLimit *string `json:"model_memory_limit,omitempty"`
+	Source DataframeAnalyticsSource `json:"source"`
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *DataframeAnalyticsSummary) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_lazy_start":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowLazyStart", err)
+				}
+				s.AllowLazyStart = &value
+			case bool:
+				s.AllowLazyStart = &v
+			}
+
+		case "analysis":
+			if err := dec.Decode(&s.Analysis); err != nil {
+				return fmt.Errorf("%s | %w", "Analysis", err)
+			}
+
+		case "analyzed_fields":
+			if err := dec.Decode(&s.AnalyzedFields); err != nil {
+				return fmt.Errorf("%s | %w", "AnalyzedFields", err)
+			}
+
+		case "authorization":
+			if err := dec.Decode(&s.Authorization); err != nil {
+				return fmt.Errorf("%s | %w", "Authorization", err)
+			}
+
+		case "create_time":
+			if err := dec.Decode(&s.CreateTime); err != nil {
+				return fmt.Errorf("%s | %w", "CreateTime", err)
+			}
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "dest":
+			if err := dec.Decode(&s.Dest); err != nil {
+				return fmt.Errorf("%s | %w", "Dest", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "max_num_threads":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxNumThreads", err)
+				}
+				s.MaxNumThreads = &value
+			case float64:
+				f := int(v)
+				s.MaxNumThreads = &f
+			}
+
+		case "_meta":
+			if err := dec.Decode(&s.Meta_); err != nil {
+				return fmt.Errorf("%s | %w", "Meta_", err)
+			}
+
+		case "model_memory_limit":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ModelMemoryLimit", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ModelMemoryLimit = &o
+
+		case "source":
+			if err := dec.Decode(&s.Source); err != nil {
+				return fmt.Errorf("%s | %w", "Source", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeAnalyticsSummary returns a DataframeAnalyticsSummary.
+func NewDataframeAnalyticsSummary() *DataframeAnalyticsSummary {
+	r := &DataframeAnalyticsSummary{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummary.go
new file mode 100644
index 000000000..590e051bb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummary.go
@@ -0,0 +1,47 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// DataframeClassificationSummary type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L44-L66
+type DataframeClassificationSummary struct {
+	// Accuracy Accuracy of predictions (per-class and overall).
+	Accuracy *DataframeClassificationSummaryAccuracy `json:"accuracy,omitempty"`
+	// AucRoc The AUC ROC (area under the curve of the receiver operating characteristic)
+	// score and optionally the curve.
+	// It is calculated for a specific class (provided as "class_name") treated as
+	// positive.
+	AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"`
+	// MulticlassConfusionMatrix Multiclass confusion matrix.
+	MulticlassConfusionMatrix *DataframeClassificationSummaryMulticlassConfusionMatrix `json:"multiclass_confusion_matrix,omitempty"`
+	// Precision Precision of predictions (per-class and average).
+	Precision *DataframeClassificationSummaryPrecision `json:"precision,omitempty"`
+	// Recall Recall of predictions (per-class and average).
+	Recall *DataframeClassificationSummaryRecall `json:"recall,omitempty"`
+}
+
+// NewDataframeClassificationSummary returns a DataframeClassificationSummary.
+func NewDataframeClassificationSummary() *DataframeClassificationSummary {
+	r := &DataframeClassificationSummary{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryaccuracy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryaccuracy.go
new file mode 100644
index 000000000..bba5a1b46
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryaccuracy.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeClassificationSummaryAccuracy type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L111-L114
+type DataframeClassificationSummaryAccuracy struct {
+	Classes []DataframeEvaluationClass `json:"classes"`
+	OverallAccuracy Float64 `json:"overall_accuracy"`
+}
+
+func (s *DataframeClassificationSummaryAccuracy) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "classes":
+			if err := dec.Decode(&s.Classes); err != nil {
+				return fmt.Errorf("%s | %w", "Classes", err)
+			}
+
+		case "overall_accuracy":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "OverallAccuracy", err)
+				}
+				f := Float64(value)
+				s.OverallAccuracy = f
+			case float64:
+				f := Float64(v)
+				s.OverallAccuracy = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeClassificationSummaryAccuracy returns a DataframeClassificationSummaryAccuracy.
+func NewDataframeClassificationSummaryAccuracy() *DataframeClassificationSummaryAccuracy {
+	r := &DataframeClassificationSummaryAccuracy{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go
new file mode 100644
index 000000000..88fc2362f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeClassificationSummaryMulticlassConfusionMatrix type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L120-L123
+type DataframeClassificationSummaryMulticlassConfusionMatrix struct {
+	ConfusionMatrix []ConfusionMatrixItem `json:"confusion_matrix"`
+	OtherActualClassCount int `json:"other_actual_class_count"`
+}
+
+func (s *DataframeClassificationSummaryMulticlassConfusionMatrix) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "confusion_matrix":
+			if err := dec.Decode(&s.ConfusionMatrix); err != nil {
+				return fmt.Errorf("%s | %w", "ConfusionMatrix", err)
+			}
+
+		case "other_actual_class_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "OtherActualClassCount", err)
+				}
+				s.OtherActualClassCount = value
+			case float64:
+				f := int(v)
+				s.OtherActualClassCount = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeClassificationSummaryMulticlassConfusionMatrix returns a DataframeClassificationSummaryMulticlassConfusionMatrix.
+func NewDataframeClassificationSummaryMulticlassConfusionMatrix() *DataframeClassificationSummaryMulticlassConfusionMatrix {
+	r := &DataframeClassificationSummaryMulticlassConfusionMatrix{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryprecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryprecision.go
new file mode 100644
index 000000000..cba3cece4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryprecision.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeClassificationSummaryPrecision type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L101-L104
+type DataframeClassificationSummaryPrecision struct {
+	AvgPrecision Float64 `json:"avg_precision"`
+	Classes []DataframeEvaluationClass `json:"classes"`
+}
+
+func (s *DataframeClassificationSummaryPrecision) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "avg_precision":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AvgPrecision", err)
+				}
+				f := Float64(value)
+				s.AvgPrecision = f
+			case float64:
+				f := Float64(v)
+				s.AvgPrecision = f
+			}
+
+		case "classes":
+			if err := dec.Decode(&s.Classes); err != nil {
+				return fmt.Errorf("%s | %w", "Classes", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeClassificationSummaryPrecision returns a DataframeClassificationSummaryPrecision.
+func NewDataframeClassificationSummaryPrecision() *DataframeClassificationSummaryPrecision {
+	r := &DataframeClassificationSummaryPrecision{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryrecall.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryrecall.go
new file mode 100644
index 000000000..6251846d6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeclassificationsummaryrecall.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeClassificationSummaryRecall type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L106-L109
+type DataframeClassificationSummaryRecall struct {
+	AvgRecall Float64 `json:"avg_recall"`
+	Classes []DataframeEvaluationClass `json:"classes"`
+}
+
+func (s *DataframeClassificationSummaryRecall) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "avg_recall":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AvgRecall", err)
+				}
+				f := Float64(value)
+				s.AvgRecall = f
+			case float64:
+				f := Float64(v)
+				s.AvgRecall = f
+			}
+
+		case "classes":
+			if err := dec.Decode(&s.Classes); err != nil {
+				return fmt.Errorf("%s | %w", "Classes", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeClassificationSummaryRecall returns a DataframeClassificationSummaryRecall.
+func NewDataframeClassificationSummaryRecall() *DataframeClassificationSummaryRecall {
+	r := &DataframeClassificationSummaryRecall{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclass.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclass.go
new file mode 100644
index 000000000..f8c5be79f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclass.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeEvaluationClass type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L116-L118
+type DataframeEvaluationClass struct {
+	ClassName string `json:"class_name"`
+	Value Float64 `json:"value"`
+}
+
+func (s *DataframeEvaluationClass) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "class_name":
+			if err := dec.Decode(&s.ClassName); err != nil {
+				return fmt.Errorf("%s | %w", "ClassName", err)
+			}
+
+		case "value":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Value", err)
+				}
+				f := Float64(value)
+				s.Value = f
+			case float64:
+				f := Float64(v)
+				s.Value = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationClass returns a DataframeEvaluationClass.
+func NewDataframeEvaluationClass() *DataframeEvaluationClass {
+	r := &DataframeEvaluationClass{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassification.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassification.go
new file mode 100644
index 000000000..4e6340d23
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassification.go
@@ -0,0 +1,103 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// DataframeEvaluationClassification type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L35-L44
+type DataframeEvaluationClassification struct {
+	// ActualField The field of the index which contains the ground truth. The data type of this
+	// field can be boolean or integer. If the data type is integer, the value has
+	// to be either 0 (false) or 1 (true).
+	ActualField string `json:"actual_field"`
+	// Metrics Specifies the metrics that are used for the evaluation.
+	Metrics *DataframeEvaluationClassificationMetrics `json:"metrics,omitempty"`
+	// PredictedField The field in the index which contains the predicted value, in other words the
+	// results of the classification analysis.
+	PredictedField *string `json:"predicted_field,omitempty"`
+	// TopClassesField The field of the index which is an array of documents of the form {
+	// "class_name": XXX, "class_probability": YYY }. This field must be defined as
+	// nested in the mappings.
+	TopClassesField *string `json:"top_classes_field,omitempty"`
+}
+
+func (s *DataframeEvaluationClassification) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "actual_field":
+			if err := dec.Decode(&s.ActualField); err != nil {
+				return fmt.Errorf("%s | %w", "ActualField", err)
+			}
+
+		case "metrics":
+			if err := dec.Decode(&s.Metrics); err != nil {
+				return fmt.Errorf("%s | %w", "Metrics", err)
+			}
+
+		case "predicted_field":
+			if err := dec.Decode(&s.PredictedField); err != nil {
+				return fmt.Errorf("%s | %w", "PredictedField", err)
+			}
+
+		case "top_classes_field":
+			if err := dec.Decode(&s.TopClassesField); err != nil {
+				return fmt.Errorf("%s | %w", "TopClassesField", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationClassification returns a DataframeEvaluationClassification.
+func NewDataframeEvaluationClassification() *DataframeEvaluationClassification {
+	r := &DataframeEvaluationClassification{}
+
+	return r
+}
+
+type DataframeEvaluationClassificationVariant interface {
+	DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification
+}
+
+func (s *DataframeEvaluationClassification) DataframeEvaluationClassificationCaster() *DataframeEvaluationClassification {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetrics.go
new file mode 100644
index 000000000..b6cc07f3a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetrics.go
@@ -0,0 +1,63 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+)
+
+// DataframeEvaluationClassificationMetrics type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L73-L78
+type DataframeEvaluationClassificationMetrics struct {
+	// Accuracy Accuracy of predictions (per-class and overall).
+	Accuracy map[string]json.RawMessage `json:"accuracy,omitempty"`
+	// AucRoc The AUC ROC (area under the curve of the receiver operating characteristic)
+	// score and optionally the curve. It is calculated for a specific class
+	// (provided as "class_name") treated as positive.
+	AucRoc *DataframeEvaluationClassificationMetricsAucRoc `json:"auc_roc,omitempty"`
+	// MulticlassConfusionMatrix Multiclass confusion matrix.
+	MulticlassConfusionMatrix map[string]json.RawMessage `json:"multiclass_confusion_matrix,omitempty"`
+	// Precision Precision of predictions (per-class and average).
+	Precision map[string]json.RawMessage `json:"precision,omitempty"`
+	// Recall Recall of predictions (per-class and average).
+	Recall map[string]json.RawMessage `json:"recall,omitempty"`
+}
+
+// NewDataframeEvaluationClassificationMetrics returns a DataframeEvaluationClassificationMetrics.
+func NewDataframeEvaluationClassificationMetrics() *DataframeEvaluationClassificationMetrics {
+	r := &DataframeEvaluationClassificationMetrics{
+		Accuracy: make(map[string]json.RawMessage),
+		MulticlassConfusionMatrix: make(map[string]json.RawMessage),
+		Precision: make(map[string]json.RawMessage),
+		Recall: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type DataframeEvaluationClassificationMetricsVariant interface {
+	DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics
+}
+
+func (s *DataframeEvaluationClassificationMetrics) DataframeEvaluationClassificationMetricsCaster() *DataframeEvaluationClassificationMetrics {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go
new file mode 100644
index 000000000..2ffbc4108
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go
@@ -0,0 +1,98 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeEvaluationClassificationMetricsAucRoc type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L85-L90
+type DataframeEvaluationClassificationMetricsAucRoc struct {
+	// ClassName Name of the only class that is treated as positive during AUC ROC
+	// calculation. Other classes are treated as negative ("one-vs-all" strategy).
+	// All the evaluated documents must have class_name in the list of their top
+	// classes.
+	ClassName *string `json:"class_name,omitempty"`
+	// IncludeCurve Whether or not the curve should be returned in addition to the score. Default
+	// value is false.
+	IncludeCurve *bool `json:"include_curve,omitempty"`
+}
+
+func (s *DataframeEvaluationClassificationMetricsAucRoc) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "class_name":
+			if err := dec.Decode(&s.ClassName); err != nil {
+				return fmt.Errorf("%s | %w", "ClassName", err)
+			}
+
+		case "include_curve":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IncludeCurve", err)
+				}
+				s.IncludeCurve = &value
+			case bool:
+				s.IncludeCurve = &v
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationClassificationMetricsAucRoc returns a DataframeEvaluationClassificationMetricsAucRoc.
+func NewDataframeEvaluationClassificationMetricsAucRoc() *DataframeEvaluationClassificationMetricsAucRoc {
+	r := &DataframeEvaluationClassificationMetricsAucRoc{}
+
+	return r
+}
+
+type DataframeEvaluationClassificationMetricsAucRocVariant interface {
+	DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc
+}
+
+func (s *DataframeEvaluationClassificationMetricsAucRoc) DataframeEvaluationClassificationMetricsAucRocCaster() *DataframeEvaluationClassificationMetricsAucRoc {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationcontainer.go
new file mode 100644
index 000000000..6aff3a78a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationcontainer.go
@@ -0,0 +1,89 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// DataframeEvaluationContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L25-L33
+type DataframeEvaluationContainer struct {
+	AdditionalDataframeEvaluationContainerProperty map[string]json.RawMessage `json:"-"`
+	// Classification Classification evaluation evaluates the results of a classification analysis
+	// which outputs a prediction that identifies to which of the classes each
+	// document belongs.
+	Classification *DataframeEvaluationClassification `json:"classification,omitempty"`
+	// OutlierDetection Outlier detection evaluates the results of an outlier detection analysis
+	// which outputs the probability that each document is an outlier.
+	OutlierDetection *DataframeEvaluationOutlierDetection `json:"outlier_detection,omitempty"`
+	// Regression Regression evaluation evaluates the results of a regression analysis which
+	// outputs a prediction of values.
+	Regression *DataframeEvaluationRegression `json:"regression,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DataframeEvaluationContainer) MarshalJSON() ([]byte, error) {
+	type opt DataframeEvaluationContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalDataframeEvaluationContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalDataframeEvaluationContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDataframeEvaluationContainer returns a DataframeEvaluationContainer.
+func NewDataframeEvaluationContainer() *DataframeEvaluationContainer {
+	r := &DataframeEvaluationContainer{
+		AdditionalDataframeEvaluationContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type DataframeEvaluationContainerVariant interface {
+	DataframeEvaluationContainerCaster() *DataframeEvaluationContainer
+}
+
+func (s *DataframeEvaluationContainer) DataframeEvaluationContainerCaster() *DataframeEvaluationContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetection.go
new file mode 100644
index 000000000..8417cbdf6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetection.go
@@ -0,0 +1,95 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// DataframeEvaluationOutlierDetection type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L46-L53
+type DataframeEvaluationOutlierDetection struct {
+	// ActualField The field of the index which contains the ground truth. The data type of this
+	// field can be boolean or integer. If the data type is integer, the value has
+	// to be either 0 (false) or 1 (true).
+	ActualField string `json:"actual_field"`
+	// Metrics Specifies the metrics that are used for the evaluation.
+	Metrics *DataframeEvaluationOutlierDetectionMetrics `json:"metrics,omitempty"`
+	// PredictedProbabilityField The field of the index that defines the probability of whether the item
+	// belongs to the class in question or not. It’s the field that contains the
+	// results of the analysis.
+	PredictedProbabilityField string `json:"predicted_probability_field"`
+}
+
+func (s *DataframeEvaluationOutlierDetection) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "actual_field":
+			if err := dec.Decode(&s.ActualField); err != nil {
+				return fmt.Errorf("%s | %w", "ActualField", err)
+			}
+
+		case "metrics":
+			if err := dec.Decode(&s.Metrics); err != nil {
+				return fmt.Errorf("%s | %w", "Metrics", err)
+			}
+
+		case "predicted_probability_field":
+			if err := dec.Decode(&s.PredictedProbabilityField); err != nil {
+				return fmt.Errorf("%s | %w", "PredictedProbabilityField", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationOutlierDetection returns a DataframeEvaluationOutlierDetection.
+func NewDataframeEvaluationOutlierDetection() *DataframeEvaluationOutlierDetection {
+	r := &DataframeEvaluationOutlierDetection{}
+
+	return r
+}
+
+type DataframeEvaluationOutlierDetectionVariant interface {
+	DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection
+}
+
+func (s *DataframeEvaluationOutlierDetection) DataframeEvaluationOutlierDetectionCaster() *DataframeEvaluationOutlierDetection {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go
new file mode 100644
index 000000000..d6f24ad37
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+)
+
+// DataframeEvaluationOutlierDetectionMetrics type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L80-L83
+type DataframeEvaluationOutlierDetectionMetrics struct {
+	// AucRoc The AUC ROC (area under the curve of the receiver operating characteristic)
+	// score and optionally the curve. It is calculated for a specific class
+	// (provided as "class_name") treated as positive.
+	AucRoc *DataframeEvaluationClassificationMetricsAucRoc `json:"auc_roc,omitempty"`
+	// ConfusionMatrix Set the different thresholds of the outlier score at where the metric is
+	// calculated.
+	ConfusionMatrix map[string]json.RawMessage `json:"confusion_matrix,omitempty"`
+	// Precision Precision of predictions (per-class and average).
+	Precision map[string]json.RawMessage `json:"precision,omitempty"`
+	// Recall Recall of predictions (per-class and average).
+	Recall map[string]json.RawMessage `json:"recall,omitempty"`
+}
+
+// NewDataframeEvaluationOutlierDetectionMetrics returns a DataframeEvaluationOutlierDetectionMetrics.
+func NewDataframeEvaluationOutlierDetectionMetrics() *DataframeEvaluationOutlierDetectionMetrics {
+	r := &DataframeEvaluationOutlierDetectionMetrics{
+		ConfusionMatrix: make(map[string]json.RawMessage),
+		Precision: make(map[string]json.RawMessage),
+		Recall: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type DataframeEvaluationOutlierDetectionMetricsVariant interface {
+	DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics
+}
+
+func (s *DataframeEvaluationOutlierDetectionMetrics) DataframeEvaluationOutlierDetectionMetricsCaster() *DataframeEvaluationOutlierDetectionMetrics {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregression.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregression.go
new file mode 100644
index 000000000..fa48f50a1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregression.go
@@ -0,0 +1,95 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// DataframeEvaluationRegression type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L55-L62
+type DataframeEvaluationRegression struct {
+	// ActualField The field of the index which contains the ground truth. The data type of this
+	// field must be numerical.
+	ActualField string `json:"actual_field"`
+	// Metrics Specifies the metrics that are used for the evaluation. For more information
+	// on mse, msle, and huber, consult the Jupyter notebook on regression loss
+	// functions.
+	Metrics *DataframeEvaluationRegressionMetrics `json:"metrics,omitempty"`
+	// PredictedField The field in the index that contains the predicted value, in other words the
+	// results of the regression analysis.
+	PredictedField string `json:"predicted_field"`
+}
+
+func (s *DataframeEvaluationRegression) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "actual_field":
+			if err := dec.Decode(&s.ActualField); err != nil {
+				return fmt.Errorf("%s | %w", "ActualField", err)
+			}
+
+		case "metrics":
+			if err := dec.Decode(&s.Metrics); err != nil {
+				return fmt.Errorf("%s | %w", "Metrics", err)
+			}
+
+		case "predicted_field":
+			if err := dec.Decode(&s.PredictedField); err != nil {
+				return fmt.Errorf("%s | %w", "PredictedField", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationRegression returns a DataframeEvaluationRegression.
+func NewDataframeEvaluationRegression() *DataframeEvaluationRegression {
+	r := &DataframeEvaluationRegression{}
+
+	return r
+}
+
+type DataframeEvaluationRegressionVariant interface {
+	DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression
+}
+
+func (s *DataframeEvaluationRegression) DataframeEvaluationRegressionCaster() *DataframeEvaluationRegression {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetrics.go
new file mode 100644
index 000000000..98674875f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetrics.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+)
+
+// DataframeEvaluationRegressionMetrics type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L92-L110
+type DataframeEvaluationRegressionMetrics struct {
+	// Huber Pseudo Huber loss function.
+	Huber *DataframeEvaluationRegressionMetricsHuber `json:"huber,omitempty"`
+	// Mse Average squared difference between the predicted values and the actual
+	// (ground truth) value. For more information, read this wiki article.
+	Mse map[string]json.RawMessage `json:"mse,omitempty"`
+	// Msle Average squared difference between the logarithm of the predicted values and
+	// the logarithm of the actual (ground truth) value.
+	Msle *DataframeEvaluationRegressionMetricsMsle `json:"msle,omitempty"`
+	// RSquared Proportion of the variance in the dependent variable that is predictable from
+	// the independent variables.
+	RSquared map[string]json.RawMessage `json:"r_squared,omitempty"`
+}
+
+// NewDataframeEvaluationRegressionMetrics returns a DataframeEvaluationRegressionMetrics.
+func NewDataframeEvaluationRegressionMetrics() *DataframeEvaluationRegressionMetrics {
+	r := &DataframeEvaluationRegressionMetrics{
+		Mse: make(map[string]json.RawMessage),
+		RSquared: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type DataframeEvaluationRegressionMetricsVariant interface {
+	DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics
+}
+
+func (s *DataframeEvaluationRegressionMetrics) DataframeEvaluationRegressionMetricsCaster() *DataframeEvaluationRegressionMetrics {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricshuber.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricshuber.go
new file mode 100644
index 000000000..4f9ca803c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricshuber.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeEvaluationRegressionMetricsHuber type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L117-L120
+type DataframeEvaluationRegressionMetricsHuber struct {
+	// Delta Approximates 1/2 (prediction - actual)² for values much less than delta and
+	// approximates a straight line with slope delta for values much larger than
+	// delta. Defaults to 1. Delta needs to be greater than 0.
+	Delta *Float64 `json:"delta,omitempty"`
+}
+
+func (s *DataframeEvaluationRegressionMetricsHuber) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "delta":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Delta", err)
+				}
+				f := Float64(value)
+				s.Delta = &f
+			case float64:
+				f := Float64(v)
+				s.Delta = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationRegressionMetricsHuber returns a DataframeEvaluationRegressionMetricsHuber.
+func NewDataframeEvaluationRegressionMetricsHuber() *DataframeEvaluationRegressionMetricsHuber {
+	r := &DataframeEvaluationRegressionMetricsHuber{}
+
+	return r
+}
+
+type DataframeEvaluationRegressionMetricsHuberVariant interface {
+	DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber
+}
+
+func (s *DataframeEvaluationRegressionMetricsHuber) DataframeEvaluationRegressionMetricsHuberCaster() *DataframeEvaluationRegressionMetricsHuber {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricsmsle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricsmsle.go
new file mode 100644
index 000000000..07af8d1b1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationregressionmetricsmsle.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataframeEvaluationRegressionMetricsMsle type.
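The `delta` comment above describes the pseudo-Huber loss. As a rough, self-contained sketch of the function that parameter shapes (illustrative only, not part of the vendored client; the standard pseudo-Huber form is assumed here, matching the behavior the comment describes):

package main

import (
	"fmt"
	"math"
)

// pseudoHuber behaves like 0.5*d*d for |d| much smaller than delta and like
// a straight line with slope delta for |d| much larger than delta.
func pseudoHuber(prediction, actual, delta float64) float64 {
	d := (prediction - actual) / delta
	return delta * delta * (math.Sqrt(1+d*d) - 1)
}

func main() {
	fmt.Printf("%.3f\n", pseudoHuber(1.1, 1.0, 1.0)) // near-quadratic regime: ~0.005
	fmt.Printf("%.3f\n", pseudoHuber(4.0, 1.0, 1.0)) // near-linear regime: ~2.162
}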
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeEvaluation.ts#L112-L115 +type DataframeEvaluationRegressionMetricsMsle struct { + // Offset Defines the transition point at which you switch from minimizing quadratic + // error to minimizing quadratic log error. Defaults to 1. + Offset *Float64 `json:"offset,omitempty"` +} + +func (s *DataframeEvaluationRegressionMetricsMsle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "offset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f + } + + } + } + return nil +} + +// NewDataframeEvaluationRegressionMetricsMsle returns a DataframeEvaluationRegressionMetricsMsle. +func NewDataframeEvaluationRegressionMetricsMsle() *DataframeEvaluationRegressionMetricsMsle { + r := &DataframeEvaluationRegressionMetricsMsle{} + + return r +} + +type DataframeEvaluationRegressionMetricsMsleVariant interface { + DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle +} + +func (s *DataframeEvaluationRegressionMetricsMsle) DataframeEvaluationRegressionMetricsMsleCaster() *DataframeEvaluationRegressionMetricsMsle { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroc.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroc.go new file mode 100644 index 000000000..330981611 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroc.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeEvaluationSummaryAucRoc type. 
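Wiring the regression-evaluation types from the files above together; a hedged sketch in which the field names and metric values are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	delta := types.Float64(1.35) // hypothetical Huber delta
	offset := types.Float64(10)  // hypothetical MSLE offset

	eval := types.DataframeEvaluationRegression{
		ActualField:    "price",               // hypothetical ground-truth field
		PredictedField: "ml.price_prediction", // hypothetical results field
		Metrics: &types.DataframeEvaluationRegressionMetrics{
			Huber: &types.DataframeEvaluationRegressionMetricsHuber{Delta: &delta},
			Msle:  &types.DataframeEvaluationRegressionMetricsMsle{Offset: &offset},
		},
	}

	body, _ := json.Marshal(eval)
	fmt.Println(string(body))
}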
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L91-L93 +type DataframeEvaluationSummaryAucRoc struct { + Curve []DataframeEvaluationSummaryAucRocCurveItem `json:"curve,omitempty"` + Value Float64 `json:"value"` +} + +func (s *DataframeEvaluationSummaryAucRoc) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "curve": + if err := dec.Decode(&s.Curve); err != nil { + return fmt.Errorf("%s | %w", "Curve", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + +// NewDataframeEvaluationSummaryAucRoc returns a DataframeEvaluationSummaryAucRoc. +func NewDataframeEvaluationSummaryAucRoc() *DataframeEvaluationSummaryAucRoc { + r := &DataframeEvaluationSummaryAucRoc{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go new file mode 100644 index 000000000..24cd2724f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeEvaluationSummaryAucRocCurveItem type. 
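The summary type above optionally carries the ROC curve itself. A small sketch of consuming it, here picking the threshold that maximizes Youden's J statistic (tpr minus fpr); the curve values are invented:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// bestThreshold returns the threshold of the curve point whose tpr - fpr gap
// (Youden's J statistic) is largest.
func bestThreshold(curve []types.DataframeEvaluationSummaryAucRocCurveItem) types.Float64 {
	best, bestJ := types.Float64(0), types.Float64(-1)
	for _, p := range curve {
		if j := p.Tpr - p.Fpr; j > bestJ {
			bestJ, best = j, p.Threshold
		}
	}
	return best
}

func main() {
	roc := types.DataframeEvaluationSummaryAucRoc{
		Value: 0.94, // hypothetical AUC
		Curve: []types.DataframeEvaluationSummaryAucRocCurveItem{
			{Fpr: 0.05, Tpr: 0.60, Threshold: 0.9},
			{Fpr: 0.20, Tpr: 0.92, Threshold: 0.5},
			{Fpr: 0.60, Tpr: 0.98, Threshold: 0.1},
		},
	}
	fmt.Println(bestThreshold(roc.Curve)) // 0.5
}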
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L95-L99 +type DataframeEvaluationSummaryAucRocCurveItem struct { + Fpr Float64 `json:"fpr"` + Threshold Float64 `json:"threshold"` + Tpr Float64 `json:"tpr"` +} + +func (s *DataframeEvaluationSummaryAucRocCurveItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fpr": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Fpr", err) + } + f := Float64(value) + s.Fpr = f + case float64: + f := Float64(v) + s.Fpr = f + } + + case "threshold": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Threshold", err) + } + f := Float64(value) + s.Threshold = f + case float64: + f := Float64(v) + s.Threshold = f + } + + case "tpr": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Tpr", err) + } + f := Float64(value) + s.Tpr = f + case float64: + f := Float64(v) + s.Tpr = f + } + + } + } + return nil +} + +// NewDataframeEvaluationSummaryAucRocCurveItem returns a DataframeEvaluationSummaryAucRocCurveItem. +func NewDataframeEvaluationSummaryAucRocCurveItem() *DataframeEvaluationSummaryAucRocCurveItem { + r := &DataframeEvaluationSummaryAucRocCurveItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationvalue.go new file mode 100644 index 000000000..df0818bc1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeevaluationvalue.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframeEvaluationValue type. 
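The curve-item decoder above tolerates numeric fields that arrive either as JSON numbers or as strings, which the following sketch demonstrates (the payload values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "tpr" is sent as a string here; the custom UnmarshalJSON falls back to
	// strconv.ParseFloat, so it decodes the same as a bare number.
	payload := []byte(`{"fpr":0.25,"tpr":"0.90","threshold":0.5}`)

	var item types.DataframeEvaluationSummaryAucRocCurveItem
	if err := json.Unmarshal(payload, &item); err != nil {
		panic(err)
	}
	fmt.Println(item.Fpr, item.Tpr, item.Threshold) // 0.25 0.9 0.5
}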
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L87-L89
+type DataframeEvaluationValue struct {
+	Value Float64 `json:"value"`
+}
+
+func (s *DataframeEvaluationValue) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "value":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Value", err)
+				}
+				f := Float64(value)
+				s.Value = f
+			case float64:
+				f := Float64(v)
+				s.Value = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewDataframeEvaluationValue returns a DataframeEvaluationValue.
+func NewDataframeEvaluationValue() *DataframeEvaluationValue {
+	r := &DataframeEvaluationValue{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeoutlierdetectionsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeoutlierdetectionsummary.go
new file mode 100644
index 000000000..f92de2958
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeoutlierdetectionsummary.go
@@ -0,0 +1,51 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// DataframeOutlierDetectionSummary type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L24-L42
+type DataframeOutlierDetectionSummary struct {
+	// AucRoc The AUC ROC (area under the curve of the receiver operating characteristic)
+	// score and optionally the curve.
+	AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"`
+	// ConfusionMatrix Set the different thresholds of the outlier score at which the metrics (`tp`
+	// - true positive, `fp` - false positive, `tn` - true negative, `fn` - false
+	// negative) are calculated.
+	ConfusionMatrix map[string]ConfusionMatrixThreshold `json:"confusion_matrix,omitempty"`
+	// Precision Set the different thresholds of the outlier score at which the metric is
+	// calculated.
+	Precision map[string]Float64 `json:"precision,omitempty"`
+	// Recall Set the different thresholds of the outlier score at which the metric is
+	// calculated.
+	Recall map[string]Float64 `json:"recall,omitempty"`
+}
+
+// NewDataframeOutlierDetectionSummary returns a DataframeOutlierDetectionSummary.
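A hedged sketch of decoding per-threshold metrics into the summary type above; the thresholds and scores are invented, and it assumes the package-level `Float64` decodes plain JSON numbers as usual:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Thresholds arrive as object keys ("0.25", "0.5"); the values are made up.
	body := []byte(`{"precision":{"0.25":0.81,"0.5":0.90},"recall":{"0.25":0.70,"0.5":0.62}}`)

	summary := types.NewDataframeOutlierDetectionSummary()
	if err := json.Unmarshal(body, summary); err != nil {
		panic(err)
	}
	fmt.Println(summary.Precision["0.5"], summary.Recall["0.5"]) // 0.9 0.62
}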
+func NewDataframeOutlierDetectionSummary() *DataframeOutlierDetectionSummary { + r := &DataframeOutlierDetectionSummary{ + ConfusionMatrix: make(map[string]ConfusionMatrixThreshold), + Precision: make(map[string]Float64), + Recall: make(map[string]Float64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframepreviewconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframepreviewconfig.go new file mode 100644 index 000000000..30d69359e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframepreviewconfig.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataframePreviewConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 +type DataframePreviewConfig struct { + Analysis DataframeAnalysisContainer `json:"analysis"` + AnalyzedFields *DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` + MaxNumThreads *int `json:"max_num_threads,omitempty"` + ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` + Source DataframeAnalyticsSource `json:"source"` +} + +func (s *DataframePreviewConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return fmt.Errorf("%s | %w", "Analysis", err) + } + + case "analyzed_fields": + if err := dec.Decode(&s.AnalyzedFields); err != nil { + return fmt.Errorf("%s | %w", "AnalyzedFields", err) + } + + case "max_num_threads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumThreads", err) + } + s.MaxNumThreads = &value + case float64: + f := int(v) + s.MaxNumThreads = &f + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + } + } + return nil +} + +// NewDataframePreviewConfig returns a 
DataframePreviewConfig. +func NewDataframePreviewConfig() *DataframePreviewConfig { + r := &DataframePreviewConfig{} + + return r +} + +type DataframePreviewConfigVariant interface { + DataframePreviewConfigCaster() *DataframePreviewConfig +} + +func (s *DataframePreviewConfig) DataframePreviewConfigCaster() *DataframePreviewConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeregressionsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeregressionsummary.go new file mode 100644 index 000000000..3bffd8edc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dataframeregressionsummary.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DataframeRegressionSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/evaluate_data_frame/types.ts#L68-L85 +type DataframeRegressionSummary struct { + // Huber Pseudo Huber loss function. + Huber *DataframeEvaluationValue `json:"huber,omitempty"` + // Mse Average squared difference between the predicted values and the actual + // (`ground truth`) value. + Mse *DataframeEvaluationValue `json:"mse,omitempty"` + // Msle Average squared difference between the logarithm of the predicted values and + // the logarithm of the actual (`ground truth`) value. + Msle *DataframeEvaluationValue `json:"msle,omitempty"` + // RSquared Proportion of the variance in the dependent variable that is predictable from + // the independent variables. + RSquared *DataframeEvaluationValue `json:"r_squared,omitempty"` +} + +// NewDataframeRegressionSummary returns a DataframeRegressionSummary. +func NewDataframeRegressionSummary() *DataframeRegressionSummary { + r := &DataframeRegressionSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datapathstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datapathstats.go new file mode 100644 index 000000000..23ce741c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datapathstats.go @@ -0,0 +1,303 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DataPathStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L588-L632
+type DataPathStats struct {
+	// Available Total amount of disk space available to this Java virtual machine on this
+	// file store.
+	Available *string `json:"available,omitempty"`
+	// AvailableInBytes Total number of bytes available to this Java virtual machine on this file
+	// store.
+	AvailableInBytes     *int64  `json:"available_in_bytes,omitempty"`
+	DiskQueue            *string `json:"disk_queue,omitempty"`
+	DiskReadSize         *string `json:"disk_read_size,omitempty"`
+	DiskReadSizeInBytes  *int64  `json:"disk_read_size_in_bytes,omitempty"`
+	DiskReads            *int64  `json:"disk_reads,omitempty"`
+	DiskWriteSize        *string `json:"disk_write_size,omitempty"`
+	DiskWriteSizeInBytes *int64  `json:"disk_write_size_in_bytes,omitempty"`
+	DiskWrites           *int64  `json:"disk_writes,omitempty"`
+	// Free Total amount of unallocated disk space in the file store.
+	Free *string `json:"free,omitempty"`
+	// FreeInBytes Total number of unallocated bytes in the file store.
+	FreeInBytes *int64 `json:"free_in_bytes,omitempty"`
+	// Mount Mount point of the file store (for example: `/dev/sda2`).
+	Mount *string `json:"mount,omitempty"`
+	// Path Path to the file store.
+	Path *string `json:"path,omitempty"`
+	// Total Total size of the file store.
+	Total *string `json:"total,omitempty"`
+	// TotalInBytes Total size of the file store in bytes.
+	TotalInBytes *int64 `json:"total_in_bytes,omitempty"`
+	// Type Type of the file store (for example: `ext4`).
+ Type *string `json:"type,omitempty"` +} + +func (s *DataPathStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Available = &o + + case "available_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvailableInBytes", err) + } + s.AvailableInBytes = &value + case float64: + f := int64(v) + s.AvailableInBytes = &f + } + + case "disk_queue": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DiskQueue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DiskQueue = &o + + case "disk_read_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DiskReadSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DiskReadSize = &o + + case "disk_read_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DiskReadSizeInBytes", err) + } + s.DiskReadSizeInBytes = &value + case float64: + f := int64(v) + s.DiskReadSizeInBytes = &f + } + + case "disk_reads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DiskReads", err) + } + s.DiskReads = &value + case float64: + f := int64(v) + s.DiskReads = &f + } + + case "disk_write_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DiskWriteSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DiskWriteSize = &o + + case "disk_write_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DiskWriteSizeInBytes", err) + } + s.DiskWriteSizeInBytes = &value + case float64: + f := int64(v) + s.DiskWriteSizeInBytes = &f + } + + case "disk_writes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DiskWrites", err) + } + s.DiskWrites = &value + case float64: + f := int64(v) + s.DiskWrites = &f + } + + case "free": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Free", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Free = &o + + case "free_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeInBytes", err) + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "mount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return 
fmt.Errorf("%s | %w", "Mount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mount = &o + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = &o + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewDataPathStats returns a DataPathStats. +func NewDataPathStats() *DataPathStats { + r := &DataPathStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastream.go new file mode 100644 index 000000000..22680ebfe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastream.go @@ -0,0 +1,291 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby" +) + +// DataStream type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L54-L145 +type DataStream struct { + // AllowCustomRouting If `true`, the data stream allows custom routing on write request. + AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + // FailureStore Information about failure store backing indices + FailureStore *FailureStore `json:"failure_store,omitempty"` + // Generation Current generation for the data stream. 
This number acts as a cumulative + // count of the stream’s rollovers, starting at 1. + Generation int `json:"generation"` + // Hidden If `true`, the data stream is hidden. + Hidden bool `json:"hidden"` + // IlmPolicy Name of the current ILM lifecycle policy in the stream’s matching index + // template. + // This lifecycle policy is set in the `index.lifecycle.name` setting. + // If the template does not include a lifecycle policy, this property is not + // included in the response. + // NOTE: A data stream’s backing indices may be assigned different lifecycle + // policies. To retrieve the lifecycle policy for individual backing indices, + // use the get index settings API. + IlmPolicy *string `json:"ilm_policy,omitempty"` + // IndexMode The index mode for the data stream that will be used for newly created + // backing indices. + IndexMode *indexmode.IndexMode `json:"index_mode,omitempty"` + // Indices Array of objects containing information about the data stream’s backing + // indices. + // The last item in this array contains information about the stream’s current + // write index. + Indices []DataStreamIndex `json:"indices"` + // Lifecycle Contains the configuration for the data stream lifecycle of this data stream. + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + // Meta_ Custom metadata for the stream, copied from the `_meta` object of the + // stream’s matching index template. + // If empty, the response omits this property. + Meta_ Metadata `json:"_meta,omitempty"` + // Name Name of the data stream. + Name string `json:"name"` + // NextGenerationManagedBy Name of the lifecycle system that'll manage the next generation of the data + // stream. + NextGenerationManagedBy managedby.ManagedBy `json:"next_generation_managed_by"` + // PreferIlm Indicates if ILM should take precedence over DSL in case both are configured + // to managed this data stream. + PreferIlm bool `json:"prefer_ilm"` + // Replicated If `true`, the data stream is created and managed by cross-cluster + // replication and the local cluster can not write into this data stream or + // change its mappings. + Replicated *bool `json:"replicated,omitempty"` + // RolloverOnWrite If `true`, the next write to this data stream will trigger a rollover first + // and the document will be indexed in the new backing index. If the rollover + // fails the indexing request will fail too. + RolloverOnWrite bool `json:"rollover_on_write"` + // Settings The settings specific to this data stream that will take precedence over the + // settings in the matching index + // template. + Settings IndexSettings `json:"settings"` + // Status Health status of the data stream. + // This health status is based on the state of the primary and replica shards of + // the stream’s backing indices. + Status healthstatus.HealthStatus `json:"status"` + // System If `true`, the data stream is created and managed by an Elastic stack + // component and cannot be modified through normal user interaction. + System *bool `json:"system,omitempty"` + // Template Name of the index template used to create the data stream’s backing indices. + // The template’s index pattern must match the name of this data stream. + Template string `json:"template"` + // TimestampField Information about the `@timestamp` field in the data stream. 
+ TimestampField DataStreamTimestampField `json:"timestamp_field"` +} + +func (s *DataStream) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_custom_routing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowCustomRouting", err) + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + + case "generation": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Generation", err) + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Hidden", err) + } + s.Hidden = value + case bool: + s.Hidden = v + } + + case "ilm_policy": + if err := dec.Decode(&s.IlmPolicy); err != nil { + return fmt.Errorf("%s | %w", "IlmPolicy", err) + } + + case "index_mode": + if err := dec.Decode(&s.IndexMode); err != nil { + return fmt.Errorf("%s | %w", "IndexMode", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "next_generation_managed_by": + if err := dec.Decode(&s.NextGenerationManagedBy); err != nil { + return fmt.Errorf("%s | %w", "NextGenerationManagedBy", err) + } + + case "prefer_ilm": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PreferIlm", err) + } + s.PreferIlm = value + case bool: + s.PreferIlm = v + } + + case "replicated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Replicated", err) + } + s.Replicated = &value + case bool: + s.Replicated = &v + } + + case "rollover_on_write": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RolloverOnWrite", err) + } + s.RolloverOnWrite = value + case bool: + s.RolloverOnWrite = v + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "system": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "System", err) + } + s.System = &value + case bool: + s.System = &v + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + 
return fmt.Errorf("%s | %w", "Template", err) + } + + case "timestamp_field": + if err := dec.Decode(&s.TimestampField); err != nil { + return fmt.Errorf("%s | %w", "TimestampField", err) + } + + } + } + return nil +} + +// NewDataStream returns a DataStream. +func NewDataStream() *DataStream { + r := &DataStream{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestore.go new file mode 100644 index 000000000..02f73f903 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestore.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamFailureStore type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamFailureStore.ts#L22-L37 +type DataStreamFailureStore struct { + // Enabled If defined, it turns the failure store on/off (`true`/`false`) for this data + // stream. A data stream failure store + // that's disabled (enabled: `false`) will redirect no new failed indices to the + // failure store; however, it will + // not remove any existing data from the failure store. + Enabled *bool `json:"enabled,omitempty"` + // Lifecycle If defined, it specifies the lifecycle configuration for the failure store of + // this data stream. + Lifecycle *FailureStoreLifecycle `json:"lifecycle,omitempty"` +} + +func (s *DataStreamFailureStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + } + } + return nil +} + +// NewDataStreamFailureStore returns a DataStreamFailureStore. 
+func NewDataStreamFailureStore() *DataStreamFailureStore { + r := &DataStreamFailureStore{} + + return r +} + +type DataStreamFailureStoreVariant interface { + DataStreamFailureStoreCaster() *DataStreamFailureStore +} + +func (s *DataStreamFailureStore) DataStreamFailureStoreCaster() *DataStreamFailureStore { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestoretemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestoretemplate.go new file mode 100644 index 000000000..63ac6bd23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamfailurestoretemplate.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataStreamFailureStoreTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamFailureStore.ts#L39-L54 +type DataStreamFailureStoreTemplate struct { + // Enabled If defined, it turns the failure store on/off (`true`/`false`) for this data + // stream. A data stream failure store + // that's disabled (enabled: `false`) will redirect no new failed indices to the + // failure store; however, it will + // not remove any existing data from the failure store. + Enabled *bool `json:"enabled,omitempty"` + // Lifecycle If defined, it specifies the lifecycle configuration for the failure store of + // this data stream. + Lifecycle *FailureStoreLifecycleTemplate `json:"lifecycle,omitempty"` +} + +func (s *DataStreamFailureStoreTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + if err := dec.Decode(&s.Enabled); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + } + } + return nil +} + +// NewDataStreamFailureStoreTemplate returns a DataStreamFailureStoreTemplate. 
+func NewDataStreamFailureStoreTemplate() *DataStreamFailureStoreTemplate { + r := &DataStreamFailureStoreTemplate{} + + return r +} + +type DataStreamFailureStoreTemplateVariant interface { + DataStreamFailureStoreTemplateCaster() *DataStreamFailureStoreTemplate +} + +func (s *DataStreamFailureStoreTemplate) DataStreamFailureStoreTemplateCaster() *DataStreamFailureStoreTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamindex.go new file mode 100644 index 000000000..9d58fe00e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamindex.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby" +) + +// DataStreamIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L154-L179 +type DataStreamIndex struct { + // IlmPolicy Name of the current ILM lifecycle policy configured for this backing index. + IlmPolicy *string `json:"ilm_policy,omitempty"` + // IndexMode The index mode of this backing index of the data stream. + IndexMode *indexmode.IndexMode `json:"index_mode,omitempty"` + // IndexName Name of the backing index. + IndexName string `json:"index_name"` + // IndexUuid Universally unique identifier (UUID) for the index. + IndexUuid string `json:"index_uuid"` + // ManagedBy Name of the lifecycle system that's currently managing this backing index. + ManagedBy *managedby.ManagedBy `json:"managed_by,omitempty"` + // PreferIlm Indicates if ILM should take precedence over DSL in case both are configured + // to manage this index. 
+ PreferIlm *bool `json:"prefer_ilm,omitempty"` +} + +func (s *DataStreamIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ilm_policy": + if err := dec.Decode(&s.IlmPolicy); err != nil { + return fmt.Errorf("%s | %w", "IlmPolicy", err) + } + + case "index_mode": + if err := dec.Decode(&s.IndexMode); err != nil { + return fmt.Errorf("%s | %w", "IndexMode", err) + } + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "index_uuid": + if err := dec.Decode(&s.IndexUuid); err != nil { + return fmt.Errorf("%s | %w", "IndexUuid", err) + } + + case "managed_by": + if err := dec.Decode(&s.ManagedBy); err != nil { + return fmt.Errorf("%s | %w", "ManagedBy", err) + } + + case "prefer_ilm": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PreferIlm", err) + } + s.PreferIlm = &value + case bool: + s.PreferIlm = &v + } + + } + } + return nil +} + +// NewDataStreamIndex returns a DataStreamIndex. +func NewDataStreamIndex() *DataStreamIndex { + r := &DataStreamIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycle.go new file mode 100644 index 000000000..509cd434e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycle.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamLifecycle.ts#L25-L45 +type DataStreamLifecycle struct { + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. 
+ Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` +} + +func (s *DataStreamLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return fmt.Errorf("%s | %w", "Downsampling", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} + +// NewDataStreamLifecycle returns a DataStreamLifecycle. +func NewDataStreamLifecycle() *DataStreamLifecycle { + r := &DataStreamLifecycle{} + + return r +} + +type DataStreamLifecycleVariant interface { + DataStreamLifecycleCaster() *DataStreamLifecycle +} + +func (s *DataStreamLifecycle) DataStreamLifecycleCaster() *DataStreamLifecycle { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledetails.go new file mode 100644 index 000000000..e706ad48c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledetails.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycleDetails type. 
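A minimal sketch of building and serializing the lifecycle type above, assuming the package's `Duration` union accepts an Elasticsearch time-unit string such as "7d":

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	enabled := true
	lifecycle := types.DataStreamLifecycle{
		DataRetention: "7d", // assumption: Duration union holding a time-unit string
		Enabled:       &enabled,
	}

	body, _ := json.Marshal(lifecycle)
	fmt.Println(string(body)) // {"data_retention":"7d","enabled":true}
}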
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L154-L158 +type DataStreamLifecycleDetails struct { + StagnatingBackingIndices []StagnatingBackingIndices `json:"stagnating_backing_indices,omitempty"` + StagnatingBackingIndicesCount int `json:"stagnating_backing_indices_count"` + TotalBackingIndicesInError int `json:"total_backing_indices_in_error"` +} + +func (s *DataStreamLifecycleDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stagnating_backing_indices": + if err := dec.Decode(&s.StagnatingBackingIndices); err != nil { + return fmt.Errorf("%s | %w", "StagnatingBackingIndices", err) + } + + case "stagnating_backing_indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "StagnatingBackingIndicesCount", err) + } + s.StagnatingBackingIndicesCount = value + case float64: + f := int(v) + s.StagnatingBackingIndicesCount = f + } + + case "total_backing_indices_in_error": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalBackingIndicesInError", err) + } + s.TotalBackingIndicesInError = value + case float64: + f := int(v) + s.TotalBackingIndicesInError = f + } + + } + } + return nil +} + +// NewDataStreamLifecycleDetails returns a DataStreamLifecycleDetails. +func NewDataStreamLifecycleDetails() *DataStreamLifecycleDetails { + r := &DataStreamLifecycleDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledownsampling.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledownsampling.go new file mode 100644 index 000000000..158b2774c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycledownsampling.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DataStreamLifecycleDownsampling type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 +type DataStreamLifecycleDownsampling struct { + // Rounds The list of downsampling rounds to execute as part of this downsampling + // configuration + Rounds []DownsamplingRound `json:"rounds"` +} + +// NewDataStreamLifecycleDownsampling returns a DataStreamLifecycleDownsampling. +func NewDataStreamLifecycleDownsampling() *DataStreamLifecycleDownsampling { + r := &DataStreamLifecycleDownsampling{} + + return r +} + +type DataStreamLifecycleDownsamplingVariant interface { + DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling +} + +func (s *DataStreamLifecycleDownsampling) DataStreamLifecycleDownsamplingCaster() *DataStreamLifecycleDownsampling { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleexplain.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleexplain.go new file mode 100644 index 000000000..e9a55bd0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleexplain.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycleExplain type. 
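+//
+// Decoding sketch (illustrative payload; the unmarshaler below also accepts
+// boolean fields encoded as strings):
+//
+//	var e DataStreamLifecycleExplain
+//	raw := []byte(`{"index": ".ds-my-stream-000001", "managed_by_lifecycle": "true"}`)
+//	if err := json.Unmarshal(raw, &e); err != nil {
+//		// handle decode error
+//	}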
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 +type DataStreamLifecycleExplain struct { + Error *string `json:"error,omitempty"` + GenerationTime Duration `json:"generation_time,omitempty"` + Index string `json:"index"` + IndexCreationDateMillis *int64 `json:"index_creation_date_millis,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + ManagedByLifecycle bool `json:"managed_by_lifecycle"` + RolloverDateMillis *int64 `json:"rollover_date_millis,omitempty"` + TimeSinceIndexCreation Duration `json:"time_since_index_creation,omitempty"` + TimeSinceRollover Duration `json:"time_since_rollover,omitempty"` +} + +func (s *DataStreamLifecycleExplain) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "generation_time": + if err := dec.Decode(&s.GenerationTime); err != nil { + return fmt.Errorf("%s | %w", "GenerationTime", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "index_creation_date_millis": + if err := dec.Decode(&s.IndexCreationDateMillis); err != nil { + return fmt.Errorf("%s | %w", "IndexCreationDateMillis", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "managed_by_lifecycle": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ManagedByLifecycle", err) + } + s.ManagedByLifecycle = value + case bool: + s.ManagedByLifecycle = v + } + + case "rollover_date_millis": + if err := dec.Decode(&s.RolloverDateMillis); err != nil { + return fmt.Errorf("%s | %w", "RolloverDateMillis", err) + } + + case "time_since_index_creation": + if err := dec.Decode(&s.TimeSinceIndexCreation); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceIndexCreation", err) + } + + case "time_since_rollover": + if err := dec.Decode(&s.TimeSinceRollover); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceRollover", err) + } + + } + } + return nil +} + +// NewDataStreamLifecycleExplain returns a DataStreamLifecycleExplain. +func NewDataStreamLifecycleExplain() *DataStreamLifecycleExplain { + r := &DataStreamLifecycleExplain{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleindicator.go new file mode 100644 index 000000000..11182d937 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecycleindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// DataStreamLifecycleIndicator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L149-L153 +type DataStreamLifecycleIndicator struct { + Details *DataStreamLifecycleDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *DataStreamLifecycleIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewDataStreamLifecycleIndicator returns a DataStreamLifecycleIndicator. +func NewDataStreamLifecycleIndicator() *DataStreamLifecycleIndicator { + r := &DataStreamLifecycleIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclerolloverconditions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclerolloverconditions.go new file mode 100644 index 000000000..532505cd9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclerolloverconditions.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycleRolloverConditions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamLifecycle.ts#L60-L72 +type DataStreamLifecycleRolloverConditions struct { + MaxAge *string `json:"max_age,omitempty"` + MaxDocs *int64 `json:"max_docs,omitempty"` + MaxPrimaryShardDocs *int64 `json:"max_primary_shard_docs,omitempty"` + MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` + MaxSize ByteSize `json:"max_size,omitempty"` + MinAge Duration `json:"min_age,omitempty"` + MinDocs *int64 `json:"min_docs,omitempty"` + MinPrimaryShardDocs *int64 `json:"min_primary_shard_docs,omitempty"` + MinPrimaryShardSize ByteSize `json:"min_primary_shard_size,omitempty"` + MinSize ByteSize `json:"min_size,omitempty"` +} + +func (s *DataStreamLifecycleRolloverConditions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_age": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxAge", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAge = &o + + case "max_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocs", err) + } + s.MaxDocs = &value + case float64: + f := int64(v) + s.MaxDocs = &f + } + + case "max_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardDocs", err) + } + s.MaxPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MaxPrimaryShardDocs = &f + } + + case "max_primary_shard_size": + if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardSize", err) + } + + case "max_size": + if err := dec.Decode(&s.MaxSize); err != nil { + return fmt.Errorf("%s | %w", "MaxSize", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + case "min_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocs", err) + } + s.MinDocs = &value + case float64: + f := int64(v) + s.MinDocs = &f + } + + case "min_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", 
"MinPrimaryShardDocs", err) + } + s.MinPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MinPrimaryShardDocs = &f + } + + case "min_primary_shard_size": + if err := dec.Decode(&s.MinPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardSize", err) + } + + case "min_size": + if err := dec.Decode(&s.MinSize); err != nil { + return fmt.Errorf("%s | %w", "MinSize", err) + } + + } + } + return nil +} + +// NewDataStreamLifecycleRolloverConditions returns a DataStreamLifecycleRolloverConditions. +func NewDataStreamLifecycleRolloverConditions() *DataStreamLifecycleRolloverConditions { + r := &DataStreamLifecycleRolloverConditions{} + + return r +} + +type DataStreamLifecycleRolloverConditionsVariant interface { + DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions +} + +func (s *DataStreamLifecycleRolloverConditions) DataStreamLifecycleRolloverConditionsCaster() *DataStreamLifecycleRolloverConditions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclewithrollover.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclewithrollover.go new file mode 100644 index 000000000..1deab391d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamlifecyclewithrollover.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamLifecycleWithRollover type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamLifecycle.ts#L47-L58 +type DataStreamLifecycleWithRollover struct { + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. + Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. 
+ Enabled *bool `json:"enabled,omitempty"` + // Rollover The conditions which will trigger the rollover of a backing index as + // configured by the cluster setting `cluster.lifecycle.default.rollover`. + // This property is an implementation detail and it will only be retrieved when + // the query param `include_defaults` is set to true. + // The contents of this field are subject to change. + Rollover *DataStreamLifecycleRolloverConditions `json:"rollover,omitempty"` +} + +func (s *DataStreamLifecycleWithRollover) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return fmt.Errorf("%s | %w", "Downsampling", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "rollover": + if err := dec.Decode(&s.Rollover); err != nil { + return fmt.Errorf("%s | %w", "Rollover", err) + } + + } + } + return nil +} + +// NewDataStreamLifecycleWithRollover returns a DataStreamLifecycleWithRollover. +func NewDataStreamLifecycleWithRollover() *DataStreamLifecycleWithRollover { + r := &DataStreamLifecycleWithRollover{} + + return r +} + +type DataStreamLifecycleWithRolloverVariant interface { + DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover +} + +func (s *DataStreamLifecycleWithRollover) DataStreamLifecycleWithRolloverCaster() *DataStreamLifecycleWithRollover { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamnames.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamnames.go new file mode 100644 index 000000000..d9475bd05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamnames.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DataStreamNames type alias. 
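+//
+// Usage sketch (hypothetical stream names):
+//
+//	names := DataStreamNames{"logs-app-default", "metrics-app-default"}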
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L88-L88 +type DataStreamNames []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptions.go new file mode 100644 index 000000000..af45ac46c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptions.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DataStreamOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamOptions.ts#L25-L34 +type DataStreamOptions struct { + // FailureStore If defined, it specifies configuration for the failure store of this data + // stream. + FailureStore *DataStreamFailureStore `json:"failure_store,omitempty"` +} + +// NewDataStreamOptions returns a DataStreamOptions. +func NewDataStreamOptions() *DataStreamOptions { + r := &DataStreamOptions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptionstemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptionstemplate.go new file mode 100644 index 000000000..c48763f9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamoptionstemplate.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataStreamOptionsTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamOptions.ts#L36-L41 +type DataStreamOptionsTemplate struct { + FailureStore *DataStreamFailureStoreTemplate `json:"failure_store,omitempty"` +} + +func (s *DataStreamOptionsTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + + } + } + return nil +} + +// NewDataStreamOptionsTemplate returns a DataStreamOptionsTemplate. +func NewDataStreamOptionsTemplate() *DataStreamOptionsTemplate { + r := &DataStreamOptionsTemplate{} + + return r +} + +type DataStreamOptionsTemplateVariant interface { + DataStreamOptionsTemplateCaster() *DataStreamOptionsTemplate +} + +func (s *DataStreamOptionsTemplate) DataStreamOptionsTemplateCaster() *DataStreamOptionsTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreams.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreams.go new file mode 100644 index 000000000..815c6d69f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreams.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreams type. 
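+//
+// Decoding sketch (illustrative xpack usage payload; the count fields accept
+// either JSON numbers or numeric strings, as handled below):
+//
+//	var ds DataStreams
+//	raw := []byte(`{"available": true, "enabled": true, "data_streams": "12", "indices_count": 40}`)
+//	if err := json.Unmarshal(raw, &ds); err != nil {
+//		// handle decode error
+//	}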
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L78-L81 +type DataStreams struct { + Available bool `json:"available"` + DataStreams int64 `json:"data_streams"` + Enabled bool `json:"enabled"` + IndicesCount int64 `json:"indices_count"` +} + +func (s *DataStreams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "data_streams": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DataStreams", err) + } + s.DataStreams = value + case float64: + f := int64(v) + s.DataStreams = f + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesCount", err) + } + s.IndicesCount = value + case float64: + f := int64(v) + s.IndicesCount = f + } + + } + } + return nil +} + +// NewDataStreams returns a DataStreams. +func NewDataStreams() *DataStreams { + r := &DataStreams{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettings.go new file mode 100644 index 000000000..b8eaf950e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettings.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamSettings type. 
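+//
+// Decoding sketch (raw is assumed to hold one response item; EffectiveSettings
+// is the stream's own settings merged with its template's):
+//
+//	var s DataStreamSettings
+//	if err := json.Unmarshal(raw, &s); err != nil {
+//		// handle decode error
+//	}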
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_stream_settings/IndicesGetDataStreamSettingsResponse.ts#L29-L39 +type DataStreamSettings struct { + // EffectiveSettings The settings specific to this data stream merged with the settings from its + // template. These `effective_settings` + // are the settings that will be used when a new index is created for this data + // stream. + EffectiveSettings IndexSettings `json:"effective_settings"` + // Name The name of the data stream. + Name string `json:"name"` + // Settings The settings specific to this data stream + Settings IndexSettings `json:"settings"` +} + +func (s *DataStreamSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "effective_settings": + if err := dec.Decode(&s.EffectiveSettings); err != nil { + return fmt.Errorf("%s | %w", "EffectiveSettings", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + } + } + return nil +} + +// NewDataStreamSettings returns a DataStreamSettings. +func NewDataStreamSettings() *DataStreamSettings { + r := &DataStreamSettings{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettingserror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettingserror.go new file mode 100644 index 000000000..e6d58893a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsettingserror.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamSettingsError type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L71-L77 +type DataStreamSettingsError struct { + // Error A message explaining why the settings could not be applied to specific + // indices. 
+ Error string `json:"error"` + Index string `json:"index"` +} + +func (s *DataStreamSettingsError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewDataStreamSettingsError returns a DataStreamSettingsError. +func NewDataStreamSettingsError() *DataStreamSettingsError { + r := &DataStreamSettingsError{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsstatsitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsstatsitem.go new file mode 100644 index 000000000..eab9d9077 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamsstatsitem.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamsStatsItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 +type DataStreamsStatsItem struct { + // BackingIndices Current number of backing indices for the data stream. + BackingIndices int `json:"backing_indices"` + // DataStream Name of the data stream. + DataStream string `json:"data_stream"` + // MaximumTimestamp The data stream’s highest `@timestamp` value, converted to milliseconds since + // the Unix epoch. + // NOTE: This timestamp is provided as a best effort. + // The data stream may contain `@timestamp` values higher than this if one or + // more of the following conditions are met: + // The stream contains closed backing indices; + // Backing indices with a lower generation contain higher `@timestamp` values. + MaximumTimestamp int64 `json:"maximum_timestamp"` + // StoreSize Total size of all shards for the data stream’s backing indices. + // This parameter is only returned if the `human` query parameter is `true`. 
+ StoreSize ByteSize `json:"store_size,omitempty"` + // StoreSizeBytes Total size, in bytes, of all shards for the data stream’s backing indices. + StoreSizeBytes int64 `json:"store_size_bytes"` +} + +func (s *DataStreamsStatsItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "backing_indices": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackingIndices", err) + } + s.BackingIndices = value + case float64: + f := int(v) + s.BackingIndices = f + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "maximum_timestamp": + if err := dec.Decode(&s.MaximumTimestamp); err != nil { + return fmt.Errorf("%s | %w", "MaximumTimestamp", err) + } + + case "store_size": + if err := dec.Decode(&s.StoreSize); err != nil { + return fmt.Errorf("%s | %w", "StoreSize", err) + } + + case "store_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StoreSizeBytes", err) + } + s.StoreSizeBytes = value + case float64: + f := int64(v) + s.StoreSizeBytes = f + } + + } + } + return nil +} + +// NewDataStreamsStatsItem returns a DataStreamsStatsItem. +func NewDataStreamsStatsItem() *DataStreamsStatsItem { + r := &DataStreamsStatsItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamstats.go new file mode 100644 index 000000000..d4301f1f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamstats.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsResponse.ts#L46-L59 +type DataStreamStats struct { + // BackingIndicesInError The count of the backing indices for the data stream. 
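+ // That is, the number of backing indices that are currently in an error
+ // state, matching the `backing_indices_in_error` JSON tag.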
+ BackingIndicesInError int `json:"backing_indices_in_error"`
+ // BackingIndicesInTotal The total count of the backing indices for the data
+ // stream.
+ BackingIndicesInTotal int `json:"backing_indices_in_total"`
+ // Name The name of the data stream.
+ Name string `json:"name"`
+}
+
+func (s *DataStreamStats) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "backing_indices_in_error":
+
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "BackingIndicesInError", err)
+ }
+ s.BackingIndicesInError = value
+ case float64:
+ f := int(v)
+ s.BackingIndicesInError = f
+ }
+
+ case "backing_indices_in_total":
+
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "BackingIndicesInTotal", err)
+ }
+ s.BackingIndicesInTotal = value
+ case float64:
+ f := int(v)
+ s.BackingIndicesInTotal = f
+ }
+
+ case "name":
+ if err := dec.Decode(&s.Name); err != nil {
+ return fmt.Errorf("%s | %w", "Name", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewDataStreamStats returns a DataStreamStats.
+func NewDataStreamStats() *DataStreamStats {
+ r := &DataStreamStats{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestamp.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestamp.go
new file mode 100644
index 000000000..ba734fde5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestamp.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// DataStreamTimestamp type.
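+//
+// Construction sketch:
+//
+//	ts := NewDataStreamTimestamp()
+//	ts.Enabled = true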
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/TypeMapping.ts#L59-L61 +type DataStreamTimestamp struct { + Enabled bool `json:"enabled"` +} + +func (s *DataStreamTimestamp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewDataStreamTimestamp returns a DataStreamTimestamp. +func NewDataStreamTimestamp() *DataStreamTimestamp { + r := &DataStreamTimestamp{} + + return r +} + +type DataStreamTimestampVariant interface { + DataStreamTimestampCaster() *DataStreamTimestamp +} + +func (s *DataStreamTimestamp) DataStreamTimestampCaster() *DataStreamTimestamp { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestampfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestampfield.go new file mode 100644 index 000000000..d36120907 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamtimestampfield.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataStreamTimestampField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L147-L152 +type DataStreamTimestampField struct { + // Name Name of the timestamp field for the data stream, which must be `@timestamp`. + // The `@timestamp` field must be included in every document indexed to the data + // stream. + Name string `json:"name"` +} + +func (s *DataStreamTimestampField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewDataStreamTimestampField returns a DataStreamTimestampField. 
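+// The only permitted name can then be assigned directly:
+//
+//	f := NewDataStreamTimestampField()
+//	f.Name = "@timestamp"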
+func NewDataStreamTimestampField() *DataStreamTimestampField { + r := &DataStreamTimestampField{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamvisibility.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamvisibility.go new file mode 100644 index 000000000..aa3fb5ada --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamvisibility.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataStreamVisibility type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L181-L184 +type DataStreamVisibility struct { + AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + Hidden *bool `json:"hidden,omitempty"` +} + +func (s *DataStreamVisibility) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_custom_routing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowCustomRouting", err) + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + + case "hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Hidden", err) + } + s.Hidden = &value + case bool: + s.Hidden = &v + } + + } + } + return nil +} + +// NewDataStreamVisibility returns a DataStreamVisibility. +func NewDataStreamVisibility() *DataStreamVisibility { + r := &DataStreamVisibility{} + + return r +} + +type DataStreamVisibilityVariant interface { + DataStreamVisibilityCaster() *DataStreamVisibility +} + +func (s *DataStreamVisibility) DataStreamVisibilityCaster() *DataStreamVisibility { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithlifecycle.go new file mode 100644 index 000000000..16e9d02f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithlifecycle.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataStreamWithLifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 +type DataStreamWithLifecycle struct { + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + Name string `json:"name"` +} + +func (s *DataStreamWithLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewDataStreamWithLifecycle returns a DataStreamWithLifecycle. +func NewDataStreamWithLifecycle() *DataStreamWithLifecycle { + r := &DataStreamWithLifecycle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithoptions.go new file mode 100644 index 000000000..a84ea6d0c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datastreamwithoptions.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DataStreamWithOptions type. 
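+//
+// Decoding sketch (illustrative get-data-stream-options response item):
+//
+//	var d DataStreamWithOptions
+//	raw := []byte(`{"name": "my-data-stream", "options": {}}`)
+//	if err := json.Unmarshal(raw, &d); err != nil {
+//		// handle decode error
+//	}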
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_data_stream_options/IndicesGetDataStreamOptionsResponse.ts#L27-L30 +type DataStreamWithOptions struct { + Name string `json:"name"` + Options *DataStreamOptions `json:"options,omitempty"` +} + +func (s *DataStreamWithOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "options": + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + } + } + return nil +} + +// NewDataStreamWithOptions returns a DataStreamWithOptions. +func NewDataStreamWithOptions() *DataStreamWithOptions { + r := &DataStreamWithOptions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatierphasestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatierphasestatistics.go new file mode 100644 index 000000000..cd62b1def --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatierphasestatistics.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataTierPhaseStatistics type. 
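+//
+// Decoding sketch (illustrative payload; every numeric field below also
+// tolerates a string encoding, handled by the custom unmarshaler):
+//
+//	var p DataTierPhaseStatistics
+//	raw := []byte(`{"doc_count": "1024", "node_count": 3}`)
+//	if err := json.Unmarshal(raw, &p); err != nil {
+//		// handle decode error
+//	}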
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L83-L94 +type DataTierPhaseStatistics struct { + DocCount int64 `json:"doc_count"` + IndexCount int64 `json:"index_count"` + NodeCount int64 `json:"node_count"` + PrimaryShardCount int64 `json:"primary_shard_count"` + PrimaryShardSizeAvgBytes int64 `json:"primary_shard_size_avg_bytes"` + PrimaryShardSizeMadBytes int64 `json:"primary_shard_size_mad_bytes"` + PrimaryShardSizeMedianBytes int64 `json:"primary_shard_size_median_bytes"` + PrimarySizeBytes int64 `json:"primary_size_bytes"` + TotalShardCount int64 `json:"total_shard_count"` + TotalSizeBytes int64 `json:"total_size_bytes"` +} + +func (s *DataTierPhaseStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "index_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int64(v) + s.IndexCount = f + } + + case "node_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NodeCount", err) + } + s.NodeCount = value + case float64: + f := int64(v) + s.NodeCount = f + } + + case "primary_shard_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryShardCount", err) + } + s.PrimaryShardCount = value + case float64: + f := int64(v) + s.PrimaryShardCount = f + } + + case "primary_shard_size_avg_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryShardSizeAvgBytes", err) + } + s.PrimaryShardSizeAvgBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeAvgBytes = f + } + + case "primary_shard_size_mad_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryShardSizeMadBytes", err) + } + s.PrimaryShardSizeMadBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeMadBytes = f + } + + case "primary_shard_size_median_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryShardSizeMedianBytes", err) + } + s.PrimaryShardSizeMedianBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeMedianBytes = f + } + + case "primary_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimarySizeBytes", err) + } + s.PrimarySizeBytes = value + case float64: + f := int64(v) + s.PrimarySizeBytes = f 
+ } + + case "total_shard_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalShardCount", err) + } + s.TotalShardCount = value + case float64: + f := int64(v) + s.TotalShardCount = f + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + } + } + return nil +} + +// NewDataTierPhaseStatistics returns a DataTierPhaseStatistics. +func NewDataTierPhaseStatistics() *DataTierPhaseStatistics { + r := &DataTierPhaseStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatiers.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatiers.go new file mode 100644 index 000000000..f4dad3eef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datatiers.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DataTiers type. 
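
The DataTiers struct defined next models the `data_tiers` section of the xpack usage response, and its decoder, like the per-phase decoder above, tolerates scalars that arrive either natively typed or string-quoted. A minimal decoding sketch (illustrative only, not part of the vendored diff; it assumes the vendored module resolves under its canonical import path):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "available" arrives string-quoted, "enabled" as a native bool; the
	// generated UnmarshalJSON accepts both, and nested tier objects reuse
	// the tolerant DataTierPhaseStatistics decoder.
	payload := []byte(`{"available":"true","enabled":false,"data_hot":{"doc_count":12}}`)
	tiers := types.NewDataTiers()
	if err := tiers.UnmarshalJSON(payload); err != nil {
		panic(err)
	}
	fmt.Println(tiers.Available, tiers.Enabled, tiers.DataHot.DocCount) // true false 12
}
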
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L349-L359 +type DataTiers struct { + Available bool `json:"available"` + DataCold DataTierPhaseStatistics `json:"data_cold"` + DataContent DataTierPhaseStatistics `json:"data_content"` + DataFrozen *DataTierPhaseStatistics `json:"data_frozen,omitempty"` + DataHot DataTierPhaseStatistics `json:"data_hot"` + DataWarm DataTierPhaseStatistics `json:"data_warm"` + Enabled bool `json:"enabled"` +} + +func (s *DataTiers) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "data_cold": + if err := dec.Decode(&s.DataCold); err != nil { + return fmt.Errorf("%s | %w", "DataCold", err) + } + + case "data_content": + if err := dec.Decode(&s.DataContent); err != nil { + return fmt.Errorf("%s | %w", "DataContent", err) + } + + case "data_frozen": + if err := dec.Decode(&s.DataFrozen); err != nil { + return fmt.Errorf("%s | %w", "DataFrozen", err) + } + + case "data_hot": + if err := dec.Decode(&s.DataHot); err != nil { + return fmt.Errorf("%s | %w", "DataHot", err) + } + + case "data_warm": + if err := dec.Decode(&s.DataWarm); err != nil { + return fmt.Errorf("%s | %w", "DataWarm", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewDataTiers returns a DataTiers. +func NewDataTiers() *DataTiers { + r := &DataTiers{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedecayfunction.go new file mode 100644 index 000000000..0d26b98c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedecayfunction.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode" +) + +// DateDecayFunction type. 
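
DateDecayFunction, defined next, keeps its field-to-placement mapping in an auxiliary map tagged `json:"-"` and re-inlines each map key as a top-level property when marshalling. A sketch of the resulting wire shape (illustrative only; the empty object assumes a zero-value DecayPlacementDateMathDuration omits all of its optional fields):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	fn := types.NewDateDecayFunction()
	// The map key surfaces as a top-level JSON property naming the target field.
	fn.DecayFunctionBaseDateMathDuration["@timestamp"] = types.DecayPlacementDateMathDuration{}
	out, err := json.Marshal(fn)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"@timestamp":{}}
}
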
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L209-L209
+type DateDecayFunction struct {
+	DecayFunctionBaseDateMathDuration map[string]DecayPlacementDateMathDuration `json:"-"`
+	// MultiValueMode Determines how the distance is calculated when a field used for computing the
+	// decay contains multiple values.
+	MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DateDecayFunction) MarshalJSON() ([]byte, error) {
+	type opt DateDecayFunction
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.DecayFunctionBaseDateMathDuration {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "DecayFunctionBaseDateMathDuration")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDateDecayFunction returns a DateDecayFunction.
+func NewDateDecayFunction() *DateDecayFunction {
+	r := &DateDecayFunction{
+		DecayFunctionBaseDateMathDuration: make(map[string]DecayPlacementDateMathDuration),
+	}
+
+	return r
+}
+
+type DateDecayFunctionVariant interface {
+	DateDecayFunctionCaster() *DateDecayFunction
+}
+
+func (s *DateDecayFunction) DateDecayFunctionCaster() *DateDecayFunction {
+	return s
+}
+
+func (s *DateDecayFunction) DecayFunctionCaster() *DecayFunction {
+	o := DecayFunction(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedistancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedistancefeaturequery.go
new file mode 100644
index 000000000..082270e65
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datedistancefeaturequery.go
@@ -0,0 +1,146 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DateDistanceFeatureQuery type.
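
The date-oriented distance_feature query defined next boosts relevance by proximity to an origin date. A construction sketch (illustrative only; it assumes `Duration` is the permissive union alias used throughout these types, so a plain time-unit string is accepted for Pivot):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	q := types.NewDateDistanceFeatureQuery()
	q.Field = "@timestamp"
	q.Origin = "now" // date math such as now-1h is also accepted
	q.Pivot = "7d"   // relevance halves seven days from the origin
	b := float32(2.0)
	q.Boost = &b
	out, _ := json.Marshal(q)
	fmt.Println(string(out)) // {"boost":2,"field":"@timestamp","origin":"now","pivot":"7d"}
}
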
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L71-L74
+type DateDistanceFeatureQuery struct {
+	// Boost Floating point number used to decrease or increase the relevance scores of
+	// the query.
+	// Boost values are relative to the default value of 1.0.
+	// A boost value between 0 and 1.0 decreases the relevance score.
+	// A value greater than 1.0 increases the relevance score.
+	Boost *float32 `json:"boost,omitempty"`
+	// Field Name of the field used to calculate distances. This field must meet the
+	// following criteria:
+	// be a `date`, `date_nanos` or `geo_point` field;
+	// have an `index` mapping parameter value of `true`, which is the default;
+	// have a `doc_values` mapping parameter value of `true`, which is the default.
+	Field string `json:"field"`
+	// Origin Date or point of origin used to calculate distances.
+	// If the `field` value is a `date` or `date_nanos` field, the `origin` value
+	// must be a date.
+	// Date Math, such as `now-1h`, is supported.
+	// If the field value is a `geo_point` field, the `origin` value must be a
+	// geopoint.
+	Origin string `json:"origin"`
+	// Pivot Distance from the `origin` at which relevance scores receive half of the
+	// `boost` value.
+	// If the `field` value is a `date` or `date_nanos` field, the `pivot` value
+	// must be a time unit, such as `1h` or `10d`. If the `field` value is a
+	// `geo_point` field, the `pivot` value must be a distance unit, such as `1km`
+	// or `12m`.
+	Pivot Duration `json:"pivot"`
+	QueryName_ *string `json:"_name,omitempty"`
+}
+
+func (s *DateDistanceFeatureQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "origin":
+			if err := dec.Decode(&s.Origin); err != nil {
+				return fmt.Errorf("%s | %w", "Origin", err)
+			}
+
+		case "pivot":
+			if err := dec.Decode(&s.Pivot); err != nil {
+				return fmt.Errorf("%s | %w", "Pivot", err)
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o

+		}
+	}
+	return nil
+}
+
+// NewDateDistanceFeatureQuery returns a DateDistanceFeatureQuery.
+func NewDateDistanceFeatureQuery() *DateDistanceFeatureQuery { + r := &DateDistanceFeatureQuery{} + + return r +} + +type DateDistanceFeatureQueryVariant interface { + DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery +} + +func (s *DateDistanceFeatureQuery) DateDistanceFeatureQueryCaster() *DateDistanceFeatureQuery { + return s +} + +func (s *DateDistanceFeatureQuery) DistanceFeatureQueryCaster() *DistanceFeatureQuery { + o := DistanceFeatureQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregate.go new file mode 100644 index 000000000..03128e208 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DateHistogramAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L383-L386 +type DateHistogramAggregate struct { + Buckets BucketsDateHistogramBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *DateHistogramAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]DateHistogramBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []DateHistogramBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewDateHistogramAggregate returns a DateHistogramAggregate. 
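
A note on the decoder above: `Buckets` is a union that arrives as an object when the request set `keyed: true` and as an array otherwise, so the first byte of the raw message selects the branch. A sketch of consuming the array form (illustrative only):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	payload := []byte(`{"buckets":[{"key":1704067200000,"key_as_string":"2024-01-01","doc_count":3}]}`)
	agg := types.NewDateHistogramAggregate()
	if err := agg.UnmarshalJSON(payload); err != nil {
		panic(err)
	}
	// The union is held as `any`; type-assert on the shape that was decoded.
	buckets := agg.Buckets.([]types.DateHistogramBucket)
	fmt.Println(buckets[0].Key, buckets[0].DocCount) // 1704067200000 3
}
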
+func NewDateHistogramAggregate() *DateHistogramAggregate {
+	r := &DateHistogramAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregation.go
new file mode 100644
index 000000000..052435a4f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramaggregation.go
@@ -0,0 +1,235 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+)
+
+// DateHistogramAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L202-L260
+type DateHistogramAggregation struct {
+	// CalendarInterval Calendar-aware interval.
+	// Can be specified using the unit name, such as `month`, or as a single unit
+	// quantity, such as `1M`.
+	CalendarInterval *calendarinterval.CalendarInterval `json:"calendar_interval,omitempty"`
+	// ExtendedBounds Enables extending the bounds of the histogram beyond the data itself.
+	ExtendedBounds *ExtendedBoundsFieldDateMath `json:"extended_bounds,omitempty"`
+	// Field The date field whose values are used to build a histogram.
+	Field *string `json:"field,omitempty"`
+	// FixedInterval Fixed intervals: a fixed number of SI units that never deviate, regardless of
+	// where they fall on the calendar.
+	FixedInterval Duration `json:"fixed_interval,omitempty"`
+	// Format The date format used to format `key_as_string` in the response.
+	// If no `format` is specified, the first date format specified in the field
+	// mapping is used.
+	Format *string `json:"format,omitempty"`
+	// HardBounds Limits the histogram to specified bounds.
+	HardBounds *ExtendedBoundsFieldDateMath `json:"hard_bounds,omitempty"`
+	Interval Duration `json:"interval,omitempty"`
+	// Keyed Set to `true` to associate a unique string key with each bucket and return
+	// the ranges as a hash rather than an array.
+	Keyed *bool `json:"keyed,omitempty"`
+	// MinDocCount Only returns buckets that have `min_doc_count` number of documents.
+	// By default, all buckets between the first bucket that matches documents and
+	// the last one are returned.
+ MinDocCount *int `json:"min_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing DateTime `json:"missing,omitempty"` + // Offset Changes the start value of each bucket by the specified positive (`+`) or + // negative offset (`-`) duration. + Offset Duration `json:"offset,omitempty"` + // Order The sort order of the returned buckets. + Order AggregateOrder `json:"order,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Script *Script `json:"script,omitempty"` + // TimeZone Time zone used for bucketing and rounding. + // Defaults to Coordinated Universal Time (UTC). + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return fmt.Errorf("%s | %w", "CalendarInterval", err) + } + + case "extended_bounds": + if err := dec.Decode(&s.ExtendedBounds); err != nil { + return fmt.Errorf("%s | %w", "ExtendedBounds", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return fmt.Errorf("%s | %w", "FixedInterval", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hard_bounds": + if err := dec.Decode(&s.HardBounds); err != nil { + return fmt.Errorf("%s | %w", "HardBounds", err) + } + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "min_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + + case "order": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + case '[': + o := make([]map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "script": + if err := 
dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewDateHistogramAggregation returns a DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + r := &DateHistogramAggregation{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type DateHistogramAggregationVariant interface { + DateHistogramAggregationCaster() *DateHistogramAggregation +} + +func (s *DateHistogramAggregation) DateHistogramAggregationCaster() *DateHistogramAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogrambucket.go new file mode 100644 index 000000000..4579bcefa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogrambucket.go @@ -0,0 +1,651 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// DateHistogramBucket type. 
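
The bucket type defined next carries its sub-aggregations in a flattened map and relies on the `typed_keys` response convention: each sub-aggregation name comes back prefixed with its aggregate type, such as `avg#ride_time`, and that prefix drives the dispatch switch below. A decoding sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	payload := []byte(`{"key":1704067200000,"doc_count":2,"avg#ride_time":{"value":12.5}}`)
	bucket := types.NewDateHistogramBucket()
	if err := bucket.UnmarshalJSON(payload); err != nil {
		panic(err)
	}
	// The "avg#" prefix selected *types.AvgAggregate; "ride_time" became the map key.
	avg := bucket.Aggregations["ride_time"].(*types.AvgAggregate)
	fmt.Println(bucket.DocCount, *avg.Value) // 2 12.5
}
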
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L388-L391 +type DateHistogramBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key int64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` +} + +func (s *DateHistogramBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) 
+ } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + 
if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); 
err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_line":
+							o := NewGeoLineAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						default:
+							o := make(map[string]any, 0)
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+						}
+					} else {
+						return errors.New("cannot decode JSON for field Aggregations")
+					}
+				} else {
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[value] = o
+				}
+			}

+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DateHistogramBucket) MarshalJSON() ([]byte, error) {
+	type opt DateHistogramBucket
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDateHistogramBucket returns a DateHistogramBucket.
+func NewDateHistogramBucket() *DateHistogramBucket {
+	r := &DateHistogramBucket{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramgrouping.go
new file mode 100644
index 000000000..70528e4dc
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datehistogramgrouping.go
@@ -0,0 +1,134 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DateHistogramGrouping type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Groupings.ts#L42-L73
+type DateHistogramGrouping struct {
+	// CalendarInterval The interval of time buckets to be generated when rolling up.
+	CalendarInterval Duration `json:"calendar_interval,omitempty"`
+	// Delay How long to wait before rolling up new documents.
+ // By default, the indexer attempts to roll up all data that is available. + // However, it is not uncommon for data to arrive out of order. + // The indexer is unable to deal with data that arrives after a time-span has + // been rolled up. + // You need to specify a delay that matches the longest period of time you + // expect out-of-order data to arrive. + Delay Duration `json:"delay,omitempty"` + // Field The date field that is to be rolled up. + Field string `json:"field"` + // FixedInterval The interval of time buckets to be generated when rolling up. + FixedInterval Duration `json:"fixed_interval,omitempty"` + Format *string `json:"format,omitempty"` + Interval Duration `json:"interval,omitempty"` + // TimeZone Defines what `time_zone` the rollup documents are stored as. + // Unlike raw data, which can shift timezones on the fly, rolled documents have + // to be stored with a specific timezone. + // By default, rollup documents are stored in `UTC`. + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateHistogramGrouping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return fmt.Errorf("%s | %w", "CalendarInterval", err) + } + + case "delay": + if err := dec.Decode(&s.Delay); err != nil { + return fmt.Errorf("%s | %w", "Delay", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return fmt.Errorf("%s | %w", "FixedInterval", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewDateHistogramGrouping returns a DateHistogramGrouping. +func NewDateHistogramGrouping() *DateHistogramGrouping { + r := &DateHistogramGrouping{} + + return r +} + +type DateHistogramGroupingVariant interface { + DateHistogramGroupingCaster() *DateHistogramGrouping +} + +func (s *DateHistogramGrouping) DateHistogramGroupingCaster() *DateHistogramGrouping { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateindexnameprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateindexnameprocessor.go new file mode 100644 index 000000000..915ad50d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateindexnameprocessor.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DateIndexNameProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L730-L768
+type DateIndexNameProcessor struct {
+	// DateFormats An array of the expected date formats for parsing dates / timestamps in the
+	// document being preprocessed.
+	// Can be a java time pattern or one of the following formats: ISO8601, UNIX,
+	// UNIX_MS, or TAI64N.
+	DateFormats []string `json:"date_formats,omitempty"`
+	// DateRounding How to round the date when formatting the date into the index name. Valid
+	// values are:
+	// `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and
+	// `s` (second).
+	// Supports template snippets.
+	DateRounding string `json:"date_rounding"`
+	// Description Description of the processor.
+	// Useful for describing the purpose of the processor or its configuration.
+	Description *string `json:"description,omitempty"`
+	// Field The field to get the date or timestamp from.
+	Field string `json:"field"`
+	// If Conditionally execute the processor.
+	If *Script `json:"if,omitempty"`
+	// IgnoreFailure Ignore failures for the processor.
+	IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+	// IndexNameFormat The format to be used when printing the parsed date into the index name.
+	// A valid java time pattern is expected here.
+	// Supports template snippets.
+	IndexNameFormat *string `json:"index_name_format,omitempty"`
+	// IndexNamePrefix A prefix of the index name to be prepended before the printed date.
+	// Supports template snippets.
+	IndexNamePrefix *string `json:"index_name_prefix,omitempty"`
+	// Locale The locale to use when parsing the date from the document being preprocessed,
+	// relevant when parsing month names or week days.
+	Locale *string `json:"locale,omitempty"`
+	// OnFailure Handle failures for the processor.
+	OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+	// Tag Identifier for the processor.
+	// Useful for debugging and metrics.
+	Tag *string `json:"tag,omitempty"`
+	// Timezone The timezone to use when parsing the date and when date math index name
+	// expressions are resolved into concrete index names.
+ Timezone *string `json:"timezone,omitempty"` +} + +func (s *DateIndexNameProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "date_formats": + if err := dec.Decode(&s.DateFormats); err != nil { + return fmt.Errorf("%s | %w", "DateFormats", err) + } + + case "date_rounding": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DateRounding", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DateRounding = o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "index_name_format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexNameFormat", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexNameFormat = &o + + case "index_name_prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexNamePrefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexNamePrefix = &o + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timezone", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + + } + } + return nil +} + +// NewDateIndexNameProcessor returns a DateIndexNameProcessor. 
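
For orientation, a processor of the shape modelled above is normally declared inside an ingest pipeline definition. A sketch of assembling one with these types (illustrative only; the field values are examples, not defaults):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewDateIndexNameProcessor()
	p.Field = "@timestamp"
	p.DateRounding = "d" // route each document to a daily index
	p.DateFormats = []string{"ISO8601"}
	prefix := "events-"
	p.IndexNamePrefix = &prefix
	format := "yyyy-MM-dd"
	p.IndexNameFormat = &format
	out, _ := json.Marshal(p) // plain struct tags; no custom marshaller involved
	fmt.Println(string(out))
}
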
+func NewDateIndexNameProcessor() *DateIndexNameProcessor { + r := &DateIndexNameProcessor{} + + return r +} + +type DateIndexNameProcessorVariant interface { + DateIndexNameProcessorCaster() *DateIndexNameProcessor +} + +func (s *DateIndexNameProcessor) DateIndexNameProcessorCaster() *DateIndexNameProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datenanosproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datenanosproperty.go new file mode 100644 index 000000000..d1df55a8b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datenanosproperty.go @@ -0,0 +1,976 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// DateNanosProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L91-L101 +type DateNanosProperty struct { + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + Format *string `json:"format,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "precision_step": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrecisionStep", err) + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | 
%w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DateNanosProperty) MarshalJSON() ([]byte, error) { + type innerDateNanosProperty DateNanosProperty + tmp := innerDateNanosProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "date_nanos" + + return json.Marshal(tmp) +} + +// NewDateNanosProperty returns a DateNanosProperty. +func NewDateNanosProperty() *DateNanosProperty { + r := &DateNanosProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DateNanosPropertyVariant interface { + DateNanosPropertyCaster() *DateNanosProperty +} + +func (s *DateNanosProperty) DateNanosPropertyCaster() *DateNanosProperty { + return s +} + +func (s *DateNanosProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateprocessor.go new file mode 100644 index 000000000..c95c81bb5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateprocessor.go @@ -0,0 +1,200 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DateProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L770-L803 +type DateProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to get the date from. + Field string `json:"field"` + // Formats An array of the expected date formats. + // Can be a java time pattern or one of the following formats: ISO8601, UNIX, + // UNIX_MS, or TAI64N. + Formats []string `json:"formats"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // Locale The locale to use when parsing the date, relevant when parsing month names or + // week days. + // Supports template snippets. + Locale *string `json:"locale,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // OutputFormat The format to use when writing the date to target_field. Must be a valid + // java time pattern. + OutputFormat *string `json:"output_format,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the parsed date. + TargetField *string `json:"target_field,omitempty"` + // Timezone The timezone to use when parsing the date. + // Supports template snippets. 
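+ // Accepts an IANA time zone ID (for example "Europe/Amsterdam") or a fixed
+ // offset such as "+01:00"; when omitted, Elasticsearch parses dates as UTC.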
+ Timezone *string `json:"timezone,omitempty"` +} + +func (s *DateProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "formats": + if err := dec.Decode(&s.Formats); err != nil { + return fmt.Errorf("%s | %w", "Formats", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "output_format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OutputFormat", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OutputFormat = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timezone", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + + } + } + return nil +} + +// NewDateProcessor returns a DateProcessor. +func NewDateProcessor() *DateProcessor { + r := &DateProcessor{} + + return r +} + +type DateProcessorVariant interface { + DateProcessorCaster() *DateProcessor +} + +func (s *DateProcessor) DateProcessorCaster() *DateProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateproperty.go new file mode 100644 index 000000000..8864c0b70 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dateproperty.go @@ -0,0 +1,997 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// DateProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L77-L89 +type DateProperty struct { + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fielddata *NumericFielddata `json:"fielddata,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + Format *string `json:"format,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + Locale *string `json:"locale,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue DateTime `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DateProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := 
dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fielddata": + if err := dec.Decode(&s.Fielddata); err != nil { + return fmt.Errorf("%s | %w", "Fielddata", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Locale = &o + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "precision_step": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrecisionStep", err) + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := 
NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", 
"Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DateProperty) MarshalJSON() ([]byte, error) { + type innerDateProperty DateProperty + tmp := innerDateProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Locale: s.Locale, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "date" + + return json.Marshal(tmp) +} + +// NewDateProperty returns a DateProperty. +func NewDateProperty() *DateProperty { + r := &DateProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DatePropertyVariant interface { + DatePropertyCaster() *DateProperty +} + +func (s *DateProperty) DatePropertyCaster() *DateProperty { + return s +} + +func (s *DateProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregate.go new file mode 100644 index 000000000..ed67fc310 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DateRangeAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L609-L615 +type DateRangeAggregate struct { + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *DateRangeAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]RangeBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []RangeBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewDateRangeAggregate returns a DateRangeAggregate. +func NewDateRangeAggregate() *DateRangeAggregate { + r := &DateRangeAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregation.go new file mode 100644 index 000000000..5562e980a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeaggregation.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DateRangeAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L281-L307 +type DateRangeAggregation struct { + // Field The date field whose values are use to build ranges. + Field *string `json:"field,omitempty"` + // Format The date format used to format `from` and `to` in the response. + Format *string `json:"format,omitempty"` + // Keyed Set to `true` to associate a unique string key with each bucket and returns + // the ranges as a hash rather than an array. + Keyed *bool `json:"keyed,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
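+ // For a date field the replacement is typically a date string that the
+ // field's format can parse, for example "1970/01/01".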
+ Missing Missing `json:"missing,omitempty"` + // Ranges Array of date ranges. + Ranges []DateRangeExpression `json:"ranges,omitempty"` + // TimeZone Time zone used to convert dates from another time zone to UTC. + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateRangeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return fmt.Errorf("%s | %w", "Ranges", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewDateRangeAggregation returns a DateRangeAggregation. +func NewDateRangeAggregation() *DateRangeAggregation { + r := &DateRangeAggregation{} + + return r +} + +type DateRangeAggregationVariant interface { + DateRangeAggregationCaster() *DateRangeAggregation +} + +func (s *DateRangeAggregation) DateRangeAggregationCaster() *DateRangeAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeexpression.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeexpression.go new file mode 100644 index 000000000..73b44605b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeexpression.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DateRangeExpression type. 
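Editor's note: a brief usage sketch may help readers evaluating this vendor bump. The generated structs above are plain data carriers, so a `date_range` aggregation body can be produced by filling the struct and handing it to `encoding/json`. Everything below is illustrative (the field name `created_at` and the date-math strings are made up); it also assumes `FieldDateMath` is an open `any`-style union, like the `DateTime` and `DecayFunction` aliases added later in this diff, so plain strings can be assigned to `From` and `To`.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	field := "created_at" // hypothetical date field
	format := "yyyy-MM-dd"

	agg := types.DateRangeAggregation{
		Field:  &field,
		Format: &format,
		Ranges: []types.DateRangeExpression{
			{To: "now-10M/M"},              // everything older than ten months
			{From: "now-10M/M", To: "now"}, // the last ten months
		},
	}

	body, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	// Prints roughly:
	// {"field":"created_at","format":"yyyy-MM-dd","ranges":[{"to":"now-10M/M"},{"from":"now-10M/M","to":"now"}]}
	fmt.Println(string(body))
}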
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L318-L331 +type DateRangeExpression struct { + // From Start of the range (inclusive). + From FieldDateMath `json:"from,omitempty"` + // Key Custom key to return the range with. + Key *string `json:"key,omitempty"` + // To End of the range (exclusive). + To FieldDateMath `json:"to,omitempty"` +} + +func (s *DateRangeExpression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + if err := dec.Decode(&s.From); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + if err := dec.Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + } + } + return nil +} + +// NewDateRangeExpression returns a DateRangeExpression. +func NewDateRangeExpression() *DateRangeExpression { + r := &DateRangeExpression{} + + return r +} + +type DateRangeExpressionVariant interface { + DateRangeExpressionCaster() *DateRangeExpression +} + +func (s *DateRangeExpression) DateRangeExpressionCaster() *DateRangeExpression { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeproperty.go new file mode 100644 index 000000000..ec01ca041 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangeproperty.go @@ -0,0 +1,936 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// DateRangeProperty type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L29-L32 +type DateRangeProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + Format *string `json:"format,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := 
NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DateRangeProperty) MarshalJSON() ([]byte, error) { + type innerDateRangeProperty DateRangeProperty + tmp := innerDateRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "date_range" + + return json.Marshal(tmp) +} + +// NewDateRangeProperty returns a DateRangeProperty. +func NewDateRangeProperty() *DateRangeProperty { + r := &DateRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DateRangePropertyVariant interface { + DateRangePropertyCaster() *DateRangeProperty +} + +func (s *DateRangeProperty) DateRangePropertyCaster() *DateRangeProperty { + return s +} + +func (s *DateRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangequery.go new file mode 100644 index 000000000..90a4dcc76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/daterangequery.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation" +) + +// DateRangeQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L157-L166 +type DateRangeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Format Date format used to convert `date` values in the query. + Format *string `json:"format,omitempty"` + // Gt Greater than. + Gt *string `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte *string `json:"gte,omitempty"` + // Lt Less than. + Lt *string `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte *string `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. + Relation *rangerelation.RangeRelation `json:"relation,omitempty"` + // TimeZone Coordinated Universal Time (UTC) offset or IANA time zone used to convert + // `date` values in the query to UTC. 
+ TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + + case "gt": + if err := dec.Decode(&s.Gt); err != nil { + return fmt.Errorf("%s | %w", "Gt", err) + } + + case "gte": + if err := dec.Decode(&s.Gte); err != nil { + return fmt.Errorf("%s | %w", "Gte", err) + } + + case "lt": + if err := dec.Decode(&s.Lt); err != nil { + return fmt.Errorf("%s | %w", "Lt", err) + } + + case "lte": + if err := dec.Decode(&s.Lte); err != nil { + return fmt.Errorf("%s | %w", "Lte", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewDateRangeQuery returns a DateRangeQuery. +func NewDateRangeQuery() *DateRangeQuery { + r := &DateRangeQuery{} + + return r +} + +type DateRangeQueryVariant interface { + DateRangeQueryCaster() *DateRangeQuery +} + +func (s *DateRangeQuery) DateRangeQueryCaster() *DateRangeQuery { + return s +} + +func (s *DateRangeQuery) RangeQueryCaster() *RangeQuery { + o := RangeQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datetime.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datetime.go new file mode 100644 index 000000000..5efd4b3d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/datetime.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
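Editor's note: the `DateRangeQuery` just added keeps all bounds as `*string`, so date-math expressions flow through untouched, and its custom `UnmarshalJSON` restores the same fields on the way back. A minimal round-trip sketch; the bound and time zone values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	gte := "now-1d/d"
	lt := "now/d"
	tz := "+01:00"

	q := types.DateRangeQuery{Gte: &gte, Lt: &lt, TimeZone: &tz}

	body, err := json.Marshal(q)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"gte":"now-1d/d","lt":"now/d","time_zone":"+01:00"}

	// Decoding goes through the generated UnmarshalJSON above.
	var back types.DateRangeQuery
	if err := json.Unmarshal(body, &back); err != nil {
		panic(err)
	}
	fmt.Println(*back.Gte, *back.Lt, *back.TimeZone)
}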
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DateTime holds the union for the following types: +// +// string +// int64 +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L22-L27 +type DateTime any + +type DateTimeVariant interface { + DateTimeCaster() *DateTime +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayfunction.go new file mode 100644 index 000000000..f24b42f0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayfunction.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DecayFunction holds the union for the following types: +// +// UntypedDecayFunction +// DateDecayFunction +// NumericDecayFunction +// GeoDecayFunction +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L215-L224 +type DecayFunction any + +type DecayFunctionVariant interface { + DecayFunctionCaster() *DecayFunction +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacement.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacement.go new file mode 100644 index 000000000..7cf9a9f8f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacement.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
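Editor's note: `DateTime` and `DecayFunction` are open unions, bare `any` aliases, so the documented member types are not compiler-enforced. A consumer typically recovers the member with a type switch; a small sketch grounded in the `string`/`int64` members listed in the `DateTime` doc comment above:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// describe performs the type switch a consumer of the DateTime union
// would typically write; both members named in the spec are handled.
func describe(dt types.DateTime) string {
	switch v := dt.(type) {
	case string:
		return fmt.Sprintf("date-math or formatted date: %s", v)
	case int64:
		return fmt.Sprintf("epoch millis: %d", v)
	default:
		return "unexpected member type"
	}
}

func main() {
	fmt.Println(describe("2024-01-01T00:00:00Z"))
	fmt.Println(describe(int64(1704067200000)))
}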
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DecayPlacement type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L170-L189 +type DecayPlacement struct { + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. + Offset json.RawMessage `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. + Origin json.RawMessage `json:"origin,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. + Scale json.RawMessage `json:"scale,omitempty"` +} + +func (s *DecayPlacement) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Decay", err) + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + + case "scale": + if err := dec.Decode(&s.Scale); err != nil { + return fmt.Errorf("%s | %w", "Scale", err) + } + + } + } + return nil +} + +// NewDecayPlacement returns a DecayPlacement. +func NewDecayPlacement() *DecayPlacement { + r := &DecayPlacement{} + + return r +} + +type DecayPlacementVariant interface { + DecayPlacementCaster() *DecayPlacement +} + +func (s *DecayPlacement) DecayPlacementCaster() *DecayPlacement { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdatemathduration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdatemathduration.go new file mode 100644 index 000000000..79f54d1aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdatemathduration.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
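Editor's note: in the base `DecayPlacement` above, `origin`, `offset`, and `scale` stay as `json.RawMessage`, which keeps the type field-agnostic; the specializations that follow in this diff (date-math duration, double, geo distance) pin those members down. A sketch of the raw variant, with illustrative date-math values supplied as raw JSON fragments:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	decay := types.Float64(0.5)
	p := types.DecayPlacement{
		Decay:  &decay,
		Origin: json.RawMessage(`"2024-01-01"`), // raw JSON fragment: a date origin
		Scale:  json.RawMessage(`"10d"`),        // raw JSON fragment: a duration scale
	}

	body, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"decay":0.5,"origin":"2024-01-01","scale":"10d"}
}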
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DecayPlacementDateMathDuration type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L170-L189 +type DecayPlacementDateMathDuration struct { + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. + Offset Duration `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. + Origin *string `json:"origin,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. + Scale Duration `json:"scale,omitempty"` +} + +func (s *DecayPlacementDateMathDuration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Decay", err) + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + + case "scale": + if err := dec.Decode(&s.Scale); err != nil { + return fmt.Errorf("%s | %w", "Scale", err) + } + + } + } + return nil +} + +// NewDecayPlacementDateMathDuration returns a DecayPlacementDateMathDuration. +func NewDecayPlacementDateMathDuration() *DecayPlacementDateMathDuration { + r := &DecayPlacementDateMathDuration{} + + return r +} + +type DecayPlacementDateMathDurationVariant interface { + DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration +} + +func (s *DecayPlacementDateMathDuration) DecayPlacementDateMathDurationCaster() *DecayPlacementDateMathDuration { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdoubledouble.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdoubledouble.go new file mode 100644 index 000000000..8e89f2200 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementdoubledouble.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DecayPlacementdoubledouble type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L170-L189 +type DecayPlacementdoubledouble struct { + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. + Offset *Float64 `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. + Origin *Float64 `json:"origin,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. + Scale *Float64 `json:"scale,omitempty"` +} + +func (s *DecayPlacementdoubledouble) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Decay", err) + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f + } + + case "origin": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + f := Float64(value) + s.Origin = &f + case float64: + f := Float64(v) + s.Origin = &f + } + + case "scale": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Scale", err) + } + f := Float64(value) + s.Scale = &f + case float64: + f := Float64(v) + s.Scale = &f + } + + } + } + return nil +} + +// NewDecayPlacementdoubledouble returns a DecayPlacementdoubledouble. 
+func NewDecayPlacementdoubledouble() *DecayPlacementdoubledouble { + r := &DecayPlacementdoubledouble{} + + return r +} + +type DecayPlacementdoubledoubleVariant interface { + DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble +} + +func (s *DecayPlacementdoubledouble) DecayPlacementdoubledoubleCaster() *DecayPlacementdoubledouble { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementgeolocationdistance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementgeolocationdistance.go new file mode 100644 index 000000000..39ac3178f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decayplacementgeolocationdistance.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DecayPlacementGeoLocationDistance type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L170-L189 +type DecayPlacementGeoLocationDistance struct { + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. + Offset *string `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. + Origin GeoLocation `json:"origin,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. 
+ Scale *string `json:"scale,omitempty"` +} + +func (s *DecayPlacementGeoLocationDistance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Decay", err) + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + + case "origin": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + origin_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Origin", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + } + } + if s.Origin == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Origin); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + } + + case "scale": + if err := dec.Decode(&s.Scale); err != nil { + return fmt.Errorf("%s | %w", "Scale", err) + } + + } + } + return nil +} + +// NewDecayPlacementGeoLocationDistance returns a DecayPlacementGeoLocationDistance. +func NewDecayPlacementGeoLocationDistance() *DecayPlacementGeoLocationDistance { + r := &DecayPlacementGeoLocationDistance{} + + return r +} + +type DecayPlacementGeoLocationDistanceVariant interface { + DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance +} + +func (s *DecayPlacementGeoLocationDistance) DecayPlacementGeoLocationDistanceCaster() *DecayPlacementGeoLocationDistance { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decimaldigittokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decimaldigittokenfilter.go new file mode 100644 index 000000000..f2dc3840e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/decimaldigittokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
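Editor's note: the `origin` handling in `DecayPlacementGeoLocationDistance.UnmarshalJSON` above peeks at the object's keys to pick a concrete `GeoLocation` ("lat"/"lon" selects `LatLonGeoLocation`, "geohash" selects `GeoHashLocation`). A decoding sketch; it assumes `LatLonGeoLocation` exposes `Lat`/`Lon` fields, which matches how the sibling generated types are shaped:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{
		"origin": {"lat": 52.52, "lon": 13.405},
		"scale":  "2km",
		"offset": "0km",
		"decay":  0.33
	}`)

	var p types.DecayPlacementGeoLocationDistance
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}

	// The key-based dispatch stored a *LatLonGeoLocation in the Origin union.
	if ll, ok := p.Origin.(*types.LatLonGeoLocation); ok {
		fmt.Printf("origin: %v,%v scale: %s\n", ll.Lat, ll.Lon, *p.Scale)
	}
}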
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DecimalDigitTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L482-L484 +type DecimalDigitTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *DecimalDigitTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DecimalDigitTokenFilter) MarshalJSON() ([]byte, error) { + type innerDecimalDigitTokenFilter DecimalDigitTokenFilter + tmp := innerDecimalDigitTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "decimal_digit" + + return json.Marshal(tmp) +} + +// NewDecimalDigitTokenFilter returns a DecimalDigitTokenFilter. +func NewDecimalDigitTokenFilter() *DecimalDigitTokenFilter { + r := &DecimalDigitTokenFilter{} + + return r +} + +type DecimalDigitTokenFilterVariant interface { + DecimalDigitTokenFilterCaster() *DecimalDigitTokenFilter +} + +func (s *DecimalDigitTokenFilter) DecimalDigitTokenFilterCaster() *DecimalDigitTokenFilter { + return s +} + +func (s *DecimalDigitTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deepseekservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deepseekservicesettings.go new file mode 100644 index 000000000..b0724b8ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deepseekservicesettings.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
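Editor's note: as with the other discriminated types in this diff, the generated `MarshalJSON` pins the `type` discriminator, so a filter built via the constructor always serializes with the right literal even when the field is left empty. A one-line demonstration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewDecimalDigitTokenFilter() // Type deliberately left unset
	body, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	// MarshalJSON injects the literal discriminator, so a hand-built
	// filter cannot be serialized with a wrong "type".
	fmt.Println(string(body)) // {"type":"decimal_digit"}
}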
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DeepSeekServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1166-L1188 +type DeepSeekServiceSettings struct { + // ApiKey A valid API key for your DeepSeek account. + // You can find or create your DeepSeek API keys on the DeepSeek API key page. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // ModelId For a `completion` or `chat_completion` task, the name of the model to use + // for the inference task. + // + // For the available `completion` and `chat_completion` models, refer to the + // [DeepSeek Models & Pricing + // docs](https://api-docs.deepseek.com/quick_start/pricing). + ModelId string `json:"model_id"` + // Url The URL endpoint to use for the requests. Defaults to + // `https://api.deepseek.com/chat/completions`. + Url *string `json:"url,omitempty"` +} + +func (s *DeepSeekServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewDeepSeekServiceSettings returns a DeepSeekServiceSettings. +func NewDeepSeekServiceSettings() *DeepSeekServiceSettings { + r := &DeepSeekServiceSettings{} + + return r +} + +type DeepSeekServiceSettingsVariant interface { + DeepSeekServiceSettingsCaster() *DeepSeekServiceSettings +} + +func (s *DeepSeekServiceSettings) DeepSeekServiceSettingsCaster() *DeepSeekServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/defaults.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/defaults.go new file mode 100644 index 000000000..b41694826 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/defaults.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Defaults type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/types.ts#L24-L27 +type Defaults struct { + AnomalyDetectors AnomalyDetectors `json:"anomaly_detectors"` + Datafeeds Datafeeds `json:"datafeeds"` +} + +// NewDefaults returns a Defaults. +func NewDefaults() *Defaults { + r := &Defaults{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/definition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/definition.go new file mode 100644 index 000000000..a8d8b5d75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/definition.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Definition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L24-L29 +type Definition struct { + // Preprocessors Collection of preprocessors + Preprocessors []Preprocessor `json:"preprocessors,omitempty"` + // TrainedModel The definition of the trained model. + TrainedModel TrainedModel `json:"trained_model"` +} + +// NewDefinition returns a Definition. 
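+//
+// A minimal construction sketch (illustrative only; the TrainedModel
+// payload must be filled in with a real model before use):
+//
+//	def := NewDefinition()
+//	def.Preprocessors = []Preprocessor{} // optional preprocessing steps
+//	// def.TrainedModel = <the tree or ensemble definition to upload>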
+func NewDefinition() *Definition { + r := &Definition{} + + return r +} + +type DefinitionVariant interface { + DefinitionCaster() *Definition +} + +func (s *Definition) DefinitionCaster() *Definition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delayeddatacheckconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delayeddatacheckconfig.go new file mode 100644 index 000000000..99af0e5b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delayeddatacheckconfig.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DelayedDataCheckConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L122-L133 +type DelayedDataCheckConfig struct { + // CheckWindow The window of time that is searched for late data. This window of time ends + // with the latest finalized bucket. + // It defaults to null, which causes an appropriate `check_window` to be + // calculated when the real-time datafeed runs. + // In particular, the default `check_window` span calculation is based on the + // maximum of `2h` or `8 * bucket_span`. + CheckWindow Duration `json:"check_window,omitempty"` + // Enabled Specifies whether the datafeed periodically checks for delayed data. + Enabled bool `json:"enabled"` +} + +func (s *DelayedDataCheckConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_window": + if err := dec.Decode(&s.CheckWindow); err != nil { + return fmt.Errorf("%s | %w", "CheckWindow", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewDelayedDataCheckConfig returns a DelayedDataCheckConfig. 
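+//
+// The JSON this type models typically looks like (illustrative sketch):
+//
+//	"delayed_data_check_config": { "enabled": true, "check_window": "1h" }
+//
+// Leaving check_window unset keeps it null, so the datafeed derives an
+// appropriate window as described on the CheckWindow field above.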
+func NewDelayedDataCheckConfig() *DelayedDataCheckConfig { + r := &DelayedDataCheckConfig{} + + return r +} + +type DelayedDataCheckConfigVariant interface { + DelayedDataCheckConfigCaster() *DelayedDataCheckConfig +} + +func (s *DelayedDataCheckConfig) DelayedDataCheckConfigCaster() *DelayedDataCheckConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteaction.go new file mode 100644 index 000000000..1c4735342 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteaction.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DeleteAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L149-L151 +type DeleteAction struct { + DeleteSearchableSnapshot *bool `json:"delete_searchable_snapshot,omitempty"` +} + +func (s *DeleteAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delete_searchable_snapshot": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DeleteSearchableSnapshot", err) + } + s.DeleteSearchableSnapshot = &value + case bool: + s.DeleteSearchableSnapshot = &v + } + + } + } + return nil +} + +// NewDeleteAction returns a DeleteAction. +func NewDeleteAction() *DeleteAction { + r := &DeleteAction{} + + return r +} + +type DeleteActionVariant interface { + DeleteActionCaster() *DeleteAction +} + +func (s *DeleteAction) DeleteActionCaster() *DeleteAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteoperation.go new file mode 100644 index 000000000..1a9eec12f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deleteoperation.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// DeleteOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L144-L144 +type DeleteOperation struct { + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ The name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Routing A custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *DeleteOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "if_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IfPrimaryTerm", err) + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return fmt.Errorf("%s | %w", "IfSeqNo", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewDeleteOperation returns a DeleteOperation. 
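+//
+// In a bulk request body this type is the object carried by a "delete"
+// action line, e.g. (illustrative sketch):
+//
+//	{ "delete": { "_index": "my-index", "_id": "1" } }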
+func NewDeleteOperation() *DeleteOperation { + r := &DeleteOperation{} + + return r +} + +type DeleteOperationVariant interface { + DeleteOperationCaster() *DeleteOperation +} + +func (s *DeleteOperation) DeleteOperationCaster() *DeleteOperation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delimitedpayloadtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delimitedpayloadtokenfilter.go new file mode 100644 index 000000000..90effd722 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/delimitedpayloadtokenfilter.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/delimitedpayloadencoding" +) + +// DelimitedPayloadTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L84-L90 +type DelimitedPayloadTokenFilter struct { + // Delimiter Character used to separate tokens from payloads. Defaults to `|`. + Delimiter *string `json:"delimiter,omitempty"` + // Encoding Data type for the stored payload. 
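+	// With the default `|` delimiter, for instance, the token `rain|0.8`
+	// is indexed as `rain` with `0.8` stored as its payload.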
+ Encoding *delimitedpayloadencoding.DelimitedPayloadEncoding `json:"encoding,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *DelimitedPayloadTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delimiter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Delimiter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delimiter = &o + + case "encoding": + if err := dec.Decode(&s.Encoding); err != nil { + return fmt.Errorf("%s | %w", "Encoding", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DelimitedPayloadTokenFilter) MarshalJSON() ([]byte, error) { + type innerDelimitedPayloadTokenFilter DelimitedPayloadTokenFilter + tmp := innerDelimitedPayloadTokenFilter{ + Delimiter: s.Delimiter, + Encoding: s.Encoding, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "delimited_payload" + + return json.Marshal(tmp) +} + +// NewDelimitedPayloadTokenFilter returns a DelimitedPayloadTokenFilter. +func NewDelimitedPayloadTokenFilter() *DelimitedPayloadTokenFilter { + r := &DelimitedPayloadTokenFilter{} + + return r +} + +type DelimitedPayloadTokenFilterVariant interface { + DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter +} + +func (s *DelimitedPayloadTokenFilter) DelimitedPayloadTokenFilterCaster() *DelimitedPayloadTokenFilter { + return s +} + +func (s *DelimitedPayloadTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptions.go new file mode 100644 index 000000000..fa08c6616 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptions.go @@ -0,0 +1,166 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorindexoptionstype" +) + +// DenseVectorIndexOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L129-L166 +type DenseVectorIndexOptions struct { + // ConfidenceInterval The confidence interval to use when quantizing the vectors. Can be any value + // between and including `0.90` and + // `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic + // quantiles should be calculated for + // optimized quantization. When between `0.90` and `1.0`, this value restricts + // the values used when calculating + // the quantization thresholds. + // + // For example, a value of `0.95` will only use the middle `95%` of the values + // when calculating the quantization + // thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). + // + // Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` + // for dynamic quantile calculation. + // + // Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` + // index types. + ConfidenceInterval *float32 `json:"confidence_interval,omitempty"` + // EfConstruction The number of candidates to track while assembling the list of nearest + // neighbors for each new node. + // + // Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index + // types. + EfConstruction *int `json:"ef_construction,omitempty"` + // M The number of neighbors each node will be connected to in the HNSW graph. + // + // Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index + // types. + M *int `json:"m,omitempty"` + // RescoreVector The rescore vector options. This is only applicable to `bbq_hnsw`, + // `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index + // types. + RescoreVector *DenseVectorIndexOptionsRescoreVector `json:"rescore_vector,omitempty"` + // Type The type of kNN algorithm to use. 
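+	// The variants referenced throughout this type's field documentation
+	// include `hnsw`, `int8_hnsw`, `int4_hnsw`, `bbq_hnsw`, `flat`,
+	// `int8_flat`, `int4_flat`, and `bbq_flat`.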
+ Type densevectorindexoptionstype.DenseVectorIndexOptionsType `json:"type"` +} + +func (s *DenseVectorIndexOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "confidence_interval": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "ConfidenceInterval", err) + } + f := float32(value) + s.ConfidenceInterval = &f + case float64: + f := float32(v) + s.ConfidenceInterval = &f + } + + case "ef_construction": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EfConstruction", err) + } + s.EfConstruction = &value + case float64: + f := int(v) + s.EfConstruction = &f + } + + case "m": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "M", err) + } + s.M = &value + case float64: + f := int(v) + s.M = &f + } + + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewDenseVectorIndexOptions returns a DenseVectorIndexOptions. +func NewDenseVectorIndexOptions() *DenseVectorIndexOptions { + r := &DenseVectorIndexOptions{} + + return r +} + +type DenseVectorIndexOptionsVariant interface { + DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions +} + +func (s *DenseVectorIndexOptions) DenseVectorIndexOptionsCaster() *DenseVectorIndexOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptionsrescorevector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptionsrescorevector.go new file mode 100644 index 000000000..534c63fde --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorindexoptionsrescorevector.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DenseVectorIndexOptionsRescoreVector type. 
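+//
+// Rescoring re-computes scores with the original, full-precision vectors:
+// with `oversample: 3.0` and `k: 10`, for example, 30 quantized candidates
+// are gathered and the top 10 are returned after exact re-scoring.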
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L215-L223 +type DenseVectorIndexOptionsRescoreVector struct { + // Oversample The oversampling factor to use when searching for the nearest neighbor. This + // is only applicable to the quantized formats: `bbq_*`, `int4_*`, and `int8_*`. + // When provided, `oversample * k` vectors will be gathered and then their + // scores will be re-computed with the original vectors. + // + // valid values are between `1.0` and `10.0` (inclusive), or `0` exactly to + // disable oversampling. + Oversample float32 `json:"oversample"` +} + +func (s *DenseVectorIndexOptionsRescoreVector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "oversample": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Oversample", err) + } + f := float32(value) + s.Oversample = f + case float64: + f := float32(v) + s.Oversample = f + } + + } + } + return nil +} + +// NewDenseVectorIndexOptionsRescoreVector returns a DenseVectorIndexOptionsRescoreVector. +func NewDenseVectorIndexOptionsRescoreVector() *DenseVectorIndexOptionsRescoreVector { + r := &DenseVectorIndexOptionsRescoreVector{} + + return r +} + +type DenseVectorIndexOptionsRescoreVectorVariant interface { + DenseVectorIndexOptionsRescoreVectorCaster() *DenseVectorIndexOptionsRescoreVector +} + +func (s *DenseVectorIndexOptionsRescoreVector) DenseVectorIndexOptionsRescoreVectorCaster() *DenseVectorIndexOptionsRescoreVector { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectoroffheapstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectoroffheapstats.go new file mode 100644 index 000000000..e0d33afa6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectoroffheapstats.go @@ -0,0 +1,184 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DenseVectorOffHeapStats type. 
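+//
+// The veb/vec/veq/vex buckets appear to correspond to the Lucene vector
+// index files: raw vectors (.vec), the HNSW graph (.vex), scalar-quantized
+// vectors (.veq), and binarized vectors (.veb).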
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L166-L178 +type DenseVectorOffHeapStats struct { + Fielddata map[string]map[string]int64 `json:"fielddata,omitempty"` + TotalSize ByteSize `json:"total_size,omitempty"` + TotalSizeBytes int64 `json:"total_size_bytes"` + TotalVebSize ByteSize `json:"total_veb_size,omitempty"` + TotalVebSizeBytes int64 `json:"total_veb_size_bytes"` + TotalVecSize ByteSize `json:"total_vec_size,omitempty"` + TotalVecSizeBytes int64 `json:"total_vec_size_bytes"` + TotalVeqSize ByteSize `json:"total_veq_size,omitempty"` + TotalVeqSizeBytes int64 `json:"total_veq_size_bytes"` + TotalVexSize ByteSize `json:"total_vex_size,omitempty"` + TotalVexSizeBytes int64 `json:"total_vex_size_bytes"` +} + +func (s *DenseVectorOffHeapStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fielddata": + if s.Fielddata == nil { + s.Fielddata = make(map[string]map[string]int64, 0) + } + if err := dec.Decode(&s.Fielddata); err != nil { + return fmt.Errorf("%s | %w", "Fielddata", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_veb_size": + if err := dec.Decode(&s.TotalVebSize); err != nil { + return fmt.Errorf("%s | %w", "TotalVebSize", err) + } + + case "total_veb_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalVebSizeBytes", err) + } + s.TotalVebSizeBytes = value + case float64: + f := int64(v) + s.TotalVebSizeBytes = f + } + + case "total_vec_size": + if err := dec.Decode(&s.TotalVecSize); err != nil { + return fmt.Errorf("%s | %w", "TotalVecSize", err) + } + + case "total_vec_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalVecSizeBytes", err) + } + s.TotalVecSizeBytes = value + case float64: + f := int64(v) + s.TotalVecSizeBytes = f + } + + case "total_veq_size": + if err := dec.Decode(&s.TotalVeqSize); err != nil { + return fmt.Errorf("%s | %w", "TotalVeqSize", err) + } + + case "total_veq_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalVeqSizeBytes", err) + } + s.TotalVeqSizeBytes = value + case float64: + f := int64(v) + s.TotalVeqSizeBytes = f + } + + case "total_vex_size": + if err := dec.Decode(&s.TotalVexSize); err != nil { + return fmt.Errorf("%s | %w", "TotalVexSize", err) + } + + case "total_vex_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalVexSizeBytes", err) + } + s.TotalVexSizeBytes = value + case float64: + f := int64(v) + s.TotalVexSizeBytes = f + 
} + + } + } + return nil +} + +// NewDenseVectorOffHeapStats returns a DenseVectorOffHeapStats. +func NewDenseVectorOffHeapStats() *DenseVectorOffHeapStats { + r := &DenseVectorOffHeapStats{ + Fielddata: make(map[string]map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorproperty.go new file mode 100644 index 000000000..9e2cf9a7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorproperty.go @@ -0,0 +1,906 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorelementtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorsimilarity" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// DenseVectorProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L23-L62 +type DenseVectorProperty struct { + // Dims Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, + // it will be set to the length of + // the first vector added to the field. + Dims *int `json:"dims,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + // ElementType The data type used to encode vectors. The supported data types are `float` + // (default), `byte`, and `bit`. + ElementType *densevectorelementtype.DenseVectorElementType `json:"element_type,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Index If `true`, you can search this field using the kNN search API. + Index *bool `json:"index,omitempty"` + // IndexOptions An optional section that configures the kNN indexing algorithm. The HNSW + // algorithm has two internal parameters + // that influence how the data structure is built. These can be adjusted to + // improve the accuracy of results, at the + // expense of slower indexing speed. + // + // This parameter can only be specified when `index` is `true`. + IndexOptions *DenseVectorIndexOptions `json:"index_options,omitempty"` + // Meta Metadata about the field. 
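+	// It is application-specific and opaque to Elasticsearch: stored in the
+	// mapping and returned by the get mapping API, but with no effect on
+	// how the field is indexed or queried.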
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + // Similarity The vector similarity metric to use in kNN search. + // + // Documents are ranked by their vector field's similarity to the query vector. + // The `_score` of each document will + // be derived from the similarity, in a way that ensures scores are positive and + // that a larger score corresponds + // to a higher ranking. + // + // Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to + // `cosine`. + // + // `bit` vectors only support `l2_norm` as their similarity metric. + // + // This parameter can only be specified when `index` is `true`. + Similarity *densevectorsimilarity.DenseVectorSimilarity `json:"similarity,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dims": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dims", err) + } + s.Dims = &value + case float64: + f := int(v) + s.Dims = &f + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "element_type": + if err := dec.Decode(&s.ElementType); err != nil { + return fmt.Errorf("%s | %w", "ElementType", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields 
| %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := 
NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] 
= oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } 
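+
+		// As with "fields" above, the concrete Property implementation is
+		// chosen from the JSON "type" value (defaulting to "object" when no
+		// type is present); unknown types fall back to a generic Property.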
+ + case "similarity": + if err := dec.Decode(&s.Similarity); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { + type innerDenseVectorProperty DenseVectorProperty + tmp := innerDenseVectorProperty{ + Dims: s.Dims, + Dynamic: s.Dynamic, + ElementType: s.ElementType, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "dense_vector" + + return json.Marshal(tmp) +} + +// NewDenseVectorProperty returns a DenseVectorProperty. +func NewDenseVectorProperty() *DenseVectorProperty { + r := &DenseVectorProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DenseVectorPropertyVariant interface { + DenseVectorPropertyCaster() *DenseVectorProperty +} + +func (s *DenseVectorProperty) DenseVectorPropertyCaster() *DenseVectorProperty { + return s +} + +func (s *DenseVectorProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorstats.go new file mode 100644 index 000000000..df5dbdd38 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/densevectorstats.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DenseVectorStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L157-L160 +type DenseVectorStats struct { + OffHeap *DenseVectorOffHeapStats `json:"off_heap,omitempty"` + ValueCount int64 `json:"value_count"` +} + +func (s *DenseVectorStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "off_heap": + if err := dec.Decode(&s.OffHeap); err != nil { + return fmt.Errorf("%s | %w", "OffHeap", err) + } + + case "value_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ValueCount", err) + } + s.ValueCount = value + case float64: + f := int64(v) + s.ValueCount = f + } + + } + } + return nil +} + +// NewDenseVectorStats returns a DenseVectorStats. +func NewDenseVectorStats() *DenseVectorStats { + r := &DenseVectorStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dependency.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dependency.go new file mode 100644 index 000000000..73cb171c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dependency.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Dependency type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L30-L33 +type Dependency struct { + Field string `json:"field"` + Value ScalarValue `json:"value"` +} + +func (s *Dependency) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewDependency returns a Dependency. 
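// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): DenseVectorStats'
// UnmarshalJSON above accepts value_count either as a JSON number or as a
// quoted string (via strconv.ParseInt), so both payloads below decode
// identically:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, payload := range []string{`{"value_count":42}`, `{"value_count":"42"}`} {
		s := types.NewDenseVectorStats()
		if err := json.Unmarshal([]byte(payload), s); err != nil {
			panic(err)
		}
		fmt.Println(s.ValueCount) // 42 both times
	}
}
// ---------------------------------------------------------------------------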
+func NewDependency() *Dependency { + r := &Dependency{} + + return r +} + +type DependencyVariant interface { + DependencyCaster() *Dependency +} + +func (s *Dependency) DependencyCaster() *Dependency { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecation.go new file mode 100644 index 000000000..acacb02b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecation.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deprecationlevel" +) + +// Deprecation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/deprecations/types.ts#L32-L47 +type Deprecation struct { + // Details Optional details about the deprecation warning. + Details *string `json:"details,omitempty"` + // Level The level property describes the significance of the issue. + Level deprecationlevel.DeprecationLevel `json:"level"` + // Message Descriptive information about the deprecation warning. + Message string `json:"message"` + Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + ResolveDuringRollingUpgrade bool `json:"resolve_during_rolling_upgrade"` + // Url A link to the breaking change documentation, where you can find more + // information about this change. 
+ Url string `json:"url"` +} + +func (s *Deprecation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Details = &o + + case "level": + if err := dec.Decode(&s.Level); err != nil { + return fmt.Errorf("%s | %w", "Level", err) + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "_meta": + if s.Meta_ == nil { + s.Meta_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "resolve_during_rolling_upgrade": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ResolveDuringRollingUpgrade", err) + } + s.ResolveDuringRollingUpgrade = value + case bool: + s.ResolveDuringRollingUpgrade = v + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewDeprecation returns a Deprecation. +func NewDeprecation() *Deprecation { + r := &Deprecation{ + Meta_: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecationindexing.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecationindexing.go new file mode 100644 index 000000000..7a76f6a45 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/deprecationindexing.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DeprecationIndexing type. 
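// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): Deprecation's decoder
// above shows two conventions used throughout these generated types — string
// fields are read as json.RawMessage and strconv.Unquote'd (falling back to
// the raw bytes), and booleans are accepted either as JSON bools or as
// strings. For example:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	payload := `{"level":"warning","message":"setting is deprecated","resolve_during_rolling_upgrade":"true"}`
	d := types.NewDeprecation()
	if err := json.Unmarshal([]byte(payload), d); err != nil {
		panic(err)
	}
	fmt.Println(d.Message, d.ResolveDuringRollingUpgrade) // setting is deprecated true
}
// ---------------------------------------------------------------------------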
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L154-L156 +type DeprecationIndexing struct { + Enabled string `json:"enabled"` +} + +func (s *DeprecationIndexing) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + +// NewDeprecationIndexing returns a DeprecationIndexing. +func NewDeprecationIndexing() *DeprecationIndexing { + r := &DeprecationIndexing{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregate.go new file mode 100644 index 000000000..8c8f8a3b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregate.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DerivativeAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L241-L248 +type DerivativeAggregate struct { + Meta Metadata `json:"meta,omitempty"` + NormalizedValue *Float64 `json:"normalized_value,omitempty"` + NormalizedValueAsString *string `json:"normalized_value_as_string,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. 
+ Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *DerivativeAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "normalized_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NormalizedValue", err) + } + f := Float64(value) + s.NormalizedValue = &f + case float64: + f := Float64(v) + s.NormalizedValue = &f + } + + case "normalized_value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NormalizedValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NormalizedValueAsString = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewDerivativeAggregate returns a DerivativeAggregate. +func NewDerivativeAggregate() *DerivativeAggregate { + r := &DerivativeAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregation.go new file mode 100644 index 000000000..5677794a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/derivativeaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// DerivativeAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L216-L216 +type DerivativeAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. 
+ BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *DerivativeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewDerivativeAggregation returns a DerivativeAggregation. +func NewDerivativeAggregation() *DerivativeAggregation { + r := &DerivativeAggregation{} + + return r +} + +type DerivativeAggregationVariant interface { + DerivativeAggregationCaster() *DerivativeAggregation +} + +func (s *DerivativeAggregation) DerivativeAggregationCaster() *DerivativeAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detailsinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detailsinfo.go new file mode 100644 index 000000000..c144765bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detailsinfo.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DetailsInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L286-L321 +type DetailsInfo struct { + // Blob A description of the blob that was written and read. + Blob BlobDetails `json:"blob"` + // OverwriteElapsed The elapsed time spent overwriting the blob. + // If the blob was not overwritten, this information is omitted. 
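// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): building a derivative
// pipeline aggregation with the generated type above. BucketsPath is a union
// type; this assumes the common single-path form, assigned as a plain string:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	agg := types.NewDerivativeAggregation()
	agg.BucketsPath = "the_sum" // assumption: single-path string form of the union
	format := "0.00"
	agg.Format = &format // GapPolicy left nil, so it is omitted on marshal
	b, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"buckets_path":"the_sum","format":"0.00"}
}
// ---------------------------------------------------------------------------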
+ OverwriteElapsed Duration `json:"overwrite_elapsed,omitempty"` + // OverwriteElapsedNanos The elapsed time spent overwriting the blob, in nanoseconds. + // If the blob was not overwritten, this information is omitted. + OverwriteElapsedNanos *int64 `json:"overwrite_elapsed_nanos,omitempty"` + // WriteElapsed The elapsed time spent writing the blob. + WriteElapsed Duration `json:"write_elapsed"` + // WriteElapsedNanos The elapsed time spent writing the blob, in nanoseconds. + WriteElapsedNanos int64 `json:"write_elapsed_nanos"` + // WriteThrottled The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob. + WriteThrottled Duration `json:"write_throttled"` + // WriteThrottledNanos The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or + // `indices.recovery.max_bytes_per_sec` if the recovery settings for managed + // services are set) throttle while writing the blob, in nanoseconds. + WriteThrottledNanos int64 `json:"write_throttled_nanos"` + // WriterNode The node which wrote the blob and coordinated the read operations. + WriterNode SnapshotNodeInfo `json:"writer_node"` +} + +func (s *DetailsInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "blob": + if err := dec.Decode(&s.Blob); err != nil { + return fmt.Errorf("%s | %w", "Blob", err) + } + + case "overwrite_elapsed": + if err := dec.Decode(&s.OverwriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsed", err) + } + + case "overwrite_elapsed_nanos": + if err := dec.Decode(&s.OverwriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "OverwriteElapsedNanos", err) + } + + case "write_elapsed": + if err := dec.Decode(&s.WriteElapsed); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsed", err) + } + + case "write_elapsed_nanos": + if err := dec.Decode(&s.WriteElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteElapsedNanos", err) + } + + case "write_throttled": + if err := dec.Decode(&s.WriteThrottled); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottled", err) + } + + case "write_throttled_nanos": + if err := dec.Decode(&s.WriteThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "WriteThrottledNanos", err) + } + + case "writer_node": + if err := dec.Decode(&s.WriterNode); err != nil { + return fmt.Errorf("%s | %w", "WriterNode", err) + } + + } + } + return nil +} + +// NewDetailsInfo returns a DetailsInfo. +func NewDetailsInfo() *DetailsInfo { + r := &DetailsInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectionrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectionrule.go new file mode 100644 index 000000000..f0af8f23d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectionrule.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ruleaction" +) + +// DetectionRule type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Rule.ts#L25-L39 +type DetectionRule struct { + // Actions The set of actions to be triggered when the rule applies. If more than one + // action is specified the effects of all actions are combined. + Actions []ruleaction.RuleAction `json:"actions,omitempty"` + // Conditions An array of numeric conditions when the rule applies. A rule must either have + // a non-empty scope or at least one condition. Multiple conditions are combined + // together with a logical AND. + Conditions []RuleCondition `json:"conditions,omitempty"` + // Scope A scope of series where the rule applies. A rule must either have a non-empty + // scope or at least one condition. By default, the scope includes all series. + // Scoping is allowed for any of the fields that are also specified in + // `by_field_name`, `over_field_name`, or `partition_field_name`. + Scope map[string]FilterRef `json:"scope,omitempty"` +} + +// NewDetectionRule returns a DetectionRule. +func NewDetectionRule() *DetectionRule { + r := &DetectionRule{ + Scope: make(map[string]FilterRef), + } + + return r +} + +type DetectionRuleVariant interface { + DetectionRuleCaster() *DetectionRule +} + +func (s *DetectionRule) DetectionRuleCaster() *DetectionRule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detector.go new file mode 100644 index 000000000..1e47b0bda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detector.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
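// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): DetectionRule above has
// no custom (un)marshalling — every field is omitempty, so the zero rule
// serializes to "{}". Per its doc comment, a valid rule still needs a
// non-empty scope or at least one condition; that constraint is validated by
// Elasticsearch, not by this type:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	r := types.NewDetectionRule()
	b, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {} — populate Conditions or Scope before use
}
// ---------------------------------------------------------------------------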
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent" +) + +// Detector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Detector.ts#L25-L67 +type Detector struct { + // ByFieldName The field used to split the data. In particular, this property is used for + // analyzing the splits with respect to their own history. It is used for + // finding unusual values in the context of the split. + ByFieldName *string `json:"by_field_name,omitempty"` + // CustomRules Custom rules enable you to customize the way detectors operate. For example, + // a rule may dictate conditions under which results should be skipped. Kibana + // refers to custom rules as job rules. + CustomRules []DetectionRule `json:"custom_rules,omitempty"` + // DetectorDescription A description of the detector. + DetectorDescription *string `json:"detector_description,omitempty"` + // DetectorIndex A unique identifier for the detector. This identifier is based on the order + // of the detectors in the `analysis_config`, starting at zero. If you specify a + // value for this property, it is ignored. + DetectorIndex *int `json:"detector_index,omitempty"` + // ExcludeFrequent If set, frequent entities are excluded from influencing the anomaly results. + // Entities can be considered frequent over time or frequent in a population. If + // you are working with both over and by fields, you can set `exclude_frequent` + // to `all` for both fields, or to `by` or `over` for those specific fields. + ExcludeFrequent *excludefrequent.ExcludeFrequent `json:"exclude_frequent,omitempty"` + // FieldName The field that the detector uses in the function. If you use an event rate + // function such as count or rare, do not specify this field. The `field_name` + // cannot contain double quotes or backslashes. + FieldName *string `json:"field_name,omitempty"` + // Function The analysis function that is used. For example, `count`, `rare`, `mean`, + // `min`, `max`, or `sum`. + Function *string `json:"function,omitempty"` + // OverFieldName The field used to split the data. In particular, this property is used for + // analyzing the splits with respect to the history of all splits. It is used + // for finding unusual values in the population of all splits. + OverFieldName *string `json:"over_field_name,omitempty"` + // PartitionFieldName The field used to segment the analysis. When you use this property, you have + // completely independent baselines for each value of this field. + PartitionFieldName *string `json:"partition_field_name,omitempty"` + // UseNull Defines whether a new series is used as the null series when there is no + // value for the by or partition fields. 
+ UseNull *bool `json:"use_null,omitempty"` +} + +func (s *Detector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return fmt.Errorf("%s | %w", "ByFieldName", err) + } + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return fmt.Errorf("%s | %w", "CustomRules", err) + } + + case "detector_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DetectorDescription", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DetectorDescription = &o + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = &value + case float64: + f := int(v) + s.DetectorIndex = &f + } + + case "exclude_frequent": + if err := dec.Decode(&s.ExcludeFrequent); err != nil { + return fmt.Errorf("%s | %w", "ExcludeFrequent", err) + } + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return fmt.Errorf("%s | %w", "FieldName", err) + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = &o + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { + return fmt.Errorf("%s | %w", "OverFieldName", err) + } + + case "partition_field_name": + if err := dec.Decode(&s.PartitionFieldName); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldName", err) + } + + case "use_null": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UseNull", err) + } + s.UseNull = &value + case bool: + s.UseNull = &v + } + + } + } + return nil +} + +// NewDetector returns a Detector. +func NewDetector() *Detector { + r := &Detector{} + + return r +} + +type DetectorVariant interface { + DetectorCaster() *Detector +} + +func (s *Detector) DetectorCaster() *Detector { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorread.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorread.go new file mode 100644 index 000000000..0dfa65755 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorread.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent" +) + +// DetectorRead type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Detector.ts#L69-L125 +type DetectorRead struct { + // ByFieldName The field used to split the data. + // In particular, this property is used for analyzing the splits with respect to + // their own history. + // It is used for finding unusual values in the context of the split. + ByFieldName *string `json:"by_field_name,omitempty"` + // CustomRules An array of custom rule objects, which enable you to customize the way + // detectors operate. + // For example, a rule may dictate to the detector conditions under which + // results should be skipped. + // Kibana refers to custom rules as job rules. + CustomRules []DetectionRule `json:"custom_rules,omitempty"` + // DetectorDescription A description of the detector. + DetectorDescription *string `json:"detector_description,omitempty"` + // DetectorIndex A unique identifier for the detector. + // This identifier is based on the order of the detectors in the + // `analysis_config`, starting at zero. + DetectorIndex *int `json:"detector_index,omitempty"` + // ExcludeFrequent Contains one of the following values: `all`, `none`, `by`, or `over`. + // If set, frequent entities are excluded from influencing the anomaly results. + // Entities can be considered frequent over time or frequent in a population. + // If you are working with both over and by fields, then you can set + // `exclude_frequent` to all for both fields, or to `by` or `over` for those + // specific fields. + ExcludeFrequent *excludefrequent.ExcludeFrequent `json:"exclude_frequent,omitempty"` + // FieldName The field that the detector uses in the function. + // If you use an event rate function such as `count` or `rare`, do not specify + // this field. + FieldName *string `json:"field_name,omitempty"` + // Function The analysis function that is used. + // For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. + Function string `json:"function"` + // OverFieldName The field used to split the data. + // In particular, this property is used for analyzing the splits with respect to + // the history of all splits. + // It is used for finding unusual values in the population of all splits. + OverFieldName *string `json:"over_field_name,omitempty"` + // PartitionFieldName The field used to segment the analysis. + // When you use this property, you have completely independent baselines for + // each value of this field. + PartitionFieldName *string `json:"partition_field_name,omitempty"` + // UseNull Defines whether a new series is used as the null series when there is no + // value for the by or partition fields. 
+ UseNull *bool `json:"use_null,omitempty"` +} + +func (s *DetectorRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return fmt.Errorf("%s | %w", "ByFieldName", err) + } + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return fmt.Errorf("%s | %w", "CustomRules", err) + } + + case "detector_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DetectorDescription", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DetectorDescription = &o + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = &value + case float64: + f := int(v) + s.DetectorIndex = &f + } + + case "exclude_frequent": + if err := dec.Decode(&s.ExcludeFrequent); err != nil { + return fmt.Errorf("%s | %w", "ExcludeFrequent", err) + } + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return fmt.Errorf("%s | %w", "FieldName", err) + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = o + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { + return fmt.Errorf("%s | %w", "OverFieldName", err) + } + + case "partition_field_name": + if err := dec.Decode(&s.PartitionFieldName); err != nil { + return fmt.Errorf("%s | %w", "PartitionFieldName", err) + } + + case "use_null": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UseNull", err) + } + s.UseNull = &value + case bool: + s.UseNull = &v + } + + } + } + return nil +} + +// NewDetectorRead returns a DetectorRead. +func NewDetectorRead() *DetectorRead { + r := &DetectorRead{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorupdate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorupdate.go new file mode 100644 index 000000000..3c323206e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/detectorupdate.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
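// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): a minimal ML detector
// built with the generated Detector type from above. Optional fields are
// pointers, so values are taken by address:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	fn, field := "mean", "responsetime"
	det := types.NewDetector()
	det.Function = &fn
	det.FieldName = &field
	b, err := json.Marshal(det)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"field_name":"responsetime","function":"mean"}
}
// ---------------------------------------------------------------------------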
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DetectorUpdate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Detector.ts#L127-L143 +type DetectorUpdate struct { + // CustomRules An array of custom rule objects, which enable you to customize the way + // detectors operate. + // For example, a rule may dictate to the detector conditions under which + // results should be skipped. + // Kibana refers to custom rules as job rules. + CustomRules []DetectionRule `json:"custom_rules,omitempty"` + // Description A description of the detector. + Description *string `json:"description,omitempty"` + // DetectorIndex A unique identifier for the detector. + // This identifier is based on the order of the detectors in the + // `analysis_config`, starting at zero. + DetectorIndex int `json:"detector_index"` +} + +func (s *DetectorUpdate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return fmt.Errorf("%s | %w", "CustomRules", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "detector_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectorIndex", err) + } + s.DetectorIndex = value + case float64: + f := int(v) + s.DetectorIndex = f + } + + } + } + return nil +} + +// NewDetectorUpdate returns a DetectorUpdate. +func NewDetectorUpdate() *DetectorUpdate { + r := &DetectorUpdate{} + + return r +} + +type DetectorUpdateVariant interface { + DetectorUpdateCaster() *DetectorUpdate +} + +func (s *DetectorUpdate) DetectorUpdateCaster() *DetectorUpdate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsknnprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsknnprofile.go new file mode 100644 index 000000000..a2cc54813 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsknnprofile.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
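// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored patch): DetectorUpdate's decoder
// above accepts detector_index as a JSON number or a string (via
// strconv.Atoi):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	upd := types.NewDetectorUpdate()
	payload := `{"detector_index":"2","description":"Quieter rule set"}`
	if err := json.Unmarshal([]byte(payload), upd); err != nil {
		panic(err)
	}
	fmt.Println(upd.DetectorIndex, *upd.Description) // 2 Quieter rule set
}
// ---------------------------------------------------------------------------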
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsKnnProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L181-L186 +type DfsKnnProfile struct { + Collector []KnnCollectorResult `json:"collector"` + Query []KnnQueryProfileResult `json:"query"` + RewriteTime int64 `json:"rewrite_time"` + VectorOperationsCount *int64 `json:"vector_operations_count,omitempty"` +} + +func (s *DfsKnnProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collector": + if err := dec.Decode(&s.Collector); err != nil { + return fmt.Errorf("%s | %w", "Collector", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rewrite_time": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RewriteTime", err) + } + s.RewriteTime = value + case float64: + f := int64(v) + s.RewriteTime = f + } + + case "vector_operations_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "VectorOperationsCount", err) + } + s.VectorOperationsCount = &value + case float64: + f := int64(v) + s.VectorOperationsCount = &f + } + + } + } + return nil +} + +// NewDfsKnnProfile returns a DfsKnnProfile. +func NewDfsKnnProfile() *DfsKnnProfile { + r := &DfsKnnProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsprofile.go new file mode 100644 index 000000000..170bc0ccb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsprofile.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DfsProfile type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L154-L157 +type DfsProfile struct { + Knn []DfsKnnProfile `json:"knn,omitempty"` + Statistics *DfsStatisticsProfile `json:"statistics,omitempty"` +} + +// NewDfsProfile returns a DfsProfile. +func NewDfsProfile() *DfsProfile { + r := &DfsProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsbreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsbreakdown.go new file mode 100644 index 000000000..cd0bc1f29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsbreakdown.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsStatisticsBreakdown type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L170-L179 +type DfsStatisticsBreakdown struct { + CollectionStatistics int64 `json:"collection_statistics"` + CollectionStatisticsCount int64 `json:"collection_statistics_count"` + CreateWeight int64 `json:"create_weight"` + CreateWeightCount int64 `json:"create_weight_count"` + Rewrite int64 `json:"rewrite"` + RewriteCount int64 `json:"rewrite_count"` + TermStatistics int64 `json:"term_statistics"` + TermStatisticsCount int64 `json:"term_statistics_count"` +} + +func (s *DfsStatisticsBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collection_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionStatistics", err) + } + s.CollectionStatistics = value + case float64: + f := int64(v) + s.CollectionStatistics = f + } + + case "collection_statistics_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionStatisticsCount", err) + } + s.CollectionStatisticsCount = value + case float64: + f := int64(v) + s.CollectionStatisticsCount = f + } + + case "create_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeight", err) + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeightCount", err) + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "rewrite": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + s.Rewrite = value + case float64: + f := int64(v) + s.Rewrite = f + } + + case "rewrite_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RewriteCount", err) + } + s.RewriteCount = value + case float64: + f := int64(v) + s.RewriteCount = f + } + + case "term_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TermStatistics", err) + } + s.TermStatistics = value + case float64: + f := int64(v) + s.TermStatistics = f + } + + case "term_statistics_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TermStatisticsCount", err) + } + s.TermStatisticsCount = value + case float64: + f := int64(v) + s.TermStatisticsCount = f + } + + } + } + return nil +} + +// NewDfsStatisticsBreakdown returns a DfsStatisticsBreakdown. 
+func NewDfsStatisticsBreakdown() *DfsStatisticsBreakdown { + r := &DfsStatisticsBreakdown{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsprofile.go new file mode 100644 index 000000000..4e4b30a90 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dfsstatisticsprofile.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DfsStatisticsProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L159-L167 +type DfsStatisticsProfile struct { + Breakdown DfsStatisticsBreakdown `json:"breakdown"` + Children []DfsStatisticsProfile `json:"children,omitempty"` + Debug map[string]json.RawMessage `json:"debug,omitempty"` + Description string `json:"description"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *DfsStatisticsProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if s.Debug == nil { + s.Debug = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + 
return nil +} + +// NewDfsStatisticsProfile returns a DfsStatisticsProfile. +func NewDfsStatisticsProfile() *DfsStatisticsProfile { + r := &DfsStatisticsProfile{ + Debug: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosis.go new file mode 100644 index 000000000..35ed78f39 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosis.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Diagnosis type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L52-L58 +type Diagnosis struct { + Action string `json:"action"` + AffectedResources DiagnosisAffectedResources `json:"affected_resources"` + Cause string `json:"cause"` + HelpUrl string `json:"help_url"` + Id string `json:"id"` +} + +func (s *Diagnosis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "affected_resources": + if err := dec.Decode(&s.AffectedResources); err != nil { + return fmt.Errorf("%s | %w", "AffectedResources", err) + } + + case "cause": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cause", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cause = o + + case "help_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HelpUrl", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HelpUrl = o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + } + } + return nil +} + +// NewDiagnosis returns a Diagnosis. 
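+//
+// A minimal decoding sketch, assuming the standard library encoding/json; the
+// health-report fragment below is hypothetical, not output from a live
+// cluster:
+//
+//     var d Diagnosis
+//     raw := []byte(`{"id":"add_disk_capacity","cause":"disk usage exceeded the watermark","action":"add capacity or remove data","help_url":"https://example.invalid/help"}`)
+//     if err := json.Unmarshal(raw, &d); err != nil {
+//         // handle a malformed payload
+//     }
+//     fmt.Println(d.Cause) // "disk usage exceeded the watermark"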
+func NewDiagnosis() *Diagnosis { + r := &Diagnosis{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosisaffectedresources.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosisaffectedresources.go new file mode 100644 index 000000000..aa28c340f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diagnosisaffectedresources.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DiagnosisAffectedResources type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L60-L66 +type DiagnosisAffectedResources struct { + FeatureStates []string `json:"feature_states,omitempty"` + Indices []string `json:"indices,omitempty"` + Nodes []IndicatorNode `json:"nodes,omitempty"` + SlmPolicies []string `json:"slm_policies,omitempty"` + SnapshotRepositories []string `json:"snapshot_repositories,omitempty"` +} + +func (s *DiagnosisAffectedResources) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return fmt.Errorf("%s | %w", "FeatureStates", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "nodes": + if err := dec.Decode(&s.Nodes); err != nil { + return fmt.Errorf("%s | %w", "Nodes", err) + } + + case "slm_policies": + if err := dec.Decode(&s.SlmPolicies); err != nil { + return fmt.Errorf("%s | %w", "SlmPolicies", err) + } + + case "snapshot_repositories": + if err := dec.Decode(&s.SnapshotRepositories); err != nil { + return fmt.Errorf("%s | %w", "SnapshotRepositories", err) + } + + } + } + return nil +} + +// NewDiagnosisAffectedResources returns a DiagnosisAffectedResources. 
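+//
+// A decoding sketch, assuming encoding/json; the index name is hypothetical.
+// The custom UnmarshalJSON above accepts `indices` as either a single string
+// or an array of strings, so both payloads populate the same slice:
+//
+//     var a, b DiagnosisAffectedResources
+//     _ = json.Unmarshal([]byte(`{"indices":"logs-000001"}`), &a)
+//     _ = json.Unmarshal([]byte(`{"indices":["logs-000001"]}`), &b)
+//     // a.Indices and b.Indices are both []string{"logs-000001"}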
+func NewDiagnosisAffectedResources() *DiagnosisAffectedResources { + r := &DiagnosisAffectedResources{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dictionarydecompoundertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dictionarydecompoundertokenfilter.go new file mode 100644 index 000000000..153f2d96f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dictionarydecompoundertokenfilter.go @@ -0,0 +1,208 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DictionaryDecompounderTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L63-L65 +type DictionaryDecompounderTokenFilter struct { + // MaxSubwordSize Maximum subword character length. Longer subword tokens are excluded from the + // output. Defaults to `15`. + MaxSubwordSize *int `json:"max_subword_size,omitempty"` + // MinSubwordSize Minimum subword character length. Shorter subword tokens are excluded from + // the output. Defaults to `2`. + MinSubwordSize *int `json:"min_subword_size,omitempty"` + // MinWordSize Minimum word character length. Shorter word tokens are excluded from the + // output. Defaults to `5`. + MinWordSize *int `json:"min_word_size,omitempty"` + // OnlyLongestMatch If `true`, only include the longest matching subword. Defaults to `false`. + OnlyLongestMatch *bool `json:"only_longest_match,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` + // WordList A list of subwords to look for in the token stream. If found, the subword is + // included in the token output. + // Either this parameter or `word_list_path` must be specified. + WordList []string `json:"word_list,omitempty"` + // WordListPath Path to a file that contains a list of subwords to find in the token stream. + // If found, the subword is included in the token output. + // This path must be absolute or relative to the config location, and the file + // must be UTF-8 encoded. Each token in the file must be separated by a line + // break. + // Either this parameter or `word_list` must be specified. 
+ WordListPath *string `json:"word_list_path,omitempty"` +} + +func (s *DictionaryDecompounderTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_subword_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSubwordSize", err) + } + s.MaxSubwordSize = &value + case float64: + f := int(v) + s.MaxSubwordSize = &f + } + + case "min_subword_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinSubwordSize", err) + } + s.MinSubwordSize = &value + case float64: + f := int(v) + s.MinSubwordSize = &f + } + + case "min_word_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordSize", err) + } + s.MinWordSize = &value + case float64: + f := int(v) + s.MinWordSize = &f + } + + case "only_longest_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OnlyLongestMatch", err) + } + s.OnlyLongestMatch = &value + case bool: + s.OnlyLongestMatch = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "word_list": + if err := dec.Decode(&s.WordList); err != nil { + return fmt.Errorf("%s | %w", "WordList", err) + } + + case "word_list_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WordListPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WordListPath = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DictionaryDecompounderTokenFilter) MarshalJSON() ([]byte, error) { + type innerDictionaryDecompounderTokenFilter DictionaryDecompounderTokenFilter + tmp := innerDictionaryDecompounderTokenFilter{ + MaxSubwordSize: s.MaxSubwordSize, + MinSubwordSize: s.MinSubwordSize, + MinWordSize: s.MinWordSize, + OnlyLongestMatch: s.OnlyLongestMatch, + Type: s.Type, + Version: s.Version, + WordList: s.WordList, + WordListPath: s.WordListPath, + } + + tmp.Type = "dictionary_decompounder" + + return json.Marshal(tmp) +} + +// NewDictionaryDecompounderTokenFilter returns a DictionaryDecompounderTokenFilter. 
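+//
+// A marshalling sketch, assuming encoding/json; the word list is hypothetical.
+// The custom MarshalJSON above always injects the literal filter type, so the
+// caller never has to set Type by hand:
+//
+//     f := NewDictionaryDecompounderTokenFilter()
+//     f.WordList = []string{"donau", "dampf", "schiff"}
+//     out, _ := json.Marshal(f)
+//     // out: {"type":"dictionary_decompounder","word_list":["donau","dampf","schiff"]}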
+func NewDictionaryDecompounderTokenFilter() *DictionaryDecompounderTokenFilter { + r := &DictionaryDecompounderTokenFilter{} + + return r +} + +type DictionaryDecompounderTokenFilterVariant interface { + DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter +} + +func (s *DictionaryDecompounderTokenFilter) DictionaryDecompounderTokenFilterCaster() *DictionaryDecompounderTokenFilter { + return s +} + +func (s *DictionaryDecompounderTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/directgenerator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/directgenerator.go new file mode 100644 index 000000000..3eb245d4f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/directgenerator.go @@ -0,0 +1,262 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode" +) + +// DirectGenerator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L269-L332 +type DirectGenerator struct { + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + // MaxEdits The maximum edit distance candidate suggestions can have in order to be + // considered as a suggestion. + // Can only be `1` or `2`. + MaxEdits *int `json:"max_edits,omitempty"` + // MaxInspections A factor that is used to multiply with the shard_size in order to inspect + // more candidate spelling corrections on the shard level. + // Can improve accuracy at the cost of performance. + MaxInspections *float32 `json:"max_inspections,omitempty"` + // MaxTermFreq The maximum threshold in number of documents in which a suggest text token + // can exist in order to be included. + // This can be used to exclude high frequency terms — which are usually spelled + // correctly — from being spellchecked. + // Can be a relative percentage number (for example `0.4`) or an absolute number + // to represent document frequencies. + // If a value higher than 1 is specified, then fractional can not be specified. + MaxTermFreq *float32 `json:"max_term_freq,omitempty"` + // MinDocFreq The minimal threshold in number of documents a suggestion should appear in. 
+ // This can improve quality by only suggesting high frequency terms. + // Can be specified as an absolute number or as a relative percentage of number + // of documents. + // If a value higher than 1 is specified, the number cannot be fractional. + MinDocFreq *float32 `json:"min_doc_freq,omitempty"` + // MinWordLength The minimum length a suggest text term must have in order to be included. + MinWordLength *int `json:"min_word_length,omitempty"` + // PostFilter A filter (analyzer) that is applied to each of the generated tokens before + // they are passed to the actual phrase scorer. + PostFilter *string `json:"post_filter,omitempty"` + // PreFilter A filter (analyzer) that is applied to each of the tokens passed to this + // candidate generator. + // This filter is applied to the original token before candidates are generated. + PreFilter *string `json:"pre_filter,omitempty"` + // PrefixLength The number of minimal prefix characters that must match in order be a + // candidate suggestions. + // Increasing this number improves spellcheck performance. + PrefixLength *int `json:"prefix_length,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // SuggestMode Controls what suggestions are included on the suggestions generated on each + // shard. + SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"` +} + +func (s *DirectGenerator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "max_edits": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxEdits", err) + } + s.MaxEdits = &value + case float64: + f := int(v) + s.MaxEdits = &f + } + + case "max_inspections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInspections", err) + } + f := float32(value) + s.MaxInspections = &f + case float64: + f := float32(v) + s.MaxInspections = &f + } + + case "max_term_freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTermFreq", err) + } + f := float32(value) + s.MaxTermFreq = &f + case float64: + f := float32(v) + s.MaxTermFreq = &f + } + + case "min_doc_freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocFreq", err) + } + f := float32(value) + s.MinDocFreq = &f + case float64: + f := float32(v) + s.MinDocFreq = &f + } + + case "min_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordLength", err) + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "post_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PostFilter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PostFilter = 
&o + + case "pre_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PreFilter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreFilter = &o + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "suggest_mode": + if err := dec.Decode(&s.SuggestMode); err != nil { + return fmt.Errorf("%s | %w", "SuggestMode", err) + } + + } + } + return nil +} + +// NewDirectGenerator returns a DirectGenerator. +func NewDirectGenerator() *DirectGenerator { + r := &DirectGenerator{} + + return r +} + +type DirectGeneratorVariant interface { + DirectGeneratorCaster() *DirectGenerator +} + +func (s *DirectGenerator) DirectGeneratorCaster() *DirectGenerator { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discovery.go new file mode 100644 index 000000000..18df3c229 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discovery.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Discovery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L203-L221 +type Discovery struct { + ClusterApplierStats *ClusterAppliedStats `json:"cluster_applier_stats,omitempty"` + // ClusterStateQueue Contains statistics for the cluster state queue of the node. + ClusterStateQueue *ClusterStateQueue `json:"cluster_state_queue,omitempty"` + // ClusterStateUpdate Contains low-level statistics about how long various activities took during + // cluster state updates while the node was the elected master. + // Omitted if the node is not master-eligible. + // Every field whose name ends in `_time` within this object is also represented + // as a raw number of milliseconds in a field whose name ends in `_time_millis`. 
+ // The human-readable fields with a `_time` suffix are only returned if + // requested with the `?human=true` query parameter. + ClusterStateUpdate map[string]ClusterStateUpdate `json:"cluster_state_update,omitempty"` + // PublishedClusterStates Contains statistics for the published cluster states of the node. + PublishedClusterStates *PublishedClusterStates `json:"published_cluster_states,omitempty"` + SerializedClusterStates *SerializedClusterState `json:"serialized_cluster_states,omitempty"` +} + +// NewDiscovery returns a Discovery. +func NewDiscovery() *Discovery { + r := &Discovery{ + ClusterStateUpdate: make(map[string]ClusterStateUpdate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynode.go new file mode 100644 index 000000000..749f47b9f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynode.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DiscoveryNode type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DiscoveryNode.ts#L25-L25 +type DiscoveryNode map[string]DiscoveryNodeContent diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecompact.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecompact.go new file mode 100644 index 000000000..d2c79241b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecompact.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DiscoveryNodeCompact type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DiscoveryNode.ts#L39-L48 +type DiscoveryNodeCompact struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + Id string `json:"id"` + Name string `json:"name"` + TransportAddress string `json:"transport_address"` +} + +func (s *DiscoveryNodeCompact) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewDiscoveryNodeCompact returns a DiscoveryNodeCompact. +func NewDiscoveryNodeCompact() *DiscoveryNodeCompact { + r := &DiscoveryNodeCompact{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecontent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecontent.go new file mode 100644 index 000000000..b1277c0ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/discoverynodecontent.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DiscoveryNodeContent type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DiscoveryNode.ts#L27-L37 +type DiscoveryNodeContent struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + ExternalId string `json:"external_id"` + MaxIndexVersion int `json:"max_index_version"` + MinIndexVersion int `json:"min_index_version"` + Name *string `json:"name,omitempty"` + Roles []string `json:"roles"` + TransportAddress string `json:"transport_address"` + Version string `json:"version"` +} + +func (s *DiscoveryNodeContent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = o + + case "max_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIndexVersion", err) + } + s.MaxIndexVersion = value + case float64: + f := int(v) + s.MaxIndexVersion = f + } + + case "min_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinIndexVersion", err) + } + s.MinIndexVersion = value + case float64: + f := int(v) + s.MinIndexVersion = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewDiscoveryNodeContent returns a DiscoveryNodeContent. +func NewDiscoveryNodeContent() *DiscoveryNodeContent { + r := &DiscoveryNodeContent{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicator.go new file mode 100644 index 000000000..5ee18df41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// DiskIndicator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L125-L129 +type DiskIndicator struct { + Details *DiskIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *DiskIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewDiskIndicator returns a DiskIndicator. +func NewDiskIndicator() *DiskIndicator { + r := &DiskIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicatordetails.go new file mode 100644 index 000000000..3ff0b4090 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskindicatordetails.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DiskIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L130-L136 +type DiskIndicatorDetails struct { + IndicesWithReadonlyBlock int64 `json:"indices_with_readonly_block"` + NodesOverFloodStageWatermark int64 `json:"nodes_over_flood_stage_watermark"` + NodesOverHighWatermark int64 `json:"nodes_over_high_watermark"` + NodesWithEnoughDiskSpace int64 `json:"nodes_with_enough_disk_space"` + NodesWithUnknownDiskStatus int64 `json:"nodes_with_unknown_disk_status"` +} + +func (s *DiskIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices_with_readonly_block": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesWithReadonlyBlock", err) + } + s.IndicesWithReadonlyBlock = value + case float64: + f := int64(v) + s.IndicesWithReadonlyBlock = f + } + + case "nodes_over_flood_stage_watermark": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NodesOverFloodStageWatermark", err) + } + s.NodesOverFloodStageWatermark = value + case float64: + f := int64(v) + s.NodesOverFloodStageWatermark = f + } + + case "nodes_over_high_watermark": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NodesOverHighWatermark", err) + } + s.NodesOverHighWatermark = value + case float64: + f := int64(v) + s.NodesOverHighWatermark = f + } + + case "nodes_with_enough_disk_space": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NodesWithEnoughDiskSpace", err) + } + s.NodesWithEnoughDiskSpace = value + case float64: + f := int64(v) + s.NodesWithEnoughDiskSpace = f + } + + case "nodes_with_unknown_disk_status": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NodesWithUnknownDiskStatus", err) + } + s.NodesWithUnknownDiskStatus = value + case float64: + f := int64(v) + s.NodesWithUnknownDiskStatus = f + } + + } + } + return nil +} + +// NewDiskIndicatorDetails returns a DiskIndicatorDetails. +func NewDiskIndicatorDetails() *DiskIndicatorDetails { + r := &DiskIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskusage.go new file mode 100644 index 000000000..877e5ff30 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diskusage.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DiskUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L63-L70 +type DiskUsage struct { + FreeBytes int64 `json:"free_bytes"` + FreeDiskPercent Float64 `json:"free_disk_percent"` + Path string `json:"path"` + TotalBytes int64 `json:"total_bytes"` + UsedBytes int64 `json:"used_bytes"` + UsedDiskPercent Float64 `json:"used_disk_percent"` +} + +func (s *DiskUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "free_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeBytes", err) + } + s.FreeBytes = value + case float64: + f := int64(v) + s.FreeBytes = f + } + + case "free_disk_percent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeDiskPercent", err) + } + f := Float64(value) + s.FreeDiskPercent = f + case float64: + f := Float64(v) + s.FreeDiskPercent = f + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "total_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalBytes", err) + } + s.TotalBytes = value + case float64: + f := int64(v) + s.TotalBytes = f + } + + case "used_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UsedBytes", err) + } + s.UsedBytes = value + case float64: + f := int64(v) + s.UsedBytes = f + } + + case "used_disk_percent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UsedDiskPercent", err) + } + f := Float64(value) + s.UsedDiskPercent = f + case float64: + f := Float64(v) + s.UsedDiskPercent = f + } + + } + } + return nil +} + +// 
NewDiskUsage returns a DiskUsage. +func NewDiskUsage() *DiskUsage { + r := &DiskUsage{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dismaxquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dismaxquery.go new file mode 100644 index 000000000..4af8f31cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dismaxquery.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DisMaxQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L88-L103 +type DisMaxQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Queries One or more query clauses. + // Returned documents must match one or more of these queries. + // If a document matches multiple queries, Elasticsearch uses the highest + // relevance score. + Queries []Query `json:"queries"` + QueryName_ *string `json:"_name,omitempty"` + // TieBreaker Floating point number between 0 and 1.0 used to increase the relevance scores + // of documents matching multiple query clauses. 
+ TieBreaker *Float64 `json:"tie_breaker,omitempty"` +} + +func (s *DisMaxQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "queries": + if err := dec.Decode(&s.Queries); err != nil { + return fmt.Errorf("%s | %w", "Queries", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "tie_breaker": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TieBreaker", err) + } + f := Float64(value) + s.TieBreaker = &f + case float64: + f := Float64(v) + s.TieBreaker = &f + } + + } + } + return nil +} + +// NewDisMaxQuery returns a DisMaxQuery. +func NewDisMaxQuery() *DisMaxQuery { + r := &DisMaxQuery{} + + return r +} + +type DisMaxQueryVariant interface { + DisMaxQueryCaster() *DisMaxQuery +} + +func (s *DisMaxQuery) DisMaxQueryCaster() *DisMaxQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dissectprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dissectprocessor.go new file mode 100644 index 000000000..2ae6b8993 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dissectprocessor.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DissectProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L805-L824 +type DissectProcessor struct { + // AppendSeparator The character(s) that separate the appended fields. + AppendSeparator *string `json:"append_separator,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. 
+ Description *string `json:"description,omitempty"` + // Field The field to dissect. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Pattern The pattern to apply to the field. + Pattern string `json:"pattern"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *DissectProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "append_separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AppendSeparator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AppendSeparator = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewDissectProcessor returns a DissectProcessor. 
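+//
+// A construction sketch; the field name and dissect pattern are hypothetical,
+// and encoding/json is assumed. Per the struct tags, only `field` and
+// `pattern` lack omitempty, so a minimal processor marshals to just those two
+// keys:
+//
+//     p := NewDissectProcessor()
+//     p.Field = "message"
+//     p.Pattern = "%{clientip} %{ident} %{auth}"
+//     body, _ := json.Marshal(p)
+//     // body: {"field":"message","pattern":"%{clientip} %{ident} %{auth}"}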
+func NewDissectProcessor() *DissectProcessor { + r := &DissectProcessor{} + + return r +} + +type DissectProcessorVariant interface { + DissectProcessorCaster() *DissectProcessor +} + +func (s *DissectProcessor) DissectProcessorCaster() *DissectProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/distancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/distancefeaturequery.go new file mode 100644 index 000000000..7b1a18ed5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/distancefeaturequery.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DistanceFeatureQuery holds the union for the following types: +// +// UntypedDistanceFeatureQuery +// GeoDistanceFeatureQuery +// DateDistanceFeatureQuery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L76-L85 +type DistanceFeatureQuery any + +type DistanceFeatureQueryVariant interface { + DistanceFeatureQueryCaster() *DistanceFeatureQuery +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diversifiedsampleraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diversifiedsampleraggregation.go new file mode 100644 index 000000000..14db04c39 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/diversifiedsampleraggregation.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sampleraggregationexecutionhint" +) + +// DiversifiedSamplerAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L333-L357 +type DiversifiedSamplerAggregation struct { + // ExecutionHint The type of value used for de-duplication. + ExecutionHint *sampleraggregationexecutionhint.SamplerAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field used to provide values used for de-duplication. + Field *string `json:"field,omitempty"` + // MaxDocsPerValue Limits how many documents are permitted per choice of de-duplicating value. + MaxDocsPerValue *int `json:"max_docs_per_value,omitempty"` + Script *Script `json:"script,omitempty"` + // ShardSize Limits how many top-scoring documents are collected in the sample processed + // on each shard. + ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *DiversifiedSamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "max_docs_per_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocsPerValue", err) + } + s.MaxDocsPerValue = &value + case float64: + f := int(v) + s.MaxDocsPerValue = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil +} + +// NewDiversifiedSamplerAggregation returns a DiversifiedSamplerAggregation. +func NewDiversifiedSamplerAggregation() *DiversifiedSamplerAggregation { + r := &DiversifiedSamplerAggregation{} + + return r +} + +type DiversifiedSamplerAggregationVariant interface { + DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation +} + +func (s *DiversifiedSamplerAggregation) DiversifiedSamplerAggregationCaster() *DiversifiedSamplerAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/docstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/docstats.go new file mode 100644 index 000000000..6463ec5e3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/docstats.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DocStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L100-L121 +type DocStats struct { + // Count Total number of non-deleted documents across all primary shards assigned to + // selected nodes. + // This number is based on documents in Lucene segments and may include + // documents from nested fields. + Count int64 `json:"count"` + // Deleted Total number of deleted documents across all primary shards assigned to + // selected nodes. + // This number is based on documents in Lucene segments. + // Elasticsearch reclaims the disk space of deleted Lucene documents when a + // segment is merged. + Deleted *int64 `json:"deleted,omitempty"` + // TotalSize Human readable total_size_in_bytes + TotalSize ByteSize `json:"total_size,omitempty"` + // TotalSizeInBytes Returns the total size in bytes of all documents in this stats. + // This value may be more reliable than store_stats.size_in_bytes in estimating + // the index size. + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} + +func (s *DocStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Deleted", err) + } + s.Deleted = &value + case float64: + f := int64(v) + s.Deleted = &f + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + } + } + return nil +} + +// NewDocStats returns a DocStats. 
+func NewDocStats() *DocStats { + r := &DocStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/document.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/document.go new file mode 100644 index 000000000..7291180c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/document.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Document type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L69-L83 +type Document struct { + // Id_ Unique identifier for the document. + // This ID must be unique within the `_index`. + Id_ *string `json:"_id,omitempty"` + // Index_ Name of the index containing the document. + Index_ *string `json:"_index,omitempty"` + // Source_ JSON body for the document. + Source_ json.RawMessage `json:"_source,omitempty"` +} + +func (s *Document) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + } + } + return nil +} + +// NewDocument returns a Document. +func NewDocument() *Document { + r := &Document{} + + return r +} + +type DocumentVariant interface { + DocumentCaster() *Document +} + +func (s *Document) DocumentCaster() *Document { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentrating.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentrating.go new file mode 100644 index 000000000..f78e8e686 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentrating.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DocumentRating type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L119-L126 +type DocumentRating struct { + // Id_ The document ID. + Id_ string `json:"_id"` + // Index_ The document’s index. For data streams, this should be the document’s backing + // index. + Index_ string `json:"_index"` + // Rating The document’s relevance with regard to this search request. + Rating int `json:"rating"` +} + +func (s *DocumentRating) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "rating": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Rating", err) + } + s.Rating = value + case float64: + f := int(v) + s.Rating = f + } + + } + } + return nil +} + +// NewDocumentRating returns a DocumentRating. +func NewDocumentRating() *DocumentRating { + r := &DocumentRating{} + + return r +} + +type DocumentRatingVariant interface { + DocumentRatingCaster() *DocumentRating +} + +func (s *DocumentRating) DocumentRatingCaster() *DocumentRating { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentsimulation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentsimulation.go new file mode 100644 index 000000000..b8e27228e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/documentsimulation.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
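The DocumentRating type above is the rank_eval rated-document triple (_id, _index, rating). A minimal example of building one (the index name and rating value are purely illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	r := types.NewDocumentRating()
	r.Id_ = "doc-42"      // the rated document
	r.Index_ = "products" // for data streams, use the backing index
	r.Rating = 3          // relevance for this search request

	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"_id":"doc-42","_index":"products","rating":3}
}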
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype"
+)
+
+// DocumentSimulation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L85-L115
+type DocumentSimulation struct {
+	DocumentSimulation map[string]string `json:"-"`
+	// Id_ Unique identifier for the document. This ID must be unique within the
+	// `_index`.
+	Id_ string `json:"_id"`
+	// Index_ Name of the index containing the document.
+	Index_ string `json:"_index"`
+	Ingest_ Ingest `json:"_ingest"`
+	// Routing_ Value used to send the document to a specific primary shard.
+	Routing_ *string `json:"_routing,omitempty"`
+	// Source_ JSON body for the document.
+	Source_ map[string]json.RawMessage `json:"_source"`
+	VersionType_ *versiontype.VersionType `json:"_version_type,omitempty"`
+	Version_ StringifiedVersionNumber `json:"_version,omitempty"`
+}
+
+func (s *DocumentSimulation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		case "_index":
+			if err := dec.Decode(&s.Index_); err != nil {
+				return fmt.Errorf("%s | %w", "Index_", err)
+			}
+
+		case "_ingest":
+			if err := dec.Decode(&s.Ingest_); err != nil {
+				return fmt.Errorf("%s | %w", "Ingest_", err)
+			}
+
+		case "_routing":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Routing_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Routing_ = &o
+
+		case "_source":
+			if s.Source_ == nil {
+				s.Source_ = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Source_); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+
+		case "_version_type":
+			if err := dec.Decode(&s.VersionType_); err != nil {
+				return fmt.Errorf("%s | %w", "VersionType_", err)
+			}
+
+		case "_version":
+			if err := dec.Decode(&s.Version_); err != nil {
+				return fmt.Errorf("%s | %w", "Version_", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.DocumentSimulation == nil {
+					s.DocumentSimulation = make(map[string]string, 0)
+				}
+				raw := new(string)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "DocumentSimulation", err)
+				}
+				s.DocumentSimulation[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s DocumentSimulation) MarshalJSON() ([]byte, error) {
+	type opt DocumentSimulation
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.DocumentSimulation {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "DocumentSimulation")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewDocumentSimulation returns a DocumentSimulation.
+func NewDocumentSimulation() *DocumentSimulation {
+	r := &DocumentSimulation{
+		DocumentSimulation: make(map[string]string),
+		Source_:            make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dotexpanderprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dotexpanderprocessor.go
new file mode 100644
index 000000000..d47e655ab
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dotexpanderprocessor.go
@@ -0,0 +1,176 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// DotExpanderProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L826-L844
+type DotExpanderProcessor struct {
+	// Description Description of the processor.
+	// Useful for describing the purpose of the processor or its configuration.
+	Description *string `json:"description,omitempty"`
+	// Field The field to expand into an object field.
+	// If set to `*`, all top-level fields will be expanded.
+	Field string `json:"field"`
+	// If Conditionally execute the processor.
+	If *Script `json:"if,omitempty"`
+	// IgnoreFailure Ignore failures for the processor.
+	IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+	// OnFailure Handle failures for the processor.
+	OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+	// Override Controls the behavior when there is already an existing nested object that
+	// conflicts with the expanded field.
+	// When `false`, the processor will merge conflicts by combining the old and the
+	// new values into an array.
+	// When `true`, the value from the expanded field will overwrite the existing
+	// value.
+	Override *bool `json:"override,omitempty"`
+	// Path The field that contains the field to expand.
+	// Only required if the field to expand is part of another object field, because
+	// the `field` option can only understand leaf fields.
+	Path *string `json:"path,omitempty"`
+	// Tag Identifier for the processor.
+	// Useful for debugging and metrics.
+ Tag *string `json:"tag,omitempty"` +} + +func (s *DotExpanderProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "override": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Override", err) + } + s.Override = &value + case bool: + s.Override = &v + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewDotExpanderProcessor returns a DotExpanderProcessor. +func NewDotExpanderProcessor() *DotExpanderProcessor { + r := &DotExpanderProcessor{} + + return r +} + +type DotExpanderProcessorVariant interface { + DotExpanderProcessorCaster() *DotExpanderProcessor +} + +func (s *DotExpanderProcessor) DotExpanderProcessorCaster() *DotExpanderProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublenumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublenumberproperty.go new file mode 100644 index 000000000..463be2e48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublenumberproperty.go @@ -0,0 +1,999 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
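A minimal sketch of configuring the DotExpanderProcessor added above (illustrative only; the dotted field name is made up): Override decides whether an expanded value overwrites a conflicting nested value (true) or is merged with it into an array (false).

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewDotExpanderProcessor()
	p.Field = "foo.bar" // expands the dotted key into {"foo": {"bar": ...}}
	override := true
	p.Override = &override

	out, _ := json.Marshal(p)
	fmt.Println(string(out)) // {"field":"foo.bar","override":true}
}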
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype"
+)
+
+// DoubleNumberProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L167-L170
+type DoubleNumberProperty struct {
+	Boost *Float64 `json:"boost,omitempty"`
+	Coerce *bool `json:"coerce,omitempty"`
+	CopyTo []string `json:"copy_to,omitempty"`
+	DocValues *bool `json:"doc_values,omitempty"`
+	Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields map[string]Property `json:"fields,omitempty"`
+	IgnoreAbove *int `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
+	Index *bool `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta map[string]string `json:"meta,omitempty"`
+	NullValue *Float64 `json:"null_value,omitempty"`
+	OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"`
+	Properties map[string]Property `json:"properties,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	Store *bool `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+	// Defaults to false.
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := 
NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { + type innerDoubleNumberProperty DoubleNumberProperty + tmp := innerDoubleNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "double" + + return json.Marshal(tmp) +} + +// NewDoubleNumberProperty returns a DoubleNumberProperty. +func NewDoubleNumberProperty() *DoubleNumberProperty { + r := &DoubleNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DoubleNumberPropertyVariant interface { + DoubleNumberPropertyCaster() *DoubleNumberProperty +} + +func (s *DoubleNumberProperty) DoubleNumberPropertyCaster() *DoubleNumberProperty { + return s +} + +func (s *DoubleNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublerangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublerangeproperty.go new file mode 100644 index 000000000..7404baa4b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doublerangeproperty.go @@ -0,0 +1,922 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
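The MarshalJSON override on DoubleNumberProperty above pins the mapping discriminator: the serialized mapping always carries "type":"double", even when the struct was built without setting Type, which keeps the output dispatchable by the big type switches in the property decoders. A small sketch of that round-trip behavior (illustrative only; assumes types.Float64, the package's float alias used by the struct fields):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewDoubleNumberProperty()
	nullValue := types.Float64(-1)
	p.NullValue = &nullValue // p.Type is deliberately left empty

	out, _ := json.Marshal(p)
	fmt.Println(string(out)) // {"null_value":-1,"type":"double"}
}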
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// DoubleRangeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L34-L36 +type DoubleRangeProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DoubleRangeProperty) MarshalJSON() ([]byte, error) { + type innerDoubleRangeProperty DoubleRangeProperty + tmp := innerDoubleRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "double_range" + + return json.Marshal(tmp) +} + +// NewDoubleRangeProperty returns a DoubleRangeProperty. +func NewDoubleRangeProperty() *DoubleRangeProperty { + r := &DoubleRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DoubleRangePropertyVariant interface { + DoubleRangePropertyCaster() *DoubleRangeProperty +} + +func (s *DoubleRangeProperty) DoubleRangePropertyCaster() *DoubleRangeProperty { + return s +} + +func (s *DoubleRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsaggregate.go new file mode 100644 index 000000000..fa6d5715c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsaggregate.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DoubleTermsAggregate type. 
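The long switch above, repeated for both "fields" and (later) "properties", probes each raw mapping entry for its "type" key, defaults it to "object" when the key is absent, and then decodes the message into the matching concrete property type. A minimal standalone sketch of that dispatch, with keywordProperty and objectProperty as hypothetical stand-ins for the generated types:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for the generated property types.
type keywordProperty struct {
	Type        string `json:"type"`
	IgnoreAbove *int   `json:"ignore_above,omitempty"`
}

type objectProperty struct {
	Type string `json:"type"`
}

// decodeProperty mirrors the generated dispatch: probe the "type" key,
// default it to "object" when absent, then decode the raw message into
// the matching concrete type.
func decodeProperty(message json.RawMessage) (any, error) {
	kind := make(map[string]any)
	if err := json.Unmarshal(message, &kind); err != nil {
		return nil, err
	}
	if _, ok := kind["type"]; !ok {
		kind["type"] = "object" // same default as the generated decoder
	}
	dec := json.NewDecoder(bytes.NewReader(message))
	switch kind["type"] {
	case "keyword":
		o := &keywordProperty{}
		if err := dec.Decode(o); err != nil {
			return nil, err
		}
		return o, nil
	case "object":
		o := &objectProperty{Type: "object"}
		if err := dec.Decode(o); err != nil {
			return nil, err
		}
		return o, nil
	default:
		o := map[string]any{}
		if err := dec.Decode(&o); err != nil {
			return nil, err
		}
		return o, nil
	}
}

func main() {
	p, _ := decodeProperty(json.RawMessage(`{"type":"keyword","ignore_above":256}`))
	fmt.Printf("%#v\n", p)
}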
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L451-L456 +type DoubleTermsAggregate struct { + Buckets BucketsDoubleTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` +} + +func (s *DoubleTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]DoubleTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []DoubleTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "sum_other_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumOtherDocCount", err) + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f + } + + } + } + return nil +} + +// NewDoubleTermsAggregate returns a DoubleTermsAggregate. +func NewDoubleTermsAggregate() *DoubleTermsAggregate { + r := &DoubleTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsbucket.go new file mode 100644 index 000000000..9f231d512 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/doubletermsbucket.go @@ -0,0 +1,678 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
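The buckets field of DoubleTermsAggregate is a union: Elasticsearch returns either a keyed object or an array of buckets, so the generated decoder branches on the first byte of the raw message. A small sketch of the same branching, assuming only encoding/json; bucket is a hypothetical stand-in for DoubleTermsBucket:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Hypothetical stand-in for DoubleTermsBucket.
type bucket struct {
	DocCount int64   `json:"doc_count"`
	Key      float64 `json:"key"`
}

// decodeBuckets branches on the first byte, as the generated code does:
// '{' means a keyed response, '[' the default array form.
func decodeBuckets(raw json.RawMessage) (any, error) {
	dec := json.NewDecoder(bytes.NewReader(raw))
	switch raw[0] {
	case '{':
		o := make(map[string]bucket)
		if err := dec.Decode(&o); err != nil {
			return nil, err
		}
		return o, nil
	case '[':
		var o []bucket
		if err := dec.Decode(&o); err != nil {
			return nil, err
		}
		return o, nil
	}
	return nil, fmt.Errorf("buckets: unexpected leading byte %q", raw[0])
}

func main() {
	v, _ := decodeBuckets(json.RawMessage(`[{"doc_count":3,"key":1.5}]`))
	fmt.Printf("%#v\n", v)
}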
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// DoubleTermsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L458-L461 +type DoubleTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Key Float64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` +} + +func (s *DoubleTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + 
case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := 
NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s DoubleTermsBucket) MarshalJSON() ([]byte, error) { + type opt DoubleTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDoubleTermsBucket returns a DoubleTermsBucket. +func NewDoubleTermsBucket() *DoubleTermsBucket { + r := &DoubleTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleaction.go new file mode 100644 index 000000000..5f653c3a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleaction.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
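DoubleTermsBucket round-trips dynamic sub-aggregations in both directions: on decode, keys of the form type#name (produced when the request sets typed_keys=true) are split on '#' to pick a concrete aggregate type, and on encode MarshalJSON marshals through a method-free alias, re-reads the result into a map, and inlines those entries at the top level. A compact sketch of both halves, with hypothetical local names:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// bucket is a hypothetical stand-in for DoubleTermsBucket.
type bucket struct {
	DocCount int64          `json:"doc_count"`
	Key      float64        `json:"key"`
	Aggs     map[string]any `json:"-"` // dynamic sub-aggregations
}

// splitTypedKey mirrors the decode side: with typed keys enabled,
// Elasticsearch names sub-aggregations "<type>#<name>".
func splitTypedKey(key string) (kind, name string, ok bool) {
	elems := strings.Split(key, "#")
	if len(elems) != 2 {
		return "", "", false
	}
	return elems[0], elems[1], true
}

// MarshalJSON mirrors the encode side: marshal via a method-free alias
// (no recursion), re-read into a map, then inline the dynamic entries.
func (b bucket) MarshalJSON() ([]byte, error) {
	type plain bucket
	data, err := json.Marshal(plain(b))
	if err != nil {
		return nil, err
	}
	tmp := make(map[string]any)
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	for k, v := range b.Aggs {
		tmp[k] = v
	}
	return json.Marshal(tmp)
}

func main() {
	kind, name, _ := splitTypedKey("dterms#response_times")
	fmt.Println(kind, name) // dterms response_times

	out, _ := json.Marshal(bucket{DocCount: 2, Key: 1.5, Aggs: map[string]any{"avg#price_avg": 9.5}})
	fmt.Println(string(out))
}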
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DownsampleAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L112-L115 +type DownsampleAction struct { + FixedInterval string `json:"fixed_interval"` + WaitTimeout Duration `json:"wait_timeout,omitempty"` +} + +func (s *DownsampleAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return fmt.Errorf("%s | %w", "FixedInterval", err) + } + + case "wait_timeout": + if err := dec.Decode(&s.WaitTimeout); err != nil { + return fmt.Errorf("%s | %w", "WaitTimeout", err) + } + + } + } + return nil +} + +// NewDownsampleAction returns a DownsampleAction. +func NewDownsampleAction() *DownsampleAction { + r := &DownsampleAction{} + + return r +} + +type DownsampleActionVariant interface { + DownsampleActionCaster() *DownsampleAction +} + +func (s *DownsampleAction) DownsampleActionCaster() *DownsampleAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleconfig.go new file mode 100644 index 000000000..12148a01e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsampleconfig.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DownsampleConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/Downsample.ts#L22-L27 +type DownsampleConfig struct { + // FixedInterval The interval at which to aggregate the original time series index. 
+ FixedInterval string `json:"fixed_interval"` +} + +func (s *DownsampleConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return fmt.Errorf("%s | %w", "FixedInterval", err) + } + + } + } + return nil +} + +// NewDownsampleConfig returns a DownsampleConfig. +func NewDownsampleConfig() *DownsampleConfig { + r := &DownsampleConfig{} + + return r +} + +type DownsampleConfigVariant interface { + DownsampleConfigCaster() *DownsampleConfig +} + +func (s *DownsampleConfig) DownsampleConfigCaster() *DownsampleConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsamplinground.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsamplinground.go new file mode 100644 index 000000000..562fffb85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/downsamplinground.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DownsamplingRound type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DownsamplingRound.ts#L23-L32 +type DownsamplingRound struct { + // After The duration since rollover when this downsampling round should execute + After Duration `json:"after"` + // Config The downsample configuration to execute. + Config DownsampleConfig `json:"config"` +} + +func (s *DownsamplingRound) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return fmt.Errorf("%s | %w", "After", err) + } + + case "config": + if err := dec.Decode(&s.Config); err != nil { + return fmt.Errorf("%s | %w", "Config", err) + } + + } + } + return nil +} + +// NewDownsamplingRound returns a DownsamplingRound. 
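Three related shapes land together here: DownsampleAction (fixed_interval plus an optional wait_timeout), DownsampleConfig, and DownsamplingRound (after plus config). A short sketch of how a list of rounds serializes, using hypothetical local copies of the generated structs:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical local copies of the generated shapes, enough to show how
// a list of downsampling rounds serializes.
type downsampleConfig struct {
	FixedInterval string `json:"fixed_interval"`
}

type downsamplingRound struct {
	After  any              `json:"after"` // Duration union: string or millis
	Config downsampleConfig `json:"config"`
}

func main() {
	rounds := []downsamplingRound{
		{After: "1d", Config: downsampleConfig{FixedInterval: "10m"}},
		{After: "7d", Config: downsampleConfig{FixedInterval: "1h"}},
	}
	b, _ := json.Marshal(rounds)
	fmt.Println(string(b))
}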
+func NewDownsamplingRound() *DownsamplingRound { + r := &DownsamplingRound{} + + return r +} + +type DownsamplingRoundVariant interface { + DownsamplingRoundCaster() *DownsamplingRound +} + +func (s *DownsamplingRound) DownsamplingRoundCaster() *DownsamplingRound { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dropprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dropprocessor.go new file mode 100644 index 000000000..5aa519ede --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dropprocessor.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DropProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L846-L846 +type DropProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` +} + +func (s *DropProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewDropProcessor returns a DropProcessor. +func NewDropProcessor() *DropProcessor { + r := &DropProcessor{} + + return r +} + +type DropProcessorVariant interface { + DropProcessorCaster() *DropProcessor +} + +func (s *DropProcessor) DropProcessorCaster() *DropProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/duration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/duration.go new file mode 100644 index 000000000..123080c23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/duration.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
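DropProcessor's decoder shows two lenient conventions used throughout these files: string fields are captured as raw tokens and unquoted with a fallback to the raw bytes, and boolean fields accept either a JSON bool or a quoted string. A standalone sketch of both helpers, assuming nothing beyond encoding/json and strconv:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// decodeLooseString mirrors the generated string handling: unquote the
// raw token, and if unquoting fails keep the raw bytes as-is.
func decodeLooseString(raw json.RawMessage) string {
	o := string(raw)
	if u, err := strconv.Unquote(o); err == nil {
		return u
	}
	return o
}

// decodeLooseBool mirrors the generated bool handling: accept a real
// JSON bool or a string such as "true".
func decodeLooseBool(v any) (bool, error) {
	switch t := v.(type) {
	case bool:
		return t, nil
	case string:
		return strconv.ParseBool(t)
	}
	return false, fmt.Errorf("unsupported bool representation %T", v)
}

func main() {
	fmt.Println(decodeLooseString(json.RawMessage(`"ingest tag"`))) // ingest tag
	ok, _ := decodeLooseBool("true")
	fmt.Println(ok) // true
}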
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Duration holds the union for the following types: +// +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L52-L58 +type Duration any + +type DurationVariant interface { + DurationCaster() *Duration +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitfloatmillis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitfloatmillis.go new file mode 100644 index 000000000..134304579 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitfloatmillis.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DurationValueUnitFloatMillis type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L67-L67 +type DurationValueUnitFloatMillis Float64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitmillis.go new file mode 100644 index 000000000..08cf00e36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitmillis.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DurationValueUnitMillis type alias. 
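Duration is deliberately declared as any: the specification admits a time-unit string such as "30s" or "1d", while the DurationValueUnit* aliases below carry unit-scoped numbers. A sketch of both encodings, with settings as a hypothetical carrier struct:

package main

import (
	"encoding/json"
	"fmt"
)

// Duration mirrors the generated union: any value that marshals to a
// time-unit string or a number is accepted.
type Duration any

type settings struct {
	Timeout Duration `json:"timeout,omitempty"`
}

func main() {
	a, _ := json.Marshal(settings{Timeout: "30s"}) // string form
	b, _ := json.Marshal(settings{Timeout: 30000}) // millis form
	fmt.Println(string(a), string(b))              // {"timeout":"30s"} {"timeout":30000}
}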
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L67-L67 +type DurationValueUnitMillis int64 + +type DurationValueUnitMillisVariant interface { + DurationValueUnitMillisCaster() *DurationValueUnitMillis +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitnanos.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitnanos.go new file mode 100644 index 000000000..aee73de88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitnanos.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DurationValueUnitNanos type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L67-L67 +type DurationValueUnitNanos int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitseconds.go new file mode 100644 index 000000000..45bc05691 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/durationvalueunitseconds.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// DurationValueUnitSeconds type alias. 
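Every generated type in this diff decodes through the same token loop: the decoder walks top-level keys so each field can apply its own lenient handling rather than relying on a single struct decode. A stripped-down skeleton of that loop, with doc as a hypothetical one-field type:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

type doc struct {
	Name string
}

// UnmarshalJSON follows the generated skeleton: read tokens until EOF
// and consume each known key's value with its own decode step.
func (s *doc) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "name":
			if err := dec.Decode(&s.Name); err != nil {
				return fmt.Errorf("%s | %w", "Name", err)
			}
		}
	}
	return nil
}

func main() {
	var d doc
	_ = json.Unmarshal([]byte(`{"name":"dutch"}`), &d)
	fmt.Println(d.Name) // dutch
}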
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L67-L67 +type DurationValueUnitSeconds int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchanalyzer.go new file mode 100644 index 000000000..059ad7677 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// DutchAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L145-L150 +type DutchAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DutchAnalyzer) MarshalJSON() ([]byte, error) { + type innerDutchAnalyzer DutchAnalyzer + tmp := innerDutchAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "dutch" + + return json.Marshal(tmp) +} + +// NewDutchAnalyzer returns a DutchAnalyzer. 
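DutchAnalyzer.MarshalJSON above uses the inner-type trick that recurs across these files: a method-free copy of the type is declared locally so json.Marshal does not re-enter MarshalJSON, and the type discriminator is pinned to its literal value ("dutch" here) regardless of what the struct held. A minimal sketch of the pattern, with analyzer as a hypothetical stand-in:

package main

import (
	"encoding/json"
	"fmt"
)

// analyzer is a hypothetical stand-in; only the pattern matters.
type analyzer struct {
	Stopwords []string `json:"stopwords,omitempty"`
	Type      string   `json:"type,omitempty"`
}

func (s analyzer) MarshalJSON() ([]byte, error) {
	type innerAnalyzer analyzer // same fields, no methods: no recursion
	tmp := innerAnalyzer(s)
	tmp.Type = "dutch" // always emit the literal discriminator
	return json.Marshal(tmp)
}

func main() {
	b, _ := json.Marshal(analyzer{Stopwords: []string{"de", "het"}})
	fmt.Println(string(b)) // {"stopwords":["de","het"],"type":"dutch"}
}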
+func NewDutchAnalyzer() *DutchAnalyzer { + r := &DutchAnalyzer{} + + return r +} + +type DutchAnalyzerVariant interface { + DutchAnalyzerCaster() *DutchAnalyzer +} + +func (s *DutchAnalyzer) DutchAnalyzerCaster() *DutchAnalyzer { + return s +} + +func (s *DutchAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchstemtokenfilter.go new file mode 100644 index 000000000..ab5529248 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dutchstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// DutchStemTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L559-L561 +type DutchStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *DutchStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DutchStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerDutchStemTokenFilter DutchStemTokenFilter + tmp := innerDutchStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "dutch_stem" + + return json.Marshal(tmp) +} + +// NewDutchStemTokenFilter returns a DutchStemTokenFilter. 
+func NewDutchStemTokenFilter() *DutchStemTokenFilter { + r := &DutchStemTokenFilter{} + + return r +} + +type DutchStemTokenFilterVariant interface { + DutchStemTokenFilterCaster() *DutchStemTokenFilter +} + +func (s *DutchStemTokenFilter) DutchStemTokenFilterCaster() *DutchStemTokenFilter { + return s +} + +func (s *DutchStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamicproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamicproperty.go new file mode 100644 index 000000000..ef551e671 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamicproperty.go @@ -0,0 +1,1161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// DynamicProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L353-L384 +type DynamicProperty struct { + Analyzer *string `json:"analyzer,omitempty"` + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + Format *string `json:"format,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + IndexPhrases *bool `json:"index_phrases,omitempty"` + IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` + Locale *string `json:"locale,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue FieldValue `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + PrecisionStep *int `json:"precision_step,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *DynamicProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "eager_global_ordinals": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EagerGlobalOrdinals", err) + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, 
message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "index_phrases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexPhrases", err) + } + s.IndexPhrases = &value + case bool: + s.IndexPhrases = &v + } + + case "index_prefixes": + if err := dec.Decode(&s.IndexPrefixes); err != nil { + return fmt.Errorf("%s | %w", "IndexPrefixes", err) + } + + case "locale": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "norms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Norms", err) + } + s.Norms = &value + case bool: + s.Norms = &v + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "position_increment_gap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositionIncrementGap", err) + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f + } + + case "precision_step": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrecisionStep", err) + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o + + case "search_quote_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQuoteAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "term_vector": + if err := dec.Decode(&s.TermVector); err != nil { + return fmt.Errorf("%s | %w", "TermVector", err) + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DynamicProperty) MarshalJSON() ([]byte, error) { + type innerDynamicProperty DynamicProperty + tmp := innerDynamicProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Enabled: s.Enabled, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + IndexOptions: s.IndexOptions, + IndexPhrases: s.IndexPhrases, + IndexPrefixes: s.IndexPrefixes, + Locale: s.Locale, + Meta: s.Meta, + Norms: s.Norms, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PositionIncrementGap: s.PositionIncrementGap, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TermVector: s.TermVector, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "{dynamic_type}" + + return json.Marshal(tmp) +} + +// NewDynamicProperty returns a DynamicProperty. 
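The UnmarshalJSON above is deliberately lenient about scalar options: booleans and integers that arrive as JSON strings are parsed with strconv rather than rejected. A short sketch of that behaviour (import path is the vendored package added by this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Numeric and boolean options may arrive as JSON strings;
	// the custom decoder parses them instead of rejecting them.
	raw := []byte(`{"type":"{dynamic_type}","ignore_above":"256","index":"true"}`)

	p := types.NewDynamicProperty()
	if err := json.Unmarshal(raw, p); err != nil {
		panic(err)
	}
	fmt.Println(*p.IgnoreAbove, *p.Index) // 256 true
}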
+func NewDynamicProperty() *DynamicProperty { + r := &DynamicProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type DynamicPropertyVariant interface { + DynamicPropertyCaster() *DynamicProperty +} + +func (s *DynamicProperty) DynamicPropertyCaster() *DynamicProperty { + return s +} + +func (s *DynamicProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamictemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamictemplate.go new file mode 100644 index 000000000..b9b27d9a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/dynamictemplate.go @@ -0,0 +1,564 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/matchtype" +) + +// DynamicTemplate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/dynamic-template.ts#L23-L43 +type DynamicTemplate struct { + AdditionalDynamicTemplateProperty map[string]json.RawMessage `json:"-"` + Mapping Property `json:"mapping,omitempty"` + Match []string `json:"match,omitempty"` + MatchMappingType []string `json:"match_mapping_type,omitempty"` + MatchPattern *matchtype.MatchType `json:"match_pattern,omitempty"` + PathMatch []string `json:"path_match,omitempty"` + PathUnmatch []string `json:"path_unmatch,omitempty"` + Runtime *RuntimeField `json:"runtime,omitempty"` + Unmatch []string `json:"unmatch,omitempty"` + UnmatchMappingType []string `json:"unmatch_mapping_type,omitempty"` +} + +func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mapping": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + kind := make(map[string]string, 0) + localDec := json.NewDecoder(source) + localDec.Decode(&kind) + source.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + + case "binary": + o := NewBinaryProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "binary", err) + } + s.Mapping = *o + case "boolean": + o := NewBooleanProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "boolean", err) + } + s.Mapping = *o + case "{dynamic_type}": + o := NewDynamicProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "{dynamic_type}", err) + } + s.Mapping = *o + case "join": + o := NewJoinProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "join", err) + } + s.Mapping = *o + case "keyword": + o := NewKeywordProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "keyword", err) + } + s.Mapping = *o + case "match_only_text": + o := NewMatchOnlyTextProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "match_only_text", err) + } + s.Mapping = *o + case "percolator": + o := NewPercolatorProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "percolator", err) + } + s.Mapping = *o + case "rank_feature": + o := NewRankFeatureProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "rank_feature", err) + } + s.Mapping = *o + case "rank_features": + o := NewRankFeaturesProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "rank_features", err) + } + s.Mapping = *o + case "search_as_you_type": + o := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "search_as_you_type", err) + } + s.Mapping = *o + case "text": + o := NewTextProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "text", err) + } + s.Mapping = *o + case "version": + o := NewVersionProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "version", err) + } + s.Mapping = *o + case "wildcard": + o := NewWildcardProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "wildcard", err) + } + s.Mapping = *o + case "date_nanos": + o := 
NewDateNanosProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "date_nanos", err) + } + s.Mapping = *o + case "date": + o := NewDateProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "date", err) + } + s.Mapping = *o + case "aggregate_metric_double": + o := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "aggregate_metric_double", err) + } + s.Mapping = *o + case "dense_vector": + o := NewDenseVectorProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "dense_vector", err) + } + s.Mapping = *o + case "flattened": + o := NewFlattenedProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "flattened", err) + } + s.Mapping = *o + case "nested": + o := NewNestedProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "nested", err) + } + s.Mapping = *o + case "object": + o := NewObjectProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "object", err) + } + s.Mapping = *o + case "passthrough": + o := NewPassthroughObjectProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "passthrough", err) + } + s.Mapping = *o + case "rank_vectors": + o := NewRankVectorProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "rank_vectors", err) + } + s.Mapping = *o + case "semantic_text": + o := NewSemanticTextProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "semantic_text", err) + } + s.Mapping = *o + case "sparse_vector": + o := NewSparseVectorProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "sparse_vector", err) + } + s.Mapping = *o + case "completion": + o := NewCompletionProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "completion", err) + } + s.Mapping = *o + case "constant_keyword": + o := NewConstantKeywordProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "constant_keyword", err) + } + s.Mapping = *o + case "counted_keyword": + o := NewCountedKeywordProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "counted_keyword", err) + } + s.Mapping = *o + case "alias": + o := NewFieldAliasProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "alias", err) + } + s.Mapping = *o + case "histogram": + o := NewHistogramProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "histogram", err) + } + s.Mapping = *o + case "ip": + o := NewIpProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ip", err) + } + s.Mapping = *o + case "murmur3": + o := NewMurmur3HashProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "murmur3", err) + } + s.Mapping = *o + case "token_count": + o := NewTokenCountProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "token_count", err) + } + s.Mapping = *o + case "geo_point": + o := NewGeoPointProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "geo_point", err) + } + s.Mapping = *o + case "geo_shape": + o := NewGeoShapeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "geo_shape", err) + } + s.Mapping = *o + case "point": + o := 
NewPointProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "point", err) + } + s.Mapping = *o + case "shape": + o := NewShapeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "shape", err) + } + s.Mapping = *o + case "byte": + o := NewByteNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "byte", err) + } + s.Mapping = *o + case "double": + o := NewDoubleNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "double", err) + } + s.Mapping = *o + case "float": + o := NewFloatNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "float", err) + } + s.Mapping = *o + case "half_float": + o := NewHalfFloatNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "half_float", err) + } + s.Mapping = *o + case "integer": + o := NewIntegerNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "integer", err) + } + s.Mapping = *o + case "long": + o := NewLongNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "long", err) + } + s.Mapping = *o + case "scaled_float": + o := NewScaledFloatNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "scaled_float", err) + } + s.Mapping = *o + case "short": + o := NewShortNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "short", err) + } + s.Mapping = *o + case "unsigned_long": + o := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "unsigned_long", err) + } + s.Mapping = *o + case "date_range": + o := NewDateRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "date_range", err) + } + s.Mapping = *o + case "double_range": + o := NewDoubleRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "double_range", err) + } + s.Mapping = *o + case "float_range": + o := NewFloatRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "float_range", err) + } + s.Mapping = *o + case "integer_range": + o := NewIntegerRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "integer_range", err) + } + s.Mapping = *o + case "ip_range": + o := NewIpRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ip_range", err) + } + s.Mapping = *o + case "long_range": + o := NewLongRangeProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "long_range", err) + } + s.Mapping = *o + case "icu_collation_keyword": + o := NewIcuCollationProperty() + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "icu_collation_keyword", err) + } + s.Mapping = *o + default: + if err := localDec.Decode(&s.Mapping); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + } + + case "match": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + + s.Match = append(s.Match, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Match); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + } 
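Each pattern field in DynamicTemplate (the `match` case just above, and the unmatch/path variants that follow) accepts either a single string or an array; a bare string is normalised into a one-element slice. A small sketch of that behaviour (hypothetical template JSON; import path is the vendored package):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, raw := range []string{
		`{"match":"long_*"}`,
		`{"match":["long_*","double_*"]}`,
	} {
		tpl := types.NewDynamicTemplate()
		if err := json.Unmarshal([]byte(raw), tpl); err != nil {
			panic(err)
		}
		fmt.Println(tpl.Match) // [long_*] then [long_* double_*]
	}
}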
+ + case "match_mapping_type": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MatchMappingType", err) + } + + s.MatchMappingType = append(s.MatchMappingType, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MatchMappingType); err != nil { + return fmt.Errorf("%s | %w", "MatchMappingType", err) + } + } + + case "match_pattern": + if err := dec.Decode(&s.MatchPattern); err != nil { + return fmt.Errorf("%s | %w", "MatchPattern", err) + } + + case "path_match": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "PathMatch", err) + } + + s.PathMatch = append(s.PathMatch, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.PathMatch); err != nil { + return fmt.Errorf("%s | %w", "PathMatch", err) + } + } + + case "path_unmatch": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "PathUnmatch", err) + } + + s.PathUnmatch = append(s.PathUnmatch, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.PathUnmatch); err != nil { + return fmt.Errorf("%s | %w", "PathUnmatch", err) + } + } + + case "runtime": + if err := dec.Decode(&s.Runtime); err != nil { + return fmt.Errorf("%s | %w", "Runtime", err) + } + + case "unmatch": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Unmatch", err) + } + + s.Unmatch = append(s.Unmatch, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Unmatch); err != nil { + return fmt.Errorf("%s | %w", "Unmatch", err) + } + } + + case "unmatch_mapping_type": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "UnmatchMappingType", err) + } + + s.UnmatchMappingType = append(s.UnmatchMappingType, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.UnmatchMappingType); err != nil { + return fmt.Errorf("%s | %w", "UnmatchMappingType", err) + } + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalDynamicTemplateProperty == nil { + s.AdditionalDynamicTemplateProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalDynamicTemplateProperty", err) + } + s.AdditionalDynamicTemplateProperty[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s DynamicTemplate) MarshalJSON() ([]byte, error) { + type opt DynamicTemplate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the
additional fields from the underlying map + for key, value := range s.AdditionalDynamicTemplateProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalDynamicTemplateProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewDynamicTemplate returns a DynamicTemplate. +func NewDynamicTemplate() *DynamicTemplate { + r := &DynamicTemplate{ + AdditionalDynamicTemplateProperty: make(map[string]json.RawMessage), + } + + return r +} + +type DynamicTemplateVariant interface { + DynamicTemplateCaster() *DynamicTemplate +} + +func (s *DynamicTemplate) DynamicTemplateCaster() *DynamicTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenfilter.go new file mode 100644 index 000000000..73ad4966e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenfilter.go @@ -0,0 +1,159 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/edgengramside" +) + +// EdgeNGramTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L97-L107 +type EdgeNGramTokenFilter struct { + // MaxGram Maximum character length of a gram. For custom token filters, defaults to + // `2`. For the built-in edge_ngram filter, defaults to `1`. + MaxGram *int `json:"max_gram,omitempty"` + // MinGram Minimum character length of a gram. Defaults to `1`. + MinGram *int `json:"min_gram,omitempty"` + // PreserveOriginal Emits original token when set to `true`. Defaults to `false`. + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + // Side Indicates whether to truncate tokens from the `front` or `back`. Defaults to + // `front`. 
+ Side *edgengramside.EdgeNGramSide `json:"side,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *EdgeNGramTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGram", err) + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinGram", err) + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case "side": + if err := dec.Decode(&s.Side); err != nil { + return fmt.Errorf("%s | %w", "Side", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EdgeNGramTokenFilter) MarshalJSON() ([]byte, error) { + type innerEdgeNGramTokenFilter EdgeNGramTokenFilter + tmp := innerEdgeNGramTokenFilter{ + MaxGram: s.MaxGram, + MinGram: s.MinGram, + PreserveOriginal: s.PreserveOriginal, + Side: s.Side, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "edge_ngram" + + return json.Marshal(tmp) +} + +// NewEdgeNGramTokenFilter returns a EdgeNGramTokenFilter. +func NewEdgeNGramTokenFilter() *EdgeNGramTokenFilter { + r := &EdgeNGramTokenFilter{} + + return r +} + +type EdgeNGramTokenFilterVariant interface { + EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter +} + +func (s *EdgeNGramTokenFilter) EdgeNGramTokenFilterCaster() *EdgeNGramTokenFilter { + return s +} + +func (s *EdgeNGramTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenizer.go new file mode 100644 index 000000000..c218eb5b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/edgengramtokenizer.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
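Per the field comments in EdgeNGramTokenFilter above, a custom edge_ngram filter defaults to min_gram 1 and max_gram 2, so most callers only override the gram bounds. A minimal construction sketch (the bounds are illustrative; import path is the vendored package):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewEdgeNGramTokenFilter()
	minGram, maxGram := 2, 10 // illustrative bounds
	f.MinGram = &minGram
	f.MaxGram = &maxGram

	out, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	// MarshalJSON injects the literal discriminator,
	// e.g. {"max_gram":10,"min_gram":2,"type":"edge_ngram"}.
	fmt.Println(string(out))
}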
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar" +) + +// EdgeNGramTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L48-L58 +type EdgeNGramTokenizer struct { + CustomTokenChars *string `json:"custom_token_chars,omitempty"` + MaxGram *int `json:"max_gram,omitempty"` + MinGram *int `json:"min_gram,omitempty"` + TokenChars []tokenchar.TokenChar `json:"token_chars,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *EdgeNGramTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_token_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CustomTokenChars", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CustomTokenChars = &o + + case "max_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGram", err) + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinGram", err) + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "token_chars": + if err := dec.Decode(&s.TokenChars); err != nil { + return fmt.Errorf("%s | %w", "TokenChars", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EdgeNGramTokenizer) MarshalJSON() ([]byte, error) { + type innerEdgeNGramTokenizer EdgeNGramTokenizer + tmp := innerEdgeNGramTokenizer{ + CustomTokenChars: s.CustomTokenChars, + MaxGram: s.MaxGram, + MinGram: s.MinGram, + TokenChars: s.TokenChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "edge_ngram" + + return json.Marshal(tmp) +} + +// NewEdgeNGramTokenizer returns a EdgeNGramTokenizer. 
+func NewEdgeNGramTokenizer() *EdgeNGramTokenizer { + r := &EdgeNGramTokenizer{} + + return r +} + +type EdgeNGramTokenizerVariant interface { + EdgeNGramTokenizerCaster() *EdgeNGramTokenizer +} + +func (s *EdgeNGramTokenizer) EdgeNGramTokenizerCaster() *EdgeNGramTokenizer { + return s +} + +func (s *EdgeNGramTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearcherror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearcherror.go new file mode 100644 index 000000000..7c6d5a554 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearcherror.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strings" +) + +// An ElasticsearchError represent the exception raised +// by the server and sent as json payloads. +type ElasticsearchError struct { + ErrorCause ErrorCause `json:"error"` + Status int `json:"status"` +} + +// Error implements error string serialization of the ElasticsearchError. +func (e ElasticsearchError) Error() string { + var reason string + if e.ErrorCause.Reason != nil { + reason = *e.ErrorCause.Reason + } + return fmt.Sprintf("status: %d, failed: [%s], reason: %s", e.Status, e.ErrorCause.Type, reason) +} + +// Is implements errors.Is interface to allow value comparison within ElasticsearchError. +// It checks for always present values only: Status & ErrorCause.Type. +func (e ElasticsearchError) Is(err error) bool { + prefix := fmt.Sprintf("status: %d, failed: [%s]", e.Status, e.ErrorCause.Type) + return strings.HasPrefix(err.Error(), prefix) +} + +// As implements errors.As interface to allow type matching of ElasticsearchError. +func (e ElasticsearchError) As(err interface{}) bool { + if _, ok := err.(*ElasticsearchError); ok { + return true + } + return false +} + +// NewElasticsearchError returns a ElasticsearchError. 
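Unlike its neighbours, ElasticsearchError is hand-written rather than generated. Its Is method matches on the always-present Status and ErrorCause.Type only, so a sparse sentinel is enough for errors.Is. An illustrative sketch; the 404/index_not_found_exception values are invented for the example:

    package main

    import (
        "errors"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        reason := "no such index [users]"
        var err error = types.ElasticsearchError{
            Status:     404,
            ErrorCause: types.ErrorCause{Type: "index_not_found_exception", Reason: &reason},
        }

        // The sentinel only needs Status and ErrorCause.Type; Is ignores the reason.
        notFound := types.ElasticsearchError{
            Status:     404,
            ErrorCause: types.ErrorCause{Type: "index_not_found_exception"},
        }
        fmt.Println(errors.Is(err, notFound)) // true
    }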
+func NewElasticsearchError() *ElasticsearchError { + r := &ElasticsearchError{} + + return r +} + +func (e *ElasticsearchError) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + // Expect start object + t, err := dec.Token() + if err != nil { + return err + } + if delim, ok := t.(json.Delim); !ok || delim != '{' { + return fmt.Errorf("expected start object") + } + + for dec.More() { + t, err := dec.Token() + if err != nil { + return err + } + key, ok := t.(string) + if !ok { + return fmt.Errorf("expected string key") + } + + switch key { + case "error": + if err := dec.Decode(&e.ErrorCause); err != nil { + return fmt.Errorf("error decoding error: %w", err) + } + case "status": + if err := dec.Decode(&e.Status); err != nil { + return fmt.Errorf("error decoding status: %w", err) + } + default: + // Unknown field: store in ErrorCause.Metadata + if e.ErrorCause.Metadata == nil { + e.ErrorCause.Metadata = make(map[string]json.RawMessage) + } + var raw json.RawMessage + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("error decoding unknown field %s: %w", key, err) + } + e.ErrorCause.Metadata[key] = raw + } + } + + // Expect end object + _, err = dec.Token() + if err != nil && !errors.Is(err, io.EOF) { + return err + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchservicesettings.go new file mode 100644 index 000000000..7b7353d7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchservicesettings.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElasticsearchServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1215-L1249 +type ElasticsearchServiceSettings struct { + // AdaptiveAllocations Adaptive allocations configuration details. + // If `enabled` is true, the number of allocations of the model is set based on + // the current load the process gets. + // When the load is high, a new model allocation is automatically created, + // respecting the value of `max_number_of_allocations` if it's set. + // When the load is low, a model allocation is automatically removed, respecting + // the value of `min_number_of_allocations` if it's set. 
+ // If `enabled` is true, do not set the number of allocations manually. + AdaptiveAllocations *AdaptiveAllocations `json:"adaptive_allocations,omitempty"` + // DeploymentId The deployment identifier for a trained model deployment. + // When `deployment_id` is used the `model_id` is optional. + DeploymentId *string `json:"deployment_id,omitempty"` + // ModelId The name of the model to use for the inference task. + // It can be the ID of a built-in model (for example, `.multilingual-e5-small` + // for E5) or a text embedding model that was uploaded by using the Eland + // client. + ModelId string `json:"model_id"` + // NumAllocations The total number of allocations that are assigned to the model across machine + // learning nodes. + // Increasing this value generally increases the throughput. + // If adaptive allocations are enabled, do not set this value because it's + // automatically set. + NumAllocations *int `json:"num_allocations,omitempty"` + // NumThreads The number of threads used by each model allocation during inference. + // This setting generally increases the speed per inference request. + // The inference process is a compute-bound process; `threads_per_allocations` + // must not exceed the number of available allocated processors per node. + // The value must be a power of 2. + // The maximum value is 32. + NumThreads int `json:"num_threads"` +} + +func (s *ElasticsearchServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + + case "deployment_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DeploymentId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeploymentId = &o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "num_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumAllocations", err) + } + s.NumAllocations = &value + case float64: + f := int(v) + s.NumAllocations = &f + } + + case "num_threads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumThreads", err) + } + s.NumThreads = value + case float64: + f := int(v) + s.NumThreads = f + } + + } + } + return nil +} + +// NewElasticsearchServiceSettings returns a ElasticsearchServiceSettings. 
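A sketch of how these settings might be populated for a fixed-capacity deployment; the model ID comes from the field docs above, everything else is an assumed example value:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        allocations := 2
        s := types.NewElasticsearchServiceSettings()
        s.ModelId = ".multilingual-e5-small" // built-in E5, per the field docs
        s.NumThreads = 2                     // must be a power of 2, at most 32
        s.NumAllocations = &allocations      // fixed; omit when adaptive allocations are enabled

        out, _ := json.Marshal(s)
        fmt.Println(string(out))
    }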
+func NewElasticsearchServiceSettings() *ElasticsearchServiceSettings { + r := &ElasticsearchServiceSettings{} + + return r +} + +type ElasticsearchServiceSettingsVariant interface { + ElasticsearchServiceSettingsCaster() *ElasticsearchServiceSettings +} + +func (s *ElasticsearchServiceSettings) ElasticsearchServiceSettingsCaster() *ElasticsearchServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchtasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchtasksettings.go new file mode 100644 index 000000000..ebb7c9c57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchtasksettings.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElasticsearchTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1251-L1257 +type ElasticsearchTaskSettings struct { + // ReturnDocuments For a `rerank` task, return the document instead of only the index. + ReturnDocuments *bool `json:"return_documents,omitempty"` +} + +func (s *ElasticsearchTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "return_documents": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnDocuments", err) + } + s.ReturnDocuments = &value + case bool: + s.ReturnDocuments = &v + } + + } + } + return nil +} + +// NewElasticsearchTaskSettings returns a ElasticsearchTaskSettings. 
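The same leniency applies to booleans: `return_documents` may arrive quoted. For illustration only:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        var ts types.ElasticsearchTaskSettings
        // The decoder accepts a real boolean or its string form.
        _ = json.Unmarshal([]byte(`{"return_documents": "true"}`), &ts)
        fmt.Println(*ts.ReturnDocuments) // true
    }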
+func NewElasticsearchTaskSettings() *ElasticsearchTaskSettings { + r := &ElasticsearchTaskSettings{} + + return r +} + +type ElasticsearchTaskSettingsVariant interface { + ElasticsearchTaskSettingsCaster() *ElasticsearchTaskSettings +} + +func (s *ElasticsearchTaskSettings) ElasticsearchTaskSettingsCaster() *ElasticsearchTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversioninfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversioninfo.go new file mode 100644 index 000000000..8266d097a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversioninfo.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElasticsearchVersionInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Base.ts#L76-L118 +type ElasticsearchVersionInfo struct { + // BuildDate The Elasticsearch Git commit's date. + BuildDate DateTime `json:"build_date"` + // BuildFlavor The build flavor. For example, `default`. + BuildFlavor string `json:"build_flavor"` + // BuildHash The Elasticsearch Git commit's SHA hash. + BuildHash string `json:"build_hash"` + // BuildSnapshot Indicates whether the Elasticsearch build was a snapshot. + BuildSnapshot bool `json:"build_snapshot"` + // BuildType The build type that corresponds to how Elasticsearch was installed. + // For example, `docker`, `rpm`, or `tar`. + BuildType string `json:"build_type"` + // Int The Elasticsearch version number. + // + // ::: IMPORTANT: For Serverless deployments, this static value is always + // `8.11.0` and is used solely for backward compatibility with legacy clients. + // + // Serverless environments are versionless and automatically upgraded, so this + // + // value can be safely ignored. + Int string `json:"number"` + // LuceneVersion The version number of Elasticsearch's underlying Lucene software. + LuceneVersion string `json:"lucene_version"` + // MinimumIndexCompatibilityVersion The minimum index version with which the responding node can read from disk. + MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` + // MinimumWireCompatibilityVersion The minimum node version with which the responding node can communicate. + // Also the minimum version from which you can perform a rolling upgrade. 
+ MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` +} + +func (s *ElasticsearchVersionInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_date": + if err := dec.Decode(&s.BuildDate); err != nil { + return fmt.Errorf("%s | %w", "BuildDate", err) + } + + case "build_flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildFlavor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildFlavor = o + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "build_snapshot": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BuildSnapshot", err) + } + s.BuildSnapshot = value + case bool: + s.BuildSnapshot = v + } + + case "build_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildType = o + + case "number": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Int", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Int = o + + case "lucene_version": + if err := dec.Decode(&s.LuceneVersion); err != nil { + return fmt.Errorf("%s | %w", "LuceneVersion", err) + } + + case "minimum_index_compatibility_version": + if err := dec.Decode(&s.MinimumIndexCompatibilityVersion); err != nil { + return fmt.Errorf("%s | %w", "MinimumIndexCompatibilityVersion", err) + } + + case "minimum_wire_compatibility_version": + if err := dec.Decode(&s.MinimumWireCompatibilityVersion); err != nil { + return fmt.Errorf("%s | %w", "MinimumWireCompatibilityVersion", err) + } + + } + } + return nil +} + +// NewElasticsearchVersionInfo returns a ElasticsearchVersionInfo. +func NewElasticsearchVersionInfo() *ElasticsearchVersionInfo { + r := &ElasticsearchVersionInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversionmininfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversionmininfo.go new file mode 100644 index 000000000..b7f9a666a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elasticsearchversionmininfo.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElasticsearchVersionMinInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Base.ts#L120-L128 +type ElasticsearchVersionMinInfo struct { + BuildFlavor string `json:"build_flavor"` + Int string `json:"number"` + MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` + MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` +} + +func (s *ElasticsearchVersionMinInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildFlavor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildFlavor = o + + case "number": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Int", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Int = o + + case "minimum_index_compatibility_version": + if err := dec.Decode(&s.MinimumIndexCompatibilityVersion); err != nil { + return fmt.Errorf("%s | %w", "MinimumIndexCompatibilityVersion", err) + } + + case "minimum_wire_compatibility_version": + if err := dec.Decode(&s.MinimumWireCompatibilityVersion); err != nil { + return fmt.Errorf("%s | %w", "MinimumWireCompatibilityVersion", err) + } + + } + } + return nil +} + +// NewElasticsearchVersionMinInfo returns a ElasticsearchVersionMinInfo. +func NewElasticsearchVersionMinInfo() *ElasticsearchVersionMinInfo { + r := &ElasticsearchVersionMinInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elisiontokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elisiontokenfilter.go new file mode 100644 index 000000000..ddbc13962 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elisiontokenfilter.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElisionTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L245-L258 +type ElisionTokenFilter struct { + // Articles List of elisions to remove. + // To be removed, the elision must be at the beginning of a token and be + // immediately followed by an apostrophe. Both the elision and apostrophe are + // removed. + // For custom `elision` filters, either this parameter or `articles_path` must + // be specified. + Articles []string `json:"articles,omitempty"` + // ArticlesCase If `true`, elision matching is case insensitive. If `false`, elision matching + // is case sensitive. Defaults to `false`. + ArticlesCase Stringifiedboolean `json:"articles_case,omitempty"` + // ArticlesPath Path to a file that contains a list of elisions to remove. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each elision in the file must be separated by a line + // break. + // To be removed, the elision must be at the beginning of a token and be + // immediately followed by an apostrophe. Both the elision and apostrophe are + // removed. + // For custom `elision` filters, either this parameter or `articles` must be + // specified. 
+ ArticlesPath *string `json:"articles_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ElisionTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "articles": + if err := dec.Decode(&s.Articles); err != nil { + return fmt.Errorf("%s | %w", "Articles", err) + } + + case "articles_case": + if err := dec.Decode(&s.ArticlesCase); err != nil { + return fmt.Errorf("%s | %w", "ArticlesCase", err) + } + + case "articles_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ArticlesPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ArticlesPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ElisionTokenFilter) MarshalJSON() ([]byte, error) { + type innerElisionTokenFilter ElisionTokenFilter + tmp := innerElisionTokenFilter{ + Articles: s.Articles, + ArticlesCase: s.ArticlesCase, + ArticlesPath: s.ArticlesPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "elision" + + return json.Marshal(tmp) +} + +// NewElisionTokenFilter returns a ElisionTokenFilter. +func NewElisionTokenFilter() *ElisionTokenFilter { + r := &ElisionTokenFilter{} + + return r +} + +type ElisionTokenFilterVariant interface { + ElisionTokenFilterCaster() *ElisionTokenFilter +} + +func (s *ElisionTokenFilter) ElisionTokenFilterCaster() *ElisionTokenFilter { + return s +} + +func (s *ElisionTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elserservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elserservicesettings.go new file mode 100644 index 000000000..3a5575656 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/elserservicesettings.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ElserServiceSettings type. 
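For the ElisionTokenFilter above, a short sketch: per the field docs, a custom `elision` filter needs either `articles` or `articles_path`, and MarshalJSON injects the `elision` type literal. The article list here is an assumed French-style example:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        f := types.NewElisionTokenFilter()
        f.Articles = []string{"l", "m", "t", "qu", "n", "s", "j"}

        out, _ := json.Marshal(f)
        fmt.Println(string(out)) // "type":"elision" is injected by MarshalJSON
    }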
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1269-L1295 +type ElserServiceSettings struct { + // AdaptiveAllocations Adaptive allocations configuration details. + // If `enabled` is true, the number of allocations of the model is set based on + // the current load the process gets. + // When the load is high, a new model allocation is automatically created, + // respecting the value of `max_number_of_allocations` if it's set. + // When the load is low, a model allocation is automatically removed, respecting + // the value of `min_number_of_allocations` if it's set. + // If `enabled` is true, do not set the number of allocations manually. + AdaptiveAllocations *AdaptiveAllocations `json:"adaptive_allocations,omitempty"` + // NumAllocations The total number of allocations this model is assigned across machine + // learning nodes. + // Increasing this value generally increases the throughput. + // If adaptive allocations is enabled, do not set this value because it's + // automatically set. + NumAllocations int `json:"num_allocations"` + // NumThreads The number of threads used by each model allocation during inference. + // Increasing this value generally increases the speed per inference request. + // The inference process is a compute-bound process; `threads_per_allocations` + // must not exceed the number of available allocated processors per node. + // The value must be a power of 2. + // The maximum value is 32. + // + // > info + // > If you want to optimize your ELSER endpoint for ingest, set the number of + // threads to 1. If you want to optimize your ELSER endpoint for search, set the + // number of threads to greater than 1. + NumThreads int `json:"num_threads"` +} + +func (s *ElserServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + + case "num_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumAllocations", err) + } + s.NumAllocations = value + case float64: + f := int(v) + s.NumAllocations = f + } + + case "num_threads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumThreads", err) + } + s.NumThreads = value + case float64: + f := int(v) + s.NumThreads = f + } + + } + } + return nil +} + +// NewElserServiceSettings returns a ElserServiceSettings. +func NewElserServiceSettings() *ElserServiceSettings { + r := &ElserServiceSettings{} + + return r +} + +type ElserServiceSettingsVariant interface { + ElserServiceSettingsCaster() *ElserServiceSettings +} + +func (s *ElserServiceSettings) ElserServiceSettingsCaster() *ElserServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/email.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/email.go new file mode 100644 index 000000000..9fc58f6e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/email.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority" +) + +// Email type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L238-L250 +type Email struct { + Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` + Bcc []string `json:"bcc,omitempty"` + Body *EmailBody `json:"body,omitempty"` + Cc []string `json:"cc,omitempty"` + From *string `json:"from,omitempty"` + Id *string `json:"id,omitempty"` + Priority *emailpriority.EmailPriority `json:"priority,omitempty"` + ReplyTo []string `json:"reply_to,omitempty"` + SentDate DateTime `json:"sent_date,omitempty"` + Subject string `json:"subject"` + To []string `json:"to"` +} + +func (s *Email) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachments": + if s.Attachments == nil { + s.Attachments = make(map[string]EmailAttachmentContainer, 0) + } + if err := dec.Decode(&s.Attachments); err != nil { + return fmt.Errorf("%s | %w", "Attachments", err) + } + + case "bcc": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + } + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + + case "cc": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return 
fmt.Errorf("%s | %w", "Id", err) + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + + case "reply_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + } + + case "sent_date": + if err := dec.Decode(&s.SentDate); err != nil { + return fmt.Errorf("%s | %w", "SentDate", err) + } + + case "subject": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Subject", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Subject = o + + case "to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + } + + } + } + return nil +} + +// NewEmail returns a Email. +func NewEmail() *Email { + r := &Email{ + Attachments: make(map[string]EmailAttachmentContainer), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailaction.go new file mode 100644 index 000000000..0178033ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailaction.go @@ -0,0 +1,202 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority" +) + +// EmailAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L252-L252 +type EmailAction struct { + Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` + Bcc []string `json:"bcc,omitempty"` + Body *EmailBody `json:"body,omitempty"` + Cc []string `json:"cc,omitempty"` + From *string `json:"from,omitempty"` + Id *string `json:"id,omitempty"` + Priority *emailpriority.EmailPriority `json:"priority,omitempty"` + ReplyTo []string `json:"reply_to,omitempty"` + SentDate DateTime `json:"sent_date,omitempty"` + Subject string `json:"subject"` + To []string `json:"to"` +} + +func (s *EmailAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachments": + if s.Attachments == nil { + s.Attachments = make(map[string]EmailAttachmentContainer, 0) + } + if err := dec.Decode(&s.Attachments); err != nil { + return fmt.Errorf("%s | %w", "Attachments", err) + } + + case "bcc": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + + s.Bcc = append(s.Bcc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Bcc); err != nil { + return fmt.Errorf("%s | %w", "Bcc", err) + } + } + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + + case "cc": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + + s.Cc = append(s.Cc, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Cc); err != nil { + return fmt.Errorf("%s | %w", "Cc", err) + } + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + + case "reply_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + + s.ReplyTo = append(s.ReplyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ReplyTo); err != nil { + return fmt.Errorf("%s | %w", "ReplyTo", err) + } + } + + case "sent_date": + if err := dec.Decode(&s.SentDate); err != nil { + return fmt.Errorf("%s | %w", "SentDate", err) + } + + case "subject": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Subject", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Subject = o + + case "to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if 
err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + + s.To = append(s.To, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.To); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + } + + } + } + return nil +} + +// NewEmailAction returns a EmailAction. +func NewEmailAction() *EmailAction { + r := &EmailAction{ + Attachments: make(map[string]EmailAttachmentContainer), + } + + return r +} + +type EmailActionVariant interface { + EmailActionCaster() *EmailAction +} + +func (s *EmailAction) EmailActionCaster() *EmailAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailattachmentcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailattachmentcontainer.go new file mode 100644 index 000000000..bc9abb892 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailattachmentcontainer.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// EmailAttachmentContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L211-L216 +type EmailAttachmentContainer struct { + AdditionalEmailAttachmentContainerProperty map[string]json.RawMessage `json:"-"` + Data *DataEmailAttachment `json:"data,omitempty"` + Http *HttpEmailAttachment `json:"http,omitempty"` + Reporting *ReportingEmailAttachment `json:"reporting,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s EmailAttachmentContainer) MarshalJSON() ([]byte, error) { + type opt EmailAttachmentContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalEmailAttachmentContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalEmailAttachmentContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewEmailAttachmentContainer returns a EmailAttachmentContainer. 
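The additional-properties pattern above keeps unknown keys in a side map and inlines them next to the typed fields on marshalling. A hypothetical sketch (the `my_attachment` key is invented for the example):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        c := types.NewEmailAttachmentContainer()
        c.AdditionalEmailAttachmentContainerProperty["my_attachment"] = json.RawMessage(`{"content":"hello"}`)

        out, _ := json.Marshal(c)
        fmt.Println(string(out)) // {"my_attachment":{"content":"hello"}}
    }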
+func NewEmailAttachmentContainer() *EmailAttachmentContainer { + r := &EmailAttachmentContainer{ + AdditionalEmailAttachmentContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type EmailAttachmentContainerVariant interface { + EmailAttachmentContainerCaster() *EmailAttachmentContainer +} + +func (s *EmailAttachmentContainer) EmailAttachmentContainerCaster() *EmailAttachmentContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailbody.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailbody.go new file mode 100644 index 000000000..e7670671a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailbody.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EmailBody type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L192-L195 +type EmailBody struct { + Html *string `json:"html,omitempty"` + Text *string `json:"text,omitempty"` +} + +func (s *EmailBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "html": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Html", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Html = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil +} + +// NewEmailBody returns a EmailBody. +func NewEmailBody() *EmailBody { + r := &EmailBody{} + + return r +} + +type EmailBodyVariant interface { + EmailBodyCaster() *EmailBody +} + +func (s *EmailBody) EmailBodyCaster() *EmailBody { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailresult.go new file mode 100644 index 000000000..d077cb4ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emailresult.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EmailResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L205-L209 +type EmailResult struct { + Account *string `json:"account,omitempty"` + Message Email `json:"message"` + Reason *string `json:"reason,omitempty"` +} + +func (s *EmailResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Account", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "message": + if err := dec.Decode(&s.Message); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + } + } + return nil +} + +// NewEmailResult returns a EmailResult. +func NewEmailResult() *EmailResult { + r := &EmailResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emptyobject.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emptyobject.go new file mode 100644 index 000000000..92695d678 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/emptyobject.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EmptyObject type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L157-L158 +type EmptyObject struct { +} + +// NewEmptyObject returns a EmptyObject. +func NewEmptyObject() *EmptyObject { + r := &EmptyObject{} + + return r +} + +type EmptyObjectVariant interface { + EmptyObjectCaster() *EmptyObject +} + +func (s *EmptyObject) EmptyObjectCaster() *EmptyObject { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/englishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/englishanalyzer.go new file mode 100644 index 000000000..57758592c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/englishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EnglishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L152-L157 +type EnglishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *EnglishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EnglishAnalyzer) MarshalJSON() ([]byte, error) { + type innerEnglishAnalyzer EnglishAnalyzer + tmp := innerEnglishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "english" + + return json.Marshal(tmp) +} + +// NewEnglishAnalyzer returns a EnglishAnalyzer. +func NewEnglishAnalyzer() *EnglishAnalyzer { + r := &EnglishAnalyzer{} + + return r +} + +type EnglishAnalyzerVariant interface { + EnglishAnalyzerCaster() *EnglishAnalyzer +} + +func (s *EnglishAnalyzer) EnglishAnalyzerCaster() *EnglishAnalyzer { + return s +} + +func (s *EnglishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go new file mode 100644 index 000000000..d56a81daa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EnrichPolicy type. 
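For the EnglishAnalyzer above, a brief sketch showing the forced `english` type literal; the stem-exclusion words are assumed example values:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        a := types.NewEnglishAnalyzer()
        a.StemExclusion = []string{"organization", "organizations"}

        out, _ := json.Marshal(a)
        fmt.Println(string(out)) // includes "type":"english" via MarshalJSON
    }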
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go
new file mode 100644
index 000000000..d56a81daa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichpolicy.go
@@ -0,0 +1,136 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// EnrichPolicy type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/_types/Policy.ts#L34-L41
+type EnrichPolicy struct {
+	ElasticsearchVersion *string  `json:"elasticsearch_version,omitempty"`
+	EnrichFields         []string `json:"enrich_fields"`
+	Indices              []string `json:"indices"`
+	MatchField           string   `json:"match_field"`
+	Name                 *string  `json:"name,omitempty"`
+	Query                *Query   `json:"query,omitempty"`
+}
+
+func (s *EnrichPolicy) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "elasticsearch_version":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ElasticsearchVersion", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ElasticsearchVersion = &o
+
+		case "enrich_fields":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "EnrichFields", err)
+				}
+
+				s.EnrichFields = append(s.EnrichFields, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.EnrichFields); err != nil {
+					return fmt.Errorf("%s | %w", "EnrichFields", err)
+				}
+			}
+
+		case "indices":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+
+				s.Indices = append(s.Indices, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil {
+					return fmt.Errorf("%s | %w", "Indices", err)
+				}
+			}
+
+		case "match_field":
+			if err := dec.Decode(&s.MatchField); err != nil {
+				return fmt.Errorf("%s | %w", "MatchField", err)
+			}
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		case "query":
+			if err := dec.Decode(&s.Query); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewEnrichPolicy returns a EnrichPolicy.
+func NewEnrichPolicy() *EnrichPolicy {
+	r := &EnrichPolicy{}
+
+	return r
+}
+
+type EnrichPolicyVariant interface {
+	EnrichPolicyCaster() *EnrichPolicy
+}
+
+func (s *EnrichPolicy) EnrichPolicyCaster() *EnrichPolicy {
+	return s
+}
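A usage sketch (illustrative, not from the diff): EnrichPolicyVariant exists so request builders can accept anything with an EnrichPolicyCaster. Assuming the v9 fluent Match setter on the enrich put-policy endpoint, and with placeholder policy, index, and field names:

package main

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// putUsersPolicy defines a match-type enrich policy keyed on email.
func putUsersPolicy(es *elasticsearch.TypedClient) error {
	_, err := es.Enrich.PutPolicy("users-policy").
		Match(&types.EnrichPolicy{
			Indices:      []string{"users"},
			MatchField:   "email",
			EnrichFields: []string{"first_name", "last_name"},
		}).
		Do(context.Background())
	return err
}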
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichprocessor.go
new file mode 100644
index 000000000..70050ef55
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enrichprocessor.go
@@ -0,0 +1,231 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation"
+)
+
+// EnrichProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L848-L887
+type EnrichProcessor struct {
+	// Description Description of the processor.
+	// Useful for describing the purpose of the processor or its configuration.
+	Description *string `json:"description,omitempty"`
+	// Field The field in the input document that matches the policies match_field used to
+	// retrieve the enrichment data.
+	// Supports template snippets.
+	Field string `json:"field"`
+	// If Conditionally execute the processor.
+	If *Script `json:"if,omitempty"`
+	// IgnoreFailure Ignore failures for the processor.
+	IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+	// IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without
+	// modifying the document.
+	IgnoreMissing *bool `json:"ignore_missing,omitempty"`
+	// MaxMatches The maximum number of matched documents to include under the configured
+	// target field.
+	// The `target_field` will be turned into a json array if `max_matches` is
+	// higher than 1, otherwise `target_field` will become a json object.
+	// In order to avoid documents getting too large, the maximum allowed value is
+	// 128.
+	MaxMatches *int `json:"max_matches,omitempty"`
+	// OnFailure Handle failures for the processor.
+	OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+	// Override If processor will update fields with pre-existing non-null-valued field.
+	// When set to `false`, such fields will not be touched.
+	Override *bool `json:"override,omitempty"`
+	// PolicyName The name of the enrich policy to use.
+	PolicyName string `json:"policy_name"`
+	// ShapeRelation A spatial relation operator used to match the geoshape of incoming documents
+	// to documents in the enrich index.
+	// This option is only used for `geo_match` enrich policy types.
+	ShapeRelation *geoshaperelation.GeoShapeRelation `json:"shape_relation,omitempty"`
+	// Tag Identifier for the processor.
+	// Useful for debugging and metrics.
+	Tag *string `json:"tag,omitempty"`
+	// TargetField Field added to incoming documents to contain enrich data. This field contains
+	// both the `match_field` and `enrich_fields` specified in the enrich policy.
+	// Supports template snippets.
+	TargetField string `json:"target_field"`
+}
+
+func (s *EnrichProcessor) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "if":
+			if err := dec.Decode(&s.If); err != nil {
+				return fmt.Errorf("%s | %w", "If", err)
+			}
+
+		case "ignore_failure":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreFailure", err)
+				}
+				s.IgnoreFailure = &value
+			case bool:
+				s.IgnoreFailure = &v
+			}
+
+		case "ignore_missing":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreMissing", err)
+				}
+				s.IgnoreMissing = &value
+			case bool:
+				s.IgnoreMissing = &v
+			}
+
+		case "max_matches":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxMatches", err)
+				}
+				s.MaxMatches = &value
+			case float64:
+				f := int(v)
+				s.MaxMatches = &f
+			}
+
+		case "on_failure":
+			if err := dec.Decode(&s.OnFailure); err != nil {
+				return fmt.Errorf("%s | %w", "OnFailure", err)
+			}
+
+		case "override":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Override", err)
+				}
+				s.Override = &value
+			case bool:
+				s.Override = &v
+			}
+
+		case "policy_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "PolicyName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.PolicyName = o
+
+		case "shape_relation":
+			if err := dec.Decode(&s.ShapeRelation); err != nil {
+				return fmt.Errorf("%s | %w", "ShapeRelation", err)
+			}
+
+		case "tag":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Tag", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Tag = &o
+
+		case "target_field":
+			if err := dec.Decode(&s.TargetField); err != nil {
+				return fmt.Errorf("%s | %w", "TargetField", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewEnrichProcessor returns a EnrichProcessor.
+func NewEnrichProcessor() *EnrichProcessor {
+	r := &EnrichProcessor{}
+
+	return r
+}
+
+type EnrichProcessorVariant interface {
+	EnrichProcessorCaster() *EnrichProcessor
+}
+
+func (s *EnrichProcessor) EnrichProcessorCaster() *EnrichProcessor {
+	return s
+}
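A usage sketch (illustrative, not from the diff) tying the two types together: an EnrichProcessor references a policy by name inside an ingest pipeline. The pipeline id, policy name, and field names are placeholders; the putpipeline.Request form is assumed from the v9 typed API layout.

package main

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// putUserLookupPipeline adds a single enrich step that copies the policy's
// match_field and enrich_fields into the "user" target field.
func putUserLookupPipeline(es *elasticsearch.TypedClient) error {
	maxMatches := 1
	req := &putpipeline.Request{
		Processors: []types.ProcessorContainer{{
			Enrich: &types.EnrichProcessor{
				PolicyName:  "users-policy",
				Field:       "email",
				TargetField: "user",
				MaxMatches:  &maxMatches,
			},
		}},
	}
	_, err := es.Ingest.PutPipeline("user-lookup").
		Request(req).
		Do(context.Background())
	return err
}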
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ensemble.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ensemble.go
new file mode 100644
index 000000000..b9e88b1ed
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ensemble.go
@@ -0,0 +1,108 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Ensemble type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L93-L99
+type Ensemble struct {
+	AggregateOutput      *AggregateOutput `json:"aggregate_output,omitempty"`
+	ClassificationLabels []string         `json:"classification_labels,omitempty"`
+	FeatureNames         []string         `json:"feature_names,omitempty"`
+	TargetType           *string          `json:"target_type,omitempty"`
+	TrainedModels        []TrainedModel   `json:"trained_models"`
+}
+
+func (s *Ensemble) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "aggregate_output":
+			if err := dec.Decode(&s.AggregateOutput); err != nil {
+				return fmt.Errorf("%s | %w", "AggregateOutput", err)
+			}
+
+		case "classification_labels":
+			if err := dec.Decode(&s.ClassificationLabels); err != nil {
+				return fmt.Errorf("%s | %w", "ClassificationLabels", err)
+			}
+
+		case "feature_names":
+			if err := dec.Decode(&s.FeatureNames); err != nil {
+				return fmt.Errorf("%s | %w", "FeatureNames", err)
+			}
+
+		case "target_type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "TargetType", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.TargetType = &o
+
+		case "trained_models":
+			if err := dec.Decode(&s.TrainedModels); err != nil {
+				return fmt.Errorf("%s | %w", "TrainedModels", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewEnsemble returns a Ensemble.
+func NewEnsemble() *Ensemble {
+	r := &Ensemble{}
+
+	return r
+}
+
+type EnsembleVariant interface {
+	EnsembleCaster() *Ensemble
+}
+
+func (s *Ensemble) EnsembleCaster() *Ensemble {
+	return s
+}
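A small sketch (illustrative, not from the diff) of the custom decoding pattern these generated types share: UnmarshalJSON walks the token stream and dispatches on each key, and string-valued fields such as target_type are unquoted via strconv.Unquote.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Hand-written JSON in the shape put_trained_model accepts.
	raw := []byte(`{"feature_names":["f0","f1"],"target_type":"regression","trained_models":[]}`)
	var e types.Ensemble
	if err := json.Unmarshal(raw, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.FeatureNames, *e.TargetType) // [f0 f1] regression
}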
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go
new file mode 100644
index 000000000..fe9bf9983
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package accesstokengranttype
+package accesstokengranttype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/types.ts#L23-L48
+type AccessTokenGrantType struct {
+	Name string
+}
+
+var (
+	Password = AccessTokenGrantType{"password"}
+
+	Clientcredentials = AccessTokenGrantType{"client_credentials"}
+
+	Kerberos = AccessTokenGrantType{"_kerberos"}
+
+	Refreshtoken = AccessTokenGrantType{"refresh_token"}
+)
+
+func (a AccessTokenGrantType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AccessTokenGrantType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "password":
+		*a = Password
+	case "client_credentials":
+		*a = Clientcredentials
+	case "_kerberos":
+		*a = Kerberos
+	case "refresh_token":
+		*a = Refreshtoken
+	default:
+		*a = AccessTokenGrantType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AccessTokenGrantType) String() string {
+	return a.Name
+}
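Every package under typedapi/types/enums in this diff follows the same open-enum pattern: a struct wrapping a Name string, predeclared values, and text (un)marshalling that tolerates values the client does not know about. A sketch of the round-trip behavior, using only the type above:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype"
)

func main() {
	var g accesstokengranttype.AccessTokenGrantType

	// Known values match case-insensitively, with surrounding quotes stripped.
	_ = g.UnmarshalText([]byte(`"PASSWORD"`))
	fmt.Println(g == accesstokengranttype.Password) // true

	// Unknown values are preserved verbatim rather than rejected, so newer
	// server-side enum members do not break older clients.
	_ = g.UnmarshalText([]byte("saml2"))
	fmt.Println(g.Name) // saml2
}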
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go
new file mode 100644
index 000000000..cfb5ab407
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package acknowledgementoptions
+package acknowledgementoptions
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L103-L107
+type AcknowledgementOptions struct {
+	Name string
+}
+
+var (
+	Awaitssuccessfulexecution = AcknowledgementOptions{"awaits_successful_execution"}
+
+	Ackable = AcknowledgementOptions{"ackable"}
+
+	Acked = AcknowledgementOptions{"acked"}
+)
+
+func (a AcknowledgementOptions) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AcknowledgementOptions) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "awaits_successful_execution":
+		*a = Awaitssuccessfulexecution
+	case "ackable":
+		*a = Ackable
+	case "acked":
+		*a = Acked
+	default:
+		*a = AcknowledgementOptions{string(text)}
+	}
+
+	return nil
+}
+
+func (a AcknowledgementOptions) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go
new file mode 100644
index 000000000..a51dd8775
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package actionexecutionmode
+package actionexecutionmode
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L67-L88
+type ActionExecutionMode struct {
+	Name string
+}
+
+var (
+	Simulate = ActionExecutionMode{"simulate"}
+
+	Forcesimulate = ActionExecutionMode{"force_simulate"}
+
+	Execute = ActionExecutionMode{"execute"}
+
+	Forceexecute = ActionExecutionMode{"force_execute"}
+
+	Skip = ActionExecutionMode{"skip"}
+)
+
+func (a ActionExecutionMode) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *ActionExecutionMode) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "simulate":
+		*a = Simulate
+	case "force_simulate":
+		*a = Forcesimulate
+	case "execute":
+		*a = Execute
+	case "force_execute":
+		*a = Forceexecute
+	case "skip":
+		*a = Skip
+	default:
+		*a = ActionExecutionMode{string(text)}
+	}
+
+	return nil
+}
+
+func (a ActionExecutionMode) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go
new file mode 100644
index 000000000..f41d4ab0a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package actionstatusoptions
+package actionstatusoptions
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L96-L101
+type ActionStatusOptions struct {
+	Name string
+}
+
+var (
+	Success = ActionStatusOptions{"success"}
+
+	Failure = ActionStatusOptions{"failure"}
+
+	Simulated = ActionStatusOptions{"simulated"}
+
+	Throttled = ActionStatusOptions{"throttled"}
+)
+
+func (a ActionStatusOptions) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *ActionStatusOptions) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "success":
+		*a = Success
+	case "failure":
+		*a = Failure
+	case "simulated":
+		*a = Simulated
+	case "throttled":
+		*a = Throttled
+	default:
+		*a = ActionStatusOptions{string(text)}
+	}
+
+	return nil
+}
+
+func (a ActionStatusOptions) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype/actiontype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype/actiontype.go
new file mode 100644
index 000000000..b288d6445
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype/actiontype.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package actiontype
+package actiontype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L58-L65
+type ActionType struct {
+	Name string
+}
+
+var (
+	Email = ActionType{"email"}
+
+	Webhook = ActionType{"webhook"}
+
+	Index = ActionType{"index"}
+
+	Logging = ActionType{"logging"}
+
+	Slack = ActionType{"slack"}
+
+	Pagerduty = ActionType{"pagerduty"}
+)
+
+func (a ActionType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *ActionType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "email":
+		*a = Email
+	case "webhook":
+		*a = Webhook
+	case "index":
+		*a = Index
+	case "logging":
+		*a = Logging
+	case "slack":
+		*a = Slack
+	case "pagerduty":
+		*a = Pagerduty
+	default:
+		*a = ActionType{string(text)}
+	}
+
+	return nil
+}
+
+func (a ActionType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype/alibabacloudservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype/alibabacloudservicetype.go
new file mode 100644
index 000000000..a29611f51
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype/alibabacloudservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package alibabacloudservicetype
+package alibabacloudservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L362-L364
+type AlibabaCloudServiceType struct {
+	Name string
+}
+
+var (
+	AlibabacloudAiSearch = AlibabaCloudServiceType{"alibabacloud-ai-search"}
+)
+
+func (a AlibabaCloudServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AlibabaCloudServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "alibabacloud-ai-search":
+		*a = AlibabacloudAiSearch
+	default:
+		*a = AlibabaCloudServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AlibabaCloudServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go
new file mode 100644
index 000000000..998fb01fa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package allocationexplaindecision
+package allocationexplaindecision
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L33-L38
+type AllocationExplainDecision struct {
+	Name string
+}
+
+var (
+	NO = AllocationExplainDecision{"NO"}
+
+	YES = AllocationExplainDecision{"YES"}
+
+	THROTTLE = AllocationExplainDecision{"THROTTLE"}
+
+	ALWAYS = AllocationExplainDecision{"ALWAYS"}
+)
+
+func (a AllocationExplainDecision) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AllocationExplainDecision) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "no":
+		*a = NO
+	case "yes":
+		*a = YES
+	case "throttle":
+		*a = THROTTLE
+	case "always":
+		*a = ALWAYS
+	default:
+		*a = AllocationExplainDecision{string(text)}
+	}
+
+	return nil
+}
+
+func (a AllocationExplainDecision) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype/amazonbedrockservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype/amazonbedrockservicetype.go
new file mode 100644
index 000000000..2bb7d2191
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype/amazonbedrockservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package amazonbedrockservicetype
+package amazonbedrockservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L441-L443
+type AmazonBedrockServiceType struct {
+	Name string
+}
+
+var (
+	Amazonbedrock = AmazonBedrockServiceType{"amazonbedrock"}
+)
+
+func (a AmazonBedrockServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AmazonBedrockServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "amazonbedrock":
+		*a = Amazonbedrock
+	default:
+		*a = AmazonBedrockServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AmazonBedrockServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerapi/amazonsagemakerapi.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerapi/amazonsagemakerapi.go
new file mode 100644
index 000000000..9c2b6f3c6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerapi/amazonsagemakerapi.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package amazonsagemakerapi
+package amazonsagemakerapi
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L501-L504
+type AmazonSageMakerApi struct {
+	Name string
+}
+
+var (
+	Openai = AmazonSageMakerApi{"openai"}
+
+	Elastic = AmazonSageMakerApi{"elastic"}
+)
+
+func (a AmazonSageMakerApi) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AmazonSageMakerApi) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "openai":
+		*a = Openai
+	case "elastic":
+		*a = Elastic
+	default:
+		*a = AmazonSageMakerApi{string(text)}
+	}
+
+	return nil
+}
+
+func (a AmazonSageMakerApi) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype/amazonsagemakerservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype/amazonsagemakerservicetype.go
new file mode 100644
index 000000000..573fa6db2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype/amazonsagemakerservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package amazonsagemakerservicetype
+package amazonsagemakerservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L584-L586
+type AmazonSageMakerServiceType struct {
+	Name string
+}
+
+var (
+	Amazonsagemaker = AmazonSageMakerServiceType{"amazon_sagemaker"}
+)
+
+func (a AmazonSageMakerServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AmazonSageMakerServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "amazon_sagemaker":
+		*a = Amazonsagemaker
+	default:
+		*a = AmazonSageMakerServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AmazonSageMakerServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype/anthropicservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype/anthropicservicetype.go
new file mode 100644
index 000000000..4e29fb179
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype/anthropicservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package anthropicservicetype
+package anthropicservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L637-L639
+type AnthropicServiceType struct {
+	Name string
+}
+
+var (
+	Anthropic = AnthropicServiceType{"anthropic"}
+)
+
+func (a AnthropicServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AnthropicServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "anthropic":
+		*a = Anthropic
+	default:
+		*a = AnthropicServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AnthropicServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype/apikeygranttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype/apikeygranttype.go
new file mode 100644
index 000000000..cd45582fd
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype/apikeygranttype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package apikeygranttype
+package apikeygranttype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/grant_api_key/types.ts#L47-L50
+type ApiKeyGrantType struct {
+	Name string
+}
+
+var (
+	Accesstoken = ApiKeyGrantType{"access_token"}
+
+	Password = ApiKeyGrantType{"password"}
+)
+
+func (a ApiKeyGrantType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *ApiKeyGrantType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "access_token":
+		*a = Accesstoken
+	case "password":
+		*a = Password
+	default:
+		*a = ApiKeyGrantType{string(text)}
+	}
+
+	return nil
+}
+
+func (a ApiKeyGrantType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeytype/apikeytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeytype/apikeytype.go
new file mode 100644
index 000000000..855a7dfcb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeytype/apikeytype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package apikeytype
+package apikeytype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/ApiKey.ts#L115-L118
+type ApiKeyType struct {
+	Name string
+}
+
+var (
+	Rest = ApiKeyType{"rest"}
+
+	Crosscluster = ApiKeyType{"cross_cluster"}
+)
+
+func (a ApiKeyType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *ApiKeyType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "rest":
+		*a = Rest
+	case "cross_cluster":
+		*a = Crosscluster
+	default:
+		*a = ApiKeyType{string(text)}
+	}
+
+	return nil
+}
+
+func (a ApiKeyType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/appliesto/appliesto.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/appliesto/appliesto.go
new file mode 100644
index 000000000..86d24a741
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/appliesto/appliesto.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package appliesto
+package appliesto
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Rule.ts#L67-L72
+type AppliesTo struct {
+	Name string
+}
+
+var (
+	Actual = AppliesTo{"actual"}
+
+	Typical = AppliesTo{"typical"}
+
+	Difffromtypical = AppliesTo{"diff_from_typical"}
+
+	Time = AppliesTo{"time"}
+)
+
+func (a AppliesTo) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AppliesTo) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "actual":
+		*a = Actual
+	case "typical":
+		*a = Typical
+	case "diff_from_typical":
+		*a = Difffromtypical
+	case "time":
+		*a = Time
+	default:
+		*a = AppliesTo{string(text)}
+	}
+
+	return nil
+}
+
+func (a AppliesTo) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype/azureaistudioservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype/azureaistudioservicetype.go
new file mode 100644
index 000000000..0be69aedf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype/azureaistudioservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package azureaistudioservicetype
+package azureaistudioservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L720-L722
+type AzureAiStudioServiceType struct {
+	Name string
+}
+
+var (
+	Azureaistudio = AzureAiStudioServiceType{"azureaistudio"}
+)
+
+func (a AzureAiStudioServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AzureAiStudioServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "azureaistudio":
+		*a = Azureaistudio
+	default:
+		*a = AzureAiStudioServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AzureAiStudioServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype/azureopenaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype/azureopenaiservicetype.go
new file mode 100644
index 000000000..49c6d134f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype/azureopenaiservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package azureopenaiservicetype
+package azureopenaiservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L784-L786
+type AzureOpenAIServiceType struct {
+	Name string
+}
+
+var (
+	Azureopenai = AzureOpenAIServiceType{"azureopenai"}
+)
+
+func (a AzureOpenAIServiceType) MarshalText() (text []byte, err error) {
+	return []byte(a.String()), nil
+}
+
+func (a *AzureOpenAIServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "azureopenai":
+		*a = Azureopenai
+	default:
+		*a = AzureOpenAIServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (a AzureOpenAIServiceType) String() string {
+	return a.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner/boundaryscanner.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner/boundaryscanner.go
new file mode 100644
index 000000000..a01808f5f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner/boundaryscanner.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package boundaryscanner
+package boundaryscanner
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L26-L45
+type BoundaryScanner struct {
+	Name string
+}
+
+var (
+	Chars = BoundaryScanner{"chars"}
+
+	Sentence = BoundaryScanner{"sentence"}
+
+	Word = BoundaryScanner{"word"}
+)
+
+func (b BoundaryScanner) MarshalText() (text []byte, err error) {
+	return []byte(b.String()), nil
+}
+
+func (b *BoundaryScanner) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "chars":
+		*b = Chars
+	case "sentence":
+		*b = Sentence
+	case "word":
+		*b = Word
+	default:
+		*b = BoundaryScanner{string(text)}
+	}
+
+	return nil
+}
+
+func (b BoundaryScanner) String() string {
+	return b.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes/bytes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes/bytes.go
new file mode 100644
index 000000000..fbf2a7dc4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes/bytes.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package bytes
+package bytes
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L166-L178
+type Bytes struct {
+	Name string
+}
+
+var (
+	B = Bytes{"b"}
+
+	Kb = Bytes{"kb"}
+
+	Mb = Bytes{"mb"}
+
+	Gb = Bytes{"gb"}
+
+	Tb = Bytes{"tb"}
+
+	Pb = Bytes{"pb"}
+)
+
+func (b Bytes) MarshalText() (text []byte, err error) {
+	return []byte(b.String()), nil
+}
+
+func (b *Bytes) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "b":
+		*b = B
+	case "kb":
+		*b = Kb
+	case "mb":
+		*b = Mb
+	case "gb":
+		*b = Gb
+	case "tb":
+		*b = Tb
+	case "pb":
+		*b = Pb
+	default:
+		*b = Bytes{string(text)}
+	}
+
+	return nil
+}
+
+func (b Bytes) String() string {
+	return b.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval/calendarinterval.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval/calendarinterval.go
new file mode 100644
index 000000000..5e28386de
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval/calendarinterval.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package calendarinterval
+package calendarinterval
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L262-L279
+type CalendarInterval struct {
+	Name string
+}
+
+var (
+	Second = CalendarInterval{"second"}
+
+	Minute = CalendarInterval{"minute"}
+
+	Hour = CalendarInterval{"hour"}
+
+	Day = CalendarInterval{"day"}
+
+	Week = CalendarInterval{"week"}
+
+	Month = CalendarInterval{"month"}
+
+	Quarter = CalendarInterval{"quarter"}
+
+	Year = CalendarInterval{"year"}
+)
+
+func (c CalendarInterval) MarshalText() (text []byte, err error) {
+	return []byte(c.String()), nil
+}
+
+func (c *CalendarInterval) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "second":
+		*c = Second
+	case "minute":
+		*c = Minute
+	case "hour":
+		*c = Hour
+	case "day":
+		*c = Day
+	case "week":
+		*c = Week
+	case "month":
+		*c = Month
+	case "quarter":
+		*c = Quarter
+	case "year":
+		*c = Year
+	default:
+		*c = CalendarInterval{string(text)}
+	}
+
+	return nil
+}
+
+func (c CalendarInterval) String() string {
+	return c.Name
+}
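A usage sketch (illustrative, not from the diff): these enum values are typically consumed by aggregation builders. Here, a calendar-interval date histogram is built from the generated enum rather than a raw string; the "orders" index and "created_at" field are placeholders.

package main

import (
	"context"

	"github.com/elastic/go-elasticsearch/v9"
	"github.com/elastic/go-elasticsearch/v9/typedapi/core/search"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval"
)

// monthlyCounts buckets documents per calendar month.
func monthlyCounts(es *elasticsearch.TypedClient) (*search.Response, error) {
	size := 0
	field := "created_at"
	interval := calendarinterval.Month
	return es.Search().
		Index("orders").
		Request(&search.Request{
			Size: &size,
			Aggregations: map[string]types.Aggregations{
				"per_month": {
					DateHistogram: &types.DateHistogramAggregation{
						Field:            &field,
						CalendarInterval: &interval,
					},
				},
			},
		}).
		Do(context.Background())
}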
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cardinalityexecutionmode +package cardinalityexecutionmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L70-L91 +type CardinalityExecutionMode struct { + Name string +} + +var ( + Globalordinals = CardinalityExecutionMode{"global_ordinals"} + + Segmentordinals = CardinalityExecutionMode{"segment_ordinals"} + + Direct = CardinalityExecutionMode{"direct"} + + Savememoryheuristic = CardinalityExecutionMode{"save_memory_heuristic"} + + Savetimeheuristic = CardinalityExecutionMode{"save_time_heuristic"} +) + +func (c CardinalityExecutionMode) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CardinalityExecutionMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "global_ordinals": + *c = Globalordinals + case "segment_ordinals": + *c = Segmentordinals + case "direct": + *c = Direct + case "save_memory_heuristic": + *c = Savememoryheuristic + case "save_time_heuristic": + *c = Savetimeheuristic + default: + *c = CardinalityExecutionMode{string(text)} + } + + return nil +} + +func (c CardinalityExecutionMode) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go new file mode 100644 index 000000000..4c73a3104 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go @@ -0,0 +1,289 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
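
These types are intended to sit inside request and response structs, where encoding/json picks up their TextUnmarshaler implementation automatically. A sketch using the cardinalityexecutionmode package completed just above (AggSpec is a hypothetical container, not a library type):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cardinalityexecutionmode"
)

// AggSpec is a hypothetical container; it exists only to show that the
// enum's TextUnmarshaler is applied to struct fields automatically.
type AggSpec struct {
	ExecutionHint cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint"`
}

func main() {
	var spec AggSpec
	_ = json.Unmarshal([]byte(`{"execution_hint":"direct"}`), &spec)
	fmt.Println(spec.ExecutionHint == cardinalityexecutionmode.Direct) // true
}
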
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catanomalydetectorcolumn +package catanomalydetectorcolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L32-L401 +type CatAnomalyDetectorColumn struct { + Name string +} + +var ( + Assignmentexplanation = CatAnomalyDetectorColumn{"assignment_explanation"} + + Bucketscount = CatAnomalyDetectorColumn{"buckets.count"} + + Bucketstimeexpavg = CatAnomalyDetectorColumn{"buckets.time.exp_avg"} + + Bucketstimeexpavghour = CatAnomalyDetectorColumn{"buckets.time.exp_avg_hour"} + + Bucketstimemax = CatAnomalyDetectorColumn{"buckets.time.max"} + + Bucketstimemin = CatAnomalyDetectorColumn{"buckets.time.min"} + + Bucketstimetotal = CatAnomalyDetectorColumn{"buckets.time.total"} + + Databuckets = CatAnomalyDetectorColumn{"data.buckets"} + + Dataearliestrecord = CatAnomalyDetectorColumn{"data.earliest_record"} + + Dataemptybuckets = CatAnomalyDetectorColumn{"data.empty_buckets"} + + Datainputbytes = CatAnomalyDetectorColumn{"data.input_bytes"} + + Datainputfields = CatAnomalyDetectorColumn{"data.input_fields"} + + Datainputrecords = CatAnomalyDetectorColumn{"data.input_records"} + + Datainvaliddates = CatAnomalyDetectorColumn{"data.invalid_dates"} + + Datalast = CatAnomalyDetectorColumn{"data.last"} + + Datalastemptybucket = CatAnomalyDetectorColumn{"data.last_empty_bucket"} + + Datalastsparsebucket = CatAnomalyDetectorColumn{"data.last_sparse_bucket"} + + Datalatestrecord = CatAnomalyDetectorColumn{"data.latest_record"} + + Datamissingfields = CatAnomalyDetectorColumn{"data.missing_fields"} + + Dataoutofordertimestamps = CatAnomalyDetectorColumn{"data.out_of_order_timestamps"} + + Dataprocessedfields = CatAnomalyDetectorColumn{"data.processed_fields"} + + Dataprocessedrecords = CatAnomalyDetectorColumn{"data.processed_records"} + + Datasparsebuckets = CatAnomalyDetectorColumn{"data.sparse_buckets"} + + Forecastsmemoryavg = CatAnomalyDetectorColumn{"forecasts.memory.avg"} + + Forecastsmemorymax = CatAnomalyDetectorColumn{"forecasts.memory.max"} + + Forecastsmemorymin = CatAnomalyDetectorColumn{"forecasts.memory.min"} + + Forecastsmemorytotal = CatAnomalyDetectorColumn{"forecasts.memory.total"} + + Forecastsrecordsavg = CatAnomalyDetectorColumn{"forecasts.records.avg"} + + Forecastsrecordsmax = CatAnomalyDetectorColumn{"forecasts.records.max"} + + Forecastsrecordsmin = CatAnomalyDetectorColumn{"forecasts.records.min"} + + Forecastsrecordstotal = CatAnomalyDetectorColumn{"forecasts.records.total"} + + Forecaststimeavg = CatAnomalyDetectorColumn{"forecasts.time.avg"} + + Forecaststimemax = CatAnomalyDetectorColumn{"forecasts.time.max"} + + Forecaststimemin = CatAnomalyDetectorColumn{"forecasts.time.min"} + + Forecaststimetotal = CatAnomalyDetectorColumn{"forecasts.time.total"} + + Forecaststotal = CatAnomalyDetectorColumn{"forecasts.total"} + + Id = CatAnomalyDetectorColumn{"id"} + + Modelbucketallocationfailures = CatAnomalyDetectorColumn{"model.bucket_allocation_failures"} + + Modelbyfields = CatAnomalyDetectorColumn{"model.by_fields"} + + Modelbytes = CatAnomalyDetectorColumn{"model.bytes"} + + Modelbytesexceeded = CatAnomalyDetectorColumn{"model.bytes_exceeded"} + + Modelcategorizationstatus = CatAnomalyDetectorColumn{"model.categorization_status"} + + Modelcategorizeddoccount = CatAnomalyDetectorColumn{"model.categorized_doc_count"} + + Modeldeadcategorycount = 
CatAnomalyDetectorColumn{"model.dead_category_count"} + + Modelfailedcategorycount = CatAnomalyDetectorColumn{"model.failed_category_count"} + + Modelfrequentcategorycount = CatAnomalyDetectorColumn{"model.frequent_category_count"} + + Modellogtime = CatAnomalyDetectorColumn{"model.log_time"} + + Modelmemorylimit = CatAnomalyDetectorColumn{"model.memory_limit"} + + Modelmemorystatus = CatAnomalyDetectorColumn{"model.memory_status"} + + Modeloverfields = CatAnomalyDetectorColumn{"model.over_fields"} + + Modelpartitionfields = CatAnomalyDetectorColumn{"model.partition_fields"} + + Modelrarecategorycount = CatAnomalyDetectorColumn{"model.rare_category_count"} + + Modeltimestamp = CatAnomalyDetectorColumn{"model.timestamp"} + + Modeltotalcategorycount = CatAnomalyDetectorColumn{"model.total_category_count"} + + Nodeaddress = CatAnomalyDetectorColumn{"node.address"} + + Nodeephemeralid = CatAnomalyDetectorColumn{"node.ephemeral_id"} + + Nodeid = CatAnomalyDetectorColumn{"node.id"} + + Nodename = CatAnomalyDetectorColumn{"node.name"} + + Openedtime = CatAnomalyDetectorColumn{"opened_time"} + + State = CatAnomalyDetectorColumn{"state"} +) + +func (c CatAnomalyDetectorColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatAnomalyDetectorColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "assignment_explanation": + *c = Assignmentexplanation + case "buckets.count": + *c = Bucketscount + case "buckets.time.exp_avg": + *c = Bucketstimeexpavg + case "buckets.time.exp_avg_hour": + *c = Bucketstimeexpavghour + case "buckets.time.max": + *c = Bucketstimemax + case "buckets.time.min": + *c = Bucketstimemin + case "buckets.time.total": + *c = Bucketstimetotal + case "data.buckets": + *c = Databuckets + case "data.earliest_record": + *c = Dataearliestrecord + case "data.empty_buckets": + *c = Dataemptybuckets + case "data.input_bytes": + *c = Datainputbytes + case "data.input_fields": + *c = Datainputfields + case "data.input_records": + *c = Datainputrecords + case "data.invalid_dates": + *c = Datainvaliddates + case "data.last": + *c = Datalast + case "data.last_empty_bucket": + *c = Datalastemptybucket + case "data.last_sparse_bucket": + *c = Datalastsparsebucket + case "data.latest_record": + *c = Datalatestrecord + case "data.missing_fields": + *c = Datamissingfields + case "data.out_of_order_timestamps": + *c = Dataoutofordertimestamps + case "data.processed_fields": + *c = Dataprocessedfields + case "data.processed_records": + *c = Dataprocessedrecords + case "data.sparse_buckets": + *c = Datasparsebuckets + case "forecasts.memory.avg": + *c = Forecastsmemoryavg + case "forecasts.memory.max": + *c = Forecastsmemorymax + case "forecasts.memory.min": + *c = Forecastsmemorymin + case "forecasts.memory.total": + *c = Forecastsmemorytotal + case "forecasts.records.avg": + *c = Forecastsrecordsavg + case "forecasts.records.max": + *c = Forecastsrecordsmax + case "forecasts.records.min": + *c = Forecastsrecordsmin + case "forecasts.records.total": + *c = Forecastsrecordstotal + case "forecasts.time.avg": + *c = Forecaststimeavg + case "forecasts.time.max": + *c = Forecaststimemax + case "forecasts.time.min": + *c = Forecaststimemin + case "forecasts.time.total": + *c = Forecaststimetotal + case "forecasts.total": + *c = Forecaststotal + case "id": + *c = Id + case "model.bucket_allocation_failures": + *c = Modelbucketallocationfailures + case "model.by_fields": + *c = Modelbyfields + case 
"model.bytes": + *c = Modelbytes + case "model.bytes_exceeded": + *c = Modelbytesexceeded + case "model.categorization_status": + *c = Modelcategorizationstatus + case "model.categorized_doc_count": + *c = Modelcategorizeddoccount + case "model.dead_category_count": + *c = Modeldeadcategorycount + case "model.failed_category_count": + *c = Modelfailedcategorycount + case "model.frequent_category_count": + *c = Modelfrequentcategorycount + case "model.log_time": + *c = Modellogtime + case "model.memory_limit": + *c = Modelmemorylimit + case "model.memory_status": + *c = Modelmemorystatus + case "model.over_fields": + *c = Modeloverfields + case "model.partition_fields": + *c = Modelpartitionfields + case "model.rare_category_count": + *c = Modelrarecategorycount + case "model.timestamp": + *c = Modeltimestamp + case "model.total_category_count": + *c = Modeltotalcategorycount + case "node.address": + *c = Nodeaddress + case "node.ephemeral_id": + *c = Nodeephemeralid + case "node.id": + *c = Nodeid + case "node.name": + *c = Nodename + case "opened_time": + *c = Openedtime + case "state": + *c = State + default: + *c = CatAnomalyDetectorColumn{string(text)} + } + + return nil +} + +func (c CatAnomalyDetectorColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go new file mode 100644 index 000000000..7573760c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catdatafeedcolumn +package catdatafeedcolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L405-L471 +type CatDatafeedColumn struct { + Name string +} + +var ( + Ae = CatDatafeedColumn{"ae"} + + Bc = CatDatafeedColumn{"bc"} + + Id = CatDatafeedColumn{"id"} + + Na = CatDatafeedColumn{"na"} + + Ne = CatDatafeedColumn{"ne"} + + Ni = CatDatafeedColumn{"ni"} + + Nn = CatDatafeedColumn{"nn"} + + Sba = CatDatafeedColumn{"sba"} + + Sc = CatDatafeedColumn{"sc"} + + Seah = CatDatafeedColumn{"seah"} + + St = CatDatafeedColumn{"st"} + + S = CatDatafeedColumn{"s"} +) + +func (c CatDatafeedColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatDatafeedColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "ae": + *c = Ae + case "bc": + *c = Bc + case "id": + *c = Id + case "na": + *c = Na + case "ne": + *c = Ne + case "ni": + *c = Ni + case "nn": + *c = Nn + case "sba": + *c = Sba + case "sc": + *c = Sc + case "seah": + *c = Seah + case "st": + *c = St + case "s": + *c = S + default: + *c = CatDatafeedColumn{string(text)} + } + + return nil +} + +func (c CatDatafeedColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn/catdfacolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn/catdfacolumn.go new file mode 100644 index 000000000..17a40669d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn/catdfacolumn.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catdfacolumn +package catdfacolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L473-L558 +type CatDfaColumn struct { + Name string +} + +var ( + Assignmentexplanation = CatDfaColumn{"assignment_explanation"} + + Createtime = CatDfaColumn{"create_time"} + + Description = CatDfaColumn{"description"} + + Destindex = CatDfaColumn{"dest_index"} + + Failurereason = CatDfaColumn{"failure_reason"} + + Id = CatDfaColumn{"id"} + + Modelmemorylimit = CatDfaColumn{"model_memory_limit"} + + Nodeaddress = CatDfaColumn{"node.address"} + + Nodeephemeralid = CatDfaColumn{"node.ephemeral_id"} + + Nodeid = CatDfaColumn{"node.id"} + + Nodename = CatDfaColumn{"node.name"} + + Progress = CatDfaColumn{"progress"} + + Sourceindex = CatDfaColumn{"source_index"} + + State = CatDfaColumn{"state"} + + Type = CatDfaColumn{"type"} + + Version = CatDfaColumn{"version"} +) + +func (c CatDfaColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatDfaColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "assignment_explanation": + *c = Assignmentexplanation + case "create_time": + *c = Createtime + case "description": + *c = Description + case "dest_index": + *c = Destindex + case "failure_reason": + *c = Failurereason + case "id": + *c = Id + case "model_memory_limit": + *c = Modelmemorylimit + case "node.address": + *c = Nodeaddress + case "node.ephemeral_id": + *c = Nodeephemeralid + case "node.id": + *c = Nodeid + case "node.name": + *c = Nodename + case "progress": + *c = Progress + case "source_index": + *c = Sourceindex + case "state": + *c = State + case "type": + *c = Type + case "version": + *c = Version + default: + *c = CatDfaColumn{string(text)} + } + + return nil +} + +func (c CatDfaColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus/categorizationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus/categorizationstatus.go new file mode 100644 index 000000000..e6be89abe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus/categorizationstatus.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package categorizationstatus +package categorizationstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L84-L87 +type CategorizationStatus struct { + Name string +} + +var ( + Ok = CategorizationStatus{"ok"} + + Warn = CategorizationStatus{"warn"} +) + +func (c CategorizationStatus) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CategorizationStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "ok": + *c = Ok + case "warn": + *c = Warn + default: + *c = CategorizationStatus{string(text)} + } + + return nil +} + +func (c CategorizationStatus) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn/catnodecolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn/catnodecolumn.go new file mode 100644 index 000000000..702b76c35 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn/catnodecolumn.go @@ -0,0 +1,405 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
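
UnmarshalText lowercases its input and strips double quotes before matching, so mixed-case text and raw quoted JSON fragments both resolve to the canonical value. A two-line sketch with the categorizationstatus package completed just above:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus"
)

func main() {
	// ToLower plus quote-stripping happens before the switch, so a raw,
	// mixed-case JSON fragment still resolves to the canonical value.
	var s categorizationstatus.CategorizationStatus
	_ = s.UnmarshalText([]byte(`"WARN"`))
	fmt.Println(s == categorizationstatus.Warn) // true
}
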
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catnodecolumn +package catnodecolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L560-L1012 +type CatNodeColumn struct { + Name string +} + +var ( + Build = CatNodeColumn{"build"} + + Completionsize = CatNodeColumn{"completion.size"} + + Cpu = CatNodeColumn{"cpu"} + + Diskavail = CatNodeColumn{"disk.avail"} + + Disktotal = CatNodeColumn{"disk.total"} + + Diskused = CatNodeColumn{"disk.used"} + + Diskusedpercent = CatNodeColumn{"disk.used_percent"} + + Fielddataevictions = CatNodeColumn{"fielddata.evictions"} + + Fielddatamemorysize = CatNodeColumn{"fielddata.memory_size"} + + Filedesccurrent = CatNodeColumn{"file_desc.current"} + + Filedescmax = CatNodeColumn{"file_desc.max"} + + Filedescpercent = CatNodeColumn{"file_desc.percent"} + + Flushtotal = CatNodeColumn{"flush.total"} + + Flushtotaltime = CatNodeColumn{"flush.total_time"} + + Getcurrent = CatNodeColumn{"get.current"} + + Getexiststime = CatNodeColumn{"get.exists_time"} + + Getexiststotal = CatNodeColumn{"get.exists_total"} + + Getmissingtime = CatNodeColumn{"get.missing_time"} + + Getmissingtotal = CatNodeColumn{"get.missing_total"} + + Gettime = CatNodeColumn{"get.time"} + + Gettotal = CatNodeColumn{"get.total"} + + Heapcurrent = CatNodeColumn{"heap.current"} + + Heapmax = CatNodeColumn{"heap.max"} + + Heappercent = CatNodeColumn{"heap.percent"} + + Httpaddress = CatNodeColumn{"http_address"} + + Id = CatNodeColumn{"id"} + + Indexingdeletecurrent = CatNodeColumn{"indexing.delete_current"} + + Indexingdeletetime = CatNodeColumn{"indexing.delete_time"} + + Indexingdeletetotal = CatNodeColumn{"indexing.delete_total"} + + Indexingindexcurrent = CatNodeColumn{"indexing.index_current"} + + Indexingindexfailed = CatNodeColumn{"indexing.index_failed"} + + Indexingindexfailedduetoversionconflict = CatNodeColumn{"indexing.index_failed_due_to_version_conflict"} + + Indexingindextime = CatNodeColumn{"indexing.index_time"} + + Indexingindextotal = CatNodeColumn{"indexing.index_total"} + + Ip = CatNodeColumn{"ip"} + + Jdk = CatNodeColumn{"jdk"} + + Load1m = CatNodeColumn{"load_1m"} + + Load5m = CatNodeColumn{"load_5m"} + + Load15m = CatNodeColumn{"load_15m"} + + Mappingstotalcount = CatNodeColumn{"mappings.total_count"} + + Mappingstotalestimatedoverheadinbytes = CatNodeColumn{"mappings.total_estimated_overhead_in_bytes"} + + Master = CatNodeColumn{"master"} + + Mergescurrent = CatNodeColumn{"merges.current"} + + Mergescurrentdocs = CatNodeColumn{"merges.current_docs"} + + Mergescurrentsize = CatNodeColumn{"merges.current_size"} + + Mergestotal = CatNodeColumn{"merges.total"} + + Mergestotaldocs = CatNodeColumn{"merges.total_docs"} + + Mergestotalsize = CatNodeColumn{"merges.total_size"} + + Mergestotaltime = CatNodeColumn{"merges.total_time"} + + Name = CatNodeColumn{"name"} + + Noderole = CatNodeColumn{"node.role"} + + Pid = CatNodeColumn{"pid"} + + Port = CatNodeColumn{"port"} + + Querycachememorysize = CatNodeColumn{"query_cache.memory_size"} + + Querycacheevictions = CatNodeColumn{"query_cache.evictions"} + + Querycachehitcount = CatNodeColumn{"query_cache.hit_count"} + + Querycachemisscount = CatNodeColumn{"query_cache.miss_count"} + + Ramcurrent = CatNodeColumn{"ram.current"} + + Rammax = CatNodeColumn{"ram.max"} + + Rampercent = CatNodeColumn{"ram.percent"} + + Refreshtotal = 
CatNodeColumn{"refresh.total"} + + Refreshtime = CatNodeColumn{"refresh.time"} + + Requestcachememorysize = CatNodeColumn{"request_cache.memory_size"} + + Requestcacheevictions = CatNodeColumn{"request_cache.evictions"} + + Requestcachehitcount = CatNodeColumn{"request_cache.hit_count"} + + Requestcachemisscount = CatNodeColumn{"request_cache.miss_count"} + + Scriptcompilations = CatNodeColumn{"script.compilations"} + + Scriptcacheevictions = CatNodeColumn{"script.cache_evictions"} + + Searchfetchcurrent = CatNodeColumn{"search.fetch_current"} + + Searchfetchtime = CatNodeColumn{"search.fetch_time"} + + Searchfetchtotal = CatNodeColumn{"search.fetch_total"} + + Searchopencontexts = CatNodeColumn{"search.open_contexts"} + + Searchquerycurrent = CatNodeColumn{"search.query_current"} + + Searchquerytime = CatNodeColumn{"search.query_time"} + + Searchquerytotal = CatNodeColumn{"search.query_total"} + + Searchscrollcurrent = CatNodeColumn{"search.scroll_current"} + + Searchscrolltime = CatNodeColumn{"search.scroll_time"} + + Searchscrolltotal = CatNodeColumn{"search.scroll_total"} + + Segmentscount = CatNodeColumn{"segments.count"} + + Segmentsfixedbitsetmemory = CatNodeColumn{"segments.fixed_bitset_memory"} + + Segmentsindexwritermemory = CatNodeColumn{"segments.index_writer_memory"} + + Segmentsmemory = CatNodeColumn{"segments.memory"} + + Segmentsversionmapmemory = CatNodeColumn{"segments.version_map_memory"} + + Shardstatstotalcount = CatNodeColumn{"shard_stats.total_count"} + + Suggestcurrent = CatNodeColumn{"suggest.current"} + + Suggesttime = CatNodeColumn{"suggest.time"} + + Suggesttotal = CatNodeColumn{"suggest.total"} + + Uptime = CatNodeColumn{"uptime"} + + Version = CatNodeColumn{"version"} +) + +func (c CatNodeColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatNodeColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "build": + *c = Build + case "completion.size": + *c = Completionsize + case "cpu": + *c = Cpu + case "disk.avail": + *c = Diskavail + case "disk.total": + *c = Disktotal + case "disk.used": + *c = Diskused + case "disk.used_percent": + *c = Diskusedpercent + case "fielddata.evictions": + *c = Fielddataevictions + case "fielddata.memory_size": + *c = Fielddatamemorysize + case "file_desc.current": + *c = Filedesccurrent + case "file_desc.max": + *c = Filedescmax + case "file_desc.percent": + *c = Filedescpercent + case "flush.total": + *c = Flushtotal + case "flush.total_time": + *c = Flushtotaltime + case "get.current": + *c = Getcurrent + case "get.exists_time": + *c = Getexiststime + case "get.exists_total": + *c = Getexiststotal + case "get.missing_time": + *c = Getmissingtime + case "get.missing_total": + *c = Getmissingtotal + case "get.time": + *c = Gettime + case "get.total": + *c = Gettotal + case "heap.current": + *c = Heapcurrent + case "heap.max": + *c = Heapmax + case "heap.percent": + *c = Heappercent + case "http_address": + *c = Httpaddress + case "id": + *c = Id + case "indexing.delete_current": + *c = Indexingdeletecurrent + case "indexing.delete_time": + *c = Indexingdeletetime + case "indexing.delete_total": + *c = Indexingdeletetotal + case "indexing.index_current": + *c = Indexingindexcurrent + case "indexing.index_failed": + *c = Indexingindexfailed + case "indexing.index_failed_due_to_version_conflict": + *c = Indexingindexfailedduetoversionconflict + case "indexing.index_time": + *c = Indexingindextime + case 
"indexing.index_total": + *c = Indexingindextotal + case "ip": + *c = Ip + case "jdk": + *c = Jdk + case "load_1m": + *c = Load1m + case "load_5m": + *c = Load5m + case "load_15m": + *c = Load15m + case "mappings.total_count": + *c = Mappingstotalcount + case "mappings.total_estimated_overhead_in_bytes": + *c = Mappingstotalestimatedoverheadinbytes + case "master": + *c = Master + case "merges.current": + *c = Mergescurrent + case "merges.current_docs": + *c = Mergescurrentdocs + case "merges.current_size": + *c = Mergescurrentsize + case "merges.total": + *c = Mergestotal + case "merges.total_docs": + *c = Mergestotaldocs + case "merges.total_size": + *c = Mergestotalsize + case "merges.total_time": + *c = Mergestotaltime + case "name": + *c = Name + case "node.role": + *c = Noderole + case "pid": + *c = Pid + case "port": + *c = Port + case "query_cache.memory_size": + *c = Querycachememorysize + case "query_cache.evictions": + *c = Querycacheevictions + case "query_cache.hit_count": + *c = Querycachehitcount + case "query_cache.miss_count": + *c = Querycachemisscount + case "ram.current": + *c = Ramcurrent + case "ram.max": + *c = Rammax + case "ram.percent": + *c = Rampercent + case "refresh.total": + *c = Refreshtotal + case "refresh.time": + *c = Refreshtime + case "request_cache.memory_size": + *c = Requestcachememorysize + case "request_cache.evictions": + *c = Requestcacheevictions + case "request_cache.hit_count": + *c = Requestcachehitcount + case "request_cache.miss_count": + *c = Requestcachemisscount + case "script.compilations": + *c = Scriptcompilations + case "script.cache_evictions": + *c = Scriptcacheevictions + case "search.fetch_current": + *c = Searchfetchcurrent + case "search.fetch_time": + *c = Searchfetchtime + case "search.fetch_total": + *c = Searchfetchtotal + case "search.open_contexts": + *c = Searchopencontexts + case "search.query_current": + *c = Searchquerycurrent + case "search.query_time": + *c = Searchquerytime + case "search.query_total": + *c = Searchquerytotal + case "search.scroll_current": + *c = Searchscrollcurrent + case "search.scroll_time": + *c = Searchscrolltime + case "search.scroll_total": + *c = Searchscrolltotal + case "segments.count": + *c = Segmentscount + case "segments.fixed_bitset_memory": + *c = Segmentsfixedbitsetmemory + case "segments.index_writer_memory": + *c = Segmentsindexwritermemory + case "segments.memory": + *c = Segmentsmemory + case "segments.version_map_memory": + *c = Segmentsversionmapmemory + case "shard_stats.total_count": + *c = Shardstatstotalcount + case "suggest.current": + *c = Suggestcurrent + case "suggest.time": + *c = Suggesttime + case "suggest.total": + *c = Suggesttotal + case "uptime": + *c = Uptime + case "version": + *c = Version + default: + *c = CatNodeColumn{string(text)} + } + + return nil +} + +func (c CatNodeColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn/catrecoverycolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn/catrecoverycolumn.go new file mode 100644 index 000000000..422f8edef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn/catrecoverycolumn.go @@ -0,0 +1,153 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catrecoverycolumn +package catrecoverycolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1014-L1145 +type CatRecoveryColumn struct { + Name string +} + +var ( + Index = CatRecoveryColumn{"index"} + + Shard = CatRecoveryColumn{"shard"} + + Time = CatRecoveryColumn{"time"} + + Type = CatRecoveryColumn{"type"} + + Stage = CatRecoveryColumn{"stage"} + + Sourcehost = CatRecoveryColumn{"source_host"} + + Sourcenode = CatRecoveryColumn{"source_node"} + + Targethost = CatRecoveryColumn{"target_host"} + + Targetnode = CatRecoveryColumn{"target_node"} + + Repository = CatRecoveryColumn{"repository"} + + Snapshot = CatRecoveryColumn{"snapshot"} + + Files = CatRecoveryColumn{"files"} + + Filesrecovered = CatRecoveryColumn{"files_recovered"} + + Filespercent = CatRecoveryColumn{"files_percent"} + + Filestotal = CatRecoveryColumn{"files_total"} + + Bytes = CatRecoveryColumn{"bytes"} + + Bytesrecovered = CatRecoveryColumn{"bytes_recovered"} + + Bytespercent = CatRecoveryColumn{"bytes_percent"} + + Bytestotal = CatRecoveryColumn{"bytes_total"} + + Translogops = CatRecoveryColumn{"translog_ops"} + + Translogopsrecovered = CatRecoveryColumn{"translog_ops_recovered"} + + Translogopspercent = CatRecoveryColumn{"translog_ops_percent"} + + Starttime = CatRecoveryColumn{"start_time"} + + Starttimemillis = CatRecoveryColumn{"start_time_millis"} + + Stoptime = CatRecoveryColumn{"stop_time"} + + Stoptimemillis = CatRecoveryColumn{"stop_time_millis"} +) + +func (c CatRecoveryColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatRecoveryColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index": + *c = Index + case "shard": + *c = Shard + case "time": + *c = Time + case "type": + *c = Type + case "stage": + *c = Stage + case "source_host": + *c = Sourcehost + case "source_node": + *c = Sourcenode + case "target_host": + *c = Targethost + case "target_node": + *c = Targetnode + case "repository": + *c = Repository + case "snapshot": + *c = Snapshot + case "files": + *c = Files + case "files_recovered": + *c = Filesrecovered + case "files_percent": + *c = Filespercent + case "files_total": + *c = Filestotal + case "bytes": + *c = Bytes + case "bytes_recovered": + *c = Bytesrecovered + case "bytes_percent": + *c = Bytespercent + case "bytes_total": + *c = Bytestotal + case "translog_ops": + *c = Translogops + case "translog_ops_recovered": + *c = Translogopsrecovered + case "translog_ops_percent": + *c = Translogopspercent + case "start_time": + *c = Starttime + case "start_time_millis": + *c = Starttimemillis + case 
"stop_time": + *c = Stoptime + case "stop_time_millis": + *c = Stoptimemillis + default: + *c = CatRecoveryColumn{string(text)} + } + + return nil +} + +func (c CatRecoveryColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn/catsegmentscolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn/catsegmentscolumn.go new file mode 100644 index 000000000..964317f70 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn/catsegmentscolumn.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catsegmentscolumn +package catsegmentscolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1147-L1212 +type CatSegmentsColumn struct { + Name string +} + +var ( + Index = CatSegmentsColumn{"index"} + + Shard = CatSegmentsColumn{"shard"} + + Prirep = CatSegmentsColumn{"prirep"} + + Ip = CatSegmentsColumn{"ip"} + + Segment = CatSegmentsColumn{"segment"} + + Generation = CatSegmentsColumn{"generation"} + + Docscount = CatSegmentsColumn{"docs.count"} + + Docsdeleted = CatSegmentsColumn{"docs.deleted"} + + Size = CatSegmentsColumn{"size"} + + Sizememory = CatSegmentsColumn{"size.memory"} + + Committed = CatSegmentsColumn{"committed"} + + Searchable = CatSegmentsColumn{"searchable"} + + Version = CatSegmentsColumn{"version"} + + Compound = CatSegmentsColumn{"compound"} + + Id = CatSegmentsColumn{"id"} +) + +func (c CatSegmentsColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatSegmentsColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index": + *c = Index + case "shard": + *c = Shard + case "prirep": + *c = Prirep + case "ip": + *c = Ip + case "segment": + *c = Segment + case "generation": + *c = Generation + case "docs.count": + *c = Docscount + case "docs.deleted": + *c = Docsdeleted + case "size": + *c = Size + case "size.memory": + *c = Sizememory + case "committed": + *c = Committed + case "searchable": + *c = Searchable + case "version": + *c = Version + case "compound": + *c = Compound + case "id": + *c = Id + default: + *c = CatSegmentsColumn{string(text)} + } + + return nil +} + +func (c CatSegmentsColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn/catshardcolumn.go 
b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn/catshardcolumn.go new file mode 100644 index 000000000..00527a45a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn/catshardcolumn.go @@ -0,0 +1,329 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catshardcolumn +package catshardcolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1576-L1949 +type CatShardColumn struct { + Name string +} + +var ( + Completionsize = CatShardColumn{"completion.size"} + + Datasetsize = CatShardColumn{"dataset.size"} + + Densevectorvaluecount = CatShardColumn{"dense_vector.value_count"} + + Docs = CatShardColumn{"docs"} + + Fielddataevictions = CatShardColumn{"fielddata.evictions"} + + Fielddatamemorysize = CatShardColumn{"fielddata.memory_size"} + + Flushtotal = CatShardColumn{"flush.total"} + + Flushtotaltime = CatShardColumn{"flush.total_time"} + + Getcurrent = CatShardColumn{"get.current"} + + Getexiststime = CatShardColumn{"get.exists_time"} + + Getexiststotal = CatShardColumn{"get.exists_total"} + + Getmissingtime = CatShardColumn{"get.missing_time"} + + Getmissingtotal = CatShardColumn{"get.missing_total"} + + Gettime = CatShardColumn{"get.time"} + + Gettotal = CatShardColumn{"get.total"} + + Id = CatShardColumn{"id"} + + Index = CatShardColumn{"index"} + + Indexingdeletecurrent = CatShardColumn{"indexing.delete_current"} + + Indexingdeletetime = CatShardColumn{"indexing.delete_time"} + + Indexingdeletetotal = CatShardColumn{"indexing.delete_total"} + + Indexingindexcurrent = CatShardColumn{"indexing.index_current"} + + Indexingindexfailedduetoversionconflict = CatShardColumn{"indexing.index_failed_due_to_version_conflict"} + + Indexingindexfailed = CatShardColumn{"indexing.index_failed"} + + Indexingindextime = CatShardColumn{"indexing.index_time"} + + Indexingindextotal = CatShardColumn{"indexing.index_total"} + + Ip = CatShardColumn{"ip"} + + Mergescurrent = CatShardColumn{"merges.current"} + + Mergescurrentdocs = CatShardColumn{"merges.current_docs"} + + Mergescurrentsize = CatShardColumn{"merges.current_size"} + + Mergestotal = CatShardColumn{"merges.total"} + + Mergestotaldocs = CatShardColumn{"merges.total_docs"} + + Mergestotalsize = CatShardColumn{"merges.total_size"} + + Mergestotaltime = CatShardColumn{"merges.total_time"} + + Node = CatShardColumn{"node"} + + Prirep = CatShardColumn{"prirep"} + + Querycacheevictions = CatShardColumn{"query_cache.evictions"} + + 
Querycachememorysize = CatShardColumn{"query_cache.memory_size"} + + Recoverysourcetype = CatShardColumn{"recoverysource.type"} + + Refreshtime = CatShardColumn{"refresh.time"} + + Refreshtotal = CatShardColumn{"refresh.total"} + + Searchfetchcurrent = CatShardColumn{"search.fetch_current"} + + Searchfetchtime = CatShardColumn{"search.fetch_time"} + + Searchfetchtotal = CatShardColumn{"search.fetch_total"} + + Searchopencontexts = CatShardColumn{"search.open_contexts"} + + Searchquerycurrent = CatShardColumn{"search.query_current"} + + Searchquerytime = CatShardColumn{"search.query_time"} + + Searchquerytotal = CatShardColumn{"search.query_total"} + + Searchscrollcurrent = CatShardColumn{"search.scroll_current"} + + Searchscrolltime = CatShardColumn{"search.scroll_time"} + + Searchscrolltotal = CatShardColumn{"search.scroll_total"} + + Segmentscount = CatShardColumn{"segments.count"} + + Segmentsfixedbitsetmemory = CatShardColumn{"segments.fixed_bitset_memory"} + + Segmentsindexwritermemory = CatShardColumn{"segments.index_writer_memory"} + + Segmentsmemory = CatShardColumn{"segments.memory"} + + Segmentsversionmapmemory = CatShardColumn{"segments.version_map_memory"} + + Seqnoglobalcheckpoint = CatShardColumn{"seq_no.global_checkpoint"} + + Seqnolocalcheckpoint = CatShardColumn{"seq_no.local_checkpoint"} + + Seqnomax = CatShardColumn{"seq_no.max"} + + Shard = CatShardColumn{"shard"} + + Dsparsevectorvaluecount = CatShardColumn{"dsparse_vector.value_count"} + + State = CatShardColumn{"state"} + + Store = CatShardColumn{"store"} + + Suggestcurrent = CatShardColumn{"suggest.current"} + + Suggesttime = CatShardColumn{"suggest.time"} + + Suggesttotal = CatShardColumn{"suggest.total"} + + Syncid = CatShardColumn{"sync_id"} + + Unassignedat = CatShardColumn{"unassigned.at"} + + Unassigneddetails = CatShardColumn{"unassigned.details"} + + Unassignedfor = CatShardColumn{"unassigned.for"} + + Unassignedreason = CatShardColumn{"unassigned.reason"} +) + +func (c CatShardColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatShardColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "completion.size": + *c = Completionsize + case "dataset.size": + *c = Datasetsize + case "dense_vector.value_count": + *c = Densevectorvaluecount + case "docs": + *c = Docs + case "fielddata.evictions": + *c = Fielddataevictions + case "fielddata.memory_size": + *c = Fielddatamemorysize + case "flush.total": + *c = Flushtotal + case "flush.total_time": + *c = Flushtotaltime + case "get.current": + *c = Getcurrent + case "get.exists_time": + *c = Getexiststime + case "get.exists_total": + *c = Getexiststotal + case "get.missing_time": + *c = Getmissingtime + case "get.missing_total": + *c = Getmissingtotal + case "get.time": + *c = Gettime + case "get.total": + *c = Gettotal + case "id": + *c = Id + case "index": + *c = Index + case "indexing.delete_current": + *c = Indexingdeletecurrent + case "indexing.delete_time": + *c = Indexingdeletetime + case "indexing.delete_total": + *c = Indexingdeletetotal + case "indexing.index_current": + *c = Indexingindexcurrent + case "indexing.index_failed_due_to_version_conflict": + *c = Indexingindexfailedduetoversionconflict + case "indexing.index_failed": + *c = Indexingindexfailed + case "indexing.index_time": + *c = Indexingindextime + case "indexing.index_total": + *c = Indexingindextotal + case "ip": + *c = Ip + case "merges.current": + *c = Mergescurrent + case 
"merges.current_docs": + *c = Mergescurrentdocs + case "merges.current_size": + *c = Mergescurrentsize + case "merges.total": + *c = Mergestotal + case "merges.total_docs": + *c = Mergestotaldocs + case "merges.total_size": + *c = Mergestotalsize + case "merges.total_time": + *c = Mergestotaltime + case "node": + *c = Node + case "prirep": + *c = Prirep + case "query_cache.evictions": + *c = Querycacheevictions + case "query_cache.memory_size": + *c = Querycachememorysize + case "recoverysource.type": + *c = Recoverysourcetype + case "refresh.time": + *c = Refreshtime + case "refresh.total": + *c = Refreshtotal + case "search.fetch_current": + *c = Searchfetchcurrent + case "search.fetch_time": + *c = Searchfetchtime + case "search.fetch_total": + *c = Searchfetchtotal + case "search.open_contexts": + *c = Searchopencontexts + case "search.query_current": + *c = Searchquerycurrent + case "search.query_time": + *c = Searchquerytime + case "search.query_total": + *c = Searchquerytotal + case "search.scroll_current": + *c = Searchscrollcurrent + case "search.scroll_time": + *c = Searchscrolltime + case "search.scroll_total": + *c = Searchscrolltotal + case "segments.count": + *c = Segmentscount + case "segments.fixed_bitset_memory": + *c = Segmentsfixedbitsetmemory + case "segments.index_writer_memory": + *c = Segmentsindexwritermemory + case "segments.memory": + *c = Segmentsmemory + case "segments.version_map_memory": + *c = Segmentsversionmapmemory + case "seq_no.global_checkpoint": + *c = Seqnoglobalcheckpoint + case "seq_no.local_checkpoint": + *c = Seqnolocalcheckpoint + case "seq_no.max": + *c = Seqnomax + case "shard": + *c = Shard + case "dsparse_vector.value_count": + *c = Dsparsevectorvaluecount + case "state": + *c = State + case "store": + *c = Store + case "suggest.current": + *c = Suggestcurrent + case "suggest.time": + *c = Suggesttime + case "suggest.total": + *c = Suggesttotal + case "sync_id": + *c = Syncid + case "unassigned.at": + *c = Unassignedat + case "unassigned.details": + *c = Unassigneddetails + case "unassigned.for": + *c = Unassignedfor + case "unassigned.reason": + *c = Unassignedreason + default: + *c = CatShardColumn{string(text)} + } + + return nil +} + +func (c CatShardColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn/catsnapshotscolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn/catsnapshotscolumn.go new file mode 100644 index 000000000..2deaca9ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn/catsnapshotscolumn.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catsnapshotscolumn +package catsnapshotscolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1214-L1281 +type CatSnapshotsColumn struct { + Name string +} + +var ( + Id = CatSnapshotsColumn{"id"} + + Repository = CatSnapshotsColumn{"repository"} + + Status = CatSnapshotsColumn{"status"} + + Startepoch = CatSnapshotsColumn{"start_epoch"} + + Starttime = CatSnapshotsColumn{"start_time"} + + Endepoch = CatSnapshotsColumn{"end_epoch"} + + Endtime = CatSnapshotsColumn{"end_time"} + + Duration = CatSnapshotsColumn{"duration"} + + Indices = CatSnapshotsColumn{"indices"} + + Successfulshards = CatSnapshotsColumn{"successful_shards"} + + Failedshards = CatSnapshotsColumn{"failed_shards"} + + Totalshards = CatSnapshotsColumn{"total_shards"} + + Reason = CatSnapshotsColumn{"reason"} +) + +func (c CatSnapshotsColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatSnapshotsColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "id": + *c = Id + case "repository": + *c = Repository + case "status": + *c = Status + case "start_epoch": + *c = Startepoch + case "start_time": + *c = Starttime + case "end_epoch": + *c = Endepoch + case "end_time": + *c = Endtime + case "duration": + *c = Duration + case "indices": + *c = Indices + case "successful_shards": + *c = Successfulshards + case "failed_shards": + *c = Failedshards + case "total_shards": + *c = Totalshards + case "reason": + *c = Reason + default: + *c = CatSnapshotsColumn{string(text)} + } + + return nil +} + +func (c CatSnapshotsColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn/catthreadpoolcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn/catthreadpoolcolumn.go new file mode 100644 index 000000000..ac0ebe1f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn/catthreadpoolcolumn.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
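
String() returns the wire name, so a column selection can be joined into the comma-separated list that the _cat APIs accept for their h parameter. Whether a given typed request builder takes these values directly is determined by code elsewhere in this diff; the sketch below, using the catsnapshotscolumn package completed just above, touches only the enum package itself:

package main

import (
	"fmt"
	"strings"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn"
)

func main() {
	// String() yields the wire name, so a selection of columns joins into
	// the comma-separated form used by _cat column lists.
	cols := []catsnapshotscolumn.CatSnapshotsColumn{
		catsnapshotscolumn.Id,
		catsnapshotscolumn.Status,
		catsnapshotscolumn.Duration,
	}
	names := make([]string, 0, len(cols))
	for _, c := range cols {
		names = append(names, c.String())
	}
	fmt.Println(strings.Join(names, ",")) // id,status,duration
}
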
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package catthreadpoolcolumn +package catthreadpoolcolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1952-L2052 +type CatThreadPoolColumn struct { + Name string +} + +var ( + Active = CatThreadPoolColumn{"active"} + + Completed = CatThreadPoolColumn{"completed"} + + Core = CatThreadPoolColumn{"core"} + + Ephemeralid = CatThreadPoolColumn{"ephemeral_id"} + + Host = CatThreadPoolColumn{"host"} + + Ip = CatThreadPoolColumn{"ip"} + + Keepalive = CatThreadPoolColumn{"keep_alive"} + + Largest = CatThreadPoolColumn{"largest"} + + Max = CatThreadPoolColumn{"max"} + + Name = CatThreadPoolColumn{"name"} + + Nodeid = CatThreadPoolColumn{"node_id"} + + Nodename = CatThreadPoolColumn{"node_name"} + + Pid = CatThreadPoolColumn{"pid"} + + Poolsize = CatThreadPoolColumn{"pool_size"} + + Port = CatThreadPoolColumn{"port"} + + Queue = CatThreadPoolColumn{"queue"} + + Queuesize = CatThreadPoolColumn{"queue_size"} + + Rejected = CatThreadPoolColumn{"rejected"} + + Size = CatThreadPoolColumn{"size"} + + Type = CatThreadPoolColumn{"type"} +) + +func (c CatThreadPoolColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatThreadPoolColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "active": + *c = Active + case "completed": + *c = Completed + case "core": + *c = Core + case "ephemeral_id": + *c = Ephemeralid + case "host": + *c = Host + case "ip": + *c = Ip + case "keep_alive": + *c = Keepalive + case "largest": + *c = Largest + case "max": + *c = Max + case "name": + *c = Name + case "node_id": + *c = Nodeid + case "node_name": + *c = Nodename + case "pid": + *c = Pid + case "pool_size": + *c = Poolsize + case "port": + *c = Port + case "queue": + *c = Queue + case "queue_size": + *c = Queuesize + case "rejected": + *c = Rejected + case "size": + *c = Size + case "type": + *c = Type + default: + *c = CatThreadPoolColumn{string(text)} + } + + return nil +} + +func (c CatThreadPoolColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go new file mode 100644 index 000000000..565a159e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cattrainedmodelscolumn +package cattrainedmodelscolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1290-L1364 +type CatTrainedModelsColumn struct { + Name string +} + +var ( + Createtime = CatTrainedModelsColumn{"create_time"} + + Createdby = CatTrainedModelsColumn{"created_by"} + + Dataframeanalyticsid = CatTrainedModelsColumn{"data_frame_analytics_id"} + + Description = CatTrainedModelsColumn{"description"} + + Heapsize = CatTrainedModelsColumn{"heap_size"} + + Id = CatTrainedModelsColumn{"id"} + + Ingestcount = CatTrainedModelsColumn{"ingest.count"} + + Ingestcurrent = CatTrainedModelsColumn{"ingest.current"} + + Ingestfailed = CatTrainedModelsColumn{"ingest.failed"} + + Ingestpipelines = CatTrainedModelsColumn{"ingest.pipelines"} + + Ingesttime = CatTrainedModelsColumn{"ingest.time"} + + License = CatTrainedModelsColumn{"license"} + + Operations = CatTrainedModelsColumn{"operations"} + + Version = CatTrainedModelsColumn{"version"} +) + +func (c CatTrainedModelsColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatTrainedModelsColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "create_time": + *c = Createtime + case "created_by": + *c = Createdby + case "data_frame_analytics_id": + *c = Dataframeanalyticsid + case "description": + *c = Description + case "heap_size": + *c = Heapsize + case "id": + *c = Id + case "ingest.count": + *c = Ingestcount + case "ingest.current": + *c = Ingestcurrent + case "ingest.failed": + *c = Ingestfailed + case "ingest.pipelines": + *c = Ingestpipelines + case "ingest.time": + *c = Ingesttime + case "license": + *c = License + case "operations": + *c = Operations + case "version": + *c = Version + default: + *c = CatTrainedModelsColumn{string(text)} + } + + return nil +} + +func (c CatTrainedModelsColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go new file mode 100644 index 000000000..12707528c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
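
Slices of these values marshal to JSON arrays of wire names, since encoding/json applies MarshalText per element. A minimal sketch using the cattrainedmodelscolumn package completed just above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn"
)

func main() {
	cols := []cattrainedmodelscolumn.CatTrainedModelsColumn{
		cattrainedmodelscolumn.Id,
		cattrainedmodelscolumn.Heapsize,
	}
	out, _ := json.Marshal(cols)
	fmt.Println(string(out)) // ["id","heap_size"]
}
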
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cattransformcolumn +package cattransformcolumn + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/_types/CatBase.ts#L1369-L1573 +type CatTransformColumn struct { + Name string +} + +var ( + Changeslastdetectiontime = CatTransformColumn{"changes_last_detection_time"} + + Checkpoint = CatTransformColumn{"checkpoint"} + + Checkpointdurationtimeexpavg = CatTransformColumn{"checkpoint_duration_time_exp_avg"} + + Checkpointprogress = CatTransformColumn{"checkpoint_progress"} + + Createtime = CatTransformColumn{"create_time"} + + Deletetime = CatTransformColumn{"delete_time"} + + Description = CatTransformColumn{"description"} + + Destindex = CatTransformColumn{"dest_index"} + + Documentsdeleted = CatTransformColumn{"documents_deleted"} + + Documentsindexed = CatTransformColumn{"documents_indexed"} + + Docspersecond = CatTransformColumn{"docs_per_second"} + + Documentsprocessed = CatTransformColumn{"documents_processed"} + + Frequency = CatTransformColumn{"frequency"} + + Id = CatTransformColumn{"id"} + + Indexfailure = CatTransformColumn{"index_failure"} + + Indextime = CatTransformColumn{"index_time"} + + Indextotal = CatTransformColumn{"index_total"} + + Indexeddocumentsexpavg = CatTransformColumn{"indexed_documents_exp_avg"} + + Lastsearchtime = CatTransformColumn{"last_search_time"} + + Maxpagesearchsize = CatTransformColumn{"max_page_search_size"} + + Pagesprocessed = CatTransformColumn{"pages_processed"} + + Pipeline = CatTransformColumn{"pipeline"} + + Processeddocumentsexpavg = CatTransformColumn{"processed_documents_exp_avg"} + + Processingtime = CatTransformColumn{"processing_time"} + + Reason = CatTransformColumn{"reason"} + + Searchfailure = CatTransformColumn{"search_failure"} + + Searchtime = CatTransformColumn{"search_time"} + + Searchtotal = CatTransformColumn{"search_total"} + + Sourceindex = CatTransformColumn{"source_index"} + + State = CatTransformColumn{"state"} + + Transformtype = CatTransformColumn{"transform_type"} + + Triggercount = CatTransformColumn{"trigger_count"} + + Version = CatTransformColumn{"version"} +) + +func (c CatTransformColumn) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CatTransformColumn) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "changes_last_detection_time": + *c = Changeslastdetectiontime + case "checkpoint": + *c = Checkpoint + case "checkpoint_duration_time_exp_avg": + *c = Checkpointdurationtimeexpavg + case "checkpoint_progress": + *c = Checkpointprogress + case "create_time": + *c = Createtime + case "delete_time": + *c = Deletetime + case "description": + *c = Description + case "dest_index": + *c = Destindex + case "documents_deleted": + *c = Documentsdeleted + case "documents_indexed": + *c = Documentsindexed + case "docs_per_second": + *c = Docspersecond + case "documents_processed": + *c = Documentsprocessed + case "frequency": + *c = Frequency + case "id": + *c = Id + case "index_failure": + *c = Indexfailure + case "index_time": + *c = Indextime + case "index_total": + *c = Indextotal + case "indexed_documents_exp_avg": + *c = Indexeddocumentsexpavg + case "last_search_time": + *c = Lastsearchtime + case "max_page_search_size": + *c = Maxpagesearchsize + case "pages_processed": + *c = Pagesprocessed + case 
"pipeline": + *c = Pipeline + case "processed_documents_exp_avg": + *c = Processeddocumentsexpavg + case "processing_time": + *c = Processingtime + case "reason": + *c = Reason + case "search_failure": + *c = Searchfailure + case "search_time": + *c = Searchtime + case "search_total": + *c = Searchtotal + case "source_index": + *c = Sourceindex + case "state": + *c = State + case "transform_type": + *c = Transformtype + case "trigger_count": + *c = Triggercount + case "version": + *c = Version + default: + *c = CatTransformColumn{string(text)} + } + + return nil +} + +func (c CatTransformColumn) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode/childscoremode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode/childscoremode.go new file mode 100644 index 000000000..0eae5c5f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode/childscoremode.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package childscoremode +package childscoremode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/joining.ts#L25-L39 +type ChildScoreMode struct { + Name string +} + +var ( + None = ChildScoreMode{"none"} + + Avg = ChildScoreMode{"avg"} + + Sum = ChildScoreMode{"sum"} + + Max = ChildScoreMode{"max"} + + Min = ChildScoreMode{"min"} +) + +func (c ChildScoreMode) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ChildScoreMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *c = None + case "avg": + *c = Avg + case "sum": + *c = Sum + case "max": + *c = Max + case "min": + *c = Min + default: + *c = ChildScoreMode{string(text)} + } + + return nil +} + +func (c ChildScoreMode) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/chunkingmode/chunkingmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/chunkingmode/chunkingmode.go new file mode 100644 index 000000000..60154f468 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/chunkingmode/chunkingmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package chunkingmode +package chunkingmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L245-L249 +type ChunkingMode struct { + Name string +} + +var ( + Auto = ChunkingMode{"auto"} + + Manual = ChunkingMode{"manual"} + + Off = ChunkingMode{"off"} +) + +func (c ChunkingMode) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ChunkingMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "auto": + *c = Auto + case "manual": + *c = Manual + case "off": + *c = Off + default: + *c = ChunkingMode{string(text)} + } + + return nil +} + +func (c ChunkingMode) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cjkbigramignoredscript/cjkbigramignoredscript.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cjkbigramignoredscript/cjkbigramignoredscript.go new file mode 100644 index 000000000..93c3866fa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cjkbigramignoredscript/cjkbigramignoredscript.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cjkbigramignoredscript +package cjkbigramignoredscript + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L459-L464 +type CjkBigramIgnoredScript struct { + Name string +} + +var ( + Han = CjkBigramIgnoredScript{"han"} + + Hangul = CjkBigramIgnoredScript{"hangul"} + + Hiragana = CjkBigramIgnoredScript{"hiragana"} + + Katakana = CjkBigramIgnoredScript{"katakana"} +) + +func (c CjkBigramIgnoredScript) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CjkBigramIgnoredScript) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "han": + *c = Han + case "hangul": + *c = Hangul + case "hiragana": + *c = Hiragana + case "katakana": + *c = Katakana + default: + *c = CjkBigramIgnoredScript{string(text)} + } + + return nil +} + +func (c CjkBigramIgnoredScript) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go new file mode 100644 index 000000000..f1e2c8f38 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package clusterinfotarget +package clusterinfotarget + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L380-L386 +type ClusterInfoTarget struct { + Name string +} + +var ( + All = ClusterInfoTarget{"_all"} + + Http = ClusterInfoTarget{"http"} + + Ingest = ClusterInfoTarget{"ingest"} + + Threadpool = ClusterInfoTarget{"thread_pool"} + + Script = ClusterInfoTarget{"script"} +) + +func (c ClusterInfoTarget) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ClusterInfoTarget) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "_all": + *c = All + case "http": + *c = Http + case "ingest": + *c = Ingest + case "thread_pool": + *c = Threadpool + case "script": + *c = Script + default: + *c = ClusterInfoTarget{string(text)} + } + + return nil +} + +func (c ClusterInfoTarget) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege/clusterprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege/clusterprivilege.go new file mode 100644 index 000000000..e62f5109e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege/clusterprivilege.go @@ -0,0 +1,293 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package clusterprivilege +package clusterprivilege + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L42-L201 +type ClusterPrivilege struct { + Name string +} + +var ( + All = ClusterPrivilege{"all"} + + Canceltask = ClusterPrivilege{"cancel_task"} + + Createsnapshot = ClusterPrivilege{"create_snapshot"} + + Crossclusterreplication = ClusterPrivilege{"cross_cluster_replication"} + + Crossclustersearch = ClusterPrivilege{"cross_cluster_search"} + + Delegatepki = ClusterPrivilege{"delegate_pki"} + + Grantapikey = ClusterPrivilege{"grant_api_key"} + + Manage = ClusterPrivilege{"manage"} + + Manageapikey = ClusterPrivilege{"manage_api_key"} + + Manageautoscaling = ClusterPrivilege{"manage_autoscaling"} + + Managebehavioralanalytics = ClusterPrivilege{"manage_behavioral_analytics"} + + Manageccr = ClusterPrivilege{"manage_ccr"} + + Managedataframetransforms = ClusterPrivilege{"manage_data_frame_transforms"} + + Managedatastreamglobalretention = ClusterPrivilege{"manage_data_stream_global_retention"} + + Manageenrich = ClusterPrivilege{"manage_enrich"} + + Manageesql = ClusterPrivilege{"manage_esql"} + + Manageilm = ClusterPrivilege{"manage_ilm"} + + Manageindextemplates = ClusterPrivilege{"manage_index_templates"} + + Manageinference = ClusterPrivilege{"manage_inference"} + + Manageingestpipelines = ClusterPrivilege{"manage_ingest_pipelines"} + + Managelogstashpipelines = ClusterPrivilege{"manage_logstash_pipelines"} + + Manageml = ClusterPrivilege{"manage_ml"} + + Manageoidc = ClusterPrivilege{"manage_oidc"} + + Manageownapikey = ClusterPrivilege{"manage_own_api_key"} + + Managepipeline = ClusterPrivilege{"manage_pipeline"} + + Managerollup = ClusterPrivilege{"manage_rollup"} + + Managesaml = ClusterPrivilege{"manage_saml"} + + Managesearchapplication = ClusterPrivilege{"manage_search_application"} + + Managesearchqueryrules = ClusterPrivilege{"manage_search_query_rules"} + + Managesearchsynonyms = ClusterPrivilege{"manage_search_synonyms"} + + Managesecurity = ClusterPrivilege{"manage_security"} + + Manageserviceaccount = ClusterPrivilege{"manage_service_account"} + + Manageslm = ClusterPrivilege{"manage_slm"} + + Managetoken = ClusterPrivilege{"manage_token"} + + Managetransform = ClusterPrivilege{"manage_transform"} + + Manageuserprofile = ClusterPrivilege{"manage_user_profile"} + + Managewatcher = ClusterPrivilege{"manage_watcher"} + + Monitor = ClusterPrivilege{"monitor"} + + Monitordataframetransforms = ClusterPrivilege{"monitor_data_frame_transforms"} + + Monitordatastreamglobalretention = ClusterPrivilege{"monitor_data_stream_global_retention"} + + Monitorenrich = ClusterPrivilege{"monitor_enrich"} + + Monitoresql = ClusterPrivilege{"monitor_esql"} + + Monitorinference = ClusterPrivilege{"monitor_inference"} + + Monitorml = ClusterPrivilege{"monitor_ml"} + + Monitorrollup = ClusterPrivilege{"monitor_rollup"} + + Monitorsnapshot = ClusterPrivilege{"monitor_snapshot"} + + Monitorstats = ClusterPrivilege{"monitor_stats"} + + Monitortextstructure = ClusterPrivilege{"monitor_text_structure"} + + Monitortransform = ClusterPrivilege{"monitor_transform"} + + Monitorwatcher = ClusterPrivilege{"monitor_watcher"} + + None = ClusterPrivilege{"none"} + + Postbehavioralanalyticsevent = ClusterPrivilege{"post_behavioral_analytics_event"} + + Readccr = 
ClusterPrivilege{"read_ccr"} + + Readfleetsecrets = ClusterPrivilege{"read_fleet_secrets"} + + Readilm = ClusterPrivilege{"read_ilm"} + + Readpipeline = ClusterPrivilege{"read_pipeline"} + + Readsecurity = ClusterPrivilege{"read_security"} + + Readslm = ClusterPrivilege{"read_slm"} + + Transportclient = ClusterPrivilege{"transport_client"} + + Writeconnectorsecrets = ClusterPrivilege{"write_connector_secrets"} + + Writefleetsecrets = ClusterPrivilege{"write_fleet_secrets"} +) + +func (c ClusterPrivilege) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ClusterPrivilege) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "all": + *c = All + case "cancel_task": + *c = Canceltask + case "create_snapshot": + *c = Createsnapshot + case "cross_cluster_replication": + *c = Crossclusterreplication + case "cross_cluster_search": + *c = Crossclustersearch + case "delegate_pki": + *c = Delegatepki + case "grant_api_key": + *c = Grantapikey + case "manage": + *c = Manage + case "manage_api_key": + *c = Manageapikey + case "manage_autoscaling": + *c = Manageautoscaling + case "manage_behavioral_analytics": + *c = Managebehavioralanalytics + case "manage_ccr": + *c = Manageccr + case "manage_data_frame_transforms": + *c = Managedataframetransforms + case "manage_data_stream_global_retention": + *c = Managedatastreamglobalretention + case "manage_enrich": + *c = Manageenrich + case "manage_esql": + *c = Manageesql + case "manage_ilm": + *c = Manageilm + case "manage_index_templates": + *c = Manageindextemplates + case "manage_inference": + *c = Manageinference + case "manage_ingest_pipelines": + *c = Manageingestpipelines + case "manage_logstash_pipelines": + *c = Managelogstashpipelines + case "manage_ml": + *c = Manageml + case "manage_oidc": + *c = Manageoidc + case "manage_own_api_key": + *c = Manageownapikey + case "manage_pipeline": + *c = Managepipeline + case "manage_rollup": + *c = Managerollup + case "manage_saml": + *c = Managesaml + case "manage_search_application": + *c = Managesearchapplication + case "manage_search_query_rules": + *c = Managesearchqueryrules + case "manage_search_synonyms": + *c = Managesearchsynonyms + case "manage_security": + *c = Managesecurity + case "manage_service_account": + *c = Manageserviceaccount + case "manage_slm": + *c = Manageslm + case "manage_token": + *c = Managetoken + case "manage_transform": + *c = Managetransform + case "manage_user_profile": + *c = Manageuserprofile + case "manage_watcher": + *c = Managewatcher + case "monitor": + *c = Monitor + case "monitor_data_frame_transforms": + *c = Monitordataframetransforms + case "monitor_data_stream_global_retention": + *c = Monitordatastreamglobalretention + case "monitor_enrich": + *c = Monitorenrich + case "monitor_esql": + *c = Monitoresql + case "monitor_inference": + *c = Monitorinference + case "monitor_ml": + *c = Monitorml + case "monitor_rollup": + *c = Monitorrollup + case "monitor_snapshot": + *c = Monitorsnapshot + case "monitor_stats": + *c = Monitorstats + case "monitor_text_structure": + *c = Monitortextstructure + case "monitor_transform": + *c = Monitortransform + case "monitor_watcher": + *c = Monitorwatcher + case "none": + *c = None + case "post_behavioral_analytics_event": + *c = Postbehavioralanalyticsevent + case "read_ccr": + *c = Readccr + case "read_fleet_secrets": + *c = Readfleetsecrets + case "read_ilm": + *c = Readilm + case "read_pipeline": + *c = Readpipeline + case 
"read_security": + *c = Readsecurity + case "read_slm": + *c = Readslm + case "transport_client": + *c = Transportclient + case "write_connector_secrets": + *c = Writeconnectorsecrets + case "write_fleet_secrets": + *c = Writefleetsecrets + default: + *c = ClusterPrivilege{string(text)} + } + + return nil +} + +func (c ClusterPrivilege) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go new file mode 100644 index 000000000..1933e22ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package clustersearchstatus +package clustersearchstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L37-L43 +type ClusterSearchStatus struct { + Name string +} + +var ( + Running = ClusterSearchStatus{"running"} + + Successful = ClusterSearchStatus{"successful"} + + Partial = ClusterSearchStatus{"partial"} + + Skipped = ClusterSearchStatus{"skipped"} + + Failed = ClusterSearchStatus{"failed"} +) + +func (c ClusterSearchStatus) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ClusterSearchStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "running": + *c = Running + case "successful": + *c = Successful + case "partial": + *c = Partial + case "skipped": + *c = Skipped + case "failed": + *c = Failed + default: + *c = ClusterSearchStatus{string(text)} + } + + return nil +} + +func (c ClusterSearchStatus) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereembeddingtype/cohereembeddingtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereembeddingtype/cohereembeddingtype.go new file mode 100644 index 000000000..0e91e5ce9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereembeddingtype/cohereembeddingtype.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cohereembeddingtype +package cohereembeddingtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L843-L849 +type CohereEmbeddingType struct { + Name string +} + +var ( + Binary = CohereEmbeddingType{"binary"} + + Bit = CohereEmbeddingType{"bit"} + + Byte = CohereEmbeddingType{"byte"} + + Float = CohereEmbeddingType{"float"} + + Int8 = CohereEmbeddingType{"int8"} +) + +func (c CohereEmbeddingType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CohereEmbeddingType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "binary": + *c = Binary + case "bit": + *c = Bit + case "byte": + *c = Byte + case "float": + *c = Float + case "int8": + *c = Int8 + default: + *c = CohereEmbeddingType{string(text)} + } + + return nil +} + +func (c CohereEmbeddingType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereinputtype/cohereinputtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereinputtype/cohereinputtype.go new file mode 100644 index 000000000..555faaac0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereinputtype/cohereinputtype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cohereinputtype +package cohereinputtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L851-L856 +type CohereInputType struct { + Name string +} + +var ( + Classification = CohereInputType{"classification"} + + Clustering = CohereInputType{"clustering"} + + Ingest = CohereInputType{"ingest"} + + Search = CohereInputType{"search"} +) + +func (c CohereInputType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CohereInputType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "classification": + *c = Classification + case "clustering": + *c = Clustering + case "ingest": + *c = Ingest + case "search": + *c = Search + default: + *c = CohereInputType{string(text)} + } + + return nil +} + +func (c CohereInputType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype/cohereservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype/cohereservicetype.go new file mode 100644 index 000000000..c170c0043 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype/cohereservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package cohereservicetype +package cohereservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L839-L841 +type CohereServiceType struct { + Name string +} + +var ( + Cohere = CohereServiceType{"cohere"} +) + +func (c CohereServiceType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CohereServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cohere": + *c = Cohere + default: + *c = CohereServiceType{string(text)} + } + + return nil +} + +func (c CohereServiceType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheresimilaritytype/coheresimilaritytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheresimilaritytype/coheresimilaritytype.go new file mode 100644 index 000000000..0d37e4d05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheresimilaritytype/coheresimilaritytype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package coheresimilaritytype +package coheresimilaritytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L858-L862 +type CohereSimilarityType struct { + Name string +} + +var ( + Cosine = CohereSimilarityType{"cosine"} + + Dotproduct = CohereSimilarityType{"dot_product"} + + L2norm = CohereSimilarityType{"l2_norm"} +) + +func (c CohereSimilarityType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CohereSimilarityType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cosine": + *c = Cosine + case "dot_product": + *c = Dotproduct + case "l2_norm": + *c = L2norm + default: + *c = CohereSimilarityType{string(text)} + } + + return nil +} + +func (c CohereSimilarityType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheretruncatetype/coheretruncatetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheretruncatetype/coheretruncatetype.go new file mode 100644 index 000000000..c53da789f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheretruncatetype/coheretruncatetype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package coheretruncatetype +package coheretruncatetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L864-L868 +type CohereTruncateType struct { + Name string +} + +var ( + END = CohereTruncateType{"END"} + + NONE = CohereTruncateType{"NONE"} + + START = CohereTruncateType{"START"} +) + +func (c CohereTruncateType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CohereTruncateType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "end": + *c = END + case "none": + *c = NONE + case "start": + *c = START + default: + *c = CohereTruncateType{string(text)} + } + + return nil +} + +func (c CohereTruncateType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go new file mode 100644 index 000000000..ef4eb0275 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package combinedfieldsoperator +package combinedfieldsoperator + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L519-L522 +type CombinedFieldsOperator struct { + Name string +} + +var ( + Or = CombinedFieldsOperator{"or"} + + And = CombinedFieldsOperator{"and"} +) + +func (c CombinedFieldsOperator) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CombinedFieldsOperator) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "or": + *c = Or + case "and": + *c = And + default: + *c = CombinedFieldsOperator{string(text)} + } + + return nil +} + +func (c CombinedFieldsOperator) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go new file mode 100644 index 000000000..e6dd68066 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package combinedfieldszeroterms +package combinedfieldszeroterms + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L524-L533 +type CombinedFieldsZeroTerms struct { + Name string +} + +var ( + None = CombinedFieldsZeroTerms{"none"} + + All = CombinedFieldsZeroTerms{"all"} +) + +func (c CombinedFieldsZeroTerms) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CombinedFieldsZeroTerms) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *c = None + case "all": + *c = All + default: + *c = CombinedFieldsZeroTerms{string(text)} + } + + return nil +} + +func (c CombinedFieldsZeroTerms) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop/conditionop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop/conditionop.go new file mode 100644 index 000000000..e7f336032 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop/conditionop.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package conditionop +package conditionop + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L42-L49 +type ConditionOp struct { + Name string +} + +var ( + Noteq = ConditionOp{"not_eq"} + + Eq = ConditionOp{"eq"} + + Lt = ConditionOp{"lt"} + + Gt = ConditionOp{"gt"} + + Lte = ConditionOp{"lte"} + + Gte = ConditionOp{"gte"} +) + +func (c ConditionOp) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConditionOp) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "not_eq": + *c = Noteq + case "eq": + *c = Eq + case "lt": + *c = Lt + case "gt": + *c = Gt + case "lte": + *c = Lte + case "gte": + *c = Gte + default: + *c = ConditionOp{string(text)} + } + + return nil +} + +func (c ConditionOp) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionoperator/conditionoperator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionoperator/conditionoperator.go new file mode 100644 index 000000000..d402d0405 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionoperator/conditionoperator.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package conditionoperator +package conditionoperator + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Rule.ts#L74-L79 +type ConditionOperator struct { + Name string +} + +var ( + Gt = ConditionOperator{"gt"} + + Gte = ConditionOperator{"gte"} + + Lt = ConditionOperator{"lt"} + + Lte = ConditionOperator{"lte"} +) + +func (c ConditionOperator) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConditionOperator) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "gt": + *c = Gt + case "gte": + *c = Gte + case "lt": + *c = Lt + case "lte": + *c = Lte + default: + *c = ConditionOperator{string(text)} + } + + return nil +} + +func (c ConditionOperator) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditiontype/conditiontype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditiontype/conditiontype.go new file mode 100644 index 000000000..42910f210 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditiontype/conditiontype.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package conditiontype +package conditiontype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L65-L71 +type ConditionType struct { + Name string +} + +var ( + Always = ConditionType{"always"} + + Never = ConditionType{"never"} + + Script = ConditionType{"script"} + + Compare = ConditionType{"compare"} + + Arraycompare = ConditionType{"array_compare"} +) + +func (c ConditionType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConditionType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "always": + *c = Always + case "never": + *c = Never + case "script": + *c = Script + case "compare": + *c = Compare + case "array_compare": + *c = Arraycompare + default: + *c = ConditionType{string(text)} + } + + return nil +} + +func (c ConditionType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts/conflicts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts/conflicts.go new file mode 100644 index 000000000..56fd051fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts/conflicts.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package conflicts +package conflicts + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L180-L189 +type Conflicts struct { + Name string +} + +var ( + Abort = Conflicts{"abort"} + + Proceed = Conflicts{"proceed"} +) + +func (c Conflicts) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *Conflicts) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "abort": + *c = Abort + case "proceed": + *c = Proceed + default: + *c = Conflicts{string(text)} + } + + return nil +} + +func (c Conflicts) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme/connectionscheme.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme/connectionscheme.go new file mode 100644 index 000000000..cf9c1f8f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme/connectionscheme.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package connectionscheme +package connectionscheme + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L39-L42 +type ConnectionScheme struct { + Name string +} + +var ( + Http = ConnectionScheme{"http"} + + Https = ConnectionScheme{"https"} +) + +func (c ConnectionScheme) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConnectionScheme) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "http": + *c = Http + case "https": + *c = Https + default: + *c = ConnectionScheme{string(text)} + } + + return nil +} + +func (c ConnectionScheme) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go new file mode 100644 index 000000000..6c76dd3b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorfieldtype/connectorfieldtype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package connectorfieldtype +package connectorfieldtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L43-L48 +type ConnectorFieldType struct { + Name string +} + +var ( + Str = ConnectorFieldType{"str"} + + Int = ConnectorFieldType{"int"} + + List = ConnectorFieldType{"list"} + + Bool = ConnectorFieldType{"bool"} +) + +func (c ConnectorFieldType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConnectorFieldType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "str": + *c = Str + case "int": + *c = Int + case "list": + *c = List + case "bool": + *c = Bool + default: + *c = ConnectorFieldType{string(text)} + } + + return nil +} + +func (c ConnectorFieldType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus/connectorstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus/connectorstatus.go new file mode 100644 index 000000000..b706c66dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus/connectorstatus.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package connectorstatus +package connectorstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L130-L136 +type ConnectorStatus struct { + Name string +} + +var ( + Created = ConnectorStatus{"created"} + + Needsconfiguration = ConnectorStatus{"needs_configuration"} + + Configured = ConnectorStatus{"configured"} + + Connected = ConnectorStatus{"connected"} + + Error = ConnectorStatus{"error"} +) + +func (c ConnectorStatus) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConnectorStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "created": + *c = Created + case "needs_configuration": + *c = Needsconfiguration + case "configured": + *c = Configured + case "connected": + *c = Connected + case "error": + *c = Error + default: + *c = ConnectorStatus{string(text)} + } + + return nil +} + +func (c ConnectorStatus) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype/converttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype/converttype.go new file mode 100644 index 000000000..8b7fe375e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype/converttype.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package converttype +package converttype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L662-L671 +type ConvertType struct { + Name string +} + +var ( + Integer = ConvertType{"integer"} + + Long = ConvertType{"long"} + + Double = ConvertType{"double"} + + Float = ConvertType{"float"} + + Boolean = ConvertType{"boolean"} + + Ip = ConvertType{"ip"} + + String = ConvertType{"string"} + + Auto = ConvertType{"auto"} +) + +func (c ConvertType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ConvertType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "integer": + *c = Integer + case "long": + *c = Long + case "double": + *c = Double + case "float": + *c = Float + case "boolean": + *c = Boolean + case "ip": + *c = Ip + case "string": + *c = String + case "auto": + *c = Auto + default: + *c = ConvertType{string(text)} + } + + return nil +} + +func (c ConvertType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype/customservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype/customservicetype.go new file mode 100644 index 000000000..2ed6c730d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype/customservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package customservicetype +package customservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1134-L1136 +type CustomServiceType struct { + Name string +} + +var ( + Custom = CustomServiceType{"custom"} +) + +func (c CustomServiceType) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *CustomServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "custom": + *c = Custom + default: + *c = CustomServiceType{string(text)} + } + + return nil +} + +func (c CustomServiceType) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go new file mode 100644 index 000000000..295774bda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dataattachmentformat +package dataattachmentformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L187-L190 +type DataAttachmentFormat struct { + Name string +} + +var ( + Json = DataAttachmentFormat{"json"} + + Yaml = DataAttachmentFormat{"yaml"} +) + +func (d DataAttachmentFormat) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DataAttachmentFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "json": + *d = Json + case "yaml": + *d = Yaml + default: + *d = DataAttachmentFormat{string(text)} + } + + return nil +} + +func (d DataAttachmentFormat) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate/datafeedstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate/datafeedstate.go new file mode 100644 index 000000000..ad25520cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate/datafeedstate.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package datafeedstate +package datafeedstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L136-L141 +type DatafeedState struct { + Name string +} + +var ( + Started = DatafeedState{"started"} + + Stopped = DatafeedState{"stopped"} + + Starting = DatafeedState{"starting"} + + Stopping = DatafeedState{"stopping"} +) + +func (d DatafeedState) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DatafeedState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "started": + *d = Started + case "stopped": + *d = Stopped + case "starting": + *d = Starting + case "stopping": + *d = Stopping + default: + *d = DatafeedState{string(text)} + } + + return nil +} + +func (d DatafeedState) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataframestate/dataframestate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataframestate/dataframestate.go new file mode 100644 index 000000000..8613ed6f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataframestate/dataframestate.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dataframestate +package dataframestate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Dataframe.ts#L20-L26 +type DataframeState struct { + Name string +} + +var ( + Started = DataframeState{"started"} + + Stopped = DataframeState{"stopped"} + + Starting = DataframeState{"starting"} + + Stopping = DataframeState{"stopping"} + + Failed = DataframeState{"failed"} +) + +func (d DataframeState) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DataframeState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "started": + *d = Started + case "stopped": + *d = Stopped + case "starting": + *d = Starting + case "stopping": + *d = Stopping + case "failed": + *d = Failed + default: + *d = DataframeState{string(text)} + } + + return nil +} + +func (d DataframeState) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/day/day.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/day/day.go new file mode 100644 index 000000000..c95d9e0ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/day/day.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package day +package day + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L37-L45 +type Day struct { + Name string +} + +var ( + Sunday = Day{"sunday"} + + Monday = Day{"monday"} + + Tuesday = Day{"tuesday"} + + Wednesday = Day{"wednesday"} + + Thursday = Day{"thursday"} + + Friday = Day{"friday"} + + Saturday = Day{"saturday"} +) + +func (d Day) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *Day) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "sunday": + *d = Sunday + case "monday": + *d = Monday + case "tuesday": + *d = Tuesday + case "wednesday": + *d = Wednesday + case "thursday": + *d = Thursday + case "friday": + *d = Friday + case "saturday": + *d = Saturday + default: + *d = Day{string(text)} + } + + return nil +} + +func (d Day) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision/decision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision/decision.go new file mode 100644 index 000000000..d7b5eb2c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision/decision.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package decision +package decision + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L92-L101 +type Decision struct { + Name string +} + +var ( + Yes = Decision{"yes"} + + No = Decision{"no"} + + Worsebalance = Decision{"worse_balance"} + + Throttled = Decision{"throttled"} + + Awaitinginfo = Decision{"awaiting_info"} + + Allocationdelayed = Decision{"allocation_delayed"} + + Novalidshardcopy = Decision{"no_valid_shard_copy"} + + Noattempt = Decision{"no_attempt"} +) + +func (d Decision) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *Decision) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "yes": + *d = Yes + case "no": + *d = No + case "worse_balance": + *d = Worsebalance + case "throttled": + *d = Throttled + case "awaiting_info": + *d = Awaitinginfo + case "allocation_delayed": + *d = Allocationdelayed + case "no_valid_shard_copy": + *d = Novalidshardcopy + case "no_attempt": + *d = Noattempt + default: + *d = Decision{string(text)} + } + + return nil +} + +func (d Decision) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype/deepseekservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype/deepseekservicetype.go new file mode 100644 index 000000000..47ca3e771 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype/deepseekservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package deepseekservicetype +package deepseekservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1190-L1192 +type DeepSeekServiceType struct { + Name string +} + +var ( + Deepseek = DeepSeekServiceType{"deepseek"} +) + +func (d DeepSeekServiceType) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DeepSeekServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "deepseek": + *d = Deepseek + default: + *d = DeepSeekServiceType{string(text)} + } + + return nil +} + +func (d DeepSeekServiceType) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go new file mode 100644 index 000000000..749eac0ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package delimitedpayloadencoding +package delimitedpayloadencoding + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L78-L82 +type DelimitedPayloadEncoding struct { + Name string +} + +var ( + Int = DelimitedPayloadEncoding{"int"} + + Float = DelimitedPayloadEncoding{"float"} + + Identity = DelimitedPayloadEncoding{"identity"} +) + +func (d DelimitedPayloadEncoding) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DelimitedPayloadEncoding) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "int": + *d = Int + case "float": + *d = Float + case "identity": + *d = Identity + default: + *d = DelimitedPayloadEncoding{string(text)} + } + + return nil +} + +func (d DelimitedPayloadEncoding) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go new file mode 100644 index 000000000..d85cd4eb7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorelementtype/densevectorelementtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package densevectorelementtype +package densevectorelementtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L64-L80 +type DenseVectorElementType struct { + Name string +} + +var ( + Bit = DenseVectorElementType{"bit"} + + Byte = DenseVectorElementType{"byte"} + + Float = DenseVectorElementType{"float"} +) + +func (d DenseVectorElementType) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DenseVectorElementType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "bit": + *d = Bit + case "byte": + *d = Byte + case "float": + *d = Float + default: + *d = DenseVectorElementType{string(text)} + } + + return nil +} + +func (d DenseVectorElementType) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go new file mode 100644 index 000000000..3dfa7ba2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorindexoptionstype/densevectorindexoptionstype.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package densevectorindexoptionstype +package densevectorindexoptionstype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L168-L213 +type DenseVectorIndexOptionsType struct { + Name string +} + +var ( + Bbqflat = DenseVectorIndexOptionsType{"bbq_flat"} + + Bbqhnsw = DenseVectorIndexOptionsType{"bbq_hnsw"} + + Flat = DenseVectorIndexOptionsType{"flat"} + + Hnsw = DenseVectorIndexOptionsType{"hnsw"} + + Int4flat = DenseVectorIndexOptionsType{"int4_flat"} + + Int4hnsw = DenseVectorIndexOptionsType{"int4_hnsw"} + + Int8flat = DenseVectorIndexOptionsType{"int8_flat"} + + Int8hnsw = DenseVectorIndexOptionsType{"int8_hnsw"} +) + +func (d DenseVectorIndexOptionsType) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DenseVectorIndexOptionsType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "bbq_flat": + *d = Bbqflat + case "bbq_hnsw": + *d = Bbqhnsw + case "flat": + *d = Flat + case "hnsw": + *d = Hnsw + case "int4_flat": + *d = Int4flat + case "int4_hnsw": + *d = Int4hnsw + case "int8_flat": + *d = Int8flat + case "int8_hnsw": + *d = Int8hnsw + default: + *d = DenseVectorIndexOptionsType{string(text)} + } + + return nil +} + +func (d DenseVectorIndexOptionsType) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go new file mode 100644 index 000000000..250c89b85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorsimilarity/densevectorsimilarity.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package densevectorsimilarity +package densevectorsimilarity + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/DenseVectorProperty.ts#L82-L127 +type DenseVectorSimilarity struct { + Name string +} + +var ( + Cosine = DenseVectorSimilarity{"cosine"} + + Dotproduct = DenseVectorSimilarity{"dot_product"} + + L2norm = DenseVectorSimilarity{"l2_norm"} + + Maxinnerproduct = DenseVectorSimilarity{"max_inner_product"} +) + +func (d DenseVectorSimilarity) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DenseVectorSimilarity) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cosine": + *d = Cosine + case "dot_product": + *d = Dotproduct + case "l2_norm": + *d = L2norm + case "max_inner_product": + *d = Maxinnerproduct + default: + *d = DenseVectorSimilarity{string(text)} + } + + return nil +} + +func (d DenseVectorSimilarity) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go new file mode 100644 index 000000000..4d2878a9f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package deploymentallocationstate +package deploymentallocationstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L331-L344 +type DeploymentAllocationState struct { + Name string +} + +var ( + Started = DeploymentAllocationState{"started"} + + Starting = DeploymentAllocationState{"starting"} + + Fullyallocated = DeploymentAllocationState{"fully_allocated"} +) + +func (d DeploymentAllocationState) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DeploymentAllocationState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "started": + *d = Started + case "starting": + *d = Starting + case "fully_allocated": + *d = Fullyallocated + default: + *d = DeploymentAllocationState{string(text)} + } + + return nil +} + +func (d DeploymentAllocationState) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go new file mode 100644 index 000000000..a121d0474 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package deploymentassignmentstate +package deploymentassignmentstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L346-L363 +type DeploymentAssignmentState struct { + Name string +} + +var ( + Started = DeploymentAssignmentState{"started"} + + Starting = DeploymentAssignmentState{"starting"} + + Stopping = DeploymentAssignmentState{"stopping"} + + Failed = DeploymentAssignmentState{"failed"} +) + +func (d DeploymentAssignmentState) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DeploymentAssignmentState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "started": + *d = Started + case "starting": + *d = Starting + case "stopping": + *d = Stopping + case "failed": + *d = Failed + default: + *d = DeploymentAssignmentState{string(text)} + } + + return nil +} + +func (d DeploymentAssignmentState) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deprecationlevel/deprecationlevel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deprecationlevel/deprecationlevel.go new file mode 100644 index 000000000..cc7c365f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deprecationlevel/deprecationlevel.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package deprecationlevel +package deprecationlevel + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/deprecations/types.ts#L23-L30 +type DeprecationLevel struct { + Name string +} + +var ( + None = DeprecationLevel{"none"} + + Info = DeprecationLevel{"info"} + + Warning = DeprecationLevel{"warning"} + + Critical = DeprecationLevel{"critical"} +) + +func (d DeprecationLevel) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DeprecationLevel) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *d = None + case "info": + *d = Info + case "warning": + *d = Warning + case "critical": + *d = Critical + default: + *d = DeprecationLevel{string(text)} + } + + return nil +} + +func (d DeprecationLevel) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go new file mode 100644 index 000000000..69095efc3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dfiindependencemeasure +package dfiindependencemeasure + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L20-L24 +type DFIIndependenceMeasure struct { + Name string +} + +var ( + Standardized = DFIIndependenceMeasure{"standardized"} + + Saturated = DFIIndependenceMeasure{"saturated"} + + Chisquared = DFIIndependenceMeasure{"chisquared"} +) + +func (d DFIIndependenceMeasure) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DFIIndependenceMeasure) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "standardized": + *d = Standardized + case "saturated": + *d = Saturated + case "chisquared": + *d = Chisquared + default: + *d = DFIIndependenceMeasure{string(text)} + } + + return nil +} + +func (d DFIIndependenceMeasure) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfraftereffect/dfraftereffect.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfraftereffect/dfraftereffect.go new file mode 100644 index 000000000..493646f67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfraftereffect/dfraftereffect.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dfraftereffect +package dfraftereffect + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L26-L30 +type DFRAfterEffect struct { + Name string +} + +var ( + No = DFRAfterEffect{"no"} + + B = DFRAfterEffect{"b"} + + L = DFRAfterEffect{"l"} +) + +func (d DFRAfterEffect) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DFRAfterEffect) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "no": + *d = No + case "b": + *d = B + case "l": + *d = L + default: + *d = DFRAfterEffect{string(text)} + } + + return nil +} + +func (d DFRAfterEffect) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go new file mode 100644 index 000000000..37190fa41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dfrbasicmodel +package dfrbasicmodel + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L32-L40 +type DFRBasicModel struct { + Name string +} + +var ( + Be = DFRBasicModel{"be"} + + D = DFRBasicModel{"d"} + + G = DFRBasicModel{"g"} + + If = DFRBasicModel{"if"} + + In = DFRBasicModel{"in"} + + Ine = DFRBasicModel{"ine"} + + P = DFRBasicModel{"p"} +) + +func (d DFRBasicModel) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DFRBasicModel) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "be": + *d = Be + case "d": + *d = D + case "g": + *d = G + case "if": + *d = If + case "in": + *d = In + case "ine": + *d = Ine + case "p": + *d = P + default: + *d = DFRBasicModel{string(text)} + } + + return nil +} + +func (d DFRBasicModel) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/displaytype/displaytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/displaytype/displaytype.go new file mode 100644 index 000000000..f03b2644c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/displaytype/displaytype.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package displaytype +package displaytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L35-L41 +type DisplayType struct { + Name string +} + +var ( + Textbox = DisplayType{"textbox"} + + Textarea = DisplayType{"textarea"} + + Numeric = DisplayType{"numeric"} + + Toggle = DisplayType{"toggle"} + + Dropdown = DisplayType{"dropdown"} +) + +func (d DisplayType) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DisplayType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "textbox": + *d = Textbox + case "textarea": + *d = Textarea + case "numeric": + *d = Numeric + case "toggle": + *d = Toggle + case "dropdown": + *d = Dropdown + default: + *d = DisplayType{string(text)} + } + + return nil +} + +func (d DisplayType) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit/distanceunit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit/distanceunit.go new file mode 100644 index 000000000..68307ce1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit/distanceunit.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package distanceunit +package distanceunit + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L30-L40 +type DistanceUnit struct { + Name string +} + +var ( + Inches = DistanceUnit{"in"} + + Feet = DistanceUnit{"ft"} + + Yards = DistanceUnit{"yd"} + + Miles = DistanceUnit{"mi"} + + Nauticmiles = DistanceUnit{"nmi"} + + Kilometers = DistanceUnit{"km"} + + Meters = DistanceUnit{"m"} + + Centimeters = DistanceUnit{"cm"} + + Millimeters = DistanceUnit{"mm"} +) + +func (d DistanceUnit) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DistanceUnit) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "in": + *d = Inches + case "ft": + *d = Feet + case "yd": + *d = Yards + case "mi": + *d = Miles + case "nmi": + *d = Nauticmiles + case "km": + *d = Kilometers + case "m": + *d = Meters + case "cm": + *d = Centimeters + case "mm": + *d = Millimeters + default: + *d = DistanceUnit{string(text)} + } + + return nil +} + +func (d DistanceUnit) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping/dynamicmapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping/dynamicmapping.go new file mode 100644 index 000000000..58d358640 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping/dynamicmapping.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
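For DistanceUnit, note that the Go identifiers are spelled-out names while the serialized values remain the Query DSL abbreviations; the identifier and the wire value are deliberately different. A tiny sketch (hypothetical usage, same import-path assumption as above):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit"
)

func main() {
	// fmt uses the Stringer implementation, which returns the wire value.
	fmt.Println(distanceunit.Nauticmiles) // nmi
	fmt.Println(distanceunit.Kilometers)  // km
}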
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package dynamicmapping +package dynamicmapping + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/dynamic-template.ts#L50-L59 +type DynamicMapping struct { + Name string +} + +var ( + Strict = DynamicMapping{"strict"} + + Runtime = DynamicMapping{"runtime"} + + True = DynamicMapping{"true"} + + False = DynamicMapping{"false"} +) + +func (d *DynamicMapping) UnmarshalJSON(data []byte) error { + return d.UnmarshalText(data) +} + +func (d DynamicMapping) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +func (d *DynamicMapping) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "strict": + *d = Strict + case "runtime": + *d = Runtime + case "true": + *d = True + case "false": + *d = False + default: + *d = DynamicMapping{string(text)} + } + + return nil +} + +func (d DynamicMapping) String() string { + return d.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go new file mode 100644 index 000000000..f92d1d0de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype/ecscompatibilitytype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
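DynamicMapping is the one enum in this stretch of the diff that also implements UnmarshalJSON. In mappings, `dynamic` may appear as a bare JSON boolean as well as a string, and forwarding the raw token to UnmarshalText, whose quote-stripping normalizes both spellings, covers the two cases. A sketch of the effect (hypothetical usage):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
)

func main() {
	var d dynamicmapping.DynamicMapping

	// Bare boolean token: UnmarshalJSON hands `true` straight to
	// UnmarshalText, which matches the "true" arm.
	_ = json.Unmarshal([]byte(`true`), &d)
	fmt.Println(d == dynamicmapping.True) // true

	// Quoted string: the strings.ReplaceAll quote-stripping in
	// UnmarshalText makes this resolve to the same value.
	_ = json.Unmarshal([]byte(`"strict"`), &d)
	fmt.Println(d == dynamicmapping.Strict) // true
}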
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package ecscompatibilitytype +package ecscompatibilitytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/_types/Structure.ts#L40-L43 +type EcsCompatibilityType struct { + Name string +} + +var ( + Disabled = EcsCompatibilityType{"disabled"} + + V1 = EcsCompatibilityType{"v1"} +) + +func (e EcsCompatibilityType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EcsCompatibilityType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *e = Disabled + case "v1": + *e = V1 + default: + *e = EcsCompatibilityType{string(text)} + } + + return nil +} + +func (e EcsCompatibilityType) String() string { + return e.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/edgengramside/edgengramside.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/edgengramside/edgengramside.go new file mode 100644 index 000000000..7bdc00f6b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/edgengramside/edgengramside.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package edgengramside +package edgengramside + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L92-L95 +type EdgeNGramSide struct { + Name string +} + +var ( + Front = EdgeNGramSide{"front"} + + Back = EdgeNGramSide{"back"} +) + +func (e EdgeNGramSide) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +func (e *EdgeNGramSide) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "front": + *e = Front + case "back": + *e = Back + default: + *e = EdgeNGramSide{string(text)} + } + + return nil +} + +func (e EdgeNGramSide) String() string { + return e.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype/elasticsearchservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype/elasticsearchservicetype.go new file mode 100644 index 000000000..c4320fa73 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype/elasticsearchservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package elasticsearchservicetype
+package elasticsearchservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1265-L1267
+type ElasticsearchServiceType struct {
+    Name string
+}
+
+var (
+    Elasticsearch = ElasticsearchServiceType{"elasticsearch"}
+)
+
+func (e ElasticsearchServiceType) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ElasticsearchServiceType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "elasticsearch":
+        *e = Elasticsearch
+    default:
+        *e = ElasticsearchServiceType{string(text)}
+    }
+
+    return nil
+}
+
+func (e ElasticsearchServiceType) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype/elserservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype/elserservicetype.go
new file mode 100644
index 000000000..8d8c3a11c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype/elserservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package elserservicetype
+package elserservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1301-L1303
+type ElserServiceType struct {
+    Name string
+}
+
+var (
+    Elser = ElserServiceType{"elser"}
+)
+
+func (e ElserServiceType) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ElserServiceType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "elser":
+        *e = Elser
+    default:
+        *e = ElserServiceType{string(text)}
+    }
+
+    return nil
+}
+
+func (e ElserServiceType) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority/emailpriority.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority/emailpriority.go
new file mode 100644
index 000000000..90800904c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority/emailpriority.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package emailpriority
+package emailpriority
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L197-L203
+type EmailPriority struct {
+    Name string
+}
+
+var (
+    Lowest = EmailPriority{"lowest"}
+
+    Low = EmailPriority{"low"}
+
+    Normal = EmailPriority{"normal"}
+
+    High = EmailPriority{"high"}
+
+    Highest = EmailPriority{"highest"}
+)
+
+func (e EmailPriority) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *EmailPriority) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "lowest":
+        *e = Lowest
+    case "low":
+        *e = Low
+    case "normal":
+        *e = Normal
+    case "high":
+        *e = High
+    case "highest":
+        *e = Highest
+    default:
+        *e = EmailPriority{string(text)}
+    }
+
+    return nil
+}
+
+func (e EmailPriority) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go
new file mode 100644
index 000000000..24de6cf24
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package enrichpolicyphase
+package enrichpolicyphase
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/execute_policy/types.ts#L25-L31
+type EnrichPolicyPhase struct {
+    Name string
+}
+
+var (
+    SCHEDULED = EnrichPolicyPhase{"SCHEDULED"}
+
+    RUNNING = EnrichPolicyPhase{"RUNNING"}
+
+    COMPLETE = EnrichPolicyPhase{"COMPLETE"}
+
+    FAILED = EnrichPolicyPhase{"FAILED"}
+
+    CANCELLED = EnrichPolicyPhase{"CANCELLED"}
+)
+
+func (e EnrichPolicyPhase) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *EnrichPolicyPhase) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "scheduled":
+        *e = SCHEDULED
+    case "running":
+        *e = RUNNING
+    case "complete":
+        *e = COMPLETE
+    case "failed":
+        *e = FAILED
+    case "cancelled":
+        *e = CANCELLED
+    default:
+        *e = EnrichPolicyPhase{string(text)}
+    }
+
+    return nil
+}
+
+func (e EnrichPolicyPhase) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlclusterstatus/esqlclusterstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlclusterstatus/esqlclusterstatus.go
new file mode 100644
index 000000000..63613af9e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlclusterstatus/esqlclusterstatus.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package esqlclusterstatus
+package esqlclusterstatus
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L83-L89
+type EsqlClusterStatus struct {
+    Name string
+}
+
+var (
+    Running = EsqlClusterStatus{"running"}
+
+    Successful = EsqlClusterStatus{"successful"}
+
+    Partial = EsqlClusterStatus{"partial"}
+
+    Skipped = EsqlClusterStatus{"skipped"}
+
+    Failed = EsqlClusterStatus{"failed"}
+)
+
+func (e EsqlClusterStatus) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *EsqlClusterStatus) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "running":
+        *e = Running
+    case "successful":
+        *e = Successful
+    case "partial":
+        *e = Partial
+    case "skipped":
+        *e = Skipped
+    case "failed":
+        *e = Failed
+    default:
+        *e = EsqlClusterStatus{string(text)}
+    }
+
+    return nil
+}
+
+func (e EsqlClusterStatus) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat/esqlformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat/esqlformat.go
new file mode 100644
index 000000000..5948dd0d9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat/esqlformat.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package esqlformat
+package esqlformat
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/QueryParameters.ts#L20-L29
+type EsqlFormat struct {
+    Name string
+}
+
+var (
+    Csv = EsqlFormat{"csv"}
+
+    Json = EsqlFormat{"json"}
+
+    Tsv = EsqlFormat{"tsv"}
+
+    Txt = EsqlFormat{"txt"}
+
+    Yaml = EsqlFormat{"yaml"}
+
+    Cbor = EsqlFormat{"cbor"}
+
+    Smile = EsqlFormat{"smile"}
+
+    Arrow = EsqlFormat{"arrow"}
+)
+
+func (e EsqlFormat) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *EsqlFormat) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "csv":
+        *e = Csv
+    case "json":
+        *e = Json
+    case "tsv":
+        *e = Tsv
+    case "txt":
+        *e = Txt
+    case "yaml":
+        *e = Yaml
+    case "cbor":
+        *e = Cbor
+    case "smile":
+        *e = Smile
+    case "arrow":
+        *e = Arrow
+    default:
+        *e = EsqlFormat{string(text)}
+    }
+
+    return nil
+}
+
+func (e EsqlFormat) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent/excludefrequent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent/excludefrequent.go
new file mode 100644
index 000000000..172a8bf2c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent/excludefrequent.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package excludefrequent
+package excludefrequent
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Detector.ts#L145-L150
+type ExcludeFrequent struct {
+    Name string
+}
+
+var (
+    All = ExcludeFrequent{"all"}
+
+    None = ExcludeFrequent{"none"}
+
+    By = ExcludeFrequent{"by"}
+
+    Over = ExcludeFrequent{"over"}
+)
+
+func (e ExcludeFrequent) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ExcludeFrequent) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "all":
+        *e = All
+    case "none":
+        *e = None
+    case "by":
+        *e = By
+    case "over":
+        *e = Over
+    default:
+        *e = ExcludeFrequent{string(text)}
+    }
+
+    return nil
+}
+
+func (e ExcludeFrequent) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionphase/executionphase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionphase/executionphase.go
new file mode 100644
index 000000000..e98477af6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionphase/executionphase.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package executionphase
+package executionphase
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L49-L58
+type ExecutionPhase struct {
+    Name string
+}
+
+var (
+    Awaitsexecution = ExecutionPhase{"awaits_execution"}
+
+    Started = ExecutionPhase{"started"}
+
+    Input = ExecutionPhase{"input"}
+
+    Condition = ExecutionPhase{"condition"}
+
+    Actions = ExecutionPhase{"actions"}
+
+    Watchtransform = ExecutionPhase{"watch_transform"}
+
+    Aborted = ExecutionPhase{"aborted"}
+
+    Finished = ExecutionPhase{"finished"}
+)
+
+func (e ExecutionPhase) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ExecutionPhase) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "awaits_execution":
+        *e = Awaitsexecution
+    case "started":
+        *e = Started
+    case "input":
+        *e = Input
+    case "condition":
+        *e = Condition
+    case "actions":
+        *e = Actions
+    case "watch_transform":
+        *e = Watchtransform
+    case "aborted":
+        *e = Aborted
+    case "finished":
+        *e = Finished
+    default:
+        *e = ExecutionPhase{string(text)}
+    }
+
+    return nil
+}
+
+func (e ExecutionPhase) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionstatus/executionstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionstatus/executionstatus.go
new file mode 100644
index 000000000..d8b497793
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionstatus/executionstatus.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package executionstatus
+package executionstatus
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L38-L47
+type ExecutionStatus struct {
+    Name string
+}
+
+var (
+    Awaitsexecution = ExecutionStatus{"awaits_execution"}
+
+    Checking = ExecutionStatus{"checking"}
+
+    Executionnotneeded = ExecutionStatus{"execution_not_needed"}
+
+    Throttled = ExecutionStatus{"throttled"}
+
+    Executed = ExecutionStatus{"executed"}
+
+    Failed = ExecutionStatus{"failed"}
+
+    Deletedwhilequeued = ExecutionStatus{"deleted_while_queued"}
+
+    Notexecutedalreadyqueued = ExecutionStatus{"not_executed_already_queued"}
+)
+
+func (e ExecutionStatus) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ExecutionStatus) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "awaits_execution":
+        *e = Awaitsexecution
+    case "checking":
+        *e = Checking
+    case "execution_not_needed":
+        *e = Executionnotneeded
+    case "throttled":
+        *e = Throttled
+    case "executed":
+        *e = Executed
+    case "failed":
+        *e = Failed
+    case "deleted_while_queued":
+        *e = Deletedwhilequeued
+    case "not_executed_already_queued":
+        *e = Notexecutedalreadyqueued
+    default:
+        *e = ExecutionStatus{string(text)}
+    }
+
+    return nil
+}
+
+func (e ExecutionStatus) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard/expandwildcard.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard/expandwildcard.go
new file mode 100644
index 000000000..661fc6730
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard/expandwildcard.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package expandwildcard
+package expandwildcard
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L198-L212
+type ExpandWildcard struct {
+    Name string
+}
+
+var (
+    All = ExpandWildcard{"all"}
+
+    Open = ExpandWildcard{"open"}
+
+    Closed = ExpandWildcard{"closed"}
+
+    Hidden = ExpandWildcard{"hidden"}
+
+    None = ExpandWildcard{"none"}
+)
+
+func (e ExpandWildcard) MarshalText() (text []byte, err error) {
+    return []byte(e.String()), nil
+}
+
+func (e *ExpandWildcard) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "all":
+        *e = All
+    case "open":
+        *e = Open
+    case "closed":
+        *e = Closed
+    case "hidden":
+        *e = Hidden
+    case "none":
+        *e = None
+    default:
+        *e = ExpandWildcard{string(text)}
+    }
+
+    return nil
+}
+
+func (e ExpandWildcard) String() string {
+    return e.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/failurestorestatus/failurestorestatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/failurestorestatus/failurestorestatus.go
new file mode 100644
index 000000000..c92c2fbdf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/failurestorestatus/failurestorestatus.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package failurestorestatus
+package failurestorestatus
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L86-L91
+type FailureStoreStatus struct {
+    Name string
+}
+
+var (
+    Notapplicableorunknown = FailureStoreStatus{"not_applicable_or_unknown"}
+
+    Used = FailureStoreStatus{"used"}
+
+    Notenabled = FailureStoreStatus{"not_enabled"}
+
+    Failed = FailureStoreStatus{"failed"}
+)
+
+func (f FailureStoreStatus) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FailureStoreStatus) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "not_applicable_or_unknown":
+        *f = Notapplicableorunknown
+    case "used":
+        *f = Used
+    case "not_enabled":
+        *f = Notenabled
+    case "failed":
+        *f = Failed
+    default:
+        *f = FailureStoreStatus{string(text)}
+    }
+
+    return nil
+}
+
+func (f FailureStoreStatus) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature/feature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature/feature.go
new file mode 100644
index 000000000..6546c9ba6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature/feature.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package feature
+package feature
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get/IndicesGetRequest.ts#L98-L102
+type Feature struct {
+    Name string
+}
+
+var (
+    Aliases = Feature{"aliases"}
+
+    Mappings = Feature{"mappings"}
+
+    Settings = Feature{"settings"}
+)
+
+func (f Feature) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *Feature) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "aliases":
+        *f = Aliases
+    case "mappings":
+        *f = Mappings
+    case "settings":
+        *f = Settings
+    default:
+        *f = Feature{string(text)}
+    }
+
+    return nil
+}
+
+func (f Feature) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go
new file mode 100644
index 000000000..3094e1c0a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package fieldsortnumerictype
+package fieldsortnumerictype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L36-L41
+type FieldSortNumericType struct {
+    Name string
+}
+
+var (
+    Long = FieldSortNumericType{"long"}
+
+    Double = FieldSortNumericType{"double"}
+
+    Date = FieldSortNumericType{"date"}
+
+    Datenanos = FieldSortNumericType{"date_nanos"}
+)
+
+func (f FieldSortNumericType) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FieldSortNumericType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "long":
+        *f = Long
+    case "double":
+        *f = Double
+    case "date":
+        *f = Date
+    case "date_nanos":
+        *f = Datenanos
+    default:
+        *f = FieldSortNumericType{string(text)}
+    }
+
+    return nil
+}
+
+func (f FieldSortNumericType) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldtype/fieldtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldtype/fieldtype.go
new file mode 100644
index 000000000..4b1b4a7c7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldtype/fieldtype.go
@@ -0,0 +1,241 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package fieldtype
+package fieldtype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/Property.ts#L193-L242
+type FieldType struct {
+    Name string
+}
+
+var (
+    None = FieldType{"none"}
+
+    Geopoint = FieldType{"geo_point"}
+
+    Geoshape = FieldType{"geo_shape"}
+
+    Ip = FieldType{"ip"}
+
+    Binary = FieldType{"binary"}
+
+    Keyword = FieldType{"keyword"}
+
+    Text = FieldType{"text"}
+
+    Searchasyoutype = FieldType{"search_as_you_type"}
+
+    Date = FieldType{"date"}
+
+    Datenanos = FieldType{"date_nanos"}
+
+    Boolean = FieldType{"boolean"}
+
+    Completion = FieldType{"completion"}
+
+    Nested = FieldType{"nested"}
+
+    Object = FieldType{"object"}
+
+    Passthrough = FieldType{"passthrough"}
+
+    Version = FieldType{"version"}
+
+    Murmur3 = FieldType{"murmur3"}
+
+    Tokencount = FieldType{"token_count"}
+
+    Percolator = FieldType{"percolator"}
+
+    Integer = FieldType{"integer"}
+
+    Long = FieldType{"long"}
+
+    Short = FieldType{"short"}
+
+    Byte = FieldType{"byte"}
+
+    Float = FieldType{"float"}
+
+    Halffloat = FieldType{"half_float"}
+
+    Scaledfloat = FieldType{"scaled_float"}
+
+    Double = FieldType{"double"}
+
+    Integerrange = FieldType{"integer_range"}
+
+    Floatrange = FieldType{"float_range"}
+
+    Longrange = FieldType{"long_range"}
+
+    Doublerange = FieldType{"double_range"}
+
+    Daterange = FieldType{"date_range"}
+
+    Iprange = FieldType{"ip_range"}
+
+    Alias = FieldType{"alias"}
+
+    Join = FieldType{"join"}
+
+    Rankfeature = FieldType{"rank_feature"}
+
+    Rankfeatures = FieldType{"rank_features"}
+
+    Flattened = FieldType{"flattened"}
+
+    Shape = FieldType{"shape"}
+
+    Histogram = FieldType{"histogram"}
+
+    Constantkeyword = FieldType{"constant_keyword"}
+
+    Countedkeyword = FieldType{"counted_keyword"}
+
+    Aggregatemetricdouble = FieldType{"aggregate_metric_double"}
+
+    Densevector = FieldType{"dense_vector"}
+
+    Semantictext = FieldType{"semantic_text"}
+
+    Sparsevector = FieldType{"sparse_vector"}
+
+    Matchonlytext = FieldType{"match_only_text"}
+
+    Icucollationkeyword = FieldType{"icu_collation_keyword"}
+)
+
+func (f FieldType) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FieldType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "none":
+        *f = None
+    case "geo_point":
+        *f = Geopoint
+    case "geo_shape":
+        *f = Geoshape
+    case "ip":
+        *f = Ip
+    case "binary":
+        *f = Binary
+    case "keyword":
+        *f = Keyword
+    case "text":
+        *f = Text
+    case "search_as_you_type":
+        *f = Searchasyoutype
+    case "date":
+        *f = Date
+    case "date_nanos":
+        *f = Datenanos
+    case "boolean":
+        *f = Boolean
+    case "completion":
+        *f = Completion
+    case "nested":
+        *f = Nested
+    case "object":
+        *f = Object
+    case "passthrough":
+        *f = Passthrough
+    case "version":
+        *f = Version
+    case "murmur3":
+        *f = Murmur3
+    case "token_count":
+        *f = Tokencount
+    case "percolator":
+        *f = Percolator
+    case "integer":
+        *f = Integer
+    case "long":
+        *f = Long
+    case "short":
+        *f = Short
+    case "byte":
+        *f = Byte
+    case "float":
+        *f = Float
+    case "half_float":
+        *f = Halffloat
+    case "scaled_float":
+        *f = Scaledfloat
+    case "double":
+        *f = Double
+    case "integer_range":
+        *f = Integerrange
+    case "float_range":
+        *f = Floatrange
+    case "long_range":
+        *f = Longrange
+    case "double_range":
+        *f = Doublerange
+    case "date_range":
+        *f = Daterange
+    case "ip_range":
+        *f = Iprange
+    case "alias":
+        *f = Alias
+    case "join":
+        *f = Join
+    case "rank_feature":
+        *f = Rankfeature
+    case "rank_features":
+        *f = Rankfeatures
+    case "flattened":
+        *f = Flattened
+    case "shape":
+        *f = Shape
+    case "histogram":
+        *f = Histogram
+    case "constant_keyword":
+        *f = Constantkeyword
+    case "counted_keyword":
+        *f = Countedkeyword
+    case "aggregate_metric_double":
+        *f = Aggregatemetricdouble
+    case "dense_vector":
+        *f = Densevector
+    case "semantic_text":
+        *f = Semantictext
+    case "sparse_vector":
+        *f = Sparsevector
+    case "match_only_text":
+        *f = Matchonlytext
+    case "icu_collation_keyword":
+        *f = Icucollationkeyword
+    default:
+        *f = FieldType{string(text)}
+    }
+
+    return nil
+}
+
+func (f FieldType) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go
new file mode 100644
index 000000000..41116b2df
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go
@@ -0,0 +1,89 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package fieldvaluefactormodifier
+package fieldvaluefactormodifier
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L323-L366
+type FieldValueFactorModifier struct {
+    Name string
+}
+
+var (
+    None = FieldValueFactorModifier{"none"}
+
+    Log = FieldValueFactorModifier{"log"}
+
+    Log1p = FieldValueFactorModifier{"log1p"}
+
+    Log2p = FieldValueFactorModifier{"log2p"}
+
+    Ln = FieldValueFactorModifier{"ln"}
+
+    Ln1p = FieldValueFactorModifier{"ln1p"}
+
+    Ln2p = FieldValueFactorModifier{"ln2p"}
+
+    Square = FieldValueFactorModifier{"square"}
+
+    Sqrt = FieldValueFactorModifier{"sqrt"}
+
+    Reciprocal = FieldValueFactorModifier{"reciprocal"}
+)
+
+func (f FieldValueFactorModifier) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FieldValueFactorModifier) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "none":
+        *f = None
+    case "log":
+        *f = Log
+    case "log1p":
+        *f = Log1p
+    case "log2p":
+        *f = Log2p
+    case "ln":
+        *f = Ln
+    case "ln1p":
+        *f = Ln1p
+    case "ln2p":
+        *f = Ln2p
+    case "square":
+        *f = Square
+    case "sqrt":
+        *f = Sqrt
+    case "reciprocal":
+        *f = Reciprocal
+    default:
+        *f = FieldValueFactorModifier{string(text)}
+    }
+
+    return nil
+}
+
+func (f FieldValueFactorModifier) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringpolicy/filteringpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringpolicy/filteringpolicy.go
new file mode 100644
index 000000000..26b3f525a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringpolicy/filteringpolicy.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package filteringpolicy
+package filteringpolicy
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L155-L158
+type FilteringPolicy struct {
+    Name string
+}
+
+var (
+    Exclude = FilteringPolicy{"exclude"}
+
+    Include = FilteringPolicy{"include"}
+)
+
+func (f FilteringPolicy) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FilteringPolicy) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "exclude":
+        *f = Exclude
+    case "include":
+        *f = Include
+    default:
+        *f = FilteringPolicy{string(text)}
+    }
+
+    return nil
+}
+
+func (f FilteringPolicy) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringrulerule/filteringrulerule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringrulerule/filteringrulerule.go
new file mode 100644
index 000000000..93afaef22
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringrulerule/filteringrulerule.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package filteringrulerule
+package filteringrulerule
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L160-L168
+type FilteringRuleRule struct {
+    Name string
+}
+
+var (
+    Contains = FilteringRuleRule{"contains"}
+
+    Endswith = FilteringRuleRule{"ends_with"}
+
+    Equals = FilteringRuleRule{"equals"}
+
+    Regex = FilteringRuleRule{"regex"}
+
+    Startswith = FilteringRuleRule{"starts_with"}
+
+    Greaterthan = FilteringRuleRule{">"}
+
+    Lessthan = FilteringRuleRule{"<"}
+)
+
+func (f FilteringRuleRule) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FilteringRuleRule) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "contains":
+        *f = Contains
+    case "ends_with":
+        *f = Endswith
+    case "equals":
+        *f = Equals
+    case "regex":
+        *f = Regex
+    case "starts_with":
+        *f = Startswith
+    case ">":
+        *f = Greaterthan
+    case "<":
+        *f = Lessthan
+    default:
+        *f = FilteringRuleRule{string(text)}
+    }
+
+    return nil
+}
+
+func (f FilteringRuleRule) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go
new file mode 100644
index 000000000..1d0245e39
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringvalidationstate/filteringvalidationstate.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package filteringvalidationstate
+package filteringvalidationstate
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L186-L190
+type FilteringValidationState struct {
+    Name string
+}
+
+var (
+    Edited = FilteringValidationState{"edited"}
+
+    Invalid = FilteringValidationState{"invalid"}
+
+    Valid = FilteringValidationState{"valid"}
+)
+
+func (f FilteringValidationState) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FilteringValidationState) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "edited":
+        *f = Edited
+    case "invalid":
+        *f = Invalid
+    case "valid":
+        *f = Valid
+    default:
+        *f = FilteringValidationState{string(text)}
+    }
+
+    return nil
+}
+
+func (f FilteringValidationState) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filtertype/filtertype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filtertype/filtertype.go
new file mode 100644
index 000000000..0a1effb8d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filtertype/filtertype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package filtertype
+package filtertype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Filter.ts#L43-L46
+type FilterType struct {
+    Name string
+}
+
+var (
+    Include = FilterType{"include"}
+
+    Exclude = FilterType{"exclude"}
+)
+
+func (f FilterType) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FilterType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "include":
+        *f = Include
+    case "exclude":
+        *f = Exclude
+    default:
+        *f = FilterType{string(text)}
+    }
+
+    return nil
+}
+
+func (f FilterType) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go
new file mode 100644
index 000000000..b7d71a7bc
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fingerprintdigest/fingerprintdigest.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package fingerprintdigest
+package fingerprintdigest
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L897-L903
+type FingerprintDigest struct {
+    Name string
+}
+
+var (
+    Md5 = FingerprintDigest{"MD5"}
+
+    Sha1 = FingerprintDigest{"SHA-1"}
+
+    Sha256 = FingerprintDigest{"SHA-256"}
+
+    Sha512 = FingerprintDigest{"SHA-512"}
+
+    MurmurHash3 = FingerprintDigest{"MurmurHash3"}
+)
+
+func (f FingerprintDigest) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FingerprintDigest) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "md5":
+        *f = Md5
+    case "sha-1":
+        *f = Sha1
+    case "sha-256":
+        *f = Sha256
+    case "sha-512":
+        *f = Sha512
+    case "murmurhash3":
+        *f = MurmurHash3
+    default:
+        *f = FingerprintDigest{string(text)}
+    }
+
+    return nil
+}
+
+func (f FingerprintDigest) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/followerindexstatus/followerindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/followerindexstatus/followerindexstatus.go
new file mode 100644
index 000000000..2e14148a4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/followerindexstatus/followerindexstatus.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package followerindexstatus
+package followerindexstatus
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow_info/types.ts#L37-L40
+type FollowerIndexStatus struct {
+    Name string
+}
+
+var (
+    Active = FollowerIndexStatus{"active"}
+
+    Paused = FollowerIndexStatus{"paused"}
+)
+
+func (f FollowerIndexStatus) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FollowerIndexStatus) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "active":
+        *f = Active
+    case "paused":
+        *f = Paused
+    default:
+        *f = FollowerIndexStatus{string(text)}
+    }
+
+    return nil
+}
+
+func (f FollowerIndexStatus) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype/formattype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype/formattype.go
new file mode 100644
index 000000000..7da8a36bf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype/formattype.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package formattype
+package formattype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/_types/Structure.ts#L45-L50
+type FormatType struct {
+    Name string
+}
+
+var (
+    Delimited = FormatType{"delimited"}
+
+    Ndjson = FormatType{"ndjson"}
+
+    Semistructuredtext = FormatType{"semi_structured_text"}
+
+    Xml = FormatType{"xml"}
+)
+
+func (f FormatType) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FormatType) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "delimited":
+        *f = Delimited
+    case "ndjson":
+        *f = Ndjson
+    case "semi_structured_text":
+        *f = Semistructuredtext
+    case "xml":
+        *f = Xml
+    default:
+        *f = FormatType{string(text)}
+    }
+
+    return nil
+}
+
+func (f FormatType) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionboostmode/functionboostmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionboostmode/functionboostmode.go
new file mode 100644
index 000000000..847c8698c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionboostmode/functionboostmode.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package functionboostmode
+package functionboostmode
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L295-L321
+type FunctionBoostMode struct {
+    Name string
+}
+
+var (
+    Multiply = FunctionBoostMode{"multiply"}
+
+    Replace = FunctionBoostMode{"replace"}
+
+    Sum = FunctionBoostMode{"sum"}
+
+    Avg = FunctionBoostMode{"avg"}
+
+    Max = FunctionBoostMode{"max"}
+
+    Min = FunctionBoostMode{"min"}
+)
+
+func (f FunctionBoostMode) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FunctionBoostMode) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "multiply":
+        *f = Multiply
+    case "replace":
+        *f = Replace
+    case "sum":
+        *f = Sum
+    case "avg":
+        *f = Avg
+    case "max":
+        *f = Max
+    case "min":
+        *f = Min
+    default:
+        *f = FunctionBoostMode{string(text)}
+    }
+
+    return nil
+}
+
+func (f FunctionBoostMode) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionscoremode/functionscoremode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionscoremode/functionscoremode.go
new file mode 100644
index 000000000..8303d5522
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionscoremode/functionscoremode.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package functionscoremode
+package functionscoremode
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L268-L293
+type FunctionScoreMode struct {
+    Name string
+}
+
+var (
+    Multiply = FunctionScoreMode{"multiply"}
+
+    Sum = FunctionScoreMode{"sum"}
+
+    Avg = FunctionScoreMode{"avg"}
+
+    First = FunctionScoreMode{"first"}
+
+    Max = FunctionScoreMode{"max"}
+
+    Min = FunctionScoreMode{"min"}
+)
+
+func (f FunctionScoreMode) MarshalText() (text []byte, err error) {
+    return []byte(f.String()), nil
+}
+
+func (f *FunctionScoreMode) UnmarshalText(text []byte) error {
+    switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+    case "multiply":
+        *f = Multiply
+    case "sum":
+        *f = Sum
+    case "avg":
+        *f = Avg
+    case "first":
+        *f = First
+    case "max":
+        *f = Max
+    case "min":
+        *f = Min
+    default:
+        *f = FunctionScoreMode{string(text)}
+    }
+
+    return nil
+}
+
+func (f FunctionScoreMode) String() string {
+    return f.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy/gappolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy/gappolicy.go
new file mode 100644
index 000000000..2db74665c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy/gappolicy.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package gappolicy
+package gappolicy
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L61-L76
+type GapPolicy struct {
+	Name string
+}
+
+var (
+	Skip = GapPolicy{"skip"}
+
+	Insertzeros = GapPolicy{"insert_zeros"}
+
+	Keepvalues = GapPolicy{"keep_values"}
+)
+
+func (g GapPolicy) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GapPolicy) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "skip":
+		*g = Skip
+	case "insert_zeros":
+		*g = Insertzeros
+	case "keep_values":
+		*g = Keepvalues
+	default:
+		*g = GapPolicy{string(text)}
+	}
+
+	return nil
+}
+
+func (g GapPolicy) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype/geodistancetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype/geodistancetype.go
new file mode 100644
index 000000000..d48b1a74d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype/geodistancetype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geodistancetype
+package geodistancetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L42-L51
+type GeoDistanceType struct {
+	Name string
+}
+
+var (
+	Arc = GeoDistanceType{"arc"}
+
+	Plane = GeoDistanceType{"plane"}
+)
+
+func (g GeoDistanceType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoDistanceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "arc":
+		*g = Arc
+	case "plane":
+		*g = Plane
+	default:
+		*g = GeoDistanceType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoDistanceType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoexecution/geoexecution.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoexecution/geoexecution.go
new file mode 100644
index 000000000..44f681e51
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoexecution/geoexecution.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geoexecution
+package geoexecution
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L59-L62
+type GeoExecution struct {
+	Name string
+}
+
+var (
+	Memory = GeoExecution{"memory"}
+
+	Indexed = GeoExecution{"indexed"}
+)
+
+func (g GeoExecution) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoExecution) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "memory":
+		*g = Memory
+	case "indexed":
+		*g = Indexed
+	default:
+		*g = GeoExecution{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoExecution) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go
new file mode 100644
index 000000000..422858012
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtargetformat/geogridtargetformat.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geogridtargetformat
+package geogridtargetformat
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L438-L441
+type GeoGridTargetFormat struct {
+	Name string
+}
+
+var (
+	Geojson = GeoGridTargetFormat{"geojson"}
+
+	Wkt = GeoGridTargetFormat{"wkt"}
+)
+
+func (g GeoGridTargetFormat) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoGridTargetFormat) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "geojson":
+		*g = Geojson
+	case "wkt":
+		*g = Wkt
+	default:
+		*g = GeoGridTargetFormat{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoGridTargetFormat) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtiletype/geogridtiletype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtiletype/geogridtiletype.go
new file mode 100644
index 000000000..bbf96ef6d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtiletype/geogridtiletype.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geogridtiletype
+package geogridtiletype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L432-L436
+type GeoGridTileType struct {
+	Name string
+}
+
+var (
+	Geotile = GeoGridTileType{"geotile"}
+
+	Geohex = GeoGridTileType{"geohex"}
+
+	Geohash = GeoGridTileType{"geohash"}
+)
+
+func (g GeoGridTileType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoGridTileType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "geotile":
+		*g = Geotile
+	case "geohex":
+		*g = Geohex
+	case "geohash":
+		*g = Geohash
+	default:
+		*g = GeoGridTileType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoGridTileType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation/geoorientation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation/geoorientation.go
new file mode 100644
index 000000000..d80acb07d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation/geoorientation.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geoorientation
+package geoorientation
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L41-L46
+type GeoOrientation struct {
+	Name string
+}
+
+var (
+	Right = GeoOrientation{"right"}
+
+	Left = GeoOrientation{"left"}
+)
+
+func (g GeoOrientation) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoOrientation) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "right":
+		*g = Right
+	case "left":
+		*g = Left
+	default:
+		*g = GeoOrientation{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoOrientation) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geopointmetrictype/geopointmetrictype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geopointmetrictype/geopointmetrictype.go
new file mode 100644
index 000000000..0988ce3d9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geopointmetrictype/geopointmetrictype.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geopointmetrictype
+package geopointmetrictype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L35-L39
+type GeoPointMetricType struct {
+	Name string
+}
+
+var (
+	Gauge = GeoPointMetricType{"gauge"}
+
+	Counter = GeoPointMetricType{"counter"}
+
+	Position = GeoPointMetricType{"position"}
+)
+
+func (g GeoPointMetricType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoPointMetricType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "gauge":
+		*g = Gauge
+	case "counter":
+		*g = Counter
+	case "position":
+		*g = Position
+	default:
+		*g = GeoPointMetricType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoPointMetricType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation/geoshaperelation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation/geoshaperelation.go
new file mode 100644
index 000000000..192fbd4a7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation/geoshaperelation.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geoshaperelation
+package geoshaperelation
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L64-L82
+type GeoShapeRelation struct {
+	Name string
+}
+
+var (
+	Intersects = GeoShapeRelation{"intersects"}
+
+	Disjoint = GeoShapeRelation{"disjoint"}
+
+	Within = GeoShapeRelation{"within"}
+
+	Contains = GeoShapeRelation{"contains"}
+)
+
+func (g GeoShapeRelation) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoShapeRelation) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "intersects":
+		*g = Intersects
+	case "disjoint":
+		*g = Disjoint
+	case "within":
+		*g = Within
+	case "contains":
+		*g = Contains
+	default:
+		*g = GeoShapeRelation{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoShapeRelation) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geostrategy/geostrategy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geostrategy/geostrategy.go
new file mode 100644
index 000000000..a0d71b389
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geostrategy/geostrategy.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geostrategy
+package geostrategy
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L64-L67
+type GeoStrategy struct {
+	Name string
+}
+
+var (
+	Recursive = GeoStrategy{"recursive"}
+
+	Term = GeoStrategy{"term"}
+)
+
+func (g GeoStrategy) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoStrategy) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "recursive":
+		*g = Recursive
+	case "term":
+		*g = Term
+	default:
+		*g = GeoStrategy{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoStrategy) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go
new file mode 100644
index 000000000..26aaa052f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package geovalidationmethod
+package geovalidationmethod
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L173-L183
+type GeoValidationMethod struct {
+	Name string
+}
+
+var (
+	Coerce = GeoValidationMethod{"coerce"}
+
+	Ignoremalformed = GeoValidationMethod{"ignore_malformed"}
+
+	Strict = GeoValidationMethod{"strict"}
+)
+
+func (g GeoValidationMethod) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GeoValidationMethod) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "coerce":
+		*g = Coerce
+	case "ignore_malformed":
+		*g = Ignoremalformed
+	case "strict":
+		*g = Strict
+	default:
+		*g = GeoValidationMethod{string(text)}
+	}
+
+	return nil
+}
+
+func (g GeoValidationMethod) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype/googleaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype/googleaiservicetype.go
new file mode 100644
index 000000000..f92902456
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype/googleaiservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package googleaiservicetype
+package googleaiservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1328-L1330
+type GoogleAiServiceType struct {
+	Name string
+}
+
+var (
+	Googleaistudio = GoogleAiServiceType{"googleaistudio"}
+)
+
+func (g GoogleAiServiceType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GoogleAiServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "googleaistudio":
+		*g = Googleaistudio
+	default:
+		*g = GoogleAiServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GoogleAiServiceType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype/googlevertexaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype/googlevertexaiservicetype.go
new file mode 100644
index 000000000..e54c1b506
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype/googlevertexaiservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package googlevertexaiservicetype
+package googlevertexaiservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1378-L1380
+type GoogleVertexAIServiceType struct {
+	Name string
+}
+
+var (
+	Googlevertexai = GoogleVertexAIServiceType{"googlevertexai"}
+)
+
+func (g GoogleVertexAIServiceType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GoogleVertexAIServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "googlevertexai":
+		*g = Googlevertexai
+	default:
+		*g = GoogleVertexAIServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GoogleVertexAIServiceType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype/granttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype/granttype.go
new file mode 100644
index 000000000..4bbf9c1ce
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype/granttype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package granttype
+package granttype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/GrantType.ts#L20-L30
+type GrantType struct {
+	Name string
+}
+
+var (
+	Password = GrantType{"password"}
+
+	Accesstoken = GrantType{"access_token"}
+)
+
+func (g GrantType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GrantType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "password":
+		*g = Password
+	case "access_token":
+		*g = Accesstoken
+	default:
+		*g = GrantType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GrantType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go
new file mode 100644
index 000000000..18f876462
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package gridaggregationtype
+package gridaggregationtype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_mvt/_types/GridType.ts#L30-L33
+type GridAggregationType struct {
+	Name string
+}
+
+var (
+	Geotile = GridAggregationType{"geotile"}
+
+	Geohex = GridAggregationType{"geohex"}
+)
+
+func (g GridAggregationType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GridAggregationType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "geotile":
+		*g = Geotile
+	case "geohex":
+		*g = Geohex
+	default:
+		*g = GridAggregationType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GridAggregationType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype/gridtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype/gridtype.go
new file mode 100644
index 000000000..5a31f7c46
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype/gridtype.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package gridtype
+package gridtype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_mvt/_types/GridType.ts#L20-L28
+type GridType struct {
+	Name string
+}
+
+var (
+	Grid = GridType{"grid"}
+
+	Point = GridType{"point"}
+
+	Centroid = GridType{"centroid"}
+)
+
+func (g GridType) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GridType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "grid":
+		*g = Grid
+	case "point":
+		*g = Point
+	case "centroid":
+		*g = Centroid
+	default:
+		*g = GridType{string(text)}
+	}
+
+	return nil
+}
+
+func (g GridType) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/groupby/groupby.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/groupby/groupby.go
new file mode 100644
index 000000000..a162ab620
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/groupby/groupby.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package groupby
+package groupby
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/_types/GroupBy.ts#L20-L27
+type GroupBy struct {
+	Name string
+}
+
+var (
+	Nodes = GroupBy{"nodes"}
+
+	Parents = GroupBy{"parents"}
+
+	None = GroupBy{"none"}
+)
+
+func (g GroupBy) MarshalText() (text []byte, err error) {
+	return []byte(g.String()), nil
+}
+
+func (g *GroupBy) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "nodes":
+		*g = Nodes
+	case "parents":
+		*g = Parents
+	case "none":
+		*g = None
+	default:
+		*g = GroupBy{string(text)}
+	}
+
+	return nil
+}
+
+func (g GroupBy) String() string {
+	return g.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus/healthstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus/healthstatus.go
new file mode 100644
index 000000000..6e590ba7c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus/healthstatus.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package healthstatus
+package healthstatus
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L216-L238
+type HealthStatus struct {
+	Name string
+}
+
+var (
+	Green = HealthStatus{"green"}
+
+	Yellow = HealthStatus{"yellow"}
+
+	Red = HealthStatus{"red"}
+
+	Unknown = HealthStatus{"unknown"}
+
+	Unavailable = HealthStatus{"unavailable"}
+)
+
+func (h HealthStatus) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HealthStatus) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "green":
+		*h = Green
+	case "yellow":
+		*h = Yellow
+	case "red":
+		*h = Red
+	case "unknown":
+		*h = Unknown
+	case "unavailable":
+		*h = Unavailable
+	default:
+		*h = HealthStatus{string(text)}
+	}
+
+	return nil
+}
+
+func (h HealthStatus) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterencoder/highlighterencoder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterencoder/highlighterencoder.go
new file mode 100644
index 000000000..4e3e6c1eb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterencoder/highlighterencoder.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package highlighterencoder
+package highlighterencoder
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L159-L162
+type HighlighterEncoder struct {
+	Name string
+}
+
+var (
+	Default = HighlighterEncoder{"default"}
+
+	Html = HighlighterEncoder{"html"}
+)
+
+func (h HighlighterEncoder) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HighlighterEncoder) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "default":
+		*h = Default
+	case "html":
+		*h = Html
+	default:
+		*h = HighlighterEncoder{string(text)}
+	}
+
+	return nil
+}
+
+func (h HighlighterEncoder) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go
new file mode 100644
index 000000000..7f5a698d3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package highlighterfragmenter
+package highlighterfragmenter
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L164-L167
+type HighlighterFragmenter struct {
+	Name string
+}
+
+var (
+	Simple = HighlighterFragmenter{"simple"}
+
+	Span = HighlighterFragmenter{"span"}
+)
+
+func (h HighlighterFragmenter) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HighlighterFragmenter) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "simple":
+		*h = Simple
+	case "span":
+		*h = Span
+	default:
+		*h = HighlighterFragmenter{string(text)}
+	}
+
+	return nil
+}
+
+func (h HighlighterFragmenter) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder/highlighterorder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder/highlighterorder.go
new file mode 100644
index 000000000..6f89cc1f0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder/highlighterorder.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package highlighterorder
+package highlighterorder
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L169-L171
+type HighlighterOrder struct {
+	Name string
+}
+
+var (
+	Score = HighlighterOrder{"score"}
+)
+
+func (h HighlighterOrder) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HighlighterOrder) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "score":
+		*h = Score
+	default:
+		*h = HighlighterOrder{string(text)}
+	}
+
+	return nil
+}
+
+func (h HighlighterOrder) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go
new file mode 100644
index 000000000..1fb473020
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package highlightertagsschema
+package highlightertagsschema
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L173-L175
+type HighlighterTagsSchema struct {
+	Name string
+}
+
+var (
+	Styled = HighlighterTagsSchema{"styled"}
+)
+
+func (h HighlighterTagsSchema) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HighlighterTagsSchema) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "styled":
+		*h = Styled
+	default:
+		*h = HighlighterTagsSchema{string(text)}
+	}
+
+	return nil
+}
+
+func (h HighlighterTagsSchema) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype/highlightertype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype/highlightertype.go
new file mode 100644
index 000000000..bf46da7af
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype/highlightertype.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package highlightertype
+package highlightertype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L177-L192
+type HighlighterType struct {
+	Name string
+}
+
+var (
+	Plain = HighlighterType{"plain"}
+
+	Fastvector = HighlighterType{"fvh"}
+
+	Unified = HighlighterType{"unified"}
+)
+
+func (h HighlighterType) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HighlighterType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "plain":
+		*h = Plain
+	case "fvh":
+		*h = Fastvector
+	case "unified":
+		*h = Unified
+	default:
+		*h = HighlighterType{string(text)}
+	}
+
+	return nil
+}
+
+func (h HighlighterType) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/holtwinterstype/holtwinterstype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/holtwinterstype/holtwinterstype.go
new file mode 100644
index 000000000..0fc773d11
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/holtwinterstype/holtwinterstype.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package holtwinterstype
+package holtwinterstype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L309-L312
+type HoltWintersType struct {
+	Name string
+}
+
+var (
+	Additive = HoltWintersType{"add"}
+
+	Multiplicative = HoltWintersType{"mult"}
+)
+
+func (h HoltWintersType) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HoltWintersType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "add":
+		*h = Additive
+	case "mult":
+		*h = Multiplicative
+	default:
+		*h = HoltWintersType{string(text)}
+	}
+
+	return nil
+}
+
+func (h HoltWintersType) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod/httpinputmethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod/httpinputmethod.go
new file mode 100644
index 000000000..c3d78b3a9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod/httpinputmethod.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package httpinputmethod
+package httpinputmethod
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L59-L65
+type HttpInputMethod struct {
+	Name string
+}
+
+var (
+	Head = HttpInputMethod{"head"}
+
+	Get = HttpInputMethod{"get"}
+
+	Post = HttpInputMethod{"post"}
+
+	Put = HttpInputMethod{"put"}
+
+	Delete = HttpInputMethod{"delete"}
+)
+
+func (h HttpInputMethod) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HttpInputMethod) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "head":
+		*h = Head
+	case "get":
+		*h = Get
+	case "post":
+		*h = Post
+	case "put":
+		*h = Put
+	case "delete":
+		*h = Delete
+	default:
+		*h = HttpInputMethod{string(text)}
+	}
+
+	return nil
+}
+
+func (h HttpInputMethod) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype/huggingfaceservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype/huggingfaceservicetype.go
new file mode 100644
index 000000000..a40f5dea2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype/huggingfaceservicetype.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package huggingfaceservicetype
+package huggingfaceservicetype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1435-L1437
+type HuggingFaceServiceType struct {
+	Name string
+}
+
+var (
+	Huggingface = HuggingFaceServiceType{"hugging_face"}
+)
+
+func (h HuggingFaceServiceType) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HuggingFaceServiceType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "hugging_face":
+		*h = Huggingface
+	default:
+		*h = HuggingFaceServiceType{string(text)}
+	}
+
+	return nil
+}
+
+func (h HuggingFaceServiceType) String() string {
+	return h.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ibdistribution/ibdistribution.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ibdistribution/ibdistribution.go
new file mode 100644
index 000000000..35f6c1b00
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ibdistribution/ibdistribution.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package ibdistribution
+package ibdistribution
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L42-L45
+type IBDistribution struct {
+	Name string
+}
+
+var (
+	Ll = IBDistribution{"ll"}
+
+	Spl = IBDistribution{"spl"}
+)
+
+func (i IBDistribution) MarshalText() (text []byte, err error) {
+	return []byte(i.String()), nil
+}
+
+func (i *IBDistribution) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "ll":
+		*i = Ll
+	case "spl":
+		*i = Spl
+	default:
+		*i = IBDistribution{string(text)}
+	}
+
+	return nil
+}
+
+func (i IBDistribution) String() string {
+	return i.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/iblambda/iblambda.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/iblambda/iblambda.go
new file mode 100644
index 000000000..802b8cf21
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/iblambda/iblambda.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package iblambda
+package iblambda
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L47-L50
+type IBLambda struct {
+	Name string
+}
+
+var (
+	Df = IBLambda{"df"}
+
+	Ttf = IBLambda{"ttf"}
+)
+
+func (i IBLambda) MarshalText() (text []byte, err error) {
+	return []byte(i.String()), nil
+}
+
+func (i *IBLambda) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "df":
+		*i = Df
+	case "ttf":
+		*i = Ttf
+	default:
+		*i = IBLambda{string(text)}
+	}
+
+	return nil
+}
+
+func (i IBLambda) String() string {
+	return i.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate/icucollationalternate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate/icucollationalternate.go
new file mode 100644
index 000000000..3c65f362b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate/icucollationalternate.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icucollationalternate +package icucollationalternate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L90-L93 +type IcuCollationAlternate struct { + Name string +} + +var ( + Shifted = IcuCollationAlternate{"shifted"} + + NonIgnorable = IcuCollationAlternate{"non-ignorable"} +) + +func (i IcuCollationAlternate) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuCollationAlternate) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "shifted": + *i = Shifted + case "non-ignorable": + *i = NonIgnorable + default: + *i = IcuCollationAlternate{string(text)} + } + + return nil +} + +func (i IcuCollationAlternate) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go new file mode 100644 index 000000000..132b82bc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icucollationcasefirst +package icucollationcasefirst + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L95-L98 +type IcuCollationCaseFirst struct { + Name string +} + +var ( + Lower = IcuCollationCaseFirst{"lower"} + + Upper = IcuCollationCaseFirst{"upper"} +) + +func (i IcuCollationCaseFirst) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuCollationCaseFirst) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "lower": + *i = Lower + case "upper": + *i = Upper + default: + *i = IcuCollationCaseFirst{string(text)} + } + + return nil +} + +func (i IcuCollationCaseFirst) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go new file mode 100644 index 000000000..d671bbfed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icucollationdecomposition +package icucollationdecomposition + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L100-L103 +type IcuCollationDecomposition struct { + Name string +} + +var ( + No = IcuCollationDecomposition{"no"} + + Identical = IcuCollationDecomposition{"identical"} +) + +func (i IcuCollationDecomposition) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuCollationDecomposition) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "no": + *i = No + case "identical": + *i = Identical + default: + *i = IcuCollationDecomposition{string(text)} + } + + return nil +} + +func (i IcuCollationDecomposition) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength/icucollationstrength.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength/icucollationstrength.go new file mode 100644 index 000000000..dfc900d7f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength/icucollationstrength.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icucollationstrength +package icucollationstrength + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L105-L111 +type IcuCollationStrength struct { + Name string +} + +var ( + Primary = IcuCollationStrength{"primary"} + + Secondary = IcuCollationStrength{"secondary"} + + Tertiary = IcuCollationStrength{"tertiary"} + + Quaternary = IcuCollationStrength{"quaternary"} + + Identical = IcuCollationStrength{"identical"} +) + +func (i IcuCollationStrength) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuCollationStrength) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "primary": + *i = Primary + case "secondary": + *i = Secondary + case "tertiary": + *i = Tertiary + case "quaternary": + *i = Quaternary + case "identical": + *i = Identical + default: + *i = IcuCollationStrength{string(text)} + } + + return nil +} + +func (i IcuCollationStrength) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go new file mode 100644 index 000000000..14f1f36aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icunormalizationmode +package icunormalizationmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L79-L82 +type IcuNormalizationMode struct { + Name string +} + +var ( + Decompose = IcuNormalizationMode{"decompose"} + + Compose = IcuNormalizationMode{"compose"} +) + +func (i IcuNormalizationMode) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuNormalizationMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "decompose": + *i = Decompose + case "compose": + *i = Compose + default: + *i = IcuNormalizationMode{string(text)} + } + + return nil +} + +func (i IcuNormalizationMode) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go new file mode 100644 index 000000000..d98db7fca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icunormalizationtype +package icunormalizationtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L84-L88 +type IcuNormalizationType struct { + Name string +} + +var ( + Nfc = IcuNormalizationType{"nfc"} + + Nfkc = IcuNormalizationType{"nfkc"} + + Nfkccf = IcuNormalizationType{"nfkc_cf"} +) + +func (i IcuNormalizationType) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuNormalizationType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "nfc": + *i = Nfc + case "nfkc": + *i = Nfkc + case "nfkc_cf": + *i = Nfkccf + default: + *i = IcuNormalizationType{string(text)} + } + + return nil +} + +func (i IcuNormalizationType) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icutransformdirection/icutransformdirection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icutransformdirection/icutransformdirection.go new file mode 100644 index 000000000..1a33854b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icutransformdirection/icutransformdirection.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package icutransformdirection +package icutransformdirection + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L74-L77 +type IcuTransformDirection struct { + Name string +} + +var ( + Forward = IcuTransformDirection{"forward"} + + Reverse = IcuTransformDirection{"reverse"} +) + +func (i IcuTransformDirection) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IcuTransformDirection) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "forward": + *i = Forward + case "reverse": + *i = Reverse + default: + *i = IcuTransformDirection{string(text)} + } + + return nil +} + +func (i IcuTransformDirection) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/impactarea/impactarea.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/impactarea/impactarea.go new file mode 100644 index 000000000..3b62f1e4e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/impactarea/impactarea.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package impactarea +package impactarea + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L75-L80 +type ImpactArea struct { + Name string +} + +var ( + Search = ImpactArea{"search"} + + Ingest = ImpactArea{"ingest"} + + Backup = ImpactArea{"backup"} + + Deploymentmanagement = ImpactArea{"deployment_management"} +) + +func (i ImpactArea) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *ImpactArea) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "search": + *i = Search + case "ingest": + *i = Ingest + case "backup": + *i = Backup + case "deployment_management": + *i = Deploymentmanagement + default: + *i = ImpactArea{string(text)} + } + + return nil +} + +func (i ImpactArea) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/include/include.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/include/include.go new file mode 100644 index 000000000..58df05437 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/include/include.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package include +package include + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Include.ts#L20-L47 +type Include struct { + Name string +} + +var ( + Definition = Include{"definition"} + + Featureimportancebaseline = Include{"feature_importance_baseline"} + + Hyperparameters = Include{"hyperparameters"} + + Totalfeatureimportance = Include{"total_feature_importance"} + + Definitionstatus = Include{"definition_status"} +) + +func (i Include) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *Include) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "definition": + *i = Definition + case "feature_importance_baseline": + *i = Featureimportancebaseline + case "hyperparameters": + *i = Hyperparameters + case "total_feature_importance": + *i = Totalfeatureimportance + case "definition_status": + *i = Definitionstatus + default: + *i = Include{string(text)} + } + + return nil +} + +func (i Include) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go new file mode 100644 index 000000000..8643d516c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexcheckonstartup +package indexcheckonstartup + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L283-L290 +type IndexCheckOnStartup struct { + Name string +} + +var ( + True = IndexCheckOnStartup{"true"} + + False = IndexCheckOnStartup{"false"} + + Checksum = IndexCheckOnStartup{"checksum"} +) + +func (i *IndexCheckOnStartup) UnmarshalJSON(data []byte) error { + return i.UnmarshalText(data) +} + +func (i IndexCheckOnStartup) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexCheckOnStartup) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "true": + *i = True + case "false": + *i = False + case "checksum": + *i = Checksum + default: + *i = IndexCheckOnStartup{string(text)} + } + + return nil +} + +func (i IndexCheckOnStartup) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexingjobstate/indexingjobstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexingjobstate/indexingjobstate.go new file mode 100644 index 000000000..19c0244e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexingjobstate/indexingjobstate.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexingjobstate +package indexingjobstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/types.ts#L77-L83 +type IndexingJobState struct { + Name string +} + +var ( + Started = IndexingJobState{"started"} + + Indexing = IndexingJobState{"indexing"} + + Stopping = IndexingJobState{"stopping"} + + Stopped = IndexingJobState{"stopped"} + + Aborting = IndexingJobState{"aborting"} +) + +func (i IndexingJobState) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexingJobState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "started": + *i = Started + case "indexing": + *i = Indexing + case "stopping": + *i = Stopping + case "stopped": + *i = Stopped + case "aborting": + *i = Aborting + default: + *i = IndexingJobState{string(text)} + } + + return nil +} + +func (i IndexingJobState) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go new file mode 100644 index 000000000..322ea13c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexmetadatastate +package indexmetadatastate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L225-L232 +type IndexMetadataState struct { + Name string +} + +var ( + Open = IndexMetadataState{"open"} + + Close = IndexMetadataState{"close"} +) + +func (i IndexMetadataState) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexMetadataState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "open": + *i = Open + case "close": + *i = Close + default: + *i = IndexMetadataState{string(text)} + } + + return nil +} + +func (i IndexMetadataState) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode/indexmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode/indexmode.go new file mode 100644 index 000000000..219eca1fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode/indexmode.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexmode +package indexmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L41-L46 +type IndexMode struct { + Name string +} + +var ( + Standard = IndexMode{"standard"} + + Timeseries = IndexMode{"time_series"} + + Logsdb = IndexMode{"logsdb"} + + Lookup = IndexMode{"lookup"} +) + +func (i IndexMode) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "standard": + *i = Standard + case "time_series": + *i = Timeseries + case "logsdb": + *i = Logsdb + case "lookup": + *i = Lookup + default: + *i = IndexMode{string(text)} + } + + return nil +} + +func (i IndexMode) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions/indexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions/indexoptions.go new file mode 100644 index 000000000..0d7f51827 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions/indexoptions.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexoptions +package indexoptions + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L309-L314 +type IndexOptions struct { + Name string +} + +var ( + Docs = IndexOptions{"docs"} + + Freqs = IndexOptions{"freqs"} + + Positions = IndexOptions{"positions"} + + Offsets = IndexOptions{"offsets"} +) + +func (i IndexOptions) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexOptions) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "docs": + *i = Docs + case "freqs": + *i = Freqs + case "positions": + *i = Positions + case "offsets": + *i = Offsets + default: + *i = IndexOptions{string(text)} + } + + return nil +} + +func (i IndexOptions) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege/indexprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege/indexprivilege.go new file mode 100644 index 000000000..28a5924c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege/indexprivilege.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexprivilege +package indexprivilege + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L388-L430 +type IndexPrivilege struct { + Name string +} + +var ( + All = IndexPrivilege{"all"} + + Autoconfigure = IndexPrivilege{"auto_configure"} + + Create = IndexPrivilege{"create"} + + Createdoc = IndexPrivilege{"create_doc"} + + Createindex = IndexPrivilege{"create_index"} + + Crossclusterreplication = IndexPrivilege{"cross_cluster_replication"} + + Crossclusterreplicationinternal = IndexPrivilege{"cross_cluster_replication_internal"} + + Delete = IndexPrivilege{"delete"} + + Deleteindex = IndexPrivilege{"delete_index"} + + Index = IndexPrivilege{"index"} + + Maintenance = IndexPrivilege{"maintenance"} + + Manage = IndexPrivilege{"manage"} + + Managedatastreamlifecycle = IndexPrivilege{"manage_data_stream_lifecycle"} + + Managefollowindex = IndexPrivilege{"manage_follow_index"} + + Manageilm = IndexPrivilege{"manage_ilm"} + + Manageleaderindex = IndexPrivilege{"manage_leader_index"} + + Monitor = IndexPrivilege{"monitor"} + + None = IndexPrivilege{"none"} + + Read = IndexPrivilege{"read"} + + Readcrosscluster = IndexPrivilege{"read_cross_cluster"} + + Viewindexmetadata = IndexPrivilege{"view_index_metadata"} + + Write = IndexPrivilege{"write"} +) + +func (i IndexPrivilege) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexPrivilege) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "all": + *i = All + case "auto_configure": + *i = Autoconfigure + case "create": + *i = Create + case "create_doc": + *i = Createdoc + case "create_index": + *i = Createindex + case "cross_cluster_replication": + *i = Crossclusterreplication + case "cross_cluster_replication_internal": + *i = Crossclusterreplicationinternal + case "delete": + *i = Delete + case "delete_index": + *i = Deleteindex + case "index": + *i = Index + case "maintenance": + *i = Maintenance + case "manage": + *i = Manage + case "manage_data_stream_lifecycle": + *i = Managedatastreamlifecycle + case "manage_follow_index": + *i = Managefollowindex + case "manage_ilm": + *i = Manageilm + case "manage_leader_index": + *i = Manageleaderindex + case "monitor": + *i = Monitor + case "none": + *i = None + case "read": + *i = Read + case "read_cross_cluster": + *i = Readcrosscluster + case "view_index_metadata": + *i = Viewindexmetadata + case "write": + *i = Write + default: + *i = IndexPrivilege{string(text)} + } + + return nil +} + +func (i IndexPrivilege) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go new file mode 100644 index 000000000..c8d329095 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexroutingallocationoptions +package indexroutingallocationoptions + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L38-L43 +type IndexRoutingAllocationOptions struct { + Name string +} + +var ( + All = IndexRoutingAllocationOptions{"all"} + + Primaries = IndexRoutingAllocationOptions{"primaries"} + + Newprimaries = IndexRoutingAllocationOptions{"new_primaries"} + + None = IndexRoutingAllocationOptions{"none"} +) + +func (i IndexRoutingAllocationOptions) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexRoutingAllocationOptions) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "all": + *i = All + case "primaries": + *i = Primaries + case "new_primaries": + *i = Newprimaries + case "none": + *i = None + default: + *i = IndexRoutingAllocationOptions{string(text)} + } + + return nil +} + +func (i IndexRoutingAllocationOptions) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go new file mode 100644 index 000000000..e997f9938 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indexroutingrebalanceoptions +package indexroutingrebalanceoptions + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L45-L50 +type IndexRoutingRebalanceOptions struct { + Name string +} + +var ( + All = IndexRoutingRebalanceOptions{"all"} + + Primaries = IndexRoutingRebalanceOptions{"primaries"} + + Replicas = IndexRoutingRebalanceOptions{"replicas"} + + None = IndexRoutingRebalanceOptions{"none"} +) + +func (i IndexRoutingRebalanceOptions) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndexRoutingRebalanceOptions) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "all": + *i = All + case "primaries": + *i = Primaries + case "replicas": + *i = Replicas + case "none": + *i = None + default: + *i = IndexRoutingRebalanceOptions{string(text)} + } + + return nil +} + +func (i IndexRoutingRebalanceOptions) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go new file mode 100644 index 000000000..bf2ef6293 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package indicatorhealthstatus +package indicatorhealthstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L25-L31 +type IndicatorHealthStatus struct { + Name string +} + +var ( + Green = IndicatorHealthStatus{"green"} + + Yellow = IndicatorHealthStatus{"yellow"} + + Red = IndicatorHealthStatus{"red"} + + Unknown = IndicatorHealthStatus{"unknown"} + + Unavailable = IndicatorHealthStatus{"unavailable"} +) + +func (i IndicatorHealthStatus) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndicatorHealthStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "green": + *i = Green + case "yellow": + *i = Yellow + case "red": + *i = Red + case "unknown": + *i = Unknown + case "unavailable": + *i = Unavailable + default: + *i = IndicatorHealthStatus{string(text)} + } + + return nil +} + +func (i IndicatorHealthStatus) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/inputtype/inputtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/inputtype/inputtype.go new file mode 100644 index 000000000..548f090c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/inputtype/inputtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package inputtype +package inputtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L97-L101 +type InputType struct { + Name string +} + +var ( + Http = InputType{"http"} + + Search = InputType{"search"} + + Simple = InputType{"simple"} +) + +func (i InputType) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *InputType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "http": + *i = Http + case "search": + *i = Search + case "simple": + *i = Simple + default: + *i = InputType{string(text)} + } + + return nil +} + +func (i InputType) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype/jinaaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype/jinaaiservicetype.go new file mode 100644 index 000000000..6c0ce8051 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype/jinaaiservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jinaaiservicetype +package jinaaiservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1498-L1500 +type JinaAIServiceType struct { + Name string +} + +var ( + Jinaai = JinaAIServiceType{"jinaai"} +) + +func (j JinaAIServiceType) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JinaAIServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "jinaai": + *j = Jinaai + default: + *j = JinaAIServiceType{string(text)} + } + + return nil +} + +func (j JinaAIServiceType) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaisimilaritytype/jinaaisimilaritytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaisimilaritytype/jinaaisimilaritytype.go new file mode 100644 index 000000000..9b57eb761 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaisimilaritytype/jinaaisimilaritytype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jinaaisimilaritytype +package jinaaisimilaritytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1502-L1506 +type JinaAISimilarityType struct { + Name string +} + +var ( + Cosine = JinaAISimilarityType{"cosine"} + + Dotproduct = JinaAISimilarityType{"dot_product"} + + L2norm = JinaAISimilarityType{"l2_norm"} +) + +func (j JinaAISimilarityType) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JinaAISimilarityType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cosine": + *j = Cosine + case "dot_product": + *j = Dotproduct + case "l2_norm": + *j = L2norm + default: + *j = JinaAISimilarityType{string(text)} + } + + return nil +} + +func (j JinaAISimilarityType) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaitextembeddingtask/jinaaitextembeddingtask.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaitextembeddingtask/jinaaitextembeddingtask.go new file mode 100644 index 000000000..070828bf6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaitextembeddingtask/jinaaitextembeddingtask.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jinaaitextembeddingtask +package jinaaitextembeddingtask + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1508-L1513 +type JinaAITextEmbeddingTask struct { + Name string +} + +var ( + Classification = JinaAITextEmbeddingTask{"classification"} + + Clustering = JinaAITextEmbeddingTask{"clustering"} + + Ingest = JinaAITextEmbeddingTask{"ingest"} + + Search = JinaAITextEmbeddingTask{"search"} +) + +func (j JinaAITextEmbeddingTask) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JinaAITextEmbeddingTask) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "classification": + *j = Classification + case "clustering": + *j = Clustering + case "ingest": + *j = Ingest + case "search": + *j = Search + default: + *j = JinaAITextEmbeddingTask{string(text)} + } + + return nil +} + +func (j JinaAITextEmbeddingTask) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobblockedreason/jobblockedreason.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobblockedreason/jobblockedreason.go new file mode 100644 index 000000000..222c153fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobblockedreason/jobblockedreason.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jobblockedreason +package jobblockedreason + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L397-L401 +type JobBlockedReason struct { + Name string +} + +var ( + Delete = JobBlockedReason{"delete"} + + Reset = JobBlockedReason{"reset"} + + Revert = JobBlockedReason{"revert"} +) + +func (j JobBlockedReason) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JobBlockedReason) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "delete": + *j = Delete + case "reset": + *j = Reset + case "revert": + *j = Revert + default: + *j = JobBlockedReason{string(text)} + } + + return nil +} + +func (j JobBlockedReason) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate/jobstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate/jobstate.go new file mode 100644 index 000000000..1120d42c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate/jobstate.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jobstate +package jobstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L36-L52 +type JobState struct { + Name string +} + +var ( + Closing = JobState{"closing"} + + Closed = JobState{"closed"} + + Opened = JobState{"opened"} + + Failed = JobState{"failed"} + + Opening = JobState{"opening"} +) + +func (j JobState) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JobState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "closing": + *j = Closing + case "closed": + *j = Closed + case "opened": + *j = Opened + case "failed": + *j = Failed + case "opening": + *j = Opening + default: + *j = JobState{string(text)} + } + + return nil +} + +func (j JobState) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go new file mode 100644 index 000000000..94dceb631 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package jsonprocessorconflictstrategy +package jsonprocessorconflictstrategy + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1170-L1175 +type JsonProcessorConflictStrategy struct { + Name string +} + +var ( + Replace = JsonProcessorConflictStrategy{"replace"} + + Merge = JsonProcessorConflictStrategy{"merge"} +) + +func (j JsonProcessorConflictStrategy) MarshalText() (text []byte, err error) { + return []byte(j.String()), nil +} + +func (j *JsonProcessorConflictStrategy) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "replace": + *j = Replace + case "merge": + *j = Merge + default: + *j = JsonProcessorConflictStrategy{string(text)} + } + + return nil +} + +func (j JsonProcessorConflictStrategy) String() string { + return j.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/keeptypesmode/keeptypesmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/keeptypesmode/keeptypesmode.go new file mode 100644 index 000000000..53a9d9703 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/keeptypesmode/keeptypesmode.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package keeptypesmode +package keeptypesmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L282-L285 +type KeepTypesMode struct { + Name string +} + +var ( + Include = KeepTypesMode{"include"} + + Exclude = KeepTypesMode{"exclude"} +) + +func (k KeepTypesMode) MarshalText() (text []byte, err error) { + return []byte(k.String()), nil +} + +func (k *KeepTypesMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "include": + *k = Include + case "exclude": + *k = Exclude + default: + *k = KeepTypesMode{string(text)} + } + + return nil +} + +func (k KeepTypesMode) String() string { + return k.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go new file mode 100644 index 000000000..ab76e5d7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package kuromojitokenizationmode +package kuromojitokenizationmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L58-L62 +type KuromojiTokenizationMode struct { + Name string +} + +var ( + Normal = KuromojiTokenizationMode{"normal"} + + Search = KuromojiTokenizationMode{"search"} + + Extended = KuromojiTokenizationMode{"extended"} +) + +func (k KuromojiTokenizationMode) MarshalText() (text []byte, err error) { + return []byte(k.String()), nil +} + +func (k *KuromojiTokenizationMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "normal": + *k = Normal + case "search": + *k = Search + case "extended": + *k = Extended + default: + *k = KuromojiTokenizationMode{string(text)} + } + + return nil +} + +func (k KuromojiTokenizationMode) String() string { + return k.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level/level.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level/level.go new file mode 100644 index 000000000..64e303476 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level/level.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package level +package level + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L248-L252 +type Level struct { + Name string +} + +var ( + Cluster = Level{"cluster"} + + Indices = Level{"indices"} + + Shards = Level{"shards"} +) + +func (l Level) MarshalText() (text []byte, err error) { + return []byte(l.String()), nil +} + +func (l *Level) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cluster": + *l = Cluster + case "indices": + *l = Indices + case "shards": + *l = Shards + default: + *l = Level{string(text)} + } + + return nil +} + +func (l Level) String() string { + return l.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus/licensestatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus/licensestatus.go new file mode 100644 index 000000000..b74bedded --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus/licensestatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package licensestatus +package licensestatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/_types/License.ts#L35-L40 +type LicenseStatus struct { + Name string +} + +var ( + Active = LicenseStatus{"active"} + + Valid = LicenseStatus{"valid"} + + Invalid = LicenseStatus{"invalid"} + + Expired = LicenseStatus{"expired"} +) + +func (l LicenseStatus) MarshalText() (text []byte, err error) { + return []byte(l.String()), nil +} + +func (l *LicenseStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "active": + *l = Active + case "valid": + *l = Valid + case "invalid": + *l = Invalid + case "expired": + *l = Expired + default: + *l = LicenseStatus{string(text)} + } + + return nil +} + +func (l LicenseStatus) String() string { + return l.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype/licensetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype/licensetype.go new file mode 100644 index 000000000..f079fceee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype/licensetype.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package licensetype +package licensetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/_types/License.ts#L23-L33 +type LicenseType struct { + Name string +} + +var ( + Missing = LicenseType{"missing"} + + Trial = LicenseType{"trial"} + + Basic = LicenseType{"basic"} + + Standard = LicenseType{"standard"} + + Dev = LicenseType{"dev"} + + Silver = LicenseType{"silver"} + + Gold = LicenseType{"gold"} + + Platinum = LicenseType{"platinum"} + + Enterprise = LicenseType{"enterprise"} +) + +func (l LicenseType) MarshalText() (text []byte, err error) { + return []byte(l.String()), nil +} + +func (l *LicenseType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "missing": + *l = Missing + case "trial": + *l = Trial + case "basic": + *l = Basic + case "standard": + *l = Standard + case "dev": + *l = Dev + case "silver": + *l = Silver + case "gold": + *l = Gold + case "platinum": + *l = Platinum + case "enterprise": + *l = Enterprise + default: + *l = LicenseType{string(text)} + } + + return nil +} + +func (l LicenseType) String() string { + return l.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go new file mode 100644 index 000000000..43bd3658b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package lifecycleoperationmode +package lifecycleoperationmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Lifecycle.ts#L20-L24 +type LifecycleOperationMode struct { + Name string +} + +var ( + RUNNING = LifecycleOperationMode{"RUNNING"} + + STOPPING = LifecycleOperationMode{"STOPPING"} + + STOPPED = LifecycleOperationMode{"STOPPED"} +) + +func (l LifecycleOperationMode) MarshalText() (text []byte, err error) { + return []byte(l.String()), nil +} + +func (l *LifecycleOperationMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "running": + *l = RUNNING + case "stopping": + *l = STOPPING + case "stopped": + *l = STOPPED + default: + *l = LifecycleOperationMode{string(text)} + } + + return nil +} + +func (l LifecycleOperationMode) String() string { + return l.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lowercasetokenfilterlanguages/lowercasetokenfilterlanguages.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lowercasetokenfilterlanguages/lowercasetokenfilterlanguages.go new file mode 100644 index 000000000..1e981c1b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lowercasetokenfilterlanguages/lowercasetokenfilterlanguages.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package lowercasetokenfilterlanguages +package lowercasetokenfilterlanguages + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L344-L348 +type LowercaseTokenFilterLanguages struct { + Name string +} + +var ( + Greek = LowercaseTokenFilterLanguages{"greek"} + + Irish = LowercaseTokenFilterLanguages{"irish"} + + Turkish = LowercaseTokenFilterLanguages{"turkish"} +) + +func (l LowercaseTokenFilterLanguages) MarshalText() (text []byte, err error) { + return []byte(l.String()), nil +} + +func (l *LowercaseTokenFilterLanguages) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "greek": + *l = Greek + case "irish": + *l = Irish + case "turkish": + *l = Turkish + default: + *l = LowercaseTokenFilterLanguages{string(text)} + } + + return nil +} + +func (l LowercaseTokenFilterLanguages) String() string { + return l.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby/managedby.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby/managedby.go new file mode 100644 index 000000000..45a0a3158 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby/managedby.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package managedby +package managedby + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L33-L38 +type ManagedBy struct { + Name string +} + +var ( + Ilm = ManagedBy{"Index Lifecycle Management"} + + Datastream = ManagedBy{"Data stream lifecycle"} + + Unmanaged = ManagedBy{"Unmanaged"} +) + +func (m ManagedBy) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *ManagedBy) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index lifecycle management": + *m = Ilm + case "data stream lifecycle": + *m = Datastream + case "unmanaged": + *m = Unmanaged + default: + *m = ManagedBy{string(text)} + } + + return nil +} + +func (m ManagedBy) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/matchtype/matchtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/matchtype/matchtype.go new file mode 100644 index 000000000..ca26d7b3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/matchtype/matchtype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package matchtype +package matchtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/dynamic-template.ts#L45-L48 +type MatchType struct { + Name string +} + +var ( + Simple = MatchType{"simple"} + + Regex = MatchType{"regex"} +) + +func (m MatchType) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MatchType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "simple": + *m = Simple + case "regex": + *m = Regex + default: + *m = MatchType{string(text)} + } + + return nil +} + +func (m MatchType) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus/memorystatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus/memorystatus.go new file mode 100644 index 000000000..b255caed0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus/memorystatus.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package memorystatus +package memorystatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L89-L93 +type MemoryStatus struct { + Name string +} + +var ( + Ok = MemoryStatus{"ok"} + + Softlimit = MemoryStatus{"soft_limit"} + + Hardlimit = MemoryStatus{"hard_limit"} +) + +func (m MemoryStatus) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MemoryStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "ok": + *m = Ok + case "soft_limit": + *m = Softlimit + case "hard_limit": + *m = Hardlimit + default: + *m = MemoryStatus{string(text)} + } + + return nil +} + +func (m MemoryStatus) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric/metric.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric/metric.go new file mode 100644 index 000000000..743bddb09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric/metric.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package metric +package metric + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Metric.ts#L22-L28 +type Metric struct { + Name string +} + +var ( + Min = Metric{"min"} + + Max = Metric{"max"} + + Sum = Metric{"sum"} + + Avg = Metric{"avg"} + + Valuecount = Metric{"value_count"} +) + +func (m Metric) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *Metric) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "min": + *m = Min + case "max": + *m = Max + case "sum": + *m = Sum + case "avg": + *m = Avg + case "value_count": + *m = Valuecount + default: + *m = Metric{string(text)} + } + + return nil +} + +func (m Metric) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus/migrationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus/migrationstatus.go new file mode 100644 index 000000000..b1e296dc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus/migrationstatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package migrationstatus +package migrationstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 +type MigrationStatus struct { + Name string +} + +var ( + NOMIGRATIONNEEDED = MigrationStatus{"NO_MIGRATION_NEEDED"} + + MIGRATIONNEEDED = MigrationStatus{"MIGRATION_NEEDED"} + + INPROGRESS = MigrationStatus{"IN_PROGRESS"} + + ERROR = MigrationStatus{"ERROR"} +) + +func (m MigrationStatus) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MigrationStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "no_migration_needed": + *m = NOMIGRATIONNEEDED + case "migration_needed": + *m = MIGRATIONNEEDED + case "in_progress": + *m = INPROGRESS + case "error": + *m = ERROR + default: + *m = MigrationStatus{string(text)} + } + + return nil +} + +func (m MigrationStatus) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/minimuminterval/minimuminterval.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/minimuminterval/minimuminterval.go new file mode 100644 index 000000000..42ee4e422 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/minimuminterval/minimuminterval.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package minimuminterval +package minimuminterval + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L112-L119 +type MinimumInterval struct { + Name string +} + +var ( + Second = MinimumInterval{"second"} + + Minute = MinimumInterval{"minute"} + + Hour = MinimumInterval{"hour"} + + Day = MinimumInterval{"day"} + + Month = MinimumInterval{"month"} + + Year = MinimumInterval{"year"} +) + +func (m MinimumInterval) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MinimumInterval) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "second": + *m = Second + case "minute": + *m = Minute + case "hour": + *m = Hour + case "day": + *m = Day + case "month": + *m = Month + case "year": + *m = Year + default: + *m = MinimumInterval{string(text)} + } + + return nil +} + +func (m MinimumInterval) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder/missingorder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder/missingorder.go new file mode 100644 index 000000000..e8a97e412 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder/missingorder.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package missingorder +package missingorder + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/AggregationContainer.ts#L536-L540 +type MissingOrder struct { + Name string +} + +var ( + First = MissingOrder{"first"} + + Last = MissingOrder{"last"} + + Default = MissingOrder{"default"} +) + +func (m MissingOrder) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MissingOrder) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "first": + *m = First + case "last": + *m = Last + case "default": + *m = Default + default: + *m = MissingOrder{string(text)} + } + + return nil +} + +func (m MissingOrder) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype/mistralservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype/mistralservicetype.go new file mode 100644 index 000000000..6870487d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype/mistralservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package mistralservicetype +package mistralservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1550-L1552 +type MistralServiceType struct { + Name string +} + +var ( + Mistral = MistralServiceType{"mistral"} +) + +func (m MistralServiceType) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MistralServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "mistral": + *m = Mistral + default: + *m = MistralServiceType{string(text)} + } + + return nil +} + +func (m MistralServiceType) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum/modeenum.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum/modeenum.go new file mode 100644 index 000000000..0207d01c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum/modeenum.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package modeenum +package modeenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L54-L56 +type ModeEnum struct { + Name string +} + +var ( + Upgrade = ModeEnum{"upgrade"} +) + +func (m ModeEnum) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *ModeEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "upgrade": + *m = Upgrade + default: + *m = ModeEnum{string(text)} + } + + return nil +} + +func (m ModeEnum) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/month/month.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/month/month.go new file mode 100644 index 000000000..25e7d08c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/month/month.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package month +package month + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L65-L78 +type Month struct { + Name string +} + +var ( + January = Month{"january"} + + February = Month{"february"} + + March = Month{"march"} + + April = Month{"april"} + + May = Month{"may"} + + June = Month{"june"} + + July = Month{"july"} + + August = Month{"august"} + + September = Month{"september"} + + October = Month{"october"} + + November = Month{"november"} + + December = Month{"december"} +) + +func (m Month) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *Month) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "january": + *m = January + case "february": + *m = February + case "march": + *m = March + case "april": + *m = April + case "may": + *m = May + case "june": + *m = June + case "july": + *m = July + case "august": + *m = August + case "september": + *m = September + case "october": + *m = October + case "november": + *m = November + case "december": + *m = December + default: + *m = Month{string(text)} + } + + return nil +} + +func (m Month) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode/multivaluemode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode/multivaluemode.go new file mode 100644 index 000000000..651cf04ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode/multivaluemode.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package multivaluemode +package multivaluemode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L368-L385 +type MultiValueMode struct { + Name string +} + +var ( + Min = MultiValueMode{"min"} + + Max = MultiValueMode{"max"} + + Avg = MultiValueMode{"avg"} + + Sum = MultiValueMode{"sum"} +) + +func (m MultiValueMode) MarshalText() (text []byte, err error) { + return []byte(m.String()), nil +} + +func (m *MultiValueMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "min": + *m = Min + case "max": + *m = Max + case "avg": + *m = Avg + case "sum": + *m = Sum + default: + *m = MultiValueMode{string(text)} + } + + return nil +} + +func (m MultiValueMode) String() string { + return m.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole/noderole.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole/noderole.go new file mode 100644 index 000000000..50c6429f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole/noderole.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package noderole +package noderole + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Node.ts#L71-L89 +type NodeRole struct { + Name string +} + +var ( + Master = NodeRole{"master"} + + Data = NodeRole{"data"} + + Datacold = NodeRole{"data_cold"} + + Datacontent = NodeRole{"data_content"} + + Datafrozen = NodeRole{"data_frozen"} + + Datahot = NodeRole{"data_hot"} + + Datawarm = NodeRole{"data_warm"} + + Client = NodeRole{"client"} + + Ingest = NodeRole{"ingest"} + + Ml = NodeRole{"ml"} + + Votingonly = NodeRole{"voting_only"} + + Transform = NodeRole{"transform"} + + Remoteclusterclient = NodeRole{"remote_cluster_client"} + + Coordinatingonly = NodeRole{"coordinating_only"} +) + +func (n NodeRole) MarshalText() (text []byte, err error) { + return []byte(n.String()), nil +} + +func (n *NodeRole) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "master": + *n = Master + case "data": + *n = Data + case "data_cold": + *n = Datacold + case "data_content": + *n = Datacontent + case "data_frozen": + *n = Datafrozen + case "data_hot": + *n = Datahot + case "data_warm": + *n = Datawarm + case "client": + *n = Client + case "ingest": + *n = Ingest + case "ml": + *n = Ml + case "voting_only": + *n = Votingonly + case "transform": + *n = Transform + case "remote_cluster_client": + *n = Remoteclusterclient + case "coordinating_only": + *n = Coordinatingonly + default: + *n = NodeRole{string(text)} + } + + return nil +} + +func (n NodeRole) String() string { + return n.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go new file mode 100644 index 000000000..9d2d41f89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
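[Reviewer note, not part of the vendored diff] Note the default branch in UnmarshalText throughout these packages: an unrecognized name is wrapped verbatim instead of producing an error, which keeps older clients forward-compatible with values introduced by newer servers. A hedged sketch against the noderole package above ("data_extra_hot" is a made-up role, used only for illustration):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole"
)

func main() {
	var role noderole.NodeRole

	// Known names match case-insensitively.
	_ = role.UnmarshalText([]byte("MASTER"))
	fmt.Println(role == noderole.Master) // true

	// Unknown names never error; the raw text is kept as-is,
	// so a role this client predates still survives decoding.
	_ = role.UnmarshalText([]byte("data_extra_hot"))
	fmt.Println(role.Name) // data_extra_hot
}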
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package noridecompoundmode +package noridecompoundmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/nori-plugin.ts#L23-L27 +type NoriDecompoundMode struct { + Name string +} + +var ( + Discard = NoriDecompoundMode{"discard"} + + None = NoriDecompoundMode{"none"} + + Mixed = NoriDecompoundMode{"mixed"} +) + +func (n NoriDecompoundMode) MarshalText() (text []byte, err error) { + return []byte(n.String()), nil +} + +func (n *NoriDecompoundMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "discard": + *n = Discard + case "none": + *n = None + case "mixed": + *n = Mixed + default: + *n = NoriDecompoundMode{string(text)} + } + + return nil +} + +func (n NoriDecompoundMode) String() string { + return n.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization/normalization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization/normalization.go new file mode 100644 index 000000000..bab33ebda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization/normalization.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package normalization +package normalization + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Similarity.ts#L52-L58 +type Normalization struct { + Name string +} + +var ( + No = Normalization{"no"} + + H1 = Normalization{"h1"} + + H2 = Normalization{"h2"} + + H3 = Normalization{"h3"} + + Z = Normalization{"z"} +) + +func (n Normalization) MarshalText() (text []byte, err error) { + return []byte(n.String()), nil +} + +func (n *Normalization) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "no": + *n = No + case "h1": + *n = H1 + case "h2": + *n = H2 + case "h3": + *n = H3 + case "z": + *n = Z + default: + *n = Normalization{string(text)} + } + + return nil +} + +func (n Normalization) String() string { + return n.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalizemethod/normalizemethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalizemethod/normalizemethod.go new file mode 100644 index 000000000..6e5e3450d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalizemethod/normalizemethod.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package normalizemethod +package normalizemethod + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L361-L387 +type NormalizeMethod struct { + Name string +} + +var ( + Rescale01 = NormalizeMethod{"rescale_0_1"} + + Rescale0100 = NormalizeMethod{"rescale_0_100"} + + Percentofsum = NormalizeMethod{"percent_of_sum"} + + Mean = NormalizeMethod{"mean"} + + Zscore = NormalizeMethod{"z-score"} + + Softmax = NormalizeMethod{"softmax"} +) + +func (n NormalizeMethod) MarshalText() (text []byte, err error) { + return []byte(n.String()), nil +} + +func (n *NormalizeMethod) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "rescale_0_1": + *n = Rescale01 + case "rescale_0_100": + *n = Rescale0100 + case "percent_of_sum": + *n = Percentofsum + case "mean": + *n = Mean + case "z-score": + *n = Zscore + case "softmax": + *n = Softmax + default: + *n = NormalizeMethod{string(text)} + } + + return nil +} + +func (n NormalizeMethod) String() string { + return n.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go new file mode 100644 index 000000000..10733e969 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package numericfielddataformat +package numericfielddataformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 +type NumericFielddataFormat struct { + Name string +} + +var ( + Array = NumericFielddataFormat{"array"} + + Disabled = NumericFielddataFormat{"disabled"} +) + +func (n NumericFielddataFormat) MarshalText() (text []byte, err error) { + return []byte(n.String()), nil +} + +func (n *NumericFielddataFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "array": + *n = Array + case "disabled": + *n = Disabled + default: + *n = NumericFielddataFormat{string(text)} + } + + return nil +} + +func (n NumericFielddataFormat) String() string { + return n.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror/onscripterror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror/onscripterror.go new file mode 100644 index 000000000..13a66aaad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror/onscripterror.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package onscripterror +package onscripterror + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L152-L155 +type OnScriptError struct { + Name string +} + +var ( + Fail = OnScriptError{"fail"} + + Continue = OnScriptError{"continue"} +) + +func (o OnScriptError) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OnScriptError) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "fail": + *o = Fail + case "continue": + *o = Continue + default: + *o = OnScriptError{string(text)} + } + + return nil +} + +func (o OnScriptError) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype/openaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype/openaiservicetype.go new file mode 100644 index 000000000..308927ad2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype/openaiservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package openaiservicetype +package openaiservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1612-L1614 +type OpenAIServiceType struct { + Name string +} + +var ( + Openai = OpenAIServiceType{"openai"} +) + +func (o OpenAIServiceType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OpenAIServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "openai": + *o = Openai + default: + *o = OpenAIServiceType{string(text)} + } + + return nil +} + +func (o OpenAIServiceType) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype/operationtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype/operationtype.go new file mode 100644 index 000000000..fb7104141 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype/operationtype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package operationtype +package operationtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L93-L98 +type OperationType struct { + Name string +} + +var ( + Index = OperationType{"index"} + + Create = OperationType{"create"} + + Update = OperationType{"update"} + + Delete = OperationType{"delete"} +) + +func (o OperationType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OperationType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index": + *o = Index + case "create": + *o = Create + case "update": + *o = Update + case "delete": + *o = Delete + default: + *o = OperationType{string(text)} + } + + return nil +} + +func (o OperationType) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator/operator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator/operator.go new file mode 100644 index 000000000..dc52047e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator/operator.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
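// Illustrative sketch (not part of the diff): because each enum wraps a single
// comparable string field, values like OperationType compare directly against
// the package-level names, so callers can dispatch on a reported bulk action
// with an ordinary switch. reportBulkOp is a hypothetical helper for this sketch:
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype"
)

// reportBulkOp summarizes a bulk action by comparing against the
// predeclared OperationType values.
func reportBulkOp(op operationtype.OperationType) string {
	switch op {
	case operationtype.Index, operationtype.Create:
		return "wrote document via " + op.String()
	case operationtype.Update:
		return "patched document"
	case operationtype.Delete:
		return "removed document"
	default:
		return "unrecognized op: " + op.String()
	}
}

func main() {
	fmt.Println(reportBulkOp(operationtype.Create))
}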
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package operator +package operator + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/Operator.ts#L22-L27 +type Operator struct { + Name string +} + +var ( + And = Operator{"and"} + + Or = Operator{"or"} +) + +func (o Operator) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *Operator) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "and": + *o = And + case "or": + *o = Or + default: + *o = Operator{string(text)} + } + + return nil +} + +func (o Operator) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype/optype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype/optype.go new file mode 100644 index 000000000..60b622ac3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype/optype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package optype +package optype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L254-L263 +type OpType struct { + Name string +} + +var ( + Index = OpType{"index"} + + Create = OpType{"create"} +) + +func (o OpType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OpType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index": + *o = Index + case "create": + *o = Create + default: + *o = OpType{string(text)} + } + + return nil +} + +func (o OpType) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go new file mode 100644 index 000000000..57f81ce25 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package pagerdutycontexttype +package pagerdutycontexttype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L67-L70 +type PagerDutyContextType struct { + Name string +} + +var ( + Link = PagerDutyContextType{"link"} + + Image = PagerDutyContextType{"image"} +) + +func (p PagerDutyContextType) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PagerDutyContextType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "link": + *p = Link + case "image": + *p = Image + default: + *p = PagerDutyContextType{string(text)} + } + + return nil +} + +func (p PagerDutyContextType) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go new file mode 100644 index 000000000..4a1aa8097 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package pagerdutyeventtype +package pagerdutyeventtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L72-L76 +type PagerDutyEventType struct { + Name string +} + +var ( + Trigger = PagerDutyEventType{"trigger"} + + Resolve = PagerDutyEventType{"resolve"} + + Acknowledge = PagerDutyEventType{"acknowledge"} +) + +func (p PagerDutyEventType) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PagerDutyEventType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "trigger": + *p = Trigger + case "resolve": + *p = Resolve + case "acknowledge": + *p = Acknowledge + default: + *p = PagerDutyEventType{string(text)} + } + + return nil +} + +func (p PagerDutyEventType) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext/painlesscontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext/painlesscontext.go new file mode 100644 index 000000000..91a22d307 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext/painlesscontext.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package painlesscontext +package painlesscontext + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scripts_painless_execute/types.ts#L57-L80 +type PainlessContext struct { + Name string +} + +var ( + Painlesstest = PainlessContext{"painless_test"} + + Filter = PainlessContext{"filter"} + + Score = PainlessContext{"score"} + + Booleanfield = PainlessContext{"boolean_field"} + + Datefield = PainlessContext{"date_field"} + + Doublefield = PainlessContext{"double_field"} + + Geopointfield = PainlessContext{"geo_point_field"} + + Ipfield = PainlessContext{"ip_field"} + + Keywordfield = PainlessContext{"keyword_field"} + + Longfield = PainlessContext{"long_field"} + + Compositefield = PainlessContext{"composite_field"} +) + +func (p PainlessContext) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PainlessContext) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "painless_test": + *p = Painlesstest + case "filter": + *p = Filter + case "score": + *p = Score + case "boolean_field": + *p = Booleanfield + case "date_field": + *p = Datefield + case "double_field": + *p = Doublefield + case "geo_point_field": + *p = Geopointfield + case "ip_field": + *p = Ipfield + case "keyword_field": + *p = Keywordfield + case "long_field": + *p = Longfield + case "composite_field": + *p = Compositefield + default: + *p = PainlessContext{string(text)} + } + + return nil +} + +func (p PainlessContext) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticencoder/phoneticencoder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticencoder/phoneticencoder.go new file mode 100644 index 000000000..e974eb327 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticencoder/phoneticencoder.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
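// Illustrative sketch (not part of the diff): since these generated types
// implement encoding.TextMarshaler, encoding/json serializes them as plain
// JSON strings rather than objects, which matches the wire format the
// Elasticsearch API expects. execRequest is a hypothetical body type for
// this sketch, using the PainlessContext enum defined above:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext"
)

// execRequest embeds an enum value directly in a JSON-serializable struct.
type execRequest struct {
	Context painlesscontext.PainlessContext `json:"context"`
}

func main() {
	b, _ := json.Marshal(execRequest{Context: painlesscontext.Filter})
	fmt.Println(string(b)) // {"context":"filter"}
}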
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package phoneticencoder +package phoneticencoder + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/phonetic-plugin.ts#L23-L36 +type PhoneticEncoder struct { + Name string +} + +var ( + Metaphone = PhoneticEncoder{"metaphone"} + + Doublemetaphone = PhoneticEncoder{"double_metaphone"} + + Soundex = PhoneticEncoder{"soundex"} + + Refinedsoundex = PhoneticEncoder{"refined_soundex"} + + Caverphone1 = PhoneticEncoder{"caverphone1"} + + Caverphone2 = PhoneticEncoder{"caverphone2"} + + Cologne = PhoneticEncoder{"cologne"} + + Nysiis = PhoneticEncoder{"nysiis"} + + Koelnerphonetik = PhoneticEncoder{"koelnerphonetik"} + + Haasephonetik = PhoneticEncoder{"haasephonetik"} + + Beidermorse = PhoneticEncoder{"beider_morse"} + + Daitchmokotoff = PhoneticEncoder{"daitch_mokotoff"} +) + +func (p PhoneticEncoder) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PhoneticEncoder) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "metaphone": + *p = Metaphone + case "double_metaphone": + *p = Doublemetaphone + case "soundex": + *p = Soundex + case "refined_soundex": + *p = Refinedsoundex + case "caverphone1": + *p = Caverphone1 + case "caverphone2": + *p = Caverphone2 + case "cologne": + *p = Cologne + case "nysiis": + *p = Nysiis + case "koelnerphonetik": + *p = Koelnerphonetik + case "haasephonetik": + *p = Haasephonetik + case "beider_morse": + *p = Beidermorse + case "daitch_mokotoff": + *p = Daitchmokotoff + default: + *p = PhoneticEncoder{string(text)} + } + + return nil +} + +func (p PhoneticEncoder) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go new file mode 100644 index 000000000..d8bbd9a51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package phoneticlanguage +package phoneticlanguage + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/phonetic-plugin.ts#L38-L51 +type PhoneticLanguage struct { + Name string +} + +var ( + Any = PhoneticLanguage{"any"} + + Common = PhoneticLanguage{"common"} + + Cyrillic = PhoneticLanguage{"cyrillic"} + + English = PhoneticLanguage{"english"} + + French = PhoneticLanguage{"french"} + + German = PhoneticLanguage{"german"} + + Hebrew = PhoneticLanguage{"hebrew"} + + Hungarian = PhoneticLanguage{"hungarian"} + + Polish = PhoneticLanguage{"polish"} + + Romanian = PhoneticLanguage{"romanian"} + + Russian = PhoneticLanguage{"russian"} + + Spanish = PhoneticLanguage{"spanish"} +) + +func (p PhoneticLanguage) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PhoneticLanguage) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "any": + *p = Any + case "common": + *p = Common + case "cyrillic": + *p = Cyrillic + case "english": + *p = English + case "french": + *p = French + case "german": + *p = German + case "hebrew": + *p = Hebrew + case "hungarian": + *p = Hungarian + case "polish": + *p = Polish + case "romanian": + *p = Romanian + case "russian": + *p = Russian + case "spanish": + *p = Spanish + default: + *p = PhoneticLanguage{string(text)} + } + + return nil +} + +func (p PhoneticLanguage) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype/phoneticnametype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype/phoneticnametype.go new file mode 100644 index 000000000..0a8e3eed8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype/phoneticnametype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package phoneticnametype +package phoneticnametype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/phonetic-plugin.ts#L53-L57 +type PhoneticNameType struct { + Name string +} + +var ( + Generic = PhoneticNameType{"generic"} + + Ashkenazi = PhoneticNameType{"ashkenazi"} + + Sephardic = PhoneticNameType{"sephardic"} +) + +func (p PhoneticNameType) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PhoneticNameType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "generic": + *p = Generic + case "ashkenazi": + *p = Ashkenazi + case "sephardic": + *p = Sephardic + default: + *p = PhoneticNameType{string(text)} + } + + return nil +} + +func (p PhoneticNameType) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticruletype/phoneticruletype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticruletype/phoneticruletype.go new file mode 100644 index 000000000..9e8d68e2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticruletype/phoneticruletype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
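// Illustrative sketch (not part of the diff): UnmarshalText lowercases its
// input and strips double quotes before matching, so a raw JSON token (quotes
// included) or a mixed-case spelling still resolves to the predeclared value.
// Shown here with the PhoneticNameType enum defined above:
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype"
)

func main() {
	var t phoneticnametype.PhoneticNameType
	// Raw quoted token in upper case still parses to the known value.
	_ = t.UnmarshalText([]byte(`"ASHKENAZI"`))
	fmt.Println(t == phoneticnametype.Ashkenazi) // true
}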
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package phoneticruletype +package phoneticruletype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/phonetic-plugin.ts#L59-L62 +type PhoneticRuleType struct { + Name string +} + +var ( + Approx = PhoneticRuleType{"approx"} + + Exact = PhoneticRuleType{"exact"} +) + +func (p PhoneticRuleType) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PhoneticRuleType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "approx": + *p = Approx + case "exact": + *p = Exact + default: + *p = PhoneticRuleType{string(text)} + } + + return nil +} + +func (p PhoneticRuleType) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions/pipelinesimulationstatusoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions/pipelinesimulationstatusoptions.go new file mode 100644 index 000000000..a418aabe7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions/pipelinesimulationstatusoptions.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package pipelinesimulationstatusoptions +package pipelinesimulationstatusoptions + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L51-L57 +type PipelineSimulationStatusOptions struct { + Name string +} + +var ( + Success = PipelineSimulationStatusOptions{"success"} + + Error = PipelineSimulationStatusOptions{"error"} + + Errorignored = PipelineSimulationStatusOptions{"error_ignored"} + + Skipped = PipelineSimulationStatusOptions{"skipped"} + + Dropped = PipelineSimulationStatusOptions{"dropped"} +) + +func (p PipelineSimulationStatusOptions) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PipelineSimulationStatusOptions) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "success": + *p = Success + case "error": + *p = Error + case "error_ignored": + *p = Errorignored + case "skipped": + *p = Skipped + case "dropped": + *p = Dropped + default: + *p = PipelineSimulationStatusOptions{string(text)} + } + + return nil +} + +func (p PipelineSimulationStatusOptions) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/policytype/policytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/policytype/policytype.go new file mode 100644 index 000000000..75ac0c227 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/policytype/policytype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
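// Illustrative sketch (not part of the diff): per-document outcomes from a
// simulate-pipeline call can be bucketed by comparing against the
// PipelineSimulationStatusOptions values defined above. countFailures is a
// hypothetical helper for this sketch:
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions"
)

// countFailures tallies statuses that indicate the document did not make it
// through the pipeline intact.
func countFailures(statuses []pipelinesimulationstatusoptions.PipelineSimulationStatusOptions) int {
	n := 0
	for _, s := range statuses {
		if s == pipelinesimulationstatusoptions.Error || s == pipelinesimulationstatusoptions.Dropped {
			n++
		}
	}
	return n
}

func main() {
	fmt.Println(countFailures([]pipelinesimulationstatusoptions.PipelineSimulationStatusOptions{
		pipelinesimulationstatusoptions.Success,
		pipelinesimulationstatusoptions.Error,
	})) // 1
}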
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package policytype +package policytype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/_types/Policy.ts#L28-L32 +type PolicyType struct { + Name string +} + +var ( + Geomatch = PolicyType{"geo_match"} + + Match = PolicyType{"match"} + + Range = PolicyType{"range"} +) + +func (p PolicyType) MarshalText() (text []byte, err error) { + return []byte(p.String()), nil +} + +func (p *PolicyType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "geo_match": + *p = Geomatch + case "match": + *p = Match + case "range": + *p = Range + default: + *p = PolicyType{string(text)} + } + + return nil +} + +func (p PolicyType) String() string { + return p.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/quantifier/quantifier.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/quantifier/quantifier.go new file mode 100644 index 000000000..9e1de8bd1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/quantifier/quantifier.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package quantifier +package quantifier + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L75-L78 +type Quantifier struct { + Name string +} + +var ( + Some = Quantifier{"some"} + + All = Quantifier{"all"} +) + +func (q Quantifier) MarshalText() (text []byte, err error) { + return []byte(q.String()), nil +} + +func (q *Quantifier) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "some": + *q = Some + case "all": + *q = All + default: + *q = Quantifier{string(text)} + } + + return nil +} + +func (q Quantifier) String() string { + return q.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go new file mode 100644 index 000000000..bf684d490 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package queryrulecriteriatype +package queryrulecriteriatype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/_types/QueryRuleset.ts#L95-L108 +type QueryRuleCriteriaType struct { + Name string +} + +var ( + Global = QueryRuleCriteriaType{"global"} + + Exact = QueryRuleCriteriaType{"exact"} + + Exactfuzzy = QueryRuleCriteriaType{"exact_fuzzy"} + + Fuzzy = QueryRuleCriteriaType{"fuzzy"} + + Prefix = QueryRuleCriteriaType{"prefix"} + + Suffix = QueryRuleCriteriaType{"suffix"} + + Contains = QueryRuleCriteriaType{"contains"} + + Lt = QueryRuleCriteriaType{"lt"} + + Lte = QueryRuleCriteriaType{"lte"} + + Gt = QueryRuleCriteriaType{"gt"} + + Gte = QueryRuleCriteriaType{"gte"} + + Always = QueryRuleCriteriaType{"always"} +) + +func (q QueryRuleCriteriaType) MarshalText() (text []byte, err error) { + return []byte(q.String()), nil +} + +func (q *QueryRuleCriteriaType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "global": + *q = Global + case "exact": + *q = Exact + case "exact_fuzzy": + *q = Exactfuzzy + case "fuzzy": + *q = Fuzzy + case "prefix": + *q = Prefix + case "suffix": + *q = Suffix + case "contains": + *q = Contains + case "lt": + *q = Lt + case "lte": + *q = Lte + case "gt": + *q = Gt + case "gte": + *q = Gte + case "always": + *q = Always + default: + *q = QueryRuleCriteriaType{string(text)} + } + + return nil +} + +func (q QueryRuleCriteriaType) String() string { + return q.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype/queryruletype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype/queryruletype.go new file mode 100644 index 000000000..bcdc9ee8c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype/queryruletype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package queryruletype +package queryruletype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/_types/QueryRuleset.ts#L60-L63 +type QueryRuleType struct { + Name string +} + +var ( + Pinned = QueryRuleType{"pinned"} + + Exclude = QueryRuleType{"exclude"} +) + +func (q QueryRuleType) MarshalText() (text []byte, err error) { + return []byte(q.String()), nil +} + +func (q *QueryRuleType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "pinned": + *q = Pinned + case "exclude": + *q = Exclude + default: + *q = QueryRuleType{string(text)} + } + + return nil +} + +func (q QueryRuleType) String() string { + return q.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation/rangerelation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation/rangerelation.go new file mode 100644 index 000000000..bbf25c821 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation/rangerelation.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package rangerelation +package rangerelation + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L184-L197 +type RangeRelation struct { + Name string +} + +var ( + Within = RangeRelation{"within"} + + Contains = RangeRelation{"contains"} + + Intersects = RangeRelation{"intersects"} +) + +func (r RangeRelation) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RangeRelation) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "within": + *r = Within + case "contains": + *r = Contains + case "intersects": + *r = Intersects + default: + *r = RangeRelation{string(text)} + } + + return nil +} + +func (r RangeRelation) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rankvectorelementtype/rankvectorelementtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rankvectorelementtype/rankvectorelementtype.go new file mode 100644 index 000000000..7749a1880 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rankvectorelementtype/rankvectorelementtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package rankvectorelementtype +package rankvectorelementtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L386-L390 +type RankVectorElementType struct { + Name string +} + +var ( + Byte = RankVectorElementType{"byte"} + + Float = RankVectorElementType{"float"} + + Bit = RankVectorElementType{"bit"} +) + +func (r RankVectorElementType) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RankVectorElementType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "byte": + *r = Byte + case "float": + *r = Float + case "bit": + *r = Bit + default: + *r = RankVectorElementType{string(text)} + } + + return nil +} + +func (r RankVectorElementType) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ratemode/ratemode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ratemode/ratemode.go new file mode 100644 index 000000000..f861de1eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ratemode/ratemode.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package ratemode +package ratemode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L275-L284 +type RateMode struct { + Name string +} + +var ( + Sum = RateMode{"sum"} + + Valuecount = RateMode{"value_count"} +) + +func (r RateMode) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RateMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "sum": + *r = Sum + case "value_count": + *r = Valuecount + default: + *r = RateMode{string(text)} + } + + return nil +} + +func (r RateMode) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh/refresh.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh/refresh.go new file mode 100644 index 000000000..a580817b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh/refresh.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package refresh +package refresh + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L265-L272 +type Refresh struct { + Name string +} + +var ( + True = Refresh{"true"} + + False = Refresh{"false"} + + Waitfor = Refresh{"wait_for"} +) + +func (r *Refresh) UnmarshalJSON(data []byte) error { + return r.UnmarshalText(data) +} + +func (r Refresh) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *Refresh) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "true": + *r = True + case "false": + *r = False + case "wait_for": + *r = Waitfor + default: + *r = Refresh{string(text)} + } + + return nil +} + +func (r Refresh) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go new file mode 100644 index 000000000..fedefee1b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege/remoteclusterprivilege.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
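// Illustrative sketch (not part of the diff): Refresh is the only enum in this
// excerpt that also implements json.Unmarshaler, presumably because the
// refresh parameter can arrive as the bare JSON booleans true/false as well as
// the string "wait_for". Both forms decode to the predeclared values:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh"
)

func main() {
	var r refresh.Refresh

	// A bare boolean token is routed through UnmarshalText and matched.
	_ = json.Unmarshal([]byte(`true`), &r)
	fmt.Println(r == refresh.True) // true

	// A quoted string works the same way, quotes stripped during matching.
	_ = json.Unmarshal([]byte(`"wait_for"`), &r)
	fmt.Println(r == refresh.Waitfor) // true
}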
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package remoteclusterprivilege +package remoteclusterprivilege + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L203-L215 +type RemoteClusterPrivilege struct { + Name string +} + +var ( + Monitorenrich = RemoteClusterPrivilege{"monitor_enrich"} + + Monitorstats = RemoteClusterPrivilege{"monitor_stats"} +) + +func (r RemoteClusterPrivilege) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RemoteClusterPrivilege) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "monitor_enrich": + *r = Monitorenrich + case "monitor_stats": + *r = Monitorstats + default: + *r = RemoteClusterPrivilege{string(text)} + } + + return nil +} + +func (r RemoteClusterPrivilege) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/responsecontenttype/responsecontenttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/responsecontenttype/responsecontenttype.go new file mode 100644 index 000000000..9530c5790 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/responsecontenttype/responsecontenttype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package responsecontenttype +package responsecontenttype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L103-L107 +type ResponseContentType struct { + Name string +} + +var ( + Json = ResponseContentType{"json"} + + Yaml = ResponseContentType{"yaml"} + + Text = ResponseContentType{"text"} +) + +func (r ResponseContentType) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *ResponseContentType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "json": + *r = Json + case "yaml": + *r = Yaml + case "text": + *r = Text + default: + *r = ResponseContentType{string(text)} + } + + return nil +} + +func (r ResponseContentType) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go new file mode 100644 index 000000000..4c2c5c928 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/restrictionworkflow/restrictionworkflow.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package restrictionworkflow +package restrictionworkflow + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleDescriptor.ts#L143-L146 +type RestrictionWorkflow struct { + Name string +} + +var ( + Searchapplicationquery = RestrictionWorkflow{"search_application_query"} +) + +func (r RestrictionWorkflow) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RestrictionWorkflow) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "search_application_query": + *r = Searchapplicationquery + default: + *r = RestrictionWorkflow{string(text)} + } + + return nil +} + +func (r RestrictionWorkflow) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result/result.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result/result.go new file mode 100644 index 000000000..f4a371324 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result/result.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package result +package result + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Result.ts#L20-L26 +type Result struct { + Name string +} + +var ( + Created = Result{"created"} + + Updated = Result{"updated"} + + Deleted = Result{"deleted"} + + Notfound = Result{"not_found"} + + Noop = Result{"noop"} +) + +func (r Result) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *Result) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "created": + *r = Created + case "updated": + *r = Updated + case "deleted": + *r = Deleted + case "not_found": + *r = Notfound + case "noop": + *r = Noop + default: + *r = Result{string(text)} + } + + return nil +} + +func (r Result) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition/resultposition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition/resultposition.go new file mode 100644 index 000000000..e188de5e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition/resultposition.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
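Because Result implements encoding.TextMarshaler and encoding.TextUnmarshaler, encoding/json moves it across the wire as a bare string. A minimal round-trip sketch; the indexResponse wrapper is a hypothetical stand-in, not a type from the client:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result"
    )

    // indexResponse is an illustrative stand-in for a response type
    // carrying a result.Result field.
    type indexResponse struct {
        Result result.Result `json:"result"`
    }

    func main() {
        // MarshalText is picked up by encoding/json, so the enum
        // serializes as the plain string "created".
        out, _ := json.Marshal(indexResponse{Result: result.Created})
        fmt.Println(string(out)) // {"result":"created"}

        // UnmarshalText maps known names back to the package-level
        // values, and the one-field struct is comparable with ==.
        var in indexResponse
        _ = json.Unmarshal([]byte(`{"result":"not_found"}`), &in)
        fmt.Println(in.Result == result.Notfound) // true
    }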
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package resultposition +package resultposition + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/search/types.ts#L20-L32 +type ResultPosition struct { + Name string +} + +var ( + Tail = ResultPosition{"tail"} + + Head = ResultPosition{"head"} +) + +func (r ResultPosition) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *ResultPosition) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "tail": + *r = Tail + case "head": + *r = Head + default: + *r = ResultPosition{string(text)} + } + + return nil +} + +func (r ResultPosition) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate/routingstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate/routingstate.go new file mode 100644 index 000000000..926ed0ebd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate/routingstate.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package routingstate +package routingstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L408-L429 +type RoutingState struct { + Name string +} + +var ( + Failed = RoutingState{"failed"} + + Started = RoutingState{"started"} + + Starting = RoutingState{"starting"} + + Stopped = RoutingState{"stopped"} + + Stopping = RoutingState{"stopping"} +) + +func (r RoutingState) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RoutingState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "failed": + *r = Failed + case "started": + *r = Started + case "starting": + *r = Starting + case "stopped": + *r = Stopped + case "stopping": + *r = Stopping + default: + *r = RoutingState{string(text)} + } + + return nil +} + +func (r RoutingState) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ruleaction/ruleaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ruleaction/ruleaction.go new file mode 100644 index 000000000..900cf17a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ruleaction/ruleaction.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package ruleaction +package ruleaction + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Rule.ts#L41-L50 +type RuleAction struct { + Name string +} + +var ( + Skipresult = RuleAction{"skip_result"} + + Skipmodelupdate = RuleAction{"skip_model_update"} +) + +func (r RuleAction) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RuleAction) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "skip_result": + *r = Skipresult + case "skip_model_update": + *r = Skipmodelupdate + default: + *r = RuleAction{string(text)} + } + + return nil +} + +func (r RuleAction) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go new file mode 100644 index 000000000..8a4368882 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package runtimefieldtype +package runtimefieldtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/RuntimeFields.ts#L62-L73 +type RuntimeFieldType struct { + Name string +} + +var ( + Boolean = RuntimeFieldType{"boolean"} + + Composite = RuntimeFieldType{"composite"} + + Date = RuntimeFieldType{"date"} + + Double = RuntimeFieldType{"double"} + + Geopoint = RuntimeFieldType{"geo_point"} + + Geoshape = RuntimeFieldType{"geo_shape"} + + Ip = RuntimeFieldType{"ip"} + + Keyword = RuntimeFieldType{"keyword"} + + Long = RuntimeFieldType{"long"} + + Lookup = RuntimeFieldType{"lookup"} +) + +func (r RuntimeFieldType) MarshalText() (text []byte, err error) { + return []byte(r.String()), nil +} + +func (r *RuntimeFieldType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "boolean": + *r = Boolean + case "composite": + *r = Composite + case "date": + *r = Date + case "double": + *r = Double + case "geo_point": + *r = Geopoint + case "geo_shape": + *r = Geoshape + case "ip": + *r = Ip + case "keyword": + *r = Keyword + case "long": + *r = Long + case "lookup": + *r = Lookup + default: + *r = RuntimeFieldType{string(text)} + } + + return nil +} + +func (r RuntimeFieldType) String() string { + return r.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go new file mode 100644 index 000000000..ee0b1aad5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
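The generated UnmarshalText is deliberately lenient: it lower-cases the input, strips double quotes, and keeps unrecognized names verbatim instead of returning an error. A short sketch against runtimefieldtype (the "half_float" value is made up here, standing in for a name this client build does not know):

    package main

    import (
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype"
    )

    func main() {
        var t runtimefieldtype.RuntimeFieldType

        // Matching is case-insensitive and tolerates stray JSON quotes.
        _ = t.UnmarshalText([]byte(`"GEO_POINT"`))
        fmt.Println(t == runtimefieldtype.Geopoint) // true

        // Unknown names are preserved as-is, so a newer server can emit
        // values without breaking an older client.
        _ = t.UnmarshalText([]byte("half_float"))
        fmt.Println(t.Name) // half_float
    }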
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sampleraggregationexecutionhint +package sampleraggregationexecutionhint + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L359-L372 +type SamplerAggregationExecutionHint struct { + Name string +} + +var ( + Map = SamplerAggregationExecutionHint{"map"} + + Globalordinals = SamplerAggregationExecutionHint{"global_ordinals"} + + Byteshash = SamplerAggregationExecutionHint{"bytes_hash"} +) + +func (s SamplerAggregationExecutionHint) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SamplerAggregationExecutionHint) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "map": + *s = Map + case "global_ordinals": + *s = Globalordinals + case "bytes_hash": + *s = Byteshash + default: + *s = SamplerAggregationExecutionHint{string(text)} + } + + return nil +} + +func (s SamplerAggregationExecutionHint) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode/scoremode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode/scoremode.go new file mode 100644 index 000000000..dbcfd2b59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode/scoremode.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package scoremode +package scoremode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/rescoring.ts#L64-L86 +type ScoreMode struct { + Name string +} + +var ( + Avg = ScoreMode{"avg"} + + Max = ScoreMode{"max"} + + Min = ScoreMode{"min"} + + Multiply = ScoreMode{"multiply"} + + Total = ScoreMode{"total"} +) + +func (s ScoreMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ScoreMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "avg": + *s = Avg + case "max": + *s = Max + case "min": + *s = Min + case "multiply": + *s = Multiply + case "total": + *s = Total + default: + *s = ScoreMode{string(text)} + } + + return nil +} + +func (s ScoreMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer/scorenormalizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer/scorenormalizer.go new file mode 100644 index 000000000..796f7ed4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer/scorenormalizer.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
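Since each enum is a comparable one-field struct, values work directly in switch statements and as map keys. A sketch of exhaustively matching scoremode, with a default arm for server-side values that survive the lenient unmarshalling ("rrf" below is just an arbitrary unknown name):

    package main

    import (
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode"
    )

    // known reports whether m is one of the values this build was
    // generated with; unrecognized wire values land in the default arm.
    func known(m scoremode.ScoreMode) bool {
        switch m {
        case scoremode.Avg, scoremode.Max, scoremode.Min,
            scoremode.Multiply, scoremode.Total:
            return true
        default:
            return false
        }
    }

    func main() {
        fmt.Println(known(scoremode.Total))                  // true
        fmt.Println(known(scoremode.ScoreMode{Name: "rrf"})) // false
    }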
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package scorenormalizer +package scorenormalizer + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L91-L95 +type ScoreNormalizer struct { + Name string +} + +var ( + None = ScoreNormalizer{"none"} + + Minmax = ScoreNormalizer{"minmax"} + + L2norm = ScoreNormalizer{"l2_norm"} +) + +func (s ScoreNormalizer) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ScoreNormalizer) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *s = None + case "minmax": + *s = Minmax + case "l2_norm": + *s = L2norm + default: + *s = ScoreNormalizer{string(text)} + } + + return nil +} + +func (s ScoreNormalizer) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage/scriptlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage/scriptlanguage.go new file mode 100644 index 000000000..d5e632793 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage/scriptlanguage.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package scriptlanguage +package scriptlanguage + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Scripting.ts#L25-L46 +type ScriptLanguage struct { + Name string +} + +var ( + Painless = ScriptLanguage{"painless"} + + Expression = ScriptLanguage{"expression"} + + Mustache = ScriptLanguage{"mustache"} + + Java = ScriptLanguage{"java"} +) + +func (s ScriptLanguage) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ScriptLanguage) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "painless": + *s = Painless + case "expression": + *s = Expression + case "mustache": + *s = Mustache + case "java": + *s = Java + default: + *s = ScriptLanguage{string(text)} + } + + return nil +} + +func (s ScriptLanguage) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptsorttype/scriptsorttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptsorttype/scriptsorttype.go new file mode 100644 index 000000000..1535ec0a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptsorttype/scriptsorttype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package scriptsorttype +package scriptsorttype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L80-L84 +type ScriptSortType struct { + Name string +} + +var ( + String = ScriptSortType{"string"} + + Number = ScriptSortType{"number"} + + Version = ScriptSortType{"version"} +) + +func (s ScriptSortType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ScriptSortType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "string": + *s = String + case "number": + *s = Number + case "version": + *s = Version + default: + *s = ScriptSortType{string(text)} + } + + return nil +} + +func (s ScriptSortType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype/searchtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype/searchtype.go new file mode 100644 index 000000000..97d2d7ddd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype/searchtype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package searchtype +package searchtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L274-L279 +type SearchType struct { + Name string +} + +var ( + Querythenfetch = SearchType{"query_then_fetch"} + + Dfsquerythenfetch = SearchType{"dfs_query_then_fetch"} +) + +func (s SearchType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SearchType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "query_then_fetch": + *s = Querythenfetch + case "dfs_query_then_fetch": + *s = Dfsquerythenfetch + default: + *s = SearchType{string(text)} + } + + return nil +} + +func (s SearchType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go new file mode 100644 index 000000000..3935279f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package segmentsortmissing +package segmentsortmissing + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSegmentSort.ts#L43-L46 +type SegmentSortMissing struct { + Name string +} + +var ( + Last = SegmentSortMissing{"_last"} + + First = SegmentSortMissing{"_first"} +) + +func (s SegmentSortMissing) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SegmentSortMissing) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "_last": + *s = Last + case "_first": + *s = First + default: + *s = SegmentSortMissing{string(text)} + } + + return nil +} + +func (s SegmentSortMissing) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmode/segmentsortmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmode/segmentsortmode.go new file mode 100644 index 000000000..4cda7d77a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmode/segmentsortmode.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package segmentsortmode +package segmentsortmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSegmentSort.ts#L36-L41 +type SegmentSortMode struct { + Name string +} + +var ( + Min = SegmentSortMode{"min"} + + Max = SegmentSortMode{"max"} +) + +func (s SegmentSortMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SegmentSortMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "min": + *s = Min + case "max": + *s = Max + default: + *s = SegmentSortMode{string(text)} + } + + return nil +} + +func (s SegmentSortMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortorder/segmentsortorder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortorder/segmentsortorder.go new file mode 100644 index 000000000..d03d7d1b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortorder/segmentsortorder.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package segmentsortorder +package segmentsortorder + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSegmentSort.ts#L29-L34 +type SegmentSortOrder struct { + Name string +} + +var ( + Asc = SegmentSortOrder{"asc"} + + Desc = SegmentSortOrder{"desc"} +) + +func (s SegmentSortOrder) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SegmentSortOrder) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "asc": + *s = Asc + case "desc": + *s = Desc + default: + *s = SegmentSortOrder{string(text)} + } + + return nil +} + +func (s SegmentSortOrder) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shapetype/shapetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shapetype/shapetype.go new file mode 100644 index 000000000..c1775db19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shapetype/shapetype.go @@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shapetype +package shapetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1492-L1495 +type ShapeType struct { + Name string +} + +var ( + Geoshape = ShapeType{"geo_shape"} + + Shape = ShapeType{"shape"} +) + +func (s ShapeType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShapeType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "geo_shape": + *s = Geoshape + case "shape": + *s = Shape + default: + *s = ShapeType{string(text)} + } + + return nil +} + +func (s ShapeType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate/shardroutingstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate/shardroutingstate.go new file mode 100644 index 000000000..1b7758026 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate/shardroutingstate.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shardroutingstate +package shardroutingstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L169-L174 +type ShardRoutingState struct { + Name string +} + +var ( + UNASSIGNED = ShardRoutingState{"UNASSIGNED"} + + INITIALIZING = ShardRoutingState{"INITIALIZING"} + + STARTED = ShardRoutingState{"STARTED"} + + RELOCATING = ShardRoutingState{"RELOCATING"} +) + +func (s ShardRoutingState) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardRoutingState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "unassigned": + *s = UNASSIGNED + case "initializing": + *s = INITIALIZING + case "started": + *s = STARTED + case "relocating": + *s = RELOCATING + default: + *s = ShardRoutingState{string(text)} + } + + return nil +} + +func (s ShardRoutingState) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go new file mode 100644 index 000000000..fc78fc054 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shardsstatsstage +package shardsstatsstage + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 +type ShardsStatsStage struct { + Name string +} + +var ( + DONE = ShardsStatsStage{"DONE"} + + FAILURE = ShardsStatsStage{"FAILURE"} + + FINALIZE = ShardsStatsStage{"FINALIZE"} + + INIT = ShardsStatsStage{"INIT"} + + STARTED = ShardsStatsStage{"STARTED"} +) + +func (s ShardsStatsStage) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardsStatsStage) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "done": + *s = DONE + case "failure": + *s = FAILURE + case "finalize": + *s = FINALIZE + case "init": + *s = INIT + case "started": + *s = STARTED + default: + *s = ShardsStatsStage{string(text)} + } + + return nil +} + +func (s ShardsStatsStage) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstate/shardstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstate/shardstate.go new file mode 100644 index 000000000..31b044e9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstate/shardstate.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shardstate +package shardstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L689-L698 +type ShardState struct { + Name string +} + +var ( + INIT = ShardState{"INIT"} + + SUCCESS = ShardState{"SUCCESS"} + + FAILED = ShardState{"FAILED"} + + ABORTED = ShardState{"ABORTED"} + + MISSING = ShardState{"MISSING"} + + WAITING = ShardState{"WAITING"} + + QUEUED = ShardState{"QUEUED"} + + PAUSEDFORNODEREMOVAL = ShardState{"PAUSED_FOR_NODE_REMOVAL"} +) + +func (s ShardState) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "init": + *s = INIT + case "success": + *s = SUCCESS + case "failed": + *s = FAILED + case "aborted": + *s = ABORTED + case "missing": + *s = MISSING + case "waiting": + *s = WAITING + case "queued": + *s = QUEUED + case "paused_for_node_removal": + *s = PAUSEDFORNODEREMOVAL + default: + *s = ShardState{string(text)} + } + + return nil +} + +func (s ShardState) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go new file mode 100644 index 000000000..f4c43cd76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shardstoreallocation +package shardstoreallocation + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L47-L51 +type ShardStoreAllocation struct { + Name string +} + +var ( + Primary = ShardStoreAllocation{"primary"} + + Replica = ShardStoreAllocation{"replica"} + + Unused = ShardStoreAllocation{"unused"} +) + +func (s ShardStoreAllocation) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardStoreAllocation) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "primary": + *s = Primary + case "replica": + *s = Replica + case "unused": + *s = Unused + default: + *s = ShardStoreAllocation{string(text)} + } + + return nil +} + +func (s ShardStoreAllocation) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus/shardstorestatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus/shardstorestatus.go new file mode 100644 index 000000000..6dfffea91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus/shardstorestatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shardstorestatus +package shardstorestatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L62-L71 +type ShardStoreStatus struct { + Name string +} + +var ( + Green = ShardStoreStatus{"green"} + + Yellow = ShardStoreStatus{"yellow"} + + Red = ShardStoreStatus{"red"} + + All = ShardStoreStatus{"all"} +) + +func (s ShardStoreStatus) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardStoreStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "green": + *s = Green + case "yellow": + *s = Yellow + case "red": + *s = Red + case "all": + *s = All + default: + *s = ShardStoreStatus{string(text)} + } + + return nil +} + +func (s ShardStoreStatus) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus/shutdownstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus/shutdownstatus.go new file mode 100644 index 000000000..691b4a61e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus/shutdownstatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shutdownstatus +package shutdownstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 +type ShutdownStatus struct { + Name string +} + +var ( + Notstarted = ShutdownStatus{"not_started"} + + Inprogress = ShutdownStatus{"in_progress"} + + Stalled = ShutdownStatus{"stalled"} + + Complete = ShutdownStatus{"complete"} +) + +func (s ShutdownStatus) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShutdownStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "not_started": + *s = Notstarted + case "in_progress": + *s = Inprogress + case "stalled": + *s = Stalled + case "complete": + *s = Complete + default: + *s = ShutdownStatus{string(text)} + } + + return nil +} + +func (s ShutdownStatus) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdowntype/shutdowntype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdowntype/shutdowntype.go new file mode 100644 index 000000000..389f3cca2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdowntype/shutdowntype.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package shutdowntype +package shutdowntype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 +type ShutdownType struct { + Name string +} + +var ( + Remove = ShutdownType{"remove"} + + Restart = ShutdownType{"restart"} +) + +func (s ShutdownType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShutdownType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "remove": + *s = Remove + case "restart": + *s = Restart + default: + *s = ShutdownType{string(text)} + } + + return nil +} + +func (s ShutdownType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort/snapshotsort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort/snapshotsort.go new file mode 100644 index 000000000..9043b64b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort/snapshotsort.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package snapshotsort +package snapshotsort + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 +type SnapshotSort struct { + Name string +} + +var ( + Starttime = SnapshotSort{"start_time"} + + Duration = SnapshotSort{"duration"} + + Name = SnapshotSort{"name"} + + Indexcount = SnapshotSort{"index_count"} + + Repository = SnapshotSort{"repository"} + + Shardcount = SnapshotSort{"shard_count"} + + Failedshardcount = SnapshotSort{"failed_shard_count"} +) + +func (s SnapshotSort) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SnapshotSort) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "start_time": + *s = Starttime + case "duration": + *s = Duration + case "name": + *s = Name + case "index_count": + *s = Indexcount + case "repository": + *s = Repository + case "shard_count": + *s = Shardcount + case "failed_shard_count": + *s = Failedshardcount + default: + *s = SnapshotSort{string(text)} + } + + return nil +} + +func (s SnapshotSort) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotstate/snapshotstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotstate/snapshotstate.go new file mode 100644 index 000000000..82c5e3c8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotstate/snapshotstate.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
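One quirk worth noting in snapshotsort: the package-level variable Name coexists with the struct field Name, since Go resolves the two in different scopes. The value methods also make these enums print as their wire names. A small sketch:

    package main

    import (
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort"
    )

    func main() {
        // fmt finds the String method, so values print as wire names.
        fmt.Println(snapshotsort.Failedshardcount) // failed_shard_count

        // The variable Name and the field Name do not collide.
        fmt.Println(snapshotsort.Name.Name) // name
    }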
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package snapshotstate +package snapshotstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotState.ts#L20-L31 +type SnapshotState struct { + Name string +} + +var ( + INPROGRESS = SnapshotState{"IN_PROGRESS"} + + SUCCESS = SnapshotState{"SUCCESS"} + + FAILED = SnapshotState{"FAILED"} + + PARTIAL = SnapshotState{"PARTIAL"} + + INCOMPATIBLE = SnapshotState{"INCOMPATIBLE"} +) + +func (s SnapshotState) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SnapshotState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "in_progress": + *s = INPROGRESS + case "success": + *s = SUCCESS + case "failed": + *s = FAILED + case "partial": + *s = PARTIAL + case "incompatible": + *s = INCOMPATIBLE + default: + *s = SnapshotState{string(text)} + } + + return nil +} + +func (s SnapshotState) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go new file mode 100644 index 000000000..ee206038d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package snapshotupgradestate +package snapshotupgradestate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L95-L100 +type SnapshotUpgradeState struct { + Name string +} + +var ( + Loadingoldstate = SnapshotUpgradeState{"loading_old_state"} + + Savingnewstate = SnapshotUpgradeState{"saving_new_state"} + + Stopped = SnapshotUpgradeState{"stopped"} + + Failed = SnapshotUpgradeState{"failed"} +) + +func (s SnapshotUpgradeState) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SnapshotUpgradeState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "loading_old_state": + *s = Loadingoldstate + case "saving_new_state": + *s = Savingnewstate + case "stopped": + *s = Stopped + case "failed": + *s = Failed + default: + *s = SnapshotUpgradeState{string(text)} + } + + return nil +} + +func (s SnapshotUpgradeState) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage/snowballlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage/snowballlanguage.go new file mode 100644 index 000000000..a6b0760a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage/snowballlanguage.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package snowballlanguage +package snowballlanguage + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/languages.ts#L20-L48 +type SnowballLanguage struct { + Name string +} + +var ( + Arabic = SnowballLanguage{"Arabic"} + + Armenian = SnowballLanguage{"Armenian"} + + Basque = SnowballLanguage{"Basque"} + + Catalan = SnowballLanguage{"Catalan"} + + Danish = SnowballLanguage{"Danish"} + + Dutch = SnowballLanguage{"Dutch"} + + English = SnowballLanguage{"English"} + + Estonian = SnowballLanguage{"Estonian"} + + Finnish = SnowballLanguage{"Finnish"} + + French = SnowballLanguage{"French"} + + German = SnowballLanguage{"German"} + + German2 = SnowballLanguage{"German2"} + + Hungarian = SnowballLanguage{"Hungarian"} + + Italian = SnowballLanguage{"Italian"} + + Irish = SnowballLanguage{"Irish"} + + Kp = SnowballLanguage{"Kp"} + + Lithuanian = SnowballLanguage{"Lithuanian"} + + Lovins = SnowballLanguage{"Lovins"} + + Norwegian = SnowballLanguage{"Norwegian"} + + Porter = SnowballLanguage{"Porter"} + + Portuguese = SnowballLanguage{"Portuguese"} + + Romanian = SnowballLanguage{"Romanian"} + + Russian = SnowballLanguage{"Russian"} + + Serbian = SnowballLanguage{"Serbian"} + + Spanish = SnowballLanguage{"Spanish"} + + Swedish = SnowballLanguage{"Swedish"} + + Turkish = SnowballLanguage{"Turkish"} +) + +func (s SnowballLanguage) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SnowballLanguage) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "arabic": + *s = Arabic + case "armenian": + *s = Armenian + case "basque": + *s = Basque + case "catalan": + *s = Catalan + case "danish": + *s = Danish + case "dutch": + *s = Dutch + case "english": + *s = English + case "estonian": + *s = Estonian + case "finnish": + *s = Finnish + case "french": + *s = French + case "german": + *s = German + case "german2": + *s = German2 + case "hungarian": + *s = Hungarian + case "italian": + *s = Italian + case "irish": + *s = Irish + case "kp": + *s = Kp + case "lithuanian": + *s = Lithuanian + case "lovins": + *s = Lovins + case "norwegian": + *s = Norwegian + case "porter": + *s = Porter + case "portuguese": + *s = Portuguese + case "romanian": + *s = Romanian + case "russian": + *s = Russian + case "serbian": + *s = Serbian + case "spanish": + *s = Spanish + case "swedish": + *s = Swedish + case "turkish": + *s = Turkish + default: + *s = SnowballLanguage{string(text)} + } + + return nil +} + +func (s SnowballLanguage) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode/sortmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode/sortmode.go new file mode 100644 index 000000000..0e2b04a8c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode/sortmode.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
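One quirk worth flagging in SnowballLanguage: the canonical wire names are capitalized ("Arabic", "French", "Porter", ...), but matching is performed on the lowercased input, so any casing of a known name normalizes to the canonical value. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage"
)

func main() {
	var lang snowballlanguage.SnowballLanguage

	// "FRENCH", "french", and "French" all normalize to the same value.
	if err := lang.UnmarshalText([]byte("FRENCH")); err != nil {
		panic(err)
	}
	fmt.Println(lang == snowballlanguage.French) // true
	fmt.Println(lang.String())                   // French
}
```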
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sortmode +package sortmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L108-L117 +type SortMode struct { + Name string +} + +var ( + Min = SortMode{"min"} + + Max = SortMode{"max"} + + Sum = SortMode{"sum"} + + Avg = SortMode{"avg"} + + Median = SortMode{"median"} +) + +func (s SortMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SortMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "min": + *s = Min + case "max": + *s = Max + case "sum": + *s = Sum + case "avg": + *s = Avg + case "median": + *s = Median + default: + *s = SortMode{string(text)} + } + + return nil +} + +func (s SortMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder/sortorder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder/sortorder.go new file mode 100644 index 000000000..bb338cf4d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder/sortorder.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sortorder +package sortorder + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L119-L128 +type SortOrder struct { + Name string +} + +var ( + Asc = SortOrder{"asc"} + + Desc = SortOrder{"desc"} +) + +func (s SortOrder) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SortOrder) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "asc": + *s = Asc + case "desc": + *s = Desc + default: + *s = SortOrder{string(text)} + } + + return nil +} + +func (s SortOrder) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go new file mode 100644 index 000000000..a6645db74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sourcefieldmode +package sourcefieldmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L67-L75 +type SourceFieldMode struct { + Name string +} + +var ( + Disabled = SourceFieldMode{"disabled"} + + Stored = SourceFieldMode{"stored"} + + Synthetic = SourceFieldMode{"synthetic"} +) + +func (s SourceFieldMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SourceFieldMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *s = Disabled + case "stored": + *s = Stored + case "synthetic": + *s = Synthetic + default: + *s = SourceFieldMode{string(text)} + } + + return nil +} + +func (s SourceFieldMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcemode/sourcemode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcemode/sourcemode.go new file mode 100644 index 000000000..32b3cbe02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcemode/sourcemode.go @@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sourcemode +package sourcemode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L522-L526 +type SourceMode struct { + Name string +} + +var ( + Disabled = SourceMode{"disabled"} + + Stored = SourceMode{"stored"} + + Synthetic = SourceMode{"synthetic"} +) + +func (s SourceMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SourceMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "disabled": + *s = Disabled + case "stored": + *s = Stored + case "synthetic": + *s = Synthetic + default: + *s = SourceMode{string(text)} + } + + return nil +} + +func (s SourceMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat/sqlformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat/sqlformat.go new file mode 100644 index 000000000..404c65c4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat/sqlformat.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package sqlformat +package sqlformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/sql/query/QuerySqlRequest.ts#L153-L161 +type SqlFormat struct { + Name string +} + +var ( + Csv = SqlFormat{"csv"} + + Json = SqlFormat{"json"} + + Tsv = SqlFormat{"tsv"} + + Txt = SqlFormat{"txt"} + + Yaml = SqlFormat{"yaml"} + + Cbor = SqlFormat{"cbor"} + + Smile = SqlFormat{"smile"} +) + +func (s SqlFormat) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SqlFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "csv": + *s = Csv + case "json": + *s = Json + case "tsv": + *s = Tsv + case "txt": + *s = Txt + case "yaml": + *s = Yaml + case "cbor": + *s = Cbor + case "smile": + *s = Smile + default: + *s = SqlFormat{string(text)} + } + + return nil +} + +func (s SqlFormat) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/statslevel/statslevel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/statslevel/statslevel.go new file mode 100644 index 000000000..95b24a328 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/statslevel/statslevel.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
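Because `String()` returns the exact wire name, these values can also be dropped anywhere a raw string is expected. A sketch of hand-building the `format` query parameter for the SQL endpoint; the URL construction is illustrative only, since the typed API normally assembles the request for you:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat"
)

func main() {
	// Choose a response format for the SQL endpoint.
	format := sqlformat.Csv

	params := url.Values{}
	params.Set("format", format.String())

	// Hand-built request path, purely for illustration.
	fmt.Println("/_sql?" + params.Encode()) // /_sql?format=csv
}
```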
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package statslevel +package statslevel + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/_types/stats.ts#L20-L24 +type StatsLevel struct { + Name string +} + +var ( + Cluster = StatsLevel{"cluster"} + + Indices = StatsLevel{"indices"} + + Shards = StatsLevel{"shards"} +) + +func (s StatsLevel) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *StatsLevel) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "cluster": + *s = Cluster + case "indices": + *s = Indices + case "shards": + *s = Shards + default: + *s = StatsLevel{string(text)} + } + + return nil +} + +func (s StatsLevel) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/storagetype/storagetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/storagetype/storagetype.go new file mode 100644 index 000000000..f2240ebee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/storagetype/storagetype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package storagetype +package storagetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L560-L588 +type StorageType struct { + Name string +} + +var ( + Fs = StorageType{"fs"} + + Niofs = StorageType{"niofs"} + + Mmapfs = StorageType{"mmapfs"} + + Hybridfs = StorageType{"hybridfs"} +) + +func (s StorageType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *StorageType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "fs": + *s = Fs + case "niofs": + *s = Niofs + case "mmapfs": + *s = Mmapfs + case "hybridfs": + *s = Hybridfs + default: + *s = StorageType{string(text)} + } + + return nil +} + +func (s StorageType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/stringdistance/stringdistance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/stringdistance/stringdistance.go new file mode 100644 index 000000000..88d5dcf1f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/stringdistance/stringdistance.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package stringdistance +package stringdistance + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L473-L494 +type StringDistance struct { + Name string +} + +var ( + Internal = StringDistance{"internal"} + + Dameraulevenshtein = StringDistance{"damerau_levenshtein"} + + Levenshtein = StringDistance{"levenshtein"} + + Jarowinkler = StringDistance{"jaro_winkler"} + + Ngram = StringDistance{"ngram"} +) + +func (s StringDistance) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *StringDistance) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "internal": + *s = Internal + case "damerau_levenshtein": + *s = Dameraulevenshtein + case "levenshtein": + *s = Levenshtein + case "jaro_winkler": + *s = Jarowinkler + case "ngram": + *s = Ngram + default: + *s = StringDistance{string(text)} + } + + return nil +} + +func (s StringDistance) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects/subobjects.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects/subobjects.go new file mode 100644 index 000000000..d1395a945 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects/subobjects.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package subobjects +package subobjects + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/TypeMapping.ts#L63-L74 +type Subobjects struct { + Name string +} + +var ( + True = Subobjects{"true"} + + False = Subobjects{"false"} + + Auto = Subobjects{"auto"} +) + +func (s *Subobjects) UnmarshalJSON(data []byte) error { + return s.UnmarshalText(data) +} + +func (s Subobjects) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *Subobjects) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "true": + *s = True + case "false": + *s = False + case "auto": + *s = Auto + default: + *s = Subobjects{string(text)} + } + + return nil +} + +func (s Subobjects) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode/suggestmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode/suggestmode.go new file mode 100644 index 000000000..d39d7bcfa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode/suggestmode.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
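Subobjects is the one enum in this stretch that also implements `UnmarshalJSON`. That extra method appears to exist because its wire values include the bare JSON booleans `true` and `false`: `encoding/json` only routes quoted strings through `UnmarshalText`, so `UnmarshalJSON` forwards the raw bytes, and the quote-stripping inside `UnmarshalText` handles both forms. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects"
)

func main() {
	var s subobjects.Subobjects

	// A bare JSON boolean never reaches UnmarshalText on its own;
	// UnmarshalJSON forwards the raw bytes so it still parses.
	_ = json.Unmarshal([]byte(`true`), &s)
	fmt.Println(s == subobjects.True) // true

	// Quoted strings work as well; the quotes are stripped inside
	// UnmarshalText before matching.
	_ = json.Unmarshal([]byte(`"auto"`), &s)
	fmt.Println(s == subobjects.Auto) // true
}
```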
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package suggestmode +package suggestmode + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L281-L294 +type SuggestMode struct { + Name string +} + +var ( + Missing = SuggestMode{"missing"} + + Popular = SuggestMode{"popular"} + + Always = SuggestMode{"always"} +) + +func (s SuggestMode) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SuggestMode) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "missing": + *s = Missing + case "popular": + *s = Popular + case "always": + *s = Always + default: + *s = SuggestMode{string(text)} + } + + return nil +} + +func (s SuggestMode) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestsort/suggestsort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestsort/suggestsort.go new file mode 100644 index 000000000..06533855f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestsort/suggestsort.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package suggestsort +package suggestsort + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L496-L505 +type SuggestSort struct { + Name string +} + +var ( + Score = SuggestSort{"score"} + + Frequency = SuggestSort{"frequency"} +) + +func (s SuggestSort) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SuggestSort) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "score": + *s = Score + case "frequency": + *s = Frequency + default: + *s = SuggestSort{string(text)} + } + + return nil +} + +func (s SuggestSort) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go new file mode 100644 index 000000000..01471a075 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod/syncjobtriggermethod.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package syncjobtriggermethod +package syncjobtriggermethod + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/SyncJob.ts#L48-L51 +type SyncJobTriggerMethod struct { + Name string +} + +var ( + Ondemand = SyncJobTriggerMethod{"on_demand"} + + Scheduled = SyncJobTriggerMethod{"scheduled"} +) + +func (s SyncJobTriggerMethod) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyncJobTriggerMethod) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "on_demand": + *s = Ondemand + case "scheduled": + *s = Scheduled + default: + *s = SyncJobTriggerMethod{string(text)} + } + + return nil +} + +func (s SyncJobTriggerMethod) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype/syncjobtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype/syncjobtype.go new file mode 100644 index 000000000..9260322a0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype/syncjobtype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package syncjobtype +package syncjobtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/SyncJob.ts#L42-L46 +type SyncJobType struct { + Name string +} + +var ( + Full = SyncJobType{"full"} + + Incremental = SyncJobType{"incremental"} + + Accesscontrol = SyncJobType{"access_control"} +) + +func (s SyncJobType) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyncJobType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "full": + *s = Full + case "incremental": + *s = Incremental + case "access_control": + *s = Accesscontrol + default: + *s = SyncJobType{string(text)} + } + + return nil +} + +func (s SyncJobType) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus/syncstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus/syncstatus.go new file mode 100644 index 000000000..53b4d83c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus/syncstatus.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package syncstatus +package syncstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L138-L146 +type SyncStatus struct { + Name string +} + +var ( + Canceling = SyncStatus{"canceling"} + + Canceled = SyncStatus{"canceled"} + + Completed = SyncStatus{"completed"} + + Error = SyncStatus{"error"} + + Inprogress = SyncStatus{"in_progress"} + + Pending = SyncStatus{"pending"} + + Suspended = SyncStatus{"suspended"} +) + +func (s SyncStatus) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyncStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "canceling": + *s = Canceling + case "canceled": + *s = Canceled + case "completed": + *s = Completed + case "error": + *s = Error + case "in_progress": + *s = Inprogress + case "pending": + *s = Pending + case "suspended": + *s = Suspended + default: + *s = SyncStatus{string(text)} + } + + return nil +} + +func (s SyncStatus) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat/synonymformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat/synonymformat.go new file mode 100644 index 000000000..af44dcdbd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat/synonymformat.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
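Since each enum value is a comparable one-field struct, values can be used directly as switch cases. A hypothetical status check for a connector sync job, assuming only the SyncStatus values defined above; the `describe` helper is illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus"
)

// describe is a hypothetical helper mapping a sync status to a verdict.
func describe(s syncstatus.SyncStatus) string {
	switch s {
	case syncstatus.Completed:
		return "done"
	case syncstatus.Error, syncstatus.Canceled:
		return "failed"
	case syncstatus.Suspended:
		return "paused"
	case syncstatus.Pending, syncstatus.Inprogress, syncstatus.Canceling:
		return "in flight"
	default:
		return "unknown status: " + s.String()
	}
}

func main() {
	fmt.Println(describe(syncstatus.Completed))                 // done
	fmt.Println(describe(syncstatus.SyncStatus{Name: "weird"})) // unknown status: weird
}
```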
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package synonymformat +package synonymformat + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L138-L141 +type SynonymFormat struct { + Name string +} + +var ( + Solr = SynonymFormat{"solr"} + + Wordnet = SynonymFormat{"wordnet"} +) + +func (s SynonymFormat) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SynonymFormat) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "solr": + *s = Solr + case "wordnet": + *s = Wordnet + default: + *s = SynonymFormat{string(text)} + } + + return nil +} + +func (s SynonymFormat) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go new file mode 100644 index 000000000..e805ade3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum/syntheticsourcekeepenum.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package syntheticsourcekeepenum +package syntheticsourcekeepenum + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/Property.ts#L100-L118 +type SyntheticSourceKeepEnum struct { + Name string +} + +var ( + None = SyntheticSourceKeepEnum{"none"} + + Arrays = SyntheticSourceKeepEnum{"arrays"} + + All = SyntheticSourceKeepEnum{"all"} +) + +func (s SyntheticSourceKeepEnum) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SyntheticSourceKeepEnum) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "none": + *s = None + case "arrays": + *s = Arrays + case "all": + *s = All + default: + *s = SyntheticSourceKeepEnum{string(text)} + } + + return nil +} + +func (s SyntheticSourceKeepEnum) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype/tasktype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype/tasktype.go new file mode 100644 index 000000000..b51369653 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype/tasktype.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktype +package tasktype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L20-L29 +type TaskType struct { + Name string +} + +var ( + Sparseembedding = TaskType{"sparse_embedding"} + + Textembedding = TaskType{"text_embedding"} + + Rerank = TaskType{"rerank"} + + Completion = TaskType{"completion"} + + Chatcompletion = TaskType{"chat_completion"} +) + +func (t TaskType) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "sparse_embedding": + *t = Sparseembedding + case "text_embedding": + *t = Textembedding + case "rerank": + *t = Rerank + case "completion": + *t = Completion + case "chat_completion": + *t = Chatcompletion + default: + *t = TaskType{string(text)} + } + + return nil +} + +func (t TaskType) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypealibabacloudai/tasktypealibabacloudai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypealibabacloudai/tasktypealibabacloudai.go new file mode 100644 index 000000000..04924279e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypealibabacloudai/tasktypealibabacloudai.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
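TaskType above is the generic inference task enum; the files that follow define narrower per-provider variants (TaskTypeAlibabaCloudAI, TaskTypeAmazonBedrock, and so on). Each is a distinct named type, so code typed against one provider's enum cannot silently accept another provider's values. A sketch, with an illustrative helper that is not part of the library:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic"
)

// requireAnthropic is a hypothetical function accepting only the
// Anthropic-specific task type.
func requireAnthropic(t tasktypeanthropic.TaskTypeAnthropic) string {
	return t.String()
}

func main() {
	// The generic enum serializes to its snake_case wire name.
	b, _ := tasktype.Chatcompletion.MarshalText()
	fmt.Println(string(b)) // chat_completion

	fmt.Println(requireAnthropic(tasktypeanthropic.Completion)) // completion

	// requireAnthropic(tasktype.Completion) would not compile:
	// tasktype.TaskType and tasktypeanthropic.TaskTypeAnthropic are
	// distinct types even though both wrap a string name.
}
```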
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypealibabacloudai +package tasktypealibabacloudai + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L36-L41 +type TaskTypeAlibabaCloudAI struct { + Name string +} + +var ( + Textembedding = TaskTypeAlibabaCloudAI{"text_embedding"} + + Rerank = TaskTypeAlibabaCloudAI{"rerank"} + + Completion = TaskTypeAlibabaCloudAI{"completion"} + + Sparseembedding = TaskTypeAlibabaCloudAI{"sparse_embedding"} +) + +func (t TaskTypeAlibabaCloudAI) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAlibabaCloudAI) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *t = Textembedding + case "rerank": + *t = Rerank + case "completion": + *t = Completion + case "sparse_embedding": + *t = Sparseembedding + default: + *t = TaskTypeAlibabaCloudAI{string(text)} + } + + return nil +} + +func (t TaskTypeAlibabaCloudAI) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonbedrock/tasktypeamazonbedrock.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonbedrock/tasktypeamazonbedrock.go new file mode 100644 index 000000000..25d41c1de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonbedrock/tasktypeamazonbedrock.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypeamazonbedrock +package tasktypeamazonbedrock + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L43-L46 +type TaskTypeAmazonBedrock struct { + Name string +} + +var ( + Textembedding = TaskTypeAmazonBedrock{"text_embedding"} + + Completion = TaskTypeAmazonBedrock{"completion"} +) + +func (t TaskTypeAmazonBedrock) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAmazonBedrock) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *t = Textembedding + case "completion": + *t = Completion + default: + *t = TaskTypeAmazonBedrock{string(text)} + } + + return nil +} + +func (t TaskTypeAmazonBedrock) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonsagemaker/tasktypeamazonsagemaker.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonsagemaker/tasktypeamazonsagemaker.go new file mode 100644 index 000000000..5140a3c70 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonsagemaker/tasktypeamazonsagemaker.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypeamazonsagemaker +package tasktypeamazonsagemaker + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L48-L54 +type TaskTypeAmazonSageMaker struct { + Name string +} + +var ( + Textembedding = TaskTypeAmazonSageMaker{"text_embedding"} + + Completion = TaskTypeAmazonSageMaker{"completion"} + + Chatcompletion = TaskTypeAmazonSageMaker{"chat_completion"} + + Sparseembedding = TaskTypeAmazonSageMaker{"sparse_embedding"} + + Rerank = TaskTypeAmazonSageMaker{"rerank"} +) + +func (t TaskTypeAmazonSageMaker) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAmazonSageMaker) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *t = Textembedding + case "completion": + *t = Completion + case "chat_completion": + *t = Chatcompletion + case "sparse_embedding": + *t = Sparseembedding + case "rerank": + *t = Rerank + default: + *t = TaskTypeAmazonSageMaker{string(text)} + } + + return nil +} + +func (t TaskTypeAmazonSageMaker) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic/tasktypeanthropic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic/tasktypeanthropic.go new file mode 100644 index 000000000..89d6f1e41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic/tasktypeanthropic.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypeanthropic +package tasktypeanthropic + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L56-L58 +type TaskTypeAnthropic struct { + Name string +} + +var ( + Completion = TaskTypeAnthropic{"completion"} +) + +func (t TaskTypeAnthropic) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAnthropic) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "completion": + *t = Completion + default: + *t = TaskTypeAnthropic{string(text)} + } + + return nil +} + +func (t TaskTypeAnthropic) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureaistudio/tasktypeazureaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureaistudio/tasktypeazureaistudio.go new file mode 100644 index 000000000..64e99d4a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureaistudio/tasktypeazureaistudio.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypeazureaistudio +package tasktypeazureaistudio + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L60-L63 +type TaskTypeAzureAIStudio struct { + Name string +} + +var ( + Textembedding = TaskTypeAzureAIStudio{"text_embedding"} + + Completion = TaskTypeAzureAIStudio{"completion"} +) + +func (t TaskTypeAzureAIStudio) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAzureAIStudio) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *t = Textembedding + case "completion": + *t = Completion + default: + *t = TaskTypeAzureAIStudio{string(text)} + } + + return nil +} + +func (t TaskTypeAzureAIStudio) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureopenai/tasktypeazureopenai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureopenai/tasktypeazureopenai.go new file mode 100644 index 000000000..f4ee2407d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureopenai/tasktypeazureopenai.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package tasktypeazureopenai +package tasktypeazureopenai + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L65-L68 +type TaskTypeAzureOpenAI struct { + Name string +} + +var ( + Textembedding = TaskTypeAzureOpenAI{"text_embedding"} + + Completion = TaskTypeAzureOpenAI{"completion"} +) + +func (t TaskTypeAzureOpenAI) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskTypeAzureOpenAI) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "text_embedding": + *t = Textembedding + case "completion": + *t = Completion + default: + *t = TaskTypeAzureOpenAI{string(text)} + } + + return nil +} + +func (t TaskTypeAzureOpenAI) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecohere/tasktypecohere.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecohere/tasktypecohere.go new file mode 100644 index 000000000..1bde1ba1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecohere/tasktypecohere.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypecohere
+package tasktypecohere
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L70-L74
+type TaskTypeCohere struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeCohere{"text_embedding"}
+
+	Rerank = TaskTypeCohere{"rerank"}
+
+	Completion = TaskTypeCohere{"completion"}
+)
+
+func (t TaskTypeCohere) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeCohere) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "rerank":
+		*t = Rerank
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeCohere{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeCohere) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecustom/tasktypecustom.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecustom/tasktypecustom.go
new file mode 100644
index 000000000..1c7eef583
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecustom/tasktypecustom.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypecustom
+package tasktypecustom
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L76-L81
+type TaskTypeCustom struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeCustom{"text_embedding"}
+
+	Sparseembedding = TaskTypeCustom{"sparse_embedding"}
+
+	Rerank = TaskTypeCustom{"rerank"}
+
+	Completion = TaskTypeCustom{"completion"}
+)
+
+func (t TaskTypeCustom) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeCustom) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "sparse_embedding":
+		*t = Sparseembedding
+	case "rerank":
+		*t = Rerank
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeCustom{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeCustom) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypedeepseek/tasktypedeepseek.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypedeepseek/tasktypedeepseek.go
new file mode 100644
index 000000000..fc7ac13b2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypedeepseek/tasktypedeepseek.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypedeepseek
+package tasktypedeepseek
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L83-L86
+type TaskTypeDeepSeek struct {
+	Name string
+}
+
+var (
+	Completion = TaskTypeDeepSeek{"completion"}
+
+	Chatcompletion = TaskTypeDeepSeek{"chat_completion"}
+)
+
+func (t TaskTypeDeepSeek) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeDeepSeek) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "completion":
+		*t = Completion
+	case "chat_completion":
+		*t = Chatcompletion
+	default:
+		*t = TaskTypeDeepSeek{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeDeepSeek) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelasticsearch/tasktypeelasticsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelasticsearch/tasktypeelasticsearch.go
new file mode 100644
index 000000000..eee49fca3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelasticsearch/tasktypeelasticsearch.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypeelasticsearch
+package tasktypeelasticsearch
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L88-L92
+type TaskTypeElasticsearch struct {
+	Name string
+}
+
+var (
+	Sparseembedding = TaskTypeElasticsearch{"sparse_embedding"}
+
+	Textembedding = TaskTypeElasticsearch{"text_embedding"}
+
+	Rerank = TaskTypeElasticsearch{"rerank"}
+)
+
+func (t TaskTypeElasticsearch) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeElasticsearch) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "sparse_embedding":
+		*t = Sparseembedding
+	case "text_embedding":
+		*t = Textembedding
+	case "rerank":
+		*t = Rerank
+	default:
+		*t = TaskTypeElasticsearch{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeElasticsearch) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelser/tasktypeelser.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelser/tasktypeelser.go
new file mode 100644
index 000000000..b9f1f486b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelser/tasktypeelser.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypeelser
+package tasktypeelser
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L94-L96
+type TaskTypeELSER struct {
+	Name string
+}
+
+var (
+	Sparseembedding = TaskTypeELSER{"sparse_embedding"}
+)
+
+func (t TaskTypeELSER) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeELSER) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "sparse_embedding":
+		*t = Sparseembedding
+	default:
+		*t = TaskTypeELSER{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeELSER) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegoogleaistudio/tasktypegoogleaistudio.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegoogleaistudio/tasktypegoogleaistudio.go
new file mode 100644
index 000000000..1dd000d4c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegoogleaistudio/tasktypegoogleaistudio.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypegoogleaistudio
+package tasktypegoogleaistudio
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L98-L101
+type TaskTypeGoogleAIStudio struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeGoogleAIStudio{"text_embedding"}
+
+	Completion = TaskTypeGoogleAIStudio{"completion"}
+)
+
+func (t TaskTypeGoogleAIStudio) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeGoogleAIStudio) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeGoogleAIStudio{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeGoogleAIStudio) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegooglevertexai/tasktypegooglevertexai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegooglevertexai/tasktypegooglevertexai.go
new file mode 100644
index 000000000..fad86448a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegooglevertexai/tasktypegooglevertexai.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypegooglevertexai
+package tasktypegooglevertexai
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L103-L106
+type TaskTypeGoogleVertexAI struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeGoogleVertexAI{"text_embedding"}
+
+	Rerank = TaskTypeGoogleVertexAI{"rerank"}
+)
+
+func (t TaskTypeGoogleVertexAI) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeGoogleVertexAI) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "rerank":
+		*t = Rerank
+	default:
+		*t = TaskTypeGoogleVertexAI{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeGoogleVertexAI) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypehuggingface/tasktypehuggingface.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypehuggingface/tasktypehuggingface.go
new file mode 100644
index 000000000..4bfb2970c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypehuggingface/tasktypehuggingface.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypehuggingface
+package tasktypehuggingface
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L108-L113
+type TaskTypeHuggingFace struct {
+	Name string
+}
+
+var (
+	Chatcompletion = TaskTypeHuggingFace{"chat_completion"}
+
+	Completion = TaskTypeHuggingFace{"completion"}
+
+	Rerank = TaskTypeHuggingFace{"rerank"}
+
+	Textembedding = TaskTypeHuggingFace{"text_embedding"}
+)
+
+func (t TaskTypeHuggingFace) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeHuggingFace) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "chat_completion":
+		*t = Chatcompletion
+	case "completion":
+		*t = Completion
+	case "rerank":
+		*t = Rerank
+	case "text_embedding":
+		*t = Textembedding
+	default:
+		*t = TaskTypeHuggingFace{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeHuggingFace) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypejinaai/tasktypejinaai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypejinaai/tasktypejinaai.go
new file mode 100644
index 000000000..2e604bee3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypejinaai/tasktypejinaai.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypejinaai
+package tasktypejinaai
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L31-L34
+type TaskTypeJinaAi struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeJinaAi{"text_embedding"}
+
+	Rerank = TaskTypeJinaAi{"rerank"}
+)
+
+func (t TaskTypeJinaAi) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeJinaAi) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "rerank":
+		*t = Rerank
+	default:
+		*t = TaskTypeJinaAi{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeJinaAi) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypemistral/tasktypemistral.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypemistral/tasktypemistral.go
new file mode 100644
index 000000000..2fd0c0afb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypemistral/tasktypemistral.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypemistral
+package tasktypemistral
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L115-L119
+type TaskTypeMistral struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeMistral{"text_embedding"}
+
+	Chatcompletion = TaskTypeMistral{"chat_completion"}
+
+	Completion = TaskTypeMistral{"completion"}
+)
+
+func (t TaskTypeMistral) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeMistral) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "chat_completion":
+		*t = Chatcompletion
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeMistral{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeMistral) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeopenai/tasktypeopenai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeopenai/tasktypeopenai.go
new file mode 100644
index 000000000..fcd007ef9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeopenai/tasktypeopenai.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypeopenai
+package tasktypeopenai
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L121-L125
+type TaskTypeOpenAI struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeOpenAI{"text_embedding"}
+
+	Chatcompletion = TaskTypeOpenAI{"chat_completion"}
+
+	Completion = TaskTypeOpenAI{"completion"}
+)
+
+func (t TaskTypeOpenAI) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeOpenAI) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "chat_completion":
+		*t = Chatcompletion
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeOpenAI{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeOpenAI) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypevoyageai/tasktypevoyageai.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypevoyageai/tasktypevoyageai.go
new file mode 100644
index 000000000..701c828c4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypevoyageai/tasktypevoyageai.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypevoyageai
+package tasktypevoyageai
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L127-L130
+type TaskTypeVoyageAI struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeVoyageAI{"text_embedding"}
+
+	Rerank = TaskTypeVoyageAI{"rerank"}
+)
+
+func (t TaskTypeVoyageAI) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeVoyageAI) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "rerank":
+		*t = Rerank
+	default:
+		*t = TaskTypeVoyageAI{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeVoyageAI) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypewatsonx/tasktypewatsonx.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypewatsonx/tasktypewatsonx.go
new file mode 100644
index 000000000..fce9b57de
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypewatsonx/tasktypewatsonx.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tasktypewatsonx
+package tasktypewatsonx
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/TaskType.ts#L132-L136
+type TaskTypeWatsonx struct {
+	Name string
+}
+
+var (
+	Textembedding = TaskTypeWatsonx{"text_embedding"}
+
+	Chatcompletion = TaskTypeWatsonx{"chat_completion"}
+
+	Completion = TaskTypeWatsonx{"completion"}
+)
+
+func (t TaskTypeWatsonx) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TaskTypeWatsonx) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "text_embedding":
+		*t = Textembedding
+	case "chat_completion":
+		*t = Chatcompletion
+	case "completion":
+		*t = Completion
+	default:
+		*t = TaskTypeWatsonx{string(text)}
+	}
+
+	return nil
+}
+
+func (t TaskTypeWatsonx) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint/tdigestexecutionhint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint/tdigestexecutionhint.go
new file mode 100644
index 000000000..096bbbc2b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint/tdigestexecutionhint.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tdigestexecutionhint
+package tdigestexecutionhint
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L257-L260
+type TDigestExecutionHint struct {
+	Name string
+}
+
+var (
+	Default = TDigestExecutionHint{"default"}
+
+	Highaccuracy = TDigestExecutionHint{"high_accuracy"}
+)
+
+func (t TDigestExecutionHint) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TDigestExecutionHint) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "default":
+		*t = Default
+	case "high_accuracy":
+		*t = Highaccuracy
+	default:
+		*t = TDigestExecutionHint{string(text)}
+	}
+
+	return nil
+}
+
+func (t TDigestExecutionHint) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/templateformat/templateformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/templateformat/templateformat.go
new file mode 100644
index 000000000..27b756d66
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/templateformat/templateformat.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package templateformat
+package templateformat
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleTemplate.ts#L22-L25
+type TemplateFormat struct {
+	Name string
+}
+
+var (
+	String = TemplateFormat{"string"}
+
+	Json = TemplateFormat{"json"}
+)
+
+func (t TemplateFormat) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TemplateFormat) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "string":
+		*t = String
+	case "json":
+		*t = Json
+	default:
+		*t = TemplateFormat{string(text)}
+	}
+
+	return nil
+}
+
+func (t TemplateFormat) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go
new file mode 100644
index 000000000..942ed5a3b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package termsaggregationcollectmode
+package termsaggregationcollectmode
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1056-L1065
+type TermsAggregationCollectMode struct {
+	Name string
+}
+
+var (
+	Depthfirst = TermsAggregationCollectMode{"depth_first"}
+
+	Breadthfirst = TermsAggregationCollectMode{"breadth_first"}
+)
+
+func (t TermsAggregationCollectMode) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TermsAggregationCollectMode) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "depth_first":
+		*t = Depthfirst
+	case "breadth_first":
+		*t = Breadthfirst
+	default:
+		*t = TermsAggregationCollectMode{string(text)}
+	}
+
+	return nil
+}
+
+func (t TermsAggregationCollectMode) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go
new file mode 100644
index 000000000..c1e11d6fd
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go
@@ -0,0 +1,65 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package termsaggregationexecutionhint
+package termsaggregationexecutionhint
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1067-L1072
+type TermsAggregationExecutionHint struct {
+	Name string
+}
+
+var (
+	Map = TermsAggregationExecutionHint{"map"}
+
+	Globalordinals = TermsAggregationExecutionHint{"global_ordinals"}
+
+	Globalordinalshash = TermsAggregationExecutionHint{"global_ordinals_hash"}
+
+	Globalordinalslowcardinality = TermsAggregationExecutionHint{"global_ordinals_low_cardinality"}
+)
+
+func (t TermsAggregationExecutionHint) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TermsAggregationExecutionHint) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "map":
+		*t = Map
+	case "global_ordinals":
+		*t = Globalordinals
+	case "global_ordinals_hash":
+		*t = Globalordinalshash
+	case "global_ordinals_low_cardinality":
+		*t = Globalordinalslowcardinality
+	default:
+		*t = TermsAggregationExecutionHint{string(text)}
+	}
+
+	return nil
+}
+
+func (t TermsAggregationExecutionHint) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption/termvectoroption.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption/termvectoroption.go
new file mode 100644
index 000000000..b1cb53e1a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption/termvectoroption.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package termvectoroption
+package termvectoroption
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/TermVectorOption.ts#L20-L28
+type TermVectorOption struct {
+	Name string
+}
+
+var (
+	No = TermVectorOption{"no"}
+
+	Yes = TermVectorOption{"yes"}
+
+	Withoffsets = TermVectorOption{"with_offsets"}
+
+	Withpositions = TermVectorOption{"with_positions"}
+
+	Withpositionsoffsets = TermVectorOption{"with_positions_offsets"}
+
+	Withpositionsoffsetspayloads = TermVectorOption{"with_positions_offsets_payloads"}
+
+	Withpositionspayloads = TermVectorOption{"with_positions_payloads"}
+)
+
+func (t TermVectorOption) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TermVectorOption) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "no":
+		*t = No
+	case "yes":
+		*t = Yes
+	case "with_offsets":
+		*t = Withoffsets
+	case "with_positions":
+		*t = Withpositions
+	case "with_positions_offsets":
+		*t = Withpositionsoffsets
+	case "with_positions_offsets_payloads":
+		*t = Withpositionsoffsetspayloads
+	case "with_positions_payloads":
+		*t = Withpositionspayloads
+	default:
+		*t = TermVectorOption{string(text)}
+	}
+
+	return nil
+}
+
+func (t TermVectorOption) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype/textquerytype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype/textquerytype.go
new file mode 100644
index 000000000..16fd599eb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype/textquerytype.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package textquerytype
+package textquerytype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L610-L636
+type TextQueryType struct {
+	Name string
+}
+
+var (
+	Bestfields = TextQueryType{"best_fields"}
+
+	Mostfields = TextQueryType{"most_fields"}
+
+	Crossfields = TextQueryType{"cross_fields"}
+
+	Phrase = TextQueryType{"phrase"}
+
+	Phraseprefix = TextQueryType{"phrase_prefix"}
+
+	Boolprefix = TextQueryType{"bool_prefix"}
+)
+
+func (t TextQueryType) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TextQueryType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "best_fields":
+		*t = Bestfields
+	case "most_fields":
+		*t = Mostfields
+	case "cross_fields":
+		*t = Crossfields
+	case "phrase":
+		*t = Phrase
+	case "phrase_prefix":
+		*t = Phraseprefix
+	case "bool_prefix":
+		*t = Boolprefix
+	default:
+		*t = TextQueryType{string(text)}
+	}
+
+	return nil
+}
+
+func (t TextQueryType) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/threadtype/threadtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/threadtype/threadtype.go
new file mode 100644
index 000000000..f43112ac3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/threadtype/threadtype.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package threadtype
+package threadtype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L296-L302
+type ThreadType struct {
+	Name string
+}
+
+var (
+	Cpu = ThreadType{"cpu"}
+
+	Wait = ThreadType{"wait"}
+
+	Block = ThreadType{"block"}
+
+	Gpu = ThreadType{"gpu"}
+
+	Mem = ThreadType{"mem"}
+)
+
+func (t ThreadType) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *ThreadType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "cpu":
+		*t = Cpu
+	case "wait":
+		*t = Wait
+	case "block":
+		*t = Block
+	case "gpu":
+		*t = Gpu
+	case "mem":
+		*t = Mem
+	default:
+		*t = ThreadType{string(text)}
+	}
+
+	return nil
+}
+
+func (t ThreadType) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go
new file mode 100644
index 000000000..282c29b78
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package timeseriesmetrictype
+package timeseriesmetrictype
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26
+type TimeSeriesMetricType struct {
+	Name string
+}
+
+var (
+	Gauge = TimeSeriesMetricType{"gauge"}
+
+	Counter = TimeSeriesMetricType{"counter"}
+
+	Summary = TimeSeriesMetricType{"summary"}
+
+	Histogram = TimeSeriesMetricType{"histogram"}
+
+	Position = TimeSeriesMetricType{"position"}
+)
+
+func (t TimeSeriesMetricType) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TimeSeriesMetricType) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "gauge":
+		*t = Gauge
+	case "counter":
+		*t = Counter
+	case "summary":
+		*t = Summary
+	case "histogram":
+		*t = Histogram
+	case "position":
+		*t = Position
+	default:
+		*t = TimeSeriesMetricType{string(text)}
+	}
+
+	return nil
+}
+
+func (t TimeSeriesMetricType) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit/timeunit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit/timeunit.go
new file mode 100644
index 000000000..a2ec6e222
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit/timeunit.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package timeunit
+package timeunit
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L69-L77
+type TimeUnit struct {
+	Name string
+}
+
+var (
+	Nanoseconds = TimeUnit{"nanos"}
+
+	Microseconds = TimeUnit{"micros"}
+
+	Milliseconds = TimeUnit{"ms"}
+
+	Seconds = TimeUnit{"s"}
+
+	Minutes = TimeUnit{"m"}
+
+	Hours = TimeUnit{"h"}
+
+	Days = TimeUnit{"d"}
+)
+
+func (t TimeUnit) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TimeUnit) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "nanos":
+		*t = Nanoseconds
+	case "micros":
+		*t = Microseconds
+	case "ms":
+		*t = Milliseconds
+	case "s":
+		*t = Seconds
+	case "m":
+		*t = Minutes
+	case "h":
+		*t = Hours
+	case "d":
+		*t = Days
+	default:
+		*t = TimeUnit{string(text)}
+	}
+
+	return nil
+}
+
+func (t TimeUnit) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar/tokenchar.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar/tokenchar.go
new file mode 100644
index 000000000..5eebf63db
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar/tokenchar.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tokenchar
+package tokenchar
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L60-L67
+type TokenChar struct {
+	Name string
+}
+
+var (
+	Letter = TokenChar{"letter"}
+
+	Digit = TokenChar{"digit"}
+
+	Whitespace = TokenChar{"whitespace"}
+
+	Punctuation = TokenChar{"punctuation"}
+
+	Symbol = TokenChar{"symbol"}
+
+	Custom = TokenChar{"custom"}
+)
+
+func (t TokenChar) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TokenChar) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "letter":
+		*t = Letter
+	case "digit":
+		*t = Digit
+	case "whitespace":
+		*t = Whitespace
+	case "punctuation":
+		*t = Punctuation
+	case "symbol":
+		*t = Symbol
+	case "custom":
+		*t = Custom
+	default:
+		*t = TokenChar{string(text)}
+	}
+
+	return nil
+}
+
+func (t TokenChar) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go
new file mode 100644
index 000000000..513803917
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go
@@ -0,0 +1,61 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+// Package tokenizationtruncate
+package tokenizationtruncate
+
+import "strings"
+
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L369-L373
+type TokenizationTruncate struct {
+	Name string
+}
+
+var (
+	First = TokenizationTruncate{"first"}
+
+	Second = TokenizationTruncate{"second"}
+
+	None = TokenizationTruncate{"none"}
+)
+
+func (t TokenizationTruncate) MarshalText() (text []byte, err error) {
+	return []byte(t.String()), nil
+}
+
+func (t *TokenizationTruncate) UnmarshalText(text []byte) error {
+	switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") {
+
+	case "first":
+		*t = First
+	case "second":
+		*t = Second
+	case "none":
+		*t = None
+	default:
+		*t = TokenizationTruncate{string(text)}
+	}
+
+	return nil
+}
+
+func (t TokenizationTruncate) String() string {
+	return t.Name
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go
new file mode 100644
index 000000000..24270db12
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package totalhitsrelation +package totalhitsrelation + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L100-L105 +type TotalHitsRelation struct { + Name string +} + +var ( + Eq = TotalHitsRelation{"eq"} + + Gte = TotalHitsRelation{"gte"} +) + +func (t TotalHitsRelation) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TotalHitsRelation) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "eq": + *t = Eq + case "gte": + *t = Gte + default: + *t = TotalHitsRelation{string(text)} + } + + return nil +} + +func (t TotalHitsRelation) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go new file mode 100644 index 000000000..54142c4b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package trainedmodeltype +package trainedmodeltype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L315-L329 +type TrainedModelType struct { + Name string +} + +var ( + Treeensemble = TrainedModelType{"tree_ensemble"} + + Langident = TrainedModelType{"lang_ident"} + + Pytorch = TrainedModelType{"pytorch"} +) + +func (t TrainedModelType) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TrainedModelType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "tree_ensemble": + *t = Treeensemble + case "lang_ident": + *t = Langident + case "pytorch": + *t = Pytorch + default: + *t = TrainedModelType{string(text)} + } + + return nil +} + +func (t TrainedModelType) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority/trainingpriority.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority/trainingpriority.go new file mode 100644 index 000000000..479c21fad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority/trainingpriority.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package trainingpriority +package trainingpriority + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L365-L368 +type TrainingPriority struct { + Name string +} + +var ( + Normal = TrainingPriority{"normal"} + + Low = TrainingPriority{"low"} +) + +func (t TrainingPriority) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TrainingPriority) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "normal": + *t = Normal + case "low": + *t = Low + default: + *t = TrainingPriority{string(text)} + } + + return nil +} + +func (t TrainingPriority) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/translogdurability/translogdurability.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/translogdurability/translogdurability.go new file mode 100644 index 000000000..99b5944e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/translogdurability/translogdurability.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package translogdurability +package translogdurability + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L392-L407 +type TranslogDurability struct { + Name string +} + +var ( + Request = TranslogDurability{"request"} + + Async = TranslogDurability{"async"} +) + +func (t TranslogDurability) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TranslogDurability) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "request": + *t = Request + case "async": + *t = Async + default: + *t = TranslogDurability{string(text)} + } + + return nil +} + +func (t TranslogDurability) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ttesttype/ttesttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ttesttype/ttesttype.go new file mode 100644 index 000000000..8dd77eff3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ttesttype/ttesttype.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package ttesttype +package ttesttype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L354-L367 +type TTestType struct { + Name string +} + +var ( + Paired = TTestType{"paired"} + + Homoscedastic = TTestType{"homoscedastic"} + + Heteroscedastic = TTestType{"heteroscedastic"} +) + +func (t TTestType) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TTestType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "paired": + *t = Paired + case "homoscedastic": + *t = Homoscedastic + case "heteroscedastic": + *t = Heteroscedastic + default: + *t = TTestType{string(text)} + } + + return nil +} + +func (t TTestType) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_/type_.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_/type_.go new file mode 100644 index 000000000..dcd6f5467 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_/type_.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package type_ +package type_ + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/_types/types.ts#L20-L24 +type Type struct { + Name string +} + +var ( + Restart = Type{"restart"} + + Remove = Type{"remove"} + + Replace = Type{"replace"} +) + +func (t Type) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *Type) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "restart": + *t = Restart + case "remove": + *t = Remove + case "replace": + *t = Replace + default: + *t = Type{string(text)} + } + + return nil +} + +func (t Type) String() string { + return t.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go new file mode 100644 index 000000000..e53936aa6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package unassignedinformationreason +package unassignedinformationreason + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L138-L157 +type UnassignedInformationReason struct { + Name string +} + +var ( + INDEXCREATED = UnassignedInformationReason{"INDEX_CREATED"} + + CLUSTERRECOVERED = UnassignedInformationReason{"CLUSTER_RECOVERED"} + + INDEXREOPENED = UnassignedInformationReason{"INDEX_REOPENED"} + + DANGLINGINDEXIMPORTED = UnassignedInformationReason{"DANGLING_INDEX_IMPORTED"} + + NEWINDEXRESTORED = UnassignedInformationReason{"NEW_INDEX_RESTORED"} + + EXISTINGINDEXRESTORED = UnassignedInformationReason{"EXISTING_INDEX_RESTORED"} + + REPLICAADDED = UnassignedInformationReason{"REPLICA_ADDED"} + + ALLOCATIONFAILED = UnassignedInformationReason{"ALLOCATION_FAILED"} + + NODELEFT = UnassignedInformationReason{"NODE_LEFT"} + + REROUTECANCELLED = UnassignedInformationReason{"REROUTE_CANCELLED"} + + REINITIALIZED = UnassignedInformationReason{"REINITIALIZED"} + + REALLOCATEDREPLICA = UnassignedInformationReason{"REALLOCATED_REPLICA"} + + PRIMARYFAILED = UnassignedInformationReason{"PRIMARY_FAILED"} + + FORCEDEMPTYPRIMARY = UnassignedInformationReason{"FORCED_EMPTY_PRIMARY"} + + MANUALALLOCATION = UnassignedInformationReason{"MANUAL_ALLOCATION"} +) + +func (u UnassignedInformationReason) MarshalText() (text []byte, err error) { + return []byte(u.String()), nil +} + +func (u *UnassignedInformationReason) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index_created": + *u = INDEXCREATED + case "cluster_recovered": + *u = CLUSTERRECOVERED + case "index_reopened": + *u = INDEXREOPENED + case "dangling_index_imported": + *u = DANGLINGINDEXIMPORTED + case "new_index_restored": + *u = NEWINDEXRESTORED + case "existing_index_restored": + *u = EXISTINGINDEXRESTORED + case "replica_added": + *u = REPLICAADDED + case "allocation_failed": + *u = ALLOCATIONFAILED + case "node_left": + *u = NODELEFT + case "reroute_cancelled": + *u = REROUTECANCELLED + case "reinitialized": + *u = REINITIALIZED + case "reallocated_replica": + *u = REALLOCATEDREPLICA + case "primary_failed": + *u = PRIMARYFAILED + case "forced_empty_primary": + *u = FORCEDEMPTYPRIMARY + case "manual_allocation": + *u = MANUALALLOCATION + default: + *u = UnassignedInformationReason{string(text)} + } + + return nil +} + +func (u UnassignedInformationReason) String() string { + return u.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/useragentproperty/useragentproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/useragentproperty/useragentproperty.go new file mode 100644 index 000000000..0bc73350c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/useragentproperty/useragentproperty.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
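Review note: UnassignedInformationReason is the one enum in this batch whose canonical names are upper-case, while its UnmarshalText still lower-cases the input before matching. Matching is therefore case-insensitive, and a lower-case wire value normalizes to the upper-case canonical form on re-marshal. A small sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason"
)

func main() {
	var r unassignedinformationreason.UnassignedInformationReason

	// Lower-case input matches the switch and yields the canonical value.
	_ = json.Unmarshal([]byte(`"node_left"`), &r)
	fmt.Println(r.Name) // NODE_LEFT

	// Marshalling emits the canonical (upper-case) form.
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // "NODE_LEFT"
}
```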
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package useragentproperty +package useragentproperty + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L548-L554 +type UserAgentProperty struct { + Name string +} + +var ( + Name = UserAgentProperty{"name"} + + Os = UserAgentProperty{"os"} + + Device = UserAgentProperty{"device"} + + Original = UserAgentProperty{"original"} + + Version = UserAgentProperty{"version"} +) + +func (u UserAgentProperty) MarshalText() (text []byte, err error) { + return []byte(u.String()), nil +} + +func (u *UserAgentProperty) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "name": + *u = Name + case "os": + *u = Os + case "device": + *u = Device + case "original": + *u = Original + case "version": + *u = Version + default: + *u = UserAgentProperty{string(text)} + } + + return nil +} + +func (u UserAgentProperty) String() string { + return u.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype/valuetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype/valuetype.go new file mode 100644 index 000000000..304e5ef2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype/valuetype.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package valuetype +package valuetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L459-L470 +type ValueType struct { + Name string +} + +var ( + String = ValueType{"string"} + + Long = ValueType{"long"} + + Double = ValueType{"double"} + + Number = ValueType{"number"} + + Date = ValueType{"date"} + + Datenanos = ValueType{"date_nanos"} + + Ip = ValueType{"ip"} + + Numeric = ValueType{"numeric"} + + Geopoint = ValueType{"geo_point"} + + Boolean = ValueType{"boolean"} +) + +func (v ValueType) MarshalText() (text []byte, err error) { + return []byte(v.String()), nil +} + +func (v *ValueType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "string": + *v = String + case "long": + *v = Long + case "double": + *v = Double + case "number": + *v = Number + case "date": + *v = Date + case "date_nanos": + *v = Datenanos + case "ip": + *v = Ip + case "numeric": + *v = Numeric + case "geo_point": + *v = Geopoint + case "boolean": + *v = Boolean + default: + *v = ValueType{string(text)} + } + + return nil +} + +func (v ValueType) String() string { + return v.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype/versiontype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype/versiontype.go new file mode 100644 index 000000000..e68f1678c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype/versiontype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package versiontype +package versiontype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L100-L119 +type VersionType struct { + Name string +} + +var ( + Internal = VersionType{"internal"} + + External = VersionType{"external"} + + Externalgte = VersionType{"external_gte"} + + Force = VersionType{"force"} +) + +func (v VersionType) MarshalText() (text []byte, err error) { + return []byte(v.String()), nil +} + +func (v *VersionType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "internal": + *v = Internal + case "external": + *v = External + case "external_gte": + *v = Externalgte + case "force": + *v = Force + default: + *v = VersionType{string(text)} + } + + return nil +} + +func (v VersionType) String() string { + return v.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype/voyageaiservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype/voyageaiservicetype.go new file mode 100644 index 000000000..e7ac906d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype/voyageaiservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package voyageaiservicetype +package voyageaiservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1680-L1682 +type VoyageAIServiceType struct { + Name string +} + +var ( + Voyageai = VoyageAIServiceType{"voyageai"} +) + +func (v VoyageAIServiceType) MarshalText() (text []byte, err error) { + return []byte(v.String()), nil +} + +func (v *VoyageAIServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "voyageai": + *v = Voyageai + default: + *v = VoyageAIServiceType{string(text)} + } + + return nil +} + +func (v VoyageAIServiceType) String() string { + return v.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/waitforevents/waitforevents.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/waitforevents/waitforevents.go new file mode 100644 index 000000000..428ad0b12 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/waitforevents/waitforevents.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package waitforevents +package waitforevents + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L310-L317 +type WaitForEvents struct { + Name string +} + +var ( + Immediate = WaitForEvents{"immediate"} + + Urgent = WaitForEvents{"urgent"} + + High = WaitForEvents{"high"} + + Normal = WaitForEvents{"normal"} + + Low = WaitForEvents{"low"} + + Languid = WaitForEvents{"languid"} +) + +func (w WaitForEvents) MarshalText() (text []byte, err error) { + return []byte(w.String()), nil +} + +func (w *WaitForEvents) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "immediate": + *w = Immediate + case "urgent": + *w = Urgent + case "high": + *w = High + case "normal": + *w = Normal + case "low": + *w = Low + case "languid": + *w = Languid + default: + *w = WaitForEvents{string(text)} + } + + return nil +} + +func (w WaitForEvents) String() string { + return w.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watcherstate/watcherstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watcherstate/watcherstate.go new file mode 100644 index 000000000..97bcdfa30 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watcherstate/watcherstate.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package watcherstate +package watcherstate + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stats/types.ts#L26-L31 +type WatcherState struct { + Name string +} + +var ( + Stopped = WatcherState{"stopped"} + + Starting = WatcherState{"starting"} + + Started = WatcherState{"started"} + + Stopping = WatcherState{"stopping"} +) + +func (w WatcherState) MarshalText() (text []byte, err error) { + return []byte(w.String()), nil +} + +func (w *WatcherState) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "stopped": + *w = Stopped + case "starting": + *w = Starting + case "started": + *w = Started + case "stopping": + *w = Stopping + default: + *w = WatcherState{string(text)} + } + + return nil +} + +func (w WatcherState) String() string { + return w.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype/watsonxservicetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype/watsonxservicetype.go new file mode 100644 index 000000000..a36260716 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype/watsonxservicetype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package watsonxservicetype +package watsonxservicetype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1730-L1732 +type WatsonxServiceType struct { + Name string +} + +var ( + Watsonxai = WatsonxServiceType{"watsonxai"} +) + +func (w WatsonxServiceType) MarshalText() (text []byte, err error) { + return []byte(w.String()), nil +} + +func (w *WatsonxServiceType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "watsonxai": + *w = Watsonxai + default: + *w = WatsonxServiceType{string(text)} + } + + return nil +} + +func (w WatsonxServiceType) String() string { + return w.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/xpackcategory/xpackcategory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/xpackcategory/xpackcategory.go new file mode 100644 index 000000000..52ad31117 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/xpackcategory/xpackcategory.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package xpackcategory +package xpackcategory + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/XPackInfoRequest.ts#L58-L62 +type XPackCategory struct { + Name string +} + +var ( + Build = XPackCategory{"build"} + + Features = XPackCategory{"features"} + + License = XPackCategory{"license"} +) + +func (x XPackCategory) MarshalText() (text []byte, err error) { + return []byte(x.String()), nil +} + +func (x *XPackCategory) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "build": + *x = Build + case "features": + *x = Features + case "license": + *x = License + default: + *x = XPackCategory{string(text)} + } + + return nil +} + +func (x XPackCategory) String() string { + return x.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery/zerotermsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery/zerotermsquery.go new file mode 100644 index 000000000..1780738be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery/zerotermsquery.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Package zerotermsquery +package zerotermsquery + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L638-L647 +type ZeroTermsQuery struct { + Name string +} + +var ( + All = ZeroTermsQuery{"all"} + + None = ZeroTermsQuery{"none"} +) + +func (z ZeroTermsQuery) MarshalText() (text []byte, err error) { + return []byte(z.String()), nil +} + +func (z *ZeroTermsQuery) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "all": + *z = All + case "none": + *z = None + default: + *z = ZeroTermsQuery{string(text)} + } + + return nil +} + +func (z ZeroTermsQuery) String() string { + return z.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitmillis.go new file mode 100644 index 000000000..b7cbef9d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitmillis.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EpochTimeUnitMillis type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L40-L40 +type EpochTimeUnitMillis int64 + +type EpochTimeUnitMillisVariant interface { + EpochTimeUnitMillisCaster() *EpochTimeUnitMillis +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitseconds.go new file mode 100644 index 000000000..d07a552e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/epochtimeunitseconds.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EpochTimeUnitSeconds type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Time.ts#L40-L40 +type EpochTimeUnitSeconds int64 + +type EpochTimeUnitSecondsVariant interface { + EpochTimeUnitSecondsCaster() *EpochTimeUnitSeconds +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eql.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eql.go new file mode 100644 index 000000000..23604bafc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eql.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
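Review note: the EpochTimeUnitMillis/EpochTimeUnitSeconds aliases above each come with a *Variant interface whose single Caster method appears intended to let caller-defined types be passed to the typed API as that alias. A sketch of satisfying the interface; myTimestamp is a hypothetical caller-side type, not something from the library:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// myTimestamp is a hypothetical caller-side wrapper around an epoch-millis value.
type myTimestamp struct {
	millis types.EpochTimeUnitMillis
}

// EpochTimeUnitMillisCaster satisfies types.EpochTimeUnitMillisVariant.
func (m *myTimestamp) EpochTimeUnitMillisCaster() *types.EpochTimeUnitMillis {
	return &m.millis
}

func main() {
	var v types.EpochTimeUnitMillisVariant = &myTimestamp{millis: 1700000000000}
	fmt.Println(*v.EpochTimeUnitMillisCaster()) // 1700000000000
}
```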
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Eql type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L361-L364 +type Eql struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + Features EqlFeatures `json:"features"` + Queries map[string]XpackQuery `json:"queries"` +} + +func (s *Eql) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "features": + if err := dec.Decode(&s.Features); err != nil { + return fmt.Errorf("%s | %w", "Features", err) + } + + case "queries": + if s.Queries == nil { + s.Queries = make(map[string]XpackQuery, 0) + } + if err := dec.Decode(&s.Queries); err != nil { + return fmt.Errorf("%s | %w", "Queries", err) + } + + } + } + return nil +} + +// NewEql returns a Eql. +func NewEql() *Eql { + r := &Eql{ + Queries: make(map[string]XpackQuery), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatures.go new file mode 100644 index 000000000..13d95535b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatures.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
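Review note: Eql.UnmarshalJSON above decodes its fields leniently; the boolean fields accept either JSON booleans or their string forms, the latter parsed with strconv.ParseBool. A sketch of that tolerance, with an illustrative payload:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "available" arrives as a string, "enabled" as a real boolean.
	payload := []byte(`{"available":"true","enabled":false,"queries":{}}`)

	eql := types.NewEql()
	if err := json.Unmarshal(payload, eql); err != nil {
		panic(err)
	}
	fmt.Println(eql.Available, eql.Enabled) // true false
}
```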
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlFeatures type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L96-L104 +type EqlFeatures struct { + Event uint `json:"event"` + Join uint `json:"join"` + Joins EqlFeaturesJoin `json:"joins"` + Keys EqlFeaturesKeys `json:"keys"` + Pipes EqlFeaturesPipes `json:"pipes"` + Sequence uint `json:"sequence"` + Sequences EqlFeaturesSequences `json:"sequences"` +} + +// NewEqlFeatures returns a EqlFeatures. +func NewEqlFeatures() *EqlFeatures { + r := &EqlFeatures{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturesjoin.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturesjoin.go new file mode 100644 index 000000000..ba9f867de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturesjoin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlFeaturesJoin type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L106-L112 +type EqlFeaturesJoin struct { + JoinQueriesFiveOrMore uint `json:"join_queries_five_or_more"` + JoinQueriesFour uint `json:"join_queries_four"` + JoinQueriesThree uint `json:"join_queries_three"` + JoinQueriesTwo uint `json:"join_queries_two"` + JoinUntil uint `json:"join_until"` +} + +// NewEqlFeaturesJoin returns a EqlFeaturesJoin. +func NewEqlFeaturesJoin() *EqlFeaturesJoin { + r := &EqlFeaturesJoin{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatureskeys.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatureskeys.go new file mode 100644 index 000000000..23b216693 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeatureskeys.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlFeaturesKeys type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L114-L120 +type EqlFeaturesKeys struct { + JoinKeysFiveOrMore uint `json:"join_keys_five_or_more"` + JoinKeysFour uint `json:"join_keys_four"` + JoinKeysOne uint `json:"join_keys_one"` + JoinKeysThree uint `json:"join_keys_three"` + JoinKeysTwo uint `json:"join_keys_two"` +} + +// NewEqlFeaturesKeys returns a EqlFeaturesKeys. +func NewEqlFeaturesKeys() *EqlFeaturesKeys { + r := &EqlFeaturesKeys{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturespipes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturespipes.go new file mode 100644 index 000000000..0b358de46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturespipes.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlFeaturesPipes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L122-L125 +type EqlFeaturesPipes struct { + PipeHead uint `json:"pipe_head"` + PipeTail uint `json:"pipe_tail"` +} + +// NewEqlFeaturesPipes returns a EqlFeaturesPipes. +func NewEqlFeaturesPipes() *EqlFeaturesPipes { + r := &EqlFeaturesPipes{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturessequences.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturessequences.go new file mode 100644 index 000000000..206560371 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlfeaturessequences.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlFeaturesSequences type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L127-L134 +type EqlFeaturesSequences struct { + SequenceMaxspan uint `json:"sequence_maxspan"` + SequenceQueriesFiveOrMore uint `json:"sequence_queries_five_or_more"` + SequenceQueriesFour uint `json:"sequence_queries_four"` + SequenceQueriesThree uint `json:"sequence_queries_three"` + SequenceQueriesTwo uint `json:"sequence_queries_two"` + SequenceUntil uint `json:"sequence_until"` +} + +// NewEqlFeaturesSequences returns a EqlFeaturesSequences. +func NewEqlFeaturesSequences() *EqlFeaturesSequences { + r := &EqlFeaturesSequences{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlhits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlhits.go new file mode 100644 index 000000000..7218932e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eqlhits.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// EqlHits type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/_types/EqlHits.ts#L25-L39 +type EqlHits struct { + // Events Contains events matching the query. Each object represents a matching event. + Events []HitsEvent `json:"events,omitempty"` + // Sequences Contains event sequences matching the query. Each object represents a + // matching sequence. This parameter is only returned for EQL queries containing + // a sequence. + Sequences []HitsSequence `json:"sequences,omitempty"` + // Total Metadata about the number of matching events or sequences. 
+ Total *TotalHits `json:"total,omitempty"` +} + +// NewEqlHits returns a EqlHits. +func NewEqlHits() *EqlHits { + r := &EqlHits{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorcause.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorcause.go new file mode 100644 index 000000000..4d35fb350 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorcause.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ErrorCause type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Errors.ts#L25-L50 +type ErrorCause struct { + CausedBy *ErrorCause `json:"caused_by,omitempty"` + Metadata map[string]json.RawMessage `json:"-"` + // Reason A human-readable explanation of the error, in English. + Reason *string `json:"reason,omitempty"` + RootCause []ErrorCause `json:"root_cause,omitempty"` + // StackTrace The server stack trace. Present only if the `error_trace=true` parameter was + // sent with the request. 
+	StackTrace *string `json:"stack_trace,omitempty"`
+	Suppressed []ErrorCause `json:"suppressed,omitempty"`
+	// Type The type of error
+	Type string `json:"type"`
+}
+
+func (s *ErrorCause) UnmarshalJSON(data []byte) error {
+
+	if bytes.HasPrefix(data, []byte(`"`)) {
+		reason := string(data)
+		s.Reason = &reason
+		return nil
+	}
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "caused_by":
+			if err := dec.Decode(&s.CausedBy); err != nil {
+				return fmt.Errorf("%s | %w", "CausedBy", err)
+			}
+
+		case "reason":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Reason", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Reason = &o
+
+		case "root_cause":
+			if err := dec.Decode(&s.RootCause); err != nil {
+				return fmt.Errorf("%s | %w", "RootCause", err)
+			}
+
+		case "stack_trace":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "StackTrace", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.StackTrace = &o
+
+		case "suppressed":
+			if err := dec.Decode(&s.Suppressed); err != nil {
+				return fmt.Errorf("%s | %w", "Suppressed", err)
+			}
+
+		case "type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Type = o
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.Metadata == nil {
+					s.Metadata = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "Metadata", err)
+				}
+				s.Metadata[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ErrorCause) MarshalJSON() ([]byte, error) {
+	type opt ErrorCause
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Metadata {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Metadata")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewErrorCause returns a ErrorCause.
+func NewErrorCause() *ErrorCause {
+	r := &ErrorCause{
+		Metadata: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorresponsebase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorresponsebase.go
new file mode 100644
index 000000000..a0c61f071
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/errorresponsebase.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ErrorResponseBase type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Base.ts#L130-L139 +type ErrorResponseBase struct { + Error ErrorCause `json:"error"` + Status int `json:"status"` +} + +func (s *ErrorResponseBase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "status": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + } + } + return nil +} + +// NewErrorResponseBase returns a ErrorResponseBase. +func NewErrorResponseBase() *ErrorResponseBase { + r := &ErrorResponseBase{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterdetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterdetails.go new file mode 100644 index 000000000..7240f2bc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterdetails.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlclusterstatus" +) + +// EsqlClusterDetails type. 
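+//
+// Illustrative sketch, not part of the upstream generated file: because the
+// type implements json.Unmarshaler below, a details object from an ES|QL
+// response body can be decoded with the standard library. `raw` (a []byte
+// holding one details object) and the encoding/json import are assumed.
+//
+//	var d types.EsqlClusterDetails
+//	if err := json.Unmarshal(raw, &d); err != nil {
+//		// handle a body that does not match the expected shape
+//	}
+//	fmt.Println(d.Indices, d.Status)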
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L75-L81 +type EsqlClusterDetails struct { + Failures []EsqlShardFailure `json:"failures,omitempty"` + Indices string `json:"indices"` + Shards_ *EsqlShardInfo `json:"_shards,omitempty"` + Status esqlclusterstatus.EsqlClusterStatus `json:"status"` + Took *int64 `json:"took,omitempty"` +} + +func (s *EsqlClusterDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return fmt.Errorf("%s | %w", "Failures", err) + } + + case "indices": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Indices = o + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "took": + if err := dec.Decode(&s.Took); err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + + } + } + return nil +} + +// NewEsqlClusterDetails returns a EsqlClusterDetails. +func NewEsqlClusterDetails() *EsqlClusterDetails { + r := &EsqlClusterDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterinfo.go new file mode 100644 index 000000000..88bc269d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlclusterinfo.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EsqlClusterInfo type. 
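+//
+// Illustrative sketch, not part of the upstream generated file (`raw` is an
+// assumed []byte): the aggregate counters sit alongside a per-cluster map,
+// so both can be read after a single Unmarshal.
+//
+//	var info types.EsqlClusterInfo
+//	if err := json.Unmarshal(raw, &info); err == nil {
+//		fmt.Printf("%d/%d clusters successful\n", info.Successful, info.Total)
+//		for name, d := range info.Details {
+//			fmt.Printf("  %s: %v\n", name, d.Status)
+//		}
+//	}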
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L65-L73 +type EsqlClusterInfo struct { + Details map[string]EsqlClusterDetails `json:"details"` + Failed int `json:"failed"` + Partial int `json:"partial"` + Running int `json:"running"` + Skipped int `json:"skipped"` + Successful int `json:"successful"` + Total int `json:"total"` +} + +func (s *EsqlClusterInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if s.Details == nil { + s.Details = make(map[string]EsqlClusterDetails, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "failed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int(v) + s.Failed = f + } + + case "partial": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Partial", err) + } + s.Partial = value + case float64: + f := int(v) + s.Partial = f + } + + case "running": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Running", err) + } + s.Running = value + case float64: + f := int(v) + s.Running = f + } + + case "skipped": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skipped", err) + } + s.Skipped = value + case float64: + f := int(v) + s.Skipped = f + } + + case "successful": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successful", err) + } + s.Successful = value + case float64: + f := int(v) + s.Successful = f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewEsqlClusterInfo returns a EsqlClusterInfo. +func NewEsqlClusterInfo() *EsqlClusterInfo { + r := &EsqlClusterInfo{ + Details: make(map[string]EsqlClusterDetails), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlcolumninfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlcolumninfo.go new file mode 100644 index 000000000..55bd8159f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlcolumninfo.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EsqlColumnInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L60-L63 +type EsqlColumnInfo struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *EsqlColumnInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewEsqlColumnInfo returns a EsqlColumnInfo. +func NewEsqlColumnInfo() *EsqlColumnInfo { + r := &EsqlColumnInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardfailure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardfailure.go new file mode 100644 index 000000000..c947d9439 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardfailure.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EsqlShardFailure type. 
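+//
+// Illustrative sketch, not part of the upstream generated file: each failure
+// pairs a shard number with a mandatory ErrorCause, so failures collected in
+// EsqlClusterDetails.Failures can be summarised like this (`d` is an
+// assumed, already-decoded EsqlClusterDetails):
+//
+//	for _, f := range d.Failures {
+//		fmt.Printf("shard %d failed: %s\n", f.Shard, f.Reason.Type)
+//	}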
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L98-L103 +type EsqlShardFailure struct { + Index *string `json:"index,omitempty"` + Node *string `json:"node,omitempty"` + Reason ErrorCause `json:"reason"` + Shard int `json:"shard"` +} + +func (s *EsqlShardFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + +// NewEsqlShardFailure returns a EsqlShardFailure. +func NewEsqlShardFailure() *EsqlShardFailure { + r := &EsqlShardFailure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardinfo.go new file mode 100644 index 000000000..3be916b3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/esqlshardinfo.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EsqlShardInfo type. 
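+//
+// Illustrative sketch, not part of the upstream generated file: only Total
+// is always present; the remaining counters are pointers and need nil
+// checks before dereferencing (`d` is an assumed, already-decoded
+// EsqlClusterDetails):
+//
+//	if si := d.Shards_; si != nil && si.Failed != nil && *si.Failed > 0 {
+//		fmt.Printf("%d of %d shards failed\n", *si.Failed, si.Total)
+//	}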
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/EsqlResult.ts#L91-L96 +type EsqlShardInfo struct { + Failed *int `json:"failed,omitempty"` + Skipped *int `json:"skipped,omitempty"` + Successful *int `json:"successful,omitempty"` + Total int `json:"total"` +} + +func (s *EsqlShardInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = &value + case float64: + f := int(v) + s.Failed = &f + } + + case "skipped": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skipped", err) + } + s.Skipped = &value + case float64: + f := int(v) + s.Skipped = &f + } + + case "successful": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successful", err) + } + s.Successful = &value + case float64: + f := int(v) + s.Successful = &f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewEsqlShardInfo returns a EsqlShardInfo. +func NewEsqlShardInfo() *EsqlShardInfo { + r := &EsqlShardInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/estoniananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/estoniananalyzer.go new file mode 100644 index 000000000..07880f6f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/estoniananalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EstonianAnalyzer type. 
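+//
+// Illustrative sketch, not part of the upstream generated file: the
+// MarshalJSON override below pins the type discriminator, so callers never
+// set Type by hand; marshalling even a zero value emits the literal.
+//
+//	a := types.NewEstonianAnalyzer()
+//	b, _ := json.Marshal(a)
+//	// string(b) now contains "type":"estonian"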
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L159-L163 +type EstonianAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *EstonianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EstonianAnalyzer) MarshalJSON() ([]byte, error) { + type innerEstonianAnalyzer EstonianAnalyzer + tmp := innerEstonianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "estonian" + + return json.Marshal(tmp) +} + +// NewEstonianAnalyzer returns a EstonianAnalyzer. +func NewEstonianAnalyzer() *EstonianAnalyzer { + r := &EstonianAnalyzer{} + + return r +} + +type EstonianAnalyzerVariant interface { + EstonianAnalyzerCaster() *EstonianAnalyzer +} + +func (s *EstonianAnalyzer) EstonianAnalyzerCaster() *EstonianAnalyzer { + return s +} + +func (s *EstonianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eventdatastream.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eventdatastream.go new file mode 100644 index 000000000..8c8eae031 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/eventdatastream.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// EventDataStream type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 +type EventDataStream struct { + Name string `json:"name"` +} + +func (s *EventDataStream) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewEventDataStream returns a EventDataStream. +func NewEventDataStream() *EventDataStream { + r := &EventDataStream{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamodelsettings.go new file mode 100644 index 000000000..b784e487f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamodelsettings.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// EwmaModelSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L293-L295 +type EwmaModelSettings struct { + Alpha *float32 `json:"alpha,omitempty"` +} + +func (s *EwmaModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + } + } + return nil +} + +// NewEwmaModelSettings returns a EwmaModelSettings. 
+func NewEwmaModelSettings() *EwmaModelSettings { + r := &EwmaModelSettings{} + + return r +} + +type EwmaModelSettingsVariant interface { + EwmaModelSettingsCaster() *EwmaModelSettings +} + +func (s *EwmaModelSettings) EwmaModelSettingsCaster() *EwmaModelSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamovingaverageaggregation.go new file mode 100644 index 000000000..14514100b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ewmamovingaverageaggregation.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// EwmaMovingAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L278-L281 +type EwmaMovingAverageAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EwmaModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` +} + +func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "minimize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Minimize", err) + } + s.Minimize = &value + case bool: + s.Minimize = &v + } + + case "model": + if err := dec.Decode(&s.Model); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + + case "predict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Predict", err) + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EwmaMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerEwmaMovingAverageAggregation EwmaMovingAverageAggregation + tmp := innerEwmaMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Minimize: s.Minimize, + Model: s.Model, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "ewma" + + return json.Marshal(tmp) +} + +// NewEwmaMovingAverageAggregation returns a EwmaMovingAverageAggregation. 
+func NewEwmaMovingAverageAggregation() *EwmaMovingAverageAggregation { + r := &EwmaMovingAverageAggregation{} + + return r +} + +type EwmaMovingAverageAggregationVariant interface { + EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation +} + +func (s *EwmaMovingAverageAggregation) EwmaMovingAverageAggregationCaster() *EwmaMovingAverageAggregation { + return s +} + +func (s *EwmaMovingAverageAggregation) MovingAverageAggregationCaster() *MovingAverageAggregation { + o := MovingAverageAggregation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executeenrichpolicystatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executeenrichpolicystatus.go new file mode 100644 index 000000000..d29331ac9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executeenrichpolicystatus.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/enrichpolicyphase" +) + +// ExecuteEnrichPolicyStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/execute_policy/types.ts#L20-L23 +type ExecuteEnrichPolicyStatus struct { + Phase enrichpolicyphase.EnrichPolicyPhase `json:"phase"` + Step *string `json:"step,omitempty"` +} + +func (s *ExecuteEnrichPolicyStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "phase": + if err := dec.Decode(&s.Phase); err != nil { + return fmt.Errorf("%s | %w", "Phase", err) + } + + case "step": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Step", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Step = &o + + } + } + return nil +} + +// NewExecuteEnrichPolicyStatus returns a ExecuteEnrichPolicyStatus. 
+func NewExecuteEnrichPolicyStatus() *ExecuteEnrichPolicyStatus { + r := &ExecuteEnrichPolicyStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executingpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executingpolicy.go new file mode 100644 index 000000000..11062fa98 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executingpolicy.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ExecutingPolicy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/stats/types.ts#L25-L28 +type ExecutingPolicy struct { + Name string `json:"name"` + Task TaskInfo `json:"task"` +} + +func (s *ExecutingPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "task": + if err := dec.Decode(&s.Task); err != nil { + return fmt.Errorf("%s | %w", "Task", err) + } + + } + } + return nil +} + +// NewExecutingPolicy returns a ExecutingPolicy. +func NewExecutingPolicy() *ExecutingPolicy { + r := &ExecutingPolicy{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresult.go new file mode 100644 index 000000000..e40b8c881 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresult.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ExecutionResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L60-L66 +type ExecutionResult struct { + Actions []ExecutionResultAction `json:"actions"` + Condition ExecutionResultCondition `json:"condition"` + ExecutionDuration int64 `json:"execution_duration"` + ExecutionTime DateTime `json:"execution_time"` + Input ExecutionResultInput `json:"input"` +} + +func (s *ExecutionResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return fmt.Errorf("%s | %w", "Condition", err) + } + + case "execution_duration": + if err := dec.Decode(&s.ExecutionDuration); err != nil { + return fmt.Errorf("%s | %w", "ExecutionDuration", err) + } + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return fmt.Errorf("%s | %w", "ExecutionTime", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + } + } + return nil +} + +// NewExecutionResult returns a ExecutionResult. +func NewExecutionResult() *ExecutionResult { + r := &ExecutionResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultaction.go new file mode 100644 index 000000000..3c6a3df19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultaction.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype" +) + +// ExecutionResultAction type. 
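+//
+// Illustrative sketch, not part of the upstream generated file: per-action
+// outcomes hang off ExecutionResult.Actions; Error and its Reason are
+// optional and must be nil-checked (`result` is an assumed, already-decoded
+// ExecutionResult):
+//
+//	for _, a := range result.Actions {
+//		if a.Error != nil && a.Error.Reason != nil {
+//			fmt.Printf("action %s (%v): %s\n", a.Id, a.Status, *a.Error.Reason)
+//		}
+//	}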
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L74-L86 +type ExecutionResultAction struct { + Email *EmailResult `json:"email,omitempty"` + Error *ErrorCause `json:"error,omitempty"` + Id string `json:"id"` + Index *IndexResult `json:"index,omitempty"` + Logging *LoggingResult `json:"logging,omitempty"` + Pagerduty *PagerDutyResult `json:"pagerduty,omitempty"` + Reason *string `json:"reason,omitempty"` + Slack *SlackResult `json:"slack,omitempty"` + Status actionstatusoptions.ActionStatusOptions `json:"status"` + Type actiontype.ActionType `json:"type"` + Webhook *WebhookResult `json:"webhook,omitempty"` +} + +func (s *ExecutionResultAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + if err := dec.Decode(&s.Email); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "logging": + if err := dec.Decode(&s.Logging); err != nil { + return fmt.Errorf("%s | %w", "Logging", err) + } + + case "pagerduty": + if err := dec.Decode(&s.Pagerduty); err != nil { + return fmt.Errorf("%s | %w", "Pagerduty", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "slack": + if err := dec.Decode(&s.Slack); err != nil { + return fmt.Errorf("%s | %w", "Slack", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "webhook": + if err := dec.Decode(&s.Webhook); err != nil { + return fmt.Errorf("%s | %w", "Webhook", err) + } + + } + } + return nil +} + +// NewExecutionResultAction returns a ExecutionResultAction. +func NewExecutionResultAction() *ExecutionResultAction { + r := &ExecutionResultAction{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultcondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultcondition.go new file mode 100644 index 000000000..9dd570df3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultcondition.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditiontype" +) + +// ExecutionResultCondition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L68-L72 +type ExecutionResultCondition struct { + Met bool `json:"met"` + Status actionstatusoptions.ActionStatusOptions `json:"status"` + Type conditiontype.ConditionType `json:"type"` +} + +func (s *ExecutionResultCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "met": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Met", err) + } + s.Met = value + case bool: + s.Met = v + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewExecutionResultCondition returns a ExecutionResultCondition. +func NewExecutionResultCondition() *ExecutionResultCondition { + r := &ExecutionResultCondition{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultinput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultinput.go new file mode 100644 index 000000000..d240c059f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionresultinput.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/inputtype" +) + +// ExecutionResultInput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L88-L92 +type ExecutionResultInput struct { + Payload map[string]json.RawMessage `json:"payload"` + Status actionstatusoptions.ActionStatusOptions `json:"status"` + Type inputtype.InputType `json:"type"` +} + +// NewExecutionResultInput returns a ExecutionResultInput. +func NewExecutionResultInput() *ExecutionResultInput { + r := &ExecutionResultInput{ + Payload: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionstate.go new file mode 100644 index 000000000..ae4f862d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionstate.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ExecutionState type. 
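+//
+// Illustrative sketch, not part of the upstream generated file: Successful
+// is a plain bool while Reason is optional, so a failure message is read
+// like this (`st` is an assumed, already-decoded ExecutionState):
+//
+//	if !st.Successful && st.Reason != nil {
+//		fmt.Println("execution failed:", *st.Reason)
+//	}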
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L114-L118 +type ExecutionState struct { + Reason *string `json:"reason,omitempty"` + Successful bool `json:"successful"` + Timestamp DateTime `json:"timestamp"` +} + +func (s *ExecutionState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "successful": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successful", err) + } + s.Successful = value + case bool: + s.Successful = v + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewExecutionState returns a ExecutionState. +func NewExecutionState() *ExecutionState { + r := &ExecutionState{} + + return r +} + +type ExecutionStateVariant interface { + ExecutionStateCaster() *ExecutionState +} + +func (s *ExecutionState) ExecutionStateCaster() *ExecutionState { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionthreadpool.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionthreadpool.go new file mode 100644 index 000000000..e0ad400a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/executionthreadpool.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ExecutionThreadPool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Execution.ts#L94-L103 +type ExecutionThreadPool struct { + // MaxSize The largest size of the execution thread pool, which indicates the largest + // number of concurrent running watches. + MaxSize int64 `json:"max_size"` + // QueueSize The number of watches that were triggered and are currently queued. 
+ QueueSize int64 `json:"queue_size"` +} + +func (s *ExecutionThreadPool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSize", err) + } + s.MaxSize = value + case float64: + f := int64(v) + s.MaxSize = f + } + + case "queue_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "QueueSize", err) + } + s.QueueSize = value + case float64: + f := int64(v) + s.QueueSize = f + } + + } + } + return nil +} + +// NewExecutionThreadPool returns a ExecutionThreadPool. +func NewExecutionThreadPool() *ExecutionThreadPool { + r := &ExecutionThreadPool{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/existsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/existsquery.go new file mode 100644 index 000000000..b3b53baa4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/existsquery.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ExistsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L38-L46 +type ExistsQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field Name of the field you wish to search. 
+	Field      string  `json:"field"`
+	QueryName_ *string `json:"_name,omitempty"`
+}
+
+func (s *ExistsQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		}
+	}
+	return nil
+}
+
+// NewExistsQuery returns a ExistsQuery.
+func NewExistsQuery() *ExistsQuery {
+	r := &ExistsQuery{}
+
+	return r
+}
+
+type ExistsQueryVariant interface {
+	ExistsQueryCaster() *ExistsQuery
+}
+
+func (s *ExistsQuery) ExistsQueryCaster() *ExistsQuery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/expandwildcards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/expandwildcards.go
new file mode 100644
index 000000000..15031e26f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/expandwildcards.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard"
+)
+
+// ExpandWildcards type alias.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L214-L214
+type ExpandWildcards []expandwildcard.ExpandWildcard
+
+type ExpandWildcardsVariant interface {
+	ExpandWildcardsCaster() *ExpandWildcards
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explainanalyzetoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explainanalyzetoken.go
new file mode 100644
index 000000000..3d66ed544
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explainanalyzetoken.go
@@ -0,0 +1,242 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExplainAnalyzeToken type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L52-L67
+type ExplainAnalyzeToken struct {
+	Bytes               string                     `json:"bytes"`
+	EndOffset           int64                      `json:"end_offset"`
+	ExplainAnalyzeToken map[string]json.RawMessage `json:"-"`
+	Keyword             *bool                      `json:"keyword,omitempty"`
+	Position            int64                      `json:"position"`
+	PositionLength      int64                      `json:"positionLength"`
+	StartOffset         int64                      `json:"start_offset"`
+	TermFrequency       int64                      `json:"termFrequency"`
+	Token               string                     `json:"token"`
+	Type                string                     `json:"type"`
+}
+
+func (s *ExplainAnalyzeToken) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bytes":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Bytes", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Bytes = o
+
+		case "end_offset":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "EndOffset", err)
+				}
+				s.EndOffset = value
+			case float64:
+				f := int64(v)
+				s.EndOffset = f
+			}
+
+		case "keyword":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Keyword", err)
+				}
+				s.Keyword = &value
+			case bool:
+				s.Keyword = &v
+			}
+
+		case "position":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Position", err)
+				}
+				s.Position = value
+			case float64:
+				f := int64(v)
+				s.Position = f
+			}
+
+		case "positionLength":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PositionLength", err)
+				}
+				s.PositionLength = value
+			case float64:
+				f := int64(v)
+				s.PositionLength = f
+			}
+
+		case "start_offset":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "StartOffset", err)
+				}
+				s.StartOffset = value
+			case float64:
+				f := int64(v)
+				s.StartOffset = f
+			}
+
+		case "termFrequency":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TermFrequency", err)
+				}
+				s.TermFrequency = value
+			case float64:
+				f := int64(v)
+				s.TermFrequency = f
+			}
+
+		case "token":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Token", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Token = o
+
+		case "type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Type = o
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.ExplainAnalyzeToken == nil {
+					s.ExplainAnalyzeToken = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "ExplainAnalyzeToken", err)
+				}
+				s.ExplainAnalyzeToken[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ExplainAnalyzeToken) MarshalJSON() ([]byte, error) {
+	type opt ExplainAnalyzeToken
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.ExplainAnalyzeToken {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "ExplainAnalyzeToken")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewExplainAnalyzeToken returns a ExplainAnalyzeToken.
+func NewExplainAnalyzeToken() *ExplainAnalyzeToken {
+	r := &ExplainAnalyzeToken{
+		ExplainAnalyzeToken: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanation.go
new file mode 100644
index 000000000..b855b0be8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanation.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Explanation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/explain/types.ts#L22-L26
+type Explanation struct {
+	Description string              `json:"description"`
+	Details     []ExplanationDetail `json:"details"`
+	Value       float32             `json:"value"`
+}
+
+func (s *Explanation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = o
+
+		case "details":
+			if err := dec.Decode(&s.Details); err != nil {
+				return fmt.Errorf("%s | %w", "Details", err)
+			}
+
+		case "value":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Value", err)
+				}
+				f := float32(value)
+				s.Value = f
+			case float64:
+				f := float32(v)
+				s.Value = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExplanation returns a Explanation.
+func NewExplanation() *Explanation {
+	r := &Explanation{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanationdetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanationdetail.go
new file mode 100644
index 000000000..064bcbf56
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explanationdetail.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExplanationDetail type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/explain/types.ts#L28-L32
+type ExplanationDetail struct {
+	Description string              `json:"description"`
+	Details     []ExplanationDetail `json:"details,omitempty"`
+	Value       float32             `json:"value"`
+}
+
+func (s *ExplanationDetail) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = o
+
+		case "details":
+			if err := dec.Decode(&s.Details); err != nil {
+				return fmt.Errorf("%s | %w", "Details", err)
+			}
+
+		case "value":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Value", err)
+				}
+				f := float32(value)
+				s.Value = f
+			case float64:
+				f := float32(v)
+				s.Value = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExplanationDetail returns a ExplanationDetail.
+func NewExplanationDetail() *ExplanationDetail {
+	r := &ExplanationDetail{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explorecontrols.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explorecontrols.go
new file mode 100644
index 000000000..6cde59baa
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/explorecontrols.go
@@ -0,0 +1,133 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExploreControls type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/ExploreControls.ts#L24-L49
+type ExploreControls struct {
+	// SampleDiversity To avoid the top-matching documents sample being dominated by a single source
+	// of results, it is sometimes necessary to request diversity in the sample.
+	// You can do this by selecting a single-value field and setting a maximum
+	// number of documents per value for that field.
+	SampleDiversity *SampleDiversity `json:"sample_diversity,omitempty"`
+	// SampleSize Each hop considers a sample of the best-matching documents on each shard.
+	// Using samples improves the speed of execution and keeps exploration focused
+	// on meaningfully-connected terms.
+	// Very small values (less than 50) might not provide sufficient
+	// weight-of-evidence to identify significant connections between terms.
+	// Very large sample sizes can dilute the quality of the results and increase
+	// execution times.
+	SampleSize *int `json:"sample_size,omitempty"`
+	// Timeout The length of time in milliseconds after which exploration will be halted and
+	// the results gathered so far are returned.
+	// This timeout is honored on a best-effort basis.
+	// Execution might overrun this timeout if, for example, a long pause is
+	// encountered while FieldData is loaded for a field.
+	Timeout Duration `json:"timeout,omitempty"`
+	// UseSignificance Filters associated terms so only those that are significantly associated with
+	// your query are included.
+	UseSignificance bool `json:"use_significance"`
+}
+
+func (s *ExploreControls) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "sample_diversity":
+			if err := dec.Decode(&s.SampleDiversity); err != nil {
+				return fmt.Errorf("%s | %w", "SampleDiversity", err)
+			}
+
+		case "sample_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SampleSize", err)
+				}
+				s.SampleSize = &value
+			case float64:
+				f := int(v)
+				s.SampleSize = &f
+			}
+
+		case "timeout":
+			if err := dec.Decode(&s.Timeout); err != nil {
+				return fmt.Errorf("%s | %w", "Timeout", err)
+			}
+
+		case "use_significance":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UseSignificance", err)
+				}
+				s.UseSignificance = value
+			case bool:
+				s.UseSignificance = v
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExploreControls returns a ExploreControls.
+func NewExploreControls() *ExploreControls {
+	r := &ExploreControls{}
+
+	return r
+}
+
+type ExploreControlsVariant interface {
+	ExploreControlsCaster() *ExploreControls
+}
+
+func (s *ExploreControls) ExploreControlsCaster() *ExploreControls {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/exponentialaveragecalculationcontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/exponentialaveragecalculationcontext.go
new file mode 100644
index 000000000..d1317d39a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/exponentialaveragecalculationcontext.go
@@ -0,0 +1,80 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ExponentialAverageCalculationContext type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L204-L208
+type ExponentialAverageCalculationContext struct {
+	IncrementalMetricValueMs     Float64 `json:"incremental_metric_value_ms"`
+	LatestTimestamp              *int64  `json:"latest_timestamp,omitempty"`
+	PreviousExponentialAverageMs Float64 `json:"previous_exponential_average_ms,omitempty"`
+}
+
+func (s *ExponentialAverageCalculationContext) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "incremental_metric_value_ms":
+			if err := dec.Decode(&s.IncrementalMetricValueMs); err != nil {
+				return fmt.Errorf("%s | %w", "IncrementalMetricValueMs", err)
+			}
+
+		case "latest_timestamp":
+			if err := dec.Decode(&s.LatestTimestamp); err != nil {
+				return fmt.Errorf("%s | %w", "LatestTimestamp", err)
+			}
+
+		case "previous_exponential_average_ms":
+			if err := dec.Decode(&s.PreviousExponentialAverageMs); err != nil {
+				return fmt.Errorf("%s | %w", "PreviousExponentialAverageMs", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExponentialAverageCalculationContext returns a ExponentialAverageCalculationContext.
+func NewExponentialAverageCalculationContext() *ExponentialAverageCalculationContext {
+	r := &ExponentialAverageCalculationContext{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsdouble.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsdouble.go
new file mode 100644
index 000000000..fb3074928
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsdouble.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExtendedBoundsdouble type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L508-L517
+type ExtendedBoundsdouble struct {
+	// Max Maximum value for the bound.
+	Max *Float64 `json:"max,omitempty"`
+	// Min Minimum value for the bound.
+	Min *Float64 `json:"min,omitempty"`
+}
+
+func (s *ExtendedBoundsdouble) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "max":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Max", err)
+				}
+				f := Float64(value)
+				s.Max = &f
+			case float64:
+				f := Float64(v)
+				s.Max = &f
+			}
+
+		case "min":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Min", err)
+				}
+				f := Float64(value)
+				s.Min = &f
+			case float64:
+				f := Float64(v)
+				s.Min = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedBoundsdouble returns a ExtendedBoundsdouble.
+func NewExtendedBoundsdouble() *ExtendedBoundsdouble {
+	r := &ExtendedBoundsdouble{}
+
+	return r
+}
+
+type ExtendedBoundsdoubleVariant interface {
+	ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble
+}
+
+func (s *ExtendedBoundsdouble) ExtendedBoundsdoubleCaster() *ExtendedBoundsdouble {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsfielddatemath.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsfielddatemath.go
new file mode 100644
index 000000000..3cc189701
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedboundsfielddatemath.go
@@ -0,0 +1,84 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ExtendedBoundsFieldDateMath type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L508-L517
+type ExtendedBoundsFieldDateMath struct {
+	// Max Maximum value for the bound.
+	Max FieldDateMath `json:"max,omitempty"`
+	// Min Minimum value for the bound.
+	Min FieldDateMath `json:"min,omitempty"`
+}
+
+func (s *ExtendedBoundsFieldDateMath) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "max":
+			if err := dec.Decode(&s.Max); err != nil {
+				return fmt.Errorf("%s | %w", "Max", err)
+			}
+
+		case "min":
+			if err := dec.Decode(&s.Min); err != nil {
+				return fmt.Errorf("%s | %w", "Min", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedBoundsFieldDateMath returns a ExtendedBoundsFieldDateMath.
+func NewExtendedBoundsFieldDateMath() *ExtendedBoundsFieldDateMath {
+	r := &ExtendedBoundsFieldDateMath{}
+
+	return r
+}
+
+type ExtendedBoundsFieldDateMathVariant interface {
+	ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath
+}
+
+func (s *ExtendedBoundsFieldDateMath) ExtendedBoundsFieldDateMathCaster() *ExtendedBoundsFieldDateMath {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedmemorystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedmemorystats.go
new file mode 100644
index 000000000..43a9ec6fb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedmemorystats.go
@@ -0,0 +1,257 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExtendedMemoryStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L660-L669
+type ExtendedMemoryStats struct {
+	// AdjustedTotalInBytes If the amount of physical memory has been overridden using the
+	// `es`.`total_memory_bytes` system property then this reports the overridden
+	// value in bytes.
+	// Otherwise it reports the same value as `total_in_bytes`.
+	AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"`
+	// FreeInBytes Amount of free physical memory in bytes.
+	FreeInBytes *int64 `json:"free_in_bytes,omitempty"`
+	// FreePercent Percentage of free memory.
+	FreePercent     *int    `json:"free_percent,omitempty"`
+	Resident        *string `json:"resident,omitempty"`
+	ResidentInBytes *int64  `json:"resident_in_bytes,omitempty"`
+	Share           *string `json:"share,omitempty"`
+	ShareInBytes    *int64  `json:"share_in_bytes,omitempty"`
+	// TotalInBytes Total amount of physical memory in bytes.
+	TotalInBytes        *int64  `json:"total_in_bytes,omitempty"`
+	TotalVirtual        *string `json:"total_virtual,omitempty"`
+	TotalVirtualInBytes *int64  `json:"total_virtual_in_bytes,omitempty"`
+	// UsedInBytes Amount of used physical memory in bytes.
+	UsedInBytes *int64 `json:"used_in_bytes,omitempty"`
+	// UsedPercent Percentage of used memory.
+	UsedPercent *int `json:"used_percent,omitempty"`
+}
+
+func (s *ExtendedMemoryStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "adjusted_total_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AdjustedTotalInBytes", err)
+				}
+				s.AdjustedTotalInBytes = &value
+			case float64:
+				f := int64(v)
+				s.AdjustedTotalInBytes = &f
+			}
+
+		case "free_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FreeInBytes", err)
+				}
+				s.FreeInBytes = &value
+			case float64:
+				f := int64(v)
+				s.FreeInBytes = &f
+			}
+
+		case "free_percent":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FreePercent", err)
+				}
+				s.FreePercent = &value
+			case float64:
+				f := int(v)
+				s.FreePercent = &f
+			}
+
+		case "resident":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Resident", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Resident = &o
+
+		case "resident_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ResidentInBytes", err)
+				}
+				s.ResidentInBytes = &value
+			case float64:
+				f := int64(v)
+				s.ResidentInBytes = &f
+			}
+
+		case "share":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Share", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Share = &o
+
+		case "share_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShareInBytes", err)
+				}
+				s.ShareInBytes = &value
+			case float64:
+				f := int64(v)
+				s.ShareInBytes = &f
+			}
+
+		case "total_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TotalInBytes", err)
+				}
+				s.TotalInBytes = &value
+			case float64:
+				f := int64(v)
+				s.TotalInBytes = &f
+			}
+
+		case "total_virtual":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "TotalVirtual", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.TotalVirtual = &o
+
+		case "total_virtual_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TotalVirtualInBytes", err)
+				}
+				s.TotalVirtualInBytes = &value
+			case float64:
+				f := int64(v)
+				s.TotalVirtualInBytes = &f
+			}
+
+		case "used_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UsedInBytes", err)
+				}
+				s.UsedInBytes = &value
+			case float64:
+				f := int64(v)
+				s.UsedInBytes = &f
+			}
+
+		case "used_percent":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UsedPercent", err)
+				}
+				s.UsedPercent = &value
+			case float64:
+				f := int(v)
+				s.UsedPercent = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedMemoryStats returns a ExtendedMemoryStats.
+func NewExtendedMemoryStats() *ExtendedMemoryStats {
+	r := &ExtendedMemoryStats{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregate.go
new file mode 100644
index 000000000..e8a52ef34
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregate.go
@@ -0,0 +1,291 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExtendedStatsAggregate type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L299-L320
+type ExtendedStatsAggregate struct {
+	Avg                        *Float64                         `json:"avg,omitempty"`
+	AvgAsString                *string                          `json:"avg_as_string,omitempty"`
+	Count                      int64                            `json:"count"`
+	Max                        *Float64                         `json:"max,omitempty"`
+	MaxAsString                *string                          `json:"max_as_string,omitempty"`
+	Meta                       Metadata                         `json:"meta,omitempty"`
+	Min                        *Float64                         `json:"min,omitempty"`
+	MinAsString                *string                          `json:"min_as_string,omitempty"`
+	StdDeviation               *Float64                         `json:"std_deviation,omitempty"`
+	StdDeviationAsString       *string                          `json:"std_deviation_as_string,omitempty"`
+	StdDeviationBounds         *StandardDeviationBounds         `json:"std_deviation_bounds,omitempty"`
+	StdDeviationBoundsAsString *StandardDeviationBoundsAsString `json:"std_deviation_bounds_as_string,omitempty"`
+	StdDeviationPopulation     *Float64                         `json:"std_deviation_population,omitempty"`
+	StdDeviationSampling       *Float64                         `json:"std_deviation_sampling,omitempty"`
+	Sum                        Float64                          `json:"sum"`
+	SumAsString                *string                          `json:"sum_as_string,omitempty"`
+	SumOfSquares               *Float64                         `json:"sum_of_squares,omitempty"`
+	SumOfSquaresAsString       *string                          `json:"sum_of_squares_as_string,omitempty"`
+	Variance                   *Float64                         `json:"variance,omitempty"`
+	VarianceAsString           *string                          `json:"variance_as_string,omitempty"`
+	VariancePopulation         *Float64                         `json:"variance_population,omitempty"`
+	VariancePopulationAsString *string                          `json:"variance_population_as_string,omitempty"`
+	VarianceSampling           *Float64                         `json:"variance_sampling,omitempty"`
+	VarianceSamplingAsString   *string                          `json:"variance_sampling_as_string,omitempty"`
+}
+
+func (s *ExtendedStatsAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "avg":
+			if err := dec.Decode(&s.Avg); err != nil {
+				return fmt.Errorf("%s | %w", "Avg", err)
+			}
+
+		case "avg_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AvgAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AvgAsString = &o
+
+		case "count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int64(v)
+				s.Count = f
+			}
+
+		case "max":
+			if err := dec.Decode(&s.Max); err != nil {
+				return fmt.Errorf("%s | %w", "Max", err)
+			}
+
+		case "max_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "MaxAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.MaxAsString = &o
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		case "min":
+			if err := dec.Decode(&s.Min); err != nil {
+				return fmt.Errorf("%s | %w", "Min", err)
+			}
+
+		case "min_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "MinAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.MinAsString = &o
+
+		case "std_deviation":
+			if err := dec.Decode(&s.StdDeviation); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviation", err)
+			}
+
+		case "std_deviation_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.StdDeviationAsString = &o
+
+		case "std_deviation_bounds":
+			if err := dec.Decode(&s.StdDeviationBounds); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationBounds", err)
+			}
+
+		case "std_deviation_bounds_as_string":
+			if err := dec.Decode(&s.StdDeviationBoundsAsString); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationBoundsAsString", err)
+			}
+
+		case "std_deviation_population":
+			if err := dec.Decode(&s.StdDeviationPopulation); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationPopulation", err)
+			}
+
+		case "std_deviation_sampling":
+			if err := dec.Decode(&s.StdDeviationSampling); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationSampling", err)
+			}
+
+		case "sum":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Sum", err)
+				}
+				f := Float64(value)
+				s.Sum = f
+			case float64:
+				f := Float64(v)
+				s.Sum = f
+			}
+
+		case "sum_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SumAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SumAsString = &o
+
+		case "sum_of_squares":
+			if err := dec.Decode(&s.SumOfSquares); err != nil {
+				return fmt.Errorf("%s | %w", "SumOfSquares", err)
+			}
+
+		case "sum_of_squares_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SumOfSquaresAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SumOfSquaresAsString = &o
+
+		case "variance":
+			if err := dec.Decode(&s.Variance); err != nil {
+				return fmt.Errorf("%s | %w", "Variance", err)
+			}
+
+		case "variance_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VarianceAsString = &o
+
+		case "variance_population":
+			if err := dec.Decode(&s.VariancePopulation); err != nil {
+				return fmt.Errorf("%s | %w", "VariancePopulation", err)
+			}
+
+		case "variance_population_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VariancePopulationAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VariancePopulationAsString = &o
+
+		case "variance_sampling":
+			if err := dec.Decode(&s.VarianceSampling); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceSampling", err)
+			}
+
+		case "variance_sampling_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceSamplingAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VarianceSamplingAsString = &o
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedStatsAggregate returns a ExtendedStatsAggregate.
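+//
+// Note: the generated UnmarshalJSON above is deliberately lenient about
+// numeric encodings, so a response that carries "count" or "sum" as a JSON
+// string decodes the same as one that uses plain numbers. A minimal
+// illustrative sketch (the input literal is hypothetical, not a real server
+// response):
+//
+//	var agg ExtendedStatsAggregate
+//	if err := json.Unmarshal([]byte(`{"count":"42","sum":10.5}`), &agg); err != nil {
+//		// handle a malformed payload
+//	}
+//	// agg.Count == 42 and agg.Sum == 10.5 either way.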
+func NewExtendedStatsAggregate() *ExtendedStatsAggregate {
+	r := &ExtendedStatsAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregation.go
new file mode 100644
index 000000000..93b4d254f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsaggregation.go
@@ -0,0 +1,123 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExtendedStatsAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L107-L112
+type ExtendedStatsAggregation struct {
+	// Field The field on which to run the aggregation.
+	Field  *string `json:"field,omitempty"`
+	Format *string `json:"format,omitempty"`
+	// Missing The value to apply to documents that do not have a value.
+	// By default, documents without a value are ignored.
+	Missing Missing `json:"missing,omitempty"`
+	Script  *Script `json:"script,omitempty"`
+	// Sigma The number of standard deviations above/below the mean to display.
+	Sigma *Float64 `json:"sigma,omitempty"`
+}
+
+func (s *ExtendedStatsAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "format":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Format", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Format = &o
+
+		case "missing":
+			if err := dec.Decode(&s.Missing); err != nil {
+				return fmt.Errorf("%s | %w", "Missing", err)
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		case "sigma":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Sigma", err)
+				}
+				f := Float64(value)
+				s.Sigma = &f
+			case float64:
+				f := Float64(v)
+				s.Sigma = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedStatsAggregation returns a ExtendedStatsAggregation.
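+//
+// A minimal request-side sketch (the field name and sigma value are
+// illustrative only):
+//
+//	field := "load_time"
+//	sigma := Float64(2)
+//	agg := NewExtendedStatsAggregation()
+//	agg.Field = &field
+//	agg.Sigma = &sigma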
+func NewExtendedStatsAggregation() *ExtendedStatsAggregation {
+	r := &ExtendedStatsAggregation{}
+
+	return r
+}
+
+type ExtendedStatsAggregationVariant interface {
+	ExtendedStatsAggregationCaster() *ExtendedStatsAggregation
+}
+
+func (s *ExtendedStatsAggregation) ExtendedStatsAggregationCaster() *ExtendedStatsAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregate.go
new file mode 100644
index 000000000..29a0d581b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregate.go
@@ -0,0 +1,291 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ExtendedStatsBucketAggregate type.
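+//
+// It decodes with the same leniency as ExtendedStatsAggregate, and the
+// *_as_string fields are kept as separate optional values rather than
+// overwriting their numeric counterparts. A hypothetical sketch:
+//
+//	var agg ExtendedStatsBucketAggregate
+//	_ = json.Unmarshal([]byte(`{"count":3,"sum":"1.5","sum_as_string":"1.5"}`), &agg)
+//	// agg.Sum == 1.5 and *agg.SumAsString == "1.5"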
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L322-L323
+type ExtendedStatsBucketAggregate struct {
+	Avg                        *Float64                         `json:"avg,omitempty"`
+	AvgAsString                *string                          `json:"avg_as_string,omitempty"`
+	Count                      int64                            `json:"count"`
+	Max                        *Float64                         `json:"max,omitempty"`
+	MaxAsString                *string                          `json:"max_as_string,omitempty"`
+	Meta                       Metadata                         `json:"meta,omitempty"`
+	Min                        *Float64                         `json:"min,omitempty"`
+	MinAsString                *string                          `json:"min_as_string,omitempty"`
+	StdDeviation               *Float64                         `json:"std_deviation,omitempty"`
+	StdDeviationAsString       *string                          `json:"std_deviation_as_string,omitempty"`
+	StdDeviationBounds         *StandardDeviationBounds         `json:"std_deviation_bounds,omitempty"`
+	StdDeviationBoundsAsString *StandardDeviationBoundsAsString `json:"std_deviation_bounds_as_string,omitempty"`
+	StdDeviationPopulation     *Float64                         `json:"std_deviation_population,omitempty"`
+	StdDeviationSampling       *Float64                         `json:"std_deviation_sampling,omitempty"`
+	Sum                        Float64                          `json:"sum"`
+	SumAsString                *string                          `json:"sum_as_string,omitempty"`
+	SumOfSquares               *Float64                         `json:"sum_of_squares,omitempty"`
+	SumOfSquaresAsString       *string                          `json:"sum_of_squares_as_string,omitempty"`
+	Variance                   *Float64                         `json:"variance,omitempty"`
+	VarianceAsString           *string                          `json:"variance_as_string,omitempty"`
+	VariancePopulation         *Float64                         `json:"variance_population,omitempty"`
+	VariancePopulationAsString *string                          `json:"variance_population_as_string,omitempty"`
+	VarianceSampling           *Float64                         `json:"variance_sampling,omitempty"`
+	VarianceSamplingAsString   *string                          `json:"variance_sampling_as_string,omitempty"`
+}
+
+func (s *ExtendedStatsBucketAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "avg":
+			if err := dec.Decode(&s.Avg); err != nil {
+				return fmt.Errorf("%s | %w", "Avg", err)
+			}
+
+		case "avg_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AvgAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AvgAsString = &o
+
+		case "count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int64(v)
+				s.Count = f
+			}
+
+		case "max":
+			if err := dec.Decode(&s.Max); err != nil {
+				return fmt.Errorf("%s | %w", "Max", err)
+			}
+
+		case "max_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "MaxAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.MaxAsString = &o
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		case "min":
+			if err := dec.Decode(&s.Min); err != nil {
+				return fmt.Errorf("%s | %w", "Min", err)
+			}
+
+		case "min_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "MinAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.MinAsString = &o
+
+		case "std_deviation":
+			if err := dec.Decode(&s.StdDeviation); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviation", err)
+			}
+
+		case "std_deviation_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.StdDeviationAsString = &o
+
+		case "std_deviation_bounds":
+			if err := dec.Decode(&s.StdDeviationBounds); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationBounds", err)
+			}
+
+		case "std_deviation_bounds_as_string":
+			if err := dec.Decode(&s.StdDeviationBoundsAsString); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationBoundsAsString", err)
+			}
+
+		case "std_deviation_population":
+			if err := dec.Decode(&s.StdDeviationPopulation); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationPopulation", err)
+			}
+
+		case "std_deviation_sampling":
+			if err := dec.Decode(&s.StdDeviationSampling); err != nil {
+				return fmt.Errorf("%s | %w", "StdDeviationSampling", err)
+			}
+
+		case "sum":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Sum", err)
+				}
+				f := Float64(value)
+				s.Sum = f
+			case float64:
+				f := Float64(v)
+				s.Sum = f
+			}
+
+		case "sum_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SumAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SumAsString = &o
+
+		case "sum_of_squares":
+			if err := dec.Decode(&s.SumOfSquares); err != nil {
+				return fmt.Errorf("%s | %w", "SumOfSquares", err)
+			}
+
+		case "sum_of_squares_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "SumOfSquaresAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.SumOfSquaresAsString = &o
+
+		case "variance":
+			if err := dec.Decode(&s.Variance); err != nil {
+				return fmt.Errorf("%s | %w", "Variance", err)
+			}
+
+		case "variance_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VarianceAsString = &o
+
+		case "variance_population":
+			if err := dec.Decode(&s.VariancePopulation); err != nil {
+				return fmt.Errorf("%s | %w", "VariancePopulation", err)
+			}
+
+		case "variance_population_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VariancePopulationAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VariancePopulationAsString = &o
+
+		case "variance_sampling":
+			if err := dec.Decode(&s.VarianceSampling); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceSampling", err)
+			}
+
+		case "variance_sampling_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "VarianceSamplingAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.VarianceSamplingAsString = &o
+
+		}
+	}
+	return nil
+}
+
+// NewExtendedStatsBucketAggregate returns a ExtendedStatsBucketAggregate.
+func NewExtendedStatsBucketAggregate() *ExtendedStatsBucketAggregate {
+	r := &ExtendedStatsBucketAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregation.go
new file mode 100644
index 000000000..8a9eb7be2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/extendedstatsbucketaggregation.go
@@ -0,0 +1,121 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy"
+)
+
+// ExtendedStatsBucketAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L218-L223
+type ExtendedStatsBucketAggregation struct {
+	// BucketsPath Path to the buckets that contain one set of values to correlate.
+	BucketsPath BucketsPath `json:"buckets_path,omitempty"`
+	// Format `DecimalFormat` pattern for the output value.
+	// If specified, the formatted value is returned in the aggregation’s
+	// `value_as_string` property.
+	Format *string `json:"format,omitempty"`
+	// GapPolicy Policy to apply when gaps are found in the data.
+	GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"`
+	// Sigma The number of standard deviations above/below the mean to display.
+ Sigma *Float64 `json:"sigma,omitempty"` +} + +func (s *ExtendedStatsBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "sigma": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Sigma", err) + } + f := Float64(value) + s.Sigma = &f + case float64: + f := Float64(v) + s.Sigma = &f + } + + } + } + return nil +} + +// NewExtendedStatsBucketAggregation returns a ExtendedStatsBucketAggregation. +func NewExtendedStatsBucketAggregation() *ExtendedStatsBucketAggregation { + r := &ExtendedStatsBucketAggregation{} + + return r +} + +type ExtendedStatsBucketAggregationVariant interface { + ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation +} + +func (s *ExtendedStatsBucketAggregation) ExtendedStatsBucketAggregationCaster() *ExtendedStatsBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failprocessor.go new file mode 100644 index 000000000..94b93c65b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failprocessor.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FailProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L889-L895 +type FailProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // Message The error message thrown by the processor. + // Supports template snippets. + Message string `json:"message"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *FailProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewFailProcessor returns a FailProcessor. +func NewFailProcessor() *FailProcessor { + r := &FailProcessor{} + + return r +} + +type FailProcessorVariant interface { + FailProcessorCaster() *FailProcessor +} + +func (s *FailProcessor) FailProcessorCaster() *FailProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestore.go new file mode 100644 index 000000000..c876a904e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestore.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FailureStore type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStream.ts#L48-L52 +type FailureStore struct { + Enabled bool `json:"enabled"` + Indices []DataStreamIndex `json:"indices"` + RolloverOnWrite bool `json:"rollover_on_write"` +} + +func (s *FailureStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "rollover_on_write": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RolloverOnWrite", err) + } + s.RolloverOnWrite = value + case bool: + s.RolloverOnWrite = v + } + + } + } + return nil +} + +// NewFailureStore returns a FailureStore. +func NewFailureStore() *FailureStore { + r := &FailureStore{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycle.go new file mode 100644 index 000000000..db662df4e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycle.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FailureStoreLifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamFailureStore.ts#L56-L72 +type FailureStoreLifecycle struct { + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. 
+ DataRetention Duration `json:"data_retention,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` +} + +func (s *FailureStoreLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} + +// NewFailureStoreLifecycle returns a FailureStoreLifecycle. +func NewFailureStoreLifecycle() *FailureStoreLifecycle { + r := &FailureStoreLifecycle{} + + return r +} + +type FailureStoreLifecycleVariant interface { + FailureStoreLifecycleCaster() *FailureStoreLifecycle +} + +func (s *FailureStoreLifecycle) FailureStoreLifecycleCaster() *FailureStoreLifecycle { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycletemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycletemplate.go new file mode 100644 index 000000000..6870d9ea3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/failurestorelifecycletemplate.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FailureStoreLifecycleTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/DataStreamFailureStore.ts#L74-L90 +type FailureStoreLifecycleTemplate struct { + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention Duration `json:"data_retention,omitempty"` + // Enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this + // data stream. 
A data stream lifecycle + // that's disabled (enabled: `false`) will have no effect on the data stream. + Enabled *bool `json:"enabled,omitempty"` +} + +func (s *FailureStoreLifecycleTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return fmt.Errorf("%s | %w", "DataRetention", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} + +// NewFailureStoreLifecycleTemplate returns a FailureStoreLifecycleTemplate. +func NewFailureStoreLifecycleTemplate() *FailureStoreLifecycleTemplate { + r := &FailureStoreLifecycleTemplate{} + + return r +} + +type FailureStoreLifecycleTemplateVariant interface { + FailureStoreLifecycleTemplateCaster() *FailureStoreLifecycleTemplate +} + +func (s *FailureStoreLifecycleTemplate) FailureStoreLifecycleTemplateCaster() *FailureStoreLifecycleTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/feature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/feature.go new file mode 100644 index 000000000..b66548d3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/feature.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Feature type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/features/_types/Feature.ts#L20-L23 +type Feature struct { + Description string `json:"description"` + Name string `json:"name"` +} + +func (s *Feature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewFeature returns a Feature. +func NewFeature() *Feature { + r := &Feature{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureenabled.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureenabled.go new file mode 100644 index 000000000..09eda510a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureenabled.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FeatureEnabled type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L215-L217 +type FeatureEnabled struct { + Enabled bool `json:"enabled"` +} + +func (s *FeatureEnabled) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewFeatureEnabled returns a FeatureEnabled. 
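+// Its UnmarshalJSON accepts both a JSON boolean and its string form (for
+// example `{"enabled": "true"}`, parsed with strconv.ParseBool), matching the
+// lenient scalar decoding used throughout this package.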
+func NewFeatureEnabled() *FeatureEnabled { + r := &FeatureEnabled{} + + return r +} + +type FeatureEnabledVariant interface { + FeatureEnabledCaster() *FeatureEnabled +} + +func (s *FeatureEnabled) FeatureEnabledCaster() *FeatureEnabled { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureextractor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureextractor.go new file mode 100644 index 000000000..5b9e66ee2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featureextractor.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FeatureExtractor type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L93-L96 +type FeatureExtractor QueryFeatureExtractor + +type FeatureExtractorVariant interface { + FeatureExtractorCaster() *FeatureExtractor +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/features.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/features.go new file mode 100644 index 000000000..404d4fccc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/features.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature" +) + +// Features type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get/IndicesGetRequest.ts#L103-L103 +type Features []feature.Feature diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featuretoggle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featuretoggle.go new file mode 100644 index 000000000..0755b04f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/featuretoggle.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FeatureToggle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L37-L39 +type FeatureToggle struct { + Enabled bool `json:"enabled"` +} + +func (s *FeatureToggle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewFeatureToggle returns a FeatureToggle. +func NewFeatureToggle() *FeatureToggle { + r := &FeatureToggle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofile.go new file mode 100644 index 000000000..4936693b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofile.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FetchProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L230-L237 +type FetchProfile struct { + Breakdown FetchProfileBreakdown `json:"breakdown"` + Children []FetchProfile `json:"children,omitempty"` + Debug *FetchProfileDebug `json:"debug,omitempty"` + Description string `json:"description"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *FetchProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewFetchProfile returns a FetchProfile. +func NewFetchProfile() *FetchProfile { + r := &FetchProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofilebreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofilebreakdown.go new file mode 100644 index 000000000..6fbc886ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofilebreakdown.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FetchProfileBreakdown type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L239-L248 +type FetchProfileBreakdown struct { + LoadSource *int `json:"load_source,omitempty"` + LoadSourceCount *int `json:"load_source_count,omitempty"` + LoadStoredFields *int `json:"load_stored_fields,omitempty"` + LoadStoredFieldsCount *int `json:"load_stored_fields_count,omitempty"` + NextReader *int `json:"next_reader,omitempty"` + NextReaderCount *int `json:"next_reader_count,omitempty"` + Process *int `json:"process,omitempty"` + ProcessCount *int `json:"process_count,omitempty"` +} + +func (s *FetchProfileBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "load_source": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LoadSource", err) + } + s.LoadSource = &value + case float64: + f := int(v) + s.LoadSource = &f + } + + case "load_source_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LoadSourceCount", err) + } + s.LoadSourceCount = &value + case float64: + f := int(v) + s.LoadSourceCount = &f + } + + case "load_stored_fields": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LoadStoredFields", err) + } + s.LoadStoredFields = &value + case float64: + f := int(v) + s.LoadStoredFields = &f + } + + case "load_stored_fields_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LoadStoredFieldsCount", err) + } + s.LoadStoredFieldsCount = &value + case float64: + f := int(v) + s.LoadStoredFieldsCount = &f + } + + case "next_reader": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NextReader", err) + } + s.NextReader = &value + case float64: + f := int(v) + s.NextReader = &f + } + + case "next_reader_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NextReaderCount", err) + } + s.NextReaderCount = &value + case float64: + f := int(v) + s.NextReaderCount = &f + } + + case "process": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Process", err) + } + s.Process = &value + case float64: + f := int(v) + s.Process = &f + } + + case "process_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ProcessCount", err) + } + s.ProcessCount = &value + case float64: + f := int(v) + s.ProcessCount = &f + } + + } + } + return nil +} + +// 
NewFetchProfileBreakdown returns a FetchProfileBreakdown. +func NewFetchProfileBreakdown() *FetchProfileBreakdown { + r := &FetchProfileBreakdown{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofiledebug.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofiledebug.go new file mode 100644 index 000000000..fdde6c0a2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fetchprofiledebug.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FetchProfileDebug type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L250-L253 +type FetchProfileDebug struct { + FastPath *int `json:"fast_path,omitempty"` + StoredFields []string `json:"stored_fields,omitempty"` +} + +func (s *FetchProfileDebug) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fast_path": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FastPath", err) + } + s.FastPath = &value + case float64: + f := int(v) + s.FastPath = &f + } + + case "stored_fields": + if err := dec.Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + } + } + return nil +} + +// NewFetchProfileDebug returns a FetchProfileDebug. +func NewFetchProfileDebug() *FetchProfileDebug { + r := &FetchProfileDebug{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldaliasproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldaliasproperty.go new file mode 100644 index 000000000..2317ed5d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldaliasproperty.go @@ -0,0 +1,829 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// FieldAliasProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L64-L67 +type FieldAliasProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Path *string `json:"path,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo 
:= NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case 
"synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { + type innerFieldAliasProperty FieldAliasProperty + tmp := innerFieldAliasProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Path: s.Path, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "alias" + + return json.Marshal(tmp) +} + +// NewFieldAliasProperty returns a FieldAliasProperty. +func NewFieldAliasProperty() *FieldAliasProperty { + r := &FieldAliasProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type FieldAliasPropertyVariant interface { + FieldAliasPropertyCaster() *FieldAliasProperty +} + +func (s *FieldAliasProperty) FieldAliasPropertyCaster() *FieldAliasProperty { + return s +} + +func (s *FieldAliasProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldandformat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldandformat.go new file mode 100644 index 000000000..495f82658 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldandformat.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldAndFormat type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L535-L549 +type FieldAndFormat struct { + // Field A wildcard pattern. The request returns values for field names matching this + // pattern. + Field string `json:"field"` + // Format The format in which the values are returned. + Format *string `json:"format,omitempty"` + IncludeUnmapped *bool `json:"include_unmapped,omitempty"` +} + +func (s *FieldAndFormat) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Field) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "include_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeUnmapped", err) + } + s.IncludeUnmapped = &value + case bool: + s.IncludeUnmapped = &v + } + + } + } + return nil +} + +// NewFieldAndFormat returns a FieldAndFormat. +func NewFieldAndFormat() *FieldAndFormat { + r := &FieldAndFormat{} + + return r +} + +type FieldAndFormatVariant interface { + FieldAndFormatCaster() *FieldAndFormat +} + +func (s *FieldAndFormat) FieldAndFormatCaster() *FieldAndFormat { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcapability.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcapability.go new file mode 100644 index 000000000..51de8b0df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcapability.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// FieldCapability type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/field_caps/types.ts#L23-L81 +type FieldCapability struct { + // Aggregatable Whether this field can be aggregated on all indices. + Aggregatable bool `json:"aggregatable"` + // Indices The list of indices where this field has the same type family, or null if all + // indices have the same type family for the field. + Indices []string `json:"indices,omitempty"` + // Meta Merged metadata across all indices as a map of string keys to arrays of + // values. 
A value length of 1 indicates that all indices had the same value for + // this key, while a length of 2 or more indicates that not all indices had the + // same value for this key. + Meta Metadata `json:"meta,omitempty"` + // MetadataField Whether this field is registered as a metadata field. + MetadataField *bool `json:"metadata_field,omitempty"` + // MetricConflictsIndices The list of indices where this field is present if these indices + // don’t have the same `time_series_metric` value for this field. + MetricConflictsIndices []string `json:"metric_conflicts_indices,omitempty"` + // NonAggregatableIndices The list of indices where this field is not aggregatable, or null if all + // indices have the same definition for the field. + NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"` + // NonDimensionIndices If this list is present in response then some indices have the + // field marked as a dimension and other indices, the ones in this list, do not. + NonDimensionIndices []string `json:"non_dimension_indices,omitempty"` + // NonSearchableIndices The list of indices where this field is not searchable, or null if all + // indices have the same definition for the field. + NonSearchableIndices []string `json:"non_searchable_indices,omitempty"` + // Searchable Whether this field is indexed for search on all indices. + Searchable bool `json:"searchable"` + // TimeSeriesDimension Whether this field is used as a time series dimension. + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + // TimeSeriesMetric Contains metric type if this fields is used as a time series + // metrics, absent if the field is not used as metric. + TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type"` +} + +func (s *FieldCapability) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregatable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Aggregatable", err) + } + s.Aggregatable = value + case bool: + s.Aggregatable = v + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "metadata_field": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MetadataField", err) + } + s.MetadataField = &value + case bool: + s.MetadataField = &v + } + + case "metric_conflicts_indices": + if err := dec.Decode(&s.MetricConflictsIndices); err != nil { + return fmt.Errorf("%s | %w", "MetricConflictsIndices", err) + } + + case "non_aggregatable_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "NonAggregatableIndices", err) + } + + s.NonAggregatableIndices = append(s.NonAggregatableIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NonAggregatableIndices); err != nil { + return fmt.Errorf("%s | %w", "NonAggregatableIndices", err) + } + } + + case "non_dimension_indices": + if err := dec.Decode(&s.NonDimensionIndices); err != nil { + return fmt.Errorf("%s | %w", "NonDimensionIndices", err) + } + + case "non_searchable_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "NonSearchableIndices", err) + } + + s.NonSearchableIndices = append(s.NonSearchableIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NonSearchableIndices); err != nil { + return fmt.Errorf("%s | %w", "NonSearchableIndices", err) + } + } + + case "searchable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Searchable", err) + } + s.Searchable = value + case bool: + s.Searchable = v + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewFieldCapability returns a FieldCapability. +func NewFieldCapability() *FieldCapability { + r := &FieldCapability{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcollapse.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcollapse.go new file mode 100644 index 000000000..2a79529cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldcollapse.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
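Editor's note: the generated UnmarshalJSON for property maps (seen above in FieldAliasProperty, and again below in FieldMapping) decodes polymorphic values by peeking at each entry's "type" discriminator, dispatching to the matching concrete constructor, defaulting the discriminator to "object" when absent, and falling back to new(Property) for unknown types. Below is a minimal, stdlib-only sketch of the same pattern; keywordProperty and longProperty are illustrative stand-ins for the full generated case list, and map values are kept as `any`, mirroring the generated `Property` alias.

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-ins for two concrete property types; the
// vendored code switches over the full specification-derived list.
type keywordProperty struct {
	Type        string `json:"type"`
	IgnoreAbove *int   `json:"ignore_above,omitempty"`
}

type longProperty struct {
	Type string `json:"type"`
}

// decodeProperties mirrors the generated dispatch: probe each raw
// value's "type" key (defaulting to "object" when the key is
// missing), then decode the same raw message into the matching
// concrete type.
func decodeProperties(data []byte) (map[string]any, error) {
	raw := map[string]json.RawMessage{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return nil, err
	}
	out := make(map[string]any, len(raw))
	for key, msg := range raw {
		probe := struct {
			Type string `json:"type"`
		}{Type: "object"} // mappings omit "type" for object fields
		if err := json.Unmarshal(msg, &probe); err != nil {
			return nil, err
		}
		switch probe.Type {
		case "keyword":
			oo := &keywordProperty{}
			if err := json.Unmarshal(msg, oo); err != nil {
				return nil, fmt.Errorf("Properties | %w", err)
			}
			out[key] = oo
		case "long":
			oo := &longProperty{}
			if err := json.Unmarshal(msg, oo); err != nil {
				return nil, fmt.Errorf("Properties | %w", err)
			}
			out[key] = oo
		default:
			// keep the raw value, analogous to the new(Property) fallback
			out[key] = msg
		}
	}
	return out, nil
}

func main() {
	props, err := decodeProperties([]byte(
		`{"user":{"type":"keyword","ignore_above":256},"age":{"type":"long"}}`))
	if err != nil {
		panic(err)
	}
	for key, p := range props {
		fmt.Printf("%s -> %T\n", key, p)
	}
}

The generated code additionally rewinds its buffer (buf.Seek(0, io.SeekStart)) so the probe and the full decode share one reader; the sketch simply reuses the raw message, which is equivalent for this purpose.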
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldCollapse type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/FieldCollapse.ts#L24-L41 +type FieldCollapse struct { + Collapse *FieldCollapse `json:"collapse,omitempty"` + // Field The field to collapse the result set on + Field string `json:"field"` + // InnerHits The number of inner hits and their sort order + InnerHits []InnerHits `json:"inner_hits,omitempty"` + // MaxConcurrentGroupSearches The number of concurrent requests allowed to retrieve the inner_hits per + // group + MaxConcurrentGroupSearches *int `json:"max_concurrent_group_searches,omitempty"` +} + +func (s *FieldCollapse) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "inner_hits": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewInnerHits() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + s.InnerHits = append(s.InnerHits, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + } + + case "max_concurrent_group_searches": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxConcurrentGroupSearches", err) + } + s.MaxConcurrentGroupSearches = &value + case float64: + f := int(v) + s.MaxConcurrentGroupSearches = &f + } + + } + } + return nil +} + +// NewFieldCollapse returns a FieldCollapse. +func NewFieldCollapse() *FieldCollapse { + r := &FieldCollapse{} + + return r +} + +type FieldCollapseVariant interface { + FieldCollapseCaster() *FieldCollapse +} + +func (s *FieldCollapse) FieldCollapseCaster() *FieldCollapse { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatafrequencyfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatafrequencyfilter.go new file mode 100644 index 000000000..215d8cb53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatafrequencyfilter.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
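Editor's note: FieldCollapse, completed just above, groups search hits by a single field and can bound the concurrency used to fetch inner_hits per group. A minimal construction sketch against the vendored package; the field name and limit are illustrative, and only fields shown in the hunk above are set.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Collapse search hits on a single field.
	collapse := types.NewFieldCollapse()
	collapse.Field = "user.id"
	n := 4
	collapse.MaxConcurrentGroupSearches = &n

	b, err := json.Marshal(collapse)
	if err != nil {
		panic(err)
	}
	// {"field":"user.id","max_concurrent_group_searches":4}
	fmt.Println(string(b))
}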
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FielddataFrequencyFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 +type FielddataFrequencyFilter struct { + Max Float64 `json:"max"` + Min Float64 `json:"min"` + MinSegmentSize int `json:"min_segment_size"` +} + +func (s *FielddataFrequencyFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "min_segment_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinSegmentSize", err) + } + s.MinSegmentSize = value + case float64: + f := int(v) + s.MinSegmentSize = f + } + + } + } + return nil +} + +// NewFielddataFrequencyFilter returns a FielddataFrequencyFilter. +func NewFielddataFrequencyFilter() *FielddataFrequencyFilter { + r := &FielddataFrequencyFilter{} + + return r +} + +type FielddataFrequencyFilterVariant interface { + FielddataFrequencyFilterCaster() *FielddataFrequencyFilter +} + +func (s *FielddataFrequencyFilter) FielddataFrequencyFilterCaster() *FielddataFrequencyFilter { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatarecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatarecord.go new file mode 100644 index 000000000..7eefd59e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatarecord.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
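Editor's note: FielddataFrequencyFilter's decoder above accepts both JSON numbers and numeric strings for min, max, and min_segment_size. A small sketch showing that both payload forms decode to the same value, assuming the vendored v9 types package; the threshold values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a, b types.FielddataFrequencyFilter
	// Numeric form and string form decode identically.
	_ = json.Unmarshal([]byte(`{"min":0.001,"max":0.1,"min_segment_size":500}`), &a)
	_ = json.Unmarshal([]byte(`{"min":"0.001","max":"0.1","min_segment_size":"500"}`), &b)
	fmt.Println(a == b) // true: string-encoded numbers are parsed leniently
}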
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FielddataRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/fielddata/types.ts#L20-L48 +type FielddataRecord struct { + // Field field name + Field *string `json:"field,omitempty"` + // Host host name + Host *string `json:"host,omitempty"` + // Id node id + Id *string `json:"id,omitempty"` + // Ip ip address + Ip *string `json:"ip,omitempty"` + // Node node name + Node *string `json:"node,omitempty"` + // Size field data usage + Size *string `json:"size,omitempty"` +} + +func (s *FielddataRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = &o + + } + } + return nil +} + +// NewFielddataRecord returns a FielddataRecord. +func NewFielddataRecord() *FielddataRecord { + r := &FielddataRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatastats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatastats.go new file mode 100644 index 000000000..c7cdc4cbf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatastats.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FielddataStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L123-L129 +type FielddataStats struct { + Evictions *int64 `json:"evictions,omitempty"` + Fields map[string]FieldMemoryUsage `json:"fields,omitempty"` + GlobalOrdinals GlobalOrdinalsStats `json:"global_ordinals"` + MemorySize ByteSize `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +func (s *FielddataStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = &value + case float64: + f := int64(v) + s.Evictions = &f + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]FieldMemoryUsage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "global_ordinals": + if err := dec.Decode(&s.GlobalOrdinals); err != nil { + return fmt.Errorf("%s | %w", "GlobalOrdinals", err) + } + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return fmt.Errorf("%s | %w", "MemorySize", err) + } + + case "memory_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemorySizeInBytes", err) + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + } + } + return nil +} + +// NewFielddataStats returns a FielddataStats. +func NewFielddataStats() *FielddataStats { + r := &FielddataStats{ + Fields: make(map[string]FieldMemoryUsage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatemath.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatemath.go new file mode 100644 index 000000000..e993d2c42 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fielddatemath.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FieldDateMath holds the union for the following types: +// +// string +// Float64 +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L309-L316 +type FieldDateMath any + +type FieldDateMathVariant interface { + FieldDateMathCaster() *FieldDateMath +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldlookup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldlookup.go new file mode 100644 index 000000000..0f0276638 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldlookup.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FieldLookup type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L436-L453 +type FieldLookup struct { + // Id `id` of the document. + Id string `json:"id"` + // Index Index from which to retrieve the document. + Index *string `json:"index,omitempty"` + // Path Name of the field. + Path *string `json:"path,omitempty"` + // Routing Custom routing value. 
+ Routing *string `json:"routing,omitempty"` +} + +func (s *FieldLookup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + } + } + return nil +} + +// NewFieldLookup returns a FieldLookup. +func NewFieldLookup() *FieldLookup { + r := &FieldLookup{} + + return r +} + +type FieldLookupVariant interface { + FieldLookupCaster() *FieldLookup +} + +func (s *FieldLookup) FieldLookupCaster() *FieldLookup { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmapping.go new file mode 100644 index 000000000..718039ed9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmapping.go @@ -0,0 +1,416 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldMapping type. 
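Editor's note: FieldLookup, completed just above, names a stored document (id, index, path, routing) whose field values can be referenced from a query, as in a terms lookup. A minimal construction sketch; the index, id, and path values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Point at the document whose field supplies the lookup terms.
	lookup := types.NewFieldLookup()
	lookup.Id = "2"
	index, path := "users", "followed_by"
	lookup.Index = &index
	lookup.Path = &path

	b, _ := json.Marshal(lookup)
	// {"id":"2","index":"users","path":"followed_by"}
	fmt.Println(string(b))
}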
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L24-L27 +type FieldMapping struct { + FullName string `json:"full_name"` + Mapping map[string]Property `json:"mapping"` +} + +func (s *FieldMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "full_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = o + + case "mapping": + if s.Mapping == nil { + s.Mapping = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "date": + oo := NewDateProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Mapping | %w", err) + } + s.Mapping[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Mapping[key] = oo + } + } + + } + } + return nil +} + +// NewFieldMapping returns a FieldMapping. +func NewFieldMapping() *FieldMapping { + r := &FieldMapping{ + Mapping: make(map[string]Property), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmemoryusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmemoryusage.go new file mode 100644 index 000000000..e2405e302 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmemoryusage.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldMemoryUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L143-L146 +type FieldMemoryUsage struct { + MemorySize ByteSize `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +func (s *FieldMemoryUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return fmt.Errorf("%s | %w", "MemorySize", err) + } + + case "memory_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemorySizeInBytes", err) + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + } + } + return nil +} + +// NewFieldMemoryUsage returns a FieldMemoryUsage. +func NewFieldMemoryUsage() *FieldMemoryUsage { + r := &FieldMemoryUsage{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmetric.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmetric.go new file mode 100644 index 000000000..43aa2830a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldmetric.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
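Editor's note: FielddataStats and FieldMemoryUsage, both defined above, model the fielddata section of a stats response, with per-field usage keyed in the Fields map. A short decoding sketch over an illustrative response fragment; field names and byte counts are made up for the example.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	data := []byte(`{
		"memory_size_in_bytes": 1024,
		"evictions": 0,
		"fields": {"user.id": {"memory_size_in_bytes": 512}}
	}`)
	var fd types.FielddataStats
	if err := json.Unmarshal(data, &fd); err != nil {
		panic(err)
	}
	// Per-field usage decodes through FieldMemoryUsage's own decoder.
	fmt.Println(fd.MemorySizeInBytes, fd.Fields["user.id"].MemorySizeInBytes)
}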
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric" +) + +// FieldMetric type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Metric.ts#L30-L35 +type FieldMetric struct { + // Field The field to collect metrics for. This must be a numeric of some kind. + Field string `json:"field"` + // Metrics An array of metrics to collect for the field. At least one metric must be + // configured. + Metrics []metric.Metric `json:"metrics"` +} + +func (s *FieldMetric) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + + } + } + return nil +} + +// NewFieldMetric returns a FieldMetric. +func NewFieldMetric() *FieldMetric { + r := &FieldMetric{} + + return r +} + +type FieldMetricVariant interface { + FieldMetricCaster() *FieldMetric +} + +func (s *FieldMetric) FieldMetricCaster() *FieldMetric { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldnamesfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldnamesfield.go new file mode 100644 index 000000000..8ebe443ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldnamesfield.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldNamesField type. 
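Editor's note: FieldMetric, completed just above, pairs a rollup field with the metrics to collect for it. A minimal sketch; it assumes the generated metric enum package exposes values such as metric.Min and metric.Max (the exact variable names are an assumption, not confirmed by this diff), and the field name is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric"
)

func main() {
	// Collect min and max for a numeric field in a rollup job.
	fm := types.NewFieldMetric()
	fm.Field = "temperature"
	fm.Metrics = []metric.Metric{metric.Min, metric.Max}

	b, _ := json.Marshal(fm)
	// Expected, assuming text marshalling of the enum:
	// {"field":"temperature","metrics":["min","max"]}
	fmt.Println(string(b))
}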
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L42-L44 +type FieldNamesField struct { + Enabled bool `json:"enabled"` +} + +func (s *FieldNamesField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewFieldNamesField returns a FieldNamesField. +func NewFieldNamesField() *FieldNamesField { + r := &FieldNamesField{} + + return r +} + +type FieldNamesFieldVariant interface { + FieldNamesFieldCaster() *FieldNamesField +} + +func (s *FieldNamesField) FieldNamesFieldCaster() *FieldNamesField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fields.go new file mode 100644 index 000000000..30d52b1cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fields.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Fields type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L137-L137 +type Fields []string + +type FieldsVariant interface { + FieldsCaster() *Fields +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsecurity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsecurity.go new file mode 100644 index 000000000..b4374efb8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsecurity.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
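Editor's note: Fields is a plain string-slice alias, and FieldNamesField's decoder above tolerates the enabled flag arriving as either a JSON boolean or a string. A brief sketch of both, with illustrative field patterns.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Fields is just []string under the alias.
	fs := types.Fields{"title", "body*"}
	fmt.Println(fs)

	// The boolean may arrive string-encoded and is parsed leniently.
	var fn types.FieldNamesField
	_ = json.Unmarshal([]byte(`{"enabled":"false"}`), &fn)
	fmt.Println(fn.Enabled) // false
}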
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FieldSecurity type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/FieldSecurity.ts#L22-L25 +type FieldSecurity struct { + Except []string `json:"except,omitempty"` + Grant []string `json:"grant,omitempty"` +} + +func (s *FieldSecurity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "except": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Except", err) + } + + s.Except = append(s.Except, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Except); err != nil { + return fmt.Errorf("%s | %w", "Except", err) + } + } + + case "grant": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Grant", err) + } + + s.Grant = append(s.Grant, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Grant); err != nil { + return fmt.Errorf("%s | %w", "Grant", err) + } + } + + } + } + return nil +} + +// NewFieldSecurity returns a FieldSecurity. +func NewFieldSecurity() *FieldSecurity { + r := &FieldSecurity{} + + return r +} + +type FieldSecurityVariant interface { + FieldSecurityCaster() *FieldSecurity +} + +func (s *FieldSecurity) FieldSecurityCaster() *FieldSecurity { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsizeusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsizeusage.go new file mode 100644 index 000000000..133c82c5f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsizeusage.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldSizeUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L95-L98 +type FieldSizeUsage struct { + Size ByteSize `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *FieldSizeUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewFieldSizeUsage returns a FieldSizeUsage. +func NewFieldSizeUsage() *FieldSizeUsage { + r := &FieldSizeUsage{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsort.go new file mode 100644 index 000000000..f526e6741 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsort.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldsortnumerictype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +// FieldSort type. 
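Editor's note: FieldSecurity's decoder above promotes a bare string for grant or except into a one-element slice, so role documents can use either form. A short sketch showing the two equivalent payloads; the field names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a, b types.FieldSecurity
	// A bare string and a one-element array decode identically.
	_ = json.Unmarshal([]byte(`{"grant":"title"}`), &a)
	_ = json.Unmarshal([]byte(`{"grant":["title"],"except":["body"]}`), &b)
	fmt.Println(a.Grant, b.Grant, b.Except) // [title] [title] [body]
}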
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L43-L52 +type FieldSort struct { + Format *string `json:"format,omitempty"` + Missing Missing `json:"missing,omitempty"` + Mode *sortmode.SortMode `json:"mode,omitempty"` + Nested *NestedSortValue `json:"nested,omitempty"` + NumericType *fieldsortnumerictype.FieldSortNumericType `json:"numeric_type,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + UnmappedType *fieldtype.FieldType `json:"unmapped_type,omitempty"` +} + +func (s *FieldSort) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Order) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return fmt.Errorf("%s | %w", "Nested", err) + } + + case "numeric_type": + if err := dec.Decode(&s.NumericType); err != nil { + return fmt.Errorf("%s | %w", "NumericType", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "unmapped_type": + if err := dec.Decode(&s.UnmappedType); err != nil { + return fmt.Errorf("%s | %w", "UnmappedType", err) + } + + } + } + return nil +} + +// NewFieldSort returns a FieldSort. +func NewFieldSort() *FieldSort { + r := &FieldSort{} + + return r +} + +type FieldSortVariant interface { + FieldSortCaster() *FieldSort +} + +func (s *FieldSort) FieldSortCaster() *FieldSort { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstat.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstat.go new file mode 100644 index 000000000..702ee494a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstat.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldStat type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/_types/Structure.ts#L23-L33 +type FieldStat struct { + Cardinality int `json:"cardinality"` + Count int `json:"count"` + Earliest *string `json:"earliest,omitempty"` + Latest *string `json:"latest,omitempty"` + MaxValue *int `json:"max_value,omitempty"` + MeanValue *int `json:"mean_value,omitempty"` + MedianValue *int `json:"median_value,omitempty"` + MinValue *int `json:"min_value,omitempty"` + TopHits []TopHit `json:"top_hits"` +} + +func (s *FieldStat) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cardinality": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cardinality", err) + } + s.Cardinality = value + case float64: + f := int(v) + s.Cardinality = f + } + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "earliest": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Earliest", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Earliest = &o + + case "latest": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Latest", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Latest = &o + + case "max_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxValue", err) + } + s.MaxValue = &value + case float64: + f := int(v) + s.MaxValue = &f + } + + case "mean_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MeanValue", err) + } + s.MeanValue = &value + case float64: + f := int(v) + s.MeanValue = &f + } + + case "median_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MedianValue", err) + } + s.MedianValue = &value + case float64: + f := int(v) + s.MedianValue = &f + } + + case "min_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinValue", err) + } + s.MinValue = &value + case float64: + f := int(v) + s.MinValue = &f + } + + case "top_hits": + if err := dec.Decode(&s.TopHits); err != nil { + return fmt.Errorf("%s | %w", "TopHits", err) + } + + } + } + return nil +} + +// NewFieldStat returns a FieldStat. 
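+// In a text_structure find_structure response, the stats carried by this
+// type look roughly like (values illustrative):
+//
+//	{"count": 19, "cardinality": 4, "earliest": "2024-01-01", "top_hits": [{"count": 12, "value": "GET"}]}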
+func NewFieldStat() *FieldStat { + r := &FieldStat{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstatistics.go new file mode 100644 index 000000000..a64100f03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldstatistics.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/types.ts#L28-L32 +type FieldStatistics struct { + DocCount int `json:"doc_count"` + SumDocFreq int64 `json:"sum_doc_freq"` + SumTtf int64 `json:"sum_ttf"` +} + +func (s *FieldStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int(v) + s.DocCount = f + } + + case "sum_doc_freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumDocFreq", err) + } + s.SumDocFreq = value + case float64: + f := int64(v) + s.SumDocFreq = f + } + + case "sum_ttf": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumTtf", err) + } + s.SumTtf = value + case float64: + f := int64(v) + s.SumTtf = f + } + + } + } + return nil +} + +// NewFieldStatistics returns a FieldStatistics. +func NewFieldStatistics() *FieldStatistics { + r := &FieldStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsuggester.go new file mode 100644 index 000000000..0eb497657 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsuggester.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldSuggester type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L110-L143 +type FieldSuggester struct { + AdditionalFieldSuggesterProperty map[string]json.RawMessage `json:"-"` + // Completion Provides auto-complete/search-as-you-type functionality. + Completion *CompletionSuggester `json:"completion,omitempty"` + // Phrase Provides access to word alternatives on a per token basis within a certain + // string distance. + Phrase *PhraseSuggester `json:"phrase,omitempty"` + // Prefix Prefix used to search for suggestions. + Prefix *string `json:"prefix,omitempty"` + // Regex A prefix expressed as a regular expression. + Regex *string `json:"regex,omitempty"` + // Term Suggests terms based on edit distance. + Term *TermSuggester `json:"term,omitempty"` + // Text The text to use as input for the suggester. + // Needs to be set globally or per suggestion. 
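+	// For reference, one named entry under a search request's `suggest`
+	// section has the rough shape (values illustrative):
+	//
+	//	{"my-suggestion": {"text": "tring out Elasticsearch", "term": {"field": "message"}}}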
+ Text *string `json:"text,omitempty"` +} + +func (s *FieldSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "completion": + if err := dec.Decode(&s.Completion); err != nil { + return fmt.Errorf("%s | %w", "Completion", err) + } + + case "phrase": + if err := dec.Decode(&s.Phrase); err != nil { + return fmt.Errorf("%s | %w", "Phrase", err) + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = &o + + case "regex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Regex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Regex = &o + + case "term": + if err := dec.Decode(&s.Term); err != nil { + return fmt.Errorf("%s | %w", "Term", err) + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + default: + + if key, ok := t.(string); ok { + if s.AdditionalFieldSuggesterProperty == nil { + s.AdditionalFieldSuggesterProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFieldSuggesterProperty", err) + } + s.AdditionalFieldSuggesterProperty[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s FieldSuggester) MarshalJSON() ([]byte, error) { + type opt FieldSuggester + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFieldSuggesterProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFieldSuggesterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFieldSuggester returns a FieldSuggester. +func NewFieldSuggester() *FieldSuggester { + r := &FieldSuggester{ + AdditionalFieldSuggesterProperty: make(map[string]json.RawMessage), + } + + return r +} + +type FieldSuggesterVariant interface { + FieldSuggesterCaster() *FieldSuggester +} + +func (s *FieldSuggester) FieldSuggesterCaster() *FieldSuggester { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsummary.go new file mode 100644 index 000000000..f5f90b30e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldsummary.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FieldSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L58-L67 +type FieldSummary struct { + Any uint `json:"any"` + DocValues uint `json:"doc_values"` + InvertedIndex InvertedIndex `json:"inverted_index"` + KnnVectors uint `json:"knn_vectors"` + Norms uint `json:"norms"` + Points uint `json:"points"` + StoredFields uint `json:"stored_fields"` + TermVectors uint `json:"term_vectors"` +} + +// NewFieldSummary returns a FieldSummary. +func NewFieldSummary() *FieldSummary { + r := &FieldSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypes.go new file mode 100644 index 000000000..c9a9aa928 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypes.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldTypes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L211-L254 +type FieldTypes struct { + // Count The number of occurrences of the field type in selected nodes. + Count int `json:"count"` + // IndexCount The number of indices containing the field type in selected nodes. + IndexCount int `json:"index_count"` + // IndexedVectorCount For dense_vector field types, number of indexed vector types in selected + // nodes. 
+ IndexedVectorCount *int `json:"indexed_vector_count,omitempty"` + // IndexedVectorDimMax For dense_vector field types, the maximum dimension of all indexed vector + // types in selected nodes. + IndexedVectorDimMax *int `json:"indexed_vector_dim_max,omitempty"` + // IndexedVectorDimMin For dense_vector field types, the minimum dimension of all indexed vector + // types in selected nodes. + IndexedVectorDimMin *int `json:"indexed_vector_dim_min,omitempty"` + // Name The name for the field type in selected nodes. + Name string `json:"name"` + // ScriptCount The number of fields that declare a script. + ScriptCount *int `json:"script_count,omitempty"` + // VectorElementTypeCount For dense_vector field types, count of mappings by element type + VectorElementTypeCount map[string]int `json:"vector_element_type_count,omitempty"` + // VectorIndexTypeCount For dense_vector field types, count of mappings by index type + VectorIndexTypeCount map[string]int `json:"vector_index_type_count,omitempty"` + // VectorSimilarityTypeCount For dense_vector field types, count of mappings by similarity + VectorSimilarityTypeCount map[string]int `json:"vector_similarity_type_count,omitempty"` +} + +func (s *FieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "index_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "indexed_vector_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedVectorCount", err) + } + s.IndexedVectorCount = &value + case float64: + f := int(v) + s.IndexedVectorCount = &f + } + + case "indexed_vector_dim_max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedVectorDimMax", err) + } + s.IndexedVectorDimMax = &value + case float64: + f := int(v) + s.IndexedVectorDimMax = &f + } + + case "indexed_vector_dim_min": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexedVectorDimMin", err) + } + s.IndexedVectorDimMin = &value + case float64: + f := int(v) + s.IndexedVectorDimMin = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "script_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptCount", err) + } + s.ScriptCount = &value + case float64: + f := int(v) + s.ScriptCount = &f + } + + case "vector_element_type_count": + if s.VectorElementTypeCount == nil { + s.VectorElementTypeCount = make(map[string]int, 0) + } + if err := dec.Decode(&s.VectorElementTypeCount); err != nil { + return fmt.Errorf("%s | %w", 
"VectorElementTypeCount", err) + } + + case "vector_index_type_count": + if s.VectorIndexTypeCount == nil { + s.VectorIndexTypeCount = make(map[string]int, 0) + } + if err := dec.Decode(&s.VectorIndexTypeCount); err != nil { + return fmt.Errorf("%s | %w", "VectorIndexTypeCount", err) + } + + case "vector_similarity_type_count": + if s.VectorSimilarityTypeCount == nil { + s.VectorSimilarityTypeCount = make(map[string]int, 0) + } + if err := dec.Decode(&s.VectorSimilarityTypeCount); err != nil { + return fmt.Errorf("%s | %w", "VectorSimilarityTypeCount", err) + } + + } + } + return nil +} + +// NewFieldTypes returns a FieldTypes. +func NewFieldTypes() *FieldTypes { + r := &FieldTypes{ + VectorElementTypeCount: make(map[string]int), + VectorIndexTypeCount: make(map[string]int), + VectorSimilarityTypeCount: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypesmappings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypesmappings.go new file mode 100644 index 000000000..c12f0e61f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldtypesmappings.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FieldTypesMappings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L180-L209 +type FieldTypesMappings struct { + // FieldTypes Contains statistics about field data types used in selected nodes. + FieldTypes []FieldTypes `json:"field_types"` + // RuntimeFieldTypes Contains statistics about runtime field data types used in selected nodes. + RuntimeFieldTypes []ClusterRuntimeFieldTypes `json:"runtime_field_types"` + // SourceModes Source mode usage count. + SourceModes map[string]int `json:"source_modes"` + // TotalDeduplicatedFieldCount Total number of fields in all non-system indices, accounting for mapping + // deduplication. + TotalDeduplicatedFieldCount *int64 `json:"total_deduplicated_field_count,omitempty"` + // TotalDeduplicatedMappingSize Total size of all mappings after deduplication and compression. + TotalDeduplicatedMappingSize ByteSize `json:"total_deduplicated_mapping_size,omitempty"` + // TotalDeduplicatedMappingSizeInBytes Total size of all mappings, in bytes, after deduplication and compression. 
+ TotalDeduplicatedMappingSizeInBytes *int64 `json:"total_deduplicated_mapping_size_in_bytes,omitempty"` + // TotalFieldCount Total number of fields in all non-system indices. + TotalFieldCount *int64 `json:"total_field_count,omitempty"` +} + +func (s *FieldTypesMappings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_types": + if err := dec.Decode(&s.FieldTypes); err != nil { + return fmt.Errorf("%s | %w", "FieldTypes", err) + } + + case "runtime_field_types": + if err := dec.Decode(&s.RuntimeFieldTypes); err != nil { + return fmt.Errorf("%s | %w", "RuntimeFieldTypes", err) + } + + case "source_modes": + if s.SourceModes == nil { + s.SourceModes = make(map[string]int, 0) + } + if err := dec.Decode(&s.SourceModes); err != nil { + return fmt.Errorf("%s | %w", "SourceModes", err) + } + + case "total_deduplicated_field_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDeduplicatedFieldCount", err) + } + s.TotalDeduplicatedFieldCount = &value + case float64: + f := int64(v) + s.TotalDeduplicatedFieldCount = &f + } + + case "total_deduplicated_mapping_size": + if err := dec.Decode(&s.TotalDeduplicatedMappingSize); err != nil { + return fmt.Errorf("%s | %w", "TotalDeduplicatedMappingSize", err) + } + + case "total_deduplicated_mapping_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDeduplicatedMappingSizeInBytes", err) + } + s.TotalDeduplicatedMappingSizeInBytes = &value + case float64: + f := int64(v) + s.TotalDeduplicatedMappingSizeInBytes = &f + } + + case "total_field_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalFieldCount", err) + } + s.TotalFieldCount = &value + case float64: + f := int64(v) + s.TotalFieldCount = &f + } + + } + } + return nil +} + +// NewFieldTypesMappings returns a FieldTypesMappings. +func NewFieldTypesMappings() *FieldTypesMappings { + r := &FieldTypesMappings{ + SourceModes: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvalue.go new file mode 100644 index 000000000..8419a3180 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvalue.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FieldValue holds the union for the following types: +// +// int64 +// Float64 +// string +// bool +// nil +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L25-L31 +type FieldValue any + +type FieldValueVariant interface { + FieldValueCaster() *FieldValue +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvaluefactorscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvaluefactorscorefunction.go new file mode 100644 index 000000000..3d9caec08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fieldvaluefactorscorefunction.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldvaluefactormodifier" +) + +// FieldValueFactorScoreFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L149-L168 +type FieldValueFactorScoreFunction struct { + // Factor Optional factor to multiply the field value with. + Factor *Float64 `json:"factor,omitempty"` + // Field Field to be extracted from the document. + Field string `json:"field"` + // Missing Value used if the document doesn’t have that field. + // The modifier and factor are still applied to it as though it were read from + // the document. + Missing *Float64 `json:"missing,omitempty"` + // Modifier Modifier to apply to the field value. 
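+	// Accepted values include `none`, `log1p`, `sqrt` and `reciprocal`; see
+	// the fieldvaluefactormodifier enum package for the full list.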
+ Modifier *fieldvaluefactormodifier.FieldValueFactorModifier `json:"modifier,omitempty"` +} + +func (s *FieldValueFactorScoreFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Factor", err) + } + f := Float64(value) + s.Factor = &f + case float64: + f := Float64(v) + s.Factor = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + f := Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f + } + + case "modifier": + if err := dec.Decode(&s.Modifier); err != nil { + return fmt.Errorf("%s | %w", "Modifier", err) + } + + } + } + return nil +} + +// NewFieldValueFactorScoreFunction returns a FieldValueFactorScoreFunction. +func NewFieldValueFactorScoreFunction() *FieldValueFactorScoreFunction { + r := &FieldValueFactorScoreFunction{} + + return r +} + +type FieldValueFactorScoreFunctionVariant interface { + FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction +} + +func (s *FieldValueFactorScoreFunction) FieldValueFactorScoreFunctionCaster() *FieldValueFactorScoreFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filecountsnapshotstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filecountsnapshotstats.go new file mode 100644 index 000000000..755f7e199 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filecountsnapshotstats.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileCountSnapshotStats type. 
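+//
+// As elsewhere in these generated types, numeric fields tolerate both JSON
+// numbers and quoted numbers. A minimal sketch (input illustrative):
+//
+//	var st types.FileCountSnapshotStats
+//	_ = json.Unmarshal([]byte(`{"file_count":"12","size_in_bytes":3456}`), &st)
+//	// st.FileCount == 12, st.SizeInBytes == 3456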
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 +type FileCountSnapshotStats struct { + FileCount int `json:"file_count"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *FileCountSnapshotStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FileCount", err) + } + s.FileCount = value + case float64: + f := int(v) + s.FileCount = f + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewFileCountSnapshotStats returns a FileCountSnapshotStats. +func NewFileCountSnapshotStats() *FileCountSnapshotStats { + r := &FileCountSnapshotStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filedetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filedetails.go new file mode 100644 index 000000000..0d4f8ed74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filedetails.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileDetails type. 
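+//
+// String fields such as Name are decoded through json.RawMessage plus
+// strconv.Unquote: a quoted JSON string loses its quotes, and any input
+// Unquote rejects is kept verbatim as a fallback (input illustrative):
+//
+//	var fd types.FileDetails
+//	_ = json.Unmarshal([]byte(`{"name":"_0.cfs","length":9999,"recovered":4096}`), &fd)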
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L50-L54 +type FileDetails struct { + Length int64 `json:"length"` + Name string `json:"name"` + Recovered int64 `json:"recovered"` +} + +func (s *FileDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = value + case float64: + f := int64(v) + s.Length = f + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "recovered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Recovered", err) + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + } + } + return nil +} + +// NewFileDetails returns a FileDetails. +func NewFileDetails() *FileDetails { + r := &FileDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicator.go new file mode 100644 index 000000000..3a8cdc357 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// FileSettingsIndicator type. 
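+//
+// A healthy payload pairs the status enum with a human-readable symptom
+// (values illustrative):
+//
+//	{"status": "green", "symptom": "The file settings are up-to-date"}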
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L208-L212 +type FileSettingsIndicator struct { + Details *FileSettingsIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *FileSettingsIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewFileSettingsIndicator returns a FileSettingsIndicator. +func NewFileSettingsIndicator() *FileSettingsIndicator { + r := &FileSettingsIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicatordetails.go new file mode 100644 index 000000000..ffe9ef6d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesettingsindicatordetails.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileSettingsIndicatorDetails type. 
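+//
+// Populated when file-based settings fail to apply; the failure streak is
+// decoded leniently from either a JSON number or a quoted number (values
+// illustrative):
+//
+//	{"failure_streak": 3, "most_recent_failure": "parse error in settings.json"}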
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L214-L217 +type FileSettingsIndicatorDetails struct { + FailureStreak int64 `json:"failure_streak"` + MostRecentFailure string `json:"most_recent_failure"` +} + +func (s *FileSettingsIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failure_streak": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FailureStreak", err) + } + s.FailureStreak = value + case float64: + f := int64(v) + s.FailureStreak = f + } + + case "most_recent_failure": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MostRecentFailure", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MostRecentFailure = o + + } + } + return nil +} + +// NewFileSettingsIndicatorDetails returns a FileSettingsIndicatorDetails. +func NewFileSettingsIndicatorDetails() *FileSettingsIndicatorDetails { + r := &FileSettingsIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystem.go new file mode 100644 index 000000000..2e64954bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystem.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileSystem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L771-L789 +type FileSystem struct { + // Data List of all file stores. + Data []DataPathStats `json:"data,omitempty"` + // IoStats Contains I/O statistics for the node. + IoStats *IoStats `json:"io_stats,omitempty"` + // Timestamp Last time the file stores statistics were refreshed. + // Recorded in milliseconds since the Unix Epoch. + Timestamp *int64 `json:"timestamp,omitempty"` + // Total Contains statistics for all file stores of the node. 
+ Total *FileSystemTotal `json:"total,omitempty"` +} + +func (s *FileSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if err := dec.Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + case "io_stats": + if err := dec.Decode(&s.IoStats); err != nil { + return fmt.Errorf("%s | %w", "IoStats", err) + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + } + } + return nil +} + +// NewFileSystem returns a FileSystem. +func NewFileSystem() *FileSystem { + r := &FileSystem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystemtotal.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystemtotal.go new file mode 100644 index 000000000..5a5d89bf2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filesystemtotal.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FileSystemTotal type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L830-L859 +type FileSystemTotal struct { + // Available Total disk space available to this Java virtual machine on all file stores. + // Depending on OS or process level restrictions, this might appear less than + // `free`. + // This is the actual amount of free disk space the Elasticsearch node can + // utilise. + Available *string `json:"available,omitempty"` + // AvailableInBytes Total number of bytes available to this Java virtual machine on all file + // stores. + // Depending on OS or process level restrictions, this might appear less than + // `free_in_bytes`. + // This is the actual amount of free disk space the Elasticsearch node can + // utilise. + AvailableInBytes *int64 `json:"available_in_bytes,omitempty"` + // Free Total unallocated disk space in all file stores. 
+ Free *string `json:"free,omitempty"` + // FreeInBytes Total number of unallocated bytes in all file stores. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + // Total Total size of all file stores. + Total *string `json:"total,omitempty"` + // TotalInBytes Total size of all file stores in bytes. + TotalInBytes *int64 `json:"total_in_bytes,omitempty"` +} + +func (s *FileSystemTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Available = &o + + case "available_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AvailableInBytes", err) + } + s.AvailableInBytes = &value + case float64: + f := int64(v) + s.AvailableInBytes = &f + } + + case "free": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Free", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Free = &o + + case "free_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeInBytes", err) + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = &o + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + } + } + return nil +} + +// NewFileSystemTotal returns a FileSystemTotal. +func NewFileSystemTotal() *FileSystemTotal { + r := &FileSystemTotal{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceoptions.go new file mode 100644 index 000000000..91b1821c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceoptions.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FillMaskInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L284-L299 +type FillMaskInferenceOptions struct { + // MaskToken The string/token which will be removed from incoming documents and replaced + // with the inference prediction(s). + // In a response, this field contains the mask token for the specified + // model/tokenizer. Each model and tokenizer + // has a predefined mask token which cannot be changed. Thus, it is recommended + // not to set this value in requests. + // However, if this field is present in a request, its value must match the + // predefined value for that model/tokenizer, + // otherwise the request will fail. + MaskToken *string `json:"mask_token,omitempty"` + // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` +} + +func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mask_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaskToken", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaskToken = &o + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + + } + } + return nil +} + +// NewFillMaskInferenceOptions returns a FillMaskInferenceOptions. 
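+// A typical inference_config entry for this task type looks roughly like
+// (values illustrative):
+//
+//	{"fill_mask": {"num_top_classes": 2, "results_field": "prediction"}}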
+func NewFillMaskInferenceOptions() *FillMaskInferenceOptions { + r := &FillMaskInferenceOptions{} + + return r +} + +type FillMaskInferenceOptionsVariant interface { + FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions +} + +func (s *FillMaskInferenceOptions) FillMaskInferenceOptionsCaster() *FillMaskInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceupdateoptions.go new file mode 100644 index 000000000..d54b72c3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fillmaskinferenceupdateoptions.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FillMaskInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L430-L437 +type FillMaskInferenceUpdateOptions struct { + // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *FillMaskInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewFillMaskInferenceUpdateOptions returns a FillMaskInferenceUpdateOptions. +func NewFillMaskInferenceUpdateOptions() *FillMaskInferenceUpdateOptions { + r := &FillMaskInferenceUpdateOptions{} + + return r +} + +type FillMaskInferenceUpdateOptionsVariant interface { + FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions +} + +func (s *FillMaskInferenceUpdateOptions) FillMaskInferenceUpdateOptionsCaster() *FillMaskInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteraggregate.go new file mode 100644 index 000000000..d8a1c94ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteraggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// FilterAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L552-L556 +type FilterAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *FilterAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err 
!= nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + 
+ default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s FilterAggregate) MarshalJSON() ([]byte, error) { + type opt FilterAggregate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFilterAggregate returns a FilterAggregate. +func NewFilterAggregate() *FilterAggregate { + r := &FilterAggregate{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringadvancedsnippet.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringadvancedsnippet.go new file mode 100644 index 000000000..dda96c9e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringadvancedsnippet.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FilteringAdvancedSnippet type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L192-L196 +type FilteringAdvancedSnippet struct { + CreatedAt DateTime `json:"created_at,omitempty"` + UpdatedAt DateTime `json:"updated_at,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *FilteringAdvancedSnippet) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created_at": + if err := dec.Decode(&s.CreatedAt); err != nil { + return fmt.Errorf("%s | %w", "CreatedAt", err) + } + + case "updated_at": + if err := dec.Decode(&s.UpdatedAt); err != nil { + return fmt.Errorf("%s | %w", "UpdatedAt", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewFilteringAdvancedSnippet returns a FilteringAdvancedSnippet. +func NewFilteringAdvancedSnippet() *FilteringAdvancedSnippet { + r := &FilteringAdvancedSnippet{} + + return r +} + +type FilteringAdvancedSnippetVariant interface { + FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet +} + +func (s *FilteringAdvancedSnippet) FilteringAdvancedSnippetCaster() *FilteringAdvancedSnippet { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringconfig.go new file mode 100644 index 000000000..2c8cf9758 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringconfig.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FilteringConfig type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L209-L213 +type FilteringConfig struct { + Active FilteringRules `json:"active"` + Domain *string `json:"domain,omitempty"` + Draft FilteringRules `json:"draft"` +} + +func (s *FilteringConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + if err := dec.Decode(&s.Active); err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + + case "domain": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Domain", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Domain = &o + + case "draft": + if err := dec.Decode(&s.Draft); err != nil { + return fmt.Errorf("%s | %w", "Draft", err) + } + + } + } + return nil +} + +// NewFilteringConfig returns a FilteringConfig. +func NewFilteringConfig() *FilteringConfig { + r := &FilteringConfig{} + + return r +} + +type FilteringConfigVariant interface { + FilteringConfigCaster() *FilteringConfig +} + +func (s *FilteringConfig) FilteringConfigCaster() *FilteringConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrule.go new file mode 100644 index 000000000..bbaa53eba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrule.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringpolicy" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringrulerule" +) + +// FilteringRule type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L170-L179 +type FilteringRule struct { + CreatedAt DateTime `json:"created_at,omitempty"` + Field string `json:"field"` + Id string `json:"id"` + Order int `json:"order"` + Policy filteringpolicy.FilteringPolicy `json:"policy"` + Rule filteringrulerule.FilteringRuleRule `json:"rule"` + UpdatedAt DateTime `json:"updated_at,omitempty"` + Value string `json:"value"` +} + +func (s *FilteringRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created_at": + if err := dec.Decode(&s.CreatedAt); err != nil { + return fmt.Errorf("%s | %w", "CreatedAt", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "order": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = value + case float64: + f := int(v) + s.Order = f + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "rule": + if err := dec.Decode(&s.Rule); err != nil { + return fmt.Errorf("%s | %w", "Rule", err) + } + + case "updated_at": + if err := dec.Decode(&s.UpdatedAt); err != nil { + return fmt.Errorf("%s | %w", "UpdatedAt", err) + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + +// NewFilteringRule returns a FilteringRule. +func NewFilteringRule() *FilteringRule { + r := &FilteringRule{} + + return r +} + +type FilteringRuleVariant interface { + FilteringRuleCaster() *FilteringRule +} + +func (s *FilteringRule) FilteringRuleCaster() *FilteringRule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrules.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrules.go new file mode 100644 index 000000000..65879bf82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrules.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FilteringRules type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L203-L207 +type FilteringRules struct { + AdvancedSnippet FilteringAdvancedSnippet `json:"advanced_snippet"` + Rules []FilteringRule `json:"rules"` + Validation FilteringRulesValidation `json:"validation"` +} + +// NewFilteringRules returns a FilteringRules. +func NewFilteringRules() *FilteringRules { + r := &FilteringRules{} + + return r +} + +type FilteringRulesVariant interface { + FilteringRulesCaster() *FilteringRules +} + +func (s *FilteringRules) FilteringRulesCaster() *FilteringRules { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrulesvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrulesvalidation.go new file mode 100644 index 000000000..42cc8200f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringrulesvalidation.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringvalidationstate" +) + +// FilteringRulesValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L198-L201 +type FilteringRulesValidation struct { + Errors []FilteringValidation `json:"errors"` + State filteringvalidationstate.FilteringValidationState `json:"state"` +} + +// NewFilteringRulesValidation returns a FilteringRulesValidation. +func NewFilteringRulesValidation() *FilteringRulesValidation { + r := &FilteringRulesValidation{} + + return r +} + +type FilteringRulesValidationVariant interface { + FilteringRulesValidationCaster() *FilteringRulesValidation +} + +func (s *FilteringRulesValidation) FilteringRulesValidationCaster() *FilteringRulesValidation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringvalidation.go new file mode 100644 index 000000000..949220fe8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filteringvalidation.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FilteringValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L181-L184 +type FilteringValidation struct { + Ids []string `json:"ids"` + Messages []string `json:"messages"` +} + +// NewFilteringValidation returns a FilteringValidation. +func NewFilteringValidation() *FilteringValidation { + r := &FilteringValidation{} + + return r +} + +type FilteringValidationVariant interface { + FilteringValidationCaster() *FilteringValidation +} + +func (s *FilteringValidation) FilteringValidationCaster() *FilteringValidation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filterref.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filterref.go new file mode 100644 index 000000000..22847b8c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filterref.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filtertype" +) + +// FilterRef type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Filter.ts#L31-L41 +type FilterRef struct { + // FilterId The identifier for the filter. + FilterId string `json:"filter_id"` + // FilterType If set to `include`, the rule applies for values in the filter. If set to + // `exclude`, the rule applies for values not in the filter. 
+ FilterType *filtertype.FilterType `json:"filter_type,omitempty"` +} + +func (s *FilterRef) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter_id": + if err := dec.Decode(&s.FilterId); err != nil { + return fmt.Errorf("%s | %w", "FilterId", err) + } + + case "filter_type": + if err := dec.Decode(&s.FilterType); err != nil { + return fmt.Errorf("%s | %w", "FilterType", err) + } + + } + } + return nil +} + +// NewFilterRef returns a FilterRef. +func NewFilterRef() *FilterRef { + r := &FilterRef{} + + return r +} + +type FilterRefVariant interface { + FilterRefCaster() *FilterRef +} + +func (s *FilterRef) FilterRefCaster() *FilterRef { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregate.go new file mode 100644 index 000000000..9dccb35f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FiltersAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L639-L643 +type FiltersAggregate struct { + Buckets BucketsFiltersBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *FiltersAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]FiltersBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []FiltersBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewFiltersAggregate returns a FiltersAggregate. 
+func NewFiltersAggregate() *FiltersAggregate { + r := &FiltersAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregation.go new file mode 100644 index 000000000..c56d07b3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersaggregation.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FiltersAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L374-L394 +type FiltersAggregation struct { + // Filters Collection of queries from which to build buckets. + Filters BucketsQuery `json:"filters,omitempty"` + // Keyed By default, the named filters aggregation returns the buckets as an object. + // Set to `false` to return the buckets as an array of objects. + Keyed *bool `json:"keyed,omitempty"` + // OtherBucket Set to `true` to add a bucket to the response which will contain all + // documents that do not match any of the given filters. + OtherBucket *bool `json:"other_bucket,omitempty"` + // OtherBucketKey The key with which the other bucket is returned. 
+ OtherBucketKey *string `json:"other_bucket_key,omitempty"` +} + +func (s *FiltersAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]Query, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + s.Filters = o + case '[': + o := []Query{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filters", err) + } + s.Filters = o + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "other_bucket": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OtherBucket", err) + } + s.OtherBucket = &value + case bool: + s.OtherBucket = &v + } + + case "other_bucket_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OtherBucketKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OtherBucketKey = &o + + } + } + return nil +} + +// NewFiltersAggregation returns a FiltersAggregation. +func NewFiltersAggregation() *FiltersAggregation { + r := &FiltersAggregation{} + + return r +} + +type FiltersAggregationVariant interface { + FiltersAggregationCaster() *FiltersAggregation +} + +func (s *FiltersAggregation) FiltersAggregationCaster() *FiltersAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersbucket.go new file mode 100644 index 000000000..fce9e02d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/filtersbucket.go @@ -0,0 +1,645 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// FiltersBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L645-L647 +type FiltersBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key *string `json:"key,omitempty"` +} + +func (s *FiltersBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + 
} + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if 
err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s FiltersBucket) MarshalJSON() ([]byte, error) { + type opt FiltersBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFiltersBucket returns a FiltersBucket. +func NewFiltersBucket() *FiltersBucket { + r := &FiltersBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintanalyzer.go new file mode 100644 index 000000000..c80eeb450 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintanalyzer.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FingerprintAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L37-L64 +type FingerprintAnalyzer struct { + // MaxOutputSize The maximum token size to emit. Tokens larger than this size will be + // discarded. + // Defaults to `255` + MaxOutputSize *int `json:"max_output_size,omitempty"` + // Separator The character to use to concatenate the terms. + // Defaults to a space. + Separator *string `json:"separator,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`.
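// A minimal standalone sketch (not part of the vendored diff) of the
// "typed_keys" convention the FiltersBucket decoder above relies on: with the
// ?typed_keys=true query parameter, Elasticsearch returns each aggregation
// under a "<type>#<name>" key, so the decoder splits on "#", switches on the
// type prefix, and stores the decoded aggregate under the bare name
// (elems[1]). The key names below are illustrative.
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, key := range []string{"sterms#by_category", "avg#mean_price"} {
		elems := strings.SplitN(key, "#", 2)
		// "sterms" would select NewStringTermsAggregate() in the generated
		// switch; the result is then stored under elems[1].
		fmt.Printf("type=%q name=%q\n", elems[0], elems[1])
	}
}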
+ Stopwords StopWords `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_output_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutputSize", err) + } + s.MaxOutputSize = &value + case float64: + f := int(v) + s.MaxOutputSize = &f + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FingerprintAnalyzer) MarshalJSON() ([]byte, error) { + type innerFingerprintAnalyzer FingerprintAnalyzer + tmp := innerFingerprintAnalyzer{ + MaxOutputSize: s.MaxOutputSize, + Separator: s.Separator, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "fingerprint" + + return json.Marshal(tmp) +} + +// NewFingerprintAnalyzer returns a FingerprintAnalyzer. +func NewFingerprintAnalyzer() *FingerprintAnalyzer { + r := &FingerprintAnalyzer{} + + return r +} + +type FingerprintAnalyzerVariant interface { + FingerprintAnalyzerCaster() *FingerprintAnalyzer +} + +func (s *FingerprintAnalyzer) FingerprintAnalyzerCaster() *FingerprintAnalyzer { + return s +} + +func (s *FingerprintAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintprocessor.go new file mode 100644 index 000000000..a4d9d0782 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprintprocessor.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
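// A small standalone sketch (not part of the vendored diff) of the "inner
// type" trick used by FingerprintAnalyzer.MarshalJSON above: the local type
// alias keeps the same fields but sheds the MarshalJSON method, so the inner
// json.Marshal call cannot recurse, while the wrapper pins the "type"
// discriminator to its literal value. Widget is a hypothetical stand-in for
// a generated variant type.
package main

import (
	"encoding/json"
	"fmt"
)

type Widget struct {
	Name string `json:"name,omitempty"`
	Type string `json:"type,omitempty"`
}

func (w Widget) MarshalJSON() ([]byte, error) {
	type innerWidget Widget // same layout, no methods: no recursion
	tmp := innerWidget(w)
	tmp.Type = "widget" // pin the literal, as the generated code does
	return json.Marshal(tmp)
}

func main() {
	b, _ := json.Marshal(Widget{Name: "w1"})
	fmt.Println(string(b)) // {"name":"w1","type":"widget"}
}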
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fingerprintdigest" +) + +// FingerprintProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L905-L933 +type FingerprintProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Fields Array of fields to include in the fingerprint. For objects, the processor + // hashes both the field key and value. For other fields, the processor hashes + // only the field value. + Fields []string `json:"fields"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true, the processor ignores any missing fields. If all fields are + // missing, the processor silently exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // Method The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, + // SHA-256, SHA-512, or MurmurHash3. + Method *fingerprintdigest.FingerprintDigest `json:"method,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Salt Salt value for the hash function. + Salt *string `json:"salt,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the fingerprint. 
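// A standalone sketch of the string-or-array normalization that the "fields"
// case of the decoder below performs (the same pattern appears for copy_to
// and similar fields): a bare JSON string is wrapped into a one-element
// slice, while a JSON array is decoded directly. normalize is a hypothetical
// helper, not part of the generated API.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func normalize(raw json.RawMessage) ([]string, error) {
	if !bytes.HasPrefix(raw, []byte("[")) {
		var s string
		if err := json.Unmarshal(raw, &s); err != nil {
			return nil, err
		}
		return []string{s}, nil
	}
	var out []string
	err := json.Unmarshal(raw, &out)
	return out, err
}

func main() {
	a, _ := normalize(json.RawMessage(`"user.id"`))
	b, _ := normalize(json.RawMessage(`["user.id","host.name"]`))
	fmt.Println(a, b) // [user.id] [user.id host.name]
}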
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *FingerprintProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "salt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Salt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Salt = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewFingerprintProcessor returns a FingerprintProcessor. +func NewFingerprintProcessor() *FingerprintProcessor { + r := &FingerprintProcessor{} + + return r +} + +type FingerprintProcessorVariant interface { + FingerprintProcessorCaster() *FingerprintProcessor +} + +func (s *FingerprintProcessor) FingerprintProcessorCaster() *FingerprintProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprinttokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprinttokenfilter.go new file mode 100644 index 000000000..0d19bca63 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fingerprinttokenfilter.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FingerprintTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L260-L266 +type FingerprintTokenFilter struct { + // MaxOutputSize Maximum character length, including whitespace, of the output token. Defaults + // to `255`. Concatenated tokens longer than this will result in no token + // output. + MaxOutputSize *int `json:"max_output_size,omitempty"` + // Separator Character to use to concatenate the token stream input. Defaults to a space. + Separator *string `json:"separator,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *FingerprintTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_output_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutputSize", err) + } + s.MaxOutputSize = &value + case float64: + f := int(v) + s.MaxOutputSize = &f + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FingerprintTokenFilter) MarshalJSON() ([]byte, error) { + type innerFingerprintTokenFilter FingerprintTokenFilter + tmp := innerFingerprintTokenFilter{ + MaxOutputSize: s.MaxOutputSize, + Separator: s.Separator, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "fingerprint" + + return json.Marshal(tmp) +} + +// NewFingerprintTokenFilter returns a FingerprintTokenFilter. 
+func NewFingerprintTokenFilter() *FingerprintTokenFilter { + r := &FingerprintTokenFilter{} + + return r +} + +type FingerprintTokenFilterVariant interface { + FingerprintTokenFilterCaster() *FingerprintTokenFilter +} + +func (s *FingerprintTokenFilter) FingerprintTokenFilterCaster() *FingerprintTokenFilter { + return s +} + +func (s *FingerprintTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/finnishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/finnishanalyzer.go new file mode 100644 index 000000000..274f0d6b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/finnishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FinnishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L165-L170 +type FinnishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FinnishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FinnishAnalyzer) MarshalJSON() ([]byte, error) { + type innerFinnishAnalyzer FinnishAnalyzer + tmp := innerFinnishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "finnish" + + return json.Marshal(tmp) +} + +// NewFinnishAnalyzer returns a FinnishAnalyzer. +func NewFinnishAnalyzer() *FinnishAnalyzer { + r := &FinnishAnalyzer{} + + return r +} + +type FinnishAnalyzerVariant interface { + FinnishAnalyzerCaster() *FinnishAnalyzer +} + +func (s *FinnishAnalyzer) FinnishAnalyzerCaster() *FinnishAnalyzer { + return s +} + +func (s *FinnishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattened.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattened.go new file mode 100644 index 000000000..2672f486f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattened.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Flattened type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L366-L368 +type Flattened struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + FieldCount int `json:"field_count"` +} + +func (s *Flattened) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "field_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FieldCount", err) + } + s.FieldCount = value + case float64: + f := int(v) + s.FieldCount = f + } + + } + } + return nil +} + +// NewFlattened returns a Flattened. +func NewFlattened() *Flattened { + r := &Flattened{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattenedproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattenedproperty.go new file mode 100644 index 000000000..ef3be46a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattenedproperty.go @@ -0,0 +1,965 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// FlattenedProperty type. 
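// A compact sketch of the polymorphic decoding scheme used by the
// FlattenedProperty decoder below (and by the other Property containers):
// each sub-property is first read as a raw message, its "type" key is
// sniffed (defaulting to "object" when absent), and the message is then
// re-decoded into the matching concrete type. The two property structs here
// are hypothetical stand-ins for the generated ones.
package main

import (
	"encoding/json"
	"fmt"
)

type keywordProp struct {
	IgnoreAbove int `json:"ignore_above"`
}

type objectProp struct {
	Enabled bool `json:"enabled"`
}

func decodeProperty(raw json.RawMessage) (any, error) {
	kind := map[string]any{}
	if err := json.Unmarshal(raw, &kind); err != nil {
		return nil, err
	}
	t, ok := kind["type"]
	if !ok {
		t = "object" // same fallback the generated decoder applies
	}
	switch t {
	case "keyword":
		var p keywordProp
		err := json.Unmarshal(raw, &p)
		return p, err
	default:
		var p objectProp
		err := json.Unmarshal(raw, &p)
		return p, err
	}
}

func main() {
	p1, _ := decodeProperty(json.RawMessage(`{"type":"keyword","ignore_above":256}`))
	p2, _ := decodeProperty(json.RawMessage(`{"enabled":true}`)) // no "type" key
	fmt.Printf("%#v\n%#v\n", p1, p2)
}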
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/complex.ts#L26-L38 +type FlattenedProperty struct { + Boost *Float64 `json:"boost,omitempty"` + DepthLimit *int `json:"depth_limit,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesDimensions []string `json:"time_series_dimensions,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "depth_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DepthLimit", err) + } + s.DepthLimit = &value + case float64: + f := int(v) + s.DepthLimit = &f + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "eager_global_ordinals": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EagerGlobalOrdinals", err) + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + 
} + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o + + case "split_queries_on_whitespace": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitQueriesOnWhitespace", err) + } + s.SplitQueriesOnWhitespace = &value + case bool: + s.SplitQueriesOnWhitespace = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimensions": + if err := dec.Decode(&s.TimeSeriesDimensions); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimensions", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FlattenedProperty) MarshalJSON() ([]byte, error) { + type innerFlattenedProperty FlattenedProperty + tmp := innerFlattenedProperty{ + Boost: s.Boost, + DepthLimit: s.DepthLimit, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimensions: s.TimeSeriesDimensions, + Type: s.Type, + } + + tmp.Type = "flattened" + + return json.Marshal(tmp) +} + +// NewFlattenedProperty returns a FlattenedProperty. 
+func NewFlattenedProperty() *FlattenedProperty { + r := &FlattenedProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type FlattenedPropertyVariant interface { + FlattenedPropertyCaster() *FlattenedProperty +} + +func (s *FlattenedProperty) FlattenedPropertyCaster() *FlattenedProperty { + return s +} + +func (s *FlattenedProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattengraphtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattengraphtokenfilter.go new file mode 100644 index 000000000..b2d3cafbd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flattengraphtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FlattenGraphTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L486-L488 +type FlattenGraphTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *FlattenGraphTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FlattenGraphTokenFilter) MarshalJSON() ([]byte, error) { + type innerFlattenGraphTokenFilter FlattenGraphTokenFilter + tmp := innerFlattenGraphTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "flatten_graph" + + return json.Marshal(tmp) +} + +// NewFlattenGraphTokenFilter returns a FlattenGraphTokenFilter. 
+func NewFlattenGraphTokenFilter() *FlattenGraphTokenFilter { + r := &FlattenGraphTokenFilter{} + + return r +} + +type FlattenGraphTokenFilterVariant interface { + FlattenGraphTokenFilterCaster() *FlattenGraphTokenFilter +} + +func (s *FlattenGraphTokenFilter) FlattenGraphTokenFilterCaster() *FlattenGraphTokenFilter { + return s +} + +func (s *FlattenGraphTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/float64.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/float64.go new file mode 100644 index 000000000..f6d7c0589 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/float64.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import ( + "bytes" + "math" + "strconv" +) + +// Float64 custom type for Inf & NaN handling. +type Float64 float64 + +// MarshalJSON implements Marshaler interface. +func (f Float64) MarshalJSON() ([]byte, error) { + var s string + switch { + case math.IsInf(float64(f), 1): + s = `"Infinity"` + case math.IsInf(float64(f), -1): + s = `"-Infinity"` + case math.IsNaN(float64(f)): + s = `"NaN"` + default: + s = strconv.FormatFloat(float64(f), 'f', -1, 64) + } + return []byte(s), nil +} + +// UnmarshalJSON implements Unmarshaler interface. +func (f *Float64) UnmarshalJSON(data []byte) error { + switch { + case bytes.Equal(data, []byte(`"NaN"`)): + *f = Float64(math.NaN()) + case bytes.Equal(data, []byte(`"Infinity"`)): + *f = Float64(math.Inf(1)) + case bytes.Equal(data, []byte(`"-Infinity"`)): + *f = Float64(math.Inf(-1)) + case bytes.Equal(data, []byte(`null`)): + return nil + default: + n, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return err + } + *f = Float64(n) + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatnumberproperty.go new file mode 100644 index 000000000..20adc7b99 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatnumberproperty.go @@ -0,0 +1,999 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
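// A short usage sketch for the Float64 type above (assumption: the vendored
// package is importable at its vendor path): non-finite values round-trip
// through the quoted strings "Infinity", "-Infinity", and "NaN", since plain
// JSON numbers cannot represent them.
package main

import (
	"encoding/json"
	"fmt"
	"math"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	b, _ := json.Marshal(types.Float64(math.Inf(1)))
	fmt.Println(string(b)) // "Infinity"

	var f types.Float64
	_ = json.Unmarshal([]byte(`"NaN"`), &f)
	fmt.Println(math.IsNaN(float64(f))) // true
}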
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype"
+)
+
+// FloatNumberProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L157-L160
+type FloatNumberProperty struct {
+	Boost *Float64 `json:"boost,omitempty"`
+	Coerce *bool `json:"coerce,omitempty"`
+	CopyTo []string `json:"copy_to,omitempty"`
+	DocValues *bool `json:"doc_values,omitempty"`
+	Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields map[string]Property `json:"fields,omitempty"`
+	IgnoreAbove *int `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
+	Index *bool `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta map[string]string `json:"meta,omitempty"`
+	NullValue *float32 `json:"null_value,omitempty"`
+	OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"`
+	Properties map[string]Property `json:"properties,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	Store *bool `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
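+	// (Editorial note: accepted values are defined by the
+	// timeseriesmetrictype package, for example gauge or counter.)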
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + f := float32(value) + s.NullValue = &f + case float64: + f := float32(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := 
NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerFloatNumberProperty FloatNumberProperty + tmp := innerFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "float" + + return json.Marshal(tmp) +} + +// NewFloatNumberProperty returns a FloatNumberProperty. +func NewFloatNumberProperty() *FloatNumberProperty { + r := &FloatNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type FloatNumberPropertyVariant interface { + FloatNumberPropertyCaster() *FloatNumberProperty +} + +func (s *FloatNumberProperty) FloatNumberPropertyCaster() *FloatNumberProperty { + return s +} + +func (s *FloatNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatrangeproperty.go new file mode 100644 index 000000000..f7115ad3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/floatrangeproperty.go @@ -0,0 +1,922 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// FloatRangeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L38-L40 +type FloatRangeProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
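+	// (Editorial note, an illustrative fragment rather than generated text:
+	// a mapping may carry arbitrary string pairs here, e.g.
+	// "meta": {"unit": "ms", "metric_type": "gauge"}.)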
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { + type innerFloatRangeProperty FloatRangeProperty + tmp := innerFloatRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "float_range" + + return json.Marshal(tmp) +} + +// NewFloatRangeProperty returns a FloatRangeProperty. +func NewFloatRangeProperty() *FloatRangeProperty { + r := &FloatRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type FloatRangePropertyVariant interface { + FloatRangePropertyCaster() *FloatRangeProperty +} + +func (s *FloatRangeProperty) FloatRangePropertyCaster() *FloatRangeProperty { + return s +} + +func (s *FloatRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flushstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flushstats.go new file mode 100644 index 000000000..ed439d0ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/flushstats.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FlushStats type. 
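+//
+// Editorial sketch (not generated from the spec): the custom UnmarshalJSON
+// below is lenient and accepts counters encoded either as JSON numbers or
+// as strings, so both fields in this example decode successfully.
+//
+//	var fs FlushStats
+//	_ = json.Unmarshal([]byte(`{"total": 3, "periodic": "2"}`), &fs)
+//	// fs.Total == 3 && fs.Periodic == 2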
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L148-L153 +type FlushStats struct { + Periodic int64 `json:"periodic"` + Total int64 `json:"total"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *FlushStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "periodic": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Periodic", err) + } + s.Periodic = value + case float64: + f := int64(v) + s.Periodic = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewFlushStats returns a FlushStats. +func NewFlushStats() *FlushStats { + r := &FlushStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindex.go new file mode 100644 index 000000000..71e3f03bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindex.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/followerindexstatus" +) + +// FollowerIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow_info/types.ts#L24-L35 +type FollowerIndex struct { + // FollowerIndex The name of the follower index. + FollowerIndex string `json:"follower_index"` + // LeaderIndex The name of the index in the leader cluster that is followed. 
+ LeaderIndex string `json:"leader_index"`
+ // Parameters An object that encapsulates cross-cluster replication parameters. If the
+ // follower index's status is paused, this object is omitted.
+ Parameters *FollowerIndexParameters `json:"parameters,omitempty"`
+ // RemoteCluster The remote cluster that contains the leader index.
+ RemoteCluster string `json:"remote_cluster"`
+ // Status The status of the index following: `active` or `paused`.
+ Status followerindexstatus.FollowerIndexStatus `json:"status"`
+}
+
+func (s *FollowerIndex) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "follower_index":
+ if err := dec.Decode(&s.FollowerIndex); err != nil {
+ return fmt.Errorf("%s | %w", "FollowerIndex", err)
+ }
+
+ case "leader_index":
+ if err := dec.Decode(&s.LeaderIndex); err != nil {
+ return fmt.Errorf("%s | %w", "LeaderIndex", err)
+ }
+
+ case "parameters":
+ if err := dec.Decode(&s.Parameters); err != nil {
+ return fmt.Errorf("%s | %w", "Parameters", err)
+ }
+
+ case "remote_cluster":
+ if err := dec.Decode(&s.RemoteCluster); err != nil {
+ return fmt.Errorf("%s | %w", "RemoteCluster", err)
+ }
+
+ case "status":
+ if err := dec.Decode(&s.Status); err != nil {
+ return fmt.Errorf("%s | %w", "Status", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewFollowerIndex returns a FollowerIndex.
+func NewFollowerIndex() *FollowerIndex {
+ r := &FollowerIndex{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindexparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindexparameters.go
new file mode 100644
index 000000000..1e2535959
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followerindexparameters.go
@@ -0,0 +1,200 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// FollowerIndexParameters type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/follow_info/types.ts#L42-L88
+type FollowerIndexParameters struct {
+ // MaxOutstandingReadRequests The maximum number of outstanding read requests from the remote cluster.
+ MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"`
+ // MaxOutstandingWriteRequests The maximum number of outstanding write requests on the follower.
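+ // A hedged sketch (count invented): like the other numeric parameters here,
+ // this field also tolerates a string-encoded value, via the strconv fallback
+ // in UnmarshalJSON below:
+ //
+ //	var p FollowerIndexParameters
+ //	_ = json.Unmarshal([]byte(`{"max_outstanding_write_requests":"9"}`), &p)
+ //	// *p.MaxOutstandingWriteRequests == 9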
+ MaxOutstandingWriteRequests *int `json:"max_outstanding_write_requests,omitempty"`
+ // MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster.
+ MaxReadRequestOperationCount *int `json:"max_read_request_operation_count,omitempty"`
+ // MaxReadRequestSize The maximum size in bytes per read of a batch of operations pulled from
+ // the remote cluster.
+ MaxReadRequestSize ByteSize `json:"max_read_request_size,omitempty"`
+ // MaxRetryDelay The maximum time to wait before retrying an operation that failed
+ // exceptionally. An exponential backoff strategy is employed when
+ // retrying.
+ MaxRetryDelay Duration `json:"max_retry_delay,omitempty"`
+ // MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this
+ // limit is reached, reads from the remote cluster will be
+ // deferred until the number of queued operations goes below the limit.
+ MaxWriteBufferCount *int `json:"max_write_buffer_count,omitempty"`
+ // MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When
+ // this limit is reached, reads from the remote cluster will
+ // be deferred until the total bytes of queued operations goes below the limit.
+ MaxWriteBufferSize ByteSize `json:"max_write_buffer_size,omitempty"`
+ // MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the
+ // follower.
+ MaxWriteRequestOperationCount *int `json:"max_write_request_operation_count,omitempty"`
+ // MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the
+ // follower.
+ MaxWriteRequestSize ByteSize `json:"max_write_request_size,omitempty"`
+ // ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the
+ // follower index is synchronized with the leader index.
+ // When the timeout has elapsed, the poll for operations will return to the
+ // follower so that it can update some statistics.
+ // Then the follower will immediately attempt to read from the leader again.
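+ // A hedged usage sketch (timeout invented), assuming Duration accepts a raw
+ // Elasticsearch time-unit string, as the typed API's union types generally do:
+ //
+ //	p := NewFollowerIndexParameters()
+ //	p.ReadPollTimeout = "65s"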
+ ReadPollTimeout Duration `json:"read_poll_timeout,omitempty"` +} + +func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_outstanding_read_requests": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingReadRequests", err) + } + s.MaxOutstandingReadRequests = &value + case float64: + f := int64(v) + s.MaxOutstandingReadRequests = &f + } + + case "max_outstanding_write_requests": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOutstandingWriteRequests", err) + } + s.MaxOutstandingWriteRequests = &value + case float64: + f := int(v) + s.MaxOutstandingWriteRequests = &f + } + + case "max_read_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestOperationCount", err) + } + s.MaxReadRequestOperationCount = &value + case float64: + f := int(v) + s.MaxReadRequestOperationCount = &f + } + + case "max_read_request_size": + if err := dec.Decode(&s.MaxReadRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxReadRequestSize", err) + } + + case "max_retry_delay": + if err := dec.Decode(&s.MaxRetryDelay); err != nil { + return fmt.Errorf("%s | %w", "MaxRetryDelay", err) + } + + case "max_write_buffer_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferCount", err) + } + s.MaxWriteBufferCount = &value + case float64: + f := int(v) + s.MaxWriteBufferCount = &f + } + + case "max_write_buffer_size": + if err := dec.Decode(&s.MaxWriteBufferSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteBufferSize", err) + } + + case "max_write_request_operation_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestOperationCount", err) + } + s.MaxWriteRequestOperationCount = &value + case float64: + f := int(v) + s.MaxWriteRequestOperationCount = &f + } + + case "max_write_request_size": + if err := dec.Decode(&s.MaxWriteRequestSize); err != nil { + return fmt.Errorf("%s | %w", "MaxWriteRequestSize", err) + } + + case "read_poll_timeout": + if err := dec.Decode(&s.ReadPollTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadPollTimeout", err) + } + + } + } + return nil +} + +// NewFollowerIndexParameters returns a FollowerIndexParameters. +func NewFollowerIndexParameters() *FollowerIndexParameters { + r := &FollowerIndexParameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followindexstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followindexstats.go new file mode 100644 index 000000000..8537f5b32 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followindexstats.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FollowIndexStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/_types/FollowIndexStats.ts#L30-L35 +type FollowIndexStats struct { + // Index The name of the follower index. + Index string `json:"index"` + // Shards An array of shard-level following task statistics. + Shards []CcrShardStats `json:"shards"` +} + +func (s *FollowIndexStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + } + } + return nil +} + +// NewFollowIndexStats returns a FollowIndexStats. +func NewFollowIndexStats() *FollowIndexStats { + r := &FollowIndexStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followstats.go new file mode 100644 index 000000000..a58d418d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/followstats.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// FollowStats type. 
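+// A hedged usage sketch (index name invented):
+//
+//	fs := NewFollowStats()
+//	fs.Indices = append(fs.Indices, FollowIndexStats{Index: "follower-01"})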
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/stats/types.ts.ts#L49-L51 +type FollowStats struct { + Indices []FollowIndexStats `json:"indices"` +} + +// NewFollowStats returns a FollowStats. +func NewFollowStats() *FollowStats { + r := &FollowStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/forcemergeaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/forcemergeaction.go new file mode 100644 index 000000000..09dedfedc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/forcemergeaction.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ForceMergeAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L123-L126 +type ForceMergeAction struct { + IndexCodec *string `json:"index_codec,omitempty"` + MaxNumSegments int `json:"max_num_segments"` +} + +func (s *ForceMergeAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_codec": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexCodec", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexCodec = &o + + case "max_num_segments": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumSegments", err) + } + s.MaxNumSegments = value + case float64: + f := int(v) + s.MaxNumSegments = f + } + + } + } + return nil +} + +// NewForceMergeAction returns a ForceMergeAction. 
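+//
+// A hedged usage sketch (values invented; `best_compression` is the codec the
+// ILM force-merge action documents):
+//
+//	a := NewForceMergeAction()
+//	a.MaxNumSegments = 1
+//	codec := "best_compression"
+//	a.IndexCodec = &codec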
+func NewForceMergeAction() *ForceMergeAction { + r := &ForceMergeAction{} + + return r +} + +type ForceMergeActionVariant interface { + ForceMergeActionCaster() *ForceMergeAction +} + +func (s *ForceMergeAction) ForceMergeActionCaster() *ForceMergeAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foreachprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foreachprocessor.go new file mode 100644 index 000000000..b768519d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foreachprocessor.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ForeachProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L935-L949 +type ForeachProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing array or object values. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true`, the processor silently exits without changing the document if the + // `field` is `null` or missing. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Processor Ingest processor to run on each element. + Processor ProcessorContainer `json:"processor"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
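+ // A hedged example (tag invented):
+ //
+ //	p := NewForeachProcessor()
+ //	tag := "foreach-items"
+ //	p.Tag = &tag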
+ Tag *string `json:"tag,omitempty"` +} + +func (s *ForeachProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "processor": + if err := dec.Decode(&s.Processor); err != nil { + return fmt.Errorf("%s | %w", "Processor", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewForeachProcessor returns a ForeachProcessor. +func NewForeachProcessor() *ForeachProcessor { + r := &ForeachProcessor{} + + return r +} + +type ForeachProcessorVariant interface { + ForeachProcessorCaster() *ForeachProcessor +} + +func (s *ForeachProcessor) ForeachProcessorCaster() *ForeachProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foundstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foundstatus.go new file mode 100644 index 000000000..74bd0eea6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/foundstatus.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FoundStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/delete_privileges/types.ts#L20-L22 +type FoundStatus struct { + Found bool `json:"found"` +} + +func (s *FoundStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + } + } + return nil +} + +// NewFoundStatus returns a FoundStatus. +func NewFoundStatus() *FoundStatus { + r := &FoundStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchanalyzer.go new file mode 100644 index 000000000..98b073dd2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FrenchAnalyzer type. 
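+// A hedged note: the MarshalJSON override below always pins the type
+// discriminator, so even a zero value serializes with its literal type while
+// the remaining fields drop out via omitempty:
+//
+//	b, _ := json.Marshal(*NewFrenchAnalyzer())
+//	// b is expected to be `{"type":"french"}`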
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L172-L177 +type FrenchAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *FrenchAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FrenchAnalyzer) MarshalJSON() ([]byte, error) { + type innerFrenchAnalyzer FrenchAnalyzer + tmp := innerFrenchAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "french" + + return json.Marshal(tmp) +} + +// NewFrenchAnalyzer returns a FrenchAnalyzer. +func NewFrenchAnalyzer() *FrenchAnalyzer { + r := &FrenchAnalyzer{} + + return r +} + +type FrenchAnalyzerVariant interface { + FrenchAnalyzerCaster() *FrenchAnalyzer +} + +func (s *FrenchAnalyzer) FrenchAnalyzerCaster() *FrenchAnalyzer { + return s +} + +func (s *FrenchAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchstemtokenfilter.go new file mode 100644 index 000000000..e4c3ec472 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frenchstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FrenchStemTokenFilter type. 
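+// A hedged sketch mirroring the marshal override below:
+//
+//	b, _ := json.Marshal(*NewFrenchStemTokenFilter())
+//	// b is expected to be `{"type":"french_stem"}`, Version being omitted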
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L555-L557 +type FrenchStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *FrenchStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FrenchStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerFrenchStemTokenFilter FrenchStemTokenFilter + tmp := innerFrenchStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "french_stem" + + return json.Marshal(tmp) +} + +// NewFrenchStemTokenFilter returns a FrenchStemTokenFilter. +func NewFrenchStemTokenFilter() *FrenchStemTokenFilter { + r := &FrenchStemTokenFilter{} + + return r +} + +type FrenchStemTokenFilterVariant interface { + FrenchStemTokenFilterCaster() *FrenchStemTokenFilter +} + +func (s *FrenchStemTokenFilter) FrenchStemTokenFilterCaster() *FrenchStemTokenFilter { + return s +} + +func (s *FrenchStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequencyencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequencyencodingpreprocessor.go new file mode 100644 index 000000000..1caf47c8b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequencyencodingpreprocessor.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FrequencyEncodingPreprocessor type. 
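+// A hedged usage sketch (field names and frequencies invented):
+//
+//	p := NewFrequencyEncodingPreprocessor()
+//	p.Field = "color"
+//	p.FeatureName = "color_frequency"
+//	p.FrequencyMap["red"] = 0.6
+//	p.FrequencyMap["blue"] = 0.4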
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L38-L42 +type FrequencyEncodingPreprocessor struct { + FeatureName string `json:"feature_name"` + Field string `json:"field"` + FrequencyMap map[string]Float64 `json:"frequency_map"` +} + +func (s *FrequencyEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "frequency_map": + if s.FrequencyMap == nil { + s.FrequencyMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.FrequencyMap); err != nil { + return fmt.Errorf("%s | %w", "FrequencyMap", err) + } + + } + } + return nil +} + +// NewFrequencyEncodingPreprocessor returns a FrequencyEncodingPreprocessor. +func NewFrequencyEncodingPreprocessor() *FrequencyEncodingPreprocessor { + r := &FrequencyEncodingPreprocessor{ + FrequencyMap: make(map[string]Float64), + } + + return r +} + +type FrequencyEncodingPreprocessorVariant interface { + FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor +} + +func (s *FrequencyEncodingPreprocessor) FrequencyEncodingPreprocessorCaster() *FrequencyEncodingPreprocessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregate.go new file mode 100644 index 000000000..0814670bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FrequentItemSetsAggregate type. 
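+// A hedged note on the union handling below: `buckets` may arrive either as a
+// JSON array or as a keyed object, so the decoder branches on the first byte
+// of the raw message ('{' versus '['). For example (payload invented):
+//
+//	var agg FrequentItemSetsAggregate
+//	_ = json.Unmarshal([]byte(`{"buckets":[]}`), &agg)
+//	// agg.Buckets now holds a []FrequentItemSetsBucket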
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L724-L725 +type FrequentItemSetsAggregate struct { + Buckets BucketsFrequentItemSetsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *FrequentItemSetsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]FrequentItemSetsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []FrequentItemSetsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewFrequentItemSetsAggregate returns a FrequentItemSetsAggregate. +func NewFrequentItemSetsAggregate() *FrequentItemSetsAggregate { + r := &FrequentItemSetsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregation.go new file mode 100644 index 000000000..1413f8666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsaggregation.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FrequentItemSetsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1241-L1268 +type FrequentItemSetsAggregation struct { + // Fields Fields to analyze. + Fields []FrequentItemSetsField `json:"fields"` + // Filter Query that filters documents from analysis. + Filter *Query `json:"filter,omitempty"` + // MinimumSetSize The minimum size of one item set. + MinimumSetSize *int `json:"minimum_set_size,omitempty"` + // MinimumSupport The minimum support of one item set. + MinimumSupport *Float64 `json:"minimum_support,omitempty"` + // Size The number of top item sets to return. 
+ Size *int `json:"size,omitempty"` +} + +func (s *FrequentItemSetsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "minimum_set_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinimumSetSize", err) + } + s.MinimumSetSize = &value + case float64: + f := int(v) + s.MinimumSetSize = &f + } + + case "minimum_support": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinimumSupport", err) + } + f := Float64(value) + s.MinimumSupport = &f + case float64: + f := Float64(v) + s.MinimumSupport = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewFrequentItemSetsAggregation returns a FrequentItemSetsAggregation. +func NewFrequentItemSetsAggregation() *FrequentItemSetsAggregation { + r := &FrequentItemSetsAggregation{} + + return r +} + +type FrequentItemSetsAggregationVariant interface { + FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation +} + +func (s *FrequentItemSetsAggregation) FrequentItemSetsAggregationCaster() *FrequentItemSetsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsbucket.go new file mode 100644 index 000000000..77a901a0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsbucket.go @@ -0,0 +1,659 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// FrequentItemSetsBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L727-L730 +type FrequentItemSetsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key map[string][]string `json:"key"` + Support Float64 `json:"support"` +} + +func (s *FrequentItemSetsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if s.Key == nil { + s.Key = make(map[string][]string, 0) + } + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + case "support": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Support", err) + } + f := Float64(value) + s.Support = f + case float64: + f := Float64(v) + s.Support = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := 
NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := 
NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s FrequentItemSetsBucket) MarshalJSON() ([]byte, error) { + type opt FrequentItemSetsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFrequentItemSetsBucket returns a FrequentItemSetsBucket. +func NewFrequentItemSetsBucket() *FrequentItemSetsBucket { + r := &FrequentItemSetsBucket{ + Aggregations: make(map[string]Aggregate), + Key: make(map[string][]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsfield.go new file mode 100644 index 000000000..b2aa684e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/frequentitemsetsfield.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// FrequentItemSetsField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1227-L1239 +type FrequentItemSetsField struct { + // Exclude Values to exclude. 
+ // Can be regular expression strings or arrays of strings of exact terms. + Exclude []string `json:"exclude,omitempty"` + Field string `json:"field"` + // Include Values to include. + // Can be regular expression strings or arrays of strings of exact terms. + Include TermsInclude `json:"include,omitempty"` +} + +func (s *FrequentItemSetsField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "include": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + include_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Include", err) + } + + switch t { + + case "num_partitions", "partition": + o := NewTermsPartition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = o + break include_field + + } + } + if s.Include == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + } + + } + } + return nil +} + +// NewFrequentItemSetsField returns a FrequentItemSetsField. +func NewFrequentItemSetsField() *FrequentItemSetsField { + r := &FrequentItemSetsField{} + + return r +} + +type FrequentItemSetsFieldVariant interface { + FrequentItemSetsFieldCaster() *FrequentItemSetsField +} + +func (s *FrequentItemSetsField) FrequentItemSetsFieldCaster() *FrequentItemSetsField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscore.go new file mode 100644 index 000000000..084109b47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscore.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
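The FrequentItemSetsField decoder above accepts `exclude` either as a single string or as an array of strings, normalizing both into the `[]string` field. A minimal sketch of that behavior; the field name and payloads are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both payloads populate the same []string Exclude field: the custom
	// UnmarshalJSON wraps a bare string into a one-element slice.
	for _, payload := range []string{
		`{"field":"products","exclude":"sku-*"}`,           // single string
		`{"field":"products","exclude":["sku-1","sku-2"]}`, // array form
	} {
		var f types.FrequentItemSetsField
		if err := json.Unmarshal([]byte(payload), &f); err != nil {
			panic(err)
		}
		fmt.Println(f.Field, f.Exclude)
	}
}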
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FunctionScore type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L226-L266 +type FunctionScore struct { + AdditionalFunctionScoreProperty map[string]json.RawMessage `json:"-"` + // Exp Function that scores a document with an exponential decay, depending on the + // distance of a numeric field value of the document from an origin. + Exp DecayFunction `json:"exp,omitempty"` + // FieldValueFactor Function allows you to use a field from a document to influence the score. + // It’s similar to using the script_score function; however, it avoids the + // overhead of scripting. + FieldValueFactor *FieldValueFactorScoreFunction `json:"field_value_factor,omitempty"` + Filter *Query `json:"filter,omitempty"` + // Gauss Function that scores a document with a normal decay, depending on the + // distance of a numeric field value of the document from an origin. + Gauss DecayFunction `json:"gauss,omitempty"` + // Linear Function that scores a document with a linear decay, depending on the + // distance of a numeric field value of the document from an origin. + Linear DecayFunction `json:"linear,omitempty"` + // RandomScore Generates scores that are uniformly distributed from 0 up to but not + // including 1. + // In case you want scores to be reproducible, it is possible to provide a + // `seed` and `field`. + RandomScore *RandomScoreFunction `json:"random_score,omitempty"` + // ScriptScore Enables you to wrap another query and customize the scoring of it optionally + // with a computation derived from other numeric field values in the doc using a + // script expression.
+ ScriptScore *ScriptScoreFunction `json:"script_score,omitempty"` + Weight *Float64 `json:"weight,omitempty"` +} + +func (s *FunctionScore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exp": + var message json.RawMessage + err := dec.Decode(&message) + if err != nil { + return fmt.Errorf("%s | %w", "Exp", err) + } + + untyped := NewUntypedDecayFunction() + err = json.Unmarshal(message, &untyped) + if err != nil { + return fmt.Errorf("%s | %w", "Exp", err) + } + s.Exp = untyped + + case "field_value_factor": + if err := dec.Decode(&s.FieldValueFactor); err != nil { + return fmt.Errorf("%s | %w", "FieldValueFactor", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "gauss": + var message json.RawMessage + err := dec.Decode(&message) + if err != nil { + return fmt.Errorf("%s | %w", "Gauss", err) + } + + untyped := NewUntypedDecayFunction() + err = json.Unmarshal(message, &untyped) + if err != nil { + return fmt.Errorf("%s | %w", "Gauss", err) + } + s.Gauss = untyped + + case "linear": + var message json.RawMessage + err := dec.Decode(&message) + if err != nil { + return fmt.Errorf("%s | %w", "Linear", err) + } + + untyped := NewUntypedDecayFunction() + err = json.Unmarshal(message, &untyped) + if err != nil { + return fmt.Errorf("%s | %w", "Linear", err) + } + s.Linear = untyped + + case "random_score": + if err := dec.Decode(&s.RandomScore); err != nil { + return fmt.Errorf("%s | %w", "RandomScore", err) + } + + case "script_score": + if err := dec.Decode(&s.ScriptScore); err != nil { + return fmt.Errorf("%s | %w", "ScriptScore", err) + } + + case "weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Weight", err) + } + f := Float64(value) + s.Weight = &f + case float64: + f := Float64(v) + s.Weight = &f + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalFunctionScoreProperty == nil { + s.AdditionalFunctionScoreProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalFunctionScoreProperty", err) + } + s.AdditionalFunctionScoreProperty[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s FunctionScore) MarshalJSON() ([]byte, error) { + type opt FunctionScore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalFunctionScoreProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalFunctionScoreProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFunctionScore returns a FunctionScore.
+func NewFunctionScore() *FunctionScore { + r := &FunctionScore{ + AdditionalFunctionScoreProperty: make(map[string]json.RawMessage), + } + + return r +} + +type FunctionScoreVariant interface { + FunctionScoreCaster() *FunctionScore +} + +func (s *FunctionScore) FunctionScoreCaster() *FunctionScore { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscorequery.go new file mode 100644 index 000000000..d6bfd266e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/functionscorequery.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionboostmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionscoremode" +) + +// FunctionScoreQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L105-L135 +type FunctionScoreQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // BoostMode Defines how the newly computed score is combined with the score of the query. + BoostMode *functionboostmode.FunctionBoostMode `json:"boost_mode,omitempty"` + // Functions One or more functions that compute a new score for each document returned by + // the query. + Functions []FunctionScore `json:"functions,omitempty"` + // MaxBoost Restricts the new score to not exceed the provided limit. + MaxBoost *Float64 `json:"max_boost,omitempty"` + // MinScore Excludes documents that do not meet the provided score threshold. + MinScore *Float64 `json:"min_score,omitempty"` + // Query A query that determines the documents for which a new score is computed.
+ Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode Specifies how the computed scores are combined + ScoreMode *functionscoremode.FunctionScoreMode `json:"score_mode,omitempty"` +} + +func (s *FunctionScoreQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Functions) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "boost_mode": + if err := dec.Decode(&s.BoostMode); err != nil { + return fmt.Errorf("%s | %w", "BoostMode", err) + } + + case "functions": + if err := dec.Decode(&s.Functions); err != nil { + return fmt.Errorf("%s | %w", "Functions", err) + } + + case "max_boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxBoost", err) + } + f := Float64(value) + s.MaxBoost = &f + case float64: + f := Float64(v) + s.MaxBoost = &f + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := Float64(value) + s.MinScore = &f + case float64: + f := Float64(v) + s.MinScore = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return fmt.Errorf("%s | %w", "ScoreMode", err) + } + + } + } + return nil +} + +// NewFunctionScoreQuery returns a FunctionScoreQuery. +func NewFunctionScoreQuery() *FunctionScoreQuery { + r := &FunctionScoreQuery{} + + return r +} + +type FunctionScoreQueryVariant interface { + FunctionScoreQueryCaster() *FunctionScoreQuery +} + +func (s *FunctionScoreQuery) FunctionScoreQueryCaster() *FunctionScoreQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzziness.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzziness.go new file mode 100644 index 000000000..597c2b44e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzziness.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
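FunctionScoreQuery.UnmarshalJSON above also accepts a shorthand form: when the payload is a bare array rather than an object, it is routed straight into Functions. A small sketch of both forms; the boost and weight values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Full object form: boost and functions are decoded field by field.
	var full types.FunctionScoreQuery
	if err := json.Unmarshal([]byte(`{"boost":1.2,"functions":[{"weight":2}]}`), &full); err != nil {
		panic(err)
	}

	// Shorthand form: the decoder checks for a leading '{' and, failing
	// that, decodes the whole payload into Functions.
	var short types.FunctionScoreQuery
	if err := json.Unmarshal([]byte(`[{"weight":2},{"weight":0.5}]`), &short); err != nil {
		panic(err)
	}
	fmt.Println(len(full.Functions), len(short.Functions)) // 1 2
}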
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Fuzziness holds the union for the following types: +// +// string +// int +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L130-L131 +type Fuzziness any + +type FuzzinessVariant interface { + FuzzinessCaster() *Fuzziness +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzzyquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzzyquery.go new file mode 100644 index 000000000..5c270b00b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/fuzzyquery.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// FuzzyQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L48-L86 +type FuzzyQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // MaxExpansions Maximum number of variations created. + MaxExpansions *int `json:"max_expansions,omitempty"` + // PrefixLength Number of beginning characters left unchanged when creating expansions. + PrefixLength *int `json:"prefix_length,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Transpositions Indicates whether edits include transpositions of two adjacent characters + // (for example `ab` to `ba`).
+ Transpositions *bool `json:"transpositions,omitempty"` + // Value Term you wish to find in the provided field. + Value string `json:"value"` +} + +func (s *FuzzyQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxExpansions", err) + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + + case "transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Transpositions", err) + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + +// NewFuzzyQuery returns a FuzzyQuery. +func NewFuzzyQuery() *FuzzyQuery { + r := &FuzzyQuery{} + + return r +} + +type FuzzyQueryVariant interface { + FuzzyQueryCaster() *FuzzyQuery +} + +func (s *FuzzyQuery) FuzzyQueryCaster() *FuzzyQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/galiciananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/galiciananalyzer.go new file mode 100644 index 000000000..0105e00e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/galiciananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GalicianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L179-L184 +type GalicianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GalicianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GalicianAnalyzer) MarshalJSON() ([]byte, error) { + type innerGalicianAnalyzer GalicianAnalyzer + tmp := innerGalicianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "galician" + + return json.Marshal(tmp) +} + +// NewGalicianAnalyzer returns a GalicianAnalyzer. +func NewGalicianAnalyzer() *GalicianAnalyzer { + r := &GalicianAnalyzer{} + + return r +} + +type GalicianAnalyzerVariant interface { + GalicianAnalyzerCaster() *GalicianAnalyzer +} + +func (s *GalicianAnalyzer) GalicianAnalyzerCaster() *GalicianAnalyzer { + return s +} + +func (s *GalicianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollector.go new file mode 100644 index 000000000..e923f2913 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollector.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GarbageCollector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1001-L1006 +type GarbageCollector struct { + // Collectors Contains statistics about JVM garbage collectors for the node. + Collectors map[string]GarbageCollectorTotal `json:"collectors,omitempty"` +} + +// NewGarbageCollector returns a GarbageCollector. +func NewGarbageCollector() *GarbageCollector { + r := &GarbageCollector{ + Collectors: make(map[string]GarbageCollectorTotal), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollectortotal.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollectortotal.go new file mode 100644 index 000000000..3f630569b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/garbagecollectortotal.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GarbageCollectorTotal type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1008-L1021 +type GarbageCollectorTotal struct { + // CollectionCount Total number of JVM garbage collectors that collect objects. + CollectionCount *int64 `json:"collection_count,omitempty"` + // CollectionTime Total time spent by JVM collecting objects. + CollectionTime *string `json:"collection_time,omitempty"` + // CollectionTimeInMillis Total time, in milliseconds, spent by JVM collecting objects. 
+ CollectionTimeInMillis *int64 `json:"collection_time_in_millis,omitempty"` +} + +func (s *GarbageCollectorTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collection_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionCount", err) + } + s.CollectionCount = &value + case float64: + f := int64(v) + s.CollectionCount = &f + } + + case "collection_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CollectionTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CollectionTime = &o + + case "collection_time_in_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionTimeInMillis", err) + } + s.CollectionTimeInMillis = &value + case float64: + f := int64(v) + s.CollectionTimeInMillis = &f + } + + } + } + return nil +} + +// NewGarbageCollectorTotal returns a GarbageCollectorTotal. +func NewGarbageCollectorTotal() *GarbageCollectorTotal { + r := &GarbageCollectorTotal{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepository.go new file mode 100644 index 000000000..3f957ca84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepository.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GcsRepository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L52-L62 +type GcsRepository struct { + // Settings The repository settings. + Settings GcsRepositorySettings `json:"settings"` + // Type The Google Cloud Storage repository type. 
+ Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *GcsRepository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GcsRepository) MarshalJSON() ([]byte, error) { + type innerGcsRepository GcsRepository + tmp := innerGcsRepository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "gcs" + + return json.Marshal(tmp) +} + +// NewGcsRepository returns a GcsRepository. +func NewGcsRepository() *GcsRepository { + r := &GcsRepository{} + + return r +} + +type GcsRepositoryVariant interface { + GcsRepositoryCaster() *GcsRepository +} + +func (s *GcsRepository) GcsRepositoryCaster() *GcsRepository { + return s +} + +func (s *GcsRepository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepositorysettings.go new file mode 100644 index 000000000..119e23ab8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gcsrepositorysettings.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GcsRepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L198-L235 +type GcsRepositorySettings struct { + // ApplicationName The name used by the client when it uses the Google Cloud Storage service. + ApplicationName *string `json:"application_name,omitempty"` + // BasePath The path to the repository data within the bucket. + // It defaults to the root of the bucket. + // + // NOTE: Don't set `base_path` when configuring a snapshot repository for + // Elastic Cloud Enterprise. 
+ // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments can share the same bucket. + BasePath *string `json:"base_path,omitempty"` + // Bucket The name of the bucket to be used for snapshots. + Bucket string `json:"bucket"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Client The name of the client to use to connect to Google Cloud Storage. + Client *string `json:"client,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. + MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. + // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. 
+ Readonly *bool `json:"readonly,omitempty"` +} + +func (s *GcsRepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "application_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApplicationName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApplicationName = &o + + case "base_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BasePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BasePath = &o + + case "bucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Bucket", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bucket = o + + case "chunk_size": + if err := dec.Decode(&s.ChunkSize); err != nil { + return fmt.Errorf("%s | %w", "ChunkSize", err) + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Client", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "readonly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Readonly", err) + } + s.Readonly = &value + case bool: + s.Readonly = &v + } + + } + } + return nil +} + +// NewGcsRepositorySettings returns a GcsRepositorySettings. +func NewGcsRepositorySettings() *GcsRepositorySettings { + r := &GcsRepositorySettings{} + + return r +} + +type GcsRepositorySettingsVariant interface { + GcsRepositorySettingsCaster() *GcsRepositorySettings +} + +func (s *GcsRepositorySettings) GcsRepositorySettingsCaster() *GcsRepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundingboxquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundingboxquery.go new file mode 100644 index 000000000..4dcf2547d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundingboxquery.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
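Like the other generated settings types, the GcsRepositorySettings decoder above is lenient about scalar encodings: `compress` and `readonly` may arrive as JSON booleans or as quoted strings. A minimal sketch; the bucket and path names are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "compress" is a quoted string, "readonly" a real boolean; both decode,
	// because the switch in UnmarshalJSON handles the string and bool cases.
	payload := []byte(`{"bucket":"my-snapshots","base_path":"prod/cluster-a","compress":"true","readonly":false}`)

	var s types.GcsRepositorySettings
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Bucket, *s.Compress, *s.Readonly) // my-snapshots true false
}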
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoexecution" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod" +) + +// GeoBoundingBoxQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L35-L57 +type GeoBoundingBoxQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + GeoBoundingBoxQuery map[string]GeoBounds `json:"-"` + // IgnoreUnmapped Set to `true` to ignore an unmapped field and not match any documents for + // this query. + // Set to `false` to throw an exception if the field is not mapped. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Type *geoexecution.GeoExecution `json:"type,omitempty"` + // ValidationMethod Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or + // longitude. + // Set to `COERCE` to also try to infer correct latitude or longitude. 
+ ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` +} + +func (s *GeoBoundingBoxQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "validation_method": + if err := dec.Decode(&s.ValidationMethod); err != nil { + return fmt.Errorf("%s | %w", "ValidationMethod", err) + } + + default: + + if key, ok := t.(string); ok { + if s.GeoBoundingBoxQuery == nil { + s.GeoBoundingBoxQuery = make(map[string]GeoBounds, 0) + } + raw := new(GeoBounds) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "GeoBoundingBoxQuery", err) + } + s.GeoBoundingBoxQuery[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s GeoBoundingBoxQuery) MarshalJSON() ([]byte, error) { + type opt GeoBoundingBoxQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.GeoBoundingBoxQuery { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "GeoBoundingBoxQuery") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoBoundingBoxQuery returns a GeoBoundingBoxQuery. +func NewGeoBoundingBoxQuery() *GeoBoundingBoxQuery { + r := &GeoBoundingBoxQuery{ + GeoBoundingBoxQuery: make(map[string]GeoBounds), + } + + return r +} + +type GeoBoundingBoxQueryVariant interface { + GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery +} + +func (s *GeoBoundingBoxQuery) GeoBoundingBoxQueryCaster() *GeoBoundingBoxQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geobounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geobounds.go new file mode 100644 index 000000000..a35b3e5b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geobounds.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GeoBounds holds the union for the following types: +// +// CoordsGeoBounds +// TopLeftBottomRightGeoBounds +// TopRightBottomLeftGeoBounds +// WktGeoBounds +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L135-L148 +type GeoBounds any + +type GeoBoundsVariant interface { + GeoBoundsCaster() *GeoBounds +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregate.go new file mode 100644 index 000000000..fef23b70d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregate.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoBoundsAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L327-L333 +type GeoBoundsAggregate struct { + Bounds GeoBounds `json:"bounds,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoBoundsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + bounds_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Bounds", err) + } + + switch t { + + case "bottom", "left", "right", "top": + o := NewCoordsGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "bottom_right", "top_left": + o := NewTopLeftBottomRightGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "bottom_left", "top_right": + o := NewTopRightBottomLeftGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "wkt": + o := NewWktGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + } + } + if s.Bounds == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Bounds); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoBoundsAggregate returns a GeoBoundsAggregate. +func NewGeoBoundsAggregate() *GeoBoundsAggregate { + r := &GeoBoundsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregation.go new file mode 100644 index 000000000..31b139fc9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoboundsaggregation.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoBoundsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L114-L123 +type GeoBoundsAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` + // WrapLongitude Specifies whether the bounding box should be allowed to overlap the + // international date line. + WrapLongitude *bool `json:"wrap_longitude,omitempty"` +} + +func (s *GeoBoundsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "wrap_longitude": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WrapLongitude", err) + } + s.WrapLongitude = &value + case bool: + s.WrapLongitude = &v + } + + } + } + return nil +} + +// NewGeoBoundsAggregation returns a GeoBoundsAggregation. +func NewGeoBoundsAggregation() *GeoBoundsAggregation { + r := &GeoBoundsAggregation{} + + return r +} + +type GeoBoundsAggregationVariant interface { + GeoBoundsAggregationCaster() *GeoBoundsAggregation +} + +func (s *GeoBoundsAggregation) GeoBoundsAggregationCaster() *GeoBoundsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregate.go new file mode 100644 index 000000000..7bf62cd74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregate.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
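The GeoBounds union consumed by the last two files is resolved by key-sniffing: GeoBoundsAggregate.UnmarshalJSON above peeks at the keys of `bounds` and picks the matching variant. A sketch of the observable behavior; the coordinates are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "top_left"/"bottom_right" keys steer the union toward
	// TopLeftBottomRightGeoBounds rather than the coords or WKT variants.
	raw := []byte(`{"bounds":{"top_left":{"lat":48.92,"lon":2.21},"bottom_right":{"lat":48.79,"lon":2.45}}}`)

	var agg types.GeoBoundsAggregate
	if err := json.Unmarshal(raw, &agg); err != nil {
		panic(err)
	}
	if b, ok := agg.Bounds.(*types.TopLeftBottomRightGeoBounds); ok {
		fmt.Printf("%+v %+v\n", b.TopLeft, b.BottomRight)
	}
}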
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoCentroidAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L335-L342 +type GeoCentroidAggregate struct { + Count int64 `json:"count"` + Location GeoLocation `json:"location,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoCentroidAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "location": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + location_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Location", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + s.Location = o + break location_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + s.Location = o + break location_field + + } + } + if s.Location == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Location); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoCentroidAggregate returns a GeoCentroidAggregate. +func NewGeoCentroidAggregate() *GeoCentroidAggregate { + r := &GeoCentroidAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregation.go new file mode 100644 index 000000000..84cda571e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geocentroidaggregation.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoCentroidAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L125-L128 +type GeoCentroidAggregation struct { + Count *int64 `json:"count,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Location GeoLocation `json:"location,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *GeoCentroidAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "location": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + location_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Location", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + s.Location = o + break location_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + s.Location = o + break location_field + + } + } + if s.Location == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Location); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewGeoCentroidAggregation returns a GeoCentroidAggregation. 
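+// The zero value is ready to use; Location remains nil until a response is decoded into it.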
+func NewGeoCentroidAggregation() *GeoCentroidAggregation {
+	r := &GeoCentroidAggregation{}
+
+	return r
+}
+
+type GeoCentroidAggregationVariant interface {
+	GeoCentroidAggregationCaster() *GeoCentroidAggregation
+}
+
+func (s *GeoCentroidAggregation) GeoCentroidAggregationCaster() *GeoCentroidAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodecayfunction.go
new file mode 100644
index 000000000..f37bbc57d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodecayfunction.go
@@ -0,0 +1,89 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode"
+)
+
+// GeoDecayFunction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L210-L213
+type GeoDecayFunction struct {
+	DecayFunctionBaseGeoLocationDistance map[string]DecayPlacementGeoLocationDistance `json:"-"`
+	// MultiValueMode Determines how the distance is calculated when a field used for computing the
+	// decay contains multiple values.
+	MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s GeoDecayFunction) MarshalJSON() ([]byte, error) {
+	type opt GeoDecayFunction
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.DecayFunctionBaseGeoLocationDistance {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "DecayFunctionBaseGeoLocationDistance")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewGeoDecayFunction returns a GeoDecayFunction.
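+// The embedded additional-properties map is pre-allocated, so the decay field name can be assigned as a key directly.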
+func NewGeoDecayFunction() *GeoDecayFunction { + r := &GeoDecayFunction{ + DecayFunctionBaseGeoLocationDistance: make(map[string]DecayPlacementGeoLocationDistance), + } + + return r +} + +type GeoDecayFunctionVariant interface { + GeoDecayFunctionCaster() *GeoDecayFunction +} + +func (s *GeoDecayFunction) GeoDecayFunctionCaster() *GeoDecayFunction { + return s +} + +func (s *GeoDecayFunction) DecayFunctionCaster() *DecayFunction { + o := DecayFunction(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregate.go new file mode 100644 index 000000000..64417b570 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoDistanceAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L617-L622 +type GeoDistanceAggregate struct { + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoDistanceAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]RangeBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []RangeBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoDistanceAggregate returns a GeoDistanceAggregate. 
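+// Buckets is decoded as a keyed map when the response uses `keyed` ranges, and as a plain slice otherwise.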
+func NewGeoDistanceAggregate() *GeoDistanceAggregate { + r := &GeoDistanceAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregation.go new file mode 100644 index 000000000..72f915d7f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistanceaggregation.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype" +) + +// GeoDistanceAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L396-L419 +type GeoDistanceAggregation struct { + // DistanceType The distance calculation type. + DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` + // Field A field of type `geo_point` used to evaluate the distance. + Field *string `json:"field,omitempty"` + // Origin The origin used to evaluate the distance. + Origin GeoLocation `json:"origin,omitempty"` + // Ranges An array of ranges used to bucket documents. + Ranges []AggregationRange `json:"ranges,omitempty"` + // Unit The distance unit. 
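+	// Per the Elasticsearch docs, it defaults to meters (`m`) when not specified.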
+ Unit *distanceunit.DistanceUnit `json:"unit,omitempty"` +} + +func (s *GeoDistanceAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return fmt.Errorf("%s | %w", "DistanceType", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "origin": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + origin_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Origin", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + } + } + if s.Origin == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Origin); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + } + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return fmt.Errorf("%s | %w", "Ranges", err) + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return fmt.Errorf("%s | %w", "Unit", err) + } + + } + } + return nil +} + +// NewGeoDistanceAggregation returns a GeoDistanceAggregation. +func NewGeoDistanceAggregation() *GeoDistanceAggregation { + r := &GeoDistanceAggregation{} + + return r +} + +type GeoDistanceAggregationVariant interface { + GeoDistanceAggregationCaster() *GeoDistanceAggregation +} + +func (s *GeoDistanceAggregation) GeoDistanceAggregationCaster() *GeoDistanceAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancefeaturequery.go new file mode 100644 index 000000000..b315ee3e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancefeaturequery.go @@ -0,0 +1,186 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// GeoDistanceFeatureQuery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L66-L69
+type GeoDistanceFeatureQuery struct {
+	// Boost Floating point number used to decrease or increase the relevance scores of
+	// the query.
+	// Boost values are relative to the default value of 1.0.
+	// A boost value between 0 and 1.0 decreases the relevance score.
+	// A value greater than 1.0 increases the relevance score.
+	Boost *float32 `json:"boost,omitempty"`
+	// Field Name of the field used to calculate distances. This field must meet the
+	// following criteria:
+	// be a `date`, `date_nanos` or `geo_point` field;
+	// have an `index` mapping parameter value of `true`, which is the default;
+	// have a `doc_values` mapping parameter value of `true`, which is the default.
+	Field string `json:"field"`
+	// Origin Date or point of origin used to calculate distances.
+	// If the `field` value is a `date` or `date_nanos` field, the `origin` value
+	// must be a date.
+	// Date Math, such as `now-1h`, is supported.
+	// If the `field` value is a `geo_point` field, the `origin` value must be a
+	// geopoint.
+	Origin GeoLocation `json:"origin"`
+	// Pivot Distance from the `origin` at which relevance scores receive half of the
+	// `boost` value.
+	// If the `field` value is a `date` or `date_nanos` field, the `pivot` value
+	// must be a time unit, such as `1h` or `10d`. If the `field` value is a
+	// `geo_point` field, the `pivot` value must be a distance unit, such as `1km`
+	// or `12m`.
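+	// Per the Elasticsearch docs, the computed score follows `boost * pivot / (pivot + distance)`, halving at the pivot.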
+ Pivot string `json:"pivot"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoDistanceFeatureQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "origin": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + origin_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Origin", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + s.Origin = o + break origin_field + + } + } + if s.Origin == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Origin); err != nil { + return fmt.Errorf("%s | %w", "Origin", err) + } + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewGeoDistanceFeatureQuery returns a GeoDistanceFeatureQuery. +func NewGeoDistanceFeatureQuery() *GeoDistanceFeatureQuery { + r := &GeoDistanceFeatureQuery{} + + return r +} + +type GeoDistanceFeatureQueryVariant interface { + GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery +} + +func (s *GeoDistanceFeatureQuery) GeoDistanceFeatureQueryCaster() *GeoDistanceFeatureQuery { + return s +} + +func (s *GeoDistanceFeatureQuery) DistanceFeatureQueryCaster() *DistanceFeatureQuery { + o := DistanceFeatureQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancequery.go new file mode 100644 index 000000000..14dd519f8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancequery.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod" +) + +// GeoDistanceQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L64-L96 +type GeoDistanceQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Distance The radius of the circle centred on the specified location. + // Points which fall into this circle are considered to be matches. + Distance string `json:"distance"` + // DistanceType How to compute the distance. + // Set to `plane` for a faster calculation that's inaccurate on long distances + // and close to the poles. + DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` + GeoDistanceQuery map[string]GeoLocation `json:"-"` + // IgnoreUnmapped Set to `true` to ignore an unmapped field and not match any documents for + // this query. + // Set to `false` to throw an exception if the field is not mapped. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ValidationMethod Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or + // longitude. + // Set to `COERCE` to also try to infer correct latitude or longitude. 
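+	// Defaults to `STRICT`, which rejects malformed geo points.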
+ ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` +} + +func (s *GeoDistanceQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "distance": + if err := dec.Decode(&s.Distance); err != nil { + return fmt.Errorf("%s | %w", "Distance", err) + } + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return fmt.Errorf("%s | %w", "DistanceType", err) + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "validation_method": + if err := dec.Decode(&s.ValidationMethod); err != nil { + return fmt.Errorf("%s | %w", "ValidationMethod", err) + } + + default: + + if key, ok := t.(string); ok { + if s.GeoDistanceQuery == nil { + s.GeoDistanceQuery = make(map[string]GeoLocation, 0) + } + raw := new(GeoLocation) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "GeoDistanceQuery", err) + } + s.GeoDistanceQuery[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoDistanceQuery) MarshalJSON() ([]byte, error) { + type opt GeoDistanceQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.GeoDistanceQuery { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "GeoDistanceQuery") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoDistanceQuery returns a GeoDistanceQuery. +func NewGeoDistanceQuery() *GeoDistanceQuery { + r := &GeoDistanceQuery{ + GeoDistanceQuery: make(map[string]GeoLocation), + } + + return r +} + +type GeoDistanceQueryVariant interface { + GeoDistanceQueryCaster() *GeoDistanceQuery +} + +func (s *GeoDistanceQuery) GeoDistanceQueryCaster() *GeoDistanceQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancesort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancesort.go new file mode 100644 index 000000000..5911c522e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geodistancesort.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +// GeoDistanceSort type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L58-L70 +type GeoDistanceSort struct { + DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` + GeoDistanceSort map[string][]GeoLocation `json:"-"` + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + Mode *sortmode.SortMode `json:"mode,omitempty"` + Nested *NestedSortValue `json:"nested,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + Unit *distanceunit.DistanceUnit `json:"unit,omitempty"` +} + +func (s *GeoDistanceSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return fmt.Errorf("%s | %w", "DistanceType", err) + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return fmt.Errorf("%s | %w", "Nested", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return fmt.Errorf("%s | %w", "Unit", err) + } + + default: + + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := new(GeoLocation) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "GeoDistanceSort", err) + } + s.GeoDistanceSort[key] = append(s.GeoDistanceSort[key], o) + default: + o := []GeoLocation{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "GeoDistanceSort", err) + } + s.GeoDistanceSort[key] = o + } + } 
+ + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoDistanceSort) MarshalJSON() ([]byte, error) { + type opt GeoDistanceSort + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.GeoDistanceSort { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "GeoDistanceSort") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoDistanceSort returns a GeoDistanceSort. +func NewGeoDistanceSort() *GeoDistanceSort { + r := &GeoDistanceSort{ + GeoDistanceSort: make(map[string][]GeoLocation), + } + + return r +} + +type GeoDistanceSortVariant interface { + GeoDistanceSortCaster() *GeoDistanceSort +} + +func (s *GeoDistanceSort) GeoDistanceSortCaster() *GeoDistanceSort { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridprocessor.go new file mode 100644 index 000000000..814433605 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridprocessor.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtargetformat" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtiletype" +) + +// GeoGridProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L389-L430 +type GeoGridProcessor struct { + // ChildrenField If specified and children tiles exist, save those tile addresses to this + // field as an array of strings. + ChildrenField *string `json:"children_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to interpret as a geo-tile.= + // The field format is determined by the `tile_type`. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // NonChildrenField If specified and intersecting non-child tiles exist, save their addresses to + // this field as an array of strings. + NonChildrenField *string `json:"non_children_field,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // ParentField If specified and a parent tile exists, save that tile address to this field. + ParentField *string `json:"parent_field,omitempty"` + // PrecisionField If specified, save the tile precision (zoom) as an integer to this field. + PrecisionField *string `json:"precision_field,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the polygon shape to, by default, the `field` is updated + // in-place. + TargetField *string `json:"target_field,omitempty"` + // TargetFormat Which format to save the generated polygon in. + TargetFormat *geogridtargetformat.GeoGridTargetFormat `json:"target_format,omitempty"` + // TileType Three tile formats are understood: geohash, geotile and geohex. + TileType geogridtiletype.GeoGridTileType `json:"tile_type"` +} + +func (s *GeoGridProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children_field": + if err := dec.Decode(&s.ChildrenField); err != nil { + return fmt.Errorf("%s | %w", "ChildrenField", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "non_children_field": + if err := dec.Decode(&s.NonChildrenField); err != nil { + return fmt.Errorf("%s | %w", "NonChildrenField", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "parent_field": + if err := dec.Decode(&s.ParentField); err != nil { + return fmt.Errorf("%s | %w", "ParentField", err) + } + + case "precision_field": + if err := dec.Decode(&s.PrecisionField); err != nil { + return fmt.Errorf("%s | %w", "PrecisionField", err) + } + + case "tag": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "target_format": + if err := dec.Decode(&s.TargetFormat); err != nil { + return fmt.Errorf("%s | %w", "TargetFormat", err) + } + + case "tile_type": + if err := dec.Decode(&s.TileType); err != nil { + return fmt.Errorf("%s | %w", "TileType", err) + } + + } + } + return nil +} + +// NewGeoGridProcessor returns a GeoGridProcessor. +func NewGeoGridProcessor() *GeoGridProcessor { + r := &GeoGridProcessor{} + + return r +} + +type GeoGridProcessorVariant interface { + GeoGridProcessorCaster() *GeoGridProcessor +} + +func (s *GeoGridProcessor) GeoGridProcessorCaster() *GeoGridProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridquery.go new file mode 100644 index 000000000..fd2593f39 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geogridquery.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoGridQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L98-L103 +type GeoGridQuery struct { + AdditionalGeoGridQueryProperty map[string]json.RawMessage `json:"-"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + Geohash *string `json:"geohash,omitempty"` + Geohex *string `json:"geohex,omitempty"` + Geotile *string `json:"geotile,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoGridQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "geohash": + if err := dec.Decode(&s.Geohash); err != nil { + return fmt.Errorf("%s | %w", "Geohash", err) + } + + case "geohex": + if err := dec.Decode(&s.Geohex); err != nil { + return fmt.Errorf("%s | %w", "Geohex", err) + } + + case "geotile": + if err := dec.Decode(&s.Geotile); err != nil { + return fmt.Errorf("%s | %w", "Geotile", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + if key, ok := t.(string); ok { + if s.AdditionalGeoGridQueryProperty == nil { + s.AdditionalGeoGridQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalGeoGridQueryProperty", err) + } + s.AdditionalGeoGridQueryProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoGridQuery) MarshalJSON() ([]byte, error) { + type opt GeoGridQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalGeoGridQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalGeoGridQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoGridQuery returns a GeoGridQuery. +func NewGeoGridQuery() *GeoGridQuery { + r := &GeoGridQuery{ + AdditionalGeoGridQueryProperty: make(map[string]json.RawMessage), + } + + return r +} + +type GeoGridQueryVariant interface { + GeoGridQueryCaster() *GeoGridQuery +} + +func (s *GeoGridQuery) GeoGridQueryCaster() *GeoGridQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregate.go new file mode 100644 index 000000000..959272d3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoHashGridAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L566-L568 +type GeoHashGridAggregate struct { + Buckets BucketsGeoHashGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoHashGridAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]GeoHashGridBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []GeoHashGridBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoHashGridAggregate returns a GeoHashGridAggregate. +func NewGeoHashGridAggregate() *GeoHashGridAggregate { + r := &GeoHashGridAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregation.go new file mode 100644 index 000000000..6daf1b7d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridaggregation.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// GeoHashGridAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L421-L449
+type GeoHashGridAggregation struct {
+	// Bounds The bounding box to filter the points in each bucket.
+	Bounds GeoBounds `json:"bounds,omitempty"`
+	// Field Field containing indexed `geo_point` or `geo_shape` values.
+	// If the field contains an array, `geohash_grid` aggregates all array values.
+	Field *string `json:"field,omitempty"`
+	// Precision The string length of the geohashes used to define cells/buckets in the
+	// results.
+	Precision GeoHashPrecision `json:"precision,omitempty"`
+	// ShardSize Allows for more accurate counting of the top cells returned in the final
+	// result of the aggregation.
+	// Defaults to returning `max(10,(size x number-of-shards))` buckets from each
+	// shard.
+	ShardSize *int `json:"shard_size,omitempty"`
+	// Size The maximum number of geohash buckets to return.
+	Size *int `json:"size,omitempty"`
+}
+
+func (s *GeoHashGridAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bounds":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Bounds", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		bounds_field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+
+				switch t {
+
+				case "bottom", "left", "right", "top":
+					o := NewCoordsGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_right", "top_left":
+					o := NewTopLeftBottomRightGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_left", "top_right":
+					o := NewTopRightBottomLeftGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "wkt":
+					o := NewWktGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				}
+			}
+			if s.Bounds == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Bounds); err != nil {
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "precision":
+			if err := dec.Decode(&s.Precision); err != nil {
+				return fmt.Errorf("%s | %w", "Precision", err)
+			}
+
+		case "shard_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShardSize", err)
+				}
+				s.ShardSize = &value
+			case float64:
+				f := int(v)
+				s.ShardSize = &f
+			}
+
+		case "size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Size", err)
+				}
+				s.Size = &value
+			case float64:
+				f := int(v)
+				s.Size = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewGeoHashGridAggregation returns a GeoHashGridAggregation.
+func NewGeoHashGridAggregation() *GeoHashGridAggregation {
+	r := &GeoHashGridAggregation{}
+
+	return r
+}
+
+type GeoHashGridAggregationVariant interface {
+	GeoHashGridAggregationCaster() *GeoHashGridAggregation
+}
+
+func (s *GeoHashGridAggregation) GeoHashGridAggregationCaster() *GeoHashGridAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridbucket.go
new file mode 100644
index 000000000..6c774a3eb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashgridbucket.go
@@ -0,0 +1,638 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// GeoHashGridBucket type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L570-L572 +type GeoHashGridBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` +} + +func (s *GeoHashGridBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := 
make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoHashGridBucket) MarshalJSON() ([]byte, error) { + type opt GeoHashGridBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoHashGridBucket returns a GeoHashGridBucket. +func NewGeoHashGridBucket() *GeoHashGridBucket { + r := &GeoHashGridBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashlocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashlocation.go new file mode 100644 index 000000000..8321f9ed4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashlocation.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoHashLocation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L131-L133 +type GeoHashLocation struct { + Geohash string `json:"geohash"` +} + +func (s *GeoHashLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "geohash": + if err := dec.Decode(&s.Geohash); err != nil { + return fmt.Errorf("%s | %w", "Geohash", err) + } + + } + } + return nil +} + +// NewGeoHashLocation returns a GeoHashLocation. 
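
For reviewers: the `type#name` switch in GeoHashGridBucket.UnmarshalJSON above is what turns Elasticsearch's typed-keys response format into concrete aggregate structs. A minimal decoding sketch, assuming the usual AvgAggregate shape (a Value pointer) defined elsewhere in this package, and that typed_keys was set on the request (which the typed client arranges):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Sub-aggregation keys arrive as "type#name" in typed-keys responses.
	raw := []byte(`{"key":"u17","doc_count":42,"avg#mean_price":{"value":9.5}}`)

	bucket := types.NewGeoHashGridBucket()
	if err := json.Unmarshal(raw, bucket); err != nil {
		panic(err)
	}

	// The "avg#" prefix routed the value into a *types.AvgAggregate.
	if avg, ok := bucket.Aggregations["mean_price"].(*types.AvgAggregate); ok {
		fmt.Println(bucket.Key, bucket.DocCount, *avg.Value) // u17 42 9.5
	}
}
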
+func NewGeoHashLocation() *GeoHashLocation { + r := &GeoHashLocation{} + + return r +} + +type GeoHashLocationVariant interface { + GeoHashLocationCaster() *GeoHashLocation +} + +func (s *GeoHashLocation) GeoHashLocationCaster() *GeoHashLocation { + return s +} + +func (s *GeoHashLocation) GeoLocationCaster() *GeoLocation { + o := GeoLocation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashprecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashprecision.go new file mode 100644 index 000000000..85e744b1f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohashprecision.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GeoHashPrecision holds the union for the following types: +// +// int +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L86-L90 +type GeoHashPrecision any + +type GeoHashPrecisionVariant interface { + GeoHashPrecisionCaster() *GeoHashPrecision +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregate.go new file mode 100644 index 000000000..80aeaa095 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoHexGridAggregate type. 
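
Worth noting on the GeoHashPrecision union just added: it is an open alias (`any`) covering int and string, so either a geohash length or a distance string can be supplied. A quick illustrative sketch; the "1km" distance form mirrors the geohash_grid precision parameter, and the exact accepted strings are Elasticsearch-side validation, not something this type enforces:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Either variant of the union serializes as-is; no validation happens here.
	for _, p := range []types.GeoHashPrecision{5, "1km"} {
		b, _ := json.Marshal(p)
		fmt.Println(string(b)) // 5, then "1km"
	}
}
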
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L585-L586 +type GeoHexGridAggregate struct { + Buckets BucketsGeoHexGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoHexGridAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]GeoHexGridBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []GeoHexGridBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoHexGridAggregate returns a GeoHexGridAggregate. +func NewGeoHexGridAggregate() *GeoHexGridAggregate { + r := &GeoHexGridAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregation.go new file mode 100644 index 000000000..39ca148c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridaggregation.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeohexGridAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L479-L504 +type GeohexGridAggregation struct { + // Bounds Bounding box used to filter the geo-points in each bucket. + Bounds GeoBounds `json:"bounds,omitempty"` + // Field Field containing indexed `geo_point` or `geo_shape` values. + // If the field contains an array, `geohex_grid` aggregates all array values. + Field string `json:"field"` + // Precision Integer zoom of the key used to defined cells or buckets + // in the results. Value should be between 0-15. + Precision *int `json:"precision,omitempty"` + // ShardSize Number of buckets returned from each shard. 
+ ShardSize *int `json:"shard_size,omitempty"` + // Size Maximum number of buckets to return. + Size *int `json:"size,omitempty"` +} + +func (s *GeohexGridAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + bounds_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Bounds", err) + } + + switch t { + + case "bottom", "left", "right", "top": + o := NewCoordsGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "bottom_right", "top_left": + o := NewTopLeftBottomRightGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "bottom_left", "top_right": + o := NewTopRightBottomLeftGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + case "wkt": + o := NewWktGeoBounds() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + s.Bounds = o + break bounds_field + + } + } + if s.Bounds == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Bounds); err != nil { + return fmt.Errorf("%s | %w", "Bounds", err) + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "precision": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Precision", err) + } + s.Precision = &value + case float64: + f := int(v) + s.Precision = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewGeohexGridAggregation returns a GeohexGridAggregation. 
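
Since GeohexGridAggregation is plain data with optional pointer fields, building a request body is just struct population plus json.Marshal. A small sketch using only the fields shown above (the "location" field name is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	precision := 6 // H3 resolution; the field comment above allows 0-15
	agg := types.NewGeohexGridAggregation()
	agg.Field = "location"
	agg.Precision = &precision

	b, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"field":"location","precision":6}
}
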
+func NewGeohexGridAggregation() *GeohexGridAggregation { + r := &GeohexGridAggregation{} + + return r +} + +type GeohexGridAggregationVariant interface { + GeohexGridAggregationCaster() *GeohexGridAggregation +} + +func (s *GeohexGridAggregation) GeohexGridAggregationCaster() *GeohexGridAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridbucket.go new file mode 100644 index 000000000..4547cad34 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geohexgridbucket.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// GeoHexGridBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L588-L590 +type GeoHexGridBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` +} + +func (s *GeoHexGridBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := 
make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s GeoHexGridBucket) MarshalJSON() ([]byte, error) { + type opt GeoHexGridBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewGeoHexGridBucket returns a GeoHexGridBucket. +func NewGeoHexGridBucket() *GeoHexGridBucket { + r := &GeoHexGridBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdatabaseconfigurationmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdatabaseconfigurationmetadata.go new file mode 100644 index 000000000..353ca1615 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdatabaseconfigurationmetadata.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoipDatabaseConfigurationMetadata type. 
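
The MarshalJSON override just above does the inverse of the typed-keys decoding: it flattens the Aggregations map back into top-level keys through an intermediate map. A short round-trip sketch; Aggregate is an open union in this package, so a plain map stands in for a typed aggregate, and the H3 cell key is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	bucket := types.NewGeoHexGridBucket()
	bucket.Key = "841969dffffffff"
	bucket.DocCount = 17
	bucket.Aggregations["mean_price"] = map[string]any{"value": 9.5}

	b, err := json.Marshal(bucket)
	if err != nil {
		panic(err)
	}
	// Map keys marshal in sorted order:
	fmt.Println(string(b)) // {"doc_count":17,"key":"841969dffffffff","mean_price":{"value":9.5}}
}
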
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/get_geoip_database/GetGeoipDatabaseResponse.ts#L29-L34 +type GeoipDatabaseConfigurationMetadata struct { + Database DatabaseConfiguration `json:"database"` + Id string `json:"id"` + ModifiedDateMillis int64 `json:"modified_date_millis"` + Version int64 `json:"version"` +} + +func (s *GeoipDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database": + if err := dec.Decode(&s.Database); err != nil { + return fmt.Errorf("%s | %w", "Database", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = value + case float64: + f := int64(v) + s.Version = f + } + + } + } + return nil +} + +// NewGeoipDatabaseConfigurationMetadata returns a GeoipDatabaseConfigurationMetadata. +func NewGeoipDatabaseConfigurationMetadata() *GeoipDatabaseConfigurationMetadata { + r := &GeoipDatabaseConfigurationMetadata{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdownloadstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdownloadstatistics.go new file mode 100644 index 000000000..651ad3a0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipdownloadstatistics.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoIpDownloadStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/geo_ip_stats/types.ts#L24-L37 +type GeoIpDownloadStatistics struct { + // DatabasesCount Current number of databases available for use. 
+ DatabasesCount int `json:"databases_count"` + // ExpiredDatabases Total number of databases not updated after 30 days + ExpiredDatabases int `json:"expired_databases"` + // FailedDownloads Total number of failed database downloads. + FailedDownloads int `json:"failed_downloads"` + // SkippedUpdates Total number of database updates skipped. + SkippedUpdates int `json:"skipped_updates"` + // SuccessfulDownloads Total number of successful database downloads. + SuccessfulDownloads int `json:"successful_downloads"` + // TotalDownloadTime Total milliseconds spent downloading databases. + TotalDownloadTime int64 `json:"total_download_time"` +} + +func (s *GeoIpDownloadStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "databases_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DatabasesCount", err) + } + s.DatabasesCount = value + case float64: + f := int(v) + s.DatabasesCount = f + } + + case "expired_databases": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExpiredDatabases", err) + } + s.ExpiredDatabases = value + case float64: + f := int(v) + s.ExpiredDatabases = f + } + + case "failed_downloads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailedDownloads", err) + } + s.FailedDownloads = value + case float64: + f := int(v) + s.FailedDownloads = f + } + + case "skipped_updates": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkippedUpdates", err) + } + s.SkippedUpdates = value + case float64: + f := int(v) + s.SkippedUpdates = f + } + + case "successful_downloads": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SuccessfulDownloads", err) + } + s.SuccessfulDownloads = value + case float64: + f := int(v) + s.SuccessfulDownloads = f + } + + case "total_download_time": + if err := dec.Decode(&s.TotalDownloadTime); err != nil { + return fmt.Errorf("%s | %w", "TotalDownloadTime", err) + } + + } + } + return nil +} + +// NewGeoIpDownloadStatistics returns a GeoIpDownloadStatistics. +func NewGeoIpDownloadStatistics() *GeoIpDownloadStatistics { + r := &GeoIpDownloadStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabasename.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabasename.go new file mode 100644 index 000000000..f376a9359 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabasename.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoIpNodeDatabaseName type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/geo_ip_stats/types.ts#L47-L50 +type GeoIpNodeDatabaseName struct { + // Name Name of the database. + Name string `json:"name"` +} + +func (s *GeoIpNodeDatabaseName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewGeoIpNodeDatabaseName returns a GeoIpNodeDatabaseName. +func NewGeoIpNodeDatabaseName() *GeoIpNodeDatabaseName { + r := &GeoIpNodeDatabaseName{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabases.go new file mode 100644 index 000000000..51e39e23b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipnodedatabases.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GeoIpNodeDatabases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/geo_ip_stats/types.ts#L39-L45 +type GeoIpNodeDatabases struct { + // Databases Downloaded databases for the node. + Databases []GeoIpNodeDatabaseName `json:"databases"` + // FilesInTemp Downloaded database files, including related license files. Elasticsearch + // stores these files in the node’s temporary directory: + // $ES_TMPDIR/geoip-databases/. + FilesInTemp []string `json:"files_in_temp"` +} + +// NewGeoIpNodeDatabases returns a GeoIpNodeDatabases. 
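
One detail of the geoip stats types worth calling out: the generated UnmarshalJSON accepts each integer counter either as a JSON number or as a quoted string, via the string/float64 switch shown above. A minimal sketch against GeoIpDownloadStatistics:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "failed_downloads" is deliberately quoted to exercise the strconv path.
	raw := []byte(`{"databases_count":4,"expired_databases":0,"failed_downloads":"1","skipped_updates":2,"successful_downloads":36,"total_download_time":817}`)

	stats := types.NewGeoIpDownloadStatistics()
	if err := json.Unmarshal(raw, stats); err != nil {
		panic(err)
	}
	fmt.Println(stats.DatabasesCount, stats.FailedDownloads) // 4 1
}
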
+func NewGeoIpNodeDatabases() *GeoIpNodeDatabases { + r := &GeoIpNodeDatabases{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipprocessor.go new file mode 100644 index 000000000..565e7463c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoipprocessor.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoIpProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L443-L477 +type GeoIpProcessor struct { + // DatabaseFile The database filename referring to a database the module ships with + // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom + // database in the ingest-geoip config directory. + DatabaseFile *string `json:"database_file,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DownloadDatabaseOnPipelineCreation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the + // missing database is downloaded when the pipeline is created. + // Else, the download is triggered by when the pipeline is used as the + // `default_pipeline` or `final_pipeline` in an index. + DownloadDatabaseOnPipelineCreation *bool `json:"download_database_on_pipeline_creation,omitempty"` + // Field The field to get the ip address from for the geographical lookup. + Field string `json:"field"` + // FirstOnly If `true`, only the first found geoip data will be returned, even if the + // field contains an array. + FirstOnly *bool `json:"first_only,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to the `target_field` based on the geoip + // lookup. + Properties []string `json:"properties,omitempty"` + // Tag Identifier for the processor. 
+ // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the geographical information looked up from the + // MaxMind database. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *GeoIpProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatabaseFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatabaseFile = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "download_database_on_pipeline_creation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DownloadDatabaseOnPipelineCreation", err) + } + s.DownloadDatabaseOnPipelineCreation = &value + case bool: + s.DownloadDatabaseOnPipelineCreation = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "first_only": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FirstOnly", err) + } + s.FirstOnly = &value + case bool: + s.FirstOnly = &v + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewGeoIpProcessor returns a GeoIpProcessor. 
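
GeoIpProcessor is mostly optional pointer fields, so configuring one takes a little pointer plumbing. A sketch with a local generic helper; the field values are illustrative, not defaults:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// ptr is a local helper for the processor's optional pointer fields.
func ptr[T any](v T) *T { return &v }

func main() {
	p := types.NewGeoIpProcessor()
	p.Field = "source.ip"
	p.TargetField = ptr("source.geo")
	p.IgnoreMissing = ptr(true)
	p.Properties = []string{"country_iso_code", "location"}

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
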
+func NewGeoIpProcessor() *GeoIpProcessor { + r := &GeoIpProcessor{} + + return r +} + +type GeoIpProcessorVariant interface { + GeoIpProcessorCaster() *GeoIpProcessor +} + +func (s *GeoIpProcessor) GeoIpProcessorCaster() *GeoIpProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoline.go new file mode 100644 index 000000000..2d1371e3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoline.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoLine type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L56-L62 +type GeoLine struct { + // Coordinates Array of `[lon, lat]` coordinates + Coordinates [][]Float64 `json:"coordinates"` + // Type Always `"LineString"` + Type string `json:"type"` +} + +func (s *GeoLine) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coordinates": + if err := dec.Decode(&s.Coordinates); err != nil { + return fmt.Errorf("%s | %w", "Coordinates", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewGeoLine returns a GeoLine. +func NewGeoLine() *GeoLine { + r := &GeoLine{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregate.go new file mode 100644 index 000000000..6ec22664e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregate.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoLineAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L904-L914 +type GeoLineAggregate struct { + Geometry GeoLine `json:"geometry"` + Meta Metadata `json:"meta,omitempty"` + Properties json.RawMessage `json:"properties,omitempty"` + Type string `json:"type"` +} + +func (s *GeoLineAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "geometry": + if err := dec.Decode(&s.Geometry); err != nil { + return fmt.Errorf("%s | %w", "Geometry", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewGeoLineAggregate returns a GeoLineAggregate. +func NewGeoLineAggregate() *GeoLineAggregate { + r := &GeoLineAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregation.go new file mode 100644 index 000000000..1cf1a94f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolineaggregation.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+)
+
+// GeoLineAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L130-L155
+type GeoLineAggregation struct {
+	// IncludeSort When `true`, returns an additional array of the sort values in the feature
+	// properties.
+	IncludeSort *bool `json:"include_sort,omitempty"`
+	// Point The name of the geo_point field.
+	Point GeoLinePoint `json:"point"`
+	// Size The maximum length of the line represented in the aggregation.
+	// Valid sizes are between 1 and 10000.
+	Size *int `json:"size,omitempty"`
+	// Sort The name of the numeric field to use as the sort key for ordering the points.
+	// When the `geo_line` aggregation is nested inside a `time_series` aggregation,
+	// this field defaults to `@timestamp`, and any other value will result in an
+	// error.
+	Sort GeoLineSort `json:"sort"`
+	// SortOrder The order in which the line is sorted (ascending or descending).
+	SortOrder *sortorder.SortOrder `json:"sort_order,omitempty"`
+}
+
+func (s *GeoLineAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "include_sort":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IncludeSort", err)
+				}
+				s.IncludeSort = &value
+			case bool:
+				s.IncludeSort = &v
+			}
+
+		case "point":
+			if err := dec.Decode(&s.Point); err != nil {
+				return fmt.Errorf("%s | %w", "Point", err)
+			}
+
+		case "size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Size", err)
+				}
+				s.Size = &value
+			case float64:
+				f := int(v)
+				s.Size = &f
+			}
+
+		case "sort":
+			if err := dec.Decode(&s.Sort); err != nil {
+				return fmt.Errorf("%s | %w", "Sort", err)
+			}
+
+		case "sort_order":
+			if err := dec.Decode(&s.SortOrder); err != nil {
+				return fmt.Errorf("%s | %w", "SortOrder", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewGeoLineAggregation returns a GeoLineAggregation.
+func NewGeoLineAggregation() *GeoLineAggregation {
+	r := &GeoLineAggregation{}
+
+	return r
+}
+
+type GeoLineAggregationVariant interface {
+	GeoLineAggregationCaster() *GeoLineAggregation
+}
+
+func (s *GeoLineAggregation) GeoLineAggregationCaster() *GeoLineAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinepoint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinepoint.go
new file mode 100644
index 000000000..007fcad12
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinepoint.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoLinePoint type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L164-L169 +type GeoLinePoint struct { + // Field The name of the geo_point field. + Field string `json:"field"` +} + +func (s *GeoLinePoint) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + } + } + return nil +} + +// NewGeoLinePoint returns a GeoLinePoint. +func NewGeoLinePoint() *GeoLinePoint { + r := &GeoLinePoint{} + + return r +} + +type GeoLinePointVariant interface { + GeoLinePointCaster() *GeoLinePoint +} + +func (s *GeoLinePoint) GeoLinePointCaster() *GeoLinePoint { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinesort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinesort.go new file mode 100644 index 000000000..a41843be0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolinesort.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoLineSort type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L157-L162 +type GeoLineSort struct { + // Field The name of the numeric field to use as the sort key for ordering the points. 
+ Field string `json:"field"` +} + +func (s *GeoLineSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + } + } + return nil +} + +// NewGeoLineSort returns a GeoLineSort. +func NewGeoLineSort() *GeoLineSort { + r := &GeoLineSort{} + + return r +} + +type GeoLineSortVariant interface { + GeoLineSortCaster() *GeoLineSort +} + +func (s *GeoLineSort) GeoLineSortCaster() *GeoLineSort { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolocation.go new file mode 100644 index 000000000..f529a9105 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geolocation.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GeoLocation holds the union for the following types: +// +// LatLonGeoLocation +// GeoHashLocation +// []Float64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L104-L118 +type GeoLocation any + +type GeoLocationVariant interface { + GeoLocationCaster() *GeoLocation +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopointproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopointproperty.go new file mode 100644 index 000000000..df38107e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopointproperty.go @@ -0,0 +1,990 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geopointmetrictype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// GeoPointProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L24-L33 +type GeoPointProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreZValue *bool `json:"ignore_z_value,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue GeoLocation `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesMetric *geopointmetrictype.GeoPointMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := 
NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "ignore_z_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreZValue", err) + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + nullvalue_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "NullValue", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + s.NullValue = o + break nullvalue_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) 
+ } + s.NullValue = o + break nullvalue_field + + } + } + if s.NullValue == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", 
err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": 
+ if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GeoPointProperty) MarshalJSON() ([]byte, error) { + type innerGeoPointProperty GeoPointProperty + tmp := innerGeoPointProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "geo_point" + + return json.Marshal(tmp) +} + +// NewGeoPointProperty returns a GeoPointProperty. +func NewGeoPointProperty() *GeoPointProperty { + r := &GeoPointProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type GeoPointPropertyVariant interface { + GeoPointPropertyCaster() *GeoPointProperty +} + +func (s *GeoPointProperty) GeoPointPropertyCaster() *GeoPointProperty { + return s +} + +func (s *GeoPointProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonpoints.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonpoints.go new file mode 100644 index 000000000..c18c28253 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonpoints.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GeoPolygonPoints type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L105-L107 +type GeoPolygonPoints struct { + Points []GeoLocation `json:"points"` +} + +// NewGeoPolygonPoints returns a GeoPolygonPoints. 
+func NewGeoPolygonPoints() *GeoPolygonPoints { + r := &GeoPolygonPoints{} + + return r +} + +type GeoPolygonPointsVariant interface { + GeoPolygonPointsCaster() *GeoPolygonPoints +} + +func (s *GeoPolygonPoints) GeoPolygonPointsCaster() *GeoPolygonPoints { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonquery.go new file mode 100644 index 000000000..c437f3032 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geopolygonquery.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod" +) + +// GeoPolygonQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L109-L121 +type GeoPolygonQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+	Boost *float32 `json:"boost,omitempty"`
+	GeoPolygonQuery map[string]GeoPolygonPoints `json:"-"`
+	IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"`
+	QueryName_ *string `json:"_name,omitempty"`
+	ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"`
+}
+
+func (s *GeoPolygonQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "ignore_unmapped":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreUnmapped", err)
+				}
+				s.IgnoreUnmapped = &value
+			case bool:
+				s.IgnoreUnmapped = &v
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		case "validation_method":
+			if err := dec.Decode(&s.ValidationMethod); err != nil {
+				return fmt.Errorf("%s | %w", "ValidationMethod", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.GeoPolygonQuery == nil {
+					s.GeoPolygonQuery = make(map[string]GeoPolygonPoints, 0)
+				}
+				raw := NewGeoPolygonPoints()
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "GeoPolygonQuery", err)
+				}
+				s.GeoPolygonQuery[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s GeoPolygonQuery) MarshalJSON() ([]byte, error) {
+	type opt GeoPolygonQuery
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.GeoPolygonQuery {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "GeoPolygonQuery")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewGeoPolygonQuery returns a GeoPolygonQuery.
+func NewGeoPolygonQuery() *GeoPolygonQuery {
+	r := &GeoPolygonQuery{
+		GeoPolygonQuery: make(map[string]GeoPolygonPoints),
+	}
+
+	return r
+}
+
+type GeoPolygonQueryVariant interface {
+	GeoPolygonQueryCaster() *GeoPolygonQuery
+}
+
+func (s *GeoPolygonQuery) GeoPolygonQueryCaster() *GeoPolygonQuery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/georesults.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/georesults.go
new file mode 100644
index 000000000..0d2dd605b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/georesults.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoResults type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Anomaly.ts#L146-L155 +type GeoResults struct { + // ActualPoint The actual value for the bucket formatted as a `geo_point`. + ActualPoint *string `json:"actual_point,omitempty"` + // TypicalPoint The typical value for the bucket formatted as a `geo_point`. + TypicalPoint *string `json:"typical_point,omitempty"` +} + +func (s *GeoResults) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_point": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ActualPoint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ActualPoint = &o + + case "typical_point": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TypicalPoint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypicalPoint = &o + + } + } + return nil +} + +// NewGeoResults returns a GeoResults. +func NewGeoResults() *GeoResults { + r := &GeoResults{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapefieldquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapefieldquery.go new file mode 100644 index 000000000..d50331716 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapefieldquery.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation"
+)
+
+// GeoShapeFieldQuery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L128-L139
+type GeoShapeFieldQuery struct {
+	// IndexedShape Query using an indexed shape retrieved from the specified document and
+	// path.
+	IndexedShape *FieldLookup `json:"indexed_shape,omitempty"`
+	// Relation Spatial relation operator used to search a geo field.
+	Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"`
+	Shape json.RawMessage `json:"shape,omitempty"`
+}
+
+func (s *GeoShapeFieldQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "indexed_shape":
+			if err := dec.Decode(&s.IndexedShape); err != nil {
+				return fmt.Errorf("%s | %w", "IndexedShape", err)
+			}
+
+		case "relation":
+			if err := dec.Decode(&s.Relation); err != nil {
+				return fmt.Errorf("%s | %w", "Relation", err)
+			}
+
+		case "shape":
+			if err := dec.Decode(&s.Shape); err != nil {
+				return fmt.Errorf("%s | %w", "Shape", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewGeoShapeFieldQuery returns a GeoShapeFieldQuery.
+func NewGeoShapeFieldQuery() *GeoShapeFieldQuery {
+	r := &GeoShapeFieldQuery{}
+
+	return r
+}
+
+type GeoShapeFieldQueryVariant interface {
+	GeoShapeFieldQueryCaster() *GeoShapeFieldQuery
+}
+
+func (s *GeoShapeFieldQuery) GeoShapeFieldQueryCaster() *GeoShapeFieldQuery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapeproperty.go
new file mode 100644
index 000000000..596ff5393
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapeproperty.go
@@ -0,0 +1,952 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geostrategy" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// GeoShapeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L48-L62 +type GeoShapeProperty struct { + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreZValue *bool `json:"ignore_z_value,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Strategy *geostrategy.GeoStrategy `json:"strategy,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := 
NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "ignore_z_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreZValue", err) + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "orientation": + if err := dec.Decode(&s.Orientation); err != nil { + return fmt.Errorf("%s | %w", "Orientation", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "strategy": + if err := dec.Decode(&s.Strategy); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { + type innerGeoShapeProperty GeoShapeProperty + tmp := innerGeoShapeProperty{ + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Index: s.Index, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + Strategy: s.Strategy, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "geo_shape" + + return json.Marshal(tmp) +} + +// NewGeoShapeProperty returns a GeoShapeProperty. 
+func NewGeoShapeProperty() *GeoShapeProperty { + r := &GeoShapeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type GeoShapePropertyVariant interface { + GeoShapePropertyCaster() *GeoShapeProperty +} + +func (s *GeoShapeProperty) GeoShapePropertyCaster() *GeoShapeProperty { + return s +} + +func (s *GeoShapeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapequery.go new file mode 100644 index 000000000..655eccea3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geoshapequery.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoShapeQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/geo.ts#L141-L157 +type GeoShapeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + GeoShapeQuery map[string]GeoShapeFieldQuery `json:"-"` + // IgnoreUnmapped Set to `true` to ignore an unmapped field and not match any documents for + // this query. + // Set to `false` to throw an exception if the field is not mapped. 
+	IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"`
+	QueryName_ *string `json:"_name,omitempty"`
+}
+
+func (s *GeoShapeQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "ignore_unmapped":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreUnmapped", err)
+				}
+				s.IgnoreUnmapped = &value
+			case bool:
+				s.IgnoreUnmapped = &v
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.GeoShapeQuery == nil {
+					s.GeoShapeQuery = make(map[string]GeoShapeFieldQuery, 0)
+				}
+				raw := NewGeoShapeFieldQuery()
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "GeoShapeQuery", err)
+				}
+				s.GeoShapeQuery[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s GeoShapeQuery) MarshalJSON() ([]byte, error) {
+	type opt GeoShapeQuery
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.GeoShapeQuery {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "GeoShapeQuery")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewGeoShapeQuery returns a GeoShapeQuery.
+func NewGeoShapeQuery() *GeoShapeQuery {
+	r := &GeoShapeQuery{
+		GeoShapeQuery: make(map[string]GeoShapeFieldQuery),
+	}
+
+	return r
+}
+
+type GeoShapeQueryVariant interface {
+	GeoShapeQueryCaster() *GeoShapeQuery
+}
+
+func (s *GeoShapeQuery) GeoShapeQueryCaster() *GeoShapeQuery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregate.go
new file mode 100644
index 000000000..54d8877e3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregate.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GeoTileGridAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L574-L579 +type GeoTileGridAggregate struct { + Buckets BucketsGeoTileGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoTileGridAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]GeoTileGridBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []GeoTileGridBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewGeoTileGridAggregate returns a GeoTileGridAggregate. +func NewGeoTileGridAggregate() *GeoTileGridAggregate { + r := &GeoTileGridAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregation.go new file mode 100644 index 000000000..5dbd6d666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridaggregation.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GeoTileGridAggregation type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L451-L477
+type GeoTileGridAggregation struct {
+	// Bounds A bounding box to filter the geo-points or geo-shapes in each bucket.
+	Bounds GeoBounds `json:"bounds,omitempty"`
+	// Field Field containing indexed `geo_point` or `geo_shape` values.
+	// If the field contains an array, `geotile_grid` aggregates all array values.
+	Field *string `json:"field,omitempty"`
+	// Precision Integer zoom of the key used to define cells/buckets in the results.
+	// Values outside of the range [0,29] will be rejected.
+	Precision *int `json:"precision,omitempty"`
+	// ShardSize Allows for more accurate counting of the top cells returned in the final
+	// result of the aggregation.
+	// Defaults to returning `max(10,(size x number-of-shards))` buckets from each
+	// shard.
+	ShardSize *int `json:"shard_size,omitempty"`
+	// Size The maximum number of buckets to return.
+	Size *int `json:"size,omitempty"`
+}
+
+func (s *GeoTileGridAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bounds":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Bounds", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		bounds_field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+
+				switch t {
+
+				case "bottom", "left", "right", "top":
+					o := NewCoordsGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_right", "top_left":
+					o := NewTopLeftBottomRightGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "bottom_left", "top_right":
+					o := NewTopRightBottomLeftGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				case "wkt":
+					o := NewWktGeoBounds()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Bounds", err)
+					}
+					s.Bounds = o
+					break bounds_field
+
+				}
+			}
+			if s.Bounds == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Bounds); err != nil {
+					return fmt.Errorf("%s | %w", "Bounds", err)
+				}
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "precision":
+			if err := dec.Decode(&s.Precision); err != nil {
+				return fmt.Errorf("%s | %w", "Precision", err)
+			}
+
+		case "shard_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShardSize", err)
+				}
+				s.ShardSize = &value
+			case float64:
+				f := int(v)
+				s.ShardSize = &f
+			}
+
+		case "size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil
{ + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewGeoTileGridAggregation returns a GeoTileGridAggregation. +func NewGeoTileGridAggregation() *GeoTileGridAggregation { + r := &GeoTileGridAggregation{} + + return r +} + +type GeoTileGridAggregationVariant interface { + GeoTileGridAggregationCaster() *GeoTileGridAggregation +} + +func (s *GeoTileGridAggregation) GeoTileGridAggregationCaster() *GeoTileGridAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridbucket.go new file mode 100644 index 000000000..7e22c911f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/geotilegridbucket.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// GeoTileGridBucket type. 
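// --- Editor's aside (illustrative sketch, not part of the vendored files) ---
// GeoTileGridAggregation, defined above, is a plain request type: construct
// it, set the optional pointer fields, and marshal it into a search body.
// The field name and zoom level below are assumptions for illustration only.
//
//	agg := types.NewGeoTileGridAggregation()
//	field := "location" // assumed geo_point field in the target index
//	precision := 8      // tile zoom; values outside [0,29] are rejected
//	agg.Field = &field
//	agg.Precision = &precision
//	body, err := json.Marshal(agg) // -> {"field":"location","precision":8}
//	_, _ = body, err
// --- End aside ---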
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L581-L583 +type GeoTileGridBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` +} + +func (s *GeoTileGridBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + 
return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := 
make(map[string]any, 0)
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+						}
+					} else {
+						return errors.New("cannot decode JSON for field Aggregations")
+					}
+				} else {
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[value] = o
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s GeoTileGridBucket) MarshalJSON() ([]byte, error) {
+	type opt GeoTileGridBucket
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewGeoTileGridBucket returns a GeoTileGridBucket.
+func NewGeoTileGridBucket() *GeoTileGridBucket {
+	r := &GeoTileGridBucket{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germananalyzer.go
new file mode 100644
index 000000000..b8651c845
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germananalyzer.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// GermanAnalyzer type.
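// --- Editor's aside (illustrative sketch, not part of the vendored files) ---
// GeoTileGridBucket.UnmarshalJSON above routes response keys of the form
// "type#name" (for example "avg#price") into the Aggregations map as
// concrete aggregate types. A minimal sketch of consuming that, assuming a
// hand-written response fragment:
//
//	var bucket types.GeoTileGridBucket
//	raw := []byte(`{"key":"8/131/84","doc_count":3,"avg#price":{"value":9.5}}`)
//	if err := json.Unmarshal(raw, &bucket); err != nil {
//		panic(err) // placeholder error handling
//	}
//	if avg, ok := bucket.Aggregations["price"].(*types.AvgAggregate); ok {
//		_ = avg // typed access to the sub-aggregation named "price"
//	}
// --- End aside ---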
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L186-L191 +type GermanAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GermanAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GermanAnalyzer) MarshalJSON() ([]byte, error) { + type innerGermanAnalyzer GermanAnalyzer + tmp := innerGermanAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "german" + + return json.Marshal(tmp) +} + +// NewGermanAnalyzer returns a GermanAnalyzer. +func NewGermanAnalyzer() *GermanAnalyzer { + r := &GermanAnalyzer{} + + return r +} + +type GermanAnalyzerVariant interface { + GermanAnalyzerCaster() *GermanAnalyzer +} + +func (s *GermanAnalyzer) GermanAnalyzerCaster() *GermanAnalyzer { + return s +} + +func (s *GermanAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germannormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germannormalizationtokenfilter.go new file mode 100644 index 000000000..ccbec068d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germannormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GermanNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L498-L500 +type GermanNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *GermanNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GermanNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerGermanNormalizationTokenFilter GermanNormalizationTokenFilter + tmp := innerGermanNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "german_normalization" + + return json.Marshal(tmp) +} + +// NewGermanNormalizationTokenFilter returns a GermanNormalizationTokenFilter. +func NewGermanNormalizationTokenFilter() *GermanNormalizationTokenFilter { + r := &GermanNormalizationTokenFilter{} + + return r +} + +type GermanNormalizationTokenFilterVariant interface { + GermanNormalizationTokenFilterCaster() *GermanNormalizationTokenFilter +} + +func (s *GermanNormalizationTokenFilter) GermanNormalizationTokenFilterCaster() *GermanNormalizationTokenFilter { + return s +} + +func (s *GermanNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germanstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germanstemtokenfilter.go new file mode 100644 index 000000000..2ca77b589 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/germanstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GermanStemTokenFilter type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L563-L565 +type GermanStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *GermanStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GermanStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerGermanStemTokenFilter GermanStemTokenFilter + tmp := innerGermanStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "german_stem" + + return json.Marshal(tmp) +} + +// NewGermanStemTokenFilter returns a GermanStemTokenFilter. +func NewGermanStemTokenFilter() *GermanStemTokenFilter { + r := &GermanStemTokenFilter{} + + return r +} + +type GermanStemTokenFilterVariant interface { + GermanStemTokenFilterCaster() *GermanStemTokenFilter +} + +func (s *GermanStemTokenFilter) GermanStemTokenFilterCaster() *GermanStemTokenFilter { + return s +} + +func (s *GermanStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getmigrationfeature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getmigrationfeature.go new file mode 100644 index 000000000..79af78b86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getmigrationfeature.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus" +) + +// GetMigrationFeature type. 
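+//
+// String fields here follow the package-wide decode pattern: take the raw
+// token, attempt strconv.Unquote, and fall back to the raw bytes if the value
+// was not a quoted JSON string. In isolation:
+//
+//	var tmp json.RawMessage = []byte(`"security"`)
+//	o := string(tmp)
+//	if u, err := strconv.Unquote(o); err == nil {
+//		o = u // security, without the surrounding quotes
+//	}
+//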
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 +type GetMigrationFeature struct { + FeatureName string `json:"feature_name"` + Indices []MigrationFeatureIndexInfo `json:"indices"` + MigrationStatus migrationstatus.MigrationStatus `json:"migration_status"` + MinimumIndexVersion string `json:"minimum_index_version"` +} + +func (s *GetMigrationFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "migration_status": + if err := dec.Decode(&s.MigrationStatus); err != nil { + return fmt.Errorf("%s | %w", "MigrationStatus", err) + } + + case "minimum_index_version": + if err := dec.Decode(&s.MinimumIndexVersion); err != nil { + return fmt.Errorf("%s | %w", "MinimumIndexVersion", err) + } + + } + } + return nil +} + +// NewGetMigrationFeature returns a GetMigrationFeature. +func NewGetMigrationFeature() *GetMigrationFeature { + r := &GetMigrationFeature{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getresult.go new file mode 100644 index 000000000..b15f170c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getresult.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GetResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get/types.ts#L25-L67 +type GetResult struct { + // Fields If the `stored_fields` parameter is set to `true` and `found` is `true`, it + // contains the document fields stored in the index. + Fields map[string]json.RawMessage `json:"fields,omitempty"` + // Found Indicates whether the document exists. + Found bool `json:"found"` + // Id_ The unique identifier for the document. 
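+	//
+	// Source_ below carries the raw document; a caller-side sketch for
+	// decoding it into an application struct (the struct shape is purely
+	// illustrative, and res stands for a *GetResult):
+	//
+	//	var doc struct {
+	//		Title string `json:"title"`
+	//	}
+	//	if res.Found && res.Source_ != nil {
+	//		err := json.Unmarshal(res.Source_, &doc)
+	//		_ = err // handle in real code
+	//	}
+	//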
+	Id_      string   `json:"_id"`
+	Ignored_ []string `json:"_ignored,omitempty"`
+	// Index_ The name of the index the document belongs to.
+	Index_ string `json:"_index"`
+	// PrimaryTerm_ The primary term assigned to the document for the indexing operation.
+	PrimaryTerm_ *int64 `json:"_primary_term,omitempty"`
+	// Routing_ The explicit routing, if set.
+	Routing_ *string `json:"_routing,omitempty"`
+	// SeqNo_ The sequence number assigned to the document for the indexing operation.
+	// Sequence numbers are used to ensure an older version of a document doesn't
+	// overwrite a newer version.
+	SeqNo_ *int64 `json:"_seq_no,omitempty"`
+	// Source_ If `found` is `true`, it contains the document data formatted in JSON.
+	// If the `_source` parameter is set to `false` or the `stored_fields` parameter
+	// is set to `true`, it is excluded.
+	Source_ json.RawMessage `json:"_source,omitempty"`
+	// Version_ The document version, which is incremented each time the document is updated.
+	Version_ *int64 `json:"_version,omitempty"`
+}
+
+func (s *GetResult) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "fields":
+			if s.Fields == nil {
+				s.Fields = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Fields); err != nil {
+				return fmt.Errorf("%s | %w", "Fields", err)
+			}
+
+		case "found":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Found", err)
+				}
+				s.Found = value
+			case bool:
+				s.Found = v
+			}
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		case "_ignored":
+			if err := dec.Decode(&s.Ignored_); err != nil {
+				return fmt.Errorf("%s | %w", "Ignored_", err)
+			}
+
+		case "_index":
+			if err := dec.Decode(&s.Index_); err != nil {
+				return fmt.Errorf("%s | %w", "Index_", err)
+			}
+
+		case "_primary_term":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PrimaryTerm_", err)
+				}
+				s.PrimaryTerm_ = &value
+			case float64:
+				f := int64(v)
+				s.PrimaryTerm_ = &f
+			}
+
+		case "_routing":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Routing_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Routing_ = &o
+
+		case "_seq_no":
+			if err := dec.Decode(&s.SeqNo_); err != nil {
+				return fmt.Errorf("%s | %w", "SeqNo_", err)
+			}
+
+		case "_source":
+			if err := dec.Decode(&s.Source_); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+
+		case "_version":
+			if err := dec.Decode(&s.Version_); err != nil {
+				return fmt.Errorf("%s | %w", "Version_", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewGetResult returns a GetResult.
+func NewGetResult() *GetResult {
+	r := &GetResult{
+		Fields: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getscriptcontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getscriptcontext.go
new file mode 100644
index 000000000..62873d2b9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getscriptcontext.go
@@ -0,0 +1,74 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// GetScriptContext type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_context/types.ts#L22-L25 +type GetScriptContext struct { + Methods []ContextMethod `json:"methods"` + Name string `json:"name"` +} + +func (s *GetScriptContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "methods": + if err := dec.Decode(&s.Methods); err != nil { + return fmt.Errorf("%s | %w", "Methods", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewGetScriptContext returns a GetScriptContext. +func NewGetScriptContext() *GetScriptContext { + r := &GetScriptContext{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getstats.go new file mode 100644 index 000000000..327a74f36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getstats.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GetStats type. 
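+//
+// Counter fields in this type accept either a JSON number or a quoted number,
+// both of which occur in responses; the decode pattern used below, in
+// isolation:
+//
+//	var tmp any = "42" // may also arrive as float64(42)
+//	var total int64
+//	switch v := tmp.(type) {
+//	case string:
+//		total, _ = strconv.ParseInt(v, 10, 64) // errors handled in the real code
+//	case float64:
+//		total = int64(v)
+//	}
+//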
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L155-L166 +type GetStats struct { + Current int64 `json:"current"` + ExistsTime Duration `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis"` + ExistsTotal int64 `json:"exists_total"` + MissingTime Duration `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis"` + MissingTotal int64 `json:"missing_total"` + Time Duration `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis"` + Total int64 `json:"total"` +} + +func (s *GetStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "exists_time": + if err := dec.Decode(&s.ExistsTime); err != nil { + return fmt.Errorf("%s | %w", "ExistsTime", err) + } + + case "exists_time_in_millis": + if err := dec.Decode(&s.ExistsTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExistsTimeInMillis", err) + } + + case "exists_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ExistsTotal", err) + } + s.ExistsTotal = value + case float64: + f := int64(v) + s.ExistsTotal = f + } + + case "missing_time": + if err := dec.Decode(&s.MissingTime); err != nil { + return fmt.Errorf("%s | %w", "MissingTime", err) + } + + case "missing_time_in_millis": + if err := dec.Decode(&s.MissingTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "MissingTimeInMillis", err) + } + + case "missing_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MissingTotal", err) + } + s.MissingTotal = value + case float64: + f := int64(v) + s.MissingTotal = f + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewGetStats returns a GetStats. +func NewGetStats() *GetStats { + r := &GetStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getuserprofileerrors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getuserprofileerrors.go new file mode 100644 index 000000000..c7ad22b76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/getuserprofileerrors.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GetUserProfileErrors type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_user_profile/types.ts#L25-L28 +type GetUserProfileErrors struct { + Count int64 `json:"count"` + Details map[string]ErrorCause `json:"details"` +} + +func (s *GetUserProfileErrors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "details": + if s.Details == nil { + s.Details = make(map[string]ErrorCause, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + } + } + return nil +} + +// NewGetUserProfileErrors returns a GetUserProfileErrors. +func NewGetUserProfileErrors() *GetUserProfileErrors { + r := &GetUserProfileErrors{ + Details: make(map[string]ErrorCause), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregate.go new file mode 100644 index 000000000..e0f37cd5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
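+//
+// Aggregate payloads in this file are dispatched from response keys of the
+// form "<type>#<name>", which Elasticsearch emits when `typed_keys` is set on
+// the request, e.g.:
+//
+//	{
+//	  "aggregations": {
+//	    "sterms#tags": { "buckets": [] }
+//	  }
+//	}
+//
+// UnmarshalJSON splits each key on "#" and switches on the type prefix to
+// choose the concrete Aggregate implementation.
+//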
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// GlobalAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L546-L550 +type GlobalAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GlobalAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := 
NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o 
:= NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | 
%w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + 
}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_line":
+							o := NewGeoLineAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						default:
+							o := make(map[string]any, 0)
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+						}
+					} else {
+						return errors.New("cannot decode JSON for field Aggregations")
+					}
+				} else {
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[value] = o
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s GlobalAggregate) MarshalJSON() ([]byte, error) {
+	type opt GlobalAggregate
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewGlobalAggregate returns a GlobalAggregate.
+func NewGlobalAggregate() *GlobalAggregate {
+	r := &GlobalAggregate{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregation.go
new file mode 100644
index 000000000..9b2502bdd
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalaggregation.go
@@ -0,0 +1,42 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// GlobalAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L506-L506
+type GlobalAggregation struct {
+}
+
+// NewGlobalAggregation returns a GlobalAggregation.
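+//
+// The global bucket takes no parameters; a request sketch (the "all_docs" key
+// is illustrative, and Aggregations is the request-side container defined
+// elsewhere in this package):
+//
+//	aggs := map[string]Aggregations{
+//		"all_docs": {Global: NewGlobalAggregation()},
+//	}
+//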
+func NewGlobalAggregation() *GlobalAggregation { + r := &GlobalAggregation{} + + return r +} + +type GlobalAggregationVariant interface { + GlobalAggregationCaster() *GlobalAggregation +} + +func (s *GlobalAggregation) GlobalAggregationCaster() *GlobalAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalfieldstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalfieldstats.go new file mode 100644 index 000000000..332610780 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalfieldstats.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GlobalOrdinalFieldStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L137-L141 +type GlobalOrdinalFieldStats struct { + BuildTime *string `json:"build_time,omitempty"` + BuildTimeInMillis int64 `json:"build_time_in_millis"` + ShardMaxValueCount int64 `json:"shard_max_value_count"` +} + +func (s *GlobalOrdinalFieldStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildTime = &o + + case "build_time_in_millis": + if err := dec.Decode(&s.BuildTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "BuildTimeInMillis", err) + } + + case "shard_max_value_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShardMaxValueCount", err) + } + s.ShardMaxValueCount = value + case float64: + f := int64(v) + s.ShardMaxValueCount = f + } + + } + } + return nil +} + +// NewGlobalOrdinalFieldStats returns a GlobalOrdinalFieldStats. 
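+//
+// No map fields here, so the constructor below returns a plain zero value.
+// Sibling constructors such as NewGlobalOrdinalsStats pre-allocate their map
+// fields because assigning into a nil Go map panics:
+//
+//	var m map[string]int
+//	// m["k"] = 1 // would panic: assignment to entry in nil map
+//	m = make(map[string]int)
+//	m["k"] = 1 // fine
+//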
+func NewGlobalOrdinalFieldStats() *GlobalOrdinalFieldStats { + r := &GlobalOrdinalFieldStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalsstats.go new file mode 100644 index 000000000..05fff7977 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalordinalsstats.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GlobalOrdinalsStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L131-L135 +type GlobalOrdinalsStats struct { + BuildTime *string `json:"build_time,omitempty"` + BuildTimeInMillis int64 `json:"build_time_in_millis"` + Fields map[string]GlobalOrdinalFieldStats `json:"fields,omitempty"` +} + +func (s *GlobalOrdinalsStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildTime = &o + + case "build_time_in_millis": + if err := dec.Decode(&s.BuildTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "BuildTimeInMillis", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]GlobalOrdinalFieldStats, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + } + } + return nil +} + +// NewGlobalOrdinalsStats returns a GlobalOrdinalsStats. +func NewGlobalOrdinalsStats() *GlobalOrdinalsStats { + r := &GlobalOrdinalsStats{ + Fields: make(map[string]GlobalOrdinalFieldStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalprivilege.go new file mode 100644 index 000000000..60498844d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/globalprivilege.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// GlobalPrivilege type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L432-L434 +type GlobalPrivilege struct { + Application ApplicationGlobalUserPrivileges `json:"application"` +} + +// NewGlobalPrivilege returns a GlobalPrivilege. +func NewGlobalPrivilege() *GlobalPrivilege { + r := &GlobalPrivilege{} + + return r +} + +type GlobalPrivilegeVariant interface { + GlobalPrivilegeCaster() *GlobalPrivilege +} + +func (s *GlobalPrivilege) GlobalPrivilegeCaster() *GlobalPrivilege { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googleaistudioservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googleaistudioservicesettings.go new file mode 100644 index 000000000..7888ef01e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googleaistudioservicesettings.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GoogleAiStudioServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1305-L1321 +type GoogleAiStudioServiceSettings struct { + // ApiKey A valid API key of your Google Gemini account. + ApiKey string `json:"api_key"` + // ModelId The name of the model to use for the inference task. + // Refer to the Google documentation for the list of supported models. 
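+	//
+	// An illustrative create-endpoint payload that carries these settings,
+	// with placeholder values:
+	//
+	//	{
+	//	  "service": "googleaistudio",
+	//	  "service_settings": {
+	//	    "api_key": "<api-key>",
+	//	    "model_id": "<model>"
+	//	  }
+	//	}
+	//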
+ ModelId string `json:"model_id"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Google AI Studio. + // By default, the `googleaistudio` service sets the number of requests allowed + // per minute to 360. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` +} + +func (s *GoogleAiStudioServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + } + } + return nil +} + +// NewGoogleAiStudioServiceSettings returns a GoogleAiStudioServiceSettings. +func NewGoogleAiStudioServiceSettings() *GoogleAiStudioServiceSettings { + r := &GoogleAiStudioServiceSettings{} + + return r +} + +type GoogleAiStudioServiceSettingsVariant interface { + GoogleAiStudioServiceSettingsCaster() *GoogleAiStudioServiceSettings +} + +func (s *GoogleAiStudioServiceSettings) GoogleAiStudioServiceSettingsCaster() *GoogleAiStudioServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlenormalizeddistanceheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlenormalizeddistanceheuristic.go new file mode 100644 index 000000000..d99287bbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlenormalizeddistanceheuristic.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GoogleNormalizedDistanceHeuristic type. 
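+//
+// Selected inside a `significant_terms` aggregation via the `gnd` key, e.g.
+// (the field name is illustrative):
+//
+//	{
+//	  "significant_terms": {
+//	    "field": "tags",
+//	    "gnd": { "background_is_superset": false }
+//	  }
+//	}
+//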
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L793-L798 +type GoogleNormalizedDistanceHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. + BackgroundIsSuperset *bool `json:"background_is_superset,omitempty"` +} + +func (s *GoogleNormalizedDistanceHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackgroundIsSuperset", err) + } + s.BackgroundIsSuperset = &value + case bool: + s.BackgroundIsSuperset = &v + } + + } + } + return nil +} + +// NewGoogleNormalizedDistanceHeuristic returns a GoogleNormalizedDistanceHeuristic. +func NewGoogleNormalizedDistanceHeuristic() *GoogleNormalizedDistanceHeuristic { + r := &GoogleNormalizedDistanceHeuristic{} + + return r +} + +type GoogleNormalizedDistanceHeuristicVariant interface { + GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic +} + +func (s *GoogleNormalizedDistanceHeuristic) GoogleNormalizedDistanceHeuristicCaster() *GoogleNormalizedDistanceHeuristic { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaiservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaiservicesettings.go new file mode 100644 index 000000000..6c584ac16 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaiservicesettings.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GoogleVertexAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1332-L1358 +type GoogleVertexAIServiceSettings struct { + // Location The name of the location to use for the inference task. + // Refer to the Google documentation for the list of supported locations. + Location string `json:"location"` + // ModelId The name of the model to use for the inference task. 
+ // Refer to the Google documentation for the list of supported models.
+ ModelId string `json:"model_id"`
+ // ProjectId The name of the project to use for the inference task.
+ ProjectId string `json:"project_id"`
+ // RateLimit This setting helps to minimize the number of rate limit errors returned from
+ // Google Vertex AI.
+ // By default, the `googlevertexai` service sets the number of requests allowed
+ // per minute to 30,000.
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"`
+ // ServiceAccountJson A valid service account in JSON format for the Google Vertex AI API.
+ ServiceAccountJson string `json:"service_account_json"`
+}
+
+func (s *GoogleVertexAIServiceSettings) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "location":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Location", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Location = o
+
+ case "model_id":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ModelId", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ModelId = o
+
+ case "project_id":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ProjectId", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ProjectId = o
+
+ case "rate_limit":
+ if err := dec.Decode(&s.RateLimit); err != nil {
+ return fmt.Errorf("%s | %w", "RateLimit", err)
+ }
+
+ case "service_account_json":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ServiceAccountJson", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ServiceAccountJson = o
+
+ }
+ }
+ return nil
+}
+
+// NewGoogleVertexAIServiceSettings returns a GoogleVertexAIServiceSettings.
+func NewGoogleVertexAIServiceSettings() *GoogleVertexAIServiceSettings {
+ r := &GoogleVertexAIServiceSettings{}
+
+ return r
+}
+
+type GoogleVertexAIServiceSettingsVariant interface {
+ GoogleVertexAIServiceSettingsCaster() *GoogleVertexAIServiceSettings
+}
+
+func (s *GoogleVertexAIServiceSettings) GoogleVertexAIServiceSettingsCaster() *GoogleVertexAIServiceSettings {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaitasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaitasksettings.go
new file mode 100644
index 000000000..b0166dbee
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/googlevertexaitasksettings.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GoogleVertexAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1360-L1369 +type GoogleVertexAITaskSettings struct { + // AutoTruncate For a `text_embedding` task, truncate inputs longer than the maximum token + // length automatically. + AutoTruncate *bool `json:"auto_truncate,omitempty"` + // TopN For a `rerank` task, the number of the top N documents that should be + // returned. + TopN *int `json:"top_n,omitempty"` +} + +func (s *GoogleVertexAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_truncate": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoTruncate", err) + } + s.AutoTruncate = &value + case bool: + s.AutoTruncate = &v + } + + case "top_n": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopN", err) + } + s.TopN = &value + case float64: + f := int(v) + s.TopN = &f + } + + } + } + return nil +} + +// NewGoogleVertexAITaskSettings returns a GoogleVertexAITaskSettings. +func NewGoogleVertexAITaskSettings() *GoogleVertexAITaskSettings { + r := &GoogleVertexAITaskSettings{} + + return r +} + +type GoogleVertexAITaskSettingsVariant interface { + GoogleVertexAITaskSettingsCaster() *GoogleVertexAITaskSettings +} + +func (s *GoogleVertexAITaskSettings) GoogleVertexAITaskSettingsCaster() *GoogleVertexAITaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grantapikey.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grantapikey.go new file mode 100644 index 000000000..fbf150da8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grantapikey.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// GrantApiKey type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/grant_api_key/types.ts#L25-L45
+type GrantApiKey struct {
+ // Expiration Expiration time for the API key. By default, API keys never expire.
+ Expiration *string `json:"expiration,omitempty"`
+ // Metadata Arbitrary metadata that you want to associate with the API key.
+ // It supports a nested data structure.
+ // Within the `metadata` object, keys beginning with `_` are reserved for system
+ // usage.
+ Metadata Metadata `json:"metadata,omitempty"`
+ Name string `json:"name"`
+ // RoleDescriptors The role descriptors for this API key.
+ // When it is not specified or is an empty array, the API key has a point in
+ // time snapshot of permissions of the specified user or access token.
+ // If you supply role descriptors, the resultant permissions are an intersection
+ // of the API key's permissions and the permissions of the user or access token.
+ RoleDescriptors []map[string]RoleDescriptor `json:"role_descriptors,omitempty"`
+}
+
+func (s *GrantApiKey) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "expiration":
+ if err := dec.Decode(&s.Expiration); err != nil {
+ return fmt.Errorf("%s | %w", "Expiration", err)
+ }
+
+ case "metadata":
+ if err := dec.Decode(&s.Metadata); err != nil {
+ return fmt.Errorf("%s | %w", "Metadata", err)
+ }
+
+ case "name":
+ if err := dec.Decode(&s.Name); err != nil {
+ return fmt.Errorf("%s | %w", "Name", err)
+ }
+
+ case "role_descriptors":
+
+ rawMsg := json.RawMessage{}
+ dec.Decode(&rawMsg)
+ source := bytes.NewReader(rawMsg)
+ localDec := json.NewDecoder(source)
+ switch rawMsg[0] {
+ case '{':
+ o := make(map[string]RoleDescriptor, 0)
+ if err := localDec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "RoleDescriptors", err)
+ }
+ s.RoleDescriptors = append(s.RoleDescriptors, o)
+ case '[':
+ o := make([]map[string]RoleDescriptor, 0)
+ if err := localDec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "RoleDescriptors", err)
+ }
+ s.RoleDescriptors = o
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewGrantApiKey returns a GrantApiKey.
+func NewGrantApiKey() *GrantApiKey {
+ r := &GrantApiKey{}
+
+ return r
+}
+
+type GrantApiKeyVariant interface {
+ GrantApiKeyCaster() *GrantApiKey
+}
+
+func (s *GrantApiKey) GrantApiKeyCaster() *GrantApiKey {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greaterthanvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greaterthanvalidation.go
new file mode 100644
index 000000000..f5cd4c365
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greaterthanvalidation.go
@@ -0,0 +1,112 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GreaterThanValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L63-L66 +type GreaterThanValidation struct { + Constraint Float64 `json:"constraint"` + Type string `json:"type,omitempty"` +} + +func (s *GreaterThanValidation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "constraint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Constraint", err) + } + f := Float64(value) + s.Constraint = f + case float64: + f := Float64(v) + s.Constraint = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GreaterThanValidation) MarshalJSON() ([]byte, error) { + type innerGreaterThanValidation GreaterThanValidation + tmp := innerGreaterThanValidation{ + Constraint: s.Constraint, + Type: s.Type, + } + + tmp.Type = "greater_than" + + return json.Marshal(tmp) +} + +// NewGreaterThanValidation returns a GreaterThanValidation. +func NewGreaterThanValidation() *GreaterThanValidation { + r := &GreaterThanValidation{} + + return r +} + +type GreaterThanValidationVariant interface { + GreaterThanValidationCaster() *GreaterThanValidation +} + +func (s *GreaterThanValidation) GreaterThanValidationCaster() *GreaterThanValidation { + return s +} + +func (s *GreaterThanValidation) ValidationCaster() *Validation { + o := Validation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greekanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greekanalyzer.go new file mode 100644 index 000000000..2157fb53a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/greekanalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GreekAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L193-L197 +type GreekAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *GreekAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s GreekAnalyzer) MarshalJSON() ([]byte, error) { + type innerGreekAnalyzer GreekAnalyzer + tmp := innerGreekAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "greek" + + return json.Marshal(tmp) +} + +// NewGreekAnalyzer returns a GreekAnalyzer. +func NewGreekAnalyzer() *GreekAnalyzer { + r := &GreekAnalyzer{} + + return r +} + +type GreekAnalyzerVariant interface { + GreekAnalyzerCaster() *GreekAnalyzer +} + +func (s *GreekAnalyzer) GreekAnalyzerCaster() *GreekAnalyzer { + return s +} + +func (s *GreekAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grokprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grokprocessor.go new file mode 100644 index 000000000..b9b01a579 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/grokprocessor.go @@ -0,0 +1,210 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// GrokProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L951-L982
+type GrokProcessor struct {
+ // Description Description of the processor.
+ // Useful for describing the purpose of the processor or its configuration.
+ Description *string `json:"description,omitempty"`
+ // EcsCompatibility Must be disabled or v1. If v1, the processor uses patterns with Elastic
+ // Common Schema (ECS) field names.
+ EcsCompatibility *string `json:"ecs_compatibility,omitempty"`
+ // Field The field to use for grok expression parsing.
+ Field string `json:"field"`
+ // If Conditionally execute the processor.
+ If *Script `json:"if,omitempty"`
+ // IgnoreFailure Ignore failures for the processor.
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+ // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly
+ // exits without modifying the document.
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"`
+ // OnFailure Handle failures for the processor.
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+ // PatternDefinitions A map of pattern-name and pattern tuples defining custom patterns to be used
+ // by the current processor.
+ // Patterns matching existing names will override the pre-existing definition.
+ PatternDefinitions map[string]string `json:"pattern_definitions,omitempty"`
+ // Patterns An ordered list of grok expressions to match and extract named captures with.
+ // Returns on the first expression in the list that matches.
+ Patterns []string `json:"patterns"`
+ // Tag Identifier for the processor.
+ // Useful for debugging and metrics.
+ Tag *string `json:"tag,omitempty"`
+ // TraceMatch When `true`, `_ingest._grok_match_index` will be inserted into your matched
+ // document’s metadata with the index into the pattern found in `patterns` that
+ // matched.
+ TraceMatch *bool `json:"trace_match,omitempty"` +} + +func (s *GrokProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "ecs_compatibility": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EcsCompatibility", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EcsCompatibility = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "pattern_definitions": + if s.PatternDefinitions == nil { + s.PatternDefinitions = make(map[string]string, 0) + } + if err := dec.Decode(&s.PatternDefinitions); err != nil { + return fmt.Errorf("%s | %w", "PatternDefinitions", err) + } + + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return fmt.Errorf("%s | %w", "Patterns", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "trace_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TraceMatch", err) + } + s.TraceMatch = &value + case bool: + s.TraceMatch = &v + } + + } + } + return nil +} + +// NewGrokProcessor returns a GrokProcessor. +func NewGrokProcessor() *GrokProcessor { + r := &GrokProcessor{ + PatternDefinitions: make(map[string]string), + } + + return r +} + +type GrokProcessorVariant interface { + GrokProcessorCaster() *GrokProcessor +} + +func (s *GrokProcessor) GrokProcessorCaster() *GrokProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/groupings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/groupings.go new file mode 100644 index 000000000..46bf051ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/groupings.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Groupings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Groupings.ts#L24-L40 +type Groupings struct { + // DateHistogram A date histogram group aggregates a date field into time-based buckets. + // This group is mandatory; you currently cannot roll up documents without a + // timestamp and a `date_histogram` group. + DateHistogram *DateHistogramGrouping `json:"date_histogram,omitempty"` + // Histogram The histogram group aggregates one or more numeric fields into numeric + // histogram intervals. + Histogram *HistogramGrouping `json:"histogram,omitempty"` + // Terms The terms group can be used on keyword or numeric fields to allow bucketing + // via the terms aggregation at a later point. + // The indexer enumerates and stores all values of a field for each time-period. + // This can be potentially costly for high-cardinality groups such as IP + // addresses, especially if the time-bucket is particularly sparse. + Terms *TermsGrouping `json:"terms,omitempty"` +} + +// NewGroupings returns a Groupings. +func NewGroupings() *Groupings { + r := &Groupings{} + + return r +} + +type GroupingsVariant interface { + GroupingsCaster() *Groupings +} + +func (s *Groupings) GroupingsCaster() *Groupings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gsubprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gsubprocessor.go new file mode 100644 index 000000000..325cbe3d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/gsubprocessor.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// GsubProcessor type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L984-L1008
+type GsubProcessor struct {
+ // Description Description of the processor.
+ // Useful for describing the purpose of the processor or its configuration.
+ Description *string `json:"description,omitempty"`
+ // Field The field to apply the replacement to.
+ Field string `json:"field"`
+ // If Conditionally execute the processor.
+ If *Script `json:"if,omitempty"`
+ // IgnoreFailure Ignore failures for the processor.
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+ // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly
+ // exits without modifying the document.
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"`
+ // OnFailure Handle failures for the processor.
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+ // Pattern The pattern to be replaced.
+ Pattern string `json:"pattern"`
+ // Replacement The string to replace the matching patterns with.
+ Replacement string `json:"replacement"`
+ // Tag Identifier for the processor.
+ // Useful for debugging and metrics.
+ Tag *string `json:"tag,omitempty"`
+ // TargetField The field to assign the converted value to.
+ // By default, the `field` is updated in-place.
+ TargetField *string `json:"target_field,omitempty"`
+}
+
+func (s *GsubProcessor) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "description":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Description", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Description = &o
+
+ case "field":
+ if err := dec.Decode(&s.Field); err != nil {
+ return fmt.Errorf("%s | %w", "Field", err)
+ }
+
+ case "if":
+ if err := dec.Decode(&s.If); err != nil {
+ return fmt.Errorf("%s | %w", "If", err)
+ }
+
+ case "ignore_failure":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "IgnoreFailure", err)
+ }
+ s.IgnoreFailure = &value
+ case bool:
+ s.IgnoreFailure = &v
+ }
+
+ case "ignore_missing":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "IgnoreMissing", err)
+ }
+ s.IgnoreMissing = &value
+ case bool:
+ s.IgnoreMissing = &v
+ }
+
+ case "on_failure":
+ if err := dec.Decode(&s.OnFailure); err != nil {
+ return fmt.Errorf("%s | %w", "OnFailure", err)
+ }
+
+ case "pattern":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Pattern", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Pattern = o
+
+ case "replacement":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Replacement", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Replacement = o
+
+ case "tag":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Tag", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = 
string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewGsubProcessor returns a GsubProcessor. +func NewGsubProcessor() *GsubProcessor { + r := &GsubProcessor{} + + return r +} + +type GsubProcessorVariant interface { + GsubProcessorCaster() *GsubProcessor +} + +func (s *GsubProcessor) GsubProcessorCaster() *GsubProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/halffloatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/halffloatnumberproperty.go new file mode 100644 index 000000000..a932fd133 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/halffloatnumberproperty.go @@ -0,0 +1,999 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// HalfFloatNumberProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L162-L165 +type HalfFloatNumberProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *float32 `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. 
+ TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+ // TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+ // Defaults to false.
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"`
+ Type string `json:"type,omitempty"`
+}
+
+func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "boost":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Boost", err)
+ }
+ f := Float64(value)
+ s.Boost = &f
+ case float64:
+ f := Float64(v)
+ s.Boost = &f
+ }
+
+ case "coerce":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Coerce", err)
+ }
+ s.Coerce = &value
+ case bool:
+ s.Coerce = &v
+ }
+
+ case "copy_to":
+ rawMsg := json.RawMessage{}
+ dec.Decode(&rawMsg)
+ if !bytes.HasPrefix(rawMsg, []byte("[")) {
+ o := new(string)
+ if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "CopyTo", err)
+ }
+
+ s.CopyTo = append(s.CopyTo, *o)
+ } else {
+ if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil {
+ return fmt.Errorf("%s | %w", "CopyTo", err)
+ }
+ }
+
+ case "doc_values":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "DocValues", err)
+ }
+ s.DocValues = &value
+ case bool:
+ s.DocValues = &v
+ }
+
+ case "dynamic":
+ if err := dec.Decode(&s.Dynamic); err != nil {
+ return fmt.Errorf("%s | %w", "Dynamic", err)
+ }
+
+ case "fields":
+ if s.Fields == nil {
+ s.Fields = make(map[string]Property, 0)
+ }
+ refs := make(map[string]json.RawMessage, 0)
+ dec.Decode(&refs)
+ for key, message := range refs {
+ kind := make(map[string]any)
+ buf := bytes.NewReader(message)
+ localDec := json.NewDecoder(buf)
+ localDec.Decode(&kind)
+ buf.Seek(0, io.SeekStart)
+ if _, ok := kind["type"]; !ok {
+ kind["type"] = "object"
+ }
+ switch kind["type"] {
+ case "binary":
+ oo := NewBinaryProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "boolean":
+ oo := NewBooleanProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "{dynamic_type}":
+ oo := NewDynamicProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "join":
+ oo := NewJoinProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "keyword":
+ oo := NewKeywordProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "match_only_text":
+ oo := NewMatchOnlyTextProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "percolator":
+ oo := NewPercolatorProperty()
+ if err := localDec.Decode(&oo); err != nil {
+ return fmt.Errorf("Fields | %w", err)
+ }
+ s.Fields[key] = oo
+ case "rank_feature":
+ oo := 
NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + f := float32(value) + s.NullValue = &f + case float64: + f := float32(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo 
:= NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := 
NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerHalfFloatNumberProperty HalfFloatNumberProperty + tmp := innerHalfFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "half_float" + + return json.Marshal(tmp) +} + +// NewHalfFloatNumberProperty returns a HalfFloatNumberProperty. 
+func NewHalfFloatNumberProperty() *HalfFloatNumberProperty { + r := &HalfFloatNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type HalfFloatNumberPropertyVariant interface { + HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty +} + +func (s *HalfFloatNumberProperty) HalfFloatNumberPropertyCaster() *HalfFloatNumberProperty { + return s +} + +func (s *HalfFloatNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/haschildquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/haschildquery.go new file mode 100644 index 000000000..07889ef72 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/haschildquery.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode" +) + +// HasChildQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/joining.ts#L41-L79 +type HasChildQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `type` and not return any documents + // instead of an error. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // MaxChildren Maximum number of child documents that match the query allowed for a returned + // parent document. + // If the parent document exceeds this limit, it is excluded from the search + // results. + MaxChildren *int `json:"max_children,omitempty"` + // MinChildren Minimum number of child documents that match the query required to match the + // query for a returned parent document. + // If the parent document does not meet this limit, it is excluded from the + // search results. + MinChildren *int `json:"min_children,omitempty"` + // Query Query you wish to run on child documents of the `type` field. 
+ // If a child document matches the search, the query returns the parent + // document. + Query Query `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode Indicates how scores for matching child documents affect the root parent + // document’s relevance score. + ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` + // Type Name of the child relationship mapped for the `join` field. + Type string `json:"type"` +} + +func (s *HasChildQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + case "max_children": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChildren", err) + } + s.MaxChildren = &value + case float64: + f := int(v) + s.MaxChildren = &f + } + + case "min_children": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinChildren", err) + } + s.MinChildren = &value + case float64: + f := int(v) + s.MinChildren = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return fmt.Errorf("%s | %w", "ScoreMode", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewHasChildQuery returns a HasChildQuery. +func NewHasChildQuery() *HasChildQuery { + r := &HasChildQuery{} + + return r +} + +type HasChildQueryVariant interface { + HasChildQueryCaster() *HasChildQuery +} + +func (s *HasChildQuery) HasChildQueryCaster() *HasChildQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasparentquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasparentquery.go new file mode 100644 index 000000000..9d46a2b85 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasparentquery.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HasParentQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/joining.ts#L81-L110 +type HasParentQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `parent_type` and not return any + // documents instead of an error. + // You can use this parameter to query multiple indices that may not contain the + // `parent_type`. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // ParentType Name of the parent relationship mapped for the `join` field. + ParentType string `json:"parent_type"` + // Query Query you wish to run on parent documents of the `parent_type` field. + // If a parent document matches the search, the query returns its child + // documents. + Query Query `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Score Indicates whether the relevance score of a matching parent document is + // aggregated into its child documents. 
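+//
+// An illustrative sketch (editorial addition, not generated code) of building
+// a has_parent query, assuming a child-document Query value q built elsewhere
+// and a hypothetical join relation named "question":
+//
+//	hp := NewHasParentQuery()
+//	hp.ParentType = "question"
+//	hp.Query = q
+//	score := true
+//	hp.Score = &score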
+ Score *bool `json:"score,omitempty"` +} + +func (s *HasParentQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + case "parent_type": + if err := dec.Decode(&s.ParentType); err != nil { + return fmt.Errorf("%s | %w", "ParentType", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + s.Score = &value + case bool: + s.Score = &v + } + + } + } + return nil +} + +// NewHasParentQuery returns a HasParentQuery. +func NewHasParentQuery() *HasParentQuery { + r := &HasParentQuery{} + + return r +} + +type HasParentQueryVariant interface { + HasParentQueryCaster() *HasParentQuery +} + +func (s *HasParentQuery) HasParentQueryCaster() *HasParentQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasprivilegesuserprofileerrors.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasprivilegesuserprofileerrors.go new file mode 100644 index 000000000..00c715b84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hasprivilegesuserprofileerrors.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
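+//
+// Editorial note (not generated code): the decoder for
+// HasPrivilegesUserProfileErrors below accepts "count" as either a JSON
+// number or a numeric string, for example:
+//
+//	var e HasPrivilegesUserProfileErrors
+//	_ = json.Unmarshal([]byte(`{"count":"2","details":{}}`), &e)
+//	// e.Count == 2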
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HasPrivilegesUserProfileErrors type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges_user_profile/types.ts#L39-L42 +type HasPrivilegesUserProfileErrors struct { + Count int64 `json:"count"` + Details map[string]ErrorCause `json:"details"` +} + +func (s *HasPrivilegesUserProfileErrors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "details": + if s.Details == nil { + s.Details = make(map[string]ErrorCause, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + } + } + return nil +} + +// NewHasPrivilegesUserProfileErrors returns a HasPrivilegesUserProfileErrors. +func NewHasPrivilegesUserProfileErrors() *HasPrivilegesUserProfileErrors { + r := &HasPrivilegesUserProfileErrors{ + Details: make(map[string]ErrorCause), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrmethod.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrmethod.go new file mode 100644 index 000000000..6557938ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrmethod.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HdrMethod type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L237-L242 +type HdrMethod struct { + // NumberOfSignificantValueDigits Specifies the resolution of values for the histogram in number of significant + // digits. 
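+//
+// An illustrative sketch (editorial addition, not generated code): requesting
+// three significant digits for an HDR-based percentiles aggregation:
+//
+//	hdr := NewHdrMethod()
+//	digits := 3
+//	hdr.NumberOfSignificantValueDigits = &digits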
+ NumberOfSignificantValueDigits *int `json:"number_of_significant_value_digits,omitempty"` +} + +func (s *HdrMethod) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_significant_value_digits": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfSignificantValueDigits", err) + } + s.NumberOfSignificantValueDigits = &value + case float64: + f := int(v) + s.NumberOfSignificantValueDigits = &f + } + + } + } + return nil +} + +// NewHdrMethod returns a HdrMethod. +func NewHdrMethod() *HdrMethod { + r := &HdrMethod{} + + return r +} + +type HdrMethodVariant interface { + HdrMethodCaster() *HdrMethod +} + +func (s *HdrMethod) HdrMethodCaster() *HdrMethod { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentileranksaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentileranksaggregate.go new file mode 100644 index 000000000..874578283 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentileranksaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HdrPercentileRanksAggregate type. 
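+//
+// Editorial note (not generated code): Values is a Percentiles union, and the
+// decoder below accepts either the keyed-object or the array response form:
+//
+//	var agg HdrPercentileRanksAggregate
+//	_ = json.Unmarshal([]byte(`{"values":{"99.0":3.5}}`), &agg)
+//	// agg.Values now holds a map[string]any; an array body would decode
+//	// into []ArrayPercentilesItem instead.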
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L171-L172 +type HdrPercentileRanksAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *HdrPercentileRanksAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + } + + } + } + return nil +} + +// NewHdrPercentileRanksAggregate returns a HdrPercentileRanksAggregate. +func NewHdrPercentileRanksAggregate() *HdrPercentileRanksAggregate { + r := &HdrPercentileRanksAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentilesaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentilesaggregate.go new file mode 100644 index 000000000..7c57d4f5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hdrpercentilesaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HdrPercentilesAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L168-L169 +type HdrPercentilesAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *HdrPercentilesAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + } + + } + } + return nil +} + +// NewHdrPercentilesAggregate returns a HdrPercentilesAggregate. +func NewHdrPercentilesAggregate() *HdrPercentilesAggregate { + r := &HdrPercentilesAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthrecord.go new file mode 100644 index 000000000..72f9bbc55 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthrecord.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HealthRecord type. 
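+//
+// Editorial note (not generated code): the decoder below also accepts the cat
+// health API's short column aliases, so abbreviated headers decode too:
+//
+//	var rec HealthRecord
+//	_ = json.Unmarshal([]byte(`{"st":"green","cl":"demo"}`), &rec)
+//	// *rec.Status == "green" and *rec.Cluster == "demo" ("demo" is a
+//	// hypothetical cluster name).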
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/health/types.ts#L23-L99 +type HealthRecord struct { + // ActiveShardsPercent active number of shards in percent + ActiveShardsPercent *string `json:"active_shards_percent,omitempty"` + // Cluster cluster name + Cluster *string `json:"cluster,omitempty"` + // Epoch seconds since 1970-01-01 00:00:00 + Epoch StringifiedEpochTimeUnitSeconds `json:"epoch,omitempty"` + // Init number of initializing nodes + Init *string `json:"init,omitempty"` + // MaxTaskWaitTime wait time of longest task pending + MaxTaskWaitTime *string `json:"max_task_wait_time,omitempty"` + // NodeData number of nodes that can store data + NodeData *string `json:"node.data,omitempty"` + // NodeTotal total number of nodes + NodeTotal *string `json:"node.total,omitempty"` + // PendingTasks number of pending tasks + PendingTasks *string `json:"pending_tasks,omitempty"` + // Pri number of primary shards + Pri *string `json:"pri,omitempty"` + // Relo number of relocating nodes + Relo *string `json:"relo,omitempty"` + // Shards total number of shards + Shards *string `json:"shards,omitempty"` + // Status health status + Status *string `json:"status,omitempty"` + // Timestamp time in HH:MM:SS + Timestamp *string `json:"timestamp,omitempty"` + // Unassign number of unassigned shards + Unassign *string `json:"unassign,omitempty"` + // UnassignPri number of unassigned primary shards + UnassignPri *string `json:"unassign.pri,omitempty"` +} + +func (s *HealthRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_shards_percent", "asp", "activeShardsPercent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ActiveShardsPercent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ActiveShardsPercent = &o + + case "cluster", "cl": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cluster = &o + + case "epoch", "time": + if err := dec.Decode(&s.Epoch); err != nil { + return fmt.Errorf("%s | %w", "Epoch", err) + } + + case "init", "i", "shards.initializing", "shardsInitializing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Init", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Init = &o + + case "max_task_wait_time", "mtwt", "maxTaskWaitTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxTaskWaitTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxTaskWaitTime = &o + + case "node.data", "nd", "nodeData": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeData", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeData = &o + + case "node.total", "nt", "nodeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeTotal", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeTotal = &o + + case "pending_tasks", "pt", "pendingTasks": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PendingTasks", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PendingTasks = &o + + case "pri", "p", "shards.primary", "shardsPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pri", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pri = &o + + case "relo", "r", "shards.relocating", "shardsRelocating": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Relo", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Relo = &o + + case "shards", "t", "sh", "shards.total", "shardsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shards = &o + + case "status", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + case "timestamp", "ts", "hms", "hhmmss": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "unassign", "u", "shards.unassigned", "shardsUnassigned": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Unassign", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Unassign = &o + + case "unassign.pri", "up", "shards.unassigned.primary", "shardsUnassignedPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignPri", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignPri = &o + + } + } + return nil +} + +// NewHealthRecord returns a HealthRecord. +func NewHealthRecord() *HealthRecord { + r := &HealthRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthstatistics.go new file mode 100644 index 000000000..9164232ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/healthstatistics.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HealthStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L150-L152 +type HealthStatistics struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + Invocations Invocations `json:"invocations"` +} + +func (s *HealthStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "invocations": + if err := dec.Decode(&s.Invocations); err != nil { + return fmt.Errorf("%s | %w", "Invocations", err) + } + + } + } + return nil +} + +// NewHealthStatistics returns a HealthStatistics. +func NewHealthStatistics() *HealthStatistics { + r := &HealthStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlight.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlight.go new file mode 100644 index 000000000..da445c848 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlight.go @@ -0,0 +1,406 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterencoder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype" +) + +// Highlight type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L152-L157 +type Highlight struct { + // BoundaryChars A string that contains each boundary character. + BoundaryChars *string `json:"boundary_chars,omitempty"` + // BoundaryMaxScan How far to scan for boundary characters. + BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` + // BoundaryScanner Specifies how to break the highlighted fragments: chars, sentence, or word. + // Only valid for the unified and fvh highlighters. + // Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for + // the `fvh` highlighter. + BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` + // BoundaryScannerLocale Controls which locale is used to search for sentence and word boundaries. + // This parameter takes a form of a language tag, for example: `"en-US"`, + // `"fr-FR"`, `"ja-JP"`. + BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` + Encoder *highlighterencoder.HighlighterEncoder `json:"encoder,omitempty"` + Fields []map[string]HighlightField `json:"fields"` + ForceSource *bool `json:"force_source,omitempty"` + // FragmentSize The size of the highlighted fragment in characters. + FragmentSize *int `json:"fragment_size,omitempty"` + // Fragmenter Specifies how text should be broken up in highlight snippets: `simple` or + // `span`. + // Only valid for the `plain` highlighter. + Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` + HighlightFilter *bool `json:"highlight_filter,omitempty"` + // HighlightQuery Highlight matches for a query other than the search query. + // This is especially useful if you use a rescore query because those are not + // taken into account by highlighting by default. + HighlightQuery *Query `json:"highlight_query,omitempty"` + // MaxAnalyzedOffset If set to a non-negative value, highlighting stops at this defined maximum + // limit. + // The rest of the text is not processed, thus not highlighted and no error is + // returned + // The `max_analyzed_offset` query setting does not override the + // `index.highlight.max_analyzed_offset` setting, which prevails when it’s set + // to lower value than the query setting. + MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` + MaxFragmentLength *int `json:"max_fragment_length,omitempty"` + // NoMatchSize The amount of text you want to return from the beginning of the field if + // there are no matching fragments to highlight. + NoMatchSize *int `json:"no_match_size,omitempty"` + // NumberOfFragments The maximum number of fragments to return. + // If the number of fragments is set to `0`, no fragments are returned. 
+ // Instead, the entire field contents are highlighted and returned. + // This can be handy when you need to highlight short texts such as a title or + // address, but fragmentation is not required. + // If `number_of_fragments` is `0`, `fragment_size` is ignored. + NumberOfFragments *int `json:"number_of_fragments,omitempty"` + Options map[string]json.RawMessage `json:"options,omitempty"` + // Order Sorts highlighted fragments by score when set to `score`. + // By default, fragments will be output in the order they appear in the field + // (order: `none`). + // Setting this option to `score` will output the most relevant fragments first. + // Each highlighter applies its own logic to compute relevancy scores. + Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` + // PhraseLimit Controls the number of matching phrases in a document that are considered. + // Prevents the `fvh` highlighter from analyzing too many phrases and consuming + // too much memory. + // When using `matched_fields`, `phrase_limit` phrases per matched field are + // considered. Raising the limit increases query time and consumes more memory. + // Only supported by the `fvh` highlighter. + PhraseLimit *int `json:"phrase_limit,omitempty"` + // PostTags Use in conjunction with `pre_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PostTags []string `json:"post_tags,omitempty"` + // PreTags Use in conjunction with `post_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PreTags []string `json:"pre_tags,omitempty"` + // RequireFieldMatch By default, only fields that contains a query match are highlighted. + // Set to `false` to highlight all fields. + RequireFieldMatch *bool `json:"require_field_match,omitempty"` + // TagsSchema Set to `styled` to use the built-in tag schema. 
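+//
+// An illustrative sketch (editorial addition, not generated code) of a
+// highlight request body with custom tags and one per-field entry ("title" is
+// a hypothetical field name):
+//
+//	hl := NewHighlight()
+//	hl.PreTags = []string{"<em>"}
+//	hl.PostTags = []string{"</em>"}
+//	size := 150
+//	hl.FragmentSize = &size
+//	hl.Fields = append(hl.Fields, map[string]HighlightField{"title": {}})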
+ TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` + Type *highlightertype.HighlighterType `json:"type,omitempty"` +} + +func (s *Highlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boundary_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BoundaryChars", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryChars = &o + + case "boundary_max_scan": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BoundaryMaxScan", err) + } + s.BoundaryMaxScan = &value + case float64: + f := int(v) + s.BoundaryMaxScan = &f + } + + case "boundary_scanner": + if err := dec.Decode(&s.BoundaryScanner); err != nil { + return fmt.Errorf("%s | %w", "BoundaryScanner", err) + } + + case "boundary_scanner_locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BoundaryScannerLocale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryScannerLocale = &o + + case "encoder": + if err := dec.Decode(&s.Encoder); err != nil { + return fmt.Errorf("%s | %w", "Encoder", err) + } + + case "fields": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]HighlightField, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + s.Fields = append(s.Fields, o) + case '[': + o := make([]map[string]HighlightField, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + s.Fields = o + } + + case "force_source": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForceSource", err) + } + s.ForceSource = &value + case bool: + s.ForceSource = &v + } + + case "fragment_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FragmentSize", err) + } + s.FragmentSize = &value + case float64: + f := int(v) + s.FragmentSize = &f + } + + case "fragmenter": + if err := dec.Decode(&s.Fragmenter); err != nil { + return fmt.Errorf("%s | %w", "Fragmenter", err) + } + + case "highlight_filter": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HighlightFilter", err) + } + s.HighlightFilter = &value + case bool: + s.HighlightFilter = &v + } + + case "highlight_query": + if err := dec.Decode(&s.HighlightQuery); err != nil { + return fmt.Errorf("%s | %w", "HighlightQuery", err) + } + + case "max_analyzed_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnalyzedOffset", err) + } + s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f + } + + case "max_fragment_length": + + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxFragmentLength", err) + } + s.MaxFragmentLength = &value + case float64: + f := int(v) + s.MaxFragmentLength = &f + } + + case "no_match_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NoMatchSize", err) + } + s.NoMatchSize = &value + case float64: + f := int(v) + s.NoMatchSize = &f + } + + case "number_of_fragments": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfFragments", err) + } + s.NumberOfFragments = &value + case float64: + f := int(v) + s.NumberOfFragments = &f + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "phrase_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PhraseLimit", err) + } + s.PhraseLimit = &value + case float64: + f := int(v) + s.PhraseLimit = &f + } + + case "post_tags": + if err := dec.Decode(&s.PostTags); err != nil { + return fmt.Errorf("%s | %w", "PostTags", err) + } + + case "pre_tags": + if err := dec.Decode(&s.PreTags); err != nil { + return fmt.Errorf("%s | %w", "PreTags", err) + } + + case "require_field_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequireFieldMatch", err) + } + s.RequireFieldMatch = &value + case bool: + s.RequireFieldMatch = &v + } + + case "tags_schema": + if err := dec.Decode(&s.TagsSchema); err != nil { + return fmt.Errorf("%s | %w", "TagsSchema", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewHighlight returns a Highlight. +func NewHighlight() *Highlight { + r := &Highlight{ + Options: make(map[string]json.RawMessage), + } + + return r +} + +type HighlightVariant interface { + HighlightCaster() *Highlight +} + +func (s *Highlight) HighlightCaster() *Highlight { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlightfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlightfield.go new file mode 100644 index 000000000..8ffb94a07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/highlightfield.go @@ -0,0 +1,411 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype" +) + +// HighlightField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/highlighting.ts#L194-L197 +type HighlightField struct { + // BoundaryChars A string that contains each boundary character. + BoundaryChars *string `json:"boundary_chars,omitempty"` + // BoundaryMaxScan How far to scan for boundary characters. + BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` + // BoundaryScanner Specifies how to break the highlighted fragments: chars, sentence, or word. + // Only valid for the unified and fvh highlighters. + // Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for + // the `fvh` highlighter. + BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` + // BoundaryScannerLocale Controls which locale is used to search for sentence and word boundaries. + // This parameter takes a form of a language tag, for example: `"en-US"`, + // `"fr-FR"`, `"ja-JP"`. + BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` + ForceSource *bool `json:"force_source,omitempty"` + FragmentOffset *int `json:"fragment_offset,omitempty"` + // FragmentSize The size of the highlighted fragment in characters. + FragmentSize *int `json:"fragment_size,omitempty"` + // Fragmenter Specifies how text should be broken up in highlight snippets: `simple` or + // `span`. + // Only valid for the `plain` highlighter. + Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` + HighlightFilter *bool `json:"highlight_filter,omitempty"` + // HighlightQuery Highlight matches for a query other than the search query. + // This is especially useful if you use a rescore query because those are not + // taken into account by highlighting by default. + HighlightQuery *Query `json:"highlight_query,omitempty"` + MatchedFields []string `json:"matched_fields,omitempty"` + // MaxAnalyzedOffset If set to a non-negative value, highlighting stops at this defined maximum + // limit. + // The rest of the text is not processed, thus not highlighted and no error is + // returned + // The `max_analyzed_offset` query setting does not override the + // `index.highlight.max_analyzed_offset` setting, which prevails when it’s set + // to lower value than the query setting. 
+ MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` + MaxFragmentLength *int `json:"max_fragment_length,omitempty"` + // NoMatchSize The amount of text you want to return from the beginning of the field if + // there are no matching fragments to highlight. + NoMatchSize *int `json:"no_match_size,omitempty"` + // NumberOfFragments The maximum number of fragments to return. + // If the number of fragments is set to `0`, no fragments are returned. + // Instead, the entire field contents are highlighted and returned. + // This can be handy when you need to highlight short texts such as a title or + // address, but fragmentation is not required. + // If `number_of_fragments` is `0`, `fragment_size` is ignored. + NumberOfFragments *int `json:"number_of_fragments,omitempty"` + Options map[string]json.RawMessage `json:"options,omitempty"` + // Order Sorts highlighted fragments by score when set to `score`. + // By default, fragments will be output in the order they appear in the field + // (order: `none`). + // Setting this option to `score` will output the most relevant fragments first. + // Each highlighter applies its own logic to compute relevancy scores. + Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` + // PhraseLimit Controls the number of matching phrases in a document that are considered. + // Prevents the `fvh` highlighter from analyzing too many phrases and consuming + // too much memory. + // When using `matched_fields`, `phrase_limit` phrases per matched field are + // considered. Raising the limit increases query time and consumes more memory. + // Only supported by the `fvh` highlighter. + PhraseLimit *int `json:"phrase_limit,omitempty"` + // PostTags Use in conjunction with `pre_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PostTags []string `json:"post_tags,omitempty"` + // PreTags Use in conjunction with `post_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PreTags []string `json:"pre_tags,omitempty"` + // RequireFieldMatch By default, only fields that contains a query match are highlighted. + // Set to `false` to highlight all fields. + RequireFieldMatch *bool `json:"require_field_match,omitempty"` + // TagsSchema Set to `styled` to use the built-in tag schema. 
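+//
+// Editorial note (not generated code): MatchedFields tolerates both a single
+// string and an array when decoding, so either form below is accepted:
+//
+//	var f HighlightField
+//	_ = json.Unmarshal([]byte(`{"matched_fields":"title"}`), &f)
+//	// f.MatchedFields == []string{"title"}; ["title","title.plain"]
+//	// would decode the same way into the slice.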
+ TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` + Type *highlightertype.HighlighterType `json:"type,omitempty"` +} + +func (s *HighlightField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boundary_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BoundaryChars", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryChars = &o + + case "boundary_max_scan": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BoundaryMaxScan", err) + } + s.BoundaryMaxScan = &value + case float64: + f := int(v) + s.BoundaryMaxScan = &f + } + + case "boundary_scanner": + if err := dec.Decode(&s.BoundaryScanner); err != nil { + return fmt.Errorf("%s | %w", "BoundaryScanner", err) + } + + case "boundary_scanner_locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BoundaryScannerLocale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryScannerLocale = &o + + case "force_source": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForceSource", err) + } + s.ForceSource = &value + case bool: + s.ForceSource = &v + } + + case "fragment_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FragmentOffset", err) + } + s.FragmentOffset = &value + case float64: + f := int(v) + s.FragmentOffset = &f + } + + case "fragment_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FragmentSize", err) + } + s.FragmentSize = &value + case float64: + f := int(v) + s.FragmentSize = &f + } + + case "fragmenter": + if err := dec.Decode(&s.Fragmenter); err != nil { + return fmt.Errorf("%s | %w", "Fragmenter", err) + } + + case "highlight_filter": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HighlightFilter", err) + } + s.HighlightFilter = &value + case bool: + s.HighlightFilter = &v + } + + case "highlight_query": + if err := dec.Decode(&s.HighlightQuery); err != nil { + return fmt.Errorf("%s | %w", "HighlightQuery", err) + } + + case "matched_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MatchedFields", err) + } + + s.MatchedFields = append(s.MatchedFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MatchedFields); err != nil { + return fmt.Errorf("%s | %w", "MatchedFields", err) + } + } + + case "max_analyzed_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnalyzedOffset", err) + } + 
s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f + } + + case "max_fragment_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxFragmentLength", err) + } + s.MaxFragmentLength = &value + case float64: + f := int(v) + s.MaxFragmentLength = &f + } + + case "no_match_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NoMatchSize", err) + } + s.NoMatchSize = &value + case float64: + f := int(v) + s.NoMatchSize = &f + } + + case "number_of_fragments": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfFragments", err) + } + s.NumberOfFragments = &value + case float64: + f := int(v) + s.NumberOfFragments = &f + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "phrase_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PhraseLimit", err) + } + s.PhraseLimit = &value + case float64: + f := int(v) + s.PhraseLimit = &f + } + + case "post_tags": + if err := dec.Decode(&s.PostTags); err != nil { + return fmt.Errorf("%s | %w", "PostTags", err) + } + + case "pre_tags": + if err := dec.Decode(&s.PreTags); err != nil { + return fmt.Errorf("%s | %w", "PreTags", err) + } + + case "require_field_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequireFieldMatch", err) + } + s.RequireFieldMatch = &value + case bool: + s.RequireFieldMatch = &v + } + + case "tags_schema": + if err := dec.Decode(&s.TagsSchema); err != nil { + return fmt.Errorf("%s | %w", "TagsSchema", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewHighlightField returns a HighlightField. +func NewHighlightField() *HighlightField { + r := &HighlightField{ + Options: make(map[string]json.RawMessage), + } + + return r +} + +type HighlightFieldVariant interface { + HighlightFieldCaster() *HighlightField +} + +func (s *HighlightField) HighlightFieldCaster() *HighlightField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindianalyzer.go new file mode 100644 index 000000000..4ed41d628 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindianalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HindiAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L199-L204 +type HindiAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *HindiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HindiAnalyzer) MarshalJSON() ([]byte, error) { + type innerHindiAnalyzer HindiAnalyzer + tmp := innerHindiAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "hindi" + + return json.Marshal(tmp) +} + +// NewHindiAnalyzer returns a HindiAnalyzer. +func NewHindiAnalyzer() *HindiAnalyzer { + r := &HindiAnalyzer{} + + return r +} + +type HindiAnalyzerVariant interface { + HindiAnalyzerCaster() *HindiAnalyzer +} + +func (s *HindiAnalyzer) HindiAnalyzerCaster() *HindiAnalyzer { + return s +} + +func (s *HindiAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindinormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindinormalizationtokenfilter.go new file mode 100644 index 000000000..6d0ce58bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hindinormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HindiNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L502-L504 +type HindiNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *HindiNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HindiNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerHindiNormalizationTokenFilter HindiNormalizationTokenFilter + tmp := innerHindiNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "hindi_normalization" + + return json.Marshal(tmp) +} + +// NewHindiNormalizationTokenFilter returns a HindiNormalizationTokenFilter. +func NewHindiNormalizationTokenFilter() *HindiNormalizationTokenFilter { + r := &HindiNormalizationTokenFilter{} + + return r +} + +type HindiNormalizationTokenFilterVariant interface { + HindiNormalizationTokenFilterCaster() *HindiNormalizationTokenFilter +} + +func (s *HindiNormalizationTokenFilter) HindiNormalizationTokenFilterCaster() *HindiNormalizationTokenFilter { + return s +} + +func (s *HindiNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hint.go new file mode 100644 index 000000000..da7cb9289 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hint.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Hint type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/suggest_user_profiles/types.ts#L23-L34 +type Hint struct { + // Labels A single key-value pair to match against the labels section + // of a profile. A profile is considered matching if it matches + // at least one of the strings. + Labels map[string][]string `json:"labels,omitempty"` + // Uids A list of profile UIDs to match against. + Uids []string `json:"uids,omitempty"` +} + +// NewHint returns a Hint. +func NewHint() *Hint { + r := &Hint{ + Labels: make(map[string][]string), + } + + return r +} + +type HintVariant interface { + HintCaster() *Hint +} + +func (s *Hint) HintCaster() *Hint { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregate.go new file mode 100644 index 000000000..a7ccd6fa0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HistogramAggregate type. 
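+//
+// Buckets is a union that decodes as either a keyed map or a plain list,
+// mirroring the `keyed` flag of the originating request. A minimal read
+// sketch (the `agg` variable and the any-based union are assumed):
+//
+//	switch b := agg.Buckets.(type) {
+//	case []HistogramBucket:
+//		for _, bucket := range b {
+//			_ = bucket.Key // lower bound of the bucket
+//		}
+//	case map[string]HistogramBucket:
+//		// keyed form: map keys are the buckets' string keys
+//	}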
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L372-L376 +type HistogramAggregate struct { + Buckets BucketsHistogramBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *HistogramAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]HistogramBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []HistogramBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewHistogramAggregate returns a HistogramAggregate. +func NewHistogramAggregate() *HistogramAggregate { + r := &HistogramAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregation.go new file mode 100644 index 000000000..b5103710d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramaggregation.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +// HistogramAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L519-L565 +type HistogramAggregation struct { + // ExtendedBounds Enables extending the bounds of the histogram beyond the data itself. + ExtendedBounds *ExtendedBoundsdouble `json:"extended_bounds,omitempty"` + // Field The name of the field to aggregate on. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // HardBounds Limits the range of buckets in the histogram. + // It is particularly useful in the case of open data ranges that can result in + // a very large number of buckets. 
+ HardBounds *ExtendedBoundsdouble `json:"hard_bounds,omitempty"` + // Interval The interval for the buckets. + // Must be a positive decimal. + Interval *Float64 `json:"interval,omitempty"` + // Keyed If `true`, returns buckets as a hash instead of an array, keyed by the bucket + // keys. + Keyed *bool `json:"keyed,omitempty"` + // MinDocCount Only returns buckets that have `min_doc_count` number of documents. + // By default, the response will fill gaps in the histogram with empty buckets. + MinDocCount *int `json:"min_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing *Float64 `json:"missing,omitempty"` + // Offset By default, the bucket keys start with 0 and then continue in even spaced + // steps of `interval`. + // The bucket boundaries can be shifted by using the `offset` option. + Offset *Float64 `json:"offset,omitempty"` + // Order The sort order of the returned buckets. + // By default, the returned buckets are sorted by their key ascending. + Order AggregateOrder `json:"order,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "extended_bounds": + if err := dec.Decode(&s.ExtendedBounds); err != nil { + return fmt.Errorf("%s | %w", "ExtendedBounds", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hard_bounds": + if err := dec.Decode(&s.HardBounds); err != nil { + return fmt.Errorf("%s | %w", "HardBounds", err) + } + + case "interval": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + f := Float64(value) + s.Interval = &f + case float64: + f := Float64(v) + s.Interval = &f + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "min_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f + } + + case "missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + f := Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f + } + + case "offset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f + } + + case "order": + + rawMsg := json.RawMessage{} + 
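+ // `order` is polymorphic: either a single {field: direction} object
+ // or a list of such objects, so sniff the first byte of the raw JSON.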
dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + case '[': + o := make([]map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewHistogramAggregation returns a HistogramAggregation. +func NewHistogramAggregation() *HistogramAggregation { + r := &HistogramAggregation{} + + return r +} + +type HistogramAggregationVariant interface { + HistogramAggregationCaster() *HistogramAggregation +} + +func (s *HistogramAggregation) HistogramAggregationCaster() *HistogramAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogrambucket.go new file mode 100644 index 000000000..b5b8e60c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogrambucket.go @@ -0,0 +1,662 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// HistogramBucket type. 
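+//
+// Sub-aggregation results are decoded into Aggregations, keyed by the
+// caller-chosen name extracted from the wire key's "type#name" form.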
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L378-L381 +type HistogramBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key Float64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` +} + +func (s *HistogramBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) 
+ } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
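+ // elems[0] picked the concrete aggregate type above; elems[1] is the
+ // user-supplied aggregation name used as the map key.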
s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if 
err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { 
+ return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + type opt HistogramBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewHistogramBucket returns a HistogramBucket. +func NewHistogramBucket() *HistogramBucket { + r := &HistogramBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramgrouping.go new file mode 100644 index 000000000..1140fe432 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramgrouping.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HistogramGrouping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Groupings.ts#L84-L97 +type HistogramGrouping struct { + // Fields The set of fields that you wish to build histograms for. 
+ // All fields specified must be some kind of numeric. + // Order does not matter. + Fields []string `json:"fields"` + // Interval The interval of histogram buckets to be generated when rolling up. + // For example, a value of `5` creates buckets that are five units wide (`0-5`, + // `5-10`, etc). + // Note that only one interval can be specified in the histogram group, meaning + // that all fields being grouped via the histogram must share the same interval. + Interval int64 `json:"interval"` +} + +func (s *HistogramGrouping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "interval": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + s.Interval = value + case float64: + f := int64(v) + s.Interval = f + } + + } + } + return nil +} + +// NewHistogramGrouping returns a HistogramGrouping. +func NewHistogramGrouping() *HistogramGrouping { + r := &HistogramGrouping{} + + return r +} + +type HistogramGroupingVariant interface { + HistogramGroupingCaster() *HistogramGrouping +} + +func (s *HistogramGrouping) HistogramGroupingCaster() *HistogramGrouping { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramproperty.go new file mode 100644 index 000000000..04c901eec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/histogramproperty.go @@ -0,0 +1,838 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// HistogramProperty type. 
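+//
+// Mapping definition for the `histogram` field type, which holds
+// pre-aggregated numerical data; MarshalJSON below pins Type to
+// "histogram" so the wire form is always a valid histogram mapping.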
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L69-L72 +type HistogramProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *HistogramProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
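+ // Unknown or unlisted mapping types fall through to the generic
+ // Property decoder in the default case below.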
s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties 
| %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HistogramProperty) MarshalJSON() ([]byte, error) { + type innerHistogramProperty HistogramProperty + tmp := innerHistogramProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + 
tmp.Type = "histogram" + + return json.Marshal(tmp) +} + +// NewHistogramProperty returns a HistogramProperty. +func NewHistogramProperty() *HistogramProperty { + r := &HistogramProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type HistogramPropertyVariant interface { + HistogramPropertyCaster() *HistogramProperty +} + +func (s *HistogramProperty) HistogramPropertyCaster() *HistogramProperty { + return s +} + +func (s *HistogramProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hit.go new file mode 100644 index 000000000..ee404f5ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hit.go @@ -0,0 +1,258 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Hit type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L40-L65 +type Hit struct { + Explanation_ *Explanation `json:"_explanation,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Highlight map[string][]string `json:"highlight,omitempty"` + Id_ *string `json:"_id,omitempty"` + IgnoredFieldValues map[string][]json.RawMessage `json:"ignored_field_values,omitempty"` + Ignored_ []string `json:"_ignored,omitempty"` + Index_ string `json:"_index"` + InnerHits map[string]InnerHitsResult `json:"inner_hits,omitempty"` + MatchedQueries any `json:"matched_queries,omitempty"` + Nested_ *NestedIdentity `json:"_nested,omitempty"` + Node_ *string `json:"_node,omitempty"` + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + Rank_ *int `json:"_rank,omitempty"` + Routing_ *string `json:"_routing,omitempty"` + Score_ *Float64 `json:"_score,omitempty"` + SeqNo_ *int64 `json:"_seq_no,omitempty"` + Shard_ *string `json:"_shard,omitempty"` + Sort []FieldValue `json:"sort,omitempty"` + Source_ json.RawMessage `json:"_source,omitempty"` + Version_ *int64 `json:"_version,omitempty"` +} + +func (s *Hit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_explanation": + if err := dec.Decode(&s.Explanation_); err != nil { + return fmt.Errorf("%s | %w", "Explanation_", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "highlight": + if s.Highlight == nil { + s.Highlight = make(map[string][]string, 0) + } + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "ignored_field_values": + if s.IgnoredFieldValues == nil { + s.IgnoredFieldValues = make(map[string][]json.RawMessage, 0) + } + if err := dec.Decode(&s.IgnoredFieldValues); err != nil { + return fmt.Errorf("%s | %w", "IgnoredFieldValues", err) + } + + case "_ignored": + if err := dec.Decode(&s.Ignored_); err != nil { + return fmt.Errorf("%s | %w", "Ignored_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "inner_hits": + if s.InnerHits == nil { + s.InnerHits = make(map[string]InnerHitsResult, 0) + } + if err := dec.Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + case "matched_queries": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]Float64, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MatchedQueries", err) + } + s.MatchedQueries = o + case '[': + o := []string{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "MatchedQueries", err) + } + s.MatchedQueries = o + } + + case "_nested": + if err := dec.Decode(&s.Nested_); err != nil { + return fmt.Errorf("%s | %w", "Nested_", err) + } + + case "_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node_", err) + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node_ = &o + + case "_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_rank": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Rank_", err) + } + s.Rank_ = &value + case float64: + f := int(v) + s.Rank_ = &f + } + + case "_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Routing_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing_ = &o + + case "_score": + if err := dec.Decode(&s.Score_); err != nil { + return fmt.Errorf("%s | %w", "Score_", err) + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + case "_shard": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Shard_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard_ = &o + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + } + } + return nil +} + +// NewHit returns a Hit. +func NewHit() *Hit { + r := &Hit{ + Fields: make(map[string]json.RawMessage), + Highlight: make(map[string][]string), + IgnoredFieldValues: make(map[string][]json.RawMessage), + InnerHits: make(map[string]InnerHitsResult), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsevent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsevent.go new file mode 100644 index 000000000..1f5f4e630 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsevent.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HitsEvent type. 
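Hit deliberately keeps `_source`, `fields` and `ignored_field_values` as json.RawMessage so the caller chooses the concrete document type instead of the library decoding eagerly. A short sketch; `article` is a hypothetical application struct, not something defined in this diff:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// article is an assumed application document type for the sketch.
type article struct {
	Title string `json:"title"`
}

func main() {
	raw := []byte(`{"_index":"articles","_id":"1","_source":{"title":"Hello"}}`)

	var h types.Hit
	if err := json.Unmarshal(raw, &h); err != nil {
		panic(err)
	}

	// Source_ stays a json.RawMessage until the caller decodes it.
	var a article
	if err := json.Unmarshal(h.Source_, &a); err != nil {
		panic(err)
	}
	fmt.Println(h.Index_, *h.Id_, a.Title) // articles 1 Hello
}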
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/_types/EqlHits.ts#L41-L54 +type HitsEvent struct { + Fields map[string][]json.RawMessage `json:"fields,omitempty"` + // Id_ Unique identifier for the event. This ID is only unique within the index. + Id_ string `json:"_id"` + // Index_ Name of the index containing the event. + Index_ string `json:"_index"` + // Missing Set to `true` for events in a timespan-constrained sequence that do not meet + // a given condition. + Missing *bool `json:"missing,omitempty"` + // Source_ Original JSON body passed for the event at index time. + Source_ json.RawMessage `json:"_source,omitempty"` +} + +func (s *HitsEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + s.Missing = &value + case bool: + s.Missing = &v + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + } + } + return nil +} + +// NewHitsEvent returns a HitsEvent. +func NewHitsEvent() *HitsEvent { + r := &HitsEvent{ + Fields: make(map[string][]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsmetadata.go new file mode 100644 index 000000000..6d3d41984 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitsmetadata.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HitsMetadata type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L67-L73 +type HitsMetadata struct { + Hits []Hit `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + // Total Total hit count information, present only if `track_total_hits` wasn't + // `false` in the search request. + Total *TotalHits `json:"total,omitempty"` +} + +func (s *HitsMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "max_score": + if err := dec.Decode(&s.MaxScore); err != nil { + return fmt.Errorf("%s | %w", "MaxScore", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + } + } + return nil +} + +// NewHitsMetadata returns a HitsMetadata. +func NewHitsMetadata() *HitsMetadata { + r := &HitsMetadata{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitssequence.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitssequence.go new file mode 100644 index 000000000..95acf17cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hitssequence.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// HitsSequence type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/eql/_types/EqlHits.ts#L56-L64 +type HitsSequence struct { + // Events Contains events matching the query. Each object represents a matching event. + Events []HitsEvent `json:"events"` + // JoinKeys Shared field values used to constrain matches in the sequence. These are + // defined using the by keyword in the EQL query syntax. + JoinKeys []json.RawMessage `json:"join_keys,omitempty"` +} + +// NewHitsSequence returns a HitsSequence. 
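HitsMetadata above mirrors the search response envelope; Total is a pointer because the server omits it when `track_total_hits` was `false`. TotalHits itself is defined outside this excerpt, so the sketch below only checks its presence:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{
		"total": {"value": 2, "relation": "eq"},
		"max_score": 1.5,
		"hits": [
			{"_index": "articles", "_id": "1"},
			{"_index": "articles", "_id": "2"}
		]
	}`)

	var m types.HitsMetadata
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Total is nil when the request disabled total-hit tracking.
	fmt.Println(m.Total != nil, len(m.Hits)) // true 2
}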
+func NewHitsSequence() *HitsSequence { + r := &HitsSequence{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtlinearmodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtlinearmodelsettings.go new file mode 100644 index 000000000..c76a04458 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtlinearmodelsettings.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HoltLinearModelSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L297-L300 +type HoltLinearModelSettings struct { + Alpha *float32 `json:"alpha,omitempty"` + Beta *float32 `json:"beta,omitempty"` +} + +func (s *HoltLinearModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + case "beta": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Beta", err) + } + f := float32(value) + s.Beta = &f + case float64: + f := float32(v) + s.Beta = &f + } + + } + } + return nil +} + +// NewHoltLinearModelSettings returns a HoltLinearModelSettings. +func NewHoltLinearModelSettings() *HoltLinearModelSettings { + r := &HoltLinearModelSettings{} + + return r +} + +type HoltLinearModelSettingsVariant interface { + HoltLinearModelSettingsCaster() *HoltLinearModelSettings +} + +func (s *HoltLinearModelSettings) HoltLinearModelSettingsCaster() *HoltLinearModelSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtmovingaverageaggregation.go new file mode 100644 index 000000000..efcf558b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtmovingaverageaggregation.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// HoltMovingAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L283-L286 +type HoltMovingAverageAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings HoltLinearModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` +} + +func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "minimize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Minimize", err) + } + s.Minimize = &value + case bool: + s.Minimize = &v + } + + case "model": + if err := dec.Decode(&s.Model); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + + case "predict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Predict", err) + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HoltMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerHoltMovingAverageAggregation HoltMovingAverageAggregation + tmp := innerHoltMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Minimize: s.Minimize, + Model: s.Model, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "holt" + + return json.Marshal(tmp) +} + +// NewHoltMovingAverageAggregation returns a HoltMovingAverageAggregation. 
+func NewHoltMovingAverageAggregation() *HoltMovingAverageAggregation { + r := &HoltMovingAverageAggregation{} + + return r +} + +type HoltMovingAverageAggregationVariant interface { + HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation +} + +func (s *HoltMovingAverageAggregation) HoltMovingAverageAggregationCaster() *HoltMovingAverageAggregation { + return s +} + +func (s *HoltMovingAverageAggregation) MovingAverageAggregationCaster() *MovingAverageAggregation { + o := MovingAverageAggregation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmodelsettings.go new file mode 100644 index 000000000..8c882d21c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmodelsettings.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/holtwinterstype" +) + +// HoltWintersModelSettings type. 
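The moving-average aggregations pin their `model` discriminator in MarshalJSON ("holt" above, "holt_winters" further below), and the generated settings types accept both JSON numbers and numeric strings for fields such as `alpha`. A usage sketch; treating BucketsPath as assignable from a bare string is an assumption about that union type, which is defined outside this excerpt:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	alpha := float32(0.5)
	window := 10

	agg := types.NewHoltMovingAverageAggregation()
	agg.BucketsPath = "the_sum" // assumes the BucketsPath union accepts a plain string
	agg.Window = &window
	agg.Settings = types.HoltLinearModelSettings{Alpha: &alpha}

	// "model":"holt" is injected by MarshalJSON; callers never set it by hand.
	out, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}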
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L301-L308 +type HoltWintersModelSettings struct { + Alpha *float32 `json:"alpha,omitempty"` + Beta *float32 `json:"beta,omitempty"` + Gamma *float32 `json:"gamma,omitempty"` + Pad *bool `json:"pad,omitempty"` + Period *int `json:"period,omitempty"` + Type *holtwinterstype.HoltWintersType `json:"type,omitempty"` +} + +func (s *HoltWintersModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + case "beta": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Beta", err) + } + f := float32(value) + s.Beta = &f + case float64: + f := float32(v) + s.Beta = &f + } + + case "gamma": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Gamma", err) + } + f := float32(value) + s.Gamma = &f + case float64: + f := float32(v) + s.Gamma = &f + } + + case "pad": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pad", err) + } + s.Pad = &value + case bool: + s.Pad = &v + } + + case "period": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Period", err) + } + s.Period = &value + case float64: + f := int(v) + s.Period = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewHoltWintersModelSettings returns a HoltWintersModelSettings. +func NewHoltWintersModelSettings() *HoltWintersModelSettings { + r := &HoltWintersModelSettings{} + + return r +} + +type HoltWintersModelSettingsVariant interface { + HoltWintersModelSettingsCaster() *HoltWintersModelSettings +} + +func (s *HoltWintersModelSettings) HoltWintersModelSettingsCaster() *HoltWintersModelSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmovingaverageaggregation.go new file mode 100644 index 000000000..2925a8a1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/holtwintersmovingaverageaggregation.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// HoltWintersMovingAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L288-L291 +type HoltWintersMovingAverageAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings HoltWintersModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` +} + +func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "minimize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Minimize", err) + } + s.Minimize = &value + case bool: + s.Minimize = &v + } + + case "model": + if err := dec.Decode(&s.Model); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + + case "predict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Predict", err) + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + 
return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HoltWintersMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerHoltWintersMovingAverageAggregation HoltWintersMovingAverageAggregation + tmp := innerHoltWintersMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Minimize: s.Minimize, + Model: s.Model, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "holt_winters" + + return json.Marshal(tmp) +} + +// NewHoltWintersMovingAverageAggregation returns a HoltWintersMovingAverageAggregation. +func NewHoltWintersMovingAverageAggregation() *HoltWintersMovingAverageAggregation { + r := &HoltWintersMovingAverageAggregation{} + + return r +} + +type HoltWintersMovingAverageAggregationVariant interface { + HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation +} + +func (s *HoltWintersMovingAverageAggregation) HoltWintersMovingAverageAggregationCaster() *HoltWintersMovingAverageAggregation { + return s +} + +func (s *HoltWintersMovingAverageAggregation) MovingAverageAggregationCaster() *MovingAverageAggregation { + o := MovingAverageAggregation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hop.go new file mode 100644 index 000000000..ba5386ff5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hop.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Hop type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/Hop.ts#L23-L36 +type Hop struct { + // Connections Specifies one or more fields from which you want to extract terms that are + // associated with the specified vertices. + Connections *Hop `json:"connections,omitempty"` + // Query An optional guiding query that constrains the Graph API as it explores + // connected terms. + Query *Query `json:"query,omitempty"` + // Vertices Contains the fields you are interested in. + Vertices []VertexDefinition `json:"vertices"` +} + +// NewHop returns a Hop. 
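Hop above is self-recursive through Connections, one level per traversal step of the graph explore API. Query and VertexDefinition are defined elsewhere in the package, so they stay zero-valued in this sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Two traversal steps: the outer hop explores first, then follows
	// Connections. Vertices is left nil purely for illustration.
	first := &types.Hop{
		Connections: &types.Hop{},
	}

	out, err := json.Marshal(first)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"connections":{"vertices":null},"vertices":null}
}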
+func NewHop() *Hop { + r := &Hop{} + + return r +} + +type HopVariant interface { + HopCaster() *Hop +} + +func (s *Hop) HopCaster() *Hop { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourandminute.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourandminute.go new file mode 100644 index 000000000..61d7b8d8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourandminute.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// HourAndMinute type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L106-L109 +type HourAndMinute struct { + Hour []int `json:"hour"` + Minute []int `json:"minute"` +} + +// NewHourAndMinute returns a HourAndMinute. +func NewHourAndMinute() *HourAndMinute { + r := &HourAndMinute{} + + return r +} + +type HourAndMinuteVariant interface { + HourAndMinuteCaster() *HourAndMinute +} + +func (s *HourAndMinute) HourAndMinuteCaster() *HourAndMinute { + return s +} + +func (s *HourAndMinute) ScheduleTimeOfDayCaster() *ScheduleTimeOfDay { + o := ScheduleTimeOfDay(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourlyschedule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourlyschedule.go new file mode 100644 index 000000000..7509fdbf3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hourlyschedule.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// HourlySchedule type. 
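The *Variant/*Caster pairs generated for these types (HopCaster, HourAndMinuteCaster, ScheduleTimeOfDayCaster, and so on) are the mechanism for handing a concrete value to code that expects the corresponding union type. A sketch of the pattern, assuming only what the generated methods above declare:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	hm := &types.HourAndMinute{Hour: []int{0, 12}, Minute: []int{30}}

	// The caster converts the concrete schedule time into the
	// ScheduleTimeOfDay union used by watcher schedule definitions.
	tod := hm.ScheduleTimeOfDayCaster()
	fmt.Printf("%T\n", tod) // *types.ScheduleTimeOfDay
}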
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L47-L49 +type HourlySchedule struct { + Minute []int `json:"minute"` +} + +// NewHourlySchedule returns a HourlySchedule. +func NewHourlySchedule() *HourlySchedule { + r := &HourlySchedule{} + + return r +} + +type HourlyScheduleVariant interface { + HourlyScheduleCaster() *HourlySchedule +} + +func (s *HourlySchedule) HourlyScheduleCaster() *HourlySchedule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripcharfilter.go new file mode 100644 index 000000000..b3127eca6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripcharfilter.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HtmlStripCharFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/char_filters.ts#L46-L49 +type HtmlStripCharFilter struct { + EscapedTags []string `json:"escaped_tags,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *HtmlStripCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "escaped_tags": + if err := dec.Decode(&s.EscapedTags); err != nil { + return fmt.Errorf("%s | %w", "EscapedTags", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HtmlStripCharFilter) MarshalJSON() ([]byte, error) { + type innerHtmlStripCharFilter HtmlStripCharFilter + tmp := innerHtmlStripCharFilter{ + EscapedTags: s.EscapedTags, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "html_strip" + + return json.Marshal(tmp) +} + +// NewHtmlStripCharFilter returns a HtmlStripCharFilter. 
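HtmlStripCharFilter above follows the same literal-type pattern as the property types, so analysis configuration never needs `type` set by hand. A small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.HtmlStripCharFilter{
		EscapedTags: []string{"b"}, // keep <b>...</b>, strip everything else
	}

	out, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"escaped_tags":["b"],"type":"html_strip"}
}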
+func NewHtmlStripCharFilter() *HtmlStripCharFilter { + r := &HtmlStripCharFilter{} + + return r +} + +type HtmlStripCharFilterVariant interface { + HtmlStripCharFilterCaster() *HtmlStripCharFilter +} + +func (s *HtmlStripCharFilter) HtmlStripCharFilterCaster() *HtmlStripCharFilter { + return s +} + +func (s *HtmlStripCharFilter) CharFilterDefinitionCaster() *CharFilterDefinition { + o := CharFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripprocessor.go new file mode 100644 index 000000000..3b29b265c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/htmlstripprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HtmlStripProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1010-L1026 +type HtmlStripProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The string-valued field to remove HTML tags from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document, + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to + // By default, the `field` is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *HtmlStripProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewHtmlStripProcessor returns a HtmlStripProcessor. +func NewHtmlStripProcessor() *HtmlStripProcessor { + r := &HtmlStripProcessor{} + + return r +} + +type HtmlStripProcessorVariant interface { + HtmlStripProcessorCaster() *HtmlStripProcessor +} + +func (s *HtmlStripProcessor) HtmlStripProcessorCaster() *HtmlStripProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/http.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/http.go new file mode 100644 index 000000000..aa4dc7680 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/http.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
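HtmlStripProcessor above is the ingest-pipeline counterpart of the char filter: leaving TargetField unset rewrites `field` in place. A sketch of building the processor body (illustrative only; the field values are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	target := "content_plain"

	p := types.NewHtmlStripProcessor()
	p.Field = "content"
	p.TargetField = &target // omit to rewrite "content" in place

	out, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"field":"content","target_field":"content_plain"}
}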
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Http type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L671-L690 +type Http struct { + // Clients Information on current and recently-closed HTTP client connections. + // Clients that have been closed longer than the + // `http.client_stats.closed_channels.max_age` setting will not be represented + // here. + Clients []Client `json:"clients,omitempty"` + // CurrentOpen Current number of open HTTP connections for the node. + CurrentOpen *int `json:"current_open,omitempty"` + // Routes Detailed HTTP stats broken down by route + Routes map[string]HttpRoute `json:"routes"` + // TotalOpened Total number of HTTP connections opened for the node. + TotalOpened *int64 `json:"total_opened,omitempty"` +} + +func (s *Http) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clients": + if err := dec.Decode(&s.Clients); err != nil { + return fmt.Errorf("%s | %w", "Clients", err) + } + + case "current_open": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentOpen", err) + } + s.CurrentOpen = &value + case float64: + f := int(v) + s.CurrentOpen = &f + } + + case "routes": + if s.Routes == nil { + s.Routes = make(map[string]HttpRoute, 0) + } + if err := dec.Decode(&s.Routes); err != nil { + return fmt.Errorf("%s | %w", "Routes", err) + } + + case "total_opened": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalOpened", err) + } + s.TotalOpened = &value + case float64: + f := int64(v) + s.TotalOpened = &f + } + + } + } + return nil +} + +// NewHttp returns a Http. +func NewHttp() *Http { + r := &Http{ + Routes: make(map[string]HttpRoute), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpemailattachment.go new file mode 100644 index 000000000..6c3e505a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpemailattachment.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpEmailAttachment type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L218-L222 +type HttpEmailAttachment struct { + ContentType *string `json:"content_type,omitempty"` + Inline *bool `json:"inline,omitempty"` + Request *HttpInputRequestDefinition `json:"request,omitempty"` +} + +func (s *HttpEmailAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ContentType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ContentType = &o + + case "inline": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Inline", err) + } + s.Inline = &value + case bool: + s.Inline = &v + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + } + } + return nil +} + +// NewHttpEmailAttachment returns a HttpEmailAttachment. +func NewHttpEmailAttachment() *HttpEmailAttachment { + r := &HttpEmailAttachment{} + + return r +} + +type HttpEmailAttachmentVariant interface { + HttpEmailAttachmentCaster() *HttpEmailAttachment +} + +func (s *HttpEmailAttachment) HttpEmailAttachmentCaster() *HttpEmailAttachment { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpheaders.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpheaders.go new file mode 100644 index 000000000..3e058cdbd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpheaders.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// HttpHeaders type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L155-L155 +type HttpHeaders map[string][]string + +type HttpHeadersVariant interface { + HttpHeadersCaster() *HttpHeaders +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinput.go new file mode 100644 index 000000000..c1dc45ca7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinput.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/responsecontenttype" +) + +// HttpInput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L44-L48 +type HttpInput struct { + Extract []string `json:"extract,omitempty"` + Request *HttpInputRequestDefinition `json:"request,omitempty"` + ResponseContentType *responsecontenttype.ResponseContentType `json:"response_content_type,omitempty"` +} + +// NewHttpInput returns a HttpInput. +func NewHttpInput() *HttpInput { + r := &HttpInput{} + + return r +} + +type HttpInputVariant interface { + HttpInputCaster() *HttpInput +} + +func (s *HttpInput) HttpInputCaster() *HttpInput { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputauthentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputauthentication.go new file mode 100644 index 000000000..8d88fb030 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputauthentication.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
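// Example (illustrative sketch): HttpHeaders above shares its underlying type
// with net/http.Header, so the two convert directly in both directions.

package main

import (
	"fmt"
	"net/http"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	std := http.Header{}
	std.Set("X-Request-Id", "abc-123") // placeholder header value

	hdrs := types.HttpHeaders(std) // net/http.Header -> HttpHeaders
	back := http.Header(hdrs)      // and back again

	fmt.Println(hdrs["X-Request-Id"][0]) // abc-123
	fmt.Println(back.Get("X-Request-Id"))
}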
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// HttpInputAuthentication type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L50-L52 +type HttpInputAuthentication struct { + Basic HttpInputBasicAuthentication `json:"basic"` +} + +// NewHttpInputAuthentication returns a HttpInputAuthentication. +func NewHttpInputAuthentication() *HttpInputAuthentication { + r := &HttpInputAuthentication{} + + return r +} + +type HttpInputAuthenticationVariant interface { + HttpInputAuthenticationCaster() *HttpInputAuthentication +} + +func (s *HttpInputAuthentication) HttpInputAuthenticationCaster() *HttpInputAuthentication { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputbasicauthentication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputbasicauthentication.go new file mode 100644 index 000000000..74d24765f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputbasicauthentication.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HttpInputBasicAuthentication type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L54-L57 +type HttpInputBasicAuthentication struct { + Password string `json:"password"` + Username string `json:"username"` +} + +func (s *HttpInputBasicAuthentication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewHttpInputBasicAuthentication returns a HttpInputBasicAuthentication. 
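// Example (illustrative sketch): HttpInputAuthentication wraps the one
// supported scheme, basic credentials. The username and password here are
// placeholders.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	auth := types.NewHttpInputAuthentication()
	auth.Basic = types.HttpInputBasicAuthentication{
		Username: "watcher-user", // placeholder
		Password: "changeme",     // placeholder
	}
	b, _ := json.Marshal(auth)
	fmt.Println(string(b)) // {"basic":{"password":"changeme","username":"watcher-user"}}
}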
+func NewHttpInputBasicAuthentication() *HttpInputBasicAuthentication { + r := &HttpInputBasicAuthentication{} + + return r +} + +type HttpInputBasicAuthenticationVariant interface { + HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication +} + +func (s *HttpInputBasicAuthentication) HttpInputBasicAuthenticationCaster() *HttpInputBasicAuthentication { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputproxy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputproxy.go new file mode 100644 index 000000000..0f66604c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputproxy.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// HttpInputProxy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L67-L70 +type HttpInputProxy struct { + Host string `json:"host"` + Port uint `json:"port"` +} + +func (s *HttpInputProxy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + + } + } + return nil +} + +// NewHttpInputProxy returns a HttpInputProxy. +func NewHttpInputProxy() *HttpInputProxy { + r := &HttpInputProxy{} + + return r +} + +type HttpInputProxyVariant interface { + HttpInputProxyCaster() *HttpInputProxy +} + +func (s *HttpInputProxy) HttpInputProxyCaster() *HttpInputProxy { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestdefinition.go new file mode 100644 index 000000000..99105fce1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestdefinition.go @@ -0,0 +1,182 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod" +) + +// HttpInputRequestDefinition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L72-L86 +type HttpInputRequestDefinition struct { + Auth *HttpInputAuthentication `json:"auth,omitempty"` + Body *string `json:"body,omitempty"` + ConnectionTimeout Duration `json:"connection_timeout,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Host *string `json:"host,omitempty"` + Method *httpinputmethod.HttpInputMethod `json:"method,omitempty"` + Params map[string]string `json:"params,omitempty"` + Path *string `json:"path,omitempty"` + Port *uint `json:"port,omitempty"` + Proxy *HttpInputProxy `json:"proxy,omitempty"` + ReadTimeout Duration `json:"read_timeout,omitempty"` + Scheme *connectionscheme.ConnectionScheme `json:"scheme,omitempty"` + Url *string `json:"url,omitempty"` +} + +func (s *HttpInputRequestDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return fmt.Errorf("%s | %w", "Auth", err) + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return fmt.Errorf("%s | %w", "ConnectionTimeout", err) + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := 
dec.Decode(&s.Port); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return fmt.Errorf("%s | %w", "Proxy", err) + } + + case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadTimeout", err) + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return fmt.Errorf("%s | %w", "Scheme", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewHttpInputRequestDefinition returns a HttpInputRequestDefinition. +func NewHttpInputRequestDefinition() *HttpInputRequestDefinition { + r := &HttpInputRequestDefinition{ + Headers: make(map[string]string), + Params: make(map[string]string), + } + + return r +} + +type HttpInputRequestDefinitionVariant interface { + HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition +} + +func (s *HttpInputRequestDefinition) HttpInputRequestDefinitionCaster() *HttpInputRequestDefinition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestresult.go new file mode 100644 index 000000000..20f2530c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputrequestresult.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod" +) + +// HttpInputRequestResult type. 
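// Example (illustrative sketch): HttpInputRequestDefinition is the request
// half of a Watcher HTTP input or webhook. The target host, port, and path
// below are invented, and ptr is a local helper, not part of the generated
// API. NewHttpInputRequestDefinition pre-allocates the Headers and Params maps.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func ptr[T any](v T) *T { return &v }

func main() {
	req := types.NewHttpInputRequestDefinition()
	req.Host = ptr("internal-service.example")
	req.Port = ptr(uint(8443))
	req.Path = ptr("/healthz")
	req.Headers["Accept"] = "application/json"

	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}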
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L300-L300 +type HttpInputRequestResult struct { + Auth *HttpInputAuthentication `json:"auth,omitempty"` + Body *string `json:"body,omitempty"` + ConnectionTimeout Duration `json:"connection_timeout,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Host *string `json:"host,omitempty"` + Method *httpinputmethod.HttpInputMethod `json:"method,omitempty"` + Params map[string]string `json:"params,omitempty"` + Path *string `json:"path,omitempty"` + Port *uint `json:"port,omitempty"` + Proxy *HttpInputProxy `json:"proxy,omitempty"` + ReadTimeout Duration `json:"read_timeout,omitempty"` + Scheme *connectionscheme.ConnectionScheme `json:"scheme,omitempty"` + Url *string `json:"url,omitempty"` +} + +func (s *HttpInputRequestResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return fmt.Errorf("%s | %w", "Auth", err) + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return fmt.Errorf("%s | %w", "ConnectionTimeout", err) + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return fmt.Errorf("%s | %w", "Proxy", err) + } + + case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadTimeout", err) + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return fmt.Errorf("%s | %w", "Scheme", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewHttpInputRequestResult returns a HttpInputRequestResult. 
+func NewHttpInputRequestResult() *HttpInputRequestResult { + r := &HttpInputRequestResult{ + Headers: make(map[string]string), + Params: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputresponseresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputresponseresult.go new file mode 100644 index 000000000..dff7a2e5e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httpinputresponseresult.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpInputResponseResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L302-L306 +type HttpInputResponseResult struct { + Body string `json:"body"` + Headers HttpHeaders `json:"headers"` + Status int `json:"status"` +} + +func (s *HttpInputResponseResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = o + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "status": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + } + } + return nil +} + +// NewHttpInputResponseResult returns a HttpInputResponseResult. +func NewHttpInputResponseResult() *HttpInputResponseResult { + r := &HttpInputResponseResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httproute.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httproute.go new file mode 100644 index 000000000..8c78ed6c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httproute.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// HttpRoute type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L692-L695 +type HttpRoute struct { + Requests HttpRouteRequests `json:"requests"` + Responses HttpRouteResponses `json:"responses"` +} + +// NewHttpRoute returns a HttpRoute. +func NewHttpRoute() *HttpRoute { + r := &HttpRoute{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouterequests.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouterequests.go new file mode 100644 index 000000000..64f60ab82 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouterequests.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpRouteRequests type. 
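// Example (illustrative sketch): Http.Routes (see the Http type earlier in
// this diff) maps a route pattern to the per-route counters defined here. The
// sample numbers are invented.

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	h := types.NewHttp() // Routes map is pre-allocated by the constructor
	h.Routes["/_bulk"] = types.HttpRoute{
		Requests: types.HttpRouteRequests{Count: 42, TotalSizeInBytes: 1 << 20},
	}
	h.Routes["/_search"] = types.HttpRoute{
		Requests: types.HttpRouteRequests{Count: 7, TotalSizeInBytes: 4096},
	}

	var total int64
	for _, route := range h.Routes {
		total += route.Requests.Count
	}
	fmt.Println("requests across routes:", total) // 49
}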
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L697-L701 +type HttpRouteRequests struct { + Count int64 `json:"count"` + SizeHistogram []SizeHttpHistogram `json:"size_histogram"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} + +func (s *HttpRouteRequests) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "size_histogram": + if err := dec.Decode(&s.SizeHistogram); err != nil { + return fmt.Errorf("%s | %w", "SizeHistogram", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + } + } + return nil +} + +// NewHttpRouteRequests returns a HttpRouteRequests. +func NewHttpRouteRequests() *HttpRouteRequests { + r := &HttpRouteRequests{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouteresponses.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouteresponses.go new file mode 100644 index 000000000..ad376254f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/httprouteresponses.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HttpRouteResponses type. 
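// Example (illustrative sketch): like the other stats types in this package,
// HttpRouteRequests tolerates counters encoded as strings. The payload is
// invented.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var rr types.HttpRouteRequests
	if err := json.Unmarshal([]byte(`{"count":"18","total_size_in_bytes":2048}`), &rr); err != nil {
		panic(err)
	}
	fmt.Println(rr.Count, rr.TotalSizeInBytes) // 18 2048
}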
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L703-L708 +type HttpRouteResponses struct { + Count int64 `json:"count"` + HandlingTimeHistogram []TimeHttpHistogram `json:"handling_time_histogram"` + SizeHistogram []SizeHttpHistogram `json:"size_histogram"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` +} + +func (s *HttpRouteResponses) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "handling_time_histogram": + if err := dec.Decode(&s.HandlingTimeHistogram); err != nil { + return fmt.Errorf("%s | %w", "HandlingTimeHistogram", err) + } + + case "size_histogram": + if err := dec.Decode(&s.SizeHistogram); err != nil { + return fmt.Errorf("%s | %w", "SizeHistogram", err) + } + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + } + } + return nil +} + +// NewHttpRouteResponses returns a HttpRouteResponses. +func NewHttpRouteResponses() *HttpRouteResponses { + r := &HttpRouteResponses{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfaceservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfaceservicesettings.go new file mode 100644 index 000000000..71e73790c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfaceservicesettings.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HuggingFaceServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1382-L1414 +type HuggingFaceServiceSettings struct { + // ApiKey A valid access token for your HuggingFace account. 
+ // You can create or find your access tokens on the HuggingFace settings page. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // ModelId The name of the HuggingFace model to use for the inference task. + // For `completion` and `chat_completion` tasks, this field is optional but may + // be required for certain models — particularly when using serverless inference + // endpoints. + // For the `text_embedding` task, this field should not be included. Otherwise, + // the request will fail. + ModelId *string `json:"model_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // Hugging Face. + // By default, the `hugging_face` service sets the number of requests allowed + // per minute to 3000 for all supported tasks. + // Hugging Face does not publish a universal rate limit — actual limits may + // vary. + // It is recommended to adjust this value based on the capacity and limits of + // your specific deployment environment. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL endpoint to use for the requests. + // For `completion` and `chat_completion` tasks, the deployed model must be + // compatible with the Hugging Face Chat Completion interface (see the linked + // external documentation for details). The endpoint URL for the request must + // include `/v1/chat/completions`. + // If the model supports the OpenAI Chat Completion schema, a toggle should + // appear in the interface. Enabling this toggle doesn't change any model + // behavior, it reveals the full endpoint URL needed (which should include + // `/v1/chat/completions`) when configuring the inference endpoint in + // Elasticsearch. If the model doesn't support this schema, the toggle may not + // be shown. + Url string `json:"url"` +} + +func (s *HuggingFaceServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewHuggingFaceServiceSettings returns a HuggingFaceServiceSettings. 
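// Example (illustrative sketch): filling in HuggingFaceServiceSettings for a
// text_embedding endpoint. The token and URL are placeholders, and ModelId is
// deliberately left unset because, per the field docs above, text_embedding
// requests must not include it.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	settings := types.HuggingFaceServiceSettings{
		ApiKey: "hf_xxx",                                      // placeholder, never a real token
		Url:    "https://example.endpoints.huggingface.cloud", // placeholder endpoint
	}
	b, _ := json.Marshal(settings)
	fmt.Println(string(b))
}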
+func NewHuggingFaceServiceSettings() *HuggingFaceServiceSettings { + r := &HuggingFaceServiceSettings{} + + return r +} + +type HuggingFaceServiceSettingsVariant interface { + HuggingFaceServiceSettingsCaster() *HuggingFaceServiceSettings +} + +func (s *HuggingFaceServiceSettings) HuggingFaceServiceSettingsCaster() *HuggingFaceServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfacetasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfacetasksettings.go new file mode 100644 index 000000000..2c5c19f9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/huggingfacetasksettings.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HuggingFaceTaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1416-L1426 +type HuggingFaceTaskSettings struct { + // ReturnDocuments For a `rerank` task, return doc text within the results. + ReturnDocuments *bool `json:"return_documents,omitempty"` + // TopN For a `rerank` task, the number of most relevant documents to return. + // It defaults to the number of the documents. + TopN *int `json:"top_n,omitempty"` +} + +func (s *HuggingFaceTaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "return_documents": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnDocuments", err) + } + s.ReturnDocuments = &value + case bool: + s.ReturnDocuments = &v + } + + case "top_n": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopN", err) + } + s.TopN = &value + case float64: + f := int(v) + s.TopN = &f + } + + } + } + return nil +} + +// NewHuggingFaceTaskSettings returns a HuggingFaceTaskSettings. 
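// Example (illustrative sketch): rerank task settings built from the two
// fields defined above; the values are arbitrary.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	returnDocs := true
	topN := 5
	ts := types.HuggingFaceTaskSettings{ReturnDocuments: &returnDocs, TopN: &topN}

	b, _ := json.Marshal(ts)
	fmt.Println(string(b)) // {"return_documents":true,"top_n":5}
}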
+func NewHuggingFaceTaskSettings() *HuggingFaceTaskSettings { + r := &HuggingFaceTaskSettings{} + + return r +} + +type HuggingFaceTaskSettingsVariant interface { + HuggingFaceTaskSettingsCaster() *HuggingFaceTaskSettings +} + +func (s *HuggingFaceTaskSettings) HuggingFaceTaskSettingsCaster() *HuggingFaceTaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hungariananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hungariananalyzer.go new file mode 100644 index 000000000..dd8431e95 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hungariananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HungarianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L206-L211 +type HungarianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *HungarianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HungarianAnalyzer) MarshalJSON() ([]byte, error) { + type innerHungarianAnalyzer HungarianAnalyzer + tmp := innerHungarianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "hungarian" + + return json.Marshal(tmp) +} + +// NewHungarianAnalyzer returns a HungarianAnalyzer. 
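// Example (illustrative sketch): the MarshalJSON override above pins the
// discriminator, so serializing a HungarianAnalyzer emits "type":"hungarian"
// even when the Type field was never set.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.HungarianAnalyzer{} // Type deliberately left empty
	b, _ := json.Marshal(a)
	fmt.Println(string(b)) // should print {"type":"hungarian"}; empty optional fields are omitted
}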
+func NewHungarianAnalyzer() *HungarianAnalyzer { + r := &HungarianAnalyzer{} + + return r +} + +type HungarianAnalyzerVariant interface { + HungarianAnalyzerCaster() *HungarianAnalyzer +} + +func (s *HungarianAnalyzer) HungarianAnalyzerCaster() *HungarianAnalyzer { + return s +} + +func (s *HungarianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hunspelltokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hunspelltokenfilter.go new file mode 100644 index 000000000..670b8fcfd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hunspelltokenfilter.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HunspellTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L268-L280 +type HunspellTokenFilter struct { + // Dedup If `true`, duplicate tokens are removed from the filter’s output. Defaults to + // `true`. + Dedup *bool `json:"dedup,omitempty"` + // Dictionary One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the + // Hunspell dictionary. + // By default, the `hunspell` filter uses all `.dic` files in the + // `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, + // `language`, or `locale` parameter. + Dictionary *string `json:"dictionary,omitempty"` + // Locale Locale directory used to specify the `.aff` and `.dic` files for a Hunspell + // dictionary. + Locale string `json:"locale"` + // LongestOnly If `true`, only the longest stemmed version of each token is included in the + // output. If `false`, all stemmed versions of the token are included. Defaults + // to `false`. 
+ LongestOnly *bool `json:"longest_only,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *HunspellTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dedup": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dedup", err) + } + s.Dedup = &value + case bool: + s.Dedup = &v + } + + case "dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Dictionary", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Dictionary = &o + + case "locale", "lang", "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Locale", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = o + + case "longest_only": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "LongestOnly", err) + } + s.LongestOnly = &value + case bool: + s.LongestOnly = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HunspellTokenFilter) MarshalJSON() ([]byte, error) { + type innerHunspellTokenFilter HunspellTokenFilter + tmp := innerHunspellTokenFilter{ + Dedup: s.Dedup, + Dictionary: s.Dictionary, + Locale: s.Locale, + LongestOnly: s.LongestOnly, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "hunspell" + + return json.Marshal(tmp) +} + +// NewHunspellTokenFilter returns a HunspellTokenFilter. +func NewHunspellTokenFilter() *HunspellTokenFilter { + r := &HunspellTokenFilter{} + + return r +} + +type HunspellTokenFilterVariant interface { + HunspellTokenFilterCaster() *HunspellTokenFilter +} + +func (s *HunspellTokenFilter) HunspellTokenFilterCaster() *HunspellTokenFilter { + return s +} + +func (s *HunspellTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameter.go new file mode 100644 index 000000000..f2b513498 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameter.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
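// Example (illustrative sketch): the unmarshaller above accepts locale, lang,
// and language as synonyms for the same setting, so a payload using lang still
// lands in the Locale field. The dictionary name is arbitrary.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var f types.HunspellTokenFilter
	if err := json.Unmarshal([]byte(`{"type":"hunspell","lang":"en_US"}`), &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Locale) // en_US
}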
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Hyperparameter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L274-L288 +type Hyperparameter struct { + // AbsoluteImportance A positive number showing how much the parameter influences the variation of + // the loss function. For hyperparameters with values that are not specified by + // the user but tuned during hyperparameter optimization. + AbsoluteImportance *Float64 `json:"absolute_importance,omitempty"` + // Name Name of the hyperparameter. + Name string `json:"name"` + // RelativeImportance A number between 0 and 1 showing the proportion of influence on the variation + // of the loss function among all tuned hyperparameters. For hyperparameters + // with values that are not specified by the user but tuned during + // hyperparameter optimization. + RelativeImportance *Float64 `json:"relative_importance,omitempty"` + // Supplied Indicates if the hyperparameter is specified by the user (true) or optimized + // (false). + Supplied bool `json:"supplied"` + // Value The value of the hyperparameter, either optimized or specified by the user. 
+ Value Float64 `json:"value"` +} + +func (s *Hyperparameter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "absolute_importance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AbsoluteImportance", err) + } + f := Float64(value) + s.AbsoluteImportance = &f + case float64: + f := Float64(v) + s.AbsoluteImportance = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "relative_importance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RelativeImportance", err) + } + f := Float64(value) + s.RelativeImportance = &f + case float64: + f := Float64(v) + s.RelativeImportance = &f + } + + case "supplied": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Supplied", err) + } + s.Supplied = value + case bool: + s.Supplied = v + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + +// NewHyperparameter returns a Hyperparameter. +func NewHyperparameter() *Hyperparameter { + r := &Hyperparameter{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameters.go new file mode 100644 index 000000000..dc07b3408 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyperparameters.go @@ -0,0 +1,384 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Hyperparameters type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L420-L526 +type Hyperparameters struct { + // Alpha Advanced configuration option. 
+ // Machine learning uses loss guided tree growing, which means that the decision + // trees grow where the regularized loss decreases most quickly. + // This parameter affects loss calculations by acting as a multiplier of the + // tree depth. + // Higher alpha values result in shallower trees and faster training times. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to zero. + Alpha *Float64 `json:"alpha,omitempty"` + // DownsampleFactor Advanced configuration option. + // Controls the fraction of data that is used to compute the derivatives of the + // loss function for tree training. + // A small value results in the use of a small fraction of the data. + // If this value is set to be less than 1, accuracy typically improves. + // However, too small a value may result in poor convergence for the ensemble + // and so require more trees. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than zero and less than or equal to 1. + DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` + // Eta Advanced configuration option. + // The shrinkage applied to the weights. + // Smaller values result in larger forests which have a better generalization + // error. + // However, larger forests cause slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a value between `0.001` and `1`. + Eta *Float64 `json:"eta,omitempty"` + // EtaGrowthRatePerTree Advanced configuration option. + // Specifies the rate at which `eta` increases for each new tree that is added + // to the forest. + // For example, a rate of 1.05 increases `eta` by 5% for each extra tree. + // By default, this value is calculated during hyperparameter optimization. + // It must be between `0.5` and `2`. + EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` + // FeatureBagFraction Advanced configuration option. + // Defines the fraction of features that will be used when selecting a random + // bag for each candidate split. + // By default, this value is calculated during hyperparameter optimization. + FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` + // Gamma Advanced configuration option. + // Regularization parameter to prevent overfitting on the training data set. + // Multiplies a linear penalty associated with the size of individual trees in + // the forest. + // A high gamma value causes training to prefer small trees. + // A small gamma value results in larger individual trees and slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a nonnegative value. + Gamma *Float64 `json:"gamma,omitempty"` + // Lambda Advanced configuration option. + // Regularization parameter to prevent overfitting on the training data set. + // Multiplies an L2 regularization term which applies to leaf weights of the + // individual trees in the forest. + // A high lambda value causes training to favor small leaf weights. + // This behavior makes the prediction function smoother at the expense of + // potentially not being able to capture relevant relationships between the + // features and the dependent variable. + // A small lambda value results in large individual trees and slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a nonnegative value. 
+ Lambda *Float64 `json:"lambda,omitempty"` + // MaxAttemptsToAddTree If the algorithm fails to determine a non-trivial tree (more than a single + // leaf), this parameter determines how many of such consecutive failures are + // tolerated. + // Once the number of attempts exceeds the threshold, the forest training stops. + MaxAttemptsToAddTree *int `json:"max_attempts_to_add_tree,omitempty"` + // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. + // A multiplier responsible for determining the maximum number of hyperparameter + // optimization steps in the Bayesian optimization procedure. + // The maximum number of steps is determined based on the number of undefined + // hyperparameters times the maximum optimization rounds per hyperparameter. + // By default, this value is calculated during hyperparameter optimization. + MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` + // MaxTrees Advanced configuration option. + // Defines the maximum number of decision trees in the forest. + // The maximum value is 2000. + // By default, this value is calculated during hyperparameter optimization. + MaxTrees *int `json:"max_trees,omitempty"` + // NumFolds The maximum number of folds for the cross-validation procedure. + NumFolds *int `json:"num_folds,omitempty"` + // NumSplitsPerFeature Determines the maximum number of splits for every feature that can occur in a + // decision tree when the tree is trained. + NumSplitsPerFeature *int `json:"num_splits_per_feature,omitempty"` + // SoftTreeDepthLimit Advanced configuration option. + // Machine learning uses loss guided tree growing, which means that the decision + // trees grow where the regularized loss decreases most quickly. + // This soft limit combines with the `soft_tree_depth_tolerance` to penalize + // trees that exceed the specified depth; the regularized loss increases quickly + // beyond this depth. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to 0. + SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` + // SoftTreeDepthTolerance Advanced configuration option. + // This option controls how quickly the regularized loss increases when the tree + // depth exceeds `soft_tree_depth_limit`. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to 0.01. 
+ SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` +} + +func (s *Hyperparameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "downsample_factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DownsampleFactor", err) + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case "eta": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Eta", err) + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EtaGrowthRatePerTree", err) + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FeatureBagFraction", err) + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "gamma": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Gamma", err) + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lambda", err) + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "max_attempts_to_add_tree": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAttemptsToAddTree", err) + } + s.MaxAttemptsToAddTree = &value + case float64: + f := int(v) + s.MaxAttemptsToAddTree = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxOptimizationRoundsPerHyperparameter", err) + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTrees", err) + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_folds": + + var 
tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumFolds", err) + } + s.NumFolds = &value + case float64: + f := int(v) + s.NumFolds = &f + } + + case "num_splits_per_feature": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumSplitsPerFeature", err) + } + s.NumSplitsPerFeature = &value + case float64: + f := int(v) + s.NumSplitsPerFeature = &f + } + + case "soft_tree_depth_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthLimit", err) + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SoftTreeDepthTolerance", err) + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + } + } + return nil +} + +// NewHyperparameters returns a Hyperparameters. +func NewHyperparameters() *Hyperparameters { + r := &Hyperparameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyphenationdecompoundertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyphenationdecompoundertokenfilter.go new file mode 100644 index 000000000..dd3a02273 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/hyphenationdecompoundertokenfilter.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// HyphenationDecompounderTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L67-L76 +type HyphenationDecompounderTokenFilter struct { + // HyphenationPatternsPath Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern + // file. + // This path must be absolute or relative to the `config` location. Only FOP + // v1.2 compatible files are supported. + HyphenationPatternsPath string `json:"hyphenation_patterns_path"` + // MaxSubwordSize Maximum subword character length. 
Longer subword tokens are excluded from the + // output. Defaults to `15`. + MaxSubwordSize *int `json:"max_subword_size,omitempty"` + // MinSubwordSize Minimum subword character length. Shorter subword tokens are excluded from + // the output. Defaults to `2`. + MinSubwordSize *int `json:"min_subword_size,omitempty"` + // MinWordSize Minimum word character length. Shorter word tokens are excluded from the + // output. Defaults to `5`. + MinWordSize *int `json:"min_word_size,omitempty"` + // NoOverlappingMatches If `true`, do not allow overlapping tokens. Defaults to `false`. + NoOverlappingMatches *bool `json:"no_overlapping_matches,omitempty"` + // NoSubMatches If `true`, do not match sub tokens in tokens that are in the word list. + // Defaults to `false`. + NoSubMatches *bool `json:"no_sub_matches,omitempty"` + // OnlyLongestMatch If `true`, only include the longest matching subword. Defaults to `false`. + OnlyLongestMatch *bool `json:"only_longest_match,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` + // WordList A list of subwords to look for in the token stream. If found, the subword is + // included in the token output. + // Either this parameter or `word_list_path` must be specified. + WordList []string `json:"word_list,omitempty"` + // WordListPath Path to a file that contains a list of subwords to find in the token stream. + // If found, the subword is included in the token output. + // This path must be absolute or relative to the config location, and the file + // must be UTF-8 encoded. Each token in the file must be separated by a line + // break. + // Either this parameter or `word_list` must be specified. + WordListPath *string `json:"word_list_path,omitempty"` +} + +func (s *HyphenationDecompounderTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hyphenation_patterns_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HyphenationPatternsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HyphenationPatternsPath = o + + case "max_subword_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSubwordSize", err) + } + s.MaxSubwordSize = &value + case float64: + f := int(v) + s.MaxSubwordSize = &f + } + + case "min_subword_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinSubwordSize", err) + } + s.MinSubwordSize = &value + case float64: + f := int(v) + s.MinSubwordSize = &f + } + + case "min_word_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordSize", err) + } + s.MinWordSize = &value + case float64: + f := int(v) + s.MinWordSize = &f + } + + case "no_overlapping_matches": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NoOverlappingMatches", err) + } + s.NoOverlappingMatches = &value + case bool: + s.NoOverlappingMatches = &v + } + + case "no_sub_matches": + var tmp 
any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NoSubMatches", err) + } + s.NoSubMatches = &value + case bool: + s.NoSubMatches = &v + } + + case "only_longest_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OnlyLongestMatch", err) + } + s.OnlyLongestMatch = &value + case bool: + s.OnlyLongestMatch = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "word_list": + if err := dec.Decode(&s.WordList); err != nil { + return fmt.Errorf("%s | %w", "WordList", err) + } + + case "word_list_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WordListPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WordListPath = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HyphenationDecompounderTokenFilter) MarshalJSON() ([]byte, error) { + type innerHyphenationDecompounderTokenFilter HyphenationDecompounderTokenFilter + tmp := innerHyphenationDecompounderTokenFilter{ + HyphenationPatternsPath: s.HyphenationPatternsPath, + MaxSubwordSize: s.MaxSubwordSize, + MinSubwordSize: s.MinSubwordSize, + MinWordSize: s.MinWordSize, + NoOverlappingMatches: s.NoOverlappingMatches, + NoSubMatches: s.NoSubMatches, + OnlyLongestMatch: s.OnlyLongestMatch, + Type: s.Type, + Version: s.Version, + WordList: s.WordList, + WordListPath: s.WordListPath, + } + + tmp.Type = "hyphenation_decompounder" + + return json.Marshal(tmp) +} + +// NewHyphenationDecompounderTokenFilter returns a HyphenationDecompounderTokenFilter. +func NewHyphenationDecompounderTokenFilter() *HyphenationDecompounderTokenFilter { + r := &HyphenationDecompounderTokenFilter{} + + return r +} + +type HyphenationDecompounderTokenFilterVariant interface { + HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter +} + +func (s *HyphenationDecompounderTokenFilter) HyphenationDecompounderTokenFilterCaster() *HyphenationDecompounderTokenFilter { + return s +} + +func (s *HyphenationDecompounderTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icuanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icuanalyzer.go new file mode 100644 index 000000000..4e424ff8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icuanalyzer.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype" +) + +// IcuAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L68-L72 +type IcuAnalyzer struct { + Method icunormalizationtype.IcuNormalizationType `json:"method"` + Mode icunormalizationmode.IcuNormalizationMode `json:"mode"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s IcuAnalyzer) MarshalJSON() ([]byte, error) { + type innerIcuAnalyzer IcuAnalyzer + tmp := innerIcuAnalyzer{ + Method: s.Method, + Mode: s.Mode, + Type: s.Type, + } + + tmp.Type = "icu_analyzer" + + return json.Marshal(tmp) +} + +// NewIcuAnalyzer returns a IcuAnalyzer. +func NewIcuAnalyzer() *IcuAnalyzer { + r := &IcuAnalyzer{} + + return r +} + +type IcuAnalyzerVariant interface { + IcuAnalyzerCaster() *IcuAnalyzer +} + +func (s *IcuAnalyzer) IcuAnalyzerCaster() *IcuAnalyzer { + return s +} + +func (s *IcuAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationproperty.go new file mode 100644 index 000000000..4a5c1206b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationproperty.go @@ -0,0 +1,1079 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
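[Illustrative sketch, not vendored code: the MarshalJSON overrides in these generated types — IcuAnalyzer above, and the filter types that follow — pin the `type` discriminator by converting the value to a local alias type; the alias has the same fields but no MarshalJSON method, so marshalling it cannot recurse. A minimal sketch of the same trick; the `analyzer`/`inner` names are hypothetical.]

package main

import (
	"encoding/json"
	"fmt"
)

type analyzer struct {
	Mode string `json:"mode,omitempty"`
	Type string `json:"type,omitempty"`
}

// MarshalJSON always emits the literal discriminator, whatever Type held.
func (s analyzer) MarshalJSON() ([]byte, error) {
	type inner analyzer // same fields, no methods: marshalling it cannot recurse
	tmp := inner(s)
	tmp.Type = "icu_analyzer"
	return json.Marshal(tmp)
}

func main() {
	b, _ := json.Marshal(analyzer{Mode: "compose"})
	fmt.Println(string(b)) // {"mode":"compose","type":"icu_analyzer"}
}
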
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// IcuCollationProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L103-L127 +type IcuCollationProperty struct { + Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` + CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"case_first,omitempty"` + CaseLevel *bool `json:"case_level,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + Country *string `json:"country,omitempty"` + Decomposition *icucollationdecomposition.IcuCollationDecomposition `json:"decomposition,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + HiraganaQuaternaryMode *bool `json:"hiragana_quaternary_mode,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Index Should the field be searchable? + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + Language *string `json:"language,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + // NullValue Accepts a string value which is substituted for any explicit null values. + // Defaults to null, which means the field is treated as missing. 
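[Illustrative sketch, not vendored code: the `fields` and `properties` maps below are decoded polymorphically — each raw entry's `type` key is peeked at first, then the matching concrete Property is constructed, with a fallback to `object` when the key is absent, as the long switch further below shows. A minimal two-variant sketch of that dispatch; `property`, `keywordProp`, `objectProp`, and `decodeProperties` are hypothetical names.]

package main

import (
	"encoding/json"
	"fmt"
)

type property interface{ kind() string }

type keywordProp struct {
	IgnoreAbove int `json:"ignore_above,omitempty"`
}
type objectProp struct{}

func (keywordProp) kind() string { return "keyword" }
func (objectProp) kind() string  { return "object" }

func decodeProperties(raw map[string]json.RawMessage) (map[string]property, error) {
	out := make(map[string]property, len(raw))
	for name, msg := range raw {
		// Peek at the discriminator before committing to a concrete type.
		var head struct {
			Type string `json:"type"`
		}
		if err := json.Unmarshal(msg, &head); err != nil {
			return nil, err
		}
		switch head.Type {
		case "keyword":
			p := keywordProp{}
			if err := json.Unmarshal(msg, &p); err != nil {
				return nil, err
			}
			out[name] = p
		default: // missing or unknown type falls back to object
			out[name] = objectProp{}
		}
	}
	return out, nil
}

func main() {
	raw := map[string]json.RawMessage{
		"title": json.RawMessage(`{"type":"keyword","ignore_above":256}`),
		"meta":  json.RawMessage(`{}`),
	}
	props, _ := decodeProperties(raw)
	fmt.Println(props["title"].kind(), props["meta"].kind()) // keyword object
}
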
+ NullValue *string `json:"null_value,omitempty"` + Numeric *bool `json:"numeric,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Rules *string `json:"rules,omitempty"` + Store *bool `json:"store,omitempty"` + Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` + VariableTop *string `json:"variable_top,omitempty"` + Variant *string `json:"variant,omitempty"` +} + +func (s *IcuCollationProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alternate": + if err := dec.Decode(&s.Alternate); err != nil { + return fmt.Errorf("%s | %w", "Alternate", err) + } + + case "case_first": + if err := dec.Decode(&s.CaseFirst); err != nil { + return fmt.Errorf("%s | %w", "CaseFirst", err) + } + + case "case_level": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseLevel", err) + } + s.CaseLevel = &value + case bool: + s.CaseLevel = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "country": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Country", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Country = &o + + case "decomposition": + if err := dec.Decode(&s.Decomposition); err != nil { + return fmt.Errorf("%s | %w", "Decomposition", err) + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "hiragana_quaternary_mode": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HiraganaQuaternaryMode", err) + } + s.HiraganaQuaternaryMode = &value + case bool: + s.HiraganaQuaternaryMode = &v + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "norms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Norms", err) + } + s.Norms = &value + case bool: + s.Norms = &v + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "numeric": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Numeric", err) + } + s.Numeric = &value + case bool: + s.Numeric = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + 
localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "rules": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rules = &o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "strength": + if err := dec.Decode(&s.Strength); err != nil { + return fmt.Errorf("%s | %w", "Strength", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "variable_top": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VariableTop", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VariableTop = &o + + case "variant": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Variant", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Variant = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuCollationProperty) MarshalJSON() ([]byte, error) { + type innerIcuCollationProperty IcuCollationProperty + tmp := innerIcuCollationProperty{ + Alternate: s.Alternate, + CaseFirst: s.CaseFirst, + CaseLevel: s.CaseLevel, + CopyTo: s.CopyTo, + Country: s.Country, + Decomposition: s.Decomposition, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + HiraganaQuaternaryMode: s.HiraganaQuaternaryMode, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Language: s.Language, + Meta: s.Meta, + Norms: s.Norms, + NullValue: s.NullValue, + Numeric: s.Numeric, + Properties: s.Properties, + Rules: s.Rules, + Store: s.Store, + Strength: s.Strength, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + VariableTop: s.VariableTop, + Variant: s.Variant, + } + + tmp.Type = "icu_collation_keyword" + + return json.Marshal(tmp) +} + +// NewIcuCollationProperty returns a IcuCollationProperty. +func NewIcuCollationProperty() *IcuCollationProperty { + r := &IcuCollationProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type IcuCollationPropertyVariant interface { + IcuCollationPropertyCaster() *IcuCollationProperty +} + +func (s *IcuCollationProperty) IcuCollationPropertyCaster() *IcuCollationProperty { + return s +} + +func (s *IcuCollationProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationtokenfilter.go new file mode 100644 index 000000000..e313bc423 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icucollationtokenfilter.go @@ -0,0 +1,252 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength" +) + +// IcuCollationTokenFilter type. 
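[Illustrative sketch, not vendored code: Elasticsearch accepts `copy_to` (seen in IcuCollationProperty above) as either a single string or an array of strings; the generated decoder sniffs the raw message for a leading `[` and normalizes both shapes into a slice. A compact standalone sketch of that normalization; `stringOrSlice` is a hypothetical name.]

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// stringOrSlice accepts "a" as well as ["a","b"] and returns a slice either way.
func stringOrSlice(raw json.RawMessage) ([]string, error) {
	if !bytes.HasPrefix(bytes.TrimSpace(raw), []byte("[")) {
		var one string
		if err := json.Unmarshal(raw, &one); err != nil {
			return nil, err
		}
		return []string{one}, nil
	}
	var many []string
	if err := json.Unmarshal(raw, &many); err != nil {
		return nil, err
	}
	return many, nil
}

func main() {
	a, _ := stringOrSlice(json.RawMessage(`"field_a"`))
	b, _ := stringOrSlice(json.RawMessage(`["field_a","field_b"]`))
	fmt.Println(a, b) // [field_a] [field_a field_b]
}
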
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L52-L66 +type IcuCollationTokenFilter struct { + Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` + CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"caseFirst,omitempty"` + CaseLevel *bool `json:"caseLevel,omitempty"` + Country *string `json:"country,omitempty"` + Decomposition *icucollationdecomposition.IcuCollationDecomposition `json:"decomposition,omitempty"` + HiraganaQuaternaryMode *bool `json:"hiraganaQuaternaryMode,omitempty"` + Language *string `json:"language,omitempty"` + Numeric *bool `json:"numeric,omitempty"` + Rules *string `json:"rules,omitempty"` + Strength *icucollationstrength.IcuCollationStrength `json:"strength,omitempty"` + Type string `json:"type,omitempty"` + VariableTop *string `json:"variableTop,omitempty"` + Variant *string `json:"variant,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuCollationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alternate": + if err := dec.Decode(&s.Alternate); err != nil { + return fmt.Errorf("%s | %w", "Alternate", err) + } + + case "caseFirst": + if err := dec.Decode(&s.CaseFirst); err != nil { + return fmt.Errorf("%s | %w", "CaseFirst", err) + } + + case "caseLevel": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseLevel", err) + } + s.CaseLevel = &value + case bool: + s.CaseLevel = &v + } + + case "country": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Country", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Country = &o + + case "decomposition": + if err := dec.Decode(&s.Decomposition); err != nil { + return fmt.Errorf("%s | %w", "Decomposition", err) + } + + case "hiraganaQuaternaryMode": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HiraganaQuaternaryMode", err) + } + s.HiraganaQuaternaryMode = &value + case bool: + s.HiraganaQuaternaryMode = &v + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "numeric": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Numeric", err) + } + s.Numeric = &value + case bool: + s.Numeric = &v + } + + case "rules": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rules = &o + + case "strength": + if err := dec.Decode(&s.Strength); err != nil { + return fmt.Errorf("%s | %w", "Strength", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "variableTop": + var tmp json.RawMessage + if 
err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VariableTop", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VariableTop = &o + + case "variant": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Variant", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Variant = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuCollationTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuCollationTokenFilter IcuCollationTokenFilter + tmp := innerIcuCollationTokenFilter{ + Alternate: s.Alternate, + CaseFirst: s.CaseFirst, + CaseLevel: s.CaseLevel, + Country: s.Country, + Decomposition: s.Decomposition, + HiraganaQuaternaryMode: s.HiraganaQuaternaryMode, + Language: s.Language, + Numeric: s.Numeric, + Rules: s.Rules, + Strength: s.Strength, + Type: s.Type, + VariableTop: s.VariableTop, + Variant: s.Variant, + Version: s.Version, + } + + tmp.Type = "icu_collation" + + return json.Marshal(tmp) +} + +// NewIcuCollationTokenFilter returns a IcuCollationTokenFilter. +func NewIcuCollationTokenFilter() *IcuCollationTokenFilter { + r := &IcuCollationTokenFilter{} + + return r +} + +type IcuCollationTokenFilterVariant interface { + IcuCollationTokenFilterCaster() *IcuCollationTokenFilter +} + +func (s *IcuCollationTokenFilter) IcuCollationTokenFilterCaster() *IcuCollationTokenFilter { + return s +} + +func (s *IcuCollationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icufoldingtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icufoldingtokenfilter.go new file mode 100644 index 000000000..946c2e08d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icufoldingtokenfilter.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IcuFoldingTokenFilter type. 
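[Illustrative sketch, not vendored code: every UnmarshalJSON in these files, including IcuFoldingTokenFilter's just below, walks the object with json.Decoder.Token, switching on the key tokens until io.EOF rather than decoding through an intermediate map. A stripped-down skeleton of that loop; `filterSketch` is a hypothetical two-field stand-in.]

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

type filterSketch struct {
	Type    string
	Version string
}

func (s *filterSketch) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token() // yields delimiters, keys, and values in turn
		if err != nil {
			if errors.Is(err, io.EOF) {
				break // finished the object
			}
			return err
		}
		switch t {
		case "type":
			if err := dec.Decode(&s.Type); err != nil {
				return err
			}
		case "version":
			if err := dec.Decode(&s.Version); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	var f filterSketch
	_ = json.Unmarshal([]byte(`{"type":"icu_folding","version":"8"}`), &f)
	fmt.Println(f.Type, f.Version) // icu_folding 8
}
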
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L47-L50 +type IcuFoldingTokenFilter struct { + Type string `json:"type,omitempty"` + UnicodeSetFilter string `json:"unicode_set_filter"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuFoldingTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "unicode_set_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnicodeSetFilter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnicodeSetFilter = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuFoldingTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuFoldingTokenFilter IcuFoldingTokenFilter + tmp := innerIcuFoldingTokenFilter{ + Type: s.Type, + UnicodeSetFilter: s.UnicodeSetFilter, + Version: s.Version, + } + + tmp.Type = "icu_folding" + + return json.Marshal(tmp) +} + +// NewIcuFoldingTokenFilter returns a IcuFoldingTokenFilter. +func NewIcuFoldingTokenFilter() *IcuFoldingTokenFilter { + r := &IcuFoldingTokenFilter{} + + return r +} + +type IcuFoldingTokenFilterVariant interface { + IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter +} + +func (s *IcuFoldingTokenFilter) IcuFoldingTokenFilterCaster() *IcuFoldingTokenFilter { + return s +} + +func (s *IcuFoldingTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationcharfilter.go new file mode 100644 index 000000000..047268a46 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationcharfilter.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
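[Illustrative usage sketch, not vendored code: callers never set `Type` themselves; marshalling any of these filter values emits the pinned discriminator. A hypothetical round-trip with the IcuFoldingTokenFilter defined above, assuming the vendored v9 typedapi import path from this diff; the unicode set value is only an example.]

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewIcuFoldingTokenFilter()
	f.UnicodeSetFilter = "[^åäöÅÄÖ]"

	b, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	// The "type" key is injected by the generated MarshalJSON:
	// {"type":"icu_folding","unicode_set_filter":"[^åäöÅÄÖ]"}
	fmt.Println(string(b))
}
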
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype" +) + +// IcuNormalizationCharFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L40-L45 +type IcuNormalizationCharFilter struct { + Mode *icunormalizationmode.IcuNormalizationMode `json:"mode,omitempty"` + Name *icunormalizationtype.IcuNormalizationType `json:"name,omitempty"` + Type string `json:"type,omitempty"` + UnicodeSetFilter *string `json:"unicode_set_filter,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuNormalizationCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "unicode_set_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnicodeSetFilter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnicodeSetFilter = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuNormalizationCharFilter) MarshalJSON() ([]byte, error) { + type innerIcuNormalizationCharFilter IcuNormalizationCharFilter + tmp := innerIcuNormalizationCharFilter{ + Mode: s.Mode, + Name: s.Name, + Type: s.Type, + UnicodeSetFilter: s.UnicodeSetFilter, + Version: s.Version, + } + + tmp.Type = "icu_normalizer" + + return json.Marshal(tmp) +} + +// NewIcuNormalizationCharFilter returns a IcuNormalizationCharFilter. +func NewIcuNormalizationCharFilter() *IcuNormalizationCharFilter { + r := &IcuNormalizationCharFilter{} + + return r +} + +type IcuNormalizationCharFilterVariant interface { + IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter +} + +func (s *IcuNormalizationCharFilter) IcuNormalizationCharFilterCaster() *IcuNormalizationCharFilter { + return s +} + +func (s *IcuNormalizationCharFilter) CharFilterDefinitionCaster() *CharFilterDefinition { + o := CharFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationtokenfilter.go new file mode 100644 index 000000000..db9805840 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icunormalizationtokenfilter.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype" +) + +// IcuNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L35-L38 +type IcuNormalizationTokenFilter struct { + Name icunormalizationtype.IcuNormalizationType `json:"name"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuNormalizationTokenFilter IcuNormalizationTokenFilter + tmp := innerIcuNormalizationTokenFilter{ + Name: s.Name, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_normalizer" + + return json.Marshal(tmp) +} + +// NewIcuNormalizationTokenFilter returns a IcuNormalizationTokenFilter. +func NewIcuNormalizationTokenFilter() *IcuNormalizationTokenFilter { + r := &IcuNormalizationTokenFilter{} + + return r +} + +type IcuNormalizationTokenFilterVariant interface { + IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter +} + +func (s *IcuNormalizationTokenFilter) IcuNormalizationTokenFilterCaster() *IcuNormalizationTokenFilter { + return s +} + +func (s *IcuNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutokenizer.go new file mode 100644 index 000000000..5a6eb1958 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutokenizer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IcuTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L30-L33 +type IcuTokenizer struct { + RuleFiles string `json:"rule_files"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rule_files": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RuleFiles", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RuleFiles = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuTokenizer) MarshalJSON() ([]byte, error) { + type innerIcuTokenizer IcuTokenizer + tmp := innerIcuTokenizer{ + RuleFiles: s.RuleFiles, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_tokenizer" + + return json.Marshal(tmp) +} + +// NewIcuTokenizer returns a IcuTokenizer. +func NewIcuTokenizer() *IcuTokenizer { + r := &IcuTokenizer{} + + return r +} + +type IcuTokenizerVariant interface { + IcuTokenizerCaster() *IcuTokenizer +} + +func (s *IcuTokenizer) IcuTokenizerCaster() *IcuTokenizer { + return s +} + +func (s *IcuTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutransformtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutransformtokenfilter.go new file mode 100644 index 000000000..fea3dc3e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/icutransformtokenfilter.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icutransformdirection" +) + +// IcuTransformTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/icu-plugin.ts#L24-L28 +type IcuTransformTokenFilter struct { + Dir *icutransformdirection.IcuTransformDirection `json:"dir,omitempty"` + Id string `json:"id"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IcuTransformTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dir": + if err := dec.Decode(&s.Dir); err != nil { + return fmt.Errorf("%s | %w", "Dir", err) + } + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuTransformTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuTransformTokenFilter IcuTransformTokenFilter + tmp := innerIcuTransformTokenFilter{ + Dir: s.Dir, + Id: s.Id, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_transform" + + return json.Marshal(tmp) +} + +// NewIcuTransformTokenFilter returns a IcuTransformTokenFilter. +func NewIcuTransformTokenFilter() *IcuTransformTokenFilter { + r := &IcuTransformTokenFilter{} + + return r +} + +type IcuTransformTokenFilterVariant interface { + IcuTransformTokenFilterCaster() *IcuTransformTokenFilter +} + +func (s *IcuTransformTokenFilter) IcuTransformTokenFilterCaster() *IcuTransformTokenFilter { + return s +} + +func (s *IcuTransformTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ids.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ids.go new file mode 100644 index 000000000..db9da3666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ids.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Ids type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L56-L56 +type Ids []string + +type IdsVariant interface { + IdsCaster() *Ids +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/idsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/idsquery.go new file mode 100644 index 000000000..f7f7c27e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/idsquery.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IdsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L88-L96 +type IdsQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Values An array of document IDs. 
+ Values []string `json:"values,omitempty"` +} + +func (s *IdsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "values": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + + s.Values = append(s.Values, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Values); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + } + + } + } + return nil +} + +// NewIdsQuery returns a IdsQuery. +func NewIdsQuery() *IdsQuery { + r := &IdsQuery{} + + return r +} + +type IdsQueryVariant interface { + IdsQueryCaster() *IdsQuery +} + +func (s *IdsQuery) IdsQueryCaster() *IdsQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilm.go new file mode 100644 index 000000000..c8ad0bcd4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilm.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Ilm type. 
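The `values` branch of IdsQuery.UnmarshalJSON above accepts either a single string or an array of strings, sniffing the raw payload for a leading `[`. A trimmed sketch of that single-or-array tolerance, assuming a cut-down stand-in type rather than the full generated query:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// idsQuery is an illustrative stand-in; only the lenient field is kept.
type idsQuery struct {
	Values []string `json:"values,omitempty"`
}

func (q *idsQuery) UnmarshalJSON(data []byte) error {
	var raw struct {
		Values json.RawMessage `json:"values"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	if len(raw.Values) == 0 {
		return nil // field absent
	}
	// Accept both "values": "1" and "values": ["1", "2"].
	if bytes.HasPrefix(bytes.TrimSpace(raw.Values), []byte("[")) {
		return json.Unmarshal(raw.Values, &q.Values)
	}
	var one string
	if err := json.Unmarshal(raw.Values, &one); err != nil {
		return err
	}
	q.Values = append(q.Values, one)
	return nil
}

func main() {
	for _, in := range []string{`{"values":"1"}`, `{"values":["1","2"]}`} {
		var q idsQuery
		if err := json.Unmarshal([]byte(in), &q); err != nil {
			panic(err)
		}
		fmt.Println(q.Values) // [1], then [1 2]
	}
}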
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L172-L175 +type Ilm struct { + PolicyCount int `json:"policy_count"` + PolicyStats []IlmPolicyStatistics `json:"policy_stats"` +} + +func (s *Ilm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policy_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PolicyCount", err) + } + s.PolicyCount = value + case float64: + f := int(v) + s.PolicyCount = f + } + + case "policy_stats": + if err := dec.Decode(&s.PolicyStats); err != nil { + return fmt.Errorf("%s | %w", "PolicyStats", err) + } + + } + } + return nil +} + +// NewIlm returns a Ilm. +func NewIlm() *Ilm { + r := &Ilm{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmactions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmactions.go new file mode 100644 index 000000000..da1137b9f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmactions.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IlmActions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L39-L93 +type IlmActions struct { + // Allocate Phases allowed: warm, cold. + Allocate *AllocateAction `json:"allocate,omitempty"` + // Delete Phases allowed: delete. + Delete *DeleteAction `json:"delete,omitempty"` + // Downsample Phases allowed: hot, warm, cold. + Downsample *DownsampleAction `json:"downsample,omitempty"` + // Forcemerge Phases allowed: hot, warm. + Forcemerge *ForceMergeAction `json:"forcemerge,omitempty"` + // Freeze The freeze action is a noop in 8.x + Freeze *EmptyObject `json:"freeze,omitempty"` + // Migrate Phases allowed: warm, cold. + Migrate *MigrateAction `json:"migrate,omitempty"` + // Readonly Phases allowed: hot, warm, cold. + Readonly *EmptyObject `json:"readonly,omitempty"` + // Rollover Phases allowed: hot. + Rollover *RolloverAction `json:"rollover,omitempty"` + // SearchableSnapshot Phases allowed: hot, cold, frozen. + SearchableSnapshot *SearchableSnapshotAction `json:"searchable_snapshot,omitempty"` + // SetPriority Phases allowed: hot, warm, cold. 
+ SetPriority *SetPriorityAction `json:"set_priority,omitempty"` + // Shrink Phases allowed: hot, warm. + Shrink *ShrinkAction `json:"shrink,omitempty"` + // Unfollow Phases allowed: hot, warm, cold, frozen. + Unfollow *EmptyObject `json:"unfollow,omitempty"` + // WaitForSnapshot Phases allowed: delete. + WaitForSnapshot *WaitForSnapshotAction `json:"wait_for_snapshot,omitempty"` +} + +// NewIlmActions returns a IlmActions. +func NewIlmActions() *IlmActions { + r := &IlmActions{} + + return r +} + +type IlmActionsVariant interface { + IlmActionsCaster() *IlmActions +} + +func (s *IlmActions) IlmActionsCaster() *IlmActions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicator.go new file mode 100644 index 000000000..18cd08aea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// IlmIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L165-L169 +type IlmIndicator struct { + Details *IlmIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *IlmIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewIlmIndicator returns a IlmIndicator. +func NewIlmIndicator() *IlmIndicator { + r := &IlmIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicatordetails.go new file mode 100644 index 000000000..da6682047 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmindicatordetails.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode" +) + +// IlmIndicatorDetails type. 
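A recurring tolerance in these decoders, used by the IlmIndicatorDetails type that follows (and by `policy_count`, `severity`, and the shard counters elsewhere in this batch), is that integers may arrive either as JSON numbers or as quoted numeric strings: the value is decoded into `any` and switched on its concrete type. A compact sketch of the same handling as a reusable helper, which the generated code inlines per field:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexInt decodes from a JSON number or a numeric string, mirroring the
// inline type switches in the generated UnmarshalJSON methods.
type flexInt int64

func (n *flexInt) UnmarshalJSON(data []byte) error {
	var tmp any
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	switch v := tmp.(type) {
	case string:
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return err
		}
		*n = flexInt(i)
	case float64: // encoding/json's default representation for numbers
		*n = flexInt(v)
	}
	return nil
}

func main() {
	var a, b flexInt
	if err := json.Unmarshal([]byte(`42`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`"42"`), &b); err != nil {
		panic(err)
	}
	fmt.Println(a, b) // 42 42
}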
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L170-L174 +type IlmIndicatorDetails struct { + IlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"ilm_status"` + Policies int64 `json:"policies"` + StagnatingIndices int `json:"stagnating_indices"` +} + +func (s *IlmIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ilm_status": + if err := dec.Decode(&s.IlmStatus); err != nil { + return fmt.Errorf("%s | %w", "IlmStatus", err) + } + + case "policies": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Policies", err) + } + s.Policies = value + case float64: + f := int64(v) + s.Policies = f + } + + case "stagnating_indices": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "StagnatingIndices", err) + } + s.StagnatingIndices = value + case float64: + f := int(v) + s.StagnatingIndices = f + } + + } + } + return nil +} + +// NewIlmIndicatorDetails returns a IlmIndicatorDetails. +func NewIlmIndicatorDetails() *IlmIndicatorDetails { + r := &IlmIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicy.go new file mode 100644 index 000000000..89e813dfc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicy.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IlmPolicy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Policy.ts#L23-L29 +type IlmPolicy struct { + // Meta_ Arbitrary metadata that is not automatically generated or used by + // Elasticsearch. 
+ Meta_ Metadata `json:"_meta,omitempty"` + Phases Phases `json:"phases"` +} + +func (s *IlmPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "phases": + if err := dec.Decode(&s.Phases); err != nil { + return fmt.Errorf("%s | %w", "Phases", err) + } + + } + } + return nil +} + +// NewIlmPolicy returns a IlmPolicy. +func NewIlmPolicy() *IlmPolicy { + r := &IlmPolicy{} + + return r +} + +type IlmPolicyVariant interface { + IlmPolicyCaster() *IlmPolicy +} + +func (s *IlmPolicy) IlmPolicyCaster() *IlmPolicy { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicystatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicystatistics.go new file mode 100644 index 000000000..e0e9d349f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ilmpolicystatistics.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IlmPolicyStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L167-L170 +type IlmPolicyStatistics struct { + IndicesManaged int `json:"indices_managed"` + Phases UsagePhases `json:"phases"` +} + +func (s *IlmPolicyStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices_managed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesManaged", err) + } + s.IndicesManaged = value + case float64: + f := int(v) + s.IndicesManaged = f + } + + case "phases": + if err := dec.Decode(&s.Phases); err != nil { + return fmt.Errorf("%s | %w", "Phases", err) + } + + } + } + return nil +} + +// NewIlmPolicyStatistics returns a IlmPolicyStatistics. 
+func NewIlmPolicyStatistics() *IlmPolicyStatistics { + r := &IlmPolicyStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/impact.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/impact.go new file mode 100644 index 000000000..7bcb9dd37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/impact.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/impactarea" +) + +// Impact type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L68-L73 +type Impact struct { + Description string `json:"description"` + Id string `json:"id"` + ImpactAreas []impactarea.ImpactArea `json:"impact_areas"` + Severity int `json:"severity"` +} + +func (s *Impact) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "impact_areas": + if err := dec.Decode(&s.ImpactAreas); err != nil { + return fmt.Errorf("%s | %w", "ImpactAreas", err) + } + + case "severity": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Severity", err) + } + s.Severity = value + case float64: + f := int(v) + s.Severity = f + } + + } + } + return nil +} + +// NewImpact returns a Impact. 
+func NewImpact() *Impact { + r := &Impact{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/includedinvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/includedinvalidation.go new file mode 100644 index 000000000..2027e3b74 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/includedinvalidation.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// IncludedInValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L73-L76 +type IncludedInValidation struct { + Constraint []ScalarValue `json:"constraint"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s IncludedInValidation) MarshalJSON() ([]byte, error) { + type innerIncludedInValidation IncludedInValidation + tmp := innerIncludedInValidation{ + Constraint: s.Constraint, + Type: s.Type, + } + + tmp.Type = "included_in" + + return json.Marshal(tmp) +} + +// NewIncludedInValidation returns a IncludedInValidation. +func NewIncludedInValidation() *IncludedInValidation { + r := &IncludedInValidation{} + + return r +} + +type IncludedInValidationVariant interface { + IncludedInValidationCaster() *IncludedInValidation +} + +func (s *IncludedInValidation) IncludedInValidationCaster() *IncludedInValidation { + return s +} + +func (s *IncludedInValidation) ValidationCaster() *Validation { + o := Validation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaction.go new file mode 100644 index 000000000..ebf85a224 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaction.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh" +) + +// IndexAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L256-L265 +type IndexAction struct { + DocId *string `json:"doc_id,omitempty"` + ExecutionTimeField *string `json:"execution_time_field,omitempty"` + Index string `json:"index"` + OpType *optype.OpType `json:"op_type,omitempty"` + Refresh *refresh.Refresh `json:"refresh,omitempty"` + Timeout Duration `json:"timeout,omitempty"` +} + +func (s *IndexAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_id": + if err := dec.Decode(&s.DocId); err != nil { + return fmt.Errorf("%s | %w", "DocId", err) + } + + case "execution_time_field": + if err := dec.Decode(&s.ExecutionTimeField); err != nil { + return fmt.Errorf("%s | %w", "ExecutionTimeField", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "op_type": + if err := dec.Decode(&s.OpType); err != nil { + return fmt.Errorf("%s | %w", "OpType", err) + } + + case "refresh": + if err := dec.Decode(&s.Refresh); err != nil { + return fmt.Errorf("%s | %w", "Refresh", err) + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} + +// NewIndexAction returns a IndexAction. +func NewIndexAction() *IndexAction { + r := &IndexAction{} + + return r +} + +type IndexActionVariant interface { + IndexActionCaster() *IndexAction +} + +func (s *IndexAction) IndexActionCaster() *IndexAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaliases.go new file mode 100644 index 000000000..288878f11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexaliases.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndexAliases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_alias/_types/response.ts#L24-L26 +type IndexAliases struct { + Aliases map[string]AliasDefinition `json:"aliases"` +} + +// NewIndexAliases returns a IndexAliases. +func NewIndexAliases() *IndexAliases { + r := &IndexAliases{ + Aliases: make(map[string]AliasDefinition), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexanddatastreamaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexanddatastreamaction.go new file mode 100644 index 000000000..87e11893e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexanddatastreamaction.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexAndDataStreamAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/modify_data_stream/types.ts#L39-L44 +type IndexAndDataStreamAction struct { + // DataStream Data stream targeted by the action. + DataStream string `json:"data_stream"` + // Index Index for the action. + Index string `json:"index"` +} + +func (s *IndexAndDataStreamAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewIndexAndDataStreamAction returns a IndexAndDataStreamAction. 
+func NewIndexAndDataStreamAction() *IndexAndDataStreamAction { + r := &IndexAndDataStreamAction{} + + return r +} + +type IndexAndDataStreamActionVariant interface { + IndexAndDataStreamActionCaster() *IndexAndDataStreamAction +} + +func (s *IndexAndDataStreamAction) IndexAndDataStreamActionCaster() *IndexAndDataStreamAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexcapabilities.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexcapabilities.go new file mode 100644 index 000000000..3cf703c83 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexcapabilities.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndexCapabilities type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 +type IndexCapabilities struct { + RollupJobs []RollupJobSummary `json:"rollup_jobs"` +} + +// NewIndexCapabilities returns a IndexCapabilities. +func NewIndexCapabilities() *IndexCapabilities { + r := &IndexCapabilities{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexdetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexdetails.go new file mode 100644 index 000000000..0efb65f08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexdetails.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
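NewIndexAliases above pre-allocates its Aliases map, a convention these constructors follow for map-typed fields so that callers can insert entries immediately; assigning into a nil Go map panics at runtime. A small sketch of the difference, with an illustrative stand-in type rather than the vendored one:

package main

import "fmt"

// indexAliases mirrors the shape of the generated type.
type indexAliases struct {
	Aliases map[string]string
}

// newIndexAliases pre-allocates the map, as the vendored constructor does.
func newIndexAliases() *indexAliases {
	return &indexAliases{Aliases: make(map[string]string)}
}

func main() {
	r := newIndexAliases()
	r.Aliases["logs"] = "logs-write" // safe: map is non-nil
	fmt.Println(r.Aliases)

	var zero indexAliases
	// zero.Aliases["x"] = "y" // would panic: assignment to entry in nil map
	_ = zero
}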
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 +type IndexDetails struct { + MaxSegmentsPerShard int64 `json:"max_segments_per_shard"` + ShardCount int `json:"shard_count"` + Size ByteSize `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *IndexDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_segments_per_shard": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSegmentsPerShard", err) + } + s.MaxSegmentsPerShard = value + case float64: + f := int64(v) + s.MaxSegmentsPerShard = f + } + + case "shard_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardCount", err) + } + s.ShardCount = value + case float64: + f := int(v) + s.ShardCount = f + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewIndexDetails returns a IndexDetails. +func NewIndexDetails() *IndexDetails { + r := &IndexDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexfield.go new file mode 100644 index 000000000..aefcee39e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexfield.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexField type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L46-L48 +type IndexField struct { + Enabled bool `json:"enabled"` +} + +func (s *IndexField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewIndexField returns a IndexField. +func NewIndexField() *IndexField { + r := &IndexField{} + + return r +} + +type IndexFieldVariant interface { + IndexFieldCaster() *IndexField +} + +func (s *IndexField) IndexFieldCaster() *IndexField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go new file mode 100644 index 000000000..a9394b9d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// IndexHealthStats type. 
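String fields throughout this batch (`unicode_set_filter`, `symptom`, `description`, `id`, and others above) are not decoded directly into the struct: the raw token is captured as a json.RawMessage, strconv.Unquote is attempted, and on failure the raw text is kept as-is, so both quoted JSON strings and pre-unquoted values survive. A minimal sketch of that unquote-with-fallback step, with a hypothetical wrapper type in place of the inlined logic:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// lenientString keeps the unquoted form when the input is a JSON string,
// and falls back to the raw token otherwise, mirroring the generated code.
type lenientString string

func (s *lenientString) UnmarshalJSON(data []byte) error {
	var tmp json.RawMessage
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	o := string(tmp)
	if u, err := strconv.Unquote(o); err == nil {
		o = u
	}
	*s = lenientString(o)
	return nil
}

func main() {
	var a, b lenientString
	if err := json.Unmarshal([]byte(`"quoted"`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`123`), &b); err != nil { // non-string token kept verbatim
		panic(err)
	}
	fmt.Println(a, b) // quoted 123
}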
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go
new file mode 100644
index 000000000..a9394b9d5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexhealthstats.go
@@ -0,0 +1,218 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus"
+)
+
+// IndexHealthStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/health/types.ts#L24-L35
+type IndexHealthStats struct {
+	ActivePrimaryShards int `json:"active_primary_shards"`
+	ActiveShards int `json:"active_shards"`
+	InitializingShards int `json:"initializing_shards"`
+	NumberOfReplicas int `json:"number_of_replicas"`
+	NumberOfShards int `json:"number_of_shards"`
+	RelocatingShards int `json:"relocating_shards"`
+	Shards map[string]ShardHealthStats `json:"shards,omitempty"`
+	Status healthstatus.HealthStatus `json:"status"`
+	UnassignedPrimaryShards int `json:"unassigned_primary_shards"`
+	UnassignedShards int `json:"unassigned_shards"`
+}
+
+func (s *IndexHealthStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "active_primary_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ActivePrimaryShards", err)
+				}
+				s.ActivePrimaryShards = value
+			case float64:
+				f := int(v)
+				s.ActivePrimaryShards = f
+			}
+
+		case "active_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ActiveShards", err)
+				}
+				s.ActiveShards = value
+			case float64:
+				f := int(v)
+				s.ActiveShards = f
+			}
+
+		case "initializing_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InitializingShards", err)
+				}
+				s.InitializingShards = value
+			case float64:
+				f := int(v)
+				s.InitializingShards = f
+			}
+
+		case "number_of_replicas":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfReplicas", err)
+				}
+				s.NumberOfReplicas = value
+			case float64:
+				f := int(v)
+				s.NumberOfReplicas = f
+			}
+
+		case "number_of_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfShards", err)
+				}
+				s.NumberOfShards = value
+			case float64:
+				f := int(v)
+				s.NumberOfShards = f
+			}
+
+		case "relocating_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RelocatingShards", err)
+				}
+				s.RelocatingShards = value
+			case float64:
+				f := int(v)
+				s.RelocatingShards = f
+			}
+
+		case "shards":
+			if s.Shards == nil {
+				s.Shards = make(map[string]ShardHealthStats, 0)
+			}
+			if err := dec.Decode(&s.Shards); err != nil {
+				return fmt.Errorf("%s | %w", "Shards", err)
+			}
+
+		case "status":
+			if err := dec.Decode(&s.Status); err != nil {
+				return fmt.Errorf("%s | %w", "Status", err)
+			}
+
+		case "unassigned_primary_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UnassignedPrimaryShards", err)
+				}
+				s.UnassignedPrimaryShards = value
+			case float64:
+				f := int(v)
+				s.UnassignedPrimaryShards = f
+			}
+
+		case "unassigned_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UnassignedShards", err)
+				}
+				s.UnassignedShards = value
+			case float64:
+				f := int(v)
+				s.UnassignedShards = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexHealthStats returns a IndexHealthStats.
+func NewIndexHealthStats() *IndexHealthStats {
+	r := &IndexHealthStats{
+		Shards: make(map[string]ShardHealthStats),
+	}
+
+	return r
+}
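IndexHealthStats combines the same lenient integer handling with typed sub-objects: `status` decodes through the healthstatus enum and `shards` into a map of ShardHealthStats. A hedged sketch of decoding a cluster-health-style fragment (payload invented; import path as in the earlier example):

var h types.IndexHealthStats
raw := []byte(`{"status":"yellow","active_shards":"5","number_of_replicas":1,"unassigned_shards":1}`)
if err := json.Unmarshal(raw, &h); err != nil {
	// handle the error
}
// h.Status now holds the typed enum value for "yellow";
// h.ActiveShards == 5 even though it was sent as a string.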
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogsettings.go
new file mode 100644
index 000000000..f39cf9186
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogsettings.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IndexingSlowlogSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L603-L608
+type IndexingSlowlogSettings struct {
+	Level *string `json:"level,omitempty"`
+	Reformat *bool `json:"reformat,omitempty"`
+	Source *int `json:"source,omitempty"`
+	Threshold *IndexingSlowlogTresholds `json:"threshold,omitempty"`
+}
+
+func (s *IndexingSlowlogSettings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "level":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Level", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Level = &o
+
+		case "reformat":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Reformat", err)
+				}
+				s.Reformat = &value
+			case bool:
+				s.Reformat = &v
+			}
+
+		case "source":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Source", err)
+				}
+				s.Source = &value
+			case float64:
+				f := int(v)
+				s.Source = &f
+			}
+
+		case "threshold":
+			if err := dec.Decode(&s.Threshold); err != nil {
+				return fmt.Errorf("%s | %w", "Threshold", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexingSlowlogSettings returns a IndexingSlowlogSettings.
+func NewIndexingSlowlogSettings() *IndexingSlowlogSettings {
+	r := &IndexingSlowlogSettings{}
+
+	return r
+}
+
+type IndexingSlowlogSettingsVariant interface {
+	IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings
+}
+
+func (s *IndexingSlowlogSettings) IndexingSlowlogSettingsCaster() *IndexingSlowlogSettings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogtresholds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogtresholds.go
new file mode 100644
index 000000000..b410d0dd0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingslowlogtresholds.go
@@ -0,0 +1,46 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexingSlowlogTresholds type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L610-L617
+type IndexingSlowlogTresholds struct {
+	// Index The indexing slow log, similar in functionality to the search slow log. The
+	// log file name ends with `_index_indexing_slowlog.json`.
+	// Log and the thresholds are configured in the same way as the search slowlog.
+	Index *SlowlogTresholdLevels `json:"index,omitempty"`
+}
+
+// NewIndexingSlowlogTresholds returns a IndexingSlowlogTresholds.
+func NewIndexingSlowlogTresholds() *IndexingSlowlogTresholds {
+	r := &IndexingSlowlogTresholds{}
+
+	return r
+}
+
+type IndexingSlowlogTresholdsVariant interface {
+	IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds
+}
+
+func (s *IndexingSlowlogTresholds) IndexingSlowlogTresholdsCaster() *IndexingSlowlogTresholds {
+	return s
+}
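Because nearly every field on these settings types is a pointer, building one by hand means taking the address of locals; the generated constructor only allocates the empty struct. A sketch under that assumption (values illustrative; note the upstream spec's "Tresholds" spelling, without the first "h", is preserved by the generator):

level := "info"
reformat := true
source := 1000
slowlog := types.NewIndexingSlowlogSettings()
slowlog.Level = &level
slowlog.Reformat = &reformat
slowlog.Source = &source
slowlog.Threshold = types.NewIndexingSlowlogTresholds() // Index sub-level left unset here
b, _ := json.Marshal(slowlog)
// b: {"level":"info","reformat":true,"source":1000,"threshold":{}}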
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingstats.go
new file mode 100644
index 000000000..4350c285b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexingstats.go
@@ -0,0 +1,272 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IndexingStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L168-L186
+type IndexingStats struct {
+	DeleteCurrent int64 `json:"delete_current"`
+	DeleteTime Duration `json:"delete_time,omitempty"`
+	DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
+	DeleteTotal int64 `json:"delete_total"`
+	IndexCurrent int64 `json:"index_current"`
+	IndexFailed int64 `json:"index_failed"`
+	IndexTime Duration `json:"index_time,omitempty"`
+	IndexTimeInMillis int64 `json:"index_time_in_millis"`
+	IndexTotal int64 `json:"index_total"`
+	IsThrottled bool `json:"is_throttled"`
+	NoopUpdateTotal int64 `json:"noop_update_total"`
+	PeakWriteLoad *Float64 `json:"peak_write_load,omitempty"`
+	RecentWriteLoad *Float64 `json:"recent_write_load,omitempty"`
+	ThrottleTime Duration `json:"throttle_time,omitempty"`
+	ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+	Types map[string]IndexingStats `json:"types,omitempty"`
+	WriteLoad *Float64 `json:"write_load,omitempty"`
+}
+
+func (s *IndexingStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "delete_current":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DeleteCurrent", err)
+				}
+				s.DeleteCurrent = value
+			case float64:
+				f := int64(v)
+				s.DeleteCurrent = f
+			}
+
+		case "delete_time":
+			if err := dec.Decode(&s.DeleteTime); err != nil {
+				return fmt.Errorf("%s | %w", "DeleteTime", err)
+			}
+
+		case "delete_time_in_millis":
+			if err := dec.Decode(&s.DeleteTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "DeleteTimeInMillis", err)
+			}
+
+		case "delete_total":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DeleteTotal", err)
+				}
+				s.DeleteTotal = value
+			case float64:
+				f := int64(v)
+				s.DeleteTotal = f
+			}
+
+		case "index_current":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IndexCurrent", err)
+				}
+				s.IndexCurrent = value
+			case float64:
+				f := int64(v)
+				s.IndexCurrent = f
+			}
+
+		case "index_failed":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IndexFailed", err)
+				}
+				s.IndexFailed = value
+			case float64:
+				f := int64(v)
+				s.IndexFailed = f
+			}
+
+		case "index_time":
+			if err := dec.Decode(&s.IndexTime); err != nil {
+				return fmt.Errorf("%s | %w", "IndexTime", err)
+			}
+
+		case "index_time_in_millis":
+			if err := dec.Decode(&s.IndexTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "IndexTimeInMillis", err)
+			}
+
+		case "index_total":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IndexTotal", err)
+				}
+				s.IndexTotal = value
+			case float64:
+				f := int64(v)
+				s.IndexTotal = f
+			}
+
+		case "is_throttled":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IsThrottled", err)
+				}
+				s.IsThrottled = value
+			case bool:
+				s.IsThrottled = v
+			}
+
+		case "noop_update_total":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NoopUpdateTotal", err)
+				}
+				s.NoopUpdateTotal = value
+			case float64:
+				f := int64(v)
+				s.NoopUpdateTotal = f
+			}
+
+		case "peak_write_load":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PeakWriteLoad", err)
+				}
+				f := Float64(value)
+				s.PeakWriteLoad = &f
+			case float64:
+				f := Float64(v)
+				s.PeakWriteLoad = &f
+			}
+
+		case "recent_write_load":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RecentWriteLoad", err)
+				}
+				f := Float64(value)
+				s.RecentWriteLoad = &f
+			case float64:
+				f := Float64(v)
+				s.RecentWriteLoad = &f
+			}
+
+		case "throttle_time":
+			if err := dec.Decode(&s.ThrottleTime); err != nil {
+				return fmt.Errorf("%s | %w", "ThrottleTime", err)
+			}
+
+		case "throttle_time_in_millis":
+			if err := dec.Decode(&s.ThrottleTimeInMillis); err != nil {
+				return fmt.Errorf("%s | %w", "ThrottleTimeInMillis", err)
+			}
+
+		case "types":
+			if s.Types == nil {
+				s.Types = make(map[string]IndexingStats, 0)
+			}
+			if err := dec.Decode(&s.Types); err != nil {
+				return fmt.Errorf("%s | %w", "Types", err)
+			}
+
+		case "write_load":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "WriteLoad", err)
+				}
+				f := Float64(value)
+				s.WriteLoad = &f
+			case float64:
+				f := Float64(v)
+				s.WriteLoad = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexingStats returns a IndexingStats.
+func NewIndexingStats() *IndexingStats {
+	r := &IndexingStats{
+		Types: make(map[string]IndexingStats),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexmappingrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexmappingrecord.go
new file mode 100644
index 000000000..98dd6b209
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexmappingrecord.go
@@ -0,0 +1,36 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexMappingRecord type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32
+type IndexMappingRecord struct {
+	Item *TypeMapping `json:"item,omitempty"`
+	Mappings TypeMapping `json:"mappings"`
+}
+
+// NewIndexMappingRecord returns a IndexMappingRecord.
+func NewIndexMappingRecord() *IndexMappingRecord {
+	r := &IndexMappingRecord{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexoperation.go
new file mode 100644
index 000000000..fb9b0b2b0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexoperation.go
@@ -0,0 +1,179 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype"
+)
+
+// IndexOperation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L142-L142
+type IndexOperation struct {
+	// DynamicTemplates A map from the full name of fields to the name of dynamic templates.
+	// It defaults to an empty map.
+	// If a name matches a dynamic template, that template will be applied
+	// regardless of other match predicates defined in the template.
+	// If a field is already defined in the mapping, then this parameter won't be
+	// used.
+	DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"`
+	// Id_ The document ID.
+	Id_ *string `json:"_id,omitempty"`
+	IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"`
+	IfSeqNo *int64 `json:"if_seq_no,omitempty"`
+	// Index_ The name of the index or index alias to perform the action on.
+	Index_ *string `json:"_index,omitempty"`
+	// Pipeline The ID of the pipeline to use to preprocess incoming documents.
+	// If the index has a default ingest pipeline specified, setting the value to
+	// `_none` turns off the default ingest pipeline for this request.
+	// If a final pipeline is configured, it will always run regardless of the value
+	// of this parameter.
+	Pipeline *string `json:"pipeline,omitempty"`
+	// RequireAlias If `true`, the request's actions must target an index alias.
+	RequireAlias *bool `json:"require_alias,omitempty"`
+	// Routing A custom value used to route operations to a specific shard.
+	Routing *string `json:"routing,omitempty"`
+	Version *int64 `json:"version,omitempty"`
+	VersionType *versiontype.VersionType `json:"version_type,omitempty"`
+}
+
+func (s *IndexOperation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "dynamic_templates":
+			if s.DynamicTemplates == nil {
+				s.DynamicTemplates = make(map[string]string, 0)
+			}
+			if err := dec.Decode(&s.DynamicTemplates); err != nil {
+				return fmt.Errorf("%s | %w", "DynamicTemplates", err)
+			}
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		case "if_primary_term":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IfPrimaryTerm", err)
+				}
+				s.IfPrimaryTerm = &value
+			case float64:
+				f := int64(v)
+				s.IfPrimaryTerm = &f
+			}
+
+		case "if_seq_no":
+			if err := dec.Decode(&s.IfSeqNo); err != nil {
+				return fmt.Errorf("%s | %w", "IfSeqNo", err)
+			}
+
+		case "_index":
+			if err := dec.Decode(&s.Index_); err != nil {
+				return fmt.Errorf("%s | %w", "Index_", err)
+			}
+
+		case "pipeline":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Pipeline", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Pipeline = &o
+
+		case "require_alias":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RequireAlias", err)
+				}
+				s.RequireAlias = &value
+			case bool:
+				s.RequireAlias = &v
+			}
+
+		case "routing":
+			if err := dec.Decode(&s.Routing); err != nil {
+				return fmt.Errorf("%s | %w", "Routing", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		case "version_type":
+			if err := dec.Decode(&s.VersionType); err != nil {
+				return fmt.Errorf("%s | %w", "VersionType", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexOperation returns a IndexOperation.
+func NewIndexOperation() *IndexOperation {
+	r := &IndexOperation{
+		DynamicTemplates: make(map[string]string),
+	}
+
+	return r
+}
+
+type IndexOperationVariant interface {
+	IndexOperationCaster() *IndexOperation
}

+func (s *IndexOperation) IndexOperationCaster() *IndexOperation {
+	return s
+}
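In the bulk API this struct is the action metadata that precedes a document on its own NDJSON line, conventionally nested under an "index" key. A hedged sketch of how the omitempty pointer fields keep the marshalled header minimal (document ID and index name invented):

id := "doc-1"
index := "logs-2024"
requireAlias := true
op := types.NewIndexOperation()
op.Id_ = &id
op.Index_ = &index
op.RequireAlias = &requireAlias
header, _ := json.Marshal(op)
// header: {"_id":"doc-1","_index":"logs-2024","require_alias":true}
// (DynamicTemplates is an empty map, so omitempty drops it.)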
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexprivilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexprivilegescheck.go
new file mode 100644
index 000000000..127552412
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexprivilegescheck.go
@@ -0,0 +1,121 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege"
+)
+
+// IndexPrivilegesCheck type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/types.ts#L34-L45
+type IndexPrivilegesCheck struct {
+	// AllowRestrictedIndices This needs to be set to `true` (default is `false`) if using wildcards or
+	// regexps for patterns that cover restricted indices.
+	// Implicitly, restricted indices do not match index patterns because restricted
+	// indices usually have limited privileges and including them in pattern tests
+	// would render most such tests false.
+	// If restricted indices are explicitly included in the names list, privileges
+	// will be checked against them regardless of the value of
+	// `allow_restricted_indices`.
+	AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"`
+	// Names A list of indices.
+	Names []string `json:"names"`
+	// Privileges A list of the privileges that you want to check for the specified indices.
+	Privileges []indexprivilege.IndexPrivilege `json:"privileges"`
+}
+
+func (s *IndexPrivilegesCheck) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_restricted_indices":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err)
+				}
+				s.AllowRestrictedIndices = &value
+			case bool:
+				s.AllowRestrictedIndices = &v
+			}
+
+		case "names":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Names", err)
+				}
+
+				s.Names = append(s.Names, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil {
+					return fmt.Errorf("%s | %w", "Names", err)
+				}
+			}
+
+		case "privileges":
+			if err := dec.Decode(&s.Privileges); err != nil {
+				return fmt.Errorf("%s | %w", "Privileges", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexPrivilegesCheck returns a IndexPrivilegesCheck.
+func NewIndexPrivilegesCheck() *IndexPrivilegesCheck {
+	r := &IndexPrivilegesCheck{}
+
+	return r
+}
+
+type IndexPrivilegesCheckVariant interface {
+	IndexPrivilegesCheckCaster() *IndexPrivilegesCheck
+}
+
+func (s *IndexPrivilegesCheck) IndexPrivilegesCheckCaster() *IndexPrivilegesCheck {
+	return s
+}
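The `names` case above implements the spec's string-or-array union: a bare scalar is wrapped into a one-element slice, while an array decodes directly. Both payload shapes below therefore produce the same field type (a sketch; the index patterns are invented):

var a, b types.IndexPrivilegesCheck
_ = json.Unmarshal([]byte(`{"names":"logs-*"}`), &a)
_ = json.Unmarshal([]byte(`{"names":["logs-*","metrics-*"]}`), &b)
fmt.Println(a.Names) // [logs-*]
fmt.Println(b.Names) // [logs-* metrics-*]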
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresult.go
new file mode 100644
index 000000000..e3903ca0e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresult.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexResult type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L267-L269
+type IndexResult struct {
+	Response IndexResultSummary `json:"response"`
+}
+
+// NewIndexResult returns a IndexResult.
+func NewIndexResult() *IndexResult {
+	r := &IndexResult{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresultsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresultsummary.go
new file mode 100644
index 000000000..dc4dc4a65
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexresultsummary.go
@@ -0,0 +1,104 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result"
+)
+
+// IndexResultSummary type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L271-L277
+type IndexResultSummary struct {
+	Created bool `json:"created"`
+	Id string `json:"id"`
+	Index string `json:"index"`
+	Result result.Result `json:"result"`
+	Version int64 `json:"version"`
+}
+
+func (s *IndexResultSummary) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "created":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Created", err)
+				}
+				s.Created = value
+			case bool:
+				s.Created = v
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "index":
+			if err := dec.Decode(&s.Index); err != nil {
+				return fmt.Errorf("%s | %w", "Index", err)
+			}
+
+		case "result":
+			if err := dec.Decode(&s.Result); err != nil {
+				return fmt.Errorf("%s | %w", "Result", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexResultSummary returns a IndexResultSummary.
+func NewIndexResultSummary() *IndexResultSummary {
+	r := &IndexResultSummary{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexrouting.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexrouting.go
new file mode 100644
index 000000000..5a1732d29
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexrouting.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexRouting type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L22-L25
+type IndexRouting struct {
+	Allocation *IndexRoutingAllocation `json:"allocation,omitempty"`
+	Rebalance *IndexRoutingRebalance `json:"rebalance,omitempty"`
+}
+
+// NewIndexRouting returns a IndexRouting.
+func NewIndexRouting() *IndexRouting {
+	r := &IndexRouting{}
+
+	return r
+}
+
+type IndexRoutingVariant interface {
+	IndexRoutingCaster() *IndexRouting
+}
+
+func (s *IndexRouting) IndexRoutingCaster() *IndexRouting {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocation.go
new file mode 100644
index 000000000..77eafbe67
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocation.go
@@ -0,0 +1,50 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingallocationoptions"
+)
+
+// IndexRoutingAllocation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L27-L32
+type IndexRoutingAllocation struct {
+	Disk *IndexRoutingAllocationDisk `json:"disk,omitempty"`
+	Enable *indexroutingallocationoptions.IndexRoutingAllocationOptions `json:"enable,omitempty"`
+	Include *IndexRoutingAllocationInclude `json:"include,omitempty"`
+	InitialRecovery *IndexRoutingAllocationInitialRecovery `json:"initial_recovery,omitempty"`
+}
+
+// NewIndexRoutingAllocation returns a IndexRoutingAllocation.
+func NewIndexRoutingAllocation() *IndexRoutingAllocation {
+	r := &IndexRoutingAllocation{}
+
+	return r
+}
+
+type IndexRoutingAllocationVariant interface {
+	IndexRoutingAllocationCaster() *IndexRoutingAllocation
+}
+
+func (s *IndexRoutingAllocation) IndexRoutingAllocationCaster() *IndexRoutingAllocation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationdisk.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationdisk.go
new file mode 100644
index 000000000..b615f0759
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationdisk.go
@@ -0,0 +1,84 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IndexRoutingAllocationDisk type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L62-L64
+type IndexRoutingAllocationDisk struct {
+	ThresholdEnabled *string `json:"threshold_enabled,omitempty"`
+}
+
+func (s *IndexRoutingAllocationDisk) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "threshold_enabled":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ThresholdEnabled", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ThresholdEnabled = &o
+
+		}
+	}
+	return nil
+}
+
+// NewIndexRoutingAllocationDisk returns a IndexRoutingAllocationDisk.
+func NewIndexRoutingAllocationDisk() *IndexRoutingAllocationDisk {
+	r := &IndexRoutingAllocationDisk{}
+
+	return r
+}
+
+type IndexRoutingAllocationDiskVariant interface {
+	IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk
+}
+
+func (s *IndexRoutingAllocationDisk) IndexRoutingAllocationDiskCaster() *IndexRoutingAllocationDisk {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinclude.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinclude.go
new file mode 100644
index 000000000..41327a2e4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinclude.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IndexRoutingAllocationInclude type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L52-L55
+type IndexRoutingAllocationInclude struct {
+	Id_ *string `json:"_id,omitempty"`
+	TierPreference_ *string `json:"_tier_preference,omitempty"`
+}
+
+func (s *IndexRoutingAllocationInclude) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		case "_tier_preference":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "TierPreference_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.TierPreference_ = &o
+
+		}
+	}
+	return nil
+}
+
+// NewIndexRoutingAllocationInclude returns a IndexRoutingAllocationInclude.
+func NewIndexRoutingAllocationInclude() *IndexRoutingAllocationInclude {
+	r := &IndexRoutingAllocationInclude{}
+
+	return r
+}
+
+type IndexRoutingAllocationIncludeVariant interface {
+	IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude
+}
+
+func (s *IndexRoutingAllocationInclude) IndexRoutingAllocationIncludeCaster() *IndexRoutingAllocationInclude {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinitialrecovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinitialrecovery.go
new file mode 100644
index 000000000..c6a6d2ab1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingallocationinitialrecovery.go
@@ -0,0 +1,76 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// IndexRoutingAllocationInitialRecovery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L57-L59
+type IndexRoutingAllocationInitialRecovery struct {
+	Id_ *string `json:"_id,omitempty"`
+}
+
+func (s *IndexRoutingAllocationInitialRecovery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexRoutingAllocationInitialRecovery returns a IndexRoutingAllocationInitialRecovery.
+func NewIndexRoutingAllocationInitialRecovery() *IndexRoutingAllocationInitialRecovery {
+	r := &IndexRoutingAllocationInitialRecovery{}
+
+	return r
+}
+
+type IndexRoutingAllocationInitialRecoveryVariant interface {
+	IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery
+}
+
+func (s *IndexRoutingAllocationInitialRecovery) IndexRoutingAllocationInitialRecoveryCaster() *IndexRoutingAllocationInitialRecovery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingrebalance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingrebalance.go
new file mode 100644
index 000000000..d6b2f8eaf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexroutingrebalance.go
@@ -0,0 +1,47 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingrebalanceoptions"
+)
+
+// IndexRoutingRebalance type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexRouting.ts#L34-L36
+type IndexRoutingRebalance struct {
+	Enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions `json:"enable"`
+}
+
+// NewIndexRoutingRebalance returns a IndexRoutingRebalance.
+func NewIndexRoutingRebalance() *IndexRoutingRebalance {
+	r := &IndexRoutingRebalance{}
+
+	return r
+}
+
+type IndexRoutingRebalanceVariant interface {
+	IndexRoutingRebalanceCaster() *IndexRoutingRebalance
+}
+
+func (s *IndexRoutingRebalance) IndexRoutingRebalanceCaster() *IndexRoutingRebalance {
+	return s
+}
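These routing types nest rather than flatten: IndexRouting holds an Allocation, which in turn holds the disk, include, and initial-recovery blocks. A sketch of composing a tier-preference rule and the JSON it produces (the tier name is illustrative):

tier := "data_hot"
routing := types.NewIndexRouting()
routing.Allocation = types.NewIndexRoutingAllocation()
routing.Allocation.Include = &types.IndexRoutingAllocationInclude{TierPreference_: &tier}
b, _ := json.Marshal(routing)
// b: {"allocation":{"include":{"_tier_preference":"data_hot"}}}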
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegment.go
new file mode 100644
index 000000000..6afb3062e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegment.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexSegment type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/segments/types.ts#L24-L26
+type IndexSegment struct {
+	Shards map[string][]ShardsSegment `json:"shards"`
+}
+
+// NewIndexSegment returns a IndexSegment.
+func NewIndexSegment() *IndexSegment {
+	r := &IndexSegment{
+		Shards: make(map[string][]ShardsSegment),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegmentsort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegmentsort.go
new file mode 100644
index 000000000..20a476393
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsegmentsort.go
@@ -0,0 +1,142 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmissing"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmode"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortorder"
+)
+
+// IndexSegmentSort type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSegmentSort.ts#L22-L27
+type IndexSegmentSort struct {
+	Field []string `json:"field,omitempty"`
+	Missing []segmentsortmissing.SegmentSortMissing `json:"missing,omitempty"`
+	Mode []segmentsortmode.SegmentSortMode `json:"mode,omitempty"`
+	Order []segmentsortorder.SegmentSortOrder `json:"order,omitempty"`
+}
+
+func (s *IndexSegmentSort) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Field", err)
+				}
+
+				s.Field = append(s.Field, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Field); err != nil {
+					return fmt.Errorf("%s | %w", "Field", err)
+				}
+			}
+
+		case "missing":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := &segmentsortmissing.SegmentSortMissing{}
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Missing", err)
+				}
+
+				s.Missing = append(s.Missing, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Missing); err != nil {
+					return fmt.Errorf("%s | %w", "Missing", err)
+				}
+			}
+
+		case "mode":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := &segmentsortmode.SegmentSortMode{}
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Mode", err)
+				}
+
+				s.Mode = append(s.Mode, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Mode); err != nil {
+					return fmt.Errorf("%s | %w", "Mode", err)
+				}
+			}
+
+		case "order":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := &segmentsortorder.SegmentSortOrder{}
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Order", err)
+				}
+
+				s.Order = append(s.Order, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Order); err != nil {
+					return fmt.Errorf("%s | %w", "Order", err)
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexSegmentSort returns a IndexSegmentSort.
+func NewIndexSegmentSort() *IndexSegmentSort {
+	r := &IndexSegmentSort{}
+
+	return r
+}
+
+type IndexSegmentSortVariant interface {
+	IndexSegmentSortCaster() *IndexSegmentSort
+}
+
+func (s *IndexSegmentSort) IndexSegmentSortCaster() *IndexSegmentSort {
+	return s
+}
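IndexSegmentSort applies the same scalar-or-array tolerance to every field, so a single-field segment sort can be written without array brackets. A hedged sketch (field names invented; the enum values are assumed to decode from their string forms via the generated enums' usual UnmarshalJSON):

var s1, s2 types.IndexSegmentSort
_ = json.Unmarshal([]byte(`{"field":"timestamp","order":"desc"}`), &s1)
_ = json.Unmarshal([]byte(`{"field":["timestamp","host"],"order":["desc","asc"]}`), &s2)
// s1.Field == []string{"timestamp"}; both decodes yield slices.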
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingblocks.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingblocks.go
new file mode 100644
index 000000000..7f7563933
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingblocks.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// IndexSettingBlocks type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L264-L270
+type IndexSettingBlocks struct {
+	Metadata Stringifiedboolean `json:"metadata,omitempty"`
+	Read Stringifiedboolean `json:"read,omitempty"`
+	ReadOnly Stringifiedboolean `json:"read_only,omitempty"`
+	ReadOnlyAllowDelete Stringifiedboolean `json:"read_only_allow_delete,omitempty"`
+	Write Stringifiedboolean `json:"write,omitempty"`
+}
+
+func (s *IndexSettingBlocks) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "metadata":
+			if err := dec.Decode(&s.Metadata); err != nil {
+				return fmt.Errorf("%s | %w", "Metadata", err)
+			}
+
+		case "read":
+			if err := dec.Decode(&s.Read); err != nil {
+				return fmt.Errorf("%s | %w", "Read", err)
+			}
+
+		case "read_only":
+			if err := dec.Decode(&s.ReadOnly); err != nil {
+				return fmt.Errorf("%s | %w", "ReadOnly", err)
+			}
+
+		case "read_only_allow_delete":
+			if err := dec.Decode(&s.ReadOnlyAllowDelete); err != nil {
+				return fmt.Errorf("%s | %w", "ReadOnlyAllowDelete", err)
+			}
+
+		case "write":
+			if err := dec.Decode(&s.Write); err != nil {
+				return fmt.Errorf("%s | %w", "Write", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndexSettingBlocks returns a IndexSettingBlocks.
+func NewIndexSettingBlocks() *IndexSettingBlocks {
+	r := &IndexSettingBlocks{}
+
+	return r
+}
+
+type IndexSettingBlocksVariant interface {
+	IndexSettingBlocksCaster() *IndexSettingBlocks
+}
+
+func (s *IndexSettingBlocks) IndexSettingBlocksCaster() *IndexSettingBlocks {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingresults.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingresults.go
new file mode 100644
index 000000000..f4e620a63
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingresults.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndexSettingResults type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L57-L69
+type IndexSettingResults struct {
+	// AppliedToDataStreamAndBackingIndices The list of settings that were applied to the data stream and to all of its
+	// backing indices. These settings will
+	// also be applied to the write index the next time the data stream is rolled
+	// over.
+	AppliedToDataStreamAndBackingIndices []string `json:"applied_to_data_stream_and_backing_indices"`
+	// AppliedToDataStreamOnly The list of settings that were applied to the data stream but not to backing
+	// indices. These will be applied to
+	// the write index the next time the data stream is rolled over.
+	AppliedToDataStreamOnly []string `json:"applied_to_data_stream_only"`
+	Errors []DataStreamSettingsError `json:"errors,omitempty"`
+}
+
+// NewIndexSettingResults returns a IndexSettingResults.
+func NewIndexSettingResults() *IndexSettingResults {
+	r := &IndexSettingResults{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettings.go
new file mode 100644
index 000000000..6c4eb974b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettings.go
@@ -0,0 +1,750 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup"
+)
+
+// IndexSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L70-L178
+type IndexSettings struct {
+	Analysis *IndexSettingsAnalysis `json:"analysis,omitempty"`
+	// Analyze Settings to define analyzers, tokenizers, token filters and character
+	// filters.
+	// Refer to the linked documentation for step-by-step examples of updating
+	// analyzers on existing indices.
+	Analyze *SettingsAnalyze `json:"analyze,omitempty"`
+	AutoExpandReplicas any `json:"auto_expand_replicas,omitempty"`
+	Blocks *IndexSettingBlocks `json:"blocks,omitempty"`
+	CheckOnStartup *indexcheckonstartup.IndexCheckOnStartup `json:"check_on_startup,omitempty"`
+	Codec *string `json:"codec,omitempty"`
+	CreationDate StringifiedEpochTimeUnitMillis `json:"creation_date,omitempty"`
+	CreationDateString DateTime `json:"creation_date_string,omitempty"`
+	DefaultPipeline *string `json:"default_pipeline,omitempty"`
+	FinalPipeline *string `json:"final_pipeline,omitempty"`
+	Format *string `json:"format,omitempty"`
+	GcDeletes Duration `json:"gc_deletes,omitempty"`
+	Hidden *string `json:"hidden,omitempty"`
+	Highlight *SettingsHighlight `json:"highlight,omitempty"`
+	Index *IndexSettings `json:"index,omitempty"`
+	IndexSettings map[string]json.RawMessage `json:"-"`
+	// IndexingPressure Configure indexing back pressure limits.
+	IndexingPressure *IndicesIndexingPressure `json:"indexing_pressure,omitempty"`
+	IndexingSlowlog *IndexingSlowlogSettings `json:"indexing.slowlog,omitempty"`
+	Lifecycle *IndexSettingsLifecycle `json:"lifecycle,omitempty"`
+	LoadFixedBitsetFiltersEagerly *bool `json:"load_fixed_bitset_filters_eagerly,omitempty"`
+	// Mapping Enable or disable dynamic mapping for an index.
+	Mapping *MappingLimitSettings `json:"mapping,omitempty"`
+	MaxDocvalueFieldsSearch *int `json:"max_docvalue_fields_search,omitempty"`
+	MaxInnerResultWindow *int `json:"max_inner_result_window,omitempty"`
+	MaxNgramDiff *int `json:"max_ngram_diff,omitempty"`
+	MaxRefreshListeners *int `json:"max_refresh_listeners,omitempty"`
+	MaxRegexLength *int `json:"max_regex_length,omitempty"`
+	MaxRescoreWindow *int `json:"max_rescore_window,omitempty"`
+	MaxResultWindow *int `json:"max_result_window,omitempty"`
+	MaxScriptFields *int `json:"max_script_fields,omitempty"`
+	MaxShingleDiff *int `json:"max_shingle_diff,omitempty"`
+	MaxSlicesPerScroll *int `json:"max_slices_per_scroll,omitempty"`
+	MaxTermsCount *int `json:"max_terms_count,omitempty"`
+	Merge *Merge `json:"merge,omitempty"`
+	Mode *string `json:"mode,omitempty"`
+	NumberOfReplicas *string `json:"number_of_replicas,omitempty"`
+	NumberOfRoutingShards *int `json:"number_of_routing_shards,omitempty"`
+	NumberOfShards *string `json:"number_of_shards,omitempty"`
+	Priority *string `json:"priority,omitempty"`
+	ProvidedName *string `json:"provided_name,omitempty"`
+	Queries *Queries `json:"queries,omitempty"`
+	QueryString *SettingsQueryString `json:"query_string,omitempty"`
+	RefreshInterval Duration `json:"refresh_interval,omitempty"`
+	Routing *IndexRouting `json:"routing,omitempty"`
+	RoutingPartitionSize Stringifiedinteger `json:"routing_partition_size,omitempty"`
+	RoutingPath []string `json:"routing_path,omitempty"`
+	Search *SettingsSearch `json:"search,omitempty"`
+	Settings *IndexSettings `json:"settings,omitempty"`
+	// Similarity Configure custom similarity settings to customize how search results are
+	// scored.
+ Similarity map[string]SettingsSimilarity `json:"similarity,omitempty"` + SoftDeletes *SoftDeletes `json:"soft_deletes,omitempty"` + Sort *IndexSegmentSort `json:"sort,omitempty"` + // Store The store module allows you to control how index data is stored and accessed + // on disk. + Store *Storage `json:"store,omitempty"` + TimeSeries *IndexSettingsTimeSeries `json:"time_series,omitempty"` + TopMetricsMaxSize *int `json:"top_metrics_max_size,omitempty"` + Translog *Translog `json:"translog,omitempty"` + Uuid *string `json:"uuid,omitempty"` + VerifiedBeforeClose *string `json:"verified_before_close,omitempty"` + Version *IndexVersioning `json:"version,omitempty"` +} + +func (s *IndexSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return fmt.Errorf("%s | %w", "Analysis", err) + } + + case "analyze": + if err := dec.Decode(&s.Analyze); err != nil { + return fmt.Errorf("%s | %w", "Analyze", err) + } + + case "auto_expand_replicas": + if err := dec.Decode(&s.AutoExpandReplicas); err != nil { + return fmt.Errorf("%s | %w", "AutoExpandReplicas", err) + } + + case "blocks": + if err := dec.Decode(&s.Blocks); err != nil { + return fmt.Errorf("%s | %w", "Blocks", err) + } + + case "check_on_startup": + if err := dec.Decode(&s.CheckOnStartup); err != nil { + return fmt.Errorf("%s | %w", "CheckOnStartup", err) + } + + case "codec": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Codec", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Codec = &o + + case "creation_date": + if err := dec.Decode(&s.CreationDate); err != nil { + return fmt.Errorf("%s | %w", "CreationDate", err) + } + + case "creation_date_string": + if err := dec.Decode(&s.CreationDateString); err != nil { + return fmt.Errorf("%s | %w", "CreationDateString", err) + } + + case "default_pipeline": + if err := dec.Decode(&s.DefaultPipeline); err != nil { + return fmt.Errorf("%s | %w", "DefaultPipeline", err) + } + + case "final_pipeline": + if err := dec.Decode(&s.FinalPipeline); err != nil { + return fmt.Errorf("%s | %w", "FinalPipeline", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gc_deletes": + if err := dec.Decode(&s.GcDeletes); err != nil { + return fmt.Errorf("%s | %w", "GcDeletes", err) + } + + case "hidden": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Hidden", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hidden = &o + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "indexing_pressure": + if err := dec.Decode(&s.IndexingPressure); err != nil { + return fmt.Errorf("%s | %w", "IndexingPressure", err) + } + + case "indexing.slowlog": + if err := dec.Decode(&s.IndexingSlowlog); err != nil { + return fmt.Errorf("%s | %w", "IndexingSlowlog", err) + } + + 
case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "load_fixed_bitset_filters_eagerly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "LoadFixedBitsetFiltersEagerly", err) + } + s.LoadFixedBitsetFiltersEagerly = &value + case bool: + s.LoadFixedBitsetFiltersEagerly = &v + } + + case "mapping": + if err := dec.Decode(&s.Mapping); err != nil { + return fmt.Errorf("%s | %w", "Mapping", err) + } + + case "max_docvalue_fields_search": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocvalueFieldsSearch", err) + } + s.MaxDocvalueFieldsSearch = &value + case float64: + f := int(v) + s.MaxDocvalueFieldsSearch = &f + } + + case "max_inner_result_window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInnerResultWindow", err) + } + s.MaxInnerResultWindow = &value + case float64: + f := int(v) + s.MaxInnerResultWindow = &f + } + + case "max_ngram_diff": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNgramDiff", err) + } + s.MaxNgramDiff = &value + case float64: + f := int(v) + s.MaxNgramDiff = &f + } + + case "max_refresh_listeners": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxRefreshListeners", err) + } + s.MaxRefreshListeners = &value + case float64: + f := int(v) + s.MaxRefreshListeners = &f + } + + case "max_regex_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxRegexLength", err) + } + s.MaxRegexLength = &value + case float64: + f := int(v) + s.MaxRegexLength = &f + } + + case "max_rescore_window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxRescoreWindow", err) + } + s.MaxRescoreWindow = &value + case float64: + f := int(v) + s.MaxRescoreWindow = &f + } + + case "max_result_window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxResultWindow", err) + } + s.MaxResultWindow = &value + case float64: + f := int(v) + s.MaxResultWindow = &f + } + + case "max_script_fields": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxScriptFields", err) + } + s.MaxScriptFields = &value + case float64: + f := int(v) + s.MaxScriptFields = &f + } + + case "max_shingle_diff": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxShingleDiff", err) + } + s.MaxShingleDiff = &value + case float64: + f := int(v) + s.MaxShingleDiff = &f + } + + case "max_slices_per_scroll": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return 
fmt.Errorf("%s | %w", "MaxSlicesPerScroll", err) + } + s.MaxSlicesPerScroll = &value + case float64: + f := int(v) + s.MaxSlicesPerScroll = &f + } + + case "max_terms_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTermsCount", err) + } + s.MaxTermsCount = &value + case float64: + f := int(v) + s.MaxTermsCount = &f + } + + case "merge": + if err := dec.Decode(&s.Merge); err != nil { + return fmt.Errorf("%s | %w", "Merge", err) + } + + case "mode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mode = &o + + case "number_of_replicas": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NumberOfReplicas", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NumberOfReplicas = &o + + case "number_of_routing_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfRoutingShards", err) + } + s.NumberOfRoutingShards = &value + case float64: + f := int(v) + s.NumberOfRoutingShards = &f + } + + case "number_of_shards": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NumberOfShards", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NumberOfShards = &o + + case "priority": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = &o + + case "provided_name": + if err := dec.Decode(&s.ProvidedName); err != nil { + return fmt.Errorf("%s | %w", "ProvidedName", err) + } + + case "queries": + if err := dec.Decode(&s.Queries); err != nil { + return fmt.Errorf("%s | %w", "Queries", err) + } + + case "query_string": + if err := dec.Decode(&s.QueryString); err != nil { + return fmt.Errorf("%s | %w", "QueryString", err) + } + + case "refresh_interval": + if err := dec.Decode(&s.RefreshInterval); err != nil { + return fmt.Errorf("%s | %w", "RefreshInterval", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "routing_partition_size": + if err := dec.Decode(&s.RoutingPartitionSize); err != nil { + return fmt.Errorf("%s | %w", "RoutingPartitionSize", err) + } + + case "routing_path": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "RoutingPath", err) + } + + s.RoutingPath = append(s.RoutingPath, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.RoutingPath); err != nil { + return fmt.Errorf("%s | %w", "RoutingPath", err) + } + } + + case "search": + if err := dec.Decode(&s.Search); err != nil { + return fmt.Errorf("%s | %w", "Search", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "similarity": + if s.Similarity == nil { + s.Similarity = 
make(map[string]SettingsSimilarity, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + + switch kind["type"] { + case "BM25": + oo := NewSettingsSimilarityBm25() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "boolean": + oo := NewSettingsSimilarityBoolean() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "DFI": + oo := NewSettingsSimilarityDfi() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "DFR": + oo := NewSettingsSimilarityDfr() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "IB": + oo := NewSettingsSimilarityIb() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "LMDirichlet": + oo := NewSettingsSimilarityLmd() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "LMJelinekMercer": + oo := NewSettingsSimilarityLmj() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + case "scripted": + oo := NewSettingsSimilarityScripted() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Similarity | %w", err) + } + s.Similarity[key] = oo + default: + oo := new(SettingsSimilarity) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(SettingsSimilarity) | %w", err) + } + s.Similarity[key] = oo + } + } + + case "soft_deletes": + if err := dec.Decode(&s.SoftDeletes); err != nil { + return fmt.Errorf("%s | %w", "SoftDeletes", err) + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + + case "time_series": + if err := dec.Decode(&s.TimeSeries); err != nil { + return fmt.Errorf("%s | %w", "TimeSeries", err) + } + + case "top_metrics_max_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopMetricsMaxSize", err) + } + s.TopMetricsMaxSize = &value + case float64: + f := int(v) + s.TopMetricsMaxSize = &f + } + + case "translog": + if err := dec.Decode(&s.Translog); err != nil { + return fmt.Errorf("%s | %w", "Translog", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + case "verified_before_close": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VerifiedBeforeClose", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VerifiedBeforeClose = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + default: + + if key, ok := t.(string); ok { + if s.IndexSettings == nil { + s.IndexSettings = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + 
return fmt.Errorf("%s | %w", "IndexSettings", err)
+				}
+				s.IndexSettings[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s IndexSettings) MarshalJSON() ([]byte, error) {
+	type opt IndexSettings
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.IndexSettings {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "IndexSettings")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewIndexSettings returns a IndexSettings.
+func NewIndexSettings() *IndexSettings {
+	r := &IndexSettings{
+		IndexSettings: make(map[string]json.RawMessage),
+		Similarity:    make(map[string]SettingsSimilarity),
+	}
+
+	return r
+}
+
+type IndexSettingsVariant interface {
+	IndexSettingsCaster() *IndexSettings
+}
+
+func (s *IndexSettings) IndexSettingsCaster() *IndexSettings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingsanalysis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingsanalysis.go
new file mode 100644
index 000000000..ac2acc766
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingsanalysis.go
@@ -0,0 +1,1088 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// IndexSettingsAnalysis type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L346-L352 +type IndexSettingsAnalysis struct { + Analyzer map[string]Analyzer `json:"analyzer,omitempty"` + CharFilter map[string]CharFilter `json:"char_filter,omitempty"` + Filter map[string]TokenFilter `json:"filter,omitempty"` + Normalizer map[string]Normalizer `json:"normalizer,omitempty"` + Tokenizer map[string]Tokenizer `json:"tokenizer,omitempty"` +} + +func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + if s.Analyzer == nil { + s.Analyzer = make(map[string]Analyzer, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "custom" + } + switch kind["type"] { + case "custom": + oo := NewCustomAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "fingerprint": + oo := NewFingerprintAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "keyword": + oo := NewKeywordAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "nori": + oo := NewNoriAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "pattern": + oo := NewPatternAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "simple": + oo := NewSimpleAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "standard": + oo := NewStandardAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "stop": + oo := NewStopAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "whitespace": + oo := NewWhitespaceAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "icu_analyzer": + oo := NewIcuAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "kuromoji": + oo := NewKuromojiAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "snowball": + oo := NewSnowballAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "arabic": + oo := NewArabicAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "armenian": + oo := NewArmenianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "basque": + oo := NewBasqueAnalyzer() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "bengali": + oo := NewBengaliAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "brazilian": + oo := NewBrazilianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "bulgarian": + oo := NewBulgarianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "catalan": + oo := NewCatalanAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "chinese": + oo := NewChineseAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "cjk": + oo := NewCjkAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "czech": + oo := NewCzechAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "danish": + oo := NewDanishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "dutch": + oo := NewDutchAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "english": + oo := NewEnglishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "estonian": + oo := NewEstonianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "finnish": + oo := NewFinnishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "french": + oo := NewFrenchAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "galician": + oo := NewGalicianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "german": + oo := NewGermanAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "greek": + oo := NewGreekAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "hindi": + oo := NewHindiAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "hungarian": + oo := NewHungarianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "indonesian": + oo := NewIndonesianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "irish": + oo := NewIrishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "italian": + oo := NewItalianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "latvian": + oo := 
NewLatvianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "lithuanian": + oo := NewLithuanianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "norwegian": + oo := NewNorwegianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "persian": + oo := NewPersianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "portuguese": + oo := NewPortugueseAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "romanian": + oo := NewRomanianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "russian": + oo := NewRussianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "serbian": + oo := NewSerbianAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "sorani": + oo := NewSoraniAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "spanish": + oo := NewSpanishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "swedish": + oo := NewSwedishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "turkish": + oo := NewTurkishAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + case "thai": + oo := NewThaiAnalyzer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Analyzer | %w", err) + } + s.Analyzer[key] = oo + default: + oo := new(Analyzer) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Analyzer) | %w", err) + } + s.Analyzer[key] = oo + } + } + + case "char_filter": + if s.CharFilter == nil { + s.CharFilter = make(map[string]CharFilter, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + + switch kind["type"] { + case "html_strip": + oo := NewHtmlStripCharFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter[key] = oo + case "mapping": + oo := NewMappingCharFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter[key] = oo + case "pattern_replace": + oo := NewPatternReplaceCharFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter[key] = oo + case "icu_normalizer": + oo := NewIcuNormalizationCharFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter[key] = oo + case "kuromoji_iteration_mark": + oo := NewKuromojiIterationMarkCharFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("CharFilter | %w", err) + } + s.CharFilter[key] 
= oo + default: + oo := new(CharFilter) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(CharFilter) | %w", err) + } + s.CharFilter[key] = oo + } + } + + case "filter": + if s.Filter == nil { + s.Filter = make(map[string]TokenFilter, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + + switch kind["type"] { + case "apostrophe": + oo := NewApostropheTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "arabic_stem": + oo := NewArabicStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "arabic_normalization": + oo := NewArabicNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "asciifolding": + oo := NewAsciiFoldingTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "bengali_normalization": + oo := NewBengaliNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "brazilian_stem": + oo := NewBrazilianStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "cjk_bigram": + oo := NewCjkBigramTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "cjk_width": + oo := NewCjkWidthTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "classic": + oo := NewClassicTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "common_grams": + oo := NewCommonGramsTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "condition": + oo := NewConditionTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "czech_stem": + oo := NewCzechStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "decimal_digit": + oo := NewDecimalDigitTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "delimited_payload": + oo := NewDelimitedPayloadTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "dutch_stem": + oo := NewDutchStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "edge_ngram": + oo := NewEdgeNGramTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "elision": + oo := NewElisionTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "fingerprint": + oo := NewFingerprintTokenFilter() + if err := localDec.Decode(&oo); err != nil 
{ + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "flatten_graph": + oo := NewFlattenGraphTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "french_stem": + oo := NewFrenchStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "german_normalization": + oo := NewGermanNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "german_stem": + oo := NewGermanStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "hindi_normalization": + oo := NewHindiNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "hunspell": + oo := NewHunspellTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "hyphenation_decompounder": + oo := NewHyphenationDecompounderTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "indic_normalization": + oo := NewIndicNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "keep_types": + oo := NewKeepTypesTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "keep": + oo := NewKeepWordsTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "keyword_marker": + oo := NewKeywordMarkerTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "keyword_repeat": + oo := NewKeywordRepeatTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "kstem": + oo := NewKStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "length": + oo := NewLengthTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "limit": + oo := NewLimitTokenCountTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "lowercase": + oo := NewLowercaseTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "min_hash": + oo := NewMinHashTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "multiplexer": + oo := NewMultiplexerTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "ngram": + oo := NewNGramTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "nori_part_of_speech": + oo := NewNoriPartOfSpeechTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "pattern_capture": + oo := 
NewPatternCaptureTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "pattern_replace": + oo := NewPatternReplaceTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "persian_normalization": + oo := NewPersianNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "persian_stem": + oo := NewPersianStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "porter_stem": + oo := NewPorterStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "predicate_token_filter": + oo := NewPredicateTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "remove_duplicates": + oo := NewRemoveDuplicatesTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "reverse": + oo := NewReverseTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "russian_stem": + oo := NewRussianStemTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "scandinavian_folding": + oo := NewScandinavianFoldingTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "scandinavian_normalization": + oo := NewScandinavianNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "serbian_normalization": + oo := NewSerbianNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "shingle": + oo := NewShingleTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "snowball": + oo := NewSnowballTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "sorani_normalization": + oo := NewSoraniNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "stemmer_override": + oo := NewStemmerOverrideTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "stemmer": + oo := NewStemmerTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "stop": + oo := NewStopTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "synonym_graph": + oo := NewSynonymGraphTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "synonym": + oo := NewSynonymTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "trim": + oo := NewTrimTokenFilter() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "truncate": + oo := NewTruncateTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "unique": + oo := NewUniqueTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "uppercase": + oo := NewUppercaseTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "word_delimiter_graph": + oo := NewWordDelimiterGraphTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "word_delimiter": + oo := NewWordDelimiterTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "ja_stop": + oo := NewJaStopTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "kuromoji_stemmer": + oo := NewKuromojiStemmerTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "kuromoji_readingform": + oo := NewKuromojiReadingFormTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "kuromoji_part_of_speech": + oo := NewKuromojiPartOfSpeechTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "icu_collation": + oo := NewIcuCollationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "icu_folding": + oo := NewIcuFoldingTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "icu_normalizer": + oo := NewIcuNormalizationTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "icu_transform": + oo := NewIcuTransformTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "phonetic": + oo := NewPhoneticTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + case "dictionary_decompounder": + oo := NewDictionaryDecompounderTokenFilter() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Filter | %w", err) + } + s.Filter[key] = oo + default: + oo := new(TokenFilter) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(TokenFilter) | %w", err) + } + s.Filter[key] = oo + } + } + + case "normalizer": + if s.Normalizer == nil { + s.Normalizer = make(map[string]Normalizer, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "custom" + } + switch kind["type"] { + case "lowercase": + oo := NewLowercaseNormalizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Normalizer | %w", err) + } + s.Normalizer[key] = oo + case "custom": + oo := NewCustomNormalizer() + if err := localDec.Decode(&oo); err != 
nil { + return fmt.Errorf("Normalizer | %w", err) + } + s.Normalizer[key] = oo + default: + oo := new(Normalizer) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Normalizer) | %w", err) + } + s.Normalizer[key] = oo + } + } + + case "tokenizer": + if s.Tokenizer == nil { + s.Tokenizer = make(map[string]Tokenizer, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + + switch kind["type"] { + case "char_group": + oo := NewCharGroupTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "classic": + oo := NewClassicTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "edge_ngram": + oo := NewEdgeNGramTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "keyword": + oo := NewKeywordTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "letter": + oo := NewLetterTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "lowercase": + oo := NewLowercaseTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "ngram": + oo := NewNGramTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "path_hierarchy": + oo := NewPathHierarchyTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "pattern": + oo := NewPatternTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "simple_pattern": + oo := NewSimplePatternTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "simple_pattern_split": + oo := NewSimplePatternSplitTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "standard": + oo := NewStandardTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "thai": + oo := NewThaiTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "uax_url_email": + oo := NewUaxEmailUrlTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "whitespace": + oo := NewWhitespaceTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "icu_tokenizer": + oo := NewIcuTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case "kuromoji_tokenizer": + oo := NewKuromojiTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + case 
"nori_tokenizer": + oo := NewNoriTokenizer() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Tokenizer | %w", err) + } + s.Tokenizer[key] = oo + default: + oo := new(Tokenizer) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Tokenizer) | %w", err) + } + s.Tokenizer[key] = oo + } + } + + } + } + return nil +} + +// NewIndexSettingsAnalysis returns a IndexSettingsAnalysis. +func NewIndexSettingsAnalysis() *IndexSettingsAnalysis { + r := &IndexSettingsAnalysis{ + Analyzer: make(map[string]Analyzer), + CharFilter: make(map[string]CharFilter), + Filter: make(map[string]TokenFilter), + Normalizer: make(map[string]Normalizer), + Tokenizer: make(map[string]Tokenizer), + } + + return r +} + +type IndexSettingsAnalysisVariant interface { + IndexSettingsAnalysisCaster() *IndexSettingsAnalysis +} + +func (s *IndexSettingsAnalysis) IndexSettingsAnalysisCaster() *IndexSettingsAnalysis { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecycle.go new file mode 100644 index 000000000..ae645bc19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecycle.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexSettingsLifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L297-L336 +type IndexSettingsLifecycle struct { + // IndexingComplete Indicates whether or not the index has been rolled over. Automatically set to + // true when ILM completes the rollover action. + // You can explicitly set it to skip rollover. + IndexingComplete Stringifiedboolean `json:"indexing_complete,omitempty"` + // Name The name of the policy to use to manage the index. For information about how + // Elasticsearch applies policy changes, see Policy updates. + Name *string `json:"name,omitempty"` + // OriginationDate If specified, this is the timestamp used to calculate the index age for its + // phase transitions. Use this setting + // if you create a new index that contains old data and want to use the original + // creation date to calculate the index + // age. Specified as a Unix epoch value in milliseconds. 
+ OriginationDate *int64 `json:"origination_date,omitempty"` + // ParseOriginationDate Set to true to parse the origination date from the index name. This + // origination date is used to calculate the index age + // for its phase transitions. The index name must match the pattern + // ^.*-{date_format}-\\d+, where the date_format is + // yyyy.MM.dd and the trailing digits are optional. An index that was rolled + // over would normally match the full format, + // for example logs-2016.10.31-000002). If the index name doesn’t match the + // pattern, index creation fails. + ParseOriginationDate *bool `json:"parse_origination_date,omitempty"` + // PreferIlm Preference for the system that manages a data stream backing index + // (preferring ILM when both ILM and DLM are + // applicable for an index). + PreferIlm *string `json:"prefer_ilm,omitempty"` + // RolloverAlias The index alias to update when the index rolls over. Specify when using a + // policy that contains a rollover action. + // When the index rolls over, the alias is updated to reflect that the index is + // no longer the write index. For more + // information about rolling indices, see Rollover. + RolloverAlias *string `json:"rollover_alias,omitempty"` + Step *IndexSettingsLifecycleStep `json:"step,omitempty"` +} + +func (s *IndexSettingsLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indexing_complete": + if err := dec.Decode(&s.IndexingComplete); err != nil { + return fmt.Errorf("%s | %w", "IndexingComplete", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "origination_date": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OriginationDate", err) + } + s.OriginationDate = &value + case float64: + f := int64(v) + s.OriginationDate = &f + } + + case "parse_origination_date": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ParseOriginationDate", err) + } + s.ParseOriginationDate = &value + case bool: + s.ParseOriginationDate = &v + } + + case "prefer_ilm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PreferIlm", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreferIlm = &o + + case "rollover_alias": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RolloverAlias", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RolloverAlias = &o + + case "step": + if err := dec.Decode(&s.Step); err != nil { + return fmt.Errorf("%s | %w", "Step", err) + } + + } + } + return nil +} + +// NewIndexSettingsLifecycle returns a IndexSettingsLifecycle. 
+func NewIndexSettingsLifecycle() *IndexSettingsLifecycle { + r := &IndexSettingsLifecycle{} + + return r +} + +type IndexSettingsLifecycleVariant interface { + IndexSettingsLifecycleCaster() *IndexSettingsLifecycle +} + +func (s *IndexSettingsLifecycle) IndexSettingsLifecycleCaster() *IndexSettingsLifecycle { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecyclestep.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecyclestep.go new file mode 100644 index 000000000..4508a04e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingslifecyclestep.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexSettingsLifecycleStep type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L338-L344 +type IndexSettingsLifecycleStep struct { + // WaitTimeThreshold Time to wait for the cluster to resolve allocation issues during an ILM + // shrink action. Must be greater than 1h (1 hour). + // See Shard allocation for shrink. + WaitTimeThreshold Duration `json:"wait_time_threshold,omitempty"` +} + +func (s *IndexSettingsLifecycleStep) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "wait_time_threshold": + if err := dec.Decode(&s.WaitTimeThreshold); err != nil { + return fmt.Errorf("%s | %w", "WaitTimeThreshold", err) + } + + } + } + return nil +} + +// NewIndexSettingsLifecycleStep returns a IndexSettingsLifecycleStep. +func NewIndexSettingsLifecycleStep() *IndexSettingsLifecycleStep { + r := &IndexSettingsLifecycleStep{} + + return r +} + +type IndexSettingsLifecycleStepVariant interface { + IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep +} + +func (s *IndexSettingsLifecycleStep) IndexSettingsLifecycleStepCaster() *IndexSettingsLifecycleStep { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingstimeseries.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingstimeseries.go new file mode 100644 index 000000000..07f5c4c90 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexsettingstimeseries.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexSettingsTimeSeries type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L354-L357 +type IndexSettingsTimeSeries struct { + EndTime DateTime `json:"end_time,omitempty"` + StartTime DateTime `json:"start_time,omitempty"` +} + +func (s *IndexSettingsTimeSeries) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_time": + if err := dec.Decode(&s.EndTime); err != nil { + return fmt.Errorf("%s | %w", "EndTime", err) + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + } + } + return nil +} + +// NewIndexSettingsTimeSeries returns a IndexSettingsTimeSeries. +func NewIndexSettingsTimeSeries() *IndexSettingsTimeSeries { + r := &IndexSettingsTimeSeries{} + + return r +} + +type IndexSettingsTimeSeriesVariant interface { + IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries +} + +func (s *IndexSettingsTimeSeries) IndexSettingsTimeSeriesCaster() *IndexSettingsTimeSeries { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstate.go new file mode 100644 index 000000000..e9d9acdc3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstate.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexState type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexState.ts#L27-L40 +type IndexState struct { + Aliases map[string]Alias `json:"aliases,omitempty"` + DataStream *string `json:"data_stream,omitempty"` + // Defaults Default settings, included when the request's `include_default` is `true`. + Defaults *IndexSettings `json:"defaults,omitempty"` + // Lifecycle Data stream lifecycle applicable if this is a data stream. + Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` + Mappings *TypeMapping `json:"mappings,omitempty"` + Settings *IndexSettings `json:"settings,omitempty"` +} + +func (s *IndexState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "defaults": + if err := dec.Decode(&s.Defaults); err != nil { + return fmt.Errorf("%s | %w", "Defaults", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + } + } + return nil +} + +// NewIndexState returns a IndexState. +func NewIndexState() *IndexState { + r := &IndexState{ + Aliases: make(map[string]Alias), + } + + return r +} + +type IndexStateVariant interface { + IndexStateCaster() *IndexState +} + +func (s *IndexState) IndexStateCaster() *IndexState { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstats.go new file mode 100644 index 000000000..a24113e3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexstats.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndexStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L52-L93 +type IndexStats struct { + Bulk *BulkStats `json:"bulk,omitempty"` + // Completion Contains statistics about completions across all shards assigned to the node. + Completion *CompletionStats `json:"completion,omitempty"` + // Docs Contains statistics about documents across all primary shards assigned to the + // node. + Docs *DocStats `json:"docs,omitempty"` + // Fielddata Contains statistics about the field data cache across all shards assigned to + // the node. + Fielddata *FielddataStats `json:"fielddata,omitempty"` + // Flush Contains statistics about flush operations for the node. + Flush *FlushStats `json:"flush,omitempty"` + // Get Contains statistics about get operations for the node. + Get *GetStats `json:"get,omitempty"` + // Indexing Contains statistics about indexing operations for the node. + Indexing *IndexingStats `json:"indexing,omitempty"` + // Indices Contains statistics about indices operations for the node. + Indices *IndicesStats `json:"indices,omitempty"` + // Merges Contains statistics about merge operations for the node. + Merges *MergesStats `json:"merges,omitempty"` + // QueryCache Contains statistics about the query cache across all shards assigned to the + // node. + QueryCache *QueryCacheStats `json:"query_cache,omitempty"` + // Recovery Contains statistics about recovery operations for the node. + Recovery *RecoveryStats `json:"recovery,omitempty"` + // Refresh Contains statistics about refresh operations for the node. + Refresh *RefreshStats `json:"refresh,omitempty"` + // RequestCache Contains statistics about the request cache across all shards assigned to the + // node. + RequestCache *RequestCacheStats `json:"request_cache,omitempty"` + // Search Contains statistics about search operations for the node. + Search *SearchStats `json:"search,omitempty"` + // Segments Contains statistics about segments across all shards assigned to the node. + Segments *SegmentsStats `json:"segments,omitempty"` + ShardStats *ShardsTotalStats `json:"shard_stats,omitempty"` + // Store Contains statistics about the size of shards assigned to the node. + Store *StoreStats `json:"store,omitempty"` + // Translog Contains statistics about transaction log operations for the node. + Translog *TranslogStats `json:"translog,omitempty"` + // Warmer Contains statistics about index warming operations for the node. + Warmer *WarmerStats `json:"warmer,omitempty"` +} + +// NewIndexStats returns a IndexStats. +func NewIndexStats() *IndexStats { + r := &IndexStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplate.go new file mode 100644 index 000000000..f42cd0040 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplate.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexTemplate.ts#L29-L82 +type IndexTemplate struct { + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // ComposedOf An ordered list of component template names. + // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. + ComposedOf []string `json:"composed_of"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *IndexTemplateDataStreamConfiguration `json:"data_stream,omitempty"` + // Deprecated Marks this index template as deprecated. + // When creating or updating a non-deprecated index template that uses + // deprecated components, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` + // IgnoreMissingComponentTemplates A list of component template names that are allowed to be absent. + IgnoreMissingComponentTemplates []string `json:"ignore_missing_component_templates,omitempty"` + // IndexPatterns Name of the index template. + IndexPatterns []string `json:"index_patterns"` + // Meta_ Optional user metadata about the index template. May have any contents. + // This map is not automatically generated by Elasticsearch. + Meta_ Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). + // This number is not automatically generated by Elasticsearch. + Priority *int64 `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *IndexTemplateSummary `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. 
+ Version *int64 `json:"version,omitempty"` +} + +func (s *IndexTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_auto_create": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowAutoCreate", err) + } + s.AllowAutoCreate = &value + case bool: + s.AllowAutoCreate = &v + } + + case "composed_of": + if err := dec.Decode(&s.ComposedOf); err != nil { + return fmt.Errorf("%s | %w", "ComposedOf", err) + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "ignore_missing_component_templates": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + + s.IgnoreMissingComponentTemplates = append(s.IgnoreMissingComponentTemplates, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IgnoreMissingComponentTemplates); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingComponentTemplates", err) + } + } + + case "index_patterns": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + s.IndexPatterns = append(s.IndexPatterns, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "priority": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int64(v) + s.Priority = &f + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIndexTemplate returns a IndexTemplate. 
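
Two quirks in the decoder above are easy to miss: index_patterns (and ignore_missing_component_templates) may arrive either as a bare string or as an array, and priority may arrive quoted; all of these normalize into the same struct. A short sketch under those assumptions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var tmpl types.IndexTemplate
	// A bare string becomes a one-element slice; "200" is parsed as int64.
	raw := `{"index_patterns": "logs-*", "priority": "200"}`
	if err := json.Unmarshal([]byte(raw), &tmpl); err != nil {
		panic(err)
	}
	fmt.Println(tmpl.IndexPatterns, *tmpl.Priority) // [logs-*] 200
}
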
+func NewIndexTemplate() *IndexTemplate { + r := &IndexTemplate{} + + return r +} + +type IndexTemplateVariant interface { + IndexTemplateCaster() *IndexTemplate +} + +func (s *IndexTemplate) IndexTemplateCaster() *IndexTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatedatastreamconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatedatastreamconfiguration.go new file mode 100644 index 000000000..45102d7f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatedatastreamconfiguration.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexTemplateDataStreamConfiguration type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexTemplate.ts#L84-L95 +type IndexTemplateDataStreamConfiguration struct { + // AllowCustomRouting If true, the data stream supports custom routing. + AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + // Hidden If true, the data stream is hidden. + Hidden *bool `json:"hidden,omitempty"` +} + +func (s *IndexTemplateDataStreamConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_custom_routing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowCustomRouting", err) + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + + case "hidden": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Hidden", err) + } + s.Hidden = &value + case bool: + s.Hidden = &v + } + + } + } + return nil +} + +// NewIndexTemplateDataStreamConfiguration returns a IndexTemplateDataStreamConfiguration. 
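
The Variant/Caster pair that closes each type in this package (IndexTemplateVariant above, IndexTemplateDataStreamConfigurationVariant below) appears to exist so that the typed API's request builders can accept either the concrete struct or any user-defined type that knows how to cast itself to one. A consumer-side sketch; acceptHidden is a hypothetical helper, not part of the library:

// acceptHidden takes anything that can cast itself to the concrete
// configuration type and reads a field off the result.
func acceptHidden(v types.IndexTemplateDataStreamConfigurationVariant) bool {
	cfg := v.IndexTemplateDataStreamConfigurationCaster()
	return cfg.Hidden != nil && *cfg.Hidden
}

Because *IndexTemplateDataStreamConfiguration implements its own Variant interface, the constructor's result can be passed straight in:

	hidden := true
	cfg := types.NewIndexTemplateDataStreamConfiguration()
	cfg.Hidden = &hidden
	_ = acceptHidden(cfg) // true
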
+func NewIndexTemplateDataStreamConfiguration() *IndexTemplateDataStreamConfiguration { + r := &IndexTemplateDataStreamConfiguration{} + + return r +} + +type IndexTemplateDataStreamConfigurationVariant interface { + IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration +} + +func (s *IndexTemplateDataStreamConfiguration) IndexTemplateDataStreamConfigurationCaster() *IndexTemplateDataStreamConfiguration { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplateitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplateitem.go new file mode 100644 index 000000000..1b6f38322 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplateitem.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexTemplateItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 +type IndexTemplateItem struct { + IndexTemplate IndexTemplate `json:"index_template"` + Name string `json:"name"` +} + +func (s *IndexTemplateItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_template": + if err := dec.Decode(&s.IndexTemplate); err != nil { + return fmt.Errorf("%s | %w", "IndexTemplate", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewIndexTemplateItem returns a IndexTemplateItem. +func NewIndexTemplateItem() *IndexTemplateItem { + r := &IndexTemplateItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatemapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatemapping.go new file mode 100644 index 000000000..d116bc1af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatemapping.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndexTemplateMapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181 +type IndexTemplateMapping struct { + // Aliases Aliases to add. + // If the index template includes a `data_stream` object, these are data stream + // aliases. + // Otherwise, these are index aliases. + // Data stream aliases ignore the `index_routing`, `routing`, and + // `search_routing` options. + Aliases map[string]Alias `json:"aliases,omitempty"` + Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` + // Mappings Mapping for fields in the index. + // If specified, this mapping can include field names, field data types, and + // mapping parameters. + Mappings *TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. + Settings *IndexSettings `json:"settings,omitempty"` +} + +// NewIndexTemplateMapping returns a IndexTemplateMapping. +func NewIndexTemplateMapping() *IndexTemplateMapping { + r := &IndexTemplateMapping{ + Aliases: make(map[string]Alias), + } + + return r +} + +type IndexTemplateMappingVariant interface { + IndexTemplateMappingCaster() *IndexTemplateMapping +} + +func (s *IndexTemplateMapping) IndexTemplateMappingCaster() *IndexTemplateMapping { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatesummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatesummary.go new file mode 100644 index 000000000..3d0cc75ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indextemplatesummary.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndexTemplateSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexTemplate.ts#L97-L124 +type IndexTemplateSummary struct { + // Aliases Aliases to add. + // If the index template includes a `data_stream` object, these are data stream + // aliases. + // Otherwise, these are index aliases. + // Data stream aliases ignore the `index_routing`, `routing`, and + // `search_routing` options. + Aliases map[string]Alias `json:"aliases,omitempty"` + DataStreamOptions *DataStreamOptionsTemplate `json:"data_stream_options,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + // Mappings Mapping for fields in the index. + // If specified, this mapping can include field names, field data types, and + // mapping parameters. + Mappings *TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. + Settings *IndexSettings `json:"settings,omitempty"` +} + +func (s *IndexTemplateSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "data_stream_options": + if err := dec.Decode(&s.DataStreamOptions); err != nil { + return fmt.Errorf("%s | %w", "DataStreamOptions", err) + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return fmt.Errorf("%s | %w", "Lifecycle", err) + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + } + } + return nil +} + +// NewIndexTemplateSummary returns a IndexTemplateSummary. +func NewIndexTemplateSummary() *IndexTemplateSummary { + r := &IndexTemplateSummary{ + Aliases: make(map[string]Alias), + } + + return r +} + +type IndexTemplateSummaryVariant interface { + IndexTemplateSummaryCaster() *IndexTemplateSummary +} + +func (s *IndexTemplateSummary) IndexTemplateSummaryCaster() *IndexTemplateSummary { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexversioning.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexversioning.go new file mode 100644 index 000000000..c29e17d10 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indexversioning.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndexVersioning type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L292-L295 +type IndexVersioning struct { + Created *string `json:"created,omitempty"` + CreatedString *string `json:"created_string,omitempty"` +} + +func (s *IndexVersioning) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created": + if err := dec.Decode(&s.Created); err != nil { + return fmt.Errorf("%s | %w", "Created", err) + } + + case "created_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreatedString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedString = &o + + } + } + return nil +} + +// NewIndexVersioning returns a IndexVersioning. +func NewIndexVersioning() *IndexVersioning { + r := &IndexVersioning{} + + return r +} + +type IndexVersioningVariant interface { + IndexVersioningCaster() *IndexVersioning +} + +func (s *IndexVersioning) IndexVersioningCaster() *IndexVersioning { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicatornode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicatornode.go new file mode 100644 index 000000000..5999fea07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicatornode.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndicatorNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L93-L96 +type IndicatorNode struct { + Name *string `json:"name,omitempty"` + NodeId *string `json:"node_id,omitempty"` +} + +func (s *IndicatorNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "node_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeId = &o + + } + } + return nil +} + +// NewIndicatorNode returns a IndicatorNode. +func NewIndicatorNode() *IndicatorNode { + r := &IndicatorNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicators.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicators.go new file mode 100644 index 000000000..fc5f5fcc5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicators.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Indicators type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L33-L43 +type Indicators struct { + DataStreamLifecycle *DataStreamLifecycleIndicator `json:"data_stream_lifecycle,omitempty"` + Disk *DiskIndicator `json:"disk,omitempty"` + FileSettings *FileSettingsIndicator `json:"file_settings,omitempty"` + Ilm *IlmIndicator `json:"ilm,omitempty"` + MasterIsStable *MasterIsStableIndicator `json:"master_is_stable,omitempty"` + RepositoryIntegrity *RepositoryIntegrityIndicator `json:"repository_integrity,omitempty"` + ShardsAvailability *ShardsAvailabilityIndicator `json:"shards_availability,omitempty"` + ShardsCapacity *ShardsCapacityIndicator `json:"shards_capacity,omitempty"` + Slm *SlmIndicator `json:"slm,omitempty"` +} + +// NewIndicators returns a Indicators. 
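
Every field of Indicators is an optional pointer, so the presence of a given health indicator in a report is signaled by non-nil rather than by a zero value. A brief consumption sketch; checkIndicators and the data payload are hypothetical, and encoding/json plus this vendored types package are the only imports assumed:

func checkIndicators(data []byte) error {
	var ind types.Indicators
	if err := json.Unmarshal(data, &ind); err != nil {
		return err
	}
	// Only the indicators actually present in the health report are non-nil.
	if ind.MasterIsStable != nil {
		// inspect *ind.MasterIsStable
	}
	if ind.ShardsAvailability != nil {
		// inspect *ind.ShardsAvailability
	}
	return nil
}
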
+func NewIndicators() *Indicators { + r := &Indicators{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indices.go new file mode 100644 index 000000000..60dd6d50b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indices.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Indices type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L61-L61 +type Indices []string + +type IndicesVariant interface { + IndicesCaster() *Indices +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesaction.go new file mode 100644 index 000000000..57c10506e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesaction.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// IndicesAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/types.ts#L23-L39 +type IndicesAction struct { + // Add Adds a data stream or index to an alias. + // If the alias doesn’t exist, the `add` action creates it. + Add *AddAction `json:"add,omitempty"` + AdditionalIndicesActionProperty map[string]json.RawMessage `json:"-"` + // Remove Removes a data stream or index from an alias. 
+ Remove *RemoveAction `json:"remove,omitempty"` + // RemoveIndex Deletes an index. + // You cannot use this action on aliases or data streams. + RemoveIndex *RemoveIndexAction `json:"remove_index,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesAction) MarshalJSON() ([]byte, error) { + type opt IndicesAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIndicesAction returns a IndicesAction. +func NewIndicesAction() *IndicesAction { + r := &IndicesAction{ + AdditionalIndicesActionProperty: make(map[string]json.RawMessage), + } + + return r +} + +type IndicesActionVariant interface { + IndicesActionCaster() *IndicesAction +} + +func (s *IndicesAction) IndicesActionCaster() *IndicesAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressure.go new file mode 100644 index 000000000..7e175cada --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressure.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndicesIndexingPressure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L590-L592 +type IndicesIndexingPressure struct { + Memory IndicesIndexingPressureMemory `json:"memory"` +} + +// NewIndicesIndexingPressure returns a IndicesIndexingPressure. 
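
The MarshalJSON override on IndicesAction above is this package's standard handling of "additional properties": keys with no dedicated field ride in AdditionalIndicesActionProperty and are inlined back at the top level on encode, beside add/remove/remove_index. A small sketch (my_action is an invented key, purely for illustration):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.NewIndicesAction()
	// The constructor pre-allocates the additional-properties map; on
	// encode, its keys are flattened to the top level of the object.
	a.AdditionalIndicesActionProperty["my_action"] = json.RawMessage(`{"index":"logs"}`)
	out, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"my_action":{"index":"logs"}}
}
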
+func NewIndicesIndexingPressure() *IndicesIndexingPressure { + r := &IndicesIndexingPressure{} + + return r +} + +type IndicesIndexingPressureVariant interface { + IndicesIndexingPressureCaster() *IndicesIndexingPressure +} + +func (s *IndicesIndexingPressure) IndicesIndexingPressureCaster() *IndicesIndexingPressure { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressurememory.go new file mode 100644 index 000000000..c79c7be91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesindexingpressurememory.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndicesIndexingPressureMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L594-L601 +type IndicesIndexingPressureMemory struct { + // Limit Number of outstanding bytes that may be consumed by indexing requests. When + // this limit is reached or exceeded, + // the node will reject new coordinating and primary operations. When replica + // operations consume 1.5x this limit, + // the node will reject new replica operations. Defaults to 10% of the heap. + Limit *int `json:"limit,omitempty"` +} + +func (s *IndicesIndexingPressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewIndicesIndexingPressureMemory returns a IndicesIndexingPressureMemory. 
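
Note that Memory above is a plain value field with no omitempty, so an IndicesIndexingPressure always serializes its memory object, while the limit inside it stays optional. A sketch of building the setting; the byte count is an arbitrary example value:

	ip := types.NewIndicesIndexingPressure()
	limit := 52428800 // e.g. 50 MiB of outstanding indexing bytes
	ip.Memory.Limit = &limit
	// json.Marshal(ip) yields {"memory":{"limit":52428800}}
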
+func NewIndicesIndexingPressureMemory() *IndicesIndexingPressureMemory { + r := &IndicesIndexingPressureMemory{} + + return r +} + +type IndicesIndexingPressureMemoryVariant interface { + IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory +} + +func (s *IndicesIndexingPressureMemory) IndicesIndexingPressureMemoryCaster() *IndicesIndexingPressureMemory { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesmodifyaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesmodifyaction.go new file mode 100644 index 000000000..a804a2f78 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesmodifyaction.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// IndicesModifyAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/modify_data_stream/types.ts#L22-L37 +type IndicesModifyAction struct { + // AddBackingIndex Adds an existing index as a backing index for a data stream. + // The index is hidden as part of this operation. + // WARNING: Adding indices with the `add_backing_index` action can potentially + // result in improper data stream behavior. + // This should be considered an expert level API. + AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + AdditionalIndicesModifyActionProperty map[string]json.RawMessage `json:"-"` + // RemoveBackingIndex Removes a backing index from a data stream. + // The index is unhidden as part of this operation. + // A data stream’s write index cannot be removed. 
+ RemoveBackingIndex *IndexAndDataStreamAction `json:"remove_backing_index,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IndicesModifyAction) MarshalJSON() ([]byte, error) { + type opt IndicesModifyAction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIndicesModifyActionProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIndicesModifyActionProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIndicesModifyAction returns a IndicesModifyAction. +func NewIndicesModifyAction() *IndicesModifyAction { + r := &IndicesModifyAction{ + AdditionalIndicesModifyActionProperty: make(map[string]json.RawMessage), + } + + return r +} + +type IndicesModifyActionVariant interface { + IndicesModifyActionCaster() *IndicesModifyAction +} + +func (s *IndicesModifyAction) IndicesModifyActionCaster() *IndicesModifyAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesoptions.go new file mode 100644 index 000000000..b8c142ac8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesoptions.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard" +) + +// IndicesOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L334-L361 +type IndicesOptions struct { + // AllowNoIndices If false, the request returns an error if any wildcard expression, index + // alias, or `_all` value targets only + // missing or closed indices. This behavior applies even if the request targets + // other open indices. For example, + // a request targeting `foo*,bar*` returns an error if an index starts with + // `foo` but no index starts with `bar`. + AllowNoIndices *bool `json:"allow_no_indices,omitempty"` + // ExpandWildcards Type of index that wildcard patterns can match. 
If the request can target + // data streams, this argument + // determines whether wildcard expressions match hidden data streams. Supports + // comma-separated values, + // such as `open,hidden`. + ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"` + // IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. + IgnoreThrottled *bool `json:"ignore_throttled,omitempty"` + // IgnoreUnavailable If true, missing or closed indices are not included in the response. + IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` +} + +func (s *IndicesOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_no_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowNoIndices", err) + } + s.AllowNoIndices = &value + case bool: + s.AllowNoIndices = &v + } + + case "expand_wildcards": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &expandwildcard.ExpandWildcard{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + + s.ExpandWildcards = append(s.ExpandWildcards, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil { + return fmt.Errorf("%s | %w", "ExpandWildcards", err) + } + } + + case "ignore_throttled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreThrottled", err) + } + s.IgnoreThrottled = &value + case bool: + s.IgnoreThrottled = &v + } + + case "ignore_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnavailable", err) + } + s.IgnoreUnavailable = &value + case bool: + s.IgnoreUnavailable = &v + } + + } + } + return nil +} + +// NewIndicesOptions returns a IndicesOptions. +func NewIndicesOptions() *IndicesOptions { + r := &IndicesOptions{} + + return r +} + +type IndicesOptionsVariant interface { + IndicesOptionsCaster() *IndicesOptions +} + +func (s *IndicesOptions) IndicesOptionsCaster() *IndicesOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go new file mode 100644 index 000000000..cf82867ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go
new file mode 100644
index 000000000..cf82867ca
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivileges.go
@@ -0,0 +1,177 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege"
+)
+
+// IndicesPrivileges type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L218-L244
+type IndicesPrivileges struct {
+	// AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that
+	// cover restricted indices. Implicitly, restricted indices have limited
+	// privileges that can cause pattern tests to fail. If restricted indices are
+	// explicitly included in the `names` list, Elasticsearch checks privileges
+	// against these indices regardless of the value set for
+	// `allow_restricted_indices`.
+	AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"`
+	// FieldSecurity The document fields that the owners of the role have read access to.
+	FieldSecurity *FieldSecurity `json:"field_security,omitempty"`
+	// Names A list of indices (or index name patterns) to which the permissions in this
+	// entry apply.
+	Names []string `json:"names"`
+	// Privileges The index level privileges that owners of the role have on the specified
+	// indices.
+	Privileges []indexprivilege.IndexPrivilege `json:"privileges"`
+	// Query A search query that defines the documents the owners of the role have access
+	// to. A document within the specified indices must match this query for it to
+	// be accessible by the owners of the role.
+	Query IndicesPrivilegesQuery `json:"query,omitempty"`
+}
+
+func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_restricted_indices":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err)
+				}
+				s.AllowRestrictedIndices = &value
+			case bool:
+				s.AllowRestrictedIndices = &v
+			}
+
+		case "field_security":
+			if err := dec.Decode(&s.FieldSecurity); err != nil {
+				return fmt.Errorf("%s | %w", "FieldSecurity", err)
+			}
+
+		case "names":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Names", err)
+				}
+
+				s.Names = append(s.Names, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil {
+					return fmt.Errorf("%s | %w", "Names", err)
+				}
+			}
+
+		case "privileges":
+			if err := dec.Decode(&s.Privileges); err != nil {
+				return fmt.Errorf("%s | %w", "Privileges", err)
+			}
+
+		case "query":
+			message := json.RawMessage{}
+			if err := dec.Decode(&message); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+			keyDec := json.NewDecoder(bytes.NewReader(message))
+		query_field:
+			for {
+				t, err := keyDec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return fmt.Errorf("%s | %w", "Query", err)
+				}
+
+				switch t {
+
+				case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper":
+					o := NewQuery()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Query", err)
+					}
+					s.Query = o
+					break query_field
+
+				case "template":
+					o := NewRoleTemplateQuery()
+					localDec := json.NewDecoder(bytes.NewReader(message))
+					if err := localDec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Query", err)
+					}
+					s.Query = o
+					break query_field
+
+				}
+			}
+			if s.Query == nil {
+				localDec := json.NewDecoder(bytes.NewReader(message))
+				if err := localDec.Decode(&s.Query); err != nil {
+					return fmt.Errorf("%s | %w", "Query", err)
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIndicesPrivileges returns a IndicesPrivileges.
+func NewIndicesPrivileges() *IndicesPrivileges {
+	r := &IndicesPrivileges{}
+
+	return r
+}
+
+type IndicesPrivilegesVariant interface {
+	IndicesPrivilegesCaster() *IndicesPrivileges
+}
+
+func (s *IndicesPrivileges) IndicesPrivilegesCaster() *IndicesPrivileges {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivilegesquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivilegesquery.go
new file mode 100644
index 000000000..5f663bb8e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesprivilegesquery.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// IndicesPrivilegesQuery holds the union for the following types:
+//
+// string
+// Query
+// RoleTemplateQuery
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L343-L351
+type IndicesPrivilegesQuery any
+
+type IndicesPrivilegesQueryVariant interface {
+	IndicesPrivilegesQueryCaster() *IndicesPrivilegesQuery
+}
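IndicesPrivilegesQuery is an untyped union, and the UnmarshalJSON for IndicesPrivileges above resolves it by sniffing the first key of the query object: any known query kind selects *Query, "template" selects *RoleTemplateQuery, and anything else is decoded as-is. A rough sketch, assuming the usual typedapi Query shape in which a "term" key populates the Term map; the role JSON is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"names":["logs-*"],"privileges":["read"],"query":{"term":{"tenant":{"value":"acme"}}}}`)

	var priv types.IndicesPrivileges
	if err := json.Unmarshal(raw, &priv); err != nil {
		panic(err)
	}
	// The leading "term" key routes the union to a *types.Query.
	if q, ok := priv.Query.(*types.Query); ok {
		fmt.Println("document-level security query decoded:", q.Term != nil)
	}
}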
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesrecord.go
new file mode 100644
index 000000000..8e9fcfab1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesrecord.go
@@ -0,0 +1,2053 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IndicesRecord type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/indices/types.ts#L20-L808
+type IndicesRecord struct {
+	// BulkAvgSizeInBytes average size in bytes of shard bulk
+	BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"`
+	// BulkAvgTime average time spent in shard bulk
+	BulkAvgTime *string `json:"bulk.avg_time,omitempty"`
+	// BulkTotalOperations number of bulk shard ops
+	BulkTotalOperations *string `json:"bulk.total_operations,omitempty"`
+	// BulkTotalSizeInBytes total size in bytes of shard bulk
+	BulkTotalSizeInBytes *string `json:"bulk.total_size_in_bytes,omitempty"`
+	// BulkTotalTime time spent in shard bulk
+	BulkTotalTime *string `json:"bulk.total_time,omitempty"`
+	// CompletionSize size of completion
+	CompletionSize *string `json:"completion.size,omitempty"`
+	// CreationDate index creation date (millisecond value)
+	CreationDate *string `json:"creation.date,omitempty"`
+	// CreationDateString index creation date (as string)
+	CreationDateString *string `json:"creation.date.string,omitempty"`
+	// DatasetSize total size of dataset (including the cache for partially mounted indices)
+	DatasetSize *string `json:"dataset.size,omitempty"`
+	// DocsCount available docs
+	DocsCount *string `json:"docs.count,omitempty"`
+	// DocsDeleted deleted docs
+	DocsDeleted *string `json:"docs.deleted,omitempty"`
+	// FielddataEvictions fielddata evictions
+	FielddataEvictions *string `json:"fielddata.evictions,omitempty"`
+	// FielddataMemorySize used fielddata cache
+	FielddataMemorySize *string `json:"fielddata.memory_size,omitempty"`
+	// FlushTotal number of flushes
+	FlushTotal *string `json:"flush.total,omitempty"`
+	// FlushTotalTime time spent in flush
+	FlushTotalTime *string `json:"flush.total_time,omitempty"`
+	// GetCurrent number of current get ops
+	GetCurrent *string `json:"get.current,omitempty"`
+	// GetExistsTime time spent in successful gets
+	GetExistsTime *string `json:"get.exists_time,omitempty"`
+	// GetExistsTotal number of successful gets
+	GetExistsTotal *string `json:"get.exists_total,omitempty"`
+	// GetMissingTime time spent in failed gets
+	GetMissingTime *string `json:"get.missing_time,omitempty"`
+	// GetMissingTotal number of failed gets
+	GetMissingTotal *string `json:"get.missing_total,omitempty"`
+	// GetTime time spent in get
+	GetTime *string `json:"get.time,omitempty"`
+	// GetTotal number of get ops
+	GetTotal *string `json:"get.total,omitempty"`
+	// Health current health status
+	Health *string `json:"health,omitempty"`
+	// Index index name
+	Index *string `json:"index,omitempty"`
+	// IndexingDeleteCurrent number of current deletions
+	IndexingDeleteCurrent *string `json:"indexing.delete_current,omitempty"`
+	// IndexingDeleteTime time spent in deletions
+	IndexingDeleteTime *string `json:"indexing.delete_time,omitempty"`
+	// IndexingDeleteTotal number of delete ops
+	IndexingDeleteTotal *string `json:"indexing.delete_total,omitempty"`
+	// IndexingIndexCurrent number of current indexing ops
+	IndexingIndexCurrent *string `json:"indexing.index_current,omitempty"`
+	// IndexingIndexFailed number of failed indexing ops
+	IndexingIndexFailed *string `json:"indexing.index_failed,omitempty"`
+	// IndexingIndexTime time spent in indexing
+	IndexingIndexTime *string `json:"indexing.index_time,omitempty"`
+	// IndexingIndexTotal number of indexing ops
+	IndexingIndexTotal *string `json:"indexing.index_total,omitempty"`
+	// MemoryTotal total used memory
+	MemoryTotal *string `json:"memory.total,omitempty"`
+	// MergesCurrent number of current merges
+	MergesCurrent *string `json:"merges.current,omitempty"`
+	// MergesCurrentDocs number of current merging docs
+	MergesCurrentDocs *string `json:"merges.current_docs,omitempty"`
+	// MergesCurrentSize size of current merges
+	MergesCurrentSize *string `json:"merges.current_size,omitempty"`
+	// MergesTotal number of completed merge ops
+	MergesTotal *string `json:"merges.total,omitempty"`
+	// MergesTotalDocs docs merged
+	MergesTotalDocs *string `json:"merges.total_docs,omitempty"`
+	// MergesTotalSize size merged
+	MergesTotalSize *string `json:"merges.total_size,omitempty"`
+	// MergesTotalTime time spent in merges
+	MergesTotalTime *string `json:"merges.total_time,omitempty"`
+	// Pri number of primary shards
+	Pri *string `json:"pri,omitempty"`
+	// PriBulkAvgSizeInBytes average size in bytes of shard bulk
+	PriBulkAvgSizeInBytes *string `json:"pri.bulk.avg_size_in_bytes,omitempty"`
+	// PriBulkAvgTime average time spent in shard bulk
+	PriBulkAvgTime *string `json:"pri.bulk.avg_time,omitempty"`
+	// PriBulkTotalOperations number of bulk shard ops
+	PriBulkTotalOperations *string `json:"pri.bulk.total_operations,omitempty"`
+	// PriBulkTotalSizeInBytes total size in bytes of shard bulk
+	PriBulkTotalSizeInBytes *string `json:"pri.bulk.total_size_in_bytes,omitempty"`
+	// PriBulkTotalTime time spent in shard bulk
+	PriBulkTotalTime *string `json:"pri.bulk.total_time,omitempty"`
+	// PriCompletionSize size of completion
+	PriCompletionSize *string `json:"pri.completion.size,omitempty"`
+	// PriFielddataEvictions fielddata evictions
+	PriFielddataEvictions *string `json:"pri.fielddata.evictions,omitempty"`
+	// PriFielddataMemorySize used fielddata cache
+	PriFielddataMemorySize *string `json:"pri.fielddata.memory_size,omitempty"`
+	// PriFlushTotal number of flushes
+	PriFlushTotal *string `json:"pri.flush.total,omitempty"`
+	// PriFlushTotalTime time spent in flush
+	PriFlushTotalTime *string `json:"pri.flush.total_time,omitempty"`
+	// PriGetCurrent number of current get ops
+	PriGetCurrent *string `json:"pri.get.current,omitempty"`
+	// PriGetExistsTime time spent in successful gets
+	PriGetExistsTime *string `json:"pri.get.exists_time,omitempty"`
+	// PriGetExistsTotal number of successful gets
+	PriGetExistsTotal *string `json:"pri.get.exists_total,omitempty"`
+	// PriGetMissingTime time spent in failed gets
+	PriGetMissingTime *string `json:"pri.get.missing_time,omitempty"`
+	// PriGetMissingTotal number of failed gets
+	PriGetMissingTotal *string `json:"pri.get.missing_total,omitempty"`
+	// PriGetTime time spent in get
+	PriGetTime *string `json:"pri.get.time,omitempty"`
+	// PriGetTotal number of get ops
+	PriGetTotal *string `json:"pri.get.total,omitempty"`
+	// PriIndexingDeleteCurrent number of current deletions
+	PriIndexingDeleteCurrent *string `json:"pri.indexing.delete_current,omitempty"`
+	// PriIndexingDeleteTime time spent in deletions
+	PriIndexingDeleteTime *string `json:"pri.indexing.delete_time,omitempty"`
+	// PriIndexingDeleteTotal number of delete ops
+	PriIndexingDeleteTotal *string `json:"pri.indexing.delete_total,omitempty"`
+	// PriIndexingIndexCurrent number of current indexing ops
+	PriIndexingIndexCurrent *string `json:"pri.indexing.index_current,omitempty"`
+	// PriIndexingIndexFailed number of failed indexing ops
+	PriIndexingIndexFailed *string `json:"pri.indexing.index_failed,omitempty"`
+	// PriIndexingIndexTime time spent in indexing
+	PriIndexingIndexTime *string `json:"pri.indexing.index_time,omitempty"`
+	// PriIndexingIndexTotal number of indexing ops
+	PriIndexingIndexTotal *string `json:"pri.indexing.index_total,omitempty"`
+	// PriMemoryTotal total used memory
+	PriMemoryTotal *string `json:"pri.memory.total,omitempty"`
+	// PriMergesCurrent number of current merges
+	PriMergesCurrent *string `json:"pri.merges.current,omitempty"`
+	// PriMergesCurrentDocs number of current merging docs
+	PriMergesCurrentDocs *string `json:"pri.merges.current_docs,omitempty"`
+	// PriMergesCurrentSize size of current merges
+	PriMergesCurrentSize *string `json:"pri.merges.current_size,omitempty"`
+	// PriMergesTotal number of completed merge ops
+	PriMergesTotal *string `json:"pri.merges.total,omitempty"`
+	// PriMergesTotalDocs docs merged
+	PriMergesTotalDocs *string `json:"pri.merges.total_docs,omitempty"`
+	// PriMergesTotalSize size merged
+	PriMergesTotalSize *string `json:"pri.merges.total_size,omitempty"`
+	// PriMergesTotalTime time spent in merges
+	PriMergesTotalTime *string `json:"pri.merges.total_time,omitempty"`
+	// PriQueryCacheEvictions query cache evictions
+	PriQueryCacheEvictions *string `json:"pri.query_cache.evictions,omitempty"`
+	// PriQueryCacheMemorySize used query cache
+	PriQueryCacheMemorySize *string `json:"pri.query_cache.memory_size,omitempty"`
+	// PriRefreshExternalTime time spent in external refreshes
+	PriRefreshExternalTime *string `json:"pri.refresh.external_time,omitempty"`
+	// PriRefreshExternalTotal total external refreshes
+	PriRefreshExternalTotal *string `json:"pri.refresh.external_total,omitempty"`
+	// PriRefreshListeners number of pending refresh listeners
+	PriRefreshListeners *string `json:"pri.refresh.listeners,omitempty"`
+	// PriRefreshTime time spent in refreshes
+	PriRefreshTime *string `json:"pri.refresh.time,omitempty"`
+	// PriRefreshTotal total refreshes
+	PriRefreshTotal *string `json:"pri.refresh.total,omitempty"`
+	// PriRequestCacheEvictions request cache evictions
+	PriRequestCacheEvictions *string `json:"pri.request_cache.evictions,omitempty"`
+	// PriRequestCacheHitCount request cache hit count
+	PriRequestCacheHitCount *string `json:"pri.request_cache.hit_count,omitempty"`
+	// PriRequestCacheMemorySize used request cache
+	PriRequestCacheMemorySize *string `json:"pri.request_cache.memory_size,omitempty"`
+	// PriRequestCacheMissCount request cache miss count
+	PriRequestCacheMissCount *string `json:"pri.request_cache.miss_count,omitempty"`
+	// PriSearchFetchCurrent current fetch phase ops
+	PriSearchFetchCurrent *string `json:"pri.search.fetch_current,omitempty"`
+	// PriSearchFetchTime time spent in fetch phase
+	PriSearchFetchTime *string `json:"pri.search.fetch_time,omitempty"`
+	// PriSearchFetchTotal total fetch ops
+	PriSearchFetchTotal *string `json:"pri.search.fetch_total,omitempty"`
+	// PriSearchOpenContexts open search contexts
+	PriSearchOpenContexts *string `json:"pri.search.open_contexts,omitempty"`
+	// PriSearchQueryCurrent current query phase ops
+	PriSearchQueryCurrent *string `json:"pri.search.query_current,omitempty"`
+	// PriSearchQueryTime time spent in query phase
+	PriSearchQueryTime *string `json:"pri.search.query_time,omitempty"`
+	// PriSearchQueryTotal total query phase ops
+	PriSearchQueryTotal *string `json:"pri.search.query_total,omitempty"`
+	// PriSearchScrollCurrent open scroll contexts
+	PriSearchScrollCurrent *string `json:"pri.search.scroll_current,omitempty"`
+	// PriSearchScrollTime time scroll contexts held open
+	PriSearchScrollTime *string `json:"pri.search.scroll_time,omitempty"`
+	// PriSearchScrollTotal completed scroll contexts
+	PriSearchScrollTotal *string `json:"pri.search.scroll_total,omitempty"`
+	// PriSegmentsCount number of segments
+	PriSegmentsCount *string `json:"pri.segments.count,omitempty"`
+	// PriSegmentsFixedBitsetMemory memory used by fixed bit sets for nested object field types and export type
+	// filters for types referred in _parent fields
+	PriSegmentsFixedBitsetMemory *string `json:"pri.segments.fixed_bitset_memory,omitempty"`
+	// PriSegmentsIndexWriterMemory memory used by index writer
+	PriSegmentsIndexWriterMemory *string `json:"pri.segments.index_writer_memory,omitempty"`
+	// PriSegmentsMemory memory used by segments
+	PriSegmentsMemory *string `json:"pri.segments.memory,omitempty"`
+	// PriSegmentsVersionMapMemory memory used by version map
+	PriSegmentsVersionMapMemory *string `json:"pri.segments.version_map_memory,omitempty"`
+	// PriStoreSize store size of primaries
+	PriStoreSize *string `json:"pri.store.size,omitempty"`
+	// PriSuggestCurrent number of current suggest ops
+	PriSuggestCurrent *string `json:"pri.suggest.current,omitempty"`
+	// PriSuggestTime time spent in suggest
+	PriSuggestTime *string `json:"pri.suggest.time,omitempty"`
+	// PriSuggestTotal number of suggest ops
+	PriSuggestTotal *string `json:"pri.suggest.total,omitempty"`
+	// PriWarmerCurrent current warmer ops
+	PriWarmerCurrent *string `json:"pri.warmer.current,omitempty"`
+	// PriWarmerTotal total warmer ops
+	PriWarmerTotal *string `json:"pri.warmer.total,omitempty"`
+	// PriWarmerTotalTime time spent in warmers
+	PriWarmerTotalTime *string `json:"pri.warmer.total_time,omitempty"`
+	// QueryCacheEvictions query cache evictions
+	QueryCacheEvictions *string `json:"query_cache.evictions,omitempty"`
+	// QueryCacheMemorySize used query cache
+	QueryCacheMemorySize *string `json:"query_cache.memory_size,omitempty"`
+	// RefreshExternalTime time spent in external refreshes
+	RefreshExternalTime *string `json:"refresh.external_time,omitempty"`
+	// RefreshExternalTotal total external refreshes
+	RefreshExternalTotal *string `json:"refresh.external_total,omitempty"`
+	// RefreshListeners number of pending refresh listeners
+	RefreshListeners *string `json:"refresh.listeners,omitempty"`
+	// RefreshTime time spent in refreshes
+	RefreshTime *string `json:"refresh.time,omitempty"`
+	// RefreshTotal total refreshes
+	RefreshTotal *string `json:"refresh.total,omitempty"`
+	// Rep number of replica shards
+	Rep *string `json:"rep,omitempty"`
+	// RequestCacheEvictions request cache evictions
+	RequestCacheEvictions *string `json:"request_cache.evictions,omitempty"`
+	// RequestCacheHitCount request cache hit count
+	RequestCacheHitCount *string `json:"request_cache.hit_count,omitempty"`
+	// RequestCacheMemorySize used request cache
+	RequestCacheMemorySize *string `json:"request_cache.memory_size,omitempty"`
+	// RequestCacheMissCount request cache miss count
+	RequestCacheMissCount *string `json:"request_cache.miss_count,omitempty"`
+	// SearchFetchCurrent current fetch phase ops
+	SearchFetchCurrent *string `json:"search.fetch_current,omitempty"`
+	// SearchFetchTime time spent in fetch phase
+	SearchFetchTime *string `json:"search.fetch_time,omitempty"`
+	// SearchFetchTotal total fetch ops
+	SearchFetchTotal *string `json:"search.fetch_total,omitempty"`
+	// SearchOpenContexts open search contexts
+	SearchOpenContexts *string `json:"search.open_contexts,omitempty"`
+	// SearchQueryCurrent current query phase ops
+	SearchQueryCurrent *string `json:"search.query_current,omitempty"`
+	// SearchQueryTime time spent in query phase
+	SearchQueryTime *string `json:"search.query_time,omitempty"`
+	// SearchQueryTotal total query phase ops
+	SearchQueryTotal *string `json:"search.query_total,omitempty"`
+	// SearchScrollCurrent open scroll contexts
+	SearchScrollCurrent *string `json:"search.scroll_current,omitempty"`
+	// SearchScrollTime time scroll contexts held open
+	SearchScrollTime *string `json:"search.scroll_time,omitempty"`
+	// SearchScrollTotal completed scroll contexts
+	SearchScrollTotal *string `json:"search.scroll_total,omitempty"`
+	// SearchThrottled indicates if the index is search throttled
+	SearchThrottled *string `json:"search.throttled,omitempty"`
+	// SegmentsCount number of segments
+	SegmentsCount *string `json:"segments.count,omitempty"`
+	// SegmentsFixedBitsetMemory memory used by fixed bit sets for nested object field types and export type
+	// filters for types referred in _parent fields
+	SegmentsFixedBitsetMemory *string `json:"segments.fixed_bitset_memory,omitempty"`
+	// SegmentsIndexWriterMemory memory used by index writer
+	SegmentsIndexWriterMemory *string `json:"segments.index_writer_memory,omitempty"`
+	// SegmentsMemory memory used by segments
+	SegmentsMemory *string `json:"segments.memory,omitempty"`
+	// SegmentsVersionMapMemory memory used by version map
+	SegmentsVersionMapMemory *string `json:"segments.version_map_memory,omitempty"`
+	// Status open/close status
+	Status *string `json:"status,omitempty"`
+	// StoreSize store size of primaries & replicas
+	StoreSize *string `json:"store.size,omitempty"`
+	// SuggestCurrent number of current suggest ops
+	SuggestCurrent *string `json:"suggest.current,omitempty"`
+	// SuggestTime time spent in suggest
+	SuggestTime *string `json:"suggest.time,omitempty"`
+	// SuggestTotal number of suggest ops
+	SuggestTotal *string `json:"suggest.total,omitempty"`
+	// Uuid index uuid
+	Uuid *string `json:"uuid,omitempty"`
+	// WarmerCurrent current warmer ops
+	WarmerCurrent *string `json:"warmer.current,omitempty"`
+	// WarmerTotal total warmer ops
+	WarmerTotal *string `json:"warmer.total,omitempty"`
+	// WarmerTotalTime time spent in warmers
+	WarmerTotalTime *string `json:"warmer.total_time,omitempty"`
+}
+
+func (s *IndicesRecord) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "BulkAvgSizeInBytes", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.BulkAvgSizeInBytes = &o
+
+		case "bulk.avg_time", "bati", "bulkAvgTime":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "BulkAvgTime", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.BulkAvgTime = &o
+
+		case "bulk.total_operations", "bto", "bulkTotalOperation":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "BulkTotalOperations", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.BulkTotalOperations = &o
+
+		case "bulk.total_size_in_bytes", "btsi",
"bulkTotalSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompletionSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "creation.date", "cd": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreationDate", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreationDate = &o + + case "creation.date.string", "cds": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreationDateString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreationDateString = &o + + case "dataset.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatasetSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatasetSize = &o + + case "docs.count", "dc", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsCount = &o + + case "docs.deleted", "dd", "docsDeleted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocsDeleted", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsDeleted = &o + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = &o + + case "fielddata.memory_size", "fm", "fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage 
+ if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTotal = &o + + case "get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "health", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Health", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Health = &o + + case "index", "i", "idx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = &o + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", "indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexFailed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + case "memory.total", "tm", "memoryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MemoryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemoryTotal = &o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case "merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", "mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.MergesTotalTime = &o + + case "pri", "p", "shards.primary", "shardsPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pri", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pri = &o + + case "pri.bulk.avg_size_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriBulkAvgSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkAvgSizeInBytes = &o + + case "pri.bulk.avg_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriBulkAvgTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkAvgTime = &o + + case "pri.bulk.total_operations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriBulkTotalOperations", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalOperations = &o + + case "pri.bulk.total_size_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriBulkTotalSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalSizeInBytes = &o + + case "pri.bulk.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriBulkTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalTime = &o + + case "pri.completion.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriCompletionSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriCompletionSize = &o + + case "pri.fielddata.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriFielddataEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFielddataEvictions = &o + + case "pri.fielddata.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriFielddataMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFielddataMemorySize = &o + + case "pri.flush.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriFlushTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFlushTotal = &o + + case "pri.flush.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriFlushTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFlushTotalTime = &o + + case "pri.get.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetCurrent = &o + + case "pri.get.exists_time": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetExistsTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetExistsTime = &o + + case "pri.get.exists_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetExistsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetExistsTotal = &o + + case "pri.get.missing_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetMissingTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetMissingTime = &o + + case "pri.get.missing_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetMissingTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetMissingTotal = &o + + case "pri.get.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetTime = &o + + case "pri.get.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriGetTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetTotal = &o + + case "pri.indexing.delete_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingDeleteCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteCurrent = &o + + case "pri.indexing.delete_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingDeleteTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteTime = &o + + case "pri.indexing.delete_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingDeleteTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteTotal = &o + + case "pri.indexing.index_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingIndexCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexCurrent = &o + + case "pri.indexing.index_failed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingIndexFailed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexFailed = &o + + case "pri.indexing.index_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriIndexingIndexTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexTime = &o + + case "pri.indexing.index_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", 
"PriIndexingIndexTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexTotal = &o + + case "pri.memory.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMemoryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMemoryTotal = &o + + case "pri.merges.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesCurrent = &o + + case "pri.merges.current_docs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesCurrentDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesCurrentDocs = &o + + case "pri.merges.current_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesCurrentSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesCurrentSize = &o + + case "pri.merges.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotal = &o + + case "pri.merges.total_docs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesTotalDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalDocs = &o + + case "pri.merges.total_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesTotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalSize = &o + + case "pri.merges.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriMergesTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalTime = &o + + case "pri.query_cache.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriQueryCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriQueryCacheEvictions = &o + + case "pri.query_cache.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriQueryCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriQueryCacheMemorySize = &o + + case "pri.refresh.external_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRefreshExternalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshExternalTime = &o + + case "pri.refresh.external_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRefreshExternalTotal", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshExternalTotal = &o + + case "pri.refresh.listeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRefreshListeners", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshListeners = &o + + case "pri.refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRefreshTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshTime = &o + + case "pri.refresh.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRefreshTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshTotal = &o + + case "pri.request_cache.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRequestCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheEvictions = &o + + case "pri.request_cache.hit_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRequestCacheHitCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheHitCount = &o + + case "pri.request_cache.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRequestCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheMemorySize = &o + + case "pri.request_cache.miss_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriRequestCacheMissCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheMissCount = &o + + case "pri.search.fetch_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchFetchCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchCurrent = &o + + case "pri.search.fetch_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchFetchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchTime = &o + + case "pri.search.fetch_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchFetchTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchTotal = &o + + case "pri.search.open_contexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchOpenContexts", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchOpenContexts = &o + + case "pri.search.query_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchQueryCurrent", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryCurrent = &o + + case "pri.search.query_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchQueryTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryTime = &o + + case "pri.search.query_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchQueryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryTotal = &o + + case "pri.search.scroll_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchScrollCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollCurrent = &o + + case "pri.search.scroll_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchScrollTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollTime = &o + + case "pri.search.scroll_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSearchScrollTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollTotal = &o + + case "pri.segments.count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSegmentsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsCount = &o + + case "pri.segments.fixed_bitset_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSegmentsFixedBitsetMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsFixedBitsetMemory = &o + + case "pri.segments.index_writer_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSegmentsIndexWriterMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsIndexWriterMemory = &o + + case "pri.segments.memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSegmentsMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsMemory = &o + + case "pri.segments.version_map_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSegmentsVersionMapMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsVersionMapMemory = &o + + case "pri.store.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriStoreSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriStoreSize = &o + + case "pri.suggest.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSuggestCurrent", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestCurrent = &o + + case "pri.suggest.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSuggestTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestTime = &o + + case "pri.suggest.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriSuggestTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestTotal = &o + + case "pri.warmer.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriWarmerCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerCurrent = &o + + case "pri.warmer.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriWarmerTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerTotal = &o + + case "pri.warmer.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PriWarmerTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerTotalTime = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "refresh.external_time", "reti": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTime = &o + + case "refresh.external_total", "reto": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshListeners", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time", "rti", "refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err 
!= nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "rep", "r", "shards.replica", "shardsReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Rep", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rep = &o + + case "request_cache.evictions", "rce", "requestCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheEvictions = &o + + case "request_cache.hit_count", "rchc", "requestCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheHitCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheHitCount = &o + + case "request_cache.memory_size", "rcm", "requestCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMemorySize = &o + + case "request_cache.miss_count", "rcmc", "requestCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheMissCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMissCount = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchOpenContexts", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", 
"searchQueryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "search.throttled", "sth": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchThrottled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchThrottled = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", "sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsFixedBitsetMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsIndexWriterMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsVersionMapMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "status", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + case "store.size", "ss", "storeSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StoreSize", err) + } + o := string(tmp[:]) 
+ o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StoreSize = &o + + case "suggest.current", "suc", "suggestCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestCurrent = &o + + case "suggest.time", "suti", "suggestTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTime = &o + + case "suggest.total", "suto", "suggestTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTotal = &o + + case "uuid", "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uuid = &o + + case "warmer.current", "wc", "warmerCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerCurrent = &o + + case "warmer.total", "wto", "warmerTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotal = &o + + case "warmer.total_time", "wtt", "warmerTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotalTime = &o + + } + } + return nil +} + +// NewIndicesRecord returns a IndicesRecord. +func NewIndicesRecord() *IndicesRecord { + r := &IndicesRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardsstats.go new file mode 100644 index 000000000..fa36b11b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardsstats.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndicesShardsStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L53-L56 +type IndicesShardsStats struct { + AllFields FieldSummary `json:"all_fields"` + Fields map[string]FieldSummary `json:"fields"` +} + +// NewIndicesShardsStats returns a IndicesShardsStats. +func NewIndicesShardsStats() *IndicesShardsStats { + r := &IndicesShardsStats{ + Fields: make(map[string]FieldSummary), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstats.go new file mode 100644 index 000000000..0725cd715 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstats.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// IndicesShardStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L192-L223 +type IndicesShardStats struct { + Bulk *BulkStats `json:"bulk,omitempty"` + Commit *ShardCommit `json:"commit,omitempty"` + Completion *CompletionStats `json:"completion,omitempty"` + Docs *DocStats `json:"docs,omitempty"` + Fielddata *FielddataStats `json:"fielddata,omitempty"` + Flush *FlushStats `json:"flush,omitempty"` + Get *GetStats `json:"get,omitempty"` + Indexing *IndexingStats `json:"indexing,omitempty"` + Indices *IndicesStats `json:"indices,omitempty"` + Mappings *MappingStats `json:"mappings,omitempty"` + Merges *MergesStats `json:"merges,omitempty"` + QueryCache *ShardQueryCache `json:"query_cache,omitempty"` + Recovery *RecoveryStats `json:"recovery,omitempty"` + Refresh *RefreshStats `json:"refresh,omitempty"` + RequestCache *RequestCacheStats `json:"request_cache,omitempty"` + RetentionLeases *ShardRetentionLeases `json:"retention_leases,omitempty"` + Routing *ShardRouting `json:"routing,omitempty"` + Search *SearchStats `json:"search,omitempty"` + Segments *SegmentsStats `json:"segments,omitempty"` + SeqNo *ShardSequenceNumber `json:"seq_no,omitempty"` + ShardPath *ShardPath `json:"shard_path,omitempty"` + ShardStats *ShardsTotalStats `json:"shard_stats,omitempty"` + Shards map[string]json.RawMessage `json:"shards,omitempty"` + Store *StoreStats `json:"store,omitempty"` + Translog *TranslogStats `json:"translog,omitempty"` + Warmer *WarmerStats `json:"warmer,omitempty"` +} + +// NewIndicesShardStats returns a IndicesShardStats. +func NewIndicesShardStats() *IndicesShardStats { + r := &IndicesShardStats{ + Shards: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstores.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstores.go new file mode 100644 index 000000000..b49a10afb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesshardstores.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IndicesShardStores type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L25-L27 +type IndicesShardStores struct { + Shards map[string]ShardStoreWrapper `json:"shards"` +} + +// NewIndicesShardStores returns a IndicesShardStores. 
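NewIndicesShardsStats above pre-allocates its Fields map, and the constructors that follow do the same. The reason is a Go rule rather than anything Elasticsearch-specific: assigning into a nil map panics, while reading from one is safe. A self-contained sketch (illustrative names, not the vendored types):

package main

import "fmt"

type shardStores struct {
	Shards map[string]string
}

func newShardStores() *shardStores {
	// Pre-allocating means callers can assign into Shards immediately.
	return &shardStores{Shards: make(map[string]string)}
}

func main() {
	s := newShardStores()
	s.Shards["0"] = "primary" // safe: the map was pre-made

	var nilMap map[string]string
	fmt.Println(nilMap["0"] == "") // reads on a nil map return the zero value
	// nilMap["0"] = "primary"    // would panic: assignment to entry in nil map
}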
+func NewIndicesShardStores() *IndicesShardStores { + r := &IndicesShardStores{ + Shards: make(map[string]ShardStoreWrapper), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesstats.go new file mode 100644 index 000000000..dd65ebcce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesstats.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmetadatastate" +) + +// IndicesStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L95-L110 +type IndicesStats struct { + Health *healthstatus.HealthStatus `json:"health,omitempty"` + Primaries *IndexStats `json:"primaries,omitempty"` + Shards map[string][]IndicesShardStats `json:"shards,omitempty"` + Status *indexmetadatastate.IndexMetadataState `json:"status,omitempty"` + Total *IndexStats `json:"total,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *IndicesStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "health": + if err := dec.Decode(&s.Health); err != nil { + return fmt.Errorf("%s | %w", "Health", err) + } + + case "primaries": + if err := dec.Decode(&s.Primaries); err != nil { + return fmt.Errorf("%s | %w", "Primaries", err) + } + + case "shards": + if s.Shards == nil { + s.Shards = make(map[string][]IndicesShardStats, 0) + } + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// NewIndicesStats returns a IndicesStats. 
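IndicesStats.UnmarshalJSON above shows the generated token-walk: json.Decoder.Token yields each object key, dec.Decode then consumes the whole value that follows, however deeply nested, and map-typed fields are made before decoding. A compact sketch of the "shards" case with illustrative types:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

type stats struct {
	Shards map[string][]int
}

func (s *stats) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		if t == "shards" {
			if s.Shards == nil {
				s.Shards = make(map[string][]int) // mirrors the generated lazy init
			}
			// Decode consumes the entire nested object following the key.
			if err := dec.Decode(&s.Shards); err != nil {
				return fmt.Errorf("Shards | %w", err)
			}
		}
	}
	return nil
}

func main() {
	var s stats
	_ = json.Unmarshal([]byte(`{"shards":{"0":[1,2],"1":[3]}}`), &s)
	fmt.Println(s.Shards["0"], s.Shards["1"]) // [1 2] [3]
}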
+func NewIndicesStats() *IndicesStats { + r := &IndicesStats{ + Shards: make(map[string][]IndicesShardStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesvalidationexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesvalidationexplanation.go new file mode 100644 index 000000000..58ac0014b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesvalidationexplanation.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndicesValidationExplanation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 +type IndicesValidationExplanation struct { + Error *string `json:"error,omitempty"` + Explanation *string `json:"explanation,omitempty"` + Index string `json:"index"` + Valid bool `json:"valid"` +} + +func (s *IndicesValidationExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Explanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = &o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "valid": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Valid", err) + } + s.Valid = value + case bool: + s.Valid = v + } + + } + } + return nil +} + +// NewIndicesValidationExplanation returns a IndicesValidationExplanation. 
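The "valid" case above decodes a tolerant boolean: a JSON bool is used directly, and a JSON string goes through strconv.ParseBool. A self-contained sketch of that pattern:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func decodeBool(raw json.RawMessage) (bool, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return false, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.ParseBool(v) // accepts "true", "1", "T", ...
	case bool:
		return v, nil
	}
	return false, fmt.Errorf("unsupported bool representation: %s", raw)
}

func main() {
	for _, in := range []string{`true`, `"true"`, `"1"`} {
		v, err := decodeBool(json.RawMessage(in))
		fmt.Println(v, err) // true <nil> for all three inputs
	}
}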
+func NewIndicesValidationExplanation() *IndicesValidationExplanation { + r := &IndicesValidationExplanation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesversions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesversions.go new file mode 100644 index 000000000..2b8a147aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicesversions.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndicesVersions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L359-L365 +type IndicesVersions struct { + IndexCount int `json:"index_count"` + PrimaryShardCount int `json:"primary_shard_count"` + TotalPrimaryBytes int64 `json:"total_primary_bytes"` + TotalPrimarySize ByteSize `json:"total_primary_size,omitempty"` + Version string `json:"version"` +} + +func (s *IndicesVersions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "primary_shard_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryShardCount", err) + } + s.PrimaryShardCount = value + case float64: + f := int(v) + s.PrimaryShardCount = f + } + + case "total_primary_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalPrimaryBytes", err) + } + s.TotalPrimaryBytes = value + case float64: + f := int64(v) + s.TotalPrimaryBytes = f + } + + case "total_primary_size": + if err := dec.Decode(&s.TotalPrimarySize); err != nil { + return fmt.Errorf("%s | %w", "TotalPrimarySize", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIndicesVersions returns a IndicesVersions. 
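Integer fields such as index_count get the same leniency: a quoted number is parsed with strconv.Atoi, while a bare JSON number arrives as float64 through the `any` path and is truncated. A sketch:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func decodeInt(raw json.RawMessage) (int, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return 0, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.Atoi(v)
	case float64: // encoding/json decodes every JSON number into float64 here
		return int(v), nil
	}
	return 0, fmt.Errorf("unsupported int representation: %s", raw)
}

func main() {
	a, _ := decodeInt(json.RawMessage(`7`))
	b, _ := decodeInt(json.RawMessage(`"7"`))
	fmt.Println(a, b) // 7 7
}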
+func NewIndicesVersions() *IndicesVersions { + r := &IndicesVersions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicnormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicnormalizationtokenfilter.go new file mode 100644 index 000000000..4aac007c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indicnormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IndicNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L506-L508 +type IndicNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *IndicNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IndicNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerIndicNormalizationTokenFilter IndicNormalizationTokenFilter + tmp := innerIndicNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "indic_normalization" + + return json.Marshal(tmp) +} + +// NewIndicNormalizationTokenFilter returns a IndicNormalizationTokenFilter. 
+func NewIndicNormalizationTokenFilter() *IndicNormalizationTokenFilter { + r := &IndicNormalizationTokenFilter{} + + return r +} + +type IndicNormalizationTokenFilterVariant interface { + IndicNormalizationTokenFilterCaster() *IndicNormalizationTokenFilter +} + +func (s *IndicNormalizationTokenFilter) IndicNormalizationTokenFilterCaster() *IndicNormalizationTokenFilter { + return s +} + +func (s *IndicNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indonesiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indonesiananalyzer.go new file mode 100644 index 000000000..2a6956591 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/indonesiananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IndonesianAnalyzer type. 
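The MarshalJSON above, and the analyzer variants that follow, pin the type discriminator with a local alias type: the alias has the same fields but none of the methods, so encoding it cannot recurse back into MarshalJSON, and Type is overwritten with the literal before encoding. A self-contained sketch of the idiom:

package main

import (
	"encoding/json"
	"fmt"
)

type tokenFilter struct {
	Type    string  `json:"type,omitempty"`
	Version *string `json:"version,omitempty"`
}

func (s tokenFilter) MarshalJSON() ([]byte, error) {
	// inner shares tokenFilter's fields but not its methods, so marshalling
	// it uses the default struct encoder (no infinite recursion).
	type inner tokenFilter
	tmp := inner(s)
	tmp.Type = "indic_normalization" // literal enforced on every encode
	return json.Marshal(tmp)
}

func main() {
	b, _ := json.Marshal(tokenFilter{}) // caller left Type empty
	fmt.Println(string(b))              // {"type":"indic_normalization"}
}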
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L213-L218 +type IndonesianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IndonesianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IndonesianAnalyzer) MarshalJSON() ([]byte, error) { + type innerIndonesianAnalyzer IndonesianAnalyzer + tmp := innerIndonesianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "indonesian" + + return json.Marshal(tmp) +} + +// NewIndonesianAnalyzer returns a IndonesianAnalyzer. +func NewIndonesianAnalyzer() *IndonesianAnalyzer { + r := &IndonesianAnalyzer{} + + return r +} + +type IndonesianAnalyzerVariant interface { + IndonesianAnalyzerCaster() *IndonesianAnalyzer +} + +func (s *IndonesianAnalyzer) IndonesianAnalyzerCaster() *IndonesianAnalyzer { + return s +} + +func (s *IndonesianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregate.go new file mode 100644 index 000000000..674759b81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregate.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
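A quick usage sketch against the vendored package this diff adds: the marshalled output always carries the injected "type":"indonesian" discriminator even when the caller sets nothing (the exact output otherwise depends on which optional fields are set):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.NewIndonesianAnalyzer()
	b, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // includes "type":"indonesian"
}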
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L757-L772 +type InferenceAggregate struct { + Data map[string]json.RawMessage `json:"-"` + FeatureImportance []InferenceFeatureImportance `json:"feature_importance,omitempty"` + Meta Metadata `json:"meta,omitempty"` + TopClasses []InferenceTopClassEntry `json:"top_classes,omitempty"` + Value FieldValue `json:"value,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +func (s *InferenceAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_importance": + if err := dec.Decode(&s.FeatureImportance); err != nil { + return fmt.Errorf("%s | %w", "FeatureImportance", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "top_classes": + if err := dec.Decode(&s.TopClasses); err != nil { + return fmt.Errorf("%s | %w", "TopClasses", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "warning": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Warning", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Warning = &o + + default: + + if key, ok := t.(string); ok { + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + s.Data[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceAggregate) MarshalJSON() ([]byte, error) { + type opt InferenceAggregate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Data { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Data") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewInferenceAggregate returns a InferenceAggregate. +func NewInferenceAggregate() *InferenceAggregate { + r := &InferenceAggregate{ + Data: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregation.go new file mode 100644 index 000000000..7fff7faad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceaggregation.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// InferenceAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L225-L234 +type InferenceAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // InferenceConfig Contains the inference type and its options. + InferenceConfig *InferenceConfigContainer `json:"inference_config,omitempty"` + // ModelId The ID or alias for the trained model. + ModelId string `json:"model_id"` +} + +func (s *InferenceAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + + } + } + return nil +} + +// NewInferenceAggregation returns a InferenceAggregation. 
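A hedged usage sketch for the pipeline aggregation above, using the constructor this diff adds; the model ID is a placeholder, and optional fields such as buckets_path and gap_policy are left unset here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	agg := types.NewInferenceAggregation()
	agg.ModelId = "my-trained-model" // placeholder ID, not from this diff

	body, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	// Typically {"model_id":"my-trained-model"}: the remaining fields are
	// optional and omitted when unset.
	fmt.Println(string(body))
}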
+func NewInferenceAggregation() *InferenceAggregation { + r := &InferenceAggregation{} + + return r +} + +type InferenceAggregationVariant interface { + InferenceAggregationCaster() *InferenceAggregation +} + +func (s *InferenceAggregation) InferenceAggregationCaster() *InferenceAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencechunkingsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencechunkingsettings.go new file mode 100644 index 000000000..565cc506a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencechunkingsettings.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceChunkingSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Services.ts#L288-L317 +type InferenceChunkingSettings struct { + // MaxChunkSize The maximum size of a chunk in words. + // This value cannot be higher than `300` or lower than `20` (for `sentence` + // strategy) or `10` (for `word` strategy). + MaxChunkSize *int `json:"max_chunk_size,omitempty"` + // Overlap The number of overlapping words for chunks. + // It is applicable only to a `word` chunking strategy. + // This value cannot be higher than half the `max_chunk_size` value. + Overlap *int `json:"overlap,omitempty"` + // SentenceOverlap The number of overlapping sentences for chunks. + // It is applicable only for a `sentence` chunking strategy. + // It can be either `1` or `0`. + SentenceOverlap *int `json:"sentence_overlap,omitempty"` + // Strategy The chunking strategy: `sentence` or `word`. 
+ Strategy *string `json:"strategy,omitempty"` +} + +func (s *InferenceChunkingSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_chunk_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChunkSize", err) + } + s.MaxChunkSize = &value + case float64: + f := int(v) + s.MaxChunkSize = &f + } + + case "overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Overlap", err) + } + s.Overlap = &value + case float64: + f := int(v) + s.Overlap = &f + } + + case "sentence_overlap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SentenceOverlap", err) + } + s.SentenceOverlap = &value + case float64: + f := int(v) + s.SentenceOverlap = &f + } + + case "strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Strategy = &o + + } + } + return nil +} + +// NewInferenceChunkingSettings returns a InferenceChunkingSettings. +func NewInferenceChunkingSettings() *InferenceChunkingSettings { + r := &InferenceChunkingSettings{} + + return r +} + +type InferenceChunkingSettingsVariant interface { + InferenceChunkingSettingsCaster() *InferenceChunkingSettings +} + +func (s *InferenceChunkingSettings) InferenceChunkingSettingsCaster() *InferenceChunkingSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceclassimportance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceclassimportance.go new file mode 100644 index 000000000..dd169b689 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceclassimportance.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceClassImportance type. 
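The bounds in the doc comments above (max_chunk_size between 20, or 10 for the word strategy, and 300; overlap at most half of max_chunk_size; sentence_overlap either 0 or 1) are documented limits, not enforced by the generated type. A hedged client-side check, assuming `sentence` as the default strategy (an assumption; the default is not stated here):

package main

import (
	"errors"
	"fmt"
)

type chunking struct {
	MaxChunkSize    *int
	Overlap         *int
	SentenceOverlap *int
	Strategy        *string
}

func validate(c chunking) error {
	strategy := "sentence" // assumed default, not stated in this diff
	if c.Strategy != nil {
		strategy = *c.Strategy
	}
	if c.MaxChunkSize != nil {
		low := 20 // lower bound for the sentence strategy
		if strategy == "word" {
			low = 10
		}
		if *c.MaxChunkSize > 300 || *c.MaxChunkSize < low {
			return fmt.Errorf("max_chunk_size %d outside [%d,300]", *c.MaxChunkSize, low)
		}
	}
	if c.Overlap != nil && c.MaxChunkSize != nil && *c.Overlap > *c.MaxChunkSize/2 {
		return errors.New("overlap cannot exceed half of max_chunk_size")
	}
	if c.SentenceOverlap != nil && *c.SentenceOverlap != 0 && *c.SentenceOverlap != 1 {
		return errors.New("sentence_overlap must be 0 or 1")
	}
	return nil
}

func main() {
	size, overlap := 100, 60
	fmt.Println(validate(chunking{MaxChunkSize: &size, Overlap: &overlap}))
	// overlap cannot exceed half of max_chunk_size
}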
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L786-L789 +type InferenceClassImportance struct { + ClassName string `json:"class_name"` + Importance Float64 `json:"importance"` +} + +func (s *InferenceClassImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClassName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "importance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Importance", err) + } + f := Float64(value) + s.Importance = f + case float64: + f := Float64(v) + s.Importance = f + } + + } + } + return nil +} + +// NewInferenceClassImportance returns a InferenceClassImportance. +func NewInferenceClassImportance() *InferenceClassImportance { + r := &InferenceClassImportance{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfig.go new file mode 100644 index 000000000..fda476011 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfig.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// InferenceConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1067-L1079 +type InferenceConfig struct { + AdditionalInferenceConfigProperty map[string]json.RawMessage `json:"-"` + // Classification Classification configuration for inference. + Classification *InferenceConfigClassification `json:"classification,omitempty"` + // Regression Regression configuration for inference. 
+ Regression *InferenceConfigRegression `json:"regression,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfig) MarshalJSON() ([]byte, error) { + type opt InferenceConfig + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewInferenceConfig returns a InferenceConfig. +func NewInferenceConfig() *InferenceConfig { + r := &InferenceConfig{ + AdditionalInferenceConfigProperty: make(map[string]json.RawMessage), + } + + return r +} + +type InferenceConfigVariant interface { + InferenceConfigCaster() *InferenceConfig +} + +func (s *InferenceConfig) InferenceConfigCaster() *InferenceConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigclassification.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigclassification.go new file mode 100644 index 000000000..543a13403 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigclassification.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceConfigClassification type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1094-L1120 +type InferenceConfigClassification struct { + // NumTopClasses Specifies the number of top class predictions to return. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // PredictionFieldType Specifies the type of the predicted field to write. + // Valid values are: `string`, `number`, `boolean`. 
+ PredictionFieldType *string `json:"prediction_field_type,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. + ResultsField *string `json:"results_field,omitempty"` + // TopClassesResultsField Specifies the field to which the top classes are written. + TopClassesResultsField *string `json:"top_classes_results_field,omitempty"` +} + +func (s *InferenceConfigClassification) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PredictionFieldType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictionFieldType = &o + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + + case "top_classes_results_field": + if err := dec.Decode(&s.TopClassesResultsField); err != nil { + return fmt.Errorf("%s | %w", "TopClassesResultsField", err) + } + + } + } + return nil +} + +// NewInferenceConfigClassification returns a InferenceConfigClassification. +func NewInferenceConfigClassification() *InferenceConfigClassification { + r := &InferenceConfigClassification{} + + return r +} + +type InferenceConfigClassificationVariant interface { + InferenceConfigClassificationCaster() *InferenceConfigClassification +} + +func (s *InferenceConfigClassification) InferenceConfigClassificationCaster() *InferenceConfigClassification { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcontainer.go new file mode 100644 index 000000000..653b1ddc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcontainer.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// InferenceConfigContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L236-L242 +type InferenceConfigContainer struct { + AdditionalInferenceConfigContainerProperty map[string]json.RawMessage `json:"-"` + // Classification Classification configuration for inference. + Classification *ClassificationInferenceOptions `json:"classification,omitempty"` + // Regression Regression configuration for inference. + Regression *RegressionInferenceOptions `json:"regression,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewInferenceConfigContainer returns a InferenceConfigContainer. +func NewInferenceConfigContainer() *InferenceConfigContainer { + r := &InferenceConfigContainer{ + AdditionalInferenceConfigContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type InferenceConfigContainerVariant interface { + InferenceConfigContainerCaster() *InferenceConfigContainer +} + +func (s *InferenceConfigContainer) InferenceConfigContainerCaster() *InferenceConfigContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcreatecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcreatecontainer.go new file mode 100644 index 000000000..642084547 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigcreatecontainer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// InferenceConfigCreateContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L26-L85 +type InferenceConfigCreateContainer struct { + AdditionalInferenceConfigCreateContainerProperty map[string]json.RawMessage `json:"-"` + // Classification Classification configuration for inference. + Classification *ClassificationInferenceOptions `json:"classification,omitempty"` + // FillMask Fill mask configuration for inference. + FillMask *FillMaskInferenceOptions `json:"fill_mask,omitempty"` + LearningToRank *LearningToRankConfig `json:"learning_to_rank,omitempty"` + // Ner Named entity recognition configuration for inference. + Ner *NerInferenceOptions `json:"ner,omitempty"` + // PassThrough Pass through configuration for inference. + PassThrough *PassThroughInferenceOptions `json:"pass_through,omitempty"` + // QuestionAnswering Question answering configuration for inference. + QuestionAnswering *QuestionAnsweringInferenceOptions `json:"question_answering,omitempty"` + // Regression Regression configuration for inference. + Regression *RegressionInferenceOptions `json:"regression,omitempty"` + // TextClassification Text classification configuration for inference. + TextClassification *TextClassificationInferenceOptions `json:"text_classification,omitempty"` + // TextEmbedding Text embedding configuration for inference. + TextEmbedding *TextEmbeddingInferenceOptions `json:"text_embedding,omitempty"` + // TextExpansion Text expansion configuration for inference. + TextExpansion *TextExpansionInferenceOptions `json:"text_expansion,omitempty"` + // ZeroShotClassification Zeroshot classification configuration for inference. + ZeroShotClassification *ZeroShotClassificationInferenceOptions `json:"zero_shot_classification,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigCreateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigCreateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigCreateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigCreateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewInferenceConfigCreateContainer returns a InferenceConfigCreateContainer. 
+func NewInferenceConfigCreateContainer() *InferenceConfigCreateContainer { + r := &InferenceConfigCreateContainer{ + AdditionalInferenceConfigCreateContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type InferenceConfigCreateContainerVariant interface { + InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer +} + +func (s *InferenceConfigCreateContainer) InferenceConfigCreateContainerCaster() *InferenceConfigCreateContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigregression.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigregression.go new file mode 100644 index 000000000..3bec13070 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigregression.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceConfigRegression type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1081-L1092 +type InferenceConfigRegression struct { + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. + ResultsField *string `json:"results_field,omitempty"` +} + +func (s *InferenceConfigRegression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + + } + } + return nil +} + +// NewInferenceConfigRegression returns a InferenceConfigRegression. 
+func NewInferenceConfigRegression() *InferenceConfigRegression { + r := &InferenceConfigRegression{} + + return r +} + +type InferenceConfigRegressionVariant interface { + InferenceConfigRegressionCaster() *InferenceConfigRegression +} + +func (s *InferenceConfigRegression) InferenceConfigRegressionCaster() *InferenceConfigRegression { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigupdatecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigupdatecontainer.go new file mode 100644 index 000000000..8305cb0e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceconfigupdatecontainer.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// InferenceConfigUpdateContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L315-L337 +type InferenceConfigUpdateContainer struct { + AdditionalInferenceConfigUpdateContainerProperty map[string]json.RawMessage `json:"-"` + // Classification Classification configuration for inference. + Classification *ClassificationInferenceOptions `json:"classification,omitempty"` + // FillMask Fill mask configuration for inference. + FillMask *FillMaskInferenceUpdateOptions `json:"fill_mask,omitempty"` + // Ner Named entity recognition configuration for inference. + Ner *NerInferenceUpdateOptions `json:"ner,omitempty"` + // PassThrough Pass through configuration for inference. + PassThrough *PassThroughInferenceUpdateOptions `json:"pass_through,omitempty"` + // QuestionAnswering Question answering configuration for inference + QuestionAnswering *QuestionAnsweringInferenceUpdateOptions `json:"question_answering,omitempty"` + // Regression Regression configuration for inference. + Regression *RegressionInferenceOptions `json:"regression,omitempty"` + // TextClassification Text classification configuration for inference. + TextClassification *TextClassificationInferenceUpdateOptions `json:"text_classification,omitempty"` + // TextEmbedding Text embedding configuration for inference. + TextEmbedding *TextEmbeddingInferenceUpdateOptions `json:"text_embedding,omitempty"` + // TextExpansion Text expansion configuration for inference. + TextExpansion *TextExpansionInferenceUpdateOptions `json:"text_expansion,omitempty"` + // ZeroShotClassification Zeroshot classification configuration for inference. 
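Alongside each constructor, the generator emits a small *Variant interface plus a trivial caster method, so call sites can accept either the concrete config or any wrapper able to produce one. A short sketch; applyRegression is a hypothetical helper for illustration, not a library function:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// applyRegression accepts anything satisfying the generated Variant
// interface; the concrete type implements it trivially via its caster.
func applyRegression(v types.InferenceConfigRegressionVariant) *types.InferenceConfigRegression {
	return v.InferenceConfigRegressionCaster()
}

func main() {
	n := 5
	cfg := types.NewInferenceConfigRegression()
	cfg.NumTopFeatureImportanceValues = &n
	fmt.Println(*applyRegression(cfg).NumTopFeatureImportanceValues) // 5
}
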
+ ZeroShotClassification *ZeroShotClassificationInferenceUpdateOptions `json:"zero_shot_classification,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s InferenceConfigUpdateContainer) MarshalJSON() ([]byte, error) { + type opt InferenceConfigUpdateContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalInferenceConfigUpdateContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalInferenceConfigUpdateContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewInferenceConfigUpdateContainer returns a InferenceConfigUpdateContainer. +func NewInferenceConfigUpdateContainer() *InferenceConfigUpdateContainer { + r := &InferenceConfigUpdateContainer{ + AdditionalInferenceConfigUpdateContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type InferenceConfigUpdateContainerVariant interface { + InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer +} + +func (s *InferenceConfigUpdateContainer) InferenceConfigUpdateContainerCaster() *InferenceConfigUpdateContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpoint.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpoint.go new file mode 100644 index 000000000..c8cfb2fba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpoint.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceEndpoint type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Services.ts#L45-L65 +type InferenceEndpoint struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` +} + +func (s *InferenceEndpoint) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + + case "service": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Service", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Service = o + + case "service_settings": + if err := dec.Decode(&s.ServiceSettings); err != nil { + return fmt.Errorf("%s | %w", "ServiceSettings", err) + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + } + } + return nil +} + +// NewInferenceEndpoint returns a InferenceEndpoint. +func NewInferenceEndpoint() *InferenceEndpoint { + r := &InferenceEndpoint{} + + return r +} + +type InferenceEndpointVariant interface { + InferenceEndpointCaster() *InferenceEndpoint +} + +func (s *InferenceEndpoint) InferenceEndpointCaster() *InferenceEndpoint { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpointinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpointinfo.go new file mode 100644 index 000000000..f969872e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceendpointinfo.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype" +) + +// InferenceEndpointInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Services.ts#L67-L79 +type InferenceEndpointInfo struct { + // ChunkingSettings Chunking configuration object + ChunkingSettings *InferenceChunkingSettings `json:"chunking_settings,omitempty"` + // InferenceId The inference Id + InferenceId string `json:"inference_id"` + // Service The service type + Service string `json:"service"` + // ServiceSettings Settings specific to the service + ServiceSettings json.RawMessage `json:"service_settings"` + // TaskSettings Task settings specific to the service and task type + TaskSettings json.RawMessage `json:"task_settings,omitempty"` + // TaskType The task type + TaskType tasktype.TaskType `json:"task_type"` +} + +func (s *InferenceEndpointInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunking_settings": + if err := dec.Decode(&s.ChunkingSettings); err != nil { + return fmt.Errorf("%s | %w", "ChunkingSettings", err) + } + + case "inference_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceId = o + + case "service": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Service", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Service = o + + case "service_settings": + if err := dec.Decode(&s.ServiceSettings); err != nil { + return fmt.Errorf("%s | %w", "ServiceSettings", err) + } + + case "task_settings": + if err := dec.Decode(&s.TaskSettings); err != nil { + return fmt.Errorf("%s | %w", "TaskSettings", err) + } + + case "task_type": + if err := dec.Decode(&s.TaskType); err != nil { + return fmt.Errorf("%s | %w", "TaskType", err) + } + + } + } + return nil +} + +// NewInferenceEndpointInfo returns a InferenceEndpointInfo. +func NewInferenceEndpointInfo() *InferenceEndpointInfo { + r := &InferenceEndpointInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencefeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencefeatureimportance.go new file mode 100644 index 000000000..6621fdfcf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencefeatureimportance.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
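Both endpoint types above keep service_settings and task_settings as json.RawMessage, deferring their shape to the caller, since each inference service defines its own settings. A sketch of decoding an endpoint and then its raw settings; the payload and the settings struct are invented for the example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Illustrative payload; real service settings depend on the
	// inference service in use.
	payload := []byte(`{"inference_id":"my-endpoint","service":"my-service","service_settings":{"num_threads":2}}`)

	var info types.InferenceEndpointInfo
	if err := json.Unmarshal(payload, &info); err != nil {
		panic(err)
	}

	// service_settings was kept as raw bytes; decode it on demand into a
	// caller-defined shape.
	var settings struct {
		NumThreads int `json:"num_threads"`
	}
	if err := json.Unmarshal(info.ServiceSettings, &settings); err != nil {
		panic(err)
	}
	fmt.Println(info.Service, settings.NumThreads) // my-service 2
}

Keeping the settings raw is what lets one generated type cover every service without a Go struct per provider.
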
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceFeatureImportance type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L780-L784 +type InferenceFeatureImportance struct { + Classes []InferenceClassImportance `json:"classes,omitempty"` + FeatureName string `json:"feature_name"` + Importance *Float64 `json:"importance,omitempty"` +} + +func (s *InferenceFeatureImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return fmt.Errorf("%s | %w", "Classes", err) + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "importance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Importance", err) + } + f := Float64(value) + s.Importance = &f + case float64: + f := Float64(v) + s.Importance = &f + } + + } + } + return nil +} + +// NewInferenceFeatureImportance returns a InferenceFeatureImportance. +func NewInferenceFeatureImportance() *InferenceFeatureImportance { + r := &InferenceFeatureImportance{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceprocessor.go new file mode 100644 index 000000000..75194e0d8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceprocessor.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1028-L1060 +type InferenceProcessor struct { + // Description Description of the processor. 
+ // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // FieldMap Maps the document field names to the known field names of the model. + // This mapping takes precedence over any default mappings provided in the model + // configuration. + FieldMap map[string]json.RawMessage `json:"field_map,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any of the input fields defined in input_ouput are missing + // then those missing fields are quietly ignored, otherwise a missing field + // causes a failure. + // Only applies when using input_output configurations to explicitly list the + // input fields. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // InferenceConfig Contains the inference type and its options. + InferenceConfig *InferenceConfig `json:"inference_config,omitempty"` + // InputOutput Input fields for inference and output (destination) fields for the inference + // results. + // This option is incompatible with the target_field and field_map options. + InputOutput []InputConfig `json:"input_output,omitempty"` + // ModelId The ID or alias for the trained model, or the ID of the deployment. + ModelId string `json:"model_id"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Field added to incoming documents to contain results objects. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *InferenceProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field_map": + if s.FieldMap == nil { + s.FieldMap = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.FieldMap); err != nil { + return fmt.Errorf("%s | %w", "FieldMap", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "input_output": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewInputConfig() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s 
| %w", "InputOutput", err) + } + + s.InputOutput = append(s.InputOutput, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.InputOutput); err != nil { + return fmt.Errorf("%s | %w", "InputOutput", err) + } + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewInferenceProcessor returns a InferenceProcessor. +func NewInferenceProcessor() *InferenceProcessor { + r := &InferenceProcessor{ + FieldMap: make(map[string]json.RawMessage), + } + + return r +} + +type InferenceProcessorVariant interface { + InferenceProcessorCaster() *InferenceProcessor +} + +func (s *InferenceProcessor) InferenceProcessorCaster() *InferenceProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceresponseresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceresponseresult.go new file mode 100644 index 000000000..78f769ebf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferenceresponseresult.go @@ -0,0 +1,202 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceResponseResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L478-L526 +type InferenceResponseResult struct { + // Entities If the model is trained for named entity recognition (NER) tasks, the + // response contains the recognized entities. + Entities []TrainedModelEntities `json:"entities,omitempty"` + // FeatureImportance The feature importance for the inference results. Relevant only for + // classification or regression models + FeatureImportance []TrainedModelInferenceFeatureImportance `json:"feature_importance,omitempty"` + // IsTruncated Indicates whether the input text was truncated to meet the model's maximum + // sequence length limit. This property + // is present only when it is true. 
+ IsTruncated *bool `json:"is_truncated,omitempty"` + // PredictedValue If the model is trained for a text classification or zero shot classification + // task, the response is the + // predicted class. + // For named entity recognition (NER) tasks, it contains the annotated text + // output. + // For fill mask tasks, it contains the top prediction for replacing the mask + // token. + // For text embedding tasks, it contains the raw numerical text embedding + // values. + // For regression models, its a numerical value + // For classification models, it may be an integer, double, boolean or string + // depending on prediction type + PredictedValue [][]ScalarValue `json:"predicted_value,omitempty"` + // PredictedValueSequence For fill mask tasks, the response contains the input text sequence with the + // mask token replaced by the predicted + // value. + // Additionally + PredictedValueSequence *string `json:"predicted_value_sequence,omitempty"` + // PredictionProbability Specifies a probability for the predicted value. + PredictionProbability *Float64 `json:"prediction_probability,omitempty"` + // PredictionScore Specifies a confidence score for the predicted value. + PredictionScore *Float64 `json:"prediction_score,omitempty"` + // TopClasses For fill mask, text classification, and zero shot classification tasks, the + // response contains a list of top + // class entries. + TopClasses []TopClassEntry `json:"top_classes,omitempty"` + // Warning If the request failed, the response contains the reason for the failure. + Warning *string `json:"warning,omitempty"` +} + +func (s *InferenceResponseResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "entities": + if err := dec.Decode(&s.Entities); err != nil { + return fmt.Errorf("%s | %w", "Entities", err) + } + + case "feature_importance": + if err := dec.Decode(&s.FeatureImportance); err != nil { + return fmt.Errorf("%s | %w", "FeatureImportance", err) + } + + case "is_truncated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsTruncated", err) + } + s.IsTruncated = &value + case bool: + s.IsTruncated = &v + } + + case "predicted_value": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new([]ScalarValue) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "PredictedValue", err) + } + + s.PredictedValue = append(s.PredictedValue, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.PredictedValue); err != nil { + return fmt.Errorf("%s | %w", "PredictedValue", err) + } + } + + case "predicted_value_sequence": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PredictedValueSequence", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictedValueSequence = &o + + case "prediction_probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PredictionProbability", err) + } + f := Float64(value) + s.PredictionProbability = &f + case float64: + f := Float64(v) + s.PredictionProbability = &f + } + + 
case "prediction_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PredictionScore", err) + } + f := Float64(value) + s.PredictionScore = &f + case float64: + f := Float64(v) + s.PredictionScore = &f + } + + case "top_classes": + if err := dec.Decode(&s.TopClasses); err != nil { + return fmt.Errorf("%s | %w", "TopClasses", err) + } + + case "warning": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Warning", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Warning = &o + + } + } + return nil +} + +// NewInferenceResponseResult returns a InferenceResponseResult. +func NewInferenceResponseResult() *InferenceResponseResult { + r := &InferenceResponseResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencetopclassentry.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencetopclassentry.go new file mode 100644 index 000000000..9fa08a9f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inferencetopclassentry.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InferenceTopClassEntry type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L774-L778 +type InferenceTopClassEntry struct { + ClassName FieldValue `json:"class_name"` + ClassProbability Float64 `json:"class_probability"` + ClassScore Float64 `json:"class_score"` +} + +func (s *InferenceTopClassEntry) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return fmt.Errorf("%s | %w", "ClassName", err) + } + + case "class_probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ClassProbability", err) + } + f := Float64(value) + s.ClassProbability = f + case float64: + f := Float64(v) + s.ClassProbability = f + } + + case "class_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ClassScore", err) + } + f := Float64(value) + s.ClassScore = f + case float64: + f := Float64(v) + s.ClassScore = f + } + + } + } + return nil +} + +// NewInferenceTopClassEntry returns a InferenceTopClassEntry. +func NewInferenceTopClassEntry() *InferenceTopClassEntry { + r := &InferenceTopClassEntry{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influence.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influence.go new file mode 100644 index 000000000..63fa9222c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influence.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Influence type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Anomaly.ts#L141-L144 +type Influence struct { + InfluencerFieldName string `json:"influencer_field_name"` + InfluencerFieldValues []string `json:"influencer_field_values"` +} + +func (s *Influence) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "influencer_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InfluencerFieldName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InfluencerFieldName = o + + case "influencer_field_values": + if err := dec.Decode(&s.InfluencerFieldValues); err != nil { + return fmt.Errorf("%s | %w", "InfluencerFieldValues", err) + } + + } + } + return nil +} + +// NewInfluence returns a Influence. +func NewInfluence() *Influence { + r := &Influence{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influencer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influencer.go new file mode 100644 index 000000000..f81fa341a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/influencer.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Influencer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Influencer.ts#L24-L76 +type Influencer struct { + // BucketSpan The length of the bucket in seconds. This value matches the bucket span that + // is specified in the job. + BucketSpan int64 `json:"bucket_span"` + // Foo Additional influencer properties are added, depending on the fields being + // analyzed. For example, if it’s + // analyzing `user_name` as an influencer, a field `user_name` is added to the + // result document. This + // information enables you to filter the anomaly results more easily. + Foo *string `json:"foo,omitempty"` + // InfluencerFieldName The field name of the influencer. + InfluencerFieldName string `json:"influencer_field_name"` + // InfluencerFieldValue The entity that influenced, contributed to, or was to blame for the anomaly. 
+ InfluencerFieldValue string `json:"influencer_field_value"` + // InfluencerScore A normalized score between 0-100, which is based on the probability of the + // influencer in this bucket aggregated + // across detectors. Unlike `initial_influencer_score`, this value is updated by + // a re-normalization process as new + // data is analyzed. + InfluencerScore Float64 `json:"influencer_score"` + // InitialInfluencerScore A normalized score between 0-100, which is based on the probability of the + // influencer aggregated across detectors. + // This is the initial value that was calculated at the time the bucket was + // processed. + InitialInfluencerScore Float64 `json:"initial_influencer_score"` + // IsInterim If true, this is an interim result. In other words, the results are + // calculated based on partial input data. + IsInterim bool `json:"is_interim"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // Probability The probability that the influencer has this behavior, in the range 0 to 1. + // This value can be held to a high + // precision of over 300 decimal places, so the `influencer_score` is provided + // as a human-readable and friendly + // interpretation of this value. + Probability Float64 `json:"probability"` + // ResultType Internal. This value is always set to `influencer`. + ResultType string `json:"result_type"` + // Timestamp The start time of the bucket for which these results were calculated. + Timestamp int64 `json:"timestamp"` +} + +func (s *Influencer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "foo": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Foo", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Foo = &o + + case "influencer_field_name": + if err := dec.Decode(&s.InfluencerFieldName); err != nil { + return fmt.Errorf("%s | %w", "InfluencerFieldName", err) + } + + case "influencer_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InfluencerFieldValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InfluencerFieldValue = o + + case "influencer_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InfluencerScore", err) + } + f := Float64(value) + s.InfluencerScore = f + case float64: + f := Float64(v) + s.InfluencerScore = f + } + + case "initial_influencer_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitialInfluencerScore", err) + } + f := Float64(value) + s.InitialInfluencerScore = f + case float64: + f := Float64(v) + s.InitialInfluencerScore = f + } + + case "is_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsInterim", err) + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err 
:= dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewInfluencer returns a Influencer. +func NewInfluencer() *Influencer { + r := &Influencer{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/infofeaturestate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/infofeaturestate.go new file mode 100644 index 000000000..090e7a8bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/infofeaturestate.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InfoFeatureState type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 +type InfoFeatureState struct { + FeatureName string `json:"feature_name"` + Indices []string `json:"indices"` +} + +func (s *InfoFeatureState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + } + } + return nil +} + +// NewInfoFeatureState returns a InfoFeatureState. +func NewInfoFeatureState() *InfoFeatureState { + r := &InfoFeatureState{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingest.go new file mode 100644 index 000000000..bbdee69d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingest.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Ingest type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L28-L36 +type Ingest struct { + Pipeline *string `json:"pipeline,omitempty"` + Redact_ *Redact `json:"_redact,omitempty"` + Timestamp DateTime `json:"timestamp"` +} + +func (s *Ingest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pipeline": + if err := dec.Decode(&s.Pipeline); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + + case "_redact": + if err := dec.Decode(&s.Redact_); err != nil { + return fmt.Errorf("%s | %w", "Redact_", err) + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewIngest returns a Ingest. +func NewIngest() *Ingest { + r := &Ingest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestdocumentsimulation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestdocumentsimulation.go new file mode 100644 index 000000000..761076883 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestdocumentsimulation.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IngestDocumentSimulation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/simulate/ingest/SimulateIngestResponse.ts#L35-L78 +type IngestDocumentSimulation struct { + // Error Any error resulting from simulatng ingest on this doc. This can be an error + // generated by + // executing a processor, or a mapping validation error when simulating indexing + // the resulting + // doc. + Error *ErrorCause `json:"error,omitempty"` + // ExecutedPipelines A list of the names of the pipelines executed on this document. + ExecutedPipelines []string `json:"executed_pipelines"` + // Id_ Identifier for the document. + Id_ string `json:"_id"` + // IgnoredFields A list of the fields that would be ignored at the indexing step. For example, + // a field whose + // value is larger than the allowed limit would make it through all of the + // pipelines, but + // would not be indexed into Elasticsearch. 
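Several decoders in this diff (indices in InfoFeatureState, input_output in InferenceProcessor, predicted_value in InferenceResponseResult) check whether the raw value starts with [ and wrap a bare scalar into a one-element slice. A small sketch against InfoFeatureState, with made-up payloads:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Elasticsearch may emit a bare string or an array; the generated
	// decoder normalizes both into a slice.
	var a, b types.InfoFeatureState
	if err := json.Unmarshal([]byte(`{"feature_name":"tasks","indices":".tasks"}`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"feature_name":"tasks","indices":[".tasks",".tasks-1"]}`), &b); err != nil {
		panic(err)
	}
	fmt.Println(a.Indices) // [.tasks]
	fmt.Println(b.Indices) // [.tasks .tasks-1]
}
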
+	IgnoredFields []map[string]string `json:"ignored_fields,omitempty"`
+	// Index_ Name of the index that the document would be indexed into if this were not a
+	// simulation.
+	Index_ string `json:"_index"`
+	IngestDocumentSimulation map[string]string `json:"-"`
+	// Source_ JSON body for the document.
+	Source_ map[string]json.RawMessage `json:"_source"`
+	Version_ StringifiedVersionNumber `json:"_version"`
+}
+
+func (s *IngestDocumentSimulation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "error":
+			if err := dec.Decode(&s.Error); err != nil {
+				return fmt.Errorf("%s | %w", "Error", err)
+			}
+
+		case "executed_pipelines":
+			if err := dec.Decode(&s.ExecutedPipelines); err != nil {
+				return fmt.Errorf("%s | %w", "ExecutedPipelines", err)
+			}
+
+		case "_id":
+			if err := dec.Decode(&s.Id_); err != nil {
+				return fmt.Errorf("%s | %w", "Id_", err)
+			}
+
+		case "ignored_fields":
+			if err := dec.Decode(&s.IgnoredFields); err != nil {
+				return fmt.Errorf("%s | %w", "IgnoredFields", err)
+			}
+
+		case "_index":
+			if err := dec.Decode(&s.Index_); err != nil {
+				return fmt.Errorf("%s | %w", "Index_", err)
+			}
+
+		case "_source":
+			if s.Source_ == nil {
+				s.Source_ = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Source_); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+
+		case "_version":
+			if err := dec.Decode(&s.Version_); err != nil {
+				return fmt.Errorf("%s | %w", "Version_", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.IngestDocumentSimulation == nil {
+					s.IngestDocumentSimulation = make(map[string]string, 0)
+				}
+				raw := new(string)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "IngestDocumentSimulation", err)
+				}
+				s.IngestDocumentSimulation[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s IngestDocumentSimulation) MarshalJSON() ([]byte, error) {
+	type opt IngestDocumentSimulation
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.IngestDocumentSimulation {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "IngestDocumentSimulation")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewIngestDocumentSimulation returns an IngestDocumentSimulation.
+func NewIngestDocumentSimulation() *IngestDocumentSimulation {
+	r := &IngestDocumentSimulation{
+		IngestDocumentSimulation: make(map[string]string),
+		Source_: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipeline.go
new file mode 100644
index 000000000..2d678beb3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipeline.go
@@ -0,0 +1,134 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IngestPipeline type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Pipeline.ts#L23-L51 +type IngestPipeline struct { + // Deprecated Marks this ingest pipeline as deprecated. + // When a deprecated ingest pipeline is referenced as the default or final + // pipeline when creating or updating a non-deprecated index template, + // Elasticsearch will emit a deprecation warning. + Deprecated *bool `json:"deprecated,omitempty"` + // Description Description of the ingest pipeline. + Description *string `json:"description,omitempty"` + // Meta_ Arbitrary metadata about the ingest pipeline. This map is not automatically + // generated by Elasticsearch. + Meta_ Metadata `json:"_meta,omitempty"` + // OnFailure Processors to run immediately after a processor failure. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Processors Processors used to perform transformations on documents before indexing. + // Processors run sequentially in the order specified. + Processors []ProcessorContainer `json:"processors,omitempty"` + // Version Version number used by external systems to track ingest pipelines. + Version *int64 `json:"version,omitempty"` +} + +func (s *IngestPipeline) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deprecated", err) + } + s.Deprecated = &value + case bool: + s.Deprecated = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return fmt.Errorf("%s | %w", "Processors", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIngestPipeline returns a IngestPipeline. 
+func NewIngestPipeline() *IngestPipeline { + r := &IngestPipeline{} + + return r +} + +type IngestPipelineVariant interface { + IngestPipelineCaster() *IngestPipeline +} + +func (s *IngestPipeline) IngestPipelineCaster() *IngestPipeline { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipelineparams.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipelineparams.go new file mode 100644 index 000000000..5f0c500f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingestpipelineparams.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IngestPipelineParams type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L148-L153 +type IngestPipelineParams struct { + ExtractBinaryContent bool `json:"extract_binary_content"` + Name string `json:"name"` + ReduceWhitespace bool `json:"reduce_whitespace"` + RunMlInference bool `json:"run_ml_inference"` +} + +func (s *IngestPipelineParams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "extract_binary_content": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExtractBinaryContent", err) + } + s.ExtractBinaryContent = value + case bool: + s.ExtractBinaryContent = v + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "reduce_whitespace": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReduceWhitespace", err) + } + s.ReduceWhitespace = value + case bool: + s.ReduceWhitespace = v + } + + case "run_ml_inference": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RunMlInference", err) + } + s.RunMlInference = value + case bool: + s.RunMlInference = v + } + + } + } + return nil +} + +// NewIngestPipelineParams returns a 
IngestPipelineParams. +func NewIngestPipelineParams() *IngestPipelineParams { + r := &IngestPipelineParams{} + + return r +} + +type IngestPipelineParamsVariant interface { + IngestPipelineParamsCaster() *IngestPipelineParams +} + +func (s *IngestPipelineParams) IngestPipelineParamsCaster() *IngestPipelineParams { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingeststats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingeststats.go new file mode 100644 index 000000000..7b469df3d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingeststats.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IngestStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L358-L396 +type IngestStats struct { + // Count Total number of documents ingested during the lifetime of this node. + Count int64 `json:"count"` + // Current Total number of documents currently being ingested. + Current int64 `json:"current"` + // Failed Total number of failed ingest operations during the lifetime of this node. + Failed int64 `json:"failed"` + // IngestedAsFirstPipelineInBytes Total number of bytes of all documents ingested by the pipeline. + // This field is only present on pipelines which are the first to process a + // document. + // Thus, it is not present on pipelines which only serve as a final pipeline + // after a default pipeline, a pipeline run after a reroute processor, or + // pipelines in pipeline processors. + IngestedAsFirstPipelineInBytes int64 `json:"ingested_as_first_pipeline_in_bytes"` + // Processors Total number of ingest processors. + Processors []map[string]KeyedProcessor `json:"processors"` + // ProducedAsFirstPipelineInBytes Total number of bytes of all documents produced by the pipeline. + // This field is only present on pipelines which are the first to process a + // document. + // Thus, it is not present on pipelines which only serve as a final pipeline + // after a default pipeline, a pipeline run after a reroute processor, or + // pipelines in pipeline processors. + // In situations where there are subsequent pipelines, the value represents the + // size of the document after all pipelines have run. 
+ ProducedAsFirstPipelineInBytes int64 `json:"produced_as_first_pipeline_in_bytes"` + // TimeInMillis Total time, in milliseconds, spent preprocessing ingest documents during the + // lifetime of this node. + TimeInMillis int64 `json:"time_in_millis"` +} + +func (s *IngestStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "ingested_as_first_pipeline_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IngestedAsFirstPipelineInBytes", err) + } + s.IngestedAsFirstPipelineInBytes = value + case float64: + f := int64(v) + s.IngestedAsFirstPipelineInBytes = f + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return fmt.Errorf("%s | %w", "Processors", err) + } + + case "produced_as_first_pipeline_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ProducedAsFirstPipelineInBytes", err) + } + s.ProducedAsFirstPipelineInBytes = value + case float64: + f := int64(v) + s.ProducedAsFirstPipelineInBytes = f + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + } + } + return nil +} + +// NewIngestStats returns a IngestStats. +func NewIngestStats() *IngestStats { + r := &IngestStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingesttotal.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingesttotal.go new file mode 100644 index 000000000..b7ef2ef3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ingesttotal.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IngestTotal type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L398-L415 +type IngestTotal struct { + // Count Total number of documents ingested during the lifetime of this node. + Count int64 `json:"count"` + // Current Total number of documents currently being ingested. + Current int64 `json:"current"` + // Failed Total number of failed ingest operations during the lifetime of this node. + Failed int64 `json:"failed"` + // TimeInMillis Total time, in milliseconds, spent preprocessing ingest documents during the + // lifetime of this node. + TimeInMillis int64 `json:"time_in_millis"` +} + +func (s *IngestTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + } + } + return nil +} + +// NewIngestTotal returns a IngestTotal. +func NewIngestTotal() *IngestTotal { + r := &IngestTotal{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlineget.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlineget.go new file mode 100644 index 000000000..390b8edde --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlineget.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// InlineGet type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L319-L332
+type InlineGet struct {
+	Fields map[string]json.RawMessage `json:"fields,omitempty"`
+	Found bool `json:"found"`
+	Metadata map[string]json.RawMessage `json:"-"`
+	PrimaryTerm_ *int64 `json:"_primary_term,omitempty"`
+	Routing_ *string `json:"_routing,omitempty"`
+	SeqNo_ *int64 `json:"_seq_no,omitempty"`
+	Source_ json.RawMessage `json:"_source,omitempty"`
+}
+
+func (s *InlineGet) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "fields":
+			if s.Fields == nil {
+				s.Fields = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Fields); err != nil {
+				return fmt.Errorf("%s | %w", "Fields", err)
+			}
+
+		case "found":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Found", err)
+				}
+				s.Found = value
+			case bool:
+				s.Found = v
+			}
+
+		case "_primary_term":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PrimaryTerm_", err)
+				}
+				s.PrimaryTerm_ = &value
+			case float64:
+				f := int64(v)
+				s.PrimaryTerm_ = &f
+			}
+
+		case "_routing":
+			if err := dec.Decode(&s.Routing_); err != nil {
+				return fmt.Errorf("%s | %w", "Routing_", err)
+			}
+
+		case "_seq_no":
+			if err := dec.Decode(&s.SeqNo_); err != nil {
+				return fmt.Errorf("%s | %w", "SeqNo_", err)
+			}
+
+		case "_source":
+			if err := dec.Decode(&s.Source_); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.Metadata == nil {
+					s.Metadata = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "Metadata", err)
+				}
+				s.Metadata[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s InlineGet) MarshalJSON() ([]byte, error) {
+	type opt InlineGet
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Metadata {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Metadata")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewInlineGet returns an InlineGet.
+func NewInlineGet() *InlineGet { + r := &InlineGet{ + Fields: make(map[string]json.RawMessage), + Metadata: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlinegetdictuserdefined.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlinegetdictuserdefined.go new file mode 100644 index 000000000..7bc916e3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inlinegetdictuserdefined.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InlineGetDictUserDefined type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L319-L332 +type InlineGetDictUserDefined struct { + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Found bool `json:"found"` + InlineGetDictUserDefined map[string]json.RawMessage `json:"-"` + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + Routing_ *string `json:"_routing,omitempty"` + SeqNo_ *int64 `json:"_seq_no,omitempty"` + Source_ map[string]json.RawMessage `json:"_source,omitempty"` +} + +func (s *InlineGetDictUserDefined) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + case "_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return fmt.Errorf("%s | %w", "Routing_", err) + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + case "_source": + if s.Source_ == nil { + s.Source_ = make(map[string]json.RawMessage, 0) + } + if err := 
dec.Decode(&s.Source_); err != nil {
+				return fmt.Errorf("%s | %w", "Source_", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.InlineGetDictUserDefined == nil {
+					s.InlineGetDictUserDefined = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "InlineGetDictUserDefined", err)
+				}
+				s.InlineGetDictUserDefined[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s InlineGetDictUserDefined) MarshalJSON() ([]byte, error) {
+	type opt InlineGetDictUserDefined
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.InlineGetDictUserDefined {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "InlineGetDictUserDefined")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewInlineGetDictUserDefined returns an InlineGetDictUserDefined.
+func NewInlineGetDictUserDefined() *InlineGetDictUserDefined {
+	r := &InlineGetDictUserDefined{
+		Fields: make(map[string]json.RawMessage),
+		InlineGetDictUserDefined: make(map[string]json.RawMessage),
+		Source_: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhits.go
new file mode 100644
index 000000000..2c1d6f573
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhits.go
@@ -0,0 +1,298 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// InnerHits type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L107-L141
+type InnerHits struct {
+	Collapse *FieldCollapse `json:"collapse,omitempty"`
+	DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"`
+	Explain *bool `json:"explain,omitempty"`
+	Fields []string `json:"fields,omitempty"`
+	// From Inner hit starting document offset.
+ From *int `json:"from,omitempty"` + Highlight *Highlight `json:"highlight,omitempty"` + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // Name The name for the particular inner hit definition in the response. + // Useful when a search request contains multiple inner hits. + Name *string `json:"name,omitempty"` + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The maximum number of hits to return per `inner_hits`. + Size *int `json:"size,omitempty"` + // Sort How the inner hits should be sorted per `inner_hits`. + // By default, inner hits are sorted by score. + Sort []SortCombinations `json:"sort,omitempty"` + Source_ SourceConfig `json:"_source,omitempty"` + StoredFields []string `json:"stored_fields,omitempty"` + TrackScores *bool `json:"track_scores,omitempty"` + Version *bool `json:"version,omitempty"` +} + +func (s *InnerHits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return fmt.Errorf("%s | %w", "DocvalueFields", err) + } + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "seq_no_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err) + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + } + + case "track_scores": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrackScores", err) + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} + +// NewInnerHits returns a InnerHits. +func NewInnerHits() *InnerHits { + r := &InnerHits{ + ScriptFields: make(map[string]ScriptField), + } + + return r +} + +type InnerHitsVariant interface { + InnerHitsCaster() *InnerHits +} + +func (s *InnerHits) InnerHitsCaster() *InnerHits { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhitsresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhitsresult.go new file mode 100644 index 000000000..a64ae6ee6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerhitsresult.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// InnerHitsResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L85-L87 +type InnerHitsResult struct { + Hits HitsMetadata `json:"hits"` +} + +// NewInnerHitsResult returns a InnerHitsResult. +func NewInnerHitsResult() *InnerHitsResult { + r := &InnerHitsResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerretriever.go new file mode 100644 index 000000000..a25fc5efa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/innerretriever.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer" +) + +// InnerRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L85-L89 +type InnerRetriever struct { + Normalizer scorenormalizer.ScoreNormalizer `json:"normalizer"` + Retriever RetrieverContainer `json:"retriever"` + Weight float32 `json:"weight"` +} + +func (s *InnerRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "normalizer": + if err := dec.Decode(&s.Normalizer); err != nil { + return fmt.Errorf("%s | %w", "Normalizer", err) + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + case "weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Weight", err) + } + f := float32(value) + s.Weight = f + case float64: + f := float32(v) + s.Weight = f + } + + } + } + return nil +} + +// NewInnerRetriever returns a InnerRetriever. 
+func NewInnerRetriever() *InnerRetriever { + r := &InnerRetriever{} + + return r +} + +type InnerRetrieverVariant interface { + InnerRetrieverCaster() *InnerRetriever +} + +func (s *InnerRetriever) InnerRetrieverCaster() *InnerRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inprogress.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inprogress.go new file mode 100644 index 000000000..0fb13da1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inprogress.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InProgress type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L141-L146 +type InProgress struct { + Name string `json:"name"` + StartTimeMillis int64 `json:"start_time_millis"` + State string `json:"state"` + Uuid string `json:"uuid"` +} + +func (s *InProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "start_time_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// NewInProgress returns a InProgress. +func NewInProgress() *InProgress { + r := &InProgress{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/input.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/input.go new file mode 100644 index 000000000..1c261a451 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/input.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Input type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L56-L58 +type Input struct { + FieldNames []string `json:"field_names"` +} + +func (s *Input) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "FieldNames", err) + } + + s.FieldNames = append(s.FieldNames, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.FieldNames); err != nil { + return fmt.Errorf("%s | %w", "FieldNames", err) + } + } + + } + } + return nil +} + +// NewInput returns a Input. +func NewInput() *Input { + r := &Input{} + + return r +} + +type InputVariant interface { + InputCaster() *Input +} + +func (s *Input) InputCaster() *Input { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inputconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inputconfig.go new file mode 100644 index 000000000..cfcb8071d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/inputconfig.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// InputConfig type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1062-L1065 +type InputConfig struct { + InputField string `json:"input_field"` + OutputField string `json:"output_field"` +} + +func (s *InputConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InputField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InputField = o + + case "output_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OutputField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OutputField = o + + } + } + return nil +} + +// NewInputConfig returns a InputConfig. +func NewInputConfig() *InputConfig { + r := &InputConfig{} + + return r +} + +type InputConfigVariant interface { + InputConfigCaster() *InputConfig +} + +func (s *InputConfig) InputConfigCaster() *InputConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integernumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integernumberproperty.go new file mode 100644 index 000000000..f638cc820 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integernumberproperty.go @@ -0,0 +1,999 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// IntegerNumberProperty type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L172-L175
+type IntegerNumberProperty struct {
+	Boost *Float64 `json:"boost,omitempty"`
+	Coerce *bool `json:"coerce,omitempty"`
+	CopyTo []string `json:"copy_to,omitempty"`
+	DocValues *bool `json:"doc_values,omitempty"`
+	Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields map[string]Property `json:"fields,omitempty"`
+	IgnoreAbove *int `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
+	Index *bool `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta map[string]string `json:"meta,omitempty"`
+	NullValue *int `json:"null_value,omitempty"`
+	OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"`
+	Properties map[string]Property `json:"properties,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	Store *bool `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+	// Defaults to false.
+	TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"`
+	Type string `json:"type,omitempty"`
+}
+
+func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := Float64(value)
+				s.Boost = &f
+			case float64:
+				f := Float64(v)
+				s.Boost = &f
+			}
+
+		case "coerce":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Coerce", err)
+				}
+				s.Coerce = &value
+			case bool:
+				s.Coerce = &v
+			}
+
+		case "copy_to":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "CopyTo", err)
+				}
+
+				s.CopyTo = append(s.CopyTo, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil {
+					return fmt.Errorf("%s | %w", "CopyTo", err)
+				}
+			}
+
+		case "doc_values":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocValues", err)
+				}
+				s.DocValues = &value
+			case bool:
+				s.DocValues = &v
+			}
+
+		case "dynamic":
+			if err := dec.Decode(&s.Dynamic); err != nil {
+				return fmt.Errorf("%s | %w", "Dynamic", err)
+			}
+
+		case "fields":
+			if s.Fields == nil {
+				s.Fields = make(map[string]Property, 0)
+			}
+			refs := make(map[string]json.RawMessage, 0)
+			dec.Decode(&refs)
+			for key, message := range refs {
+				kind := make(map[string]any)
+				buf := bytes.NewReader(message)
+				localDec := json.NewDecoder(buf)
+				localDec.Decode(&kind)
+				buf.Seek(0, io.SeekStart)
+				if _, ok := 
kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := 
NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + 
} + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + s.NullValue = &value + case float64: + f := int(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + 
case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { + type innerIntegerNumberProperty IntegerNumberProperty + tmp := innerIntegerNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: 
s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "integer" + + return json.Marshal(tmp) +} + +// NewIntegerNumberProperty returns a IntegerNumberProperty. +func NewIntegerNumberProperty() *IntegerNumberProperty { + r := &IntegerNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type IntegerNumberPropertyVariant interface { + IntegerNumberPropertyCaster() *IntegerNumberProperty +} + +func (s *IntegerNumberProperty) IntegerNumberPropertyCaster() *IntegerNumberProperty { + return s +} + +func (s *IntegerNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integerrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integerrangeproperty.go new file mode 100644 index 000000000..5dc9d6df7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/integerrangeproperty.go @@ -0,0 +1,922 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// IntegerRangeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L42-L44 +type IntegerRangeProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
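+ // NOTE (editorial sketch, not part of the generated file): as with the
+ // other generated property types, the hand-written UnmarshalJSON below is
+ // deliberately lenient about scalar encodings -- booleans and numbers may
+ // arrive either as JSON literals or as quoted strings, so both of the
+ // following decode to the same value:
+ //
+ //	p := NewIntegerRangeProperty()
+ //	_ = json.Unmarshal([]byte(`{"type":"integer_range","coerce":true}`), p)
+ //	_ = json.Unmarshal([]byte(`{"type":"integer_range","coerce":"true"}`), p)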
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { 
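+ // Editorial note: only mappings with an unrecognized "type" reach this
+ // fallback (a missing "type" was already coerced to "object" above), so an
+ // error here means the value could not even be decoded as an opaque
+ // Property.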
+ return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IntegerRangeProperty) MarshalJSON() ([]byte, error) { + type innerIntegerRangeProperty IntegerRangeProperty + tmp := innerIntegerRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "integer_range" + + return json.Marshal(tmp) +} + +// NewIntegerRangeProperty returns a IntegerRangeProperty. +func NewIntegerRangeProperty() *IntegerRangeProperty { + r := &IntegerRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type IntegerRangePropertyVariant interface { + IntegerRangePropertyCaster() *IntegerRangeProperty +} + +func (s *IntegerRangeProperty) IntegerRangePropertyCaster() *IntegerRangeProperty { + return s +} + +func (s *IntegerRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervals.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervals.go new file mode 100644 index 000000000..95cdee325 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervals.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// Intervals type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L83-L112 +type Intervals struct { + AdditionalIntervalsProperty map[string]json.RawMessage `json:"-"` + // AllOf Returns matches that span a combination of other rules. 
+ AllOf *IntervalsAllOf `json:"all_of,omitempty"` + // AnyOf Returns intervals produced by any of its sub-rules. + AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` + // Fuzzy Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. + Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` + // Match Matches analyzed text. + Match *IntervalsMatch `json:"match,omitempty"` + // Prefix Matches terms that start with a specified set of characters. + Prefix *IntervalsPrefix `json:"prefix,omitempty"` + Range *IntervalsRange `json:"range,omitempty"` + Regexp *IntervalsRegexp `json:"regexp,omitempty"` + // Wildcard Matches terms using a wildcard pattern. + Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s Intervals) MarshalJSON() ([]byte, error) { + type opt Intervals + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIntervals returns an Intervals. +func NewIntervals() *Intervals { + r := &Intervals{ + AdditionalIntervalsProperty: make(map[string]json.RawMessage), + } + + return r +} + +type IntervalsVariant interface { + IntervalsCaster() *Intervals +} + +func (s *Intervals) IntervalsCaster() *Intervals { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsallof.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsallof.go new file mode 100644 index 000000000..60788f9c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsallof.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsAllOf type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L50-L70 +type IntervalsAllOf struct { + // Filter Rule used to filter returned intervals. + Filter *IntervalsFilter `json:"filter,omitempty"` + // Intervals An array of rules to combine.
All rules must produce a match in a document + // for the overall source to match. + Intervals []Intervals `json:"intervals"` + // MaxGaps Maximum number of positions between the matching terms. + // Intervals produced by the rules further apart than this are not considered + // matches. + MaxGaps *int `json:"max_gaps,omitempty"` + // Ordered If `true`, intervals produced by the rules should appear in the order in + // which they are specified. + Ordered *bool `json:"ordered,omitempty"` +} + +func (s *IntervalsAllOf) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "intervals": + if err := dec.Decode(&s.Intervals); err != nil { + return fmt.Errorf("%s | %w", "Intervals", err) + } + + case "max_gaps": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGaps", err) + } + s.MaxGaps = &value + case float64: + f := int(v) + s.MaxGaps = &f + } + + case "ordered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Ordered", err) + } + s.Ordered = &value + case bool: + s.Ordered = &v + } + + } + } + return nil +} + +// NewIntervalsAllOf returns a IntervalsAllOf. +func NewIntervalsAllOf() *IntervalsAllOf { + r := &IntervalsAllOf{} + + return r +} + +type IntervalsAllOfVariant interface { + IntervalsAllOfCaster() *IntervalsAllOf +} + +func (s *IntervalsAllOf) IntervalsAllOfCaster() *IntervalsAllOf { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsanyof.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsanyof.go new file mode 100644 index 000000000..06ec6a962 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsanyof.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IntervalsAnyOf type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L72-L81 +type IntervalsAnyOf struct { + // Filter Rule used to filter returned intervals. + Filter *IntervalsFilter `json:"filter,omitempty"` + // Intervals An array of rules to match. 
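+ // NOTE (editorial, not part of the generated file): any_of yields an
+ // interval wherever at least one sub-rule matches, whereas all_of above
+ // requires every sub-rule to match (optionally in order and within
+ // max_gaps positions). For example, with ruleA and ruleB as two prepared
+ // Intervals values (hypothetical):
+ //
+ //	anyOf := NewIntervalsAnyOf()
+ //	anyOf.Intervals = []Intervals{ruleA, ruleB}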
+ Intervals []Intervals `json:"intervals"` +} + +// NewIntervalsAnyOf returns a IntervalsAnyOf. +func NewIntervalsAnyOf() *IntervalsAnyOf { + r := &IntervalsAnyOf{} + + return r +} + +type IntervalsAnyOfVariant interface { + IntervalsAnyOfCaster() *IntervalsAnyOf +} + +func (s *IntervalsAnyOf) IntervalsAnyOfCaster() *IntervalsAnyOf { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfilter.go new file mode 100644 index 000000000..359fc36b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfilter.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// IntervalsFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L114-L154 +type IntervalsFilter struct { + AdditionalIntervalsFilterProperty map[string]json.RawMessage `json:"-"` + // After Query used to return intervals that follow an interval from the `filter` + // rule. + After *Intervals `json:"after,omitempty"` + // Before Query used to return intervals that occur before an interval from the + // `filter` rule. + Before *Intervals `json:"before,omitempty"` + // ContainedBy Query used to return intervals contained by an interval from the `filter` + // rule. + ContainedBy *Intervals `json:"contained_by,omitempty"` + // Containing Query used to return intervals that contain an interval from the `filter` + // rule. + Containing *Intervals `json:"containing,omitempty"` + // NotContainedBy Query used to return intervals that are **not** contained by an interval from + // the `filter` rule. + NotContainedBy *Intervals `json:"not_contained_by,omitempty"` + // NotContaining Query used to return intervals that do **not** contain an interval from the + // `filter` rule. + NotContaining *Intervals `json:"not_containing,omitempty"` + // NotOverlapping Query used to return intervals that do **not** overlap with an interval from + // the `filter` rule. + NotOverlapping *Intervals `json:"not_overlapping,omitempty"` + // Overlapping Query used to return intervals that overlap with an interval from the + // `filter` rule. + Overlapping *Intervals `json:"overlapping,omitempty"` + // Script Script used to return matching documents. + // This script must return a boolean value: `true` or `false`. 
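+ // NOTE (editorial sketch, not part of the generated file): a filter is
+ // attached through the Filter field of a rule such as IntervalsMatch.
+ // For example, keeping only "salty" intervals that are contained by some
+ // prepared rule `outer` (hypothetical *Intervals):
+ //
+ //	f := NewIntervalsFilter()
+ //	f.ContainedBy = outer
+ //	m := NewIntervalsMatch()
+ //	m.Query = "salty"
+ //	m.Filter = f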
+ Script *Script `json:"script,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s IntervalsFilter) MarshalJSON() ([]byte, error) { + type opt IntervalsFilter + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalIntervalsFilterProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalIntervalsFilterProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIntervalsFilter returns an IntervalsFilter. +func NewIntervalsFilter() *IntervalsFilter { + r := &IntervalsFilter{ + AdditionalIntervalsFilterProperty: make(map[string]json.RawMessage), + } + + return r +} + +type IntervalsFilterVariant interface { + IntervalsFilterCaster() *IntervalsFilter +} + +func (s *IntervalsFilter) IntervalsFilterCaster() *IntervalsFilter { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfuzzy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfuzzy.go new file mode 100644 index 000000000..3c14878d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsfuzzy.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsFuzzy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L156-L186 +type IntervalsFuzzy struct { + // Analyzer Analyzer used to normalize the term. + Analyzer *string `json:"analyzer,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // PrefixLength Number of beginning characters left unchanged when creating expansions. + PrefixLength *int `json:"prefix_length,omitempty"` + // Term The term to match. + Term string `json:"term"` + // Transpositions Indicates whether edits include transpositions of two adjacent characters + // (for example, `ab` to `ba`). + Transpositions *bool `json:"transpositions,omitempty"` + // UseField If specified, match intervals from this field rather than the top-level + // field.
+ // The `term` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. + UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsFuzzy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Term", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + case "transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Transpositions", err) + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsFuzzy returns a IntervalsFuzzy. +func NewIntervalsFuzzy() *IntervalsFuzzy { + r := &IntervalsFuzzy{} + + return r +} + +type IntervalsFuzzyVariant interface { + IntervalsFuzzyCaster() *IntervalsFuzzy +} + +func (s *IntervalsFuzzy) IntervalsFuzzyCaster() *IntervalsFuzzy { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsmatch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsmatch.go new file mode 100644 index 000000000..30e830030 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsmatch.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsMatch type. 
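
For orientation: the generated UnmarshalJSON methods in these files are deliberately lenient. Scalar fields fall back to strconv parsing, so numbers and booleans are accepted either as JSON literals or as quoted strings. A minimal sketch of that behavior using IntervalsFuzzy (illustrative only, not part of the vendored diff; it assumes the package is importable under the vendor path shown in the file headers):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a, b types.IntervalsFuzzy
	// Literal number and boolean forms.
	_ = json.Unmarshal([]byte(`{"term":"kimchy","prefix_length":2,"transpositions":true}`), &a)
	// Quoted forms decode to the same values via the strconv fallbacks.
	_ = json.Unmarshal([]byte(`{"term":"kimchy","prefix_length":"2","transpositions":"true"}`), &b)
	fmt.Println(*a.PrefixLength == *b.PrefixLength)     // true
	fmt.Println(*a.Transpositions == *b.Transpositions) // true
}
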
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L188-L218 +type IntervalsMatch struct { + // Analyzer Analyzer used to analyze terms in the query. + Analyzer *string `json:"analyzer,omitempty"` + // Filter An optional interval filter. + Filter *IntervalsFilter `json:"filter,omitempty"` + // MaxGaps Maximum number of positions between the matching terms. + // Terms further apart than this are not considered matches. + MaxGaps *int `json:"max_gaps,omitempty"` + // Ordered If `true`, matching terms must appear in their specified order. + Ordered *bool `json:"ordered,omitempty"` + // Query Text you wish to find in the provided field. + Query string `json:"query"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `term` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. + UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsMatch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "max_gaps": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGaps", err) + } + s.MaxGaps = &value + case float64: + f := int(v) + s.MaxGaps = &f + } + + case "ordered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Ordered", err) + } + s.Ordered = &value + case bool: + s.Ordered = &v + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsMatch returns a IntervalsMatch. +func NewIntervalsMatch() *IntervalsMatch { + r := &IntervalsMatch{} + + return r +} + +type IntervalsMatchVariant interface { + IntervalsMatchCaster() *IntervalsMatch +} + +func (s *IntervalsMatch) IntervalsMatchCaster() *IntervalsMatch { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsprefix.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsprefix.go new file mode 100644 index 000000000..06d043453 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsprefix.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsPrefix type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L220-L235 +type IntervalsPrefix struct { + // Analyzer Analyzer used to analyze the `prefix`. + Analyzer *string `json:"analyzer,omitempty"` + // Prefix Beginning characters of terms you wish to find in the top-level field. + Prefix string `json:"prefix"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `prefix` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. + UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsPrefix) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsPrefix returns a IntervalsPrefix. +func NewIntervalsPrefix() *IntervalsPrefix { + r := &IntervalsPrefix{} + + return r +} + +type IntervalsPrefixVariant interface { + IntervalsPrefixCaster() *IntervalsPrefix +} + +func (s *IntervalsPrefix) IntervalsPrefixCaster() *IntervalsPrefix { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsquery.go new file mode 100644 index 000000000..0b0b9e704 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsquery.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L283-L317 +type IntervalsQuery struct { + AdditionalIntervalsQueryProperty map[string]json.RawMessage `json:"-"` + // AllOf Returns matches that span a combination of other rules. + AllOf *IntervalsAllOf `json:"all_of,omitempty"` + // AnyOf Returns intervals produced by any of its sub-rules. + AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fuzzy Matches terms that are similar to the provided term, within an edit distance + // defined by `fuzziness`. + Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` + // Match Matches analyzed text. + Match *IntervalsMatch `json:"match,omitempty"` + // Prefix Matches terms that start with a specified set of characters. + Prefix *IntervalsPrefix `json:"prefix,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Range *IntervalsRange `json:"range,omitempty"` + Regexp *IntervalsRegexp `json:"regexp,omitempty"` + // Wildcard Matches terms using a wildcard pattern. 
+ Wildcard *IntervalsWildcard `json:"wildcard,omitempty"`
+}
+
+func (s *IntervalsQuery) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "all_of":
+ if err := dec.Decode(&s.AllOf); err != nil {
+ return fmt.Errorf("%s | %w", "AllOf", err)
+ }
+
+ case "any_of":
+ if err := dec.Decode(&s.AnyOf); err != nil {
+ return fmt.Errorf("%s | %w", "AnyOf", err)
+ }
+
+ case "boost":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseFloat(v, 32)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Boost", err)
+ }
+ f := float32(value)
+ s.Boost = &f
+ case float64:
+ f := float32(v)
+ s.Boost = &f
+ }
+
+ case "fuzzy":
+ if err := dec.Decode(&s.Fuzzy); err != nil {
+ return fmt.Errorf("%s | %w", "Fuzzy", err)
+ }
+
+ case "match":
+ if err := dec.Decode(&s.Match); err != nil {
+ return fmt.Errorf("%s | %w", "Match", err)
+ }
+
+ case "prefix":
+ if err := dec.Decode(&s.Prefix); err != nil {
+ return fmt.Errorf("%s | %w", "Prefix", err)
+ }
+
+ case "_name":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "QueryName_", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.QueryName_ = &o
+
+ case "range":
+ if err := dec.Decode(&s.Range); err != nil {
+ return fmt.Errorf("%s | %w", "Range", err)
+ }
+
+ case "regexp":
+ if err := dec.Decode(&s.Regexp); err != nil {
+ return fmt.Errorf("%s | %w", "Regexp", err)
+ }
+
+ case "wildcard":
+ if err := dec.Decode(&s.Wildcard); err != nil {
+ return fmt.Errorf("%s | %w", "Wildcard", err)
+ }
+
+ default:
+
+ if key, ok := t.(string); ok {
+ if s.AdditionalIntervalsQueryProperty == nil {
+ s.AdditionalIntervalsQueryProperty = make(map[string]json.RawMessage, 0)
+ }
+ raw := new(json.RawMessage)
+ if err := dec.Decode(&raw); err != nil {
+ return fmt.Errorf("%s | %w", "AdditionalIntervalsQueryProperty", err)
+ }
+ s.AdditionalIntervalsQueryProperty[key] = *raw
+ }
+
+ }
+ }
+ return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s IntervalsQuery) MarshalJSON() ([]byte, error) {
+ type opt IntervalsQuery
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalIntervalsQueryProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalIntervalsQueryProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewIntervalsQuery returns a IntervalsQuery.
+func NewIntervalsQuery() *IntervalsQuery { + r := &IntervalsQuery{ + AdditionalIntervalsQueryProperty: make(map[string]json.RawMessage), + } + + return r +} + +type IntervalsQueryVariant interface { + IntervalsQueryCaster() *IntervalsQuery +} + +func (s *IntervalsQuery) IntervalsQueryCaster() *IntervalsQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsrange.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsrange.go new file mode 100644 index 000000000..4b0f52924 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsrange.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsRange type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L237-L264 +type IntervalsRange struct { + // Analyzer Analyzer used to analyze the `prefix`. + Analyzer *string `json:"analyzer,omitempty"` + // Gt Lower term, either gte or gt must be provided. + Gt *string `json:"gt,omitempty"` + // Gte Lower term, either gte or gt must be provided. + Gte *string `json:"gte,omitempty"` + // Lt Upper term, either lte or lt must be provided. + Lt *string `json:"lt,omitempty"` + // Lte Upper term, either lte or lt must be provided. + Lte *string `json:"lte,omitempty"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `prefix` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. 
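
Putting the pieces together: IntervalsQuery is the per-field container, and each rule type above plugs into it. A sketch of composing an ordered all_of rule with a nested any_of (illustrative only; it assumes the sibling Intervals and IntervalsAllOf types from this package expose the fields shown, mirroring the IntervalsAnyOf type above):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	ordered := true
	q := types.IntervalsQuery{
		AllOf: &types.IntervalsAllOf{
			Ordered: &ordered,
			Intervals: []types.Intervals{
				{Match: &types.IntervalsMatch{Query: "my favorite food"}},
				{AnyOf: &types.IntervalsAnyOf{Intervals: []types.Intervals{
					{Match: &types.IntervalsMatch{Query: "hot water"}},
					{Match: &types.IntervalsMatch{Query: "cold porridge"}},
				}}},
			},
		},
	}
	// An intervals query maps a field name to its rules; unknown keys would
	// round-trip through AdditionalIntervalsQueryProperty.
	body, _ := json.Marshal(map[string]types.IntervalsQuery{"my_text": q})
	fmt.Println(string(body))
}
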
+ UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsRange) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "gt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Gt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Gt = &o + + case "gte": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Gte", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Gte = &o + + case "lt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lt = &o + + case "lte": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lte", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lte = &o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsRange returns a IntervalsRange. +func NewIntervalsRange() *IntervalsRange { + r := &IntervalsRange{} + + return r +} + +type IntervalsRangeVariant interface { + IntervalsRangeCaster() *IntervalsRange +} + +func (s *IntervalsRange) IntervalsRangeCaster() *IntervalsRange { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsregexp.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsregexp.go new file mode 100644 index 000000000..2e5652435 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalsregexp.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsRegexp type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L266-L281 +type IntervalsRegexp struct { + // Analyzer Analyzer used to analyze the `prefix`. + Analyzer *string `json:"analyzer,omitempty"` + // Pattern Regex pattern. + Pattern string `json:"pattern"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `prefix` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. + UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsRegexp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsRegexp returns a IntervalsRegexp. +func NewIntervalsRegexp() *IntervalsRegexp { + r := &IntervalsRegexp{} + + return r +} + +type IntervalsRegexpVariant interface { + IntervalsRegexpCaster() *IntervalsRegexp +} + +func (s *IntervalsRegexp) IntervalsRegexpCaster() *IntervalsRegexp { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalswildcard.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalswildcard.go new file mode 100644 index 000000000..f37ac2510 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/intervalswildcard.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IntervalsWildcard type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L319-L334 +type IntervalsWildcard struct { + // Analyzer Analyzer used to analyze the `pattern`. 
+ // Defaults to the top-level field's analyzer. + Analyzer *string `json:"analyzer,omitempty"` + // Pattern Wildcard pattern used to find matching terms. + Pattern string `json:"pattern"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `pattern` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. + UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsWildcard) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return fmt.Errorf("%s | %w", "UseField", err) + } + + } + } + return nil +} + +// NewIntervalsWildcard returns a IntervalsWildcard. +func NewIntervalsWildcard() *IntervalsWildcard { + r := &IntervalsWildcard{} + + return r +} + +type IntervalsWildcardVariant interface { + IntervalsWildcardCaster() *IntervalsWildcard +} + +func (s *IntervalsWildcard) IntervalsWildcardCaster() *IntervalsWildcard { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invertedindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invertedindex.go new file mode 100644 index 000000000..14f3e8430 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invertedindex.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// InvertedIndex type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L69-L77 +type InvertedIndex struct { + Offsets uint `json:"offsets"` + Payloads uint `json:"payloads"` + Positions uint `json:"positions"` + Postings uint `json:"postings"` + Proximity uint `json:"proximity"` + TermFrequencies uint `json:"term_frequencies"` + Terms uint `json:"terms"` +} + +// NewInvertedIndex returns a InvertedIndex. +func NewInvertedIndex() *InvertedIndex { + r := &InvertedIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocation.go new file mode 100644 index 000000000..3b076804d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocation.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Invocation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L148-L151 +type Invocation struct { + SnapshotName string `json:"snapshot_name"` + Time DateTime `json:"time"` +} + +func (s *Invocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "snapshot_name": + if err := dec.Decode(&s.SnapshotName); err != nil { + return fmt.Errorf("%s | %w", "SnapshotName", err) + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + } + } + return nil +} + +// NewInvocation returns a Invocation. +func NewInvocation() *Invocation { + r := &Invocation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocations.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocations.go new file mode 100644 index 000000000..e86601a3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/invocations.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Invocations type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L41-L43 +type Invocations struct { + Total int64 `json:"total"` +} + +func (s *Invocations) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewInvocations returns a Invocations. +func NewInvocations() *Invocations { + r := &Invocations{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostatdevice.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostatdevice.go new file mode 100644 index 000000000..ff74227c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostatdevice.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IoStatDevice type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L803-L828 +type IoStatDevice struct { + // DeviceName The Linux device name. 
+ DeviceName *string `json:"device_name,omitempty"` + // Operations The total number of read and write operations for the device completed since + // starting Elasticsearch. + Operations *int64 `json:"operations,omitempty"` + // ReadKilobytes The total number of kilobytes read for the device since starting + // Elasticsearch. + ReadKilobytes *int64 `json:"read_kilobytes,omitempty"` + // ReadOperations The total number of read operations for the device completed since starting + // Elasticsearch. + ReadOperations *int64 `json:"read_operations,omitempty"` + // WriteKilobytes The total number of kilobytes written for the device since starting + // Elasticsearch. + WriteKilobytes *int64 `json:"write_kilobytes,omitempty"` + // WriteOperations The total number of write operations for the device completed since starting + // Elasticsearch. + WriteOperations *int64 `json:"write_operations,omitempty"` +} + +func (s *IoStatDevice) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "device_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DeviceName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeviceName = &o + + case "operations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Operations", err) + } + s.Operations = &value + case float64: + f := int64(v) + s.Operations = &f + } + + case "read_kilobytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadKilobytes", err) + } + s.ReadKilobytes = &value + case float64: + f := int64(v) + s.ReadKilobytes = &f + } + + case "read_operations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReadOperations", err) + } + s.ReadOperations = &value + case float64: + f := int64(v) + s.ReadOperations = &f + } + + case "write_kilobytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "WriteKilobytes", err) + } + s.WriteKilobytes = &value + case float64: + f := int64(v) + s.WriteKilobytes = &f + } + + case "write_operations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "WriteOperations", err) + } + s.WriteOperations = &value + case float64: + f := int64(v) + s.WriteOperations = &f + } + + } + } + return nil +} + +// NewIoStatDevice returns a IoStatDevice. +func NewIoStatDevice() *IoStatDevice { + r := &IoStatDevice{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostats.go new file mode 100644 index 000000000..6d0e610e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iostats.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// IoStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L791-L801 +type IoStats struct { + // Devices Array of disk metrics for each device that is backing an Elasticsearch data + // path. + // These disk metrics are probed periodically and averages between the last + // probe and the current probe are computed. + Devices []IoStatDevice `json:"devices,omitempty"` + // Total The sum of the disk metrics for all devices that back an Elasticsearch data + // path. + Total *IoStatDevice `json:"total,omitempty"` +} + +// NewIoStats returns a IoStats. +func NewIoStats() *IoStats { + r := &IoStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipfilter.go new file mode 100644 index 000000000..d20a28ae7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipfilter.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IpFilter type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L177-L180 +type IpFilter struct { + Http bool `json:"http"` + Transport bool `json:"transport"` +} + +func (s *IpFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "http": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Http", err) + } + s.Http = value + case bool: + s.Http = v + } + + case "transport": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + s.Transport = value + case bool: + s.Transport = v + } + + } + } + return nil +} + +// NewIpFilter returns a IpFilter. +func NewIpFilter() *IpFilter { + r := &IpFilter{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipinfo.go new file mode 100644 index 000000000..450778847 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipinfo.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Ipinfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L59-L59 +type Ipinfo struct { +} + +// NewIpinfo returns a Ipinfo. +func NewIpinfo() *Ipinfo { + r := &Ipinfo{} + + return r +} + +type IpinfoVariant interface { + IpinfoCaster() *Ipinfo +} + +func (s *Ipinfo) IpinfoCaster() *Ipinfo { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationdatabaseconfigurationmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationdatabaseconfigurationmetadata.go new file mode 100644 index 000000000..96e8c5249 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationdatabaseconfigurationmetadata.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IpLocationDatabaseConfigurationMetadata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/get_ip_location_database/GetIpLocationDatabaseResponse.ts#L28-L34 +type IpLocationDatabaseConfigurationMetadata struct { + Database DatabaseConfigurationFull `json:"database"` + Id string `json:"id"` + ModifiedDate *int64 `json:"modified_date,omitempty"` + ModifiedDateMillis *int64 `json:"modified_date_millis,omitempty"` + Version int64 `json:"version"` +} + +func (s *IpLocationDatabaseConfigurationMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database": + if err := dec.Decode(&s.Database); err != nil { + return fmt.Errorf("%s | %w", "Database", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDate", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewIpLocationDatabaseConfigurationMetadata returns a IpLocationDatabaseConfigurationMetadata. +func NewIpLocationDatabaseConfigurationMetadata() *IpLocationDatabaseConfigurationMetadata { + r := &IpLocationDatabaseConfigurationMetadata{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationprocessor.go new file mode 100644 index 000000000..7541afc15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iplocationprocessor.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// IpLocationProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L479-L513
+type IpLocationProcessor struct {
+ // DatabaseFile The database filename referring to a database the module ships with
+ // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom
+ // database in the ingest-geoip config directory.
+ DatabaseFile *string `json:"database_file,omitempty"`
+ // Description Description of the processor.
+ // Useful for describing the purpose of the processor or its configuration.
+ Description *string `json:"description,omitempty"`
+ // DownloadDatabaseOnPipelineCreation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the
+ // missing database is downloaded when the pipeline is created.
+ // Else, the download is triggered when the pipeline is used as the
+ // `default_pipeline` or `final_pipeline` in an index.
+ DownloadDatabaseOnPipelineCreation *bool `json:"download_database_on_pipeline_creation,omitempty"`
+ // Field The field to get the ip address from for the geographical lookup.
+ Field string `json:"field"`
+ // FirstOnly If `true`, only the first found IP location data will be returned, even if
+ // the field contains an array.
+ FirstOnly *bool `json:"first_only,omitempty"`
+ // If Conditionally execute the processor.
+ If *Script `json:"if,omitempty"`
+ // IgnoreFailure Ignore failures for the processor.
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+ // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without
+ // modifying the document.
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"`
+ // OnFailure Handle failures for the processor.
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+ // Properties Controls what properties are added to the `target_field` based on the IP
+ // location lookup.
+ Properties []string `json:"properties,omitempty"`
+ // Tag Identifier for the processor.
+ // Useful for debugging and metrics.
+ Tag *string `json:"tag,omitempty"`
+ // TargetField The field that will hold the geographical information looked up from the
+ // MaxMind database.
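
A sketch of how this processor serializes into a pipeline definition (illustrative only; the "source_ip" field name is hypothetical, and wrapping the struct under an "ip_location" key follows the ingest-processor convention suggested by the type rather than a verified request):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	target := "ip_location" // hypothetical destination field
	firstOnly := true
	p := types.IpLocationProcessor{
		Field:       "source_ip", // hypothetical field holding the IP address
		TargetField: &target,
		FirstOnly:   &firstOnly,
	}
	// Nil pointer fields are dropped by omitempty, so only the set options appear.
	body, _ := json.Marshal(map[string]types.IpLocationProcessor{"ip_location": p})
	fmt.Println(string(body)) // {"ip_location":{"field":"source_ip","first_only":true,"target_field":"ip_location"}}
}
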
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *IpLocationProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DatabaseFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatabaseFile = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "download_database_on_pipeline_creation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DownloadDatabaseOnPipelineCreation", err) + } + s.DownloadDatabaseOnPipelineCreation = &value + case bool: + s.DownloadDatabaseOnPipelineCreation = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "first_only": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FirstOnly", err) + } + s.FirstOnly = &value + case bool: + s.FirstOnly = &v + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewIpLocationProcessor returns a IpLocationProcessor. 
+func NewIpLocationProcessor() *IpLocationProcessor { + r := &IpLocationProcessor{} + + return r +} + +type IpLocationProcessorVariant interface { + IpLocationProcessorCaster() *IpLocationProcessor +} + +func (s *IpLocationProcessor) IpLocationProcessorCaster() *IpLocationProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregate.go new file mode 100644 index 000000000..665497eae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// IpPrefixAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L711-L715 +type IpPrefixAggregate struct { + Buckets BucketsIpPrefixBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *IpPrefixAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]IpPrefixBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []IpPrefixBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewIpPrefixAggregate returns a IpPrefixAggregate. +func NewIpPrefixAggregate() *IpPrefixAggregate { + r := &IpPrefixAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregation.go new file mode 100644 index 000000000..da624ef19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixaggregation.go @@ -0,0 +1,166 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// IpPrefixAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1196-L1225
+type IpPrefixAggregation struct {
+	// AppendPrefixLength Defines whether the prefix length is appended to IP address keys in the
+	// response.
+	AppendPrefixLength *bool `json:"append_prefix_length,omitempty"`
+	// Field The IP address field to aggregate on. The field mapping type must be `ip`.
+	Field string `json:"field"`
+	// IsIpv6 Defines whether the prefix applies to IPv6 addresses.
+	IsIpv6 *bool `json:"is_ipv6,omitempty"`
+	// Keyed Defines whether buckets are returned as a hash rather than an array in the
+	// response.
+	Keyed *bool `json:"keyed,omitempty"`
+	// MinDocCount Minimum number of documents in a bucket for it to be included in the
+	// response.
+	MinDocCount *int64 `json:"min_doc_count,omitempty"`
+	// PrefixLength Length of the network prefix. For IPv4 addresses the accepted range is [0,
+	// 32].
+	// For IPv6 addresses the accepted range is [0, 128].
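+	// For example, a prefix length of 24 buckets IPv4 addresses by their /24
+	// network, so 10.1.2.3 and 10.1.2.200 land in the same bucket.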
+ PrefixLength int `json:"prefix_length"` +} + +func (s *IpPrefixAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "append_prefix_length": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AppendPrefixLength", err) + } + s.AppendPrefixLength = &value + case bool: + s.AppendPrefixLength = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "is_ipv6": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsIpv6", err) + } + s.IsIpv6 = &value + case bool: + s.IsIpv6 = &v + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = value + case float64: + f := int(v) + s.PrefixLength = f + } + + } + } + return nil +} + +// NewIpPrefixAggregation returns a IpPrefixAggregation. +func NewIpPrefixAggregation() *IpPrefixAggregation { + r := &IpPrefixAggregation{} + + return r +} + +type IpPrefixAggregationVariant interface { + IpPrefixAggregationCaster() *IpPrefixAggregation +} + +func (s *IpPrefixAggregation) IpPrefixAggregationCaster() *IpPrefixAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixbucket.go new file mode 100644 index 000000000..af652695c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipprefixbucket.go @@ -0,0 +1,690 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// IpPrefixBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L717-L722 +type IpPrefixBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + IsIpv6 bool `json:"is_ipv6"` + Key string `json:"key"` + Netmask *string `json:"netmask,omitempty"` + PrefixLength int `json:"prefix_length"` +} + +func (s *IpPrefixBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "is_ipv6": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsIpv6", err) + } + s.IsIpv6 = value + case bool: + s.IsIpv6 = v + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + case "netmask": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Netmask", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Netmask = &o + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = value + case float64: + f := int(v) + s.PrefixLength = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + 
case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IpPrefixBucket) MarshalJSON() ([]byte, error) { + type opt IpPrefixBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIpPrefixBucket returns a IpPrefixBucket. +func NewIpPrefixBucket() *IpPrefixBucket { + r := &IpPrefixBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipproperty.go new file mode 100644 index 000000000..5c9c58e4f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ipproperty.go @@ -0,0 +1,969 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// IpProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L74-L88 +type IpProperty struct { + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. 
+ TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IpProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp 
any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := 
NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := 
NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IpProperty) MarshalJSON() ([]byte, error) { + type innerIpProperty IpProperty + tmp := innerIpProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "ip" + + return json.Marshal(tmp) +} + +// NewIpProperty returns a IpProperty. +func NewIpProperty() *IpProperty { + r := &IpProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type IpPropertyVariant interface { + IpPropertyCaster() *IpProperty +} + +func (s *IpProperty) IpPropertyCaster() *IpProperty { + return s +} + +func (s *IpProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregate.go new file mode 100644 index 000000000..a71387081 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// IpRangeAggregate type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L624-L629
+type IpRangeAggregate struct {
+	Buckets BucketsIpRangeBucket `json:"buckets"`
+	Meta Metadata `json:"meta,omitempty"`
+}
+
+func (s *IpRangeAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "buckets":
+
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			source := bytes.NewReader(rawMsg)
+			localDec := json.NewDecoder(source)
+			switch rawMsg[0] {
+			case '{':
+				o := make(map[string]IpRangeBucket, 0)
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			case '[':
+				o := []IpRangeBucket{}
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			}
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewIpRangeAggregate returns a IpRangeAggregate.
+func NewIpRangeAggregate() *IpRangeAggregate {
+	r := &IpRangeAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregation.go
new file mode 100644
index 000000000..7442bc03b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregation.go
@@ -0,0 +1,84 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// IpRangeAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L567-L576
+type IpRangeAggregation struct {
+	// Field The IP address field whose values are used to build ranges.
+	Field *string `json:"field,omitempty"`
+	// Ranges Array of IP ranges.
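+	// A range may be specified either as from/to bounds or as a CIDR mask; a
+	// minimal sketch (the mask value is illustrative):
+	//
+	//	mask := "10.0.0.0/25"
+	//	agg := NewIpRangeAggregation()
+	//	agg.Ranges = []IpRangeAggregationRange{{Mask: &mask}}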
+ Ranges []IpRangeAggregationRange `json:"ranges,omitempty"` +} + +func (s *IpRangeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return fmt.Errorf("%s | %w", "Ranges", err) + } + + } + } + return nil +} + +// NewIpRangeAggregation returns a IpRangeAggregation. +func NewIpRangeAggregation() *IpRangeAggregation { + r := &IpRangeAggregation{} + + return r +} + +type IpRangeAggregationVariant interface { + IpRangeAggregationCaster() *IpRangeAggregation +} + +func (s *IpRangeAggregation) IpRangeAggregationCaster() *IpRangeAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregationrange.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregationrange.go new file mode 100644 index 000000000..3d6450730 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeaggregationrange.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IpRangeAggregationRange type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L578-L591 +type IpRangeAggregationRange struct { + // From Start of the range. + From *string `json:"from,omitempty"` + // Mask IP range defined as a CIDR mask. + Mask *string `json:"mask,omitempty"` + // To End of the range. 
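+ // As in other range-style aggregations, `from` is inclusive and `to` is
+ // exclusive.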
+ To *string `json:"to,omitempty"` +} + +func (s *IpRangeAggregationRange) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "mask": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Mask", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mask = &o + + case "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.To = &o + + } + } + return nil +} + +// NewIpRangeAggregationRange returns a IpRangeAggregationRange. +func NewIpRangeAggregationRange() *IpRangeAggregationRange { + r := &IpRangeAggregationRange{} + + return r +} + +type IpRangeAggregationRangeVariant interface { + IpRangeAggregationRangeCaster() *IpRangeAggregationRange +} + +func (s *IpRangeAggregationRange) IpRangeAggregationRangeCaster() *IpRangeAggregationRange { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangebucket.go new file mode 100644 index 000000000..980a5b550 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangebucket.go @@ -0,0 +1,671 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// IpRangeBucket type. 
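+//
+// Sub-aggregation results are carried in the dynamic Aggregations map, keyed
+// by name from response entries of the form "type#name".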
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L631-L635 +type IpRangeBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + From *string `json:"from,omitempty"` + Key *string `json:"key,omitempty"` + To *string `json:"to,omitempty"` +} + +func (s *IpRangeBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.To = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s 
| %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := 
NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); 
err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s IpRangeBucket) MarshalJSON() ([]byte, error) { + type opt IpRangeBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewIpRangeBucket returns a IpRangeBucket. +func NewIpRangeBucket() *IpRangeBucket { + r := &IpRangeBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeproperty.go new file mode 100644 index 000000000..e88f207dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/iprangeproperty.go @@ -0,0 +1,922 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
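The decoder generated for IpRangeBucket above routes sub-aggregations by splitting response keys of the form `<type>#<name>`: the prefix selects the concrete aggregate type and the suffix becomes the map key. A minimal sketch of that behavior, with hypothetical values and assuming *types.MaxAggregate (defined elsewhere in this package) accepts a plain `{"value": ...}` payload:

    raw := []byte(`{
        "key": "10.0.0.0-10.0.0.255",
        "doc_count": 42,
        "max#max_latency": {"value": 3.5}
    }`)

    bucket := types.NewIpRangeBucket()
    if err := json.Unmarshal(raw, bucket); err != nil {
        panic(err)
    }
    // bucket.DocCount == 42, *bucket.Key == "10.0.0.0-10.0.0.255"
    // bucket.Aggregations["max_latency"] holds a *types.MaxAggregate;
    // the "max#" prefix selected the concrete type.

Note that the accompanying MarshalJSON re-emits each sub-aggregation under its bare name, so the `<type>#` prefix is not preserved on a round trip.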
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// IpRangeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L46-L48 +type IpRangeProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := 
NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + 
case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties 
| %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IpRangeProperty) MarshalJSON() ([]byte, error) { + type innerIpRangeProperty IpRangeProperty + tmp := innerIpRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "ip_range" + + return json.Marshal(tmp) +} + +// NewIpRangeProperty returns a IpRangeProperty. +func NewIpRangeProperty() *IpRangeProperty { + r := &IpRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type IpRangePropertyVariant interface { + IpRangePropertyCaster() *IpRangeProperty +} + +func (s *IpRangeProperty) IpRangePropertyCaster() *IpRangeProperty { + return s +} + +func (s *IpRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/irishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/irishanalyzer.go new file mode 100644 index 000000000..b3d7bd0f4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/irishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// IrishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L220-L225 +type IrishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *IrishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IrishAnalyzer) MarshalJSON() ([]byte, error) { + type innerIrishAnalyzer IrishAnalyzer + tmp := innerIrishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "irish" + + return json.Marshal(tmp) +} + +// NewIrishAnalyzer returns a IrishAnalyzer. +func NewIrishAnalyzer() *IrishAnalyzer { + r := &IrishAnalyzer{} + + return r +} + +type IrishAnalyzerVariant interface { + IrishAnalyzerCaster() *IrishAnalyzer +} + +func (s *IrishAnalyzer) IrishAnalyzerCaster() *IrishAnalyzer { + return s +} + +func (s *IrishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/italiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/italiananalyzer.go new file mode 100644 index 000000000..a175a8fa6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/italiananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
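One generated detail of these analyzer types is worth calling out: MarshalJSON always injects the literal type discriminator, even on a zero value, because the inner copy sets it after the field assignments. A minimal sketch using only the IrishAnalyzer constructor above and the standard library:

    data, err := json.Marshal(types.NewIrishAnalyzer())
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data)) // {"type":"irish"}

The same pattern applies to the ItalianAnalyzer ("italian") and JaStopTokenFilter ("ja_stop") definitions that follow.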
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ItalianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L227-L232 +type ItalianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ItalianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ItalianAnalyzer) MarshalJSON() ([]byte, error) { + type innerItalianAnalyzer ItalianAnalyzer + tmp := innerItalianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "italian" + + return json.Marshal(tmp) +} + +// NewItalianAnalyzer returns a ItalianAnalyzer. +func NewItalianAnalyzer() *ItalianAnalyzer { + r := &ItalianAnalyzer{} + + return r +} + +type ItalianAnalyzerVariant interface { + ItalianAnalyzerCaster() *ItalianAnalyzer +} + +func (s *ItalianAnalyzer) ItalianAnalyzerCaster() *ItalianAnalyzer { + return s +} + +func (s *ItalianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jastoptokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jastoptokenfilter.go new file mode 100644 index 000000000..ced70c596 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jastoptokenfilter.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// JaStopTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L32-L35 +type JaStopTokenFilter struct { + Stopwords StopWords `json:"stopwords,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *JaStopTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s JaStopTokenFilter) MarshalJSON() ([]byte, error) { + type innerJaStopTokenFilter JaStopTokenFilter + tmp := innerJaStopTokenFilter{ + Stopwords: s.Stopwords, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "ja_stop" + + return json.Marshal(tmp) +} + +// NewJaStopTokenFilter returns a JaStopTokenFilter. +func NewJaStopTokenFilter() *JaStopTokenFilter { + r := &JaStopTokenFilter{} + + return r +} + +type JaStopTokenFilterVariant interface { + JaStopTokenFilterCaster() *JaStopTokenFilter +} + +func (s *JaStopTokenFilter) JaStopTokenFilterCaster() *JaStopTokenFilter { + return s +} + +func (s *JaStopTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaiservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaiservicesettings.go new file mode 100644 index 000000000..c9f099fb0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaiservicesettings.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaisimilaritytype" +) + +// JinaAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1439-L1468 +type JinaAIServiceSettings struct { + // ApiKey A valid API key of your JinaAI account. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // ModelId The name of the model to use for the inference task. + // For a `rerank` task, it is required. + // For a `text_embedding` task, it is optional. + ModelId *string `json:"model_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // JinaAI. + // By default, the `jinaai` service sets the number of requests allowed per + // minute to 2000 for all task types. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Similarity For a `text_embedding` task, the similarity measure. One of cosine, + // dot_product, l2_norm. + // The default values varies with the embedding type. + // For example, a float embedding type uses a `dot_product` similarity measure + // by default. + Similarity *jinaaisimilaritytype.JinaAISimilarityType `json:"similarity,omitempty"` +} + +func (s *JinaAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "similarity": + if err := dec.Decode(&s.Similarity); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + + } + } + return nil +} + +// NewJinaAIServiceSettings returns a JinaAIServiceSettings. 
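A minimal sketch of filling in the JinaAI service settings decoded above; the API key and model name are hypothetical placeholders, and only fields from this struct plus the standard library are used:

    modelID := "jina-embeddings-v3" // hypothetical model name
    settings := types.NewJinaAIServiceSettings()
    settings.ApiKey = "<redacted>" // required; per the doc comment, never returned by the get inference endpoint
    settings.ModelId = &modelID

    payload, err := json.Marshal(settings)
    if err != nil {
        panic(err)
    }
    // {"api_key":"<redacted>","model_id":"jina-embeddings-v3"}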
+func NewJinaAIServiceSettings() *JinaAIServiceSettings { + r := &JinaAIServiceSettings{} + + return r +} + +type JinaAIServiceSettingsVariant interface { + JinaAIServiceSettingsCaster() *JinaAIServiceSettings +} + +func (s *JinaAIServiceSettings) JinaAIServiceSettingsCaster() *JinaAIServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaitasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaitasksettings.go new file mode 100644 index 000000000..1f04cc5a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jinaaitasksettings.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaitextembeddingtask" +) + +// JinaAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1470-L1491 +type JinaAITaskSettings struct { + // ReturnDocuments For a `rerank` task, return the doc text within the results. + ReturnDocuments *bool `json:"return_documents,omitempty"` + // Task For a `text_embedding` task, the task passed to the model. + // Valid values are: + // + // * `classification`: Use it for embeddings passed through a text classifier. + // * `clustering`: Use it for the embeddings run through a clustering algorithm. + // * `ingest`: Use it for storing document embeddings in a vector database. + // * `search`: Use it for storing embeddings of search queries run against a + // vector database to find relevant documents. + Task *jinaaitextembeddingtask.JinaAITextEmbeddingTask `json:"task,omitempty"` + // TopN For a `rerank` task, the number of most relevant documents to return. + // It defaults to the number of the documents. + // If this inference endpoint is used in a `text_similarity_reranker` retriever + // query and `top_n` is set, it must be greater than or equal to + // `rank_window_size` in the query. 
+ TopN *int `json:"top_n,omitempty"` +} + +func (s *JinaAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "return_documents": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnDocuments", err) + } + s.ReturnDocuments = &value + case bool: + s.ReturnDocuments = &v + } + + case "task": + if err := dec.Decode(&s.Task); err != nil { + return fmt.Errorf("%s | %w", "Task", err) + } + + case "top_n": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopN", err) + } + s.TopN = &value + case float64: + f := int(v) + s.TopN = &f + } + + } + } + return nil +} + +// NewJinaAITaskSettings returns a JinaAITaskSettings. +func NewJinaAITaskSettings() *JinaAITaskSettings { + r := &JinaAITaskSettings{} + + return r +} + +type JinaAITaskSettingsVariant interface { + JinaAITaskSettingsCaster() *JinaAITaskSettings +} + +func (s *JinaAITaskSettings) JinaAITaskSettingsCaster() *JinaAITaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/job.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/job.go new file mode 100644 index 000000000..7f235e50a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/job.go @@ -0,0 +1,353 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Job type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L61-L180 +type Job struct { + // AllowLazyOpen Advanced configuration option. + // Specifies whether this job can open when there is insufficient machine + // learning node capacity for it to be immediately assigned to a node. + AllowLazyOpen bool `json:"allow_lazy_open"` + // AnalysisConfig The analysis configuration, which specifies how to analyze the data. + // After you create a job, you cannot change the analysis configuration; all the + // properties are informational. + AnalysisConfig AnalysisConfig `json:"analysis_config"` + // AnalysisLimits Limits can be applied for the resources required to hold the mathematical + // models in memory. 
+	// These limits are approximate and can be set per job.
+	// They do not control the memory used by other processes, for example the
+	// Elasticsearch Java processes.
+	AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"`
+	// BackgroundPersistInterval Advanced configuration option.
+	// The time between each periodic persistence of the model.
+	// The default value is a randomized value between 3 and 4 hours, which avoids
+	// all jobs persisting at exactly the same time.
+	// The smallest allowed value is 1 hour.
+	BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"`
+	Blocked *JobBlocked `json:"blocked,omitempty"`
+	CreateTime DateTime `json:"create_time,omitempty"`
+	// CustomSettings Advanced configuration option.
+	// Contains custom metadata about the job.
+	CustomSettings json.RawMessage `json:"custom_settings,omitempty"`
+	// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old
+	// model snapshots for this job.
+	// It specifies a period of time (in days) after which only the first snapshot
+	// per day is retained.
+	// This period is relative to the timestamp of the most recent snapshot for this
+	// job.
+	// Valid values range from 0 to `model_snapshot_retention_days`.
+	DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"`
+	// DataDescription The data description defines the format of the input data when you send data
+	// to the job by using the post data API.
+	// Note that when configuring a datafeed, these properties are automatically
+	// set.
+	// When data is received via the post data API, it is not stored in
+	// Elasticsearch.
+	// Only the results for anomaly detection are retained.
+	DataDescription DataDescription `json:"data_description"`
+	// DatafeedConfig The datafeed, which retrieves data from Elasticsearch for analysis by the
+	// job.
+	// You can associate only one datafeed with each anomaly detection job.
+	DatafeedConfig *MLDatafeed `json:"datafeed_config,omitempty"`
+	// Deleting Indicates that the process of deleting the job is in progress but not yet
+	// completed.
+	// It is only reported when `true`.
+	Deleting *bool `json:"deleting,omitempty"`
+	// Description A description of the job.
+	Description *string `json:"description,omitempty"`
+	// FinishedTime If the job closed or failed, this is the time the job finished, otherwise it
+	// is `null`.
+	// This property is informational; you cannot change its value.
+	FinishedTime DateTime `json:"finished_time,omitempty"`
+	// Groups A list of job groups.
+	// A job can belong to no groups or many.
+	Groups []string `json:"groups,omitempty"`
+	// JobId Identifier for the anomaly detection job.
+	// This identifier can contain lowercase alphanumeric characters (a-z and 0-9),
+	// hyphens, and underscores.
+	// It must start and end with alphanumeric characters.
+	JobId string `json:"job_id"`
+	// JobType Reserved for future use, currently set to `anomaly_detector`.
+	JobType *string `json:"job_type,omitempty"`
+	// JobVersion The machine learning configuration version number at which the job was
+	// created.
+	JobVersion *string `json:"job_version,omitempty"`
+	// ModelPlotConfig This advanced configuration option stores model information along with the
+	// results.
+	// It provides a more detailed view into anomaly detection.
+	// Model plot provides a simplified and indicative view of the model and its
+	// bounds.
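+	// Enabling model plot can add considerable overhead to the system; it is
+	// not feasible for jobs with many entities.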
+ ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"` + ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` + // ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. + // It specifies the maximum period of time (in days) that snapshots are + // retained. + // This period is relative to the timestamp of the most recent snapshot for this + // job. + // By default, snapshots ten days older than the newest snapshot are deleted. + ModelSnapshotRetentionDays int64 `json:"model_snapshot_retention_days"` + // RenormalizationWindowDays Advanced configuration option. + // The period over which adjustments to the score are applied, as new data is + // seen. + // The default value is the longer of 30 days or 100 `bucket_spans`. + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + // ResultsIndexName A text string that affects the name of the machine learning results index. + // The default value is `shared`, which generates an index named + // `.ml-anomalies-shared`. + ResultsIndexName string `json:"results_index_name"` + // ResultsRetentionDays Advanced configuration option. + // The period of time (in days) that results are retained. + // Age is calculated relative to the timestamp of the latest bucket result. + // If this property has a non-null value, once per day at 00:30 (server time), + // results that are the specified number of days older than the latest bucket + // result are deleted from Elasticsearch. + // The default value is null, which means all results are retained. + // Annotations generated by the system also count as results for retention + // purposes; they are deleted after the same number of days as results. + // Annotations added by users are retained forever. 
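+	// For example, with `results_retention_days: 30`, results that are at
+	// least 30 days older than the latest bucket result are removed at the
+	// next 00:30 (server time) cleanup.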
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +func (s *Job) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLazyOpen", err) + } + s.AllowLazyOpen = value + case bool: + s.AllowLazyOpen = v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return fmt.Errorf("%s | %w", "AnalysisConfig", err) + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return fmt.Errorf("%s | %w", "AnalysisLimits", err) + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return fmt.Errorf("%s | %w", "BackgroundPersistInterval", err) + } + + case "blocked": + if err := dec.Decode(&s.Blocked); err != nil { + return fmt.Errorf("%s | %w", "Blocked", err) + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return fmt.Errorf("%s | %w", "CustomSettings", err) + } + + case "daily_model_snapshot_retention_after_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err) + } + s.DailyModelSnapshotRetentionAfterDays = &value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = &f + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return fmt.Errorf("%s | %w", "DataDescription", err) + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return fmt.Errorf("%s | %w", "DatafeedConfig", err) + } + + case "deleting": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deleting", err) + } + s.Deleting = &value + case bool: + s.Deleting = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "finished_time": + if err := dec.Decode(&s.FinishedTime); err != nil { + return fmt.Errorf("%s | %w", "FinishedTime", err) + } + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "job_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobType = &o + + case "job_version": + if err := dec.Decode(&s.JobVersion); err != nil { + return fmt.Errorf("%s | %w", "JobVersion", err) + } + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return fmt.Errorf("%s | %w", "ModelPlotConfig", err) + } + + case 
"model_snapshot_id": + if err := dec.Decode(&s.ModelSnapshotId); err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotId", err) + } + + case "model_snapshot_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = f + } + + case "renormalization_window_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RenormalizationWindowDays", err) + } + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return fmt.Errorf("%s | %w", "ResultsIndexName", err) + } + + case "results_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ResultsRetentionDays", err) + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + return nil +} + +// NewJob returns a Job. +func NewJob() *Job { + r := &Job{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobblocked.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobblocked.go new file mode 100644 index 000000000..ae287d910 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobblocked.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobblockedreason" +) + +// JobBlocked type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L392-L395 +type JobBlocked struct { + Reason jobblockedreason.JobBlockedReason `json:"reason"` + TaskId *string `json:"task_id,omitempty"` +} + +func (s *JobBlocked) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + + case "task_id": + if err := dec.Decode(&s.TaskId); err != nil { + return fmt.Errorf("%s | %w", "TaskId", err) + } + + } + } + return nil +} + +// NewJobBlocked returns a JobBlocked. +func NewJobBlocked() *JobBlocked { + r := &JobBlocked{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobconfig.go new file mode 100644 index 000000000..71c763ad9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobconfig.go @@ -0,0 +1,303 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JobConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L182-L283 +type JobConfig struct { + // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there + // is insufficient machine learning node capacity for it to be immediately + // assigned to a node. + AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"` + // AnalysisConfig The analysis configuration, which specifies how to analyze the data. + // After you create a job, you cannot change the analysis configuration; all the + // properties are informational. + AnalysisConfig AnalysisConfig `json:"analysis_config"` + // AnalysisLimits Limits can be applied for the resources required to hold the mathematical + // models in memory. + // These limits are approximate and can be set per job. + // They do not control the memory used by other processes, for example the + // Elasticsearch Java processes. + AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"` + // BackgroundPersistInterval Advanced configuration option. + // The time between each periodic persistence of the model. 
+	// The default value is a randomized value between 3 and 4 hours, which avoids
+	// all jobs persisting at exactly the same time.
+	// The smallest allowed value is 1 hour.
+	BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"`
+	// CustomSettings Advanced configuration option.
+	// Contains custom metadata about the job.
+	CustomSettings json.RawMessage `json:"custom_settings,omitempty"`
+	// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old
+	// model snapshots for this job.
+	// It specifies a period of time (in days) after which only the first snapshot
+	// per day is retained.
+	// This period is relative to the timestamp of the most recent snapshot for this
+	// job.
+	DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"`
+	// DataDescription The data description defines the format of the input data when you send data
+	// to the job by using the post data API.
+	// Note that when configuring a datafeed, these properties are automatically set.
+	DataDescription DataDescription `json:"data_description"`
+	// DatafeedConfig The datafeed, which retrieves data from Elasticsearch for analysis by the
+	// job.
+	// You can associate only one datafeed with each anomaly detection job.
+	DatafeedConfig *DatafeedConfig `json:"datafeed_config,omitempty"`
+	// Description A description of the job.
+	Description *string `json:"description,omitempty"`
+	// Groups A list of job groups. A job can belong to no groups or many.
+	Groups []string `json:"groups,omitempty"`
+	// JobId Identifier for the anomaly detection job.
+	// This identifier can contain lowercase alphanumeric characters (a-z and 0-9),
+	// hyphens, and underscores.
+	// It must start and end with alphanumeric characters.
+	JobId *string `json:"job_id,omitempty"`
+	// JobType Reserved for future use, currently set to `anomaly_detector`.
+	JobType *string `json:"job_type,omitempty"`
+	// ModelPlotConfig This advanced configuration option stores model information along with the
+	// results.
+	// It provides a more detailed view into anomaly detection.
+	// Model plot provides a simplified and indicative view of the model and its
+	// bounds.
+	ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"`
+	// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old
+	// model snapshots for this job.
+	// It specifies the maximum period of time (in days) that snapshots are
+	// retained.
+	// This period is relative to the timestamp of the most recent snapshot for this
+	// job.
+	// The default value is `10`, which means snapshots ten days older than the
+	// newest snapshot are deleted.
+	ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"`
+	// RenormalizationWindowDays Advanced configuration option.
+	// The period over which adjustments to the score are applied, as new data is
+	// seen.
+	// The default value is the longer of 30 days or 100 `bucket_spans`.
+	RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"`
+	// ResultsIndexName A text string that affects the name of the machine learning results index.
+	// The default value is `shared`, which generates an index named
+	// `.ml-anomalies-shared`.
+	ResultsIndexName *string `json:"results_index_name,omitempty"`
+	// ResultsRetentionDays Advanced configuration option.
+	// The period of time (in days) that results are retained.
+ // Age is calculated relative to the timestamp of the latest bucket result. + // If this property has a non-null value, once per day at 00:30 (server time), + // results that are the specified number of days older than the latest bucket + // result are deleted from Elasticsearch. + // The default value is null, which means all results are retained. + // Annotations generated by the system also count as results for retention + // purposes; they are deleted after the same number of days as results. + // Annotations added by users are retained forever. + ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +func (s *JobConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLazyOpen", err) + } + s.AllowLazyOpen = &value + case bool: + s.AllowLazyOpen = &v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return fmt.Errorf("%s | %w", "AnalysisConfig", err) + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return fmt.Errorf("%s | %w", "AnalysisLimits", err) + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return fmt.Errorf("%s | %w", "BackgroundPersistInterval", err) + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return fmt.Errorf("%s | %w", "CustomSettings", err) + } + + case "daily_model_snapshot_retention_after_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DailyModelSnapshotRetentionAfterDays", err) + } + s.DailyModelSnapshotRetentionAfterDays = &value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = &f + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return fmt.Errorf("%s | %w", "DataDescription", err) + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return fmt.Errorf("%s | %w", "DatafeedConfig", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "job_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobType = &o + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return fmt.Errorf("%s | %w", "ModelPlotConfig", err) + } + + case "model_snapshot_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", 
"ModelSnapshotRetentionDays", err) + } + s.ModelSnapshotRetentionDays = &value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = &f + } + + case "renormalization_window_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RenormalizationWindowDays", err) + } + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return fmt.Errorf("%s | %w", "ResultsIndexName", err) + } + + case "results_retention_days": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ResultsRetentionDays", err) + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + return nil +} + +// NewJobConfig returns a JobConfig. +func NewJobConfig() *JobConfig { + r := &JobConfig{} + + return r +} + +type JobConfigVariant interface { + JobConfigCaster() *JobConfig +} + +func (s *JobConfig) JobConfigCaster() *JobConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobforecaststatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobforecaststatistics.go new file mode 100644 index 000000000..bea44cba5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobforecaststatistics.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JobForecastStatistics type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L343-L350 +type JobForecastStatistics struct { + ForecastedJobs int `json:"forecasted_jobs"` + MemoryBytes *JobStatistics `json:"memory_bytes,omitempty"` + ProcessingTimeMs *JobStatistics `json:"processing_time_ms,omitempty"` + Records *JobStatistics `json:"records,omitempty"` + Status map[string]int64 `json:"status,omitempty"` + Total int64 `json:"total"` +} + +func (s *JobForecastStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "forecasted_jobs": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForecastedJobs", err) + } + s.ForecastedJobs = value + case float64: + f := int(v) + s.ForecastedJobs = f + } + + case "memory_bytes": + if err := dec.Decode(&s.MemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "MemoryBytes", err) + } + + case "processing_time_ms": + if err := dec.Decode(&s.ProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "ProcessingTimeMs", err) + } + + case "records": + if err := dec.Decode(&s.Records); err != nil { + return fmt.Errorf("%s | %w", "Records", err) + } + + case "status": + if s.Status == nil { + s.Status = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewJobForecastStatistics returns a JobForecastStatistics. +func NewJobForecastStatistics() *JobForecastStatistics { + r := &JobForecastStatistics{ + Status: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobsrecord.go new file mode 100644 index 000000000..9c299f6cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobsrecord.go @@ -0,0 +1,911 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus" +) + +// JobsRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_jobs/types.ts#L24-L347 +type JobsRecord struct { + // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the + // selection of a node to run the job. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // BucketsCount The number of bucket results produced by the job. + BucketsCount *string `json:"buckets.count,omitempty"` + // BucketsTimeExpAvg The exponential moving average of all bucket processing times, in + // milliseconds. + BucketsTimeExpAvg *string `json:"buckets.time.exp_avg,omitempty"` + // BucketsTimeExpAvgHour The exponential moving average of bucket processing times calculated in a one + // hour time window, in milliseconds. + BucketsTimeExpAvgHour *string `json:"buckets.time.exp_avg_hour,omitempty"` + // BucketsTimeMax The maximum of all bucket processing times, in milliseconds. + BucketsTimeMax *string `json:"buckets.time.max,omitempty"` + // BucketsTimeMin The minimum of all bucket processing times, in milliseconds. + BucketsTimeMin *string `json:"buckets.time.min,omitempty"` + // BucketsTimeTotal The sum of all bucket processing times, in milliseconds. + BucketsTimeTotal *string `json:"buckets.time.total,omitempty"` + // DataBuckets The total number of buckets processed. + DataBuckets *string `json:"data.buckets,omitempty"` + // DataEarliestRecord The timestamp of the earliest chronologically input document. + DataEarliestRecord *string `json:"data.earliest_record,omitempty"` + // DataEmptyBuckets The number of buckets which did not contain any data. + // If your data contains many empty buckets, consider increasing your + // `bucket_span` or using functions that are tolerant to gaps in data such as + // mean, `non_null_sum` or `non_zero_count`. + DataEmptyBuckets *string `json:"data.empty_buckets,omitempty"` + // DataInputBytes The number of bytes of input data posted to the anomaly detection job. + DataInputBytes ByteSize `json:"data.input_bytes,omitempty"` + // DataInputFields The total number of fields in input documents posted to the anomaly detection + // job. + // This count includes fields that are not used in the analysis. + // However, be aware that if you are using a datafeed, it extracts only the + // required fields from the documents it retrieves before posting them to the + // job. + DataInputFields *string `json:"data.input_fields,omitempty"` + // DataInputRecords The number of input documents posted to the anomaly detection job. + DataInputRecords *string `json:"data.input_records,omitempty"` + // DataInvalidDates The number of input documents with either a missing date field or a date that + // could not be parsed. + DataInvalidDates *string `json:"data.invalid_dates,omitempty"` + // DataLast The timestamp at which data was last analyzed, according to server time. + DataLast *string `json:"data.last,omitempty"` + // DataLastEmptyBucket The timestamp of the last bucket that did not contain any data. 
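+	// An empty bucket is one that contained no input data; see
+	// `data.empty_buckets` above.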
+ DataLastEmptyBucket *string `json:"data.last_empty_bucket,omitempty"` + // DataLastSparseBucket The timestamp of the last bucket that was considered sparse. + DataLastSparseBucket *string `json:"data.last_sparse_bucket,omitempty"` + // DataLatestRecord The timestamp of the latest chronologically input document. + DataLatestRecord *string `json:"data.latest_record,omitempty"` + // DataMissingFields The number of input documents that are missing a field that the anomaly + // detection job is configured to analyze. + // Input documents with missing fields are still processed because it is + // possible that not all fields are missing. + // If you are using datafeeds or posting data to the job in JSON format, a high + // `missing_field_count` is often not an indication of data issues. + // It is not necessarily a cause for concern. + DataMissingFields *string `json:"data.missing_fields,omitempty"` + // DataOutOfOrderTimestamps The number of input documents that have a timestamp chronologically preceding + // the start of the current anomaly detection bucket offset by the latency + // window. + // This information is applicable only when you provide data to the anomaly + // detection job by using the post data API. + // These out of order documents are discarded, since jobs require time series + // data to be in ascending chronological order. + DataOutOfOrderTimestamps *string `json:"data.out_of_order_timestamps,omitempty"` + // DataProcessedFields The total number of fields in all the documents that have been processed by + // the anomaly detection job. + // Only fields that are specified in the detector configuration object + // contribute to this count. + // The timestamp is not included in this count. + DataProcessedFields *string `json:"data.processed_fields,omitempty"` + // DataProcessedRecords The number of input documents that have been processed by the anomaly + // detection job. + // This value includes documents with missing fields, since they are nonetheless + // analyzed. + // If you use datafeeds and have aggregations in your search query, the + // `processed_record_count` is the number of aggregation results processed, not + // the number of Elasticsearch documents. + DataProcessedRecords *string `json:"data.processed_records,omitempty"` + // DataSparseBuckets The number of buckets that contained few data points compared to the expected + // number of data points. + // If your data contains many sparse buckets, consider using a longer + // `bucket_span`. + DataSparseBuckets *string `json:"data.sparse_buckets,omitempty"` + // ForecastsMemoryAvg The average memory usage in bytes for forecasts related to the anomaly + // detection job. + ForecastsMemoryAvg *string `json:"forecasts.memory.avg,omitempty"` + // ForecastsMemoryMax The maximum memory usage in bytes for forecasts related to the anomaly + // detection job. + ForecastsMemoryMax *string `json:"forecasts.memory.max,omitempty"` + // ForecastsMemoryMin The minimum memory usage in bytes for forecasts related to the anomaly + // detection job. + ForecastsMemoryMin *string `json:"forecasts.memory.min,omitempty"` + // ForecastsMemoryTotal The total memory usage in bytes for forecasts related to the anomaly + // detection job. + ForecastsMemoryTotal *string `json:"forecasts.memory.total,omitempty"` + // ForecastsRecordsAvg The average number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. 
+ ForecastsRecordsAvg *string `json:"forecasts.records.avg,omitempty"` + // ForecastsRecordsMax The maximum number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. + ForecastsRecordsMax *string `json:"forecasts.records.max,omitempty"` + // ForecastsRecordsMin The minimum number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. + ForecastsRecordsMin *string `json:"forecasts.records.min,omitempty"` + // ForecastsRecordsTotal The total number of `model_forecast` documents written for forecasts related + // to the anomaly detection job. + ForecastsRecordsTotal *string `json:"forecasts.records.total,omitempty"` + // ForecastsTimeAvg The average runtime in milliseconds for forecasts related to the anomaly + // detection job. + ForecastsTimeAvg *string `json:"forecasts.time.avg,omitempty"` + // ForecastsTimeMax The maximum runtime in milliseconds for forecasts related to the anomaly + // detection job. + ForecastsTimeMax *string `json:"forecasts.time.max,omitempty"` + // ForecastsTimeMin The minimum runtime in milliseconds for forecasts related to the anomaly + // detection job. + ForecastsTimeMin *string `json:"forecasts.time.min,omitempty"` + // ForecastsTimeTotal The total runtime in milliseconds for forecasts related to the anomaly + // detection job. + ForecastsTimeTotal *string `json:"forecasts.time.total,omitempty"` + // ForecastsTotal The number of individual forecasts currently available for the job. + // A value of one or more indicates that forecasts exist. + ForecastsTotal *string `json:"forecasts.total,omitempty"` + // Id The anomaly detection job identifier. + Id *string `json:"id,omitempty"` + // ModelBucketAllocationFailures The number of buckets for which new entities in incoming data were not + // processed due to insufficient model memory. + // This situation is also signified by a `hard_limit: memory_status` property + // value. + ModelBucketAllocationFailures *string `json:"model.bucket_allocation_failures,omitempty"` + // ModelByFields The number of `by` field values that were analyzed by the models. + // This value is cumulative for all detectors in the job. + ModelByFields *string `json:"model.by_fields,omitempty"` + // ModelBytes The number of bytes of memory used by the models. + // This is the maximum value since the last time the model was persisted. + // If the job is closed, this value indicates the latest size. + ModelBytes ByteSize `json:"model.bytes,omitempty"` + // ModelBytesExceeded The number of bytes over the high limit for memory usage at the last + // allocation failure. + ModelBytesExceeded ByteSize `json:"model.bytes_exceeded,omitempty"` + // ModelCategorizationStatus The status of categorization for the job. + ModelCategorizationStatus *categorizationstatus.CategorizationStatus `json:"model.categorization_status,omitempty"` + // ModelCategorizedDocCount The number of documents that have had a field categorized. + ModelCategorizedDocCount *string `json:"model.categorized_doc_count,omitempty"` + // ModelDeadCategoryCount The number of categories created by categorization that will never be + // assigned again because another category’s definition makes it a superset of + // the dead category. + // Dead categories are a side effect of the way categorization has no prior + // training. 
+	ModelDeadCategoryCount *string `json:"model.dead_category_count,omitempty"`
+	// ModelFailedCategoryCount The number of times that categorization wanted to create a new category but
+	// couldn’t because the job had hit its `model_memory_limit`.
+	// This count does not track which specific categories failed to be created.
+	// Therefore you cannot use this value to determine the number of unique
+	// categories that were missed.
+	ModelFailedCategoryCount *string `json:"model.failed_category_count,omitempty"`
+	// ModelFrequentCategoryCount The number of categories that match more than 1% of categorized documents.
+	ModelFrequentCategoryCount *string `json:"model.frequent_category_count,omitempty"`
+	// ModelLogTime The timestamp when the model stats were gathered, according to server time.
+	ModelLogTime *string `json:"model.log_time,omitempty"`
+	// ModelMemoryLimit The upper limit for model memory usage, checked on increasing values.
+	ModelMemoryLimit *string `json:"model.memory_limit,omitempty"`
+	// ModelMemoryStatus The status of the mathematical models.
+	ModelMemoryStatus *memorystatus.MemoryStatus `json:"model.memory_status,omitempty"`
+	// ModelOverFields The number of `over` field values that were analyzed by the models.
+	// This value is cumulative for all detectors in the job.
+	ModelOverFields *string `json:"model.over_fields,omitempty"`
+	// ModelPartitionFields The number of `partition` field values that were analyzed by the models.
+	// This value is cumulative for all detectors in the job.
+	ModelPartitionFields *string `json:"model.partition_fields,omitempty"`
+	// ModelRareCategoryCount The number of categories that match just one categorized document.
+	ModelRareCategoryCount *string `json:"model.rare_category_count,omitempty"`
+	// ModelTimestamp The timestamp of the last record when the model stats were gathered.
+	ModelTimestamp *string `json:"model.timestamp,omitempty"`
+	// ModelTotalCategoryCount The number of categories created by categorization.
+	ModelTotalCategoryCount *string `json:"model.total_category_count,omitempty"`
+	// NodeAddress The network address of the assigned node.
+	NodeAddress *string `json:"node.address,omitempty"`
+	// NodeEphemeralId The ephemeral identifier of the assigned node.
+	NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"`
+	// NodeId The unique identifier of the assigned node.
+	NodeId *string `json:"node.id,omitempty"`
+	// NodeName The name of the assigned node.
+	NodeName *string `json:"node.name,omitempty"`
+	// OpenedTime For open jobs only, the amount of time the job has been opened.
+	OpenedTime *string `json:"opened_time,omitempty"`
+	// State The status of the anomaly detection job.
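+	// One of `closed`, `closing`, `failed`, `opened`, or `opening`.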
+ State *jobstate.JobState `json:"state,omitempty"` +} + +func (s *JobsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation", "ae": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "buckets.count", "bc", "bucketsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsCount = &o + + case "buckets.time.exp_avg", "btea", "bucketsTimeExpAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsTimeExpAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeExpAvg = &o + + case "buckets.time.exp_avg_hour", "bteah", "bucketsTimeExpAvgHour": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsTimeExpAvgHour", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeExpAvgHour = &o + + case "buckets.time.max", "btmax", "bucketsTimeMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsTimeMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeMax = &o + + case "buckets.time.min", "btmin", "bucketsTimeMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsTimeMin", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeMin = &o + + case "buckets.time.total", "btt", "bucketsTimeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BucketsTimeTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeTotal = &o + + case "data.buckets", "db", "dataBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataBuckets", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataBuckets = &o + + case "data.earliest_record", "der", "dataEarliestRecord": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataEarliestRecord", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataEarliestRecord = &o + + case "data.empty_buckets", "deb", "dataEmptyBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataEmptyBuckets", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataEmptyBuckets = &o + + case "data.input_bytes", "dib", "dataInputBytes": + if err := dec.Decode(&s.DataInputBytes); err != nil { + return fmt.Errorf("%s | %w", "DataInputBytes", err) + } + + case "data.input_fields", "dif", 
"dataInputFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataInputFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInputFields = &o + + case "data.input_records", "dir", "dataInputRecords": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataInputRecords", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInputRecords = &o + + case "data.invalid_dates", "did", "dataInvalidDates": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataInvalidDates", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInvalidDates = &o + + case "data.last", "dl", "dataLast": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataLast", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLast = &o + + case "data.last_empty_bucket", "dleb", "dataLastEmptyBucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataLastEmptyBucket", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLastEmptyBucket = &o + + case "data.last_sparse_bucket", "dlsb", "dataLastSparseBucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataLastSparseBucket", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLastSparseBucket = &o + + case "data.latest_record", "dlr", "dataLatestRecord": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataLatestRecord", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLatestRecord = &o + + case "data.missing_fields", "dmf", "dataMissingFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataMissingFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataMissingFields = &o + + case "data.out_of_order_timestamps", "doot", "dataOutOfOrderTimestamps": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataOutOfOrderTimestamps", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataOutOfOrderTimestamps = &o + + case "data.processed_fields", "dpf", "dataProcessedFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataProcessedFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataProcessedFields = &o + + case "data.processed_records", "dpr", "dataProcessedRecords": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataProcessedRecords", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataProcessedRecords = &o + + case "data.sparse_buckets", "dsb", "dataSparseBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return 
fmt.Errorf("%s | %w", "DataSparseBuckets", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataSparseBuckets = &o + + case "forecasts.memory.avg", "fmavg", "forecastsMemoryAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsMemoryAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryAvg = &o + + case "forecasts.memory.max", "fmmax", "forecastsMemoryMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsMemoryMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryMax = &o + + case "forecasts.memory.min", "fmmin", "forecastsMemoryMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsMemoryMin", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryMin = &o + + case "forecasts.memory.total", "fmt", "forecastsMemoryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsMemoryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryTotal = &o + + case "forecasts.records.avg", "fravg", "forecastsRecordsAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsRecordsAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsAvg = &o + + case "forecasts.records.max", "frmax", "forecastsRecordsMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsRecordsMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsMax = &o + + case "forecasts.records.min", "frmin", "forecastsRecordsMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsRecordsMin", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsMin = &o + + case "forecasts.records.total", "frt", "forecastsRecordsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsRecordsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsTotal = &o + + case "forecasts.time.avg", "ftavg", "forecastsTimeAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsTimeAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeAvg = &o + + case "forecasts.time.max", "ftmax", "forecastsTimeMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsTimeMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeMax = &o + + case "forecasts.time.min", "ftmin", "forecastsTimeMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsTimeMin", err) + } 
+ o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeMin = &o + + case "forecasts.time.total", "ftt", "forecastsTimeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsTimeTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeTotal = &o + + case "forecasts.total", "ft", "forecastsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ForecastsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTotal = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "model.bucket_allocation_failures", "mbaf", "modelBucketAllocationFailures": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelBucketAllocationFailures", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelBucketAllocationFailures = &o + + case "model.by_fields", "mbf", "modelByFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelByFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelByFields = &o + + case "model.bytes", "mb", "modelBytes": + if err := dec.Decode(&s.ModelBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelBytes", err) + } + + case "model.bytes_exceeded", "mbe", "modelBytesExceeded": + if err := dec.Decode(&s.ModelBytesExceeded); err != nil { + return fmt.Errorf("%s | %w", "ModelBytesExceeded", err) + } + + case "model.categorization_status", "mcs", "modelCategorizationStatus": + if err := dec.Decode(&s.ModelCategorizationStatus); err != nil { + return fmt.Errorf("%s | %w", "ModelCategorizationStatus", err) + } + + case "model.categorized_doc_count", "mcdc", "modelCategorizedDocCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelCategorizedDocCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelCategorizedDocCount = &o + + case "model.dead_category_count", "mdcc", "modelDeadCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelDeadCategoryCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelDeadCategoryCount = &o + + case "model.failed_category_count", "mfcc", "modelFailedCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelFailedCategoryCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelFailedCategoryCount = &o + + case "model.frequent_category_count", "modelFrequentCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelFrequentCategoryCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelFrequentCategoryCount = &o + + case "model.log_time", "mlt", "modelLogTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", 
"ModelLogTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelLogTime = &o + + case "model.memory_limit", "mml", "modelMemoryLimit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "model.memory_status", "mms", "modelMemoryStatus": + if err := dec.Decode(&s.ModelMemoryStatus); err != nil { + return fmt.Errorf("%s | %w", "ModelMemoryStatus", err) + } + + case "model.over_fields", "mof", "modelOverFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelOverFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelOverFields = &o + + case "model.partition_fields", "mpf", "modelPartitionFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelPartitionFields", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelPartitionFields = &o + + case "model.rare_category_count", "mrcc", "modelRareCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelRareCategoryCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelRareCategoryCount = &o + + case "model.timestamp", "mt", "modelTimestamp": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelTimestamp", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelTimestamp = &o + + case "model.total_category_count", "mtcc", "modelTotalCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelTotalCategoryCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelTotalCategoryCount = &o + + case "node.address", "na", "nodeAddress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeAddress = &o + + case "node.ephemeral_id", "ne", "nodeEphemeralId": + if err := dec.Decode(&s.NodeEphemeralId); err != nil { + return fmt.Errorf("%s | %w", "NodeEphemeralId", err) + } + + case "node.id", "ni", "nodeId": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "node.name", "nn", "nodeName": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeName = &o + + case "opened_time", "ot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OpenedTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OpenedTime = &o + + case "state", "s": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + } + } + return nil +} + +// NewJobsRecord returns a JobsRecord. 
+func NewJobsRecord() *JobsRecord { + r := &JobsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstatistics.go new file mode 100644 index 000000000..8ef5b3cbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstatistics.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JobStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L54-L59 +type JobStatistics struct { + Avg Float64 `json:"avg"` + Max Float64 `json:"max"` + Min Float64 `json:"min"` + Total Float64 `json:"total"` +} + +func (s *JobStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + f := Float64(value) + s.Total = f + case float64: + f := Float64(v) + s.Total = f + } + + } + } + return nil +} + +// NewJobStatistics returns a JobStatistics. 
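The JobStatistics decoder above accepts each metric either as a JSON number or as a stringified number. A short usage sketch (hypothetical values; assumes the vendored package is imported as types):

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func decodeStats() error {
    	// "avg" arrives quoted, "max" as a plain number; both paths succeed.
    	data := []byte(`{"avg":"1.5","max":3,"min":0.5,"total":6}`)
    	s := types.NewJobStatistics()
    	if err := json.Unmarshal(data, s); err != nil {
    		return err
    	}
    	fmt.Println(s.Avg, s.Max) // 1.5 3
    	return nil
    }
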
+func NewJobStatistics() *JobStatistics { + r := &JobStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstats.go new file mode 100644 index 000000000..e03c45405 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobstats.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate" +) + +// JobStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L284-L330 +type JobStats struct { + // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the + // selection of a node to run the job. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // DataCounts An object that describes the quantity of input to the job and any related + // error counts. + // The `data_count` values are cumulative for the lifetime of a job. + // If a model snapshot is reverted or old results are deleted, the job counts + // are not reset. + DataCounts DataCounts `json:"data_counts"` + // Deleting Indicates that the process of deleting the job is in progress but not yet + // completed. It is only reported when `true`. + Deleting *bool `json:"deleting,omitempty"` + // ForecastsStats An object that provides statistical information about forecasts belonging to + // this job. + // Some statistics are omitted if no forecasts have been made. + ForecastsStats JobForecastStatistics `json:"forecasts_stats"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // ModelSizeStats An object that provides information about the size and contents of the model. + ModelSizeStats ModelSizeStats `json:"model_size_stats"` + // Node Contains properties for the node that runs the job. + // This information is available only for open jobs. + Node *DiscoveryNodeCompact `json:"node,omitempty"` + // OpenTime For open jobs only, the elapsed time for which the job has been open. + OpenTime DateTime `json:"open_time,omitempty"` + // State The status of the anomaly detection job, which can be one of the following + // values: `closed`, `closing`, `failed`, `opened`, `opening`. + State jobstate.JobState `json:"state"` + // TimingStats An object that provides statistical information about timing aspect of this + // job. 
+ TimingStats JobTimingStats `json:"timing_stats"` +} + +func (s *JobStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "data_counts": + if err := dec.Decode(&s.DataCounts); err != nil { + return fmt.Errorf("%s | %w", "DataCounts", err) + } + + case "deleting": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deleting", err) + } + s.Deleting = &value + case bool: + s.Deleting = &v + } + + case "forecasts_stats": + if err := dec.Decode(&s.ForecastsStats); err != nil { + return fmt.Errorf("%s | %w", "ForecastsStats", err) + } + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobId = o + + case "model_size_stats": + if err := dec.Decode(&s.ModelSizeStats); err != nil { + return fmt.Errorf("%s | %w", "ModelSizeStats", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "open_time": + if err := dec.Decode(&s.OpenTime); err != nil { + return fmt.Errorf("%s | %w", "OpenTime", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return fmt.Errorf("%s | %w", "TimingStats", err) + } + + } + } + return nil +} + +// NewJobStats returns a JobStats. +func NewJobStats() *JobStats { + r := &JobStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobtimingstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobtimingstats.go new file mode 100644 index 000000000..073ba3b8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobtimingstats.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
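JobStats, completed above, is the per-job element of an ML job-stats payload: its scalar fields use the lenient decoders, while nested objects (data_counts, forecasts_stats, model_size_stats, timing_stats) delegate to their own generated UnmarshalJSON. A trimmed, entirely hypothetical payload exercising the string-boolean and enum paths:

    data := []byte(`{
    	"job_id": "my-job",
    	"state": "opened",
    	"deleting": "false",
    	"data_counts": {"job_id": "my-job"},
    	"forecasts_stats": {},
    	"model_size_stats": {},
    	"timing_stats": {"job_id": "my-job"}
    }`)
    st := types.NewJobStats()
    if err := json.Unmarshal(data, st); err != nil {
    	// handle decode failure
    }
    fmt.Println(st.JobId, st.State, *st.Deleting) // my-job opened false
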
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JobTimingStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Job.ts#L332-L341 +type JobTimingStats struct { + AverageBucketProcessingTimeMs Float64 `json:"average_bucket_processing_time_ms,omitempty"` + BucketCount int64 `json:"bucket_count"` + ExponentialAverageBucketProcessingTimeMs Float64 `json:"exponential_average_bucket_processing_time_ms,omitempty"` + ExponentialAverageBucketProcessingTimePerHourMs Float64 `json:"exponential_average_bucket_processing_time_per_hour_ms"` + JobId string `json:"job_id"` + MaximumBucketProcessingTimeMs Float64 `json:"maximum_bucket_processing_time_ms,omitempty"` + MinimumBucketProcessingTimeMs Float64 `json:"minimum_bucket_processing_time_ms,omitempty"` + TotalBucketProcessingTimeMs Float64 `json:"total_bucket_processing_time_ms"` +} + +func (s *JobTimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_bucket_processing_time_ms": + if err := dec.Decode(&s.AverageBucketProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "AverageBucketProcessingTimeMs", err) + } + + case "bucket_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BucketCount", err) + } + s.BucketCount = value + case float64: + f := int64(v) + s.BucketCount = f + } + + case "exponential_average_bucket_processing_time_ms": + if err := dec.Decode(&s.ExponentialAverageBucketProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageBucketProcessingTimeMs", err) + } + + case "exponential_average_bucket_processing_time_per_hour_ms": + if err := dec.Decode(&s.ExponentialAverageBucketProcessingTimePerHourMs); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAverageBucketProcessingTimePerHourMs", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "maximum_bucket_processing_time_ms": + if err := dec.Decode(&s.MaximumBucketProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "MaximumBucketProcessingTimeMs", err) + } + + case "minimum_bucket_processing_time_ms": + if err := dec.Decode(&s.MinimumBucketProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "MinimumBucketProcessingTimeMs", err) + } + + case "total_bucket_processing_time_ms": + if err := dec.Decode(&s.TotalBucketProcessingTimeMs); err != nil { + return fmt.Errorf("%s | %w", "TotalBucketProcessingTimeMs", err) + } + + } + } + return nil +} + +// NewJobTimingStats returns a JobTimingStats. +func NewJobTimingStats() *JobTimingStats { + r := &JobTimingStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobusage.go new file mode 100644 index 000000000..4aadb74db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jobusage.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JobUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L370-L376 +type JobUsage struct { + Count int `json:"count"` + CreatedBy map[string]int64 `json:"created_by"` + Detectors JobStatistics `json:"detectors"` + Forecasts MlJobForecasts `json:"forecasts"` + ModelSize JobStatistics `json:"model_size"` +} + +func (s *JobUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "created_by": + if s.CreatedBy == nil { + s.CreatedBy = make(map[string]int64, 0) + } + if err := dec.Decode(&s.CreatedBy); err != nil { + return fmt.Errorf("%s | %w", "CreatedBy", err) + } + + case "detectors": + if err := dec.Decode(&s.Detectors); err != nil { + return fmt.Errorf("%s | %w", "Detectors", err) + } + + case "forecasts": + if err := dec.Decode(&s.Forecasts); err != nil { + return fmt.Errorf("%s | %w", "Forecasts", err) + } + + case "model_size": + if err := dec.Decode(&s.ModelSize); err != nil { + return fmt.Errorf("%s | %w", "ModelSize", err) + } + + } + } + return nil +} + +// NewJobUsage returns a JobUsage. +func NewJobUsage() *JobUsage { + r := &JobUsage{ + CreatedBy: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinprocessor.go new file mode 100644 index 000000000..718208ed7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinprocessor.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JoinProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1122-L1137 +type JoinProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing array values to join. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Separator The separator character. + Separator string `json:"separator"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the joined value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *JoinProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewJoinProcessor returns a JoinProcessor. +func NewJoinProcessor() *JoinProcessor { + r := &JoinProcessor{} + + return r +} + +type JoinProcessorVariant interface { + JoinProcessorCaster() *JoinProcessor +} + +func (s *JoinProcessor) JoinProcessorCaster() *JoinProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinproperty.go new file mode 100644 index 000000000..b7b746d18 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/joinproperty.go @@ -0,0 +1,866 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
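With JoinProcessor complete, a processor configuration can be assembled directly; the trailing Variant/Caster pair is the generated union hook that lets any conforming value stand in where a processor is expected. An illustrative sketch (field names invented):

    strPtr := func(v string) *string { return &v }

    p := types.NewJoinProcessor()
    p.Field = "tags"                      // array field whose values get joined
    p.Separator = "-"
    p.TargetField = strPtr("tags_joined") // omit to rewrite the field in place

    // Any JoinProcessorVariant yields the concrete processor back:
    var v types.JoinProcessorVariant = p
    _ = v.JoinProcessorCaster()
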
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// JoinProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L103-L107 +type JoinProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Relations map[string][]string `json:"relations,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *JoinProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "eager_global_ordinals": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EagerGlobalOrdinals", err) + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + 
case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "relations": + if s.Relations == nil { + s.Relations = make(map[string][]string, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, 
[]byte("{")): + o := new(string) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Relations", err) + } + s.Relations[key] = append(s.Relations[key], *o) + default: + o := []string{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "Relations", err) + } + s.Relations[key] = o + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s JoinProperty) MarshalJSON() ([]byte, error) { + type innerJoinProperty JoinProperty + tmp := innerJoinProperty{ + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Relations: s.Relations, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "join" + + return json.Marshal(tmp) +} + +// NewJoinProperty returns a JoinProperty. +func NewJoinProperty() *JoinProperty { + r := &JoinProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + Relations: make(map[string][]string), + } + + return r +} + +type JoinPropertyVariant interface { + JoinPropertyCaster() *JoinProperty +} + +func (s *JoinProperty) JoinPropertyCaster() *JoinProperty { + return s +} + +func (s *JoinProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jsonprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jsonprocessor.go new file mode 100644 index 000000000..a54a0c2da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jsonprocessor.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jsonprocessorconflictstrategy" +) + +// JsonProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1139-L1168 +type JsonProcessor struct { + // AddToRoot Flag that forces the parsed JSON to be added at the top level of the + // document. 
+ // `target_field` must not be set when this option is chosen. + AddToRoot *bool `json:"add_to_root,omitempty"` + // AddToRootConflictStrategy When set to `replace`, root fields that conflict with fields from the parsed + // JSON will be overridden. + // When set to `merge`, conflicting fields will be merged. + // Only applicable if `add_to_root` is set to `true`. + AddToRootConflictStrategy *jsonprocessorconflictstrategy.JsonProcessorConflictStrategy `json:"add_to_root_conflict_strategy,omitempty"` + // AllowDuplicateKeys When set to `true`, the JSON parser will not fail if the JSON contains + // duplicate keys. + // Instead, the last encountered value for any duplicate key wins. + AllowDuplicateKeys *bool `json:"allow_duplicate_keys,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be parsed. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that the converted structured object will be written into. + // Any existing content in this field will be overwritten. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *JsonProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "add_to_root": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AddToRoot", err) + } + s.AddToRoot = &value + case bool: + s.AddToRoot = &v + } + + case "add_to_root_conflict_strategy": + if err := dec.Decode(&s.AddToRootConflictStrategy); err != nil { + return fmt.Errorf("%s | %w", "AddToRootConflictStrategy", err) + } + + case "allow_duplicate_keys": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowDuplicateKeys", err) + } + s.AllowDuplicateKeys = &value + case bool: + s.AllowDuplicateKeys = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err :=
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewJsonProcessor returns a JsonProcessor. +func NewJsonProcessor() *JsonProcessor { + r := &JsonProcessor{} + + return r +} + +type JsonProcessorVariant interface { + JsonProcessorCaster() *JsonProcessor +} + +func (s *JsonProcessor) JsonProcessorCaster() *JsonProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvm.go new file mode 100644 index 000000000..a26134268 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvm.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Jvm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L884-L918 +type Jvm struct { + // BufferPools Contains statistics about JVM buffer pools for the node. + BufferPools map[string]NodeBufferPool `json:"buffer_pools,omitempty"` + // Classes Contains statistics about classes loaded by JVM for the node. + Classes *JvmClasses `json:"classes,omitempty"` + // Gc Contains statistics about JVM garbage collectors for the node. + Gc *GarbageCollector `json:"gc,omitempty"` + // Mem Contains JVM memory usage statistics for the node. + Mem *JvmMemoryStats `json:"mem,omitempty"` + // Threads Contains statistics about JVM thread usage for the node. + Threads *JvmThreads `json:"threads,omitempty"` + // Timestamp Last time JVM statistics were refreshed. + Timestamp *int64 `json:"timestamp,omitempty"` + // Uptime Human-readable JVM uptime. + // Only returned if the `human` query parameter is `true`. + Uptime *string `json:"uptime,omitempty"` + // UptimeInMillis JVM uptime in milliseconds. 
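The JsonProcessor above maps the ingest json processor options; wiring one up might look like this (boolean helper and field name are illustrative):

    boolPtr := func(v bool) *bool { return &v }

    jp := types.NewJsonProcessor()
    jp.Field = "payload"         // field holding stringified JSON
    jp.AddToRoot = boolPtr(true) // merge parsed keys at the document root
    // per the docs above, leave TargetField unset when AddToRoot is true
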
+ UptimeInMillis *int64 `json:"uptime_in_millis,omitempty"` +} + +func (s *Jvm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buffer_pools": + if s.BufferPools == nil { + s.BufferPools = make(map[string]NodeBufferPool, 0) + } + if err := dec.Decode(&s.BufferPools); err != nil { + return fmt.Errorf("%s | %w", "BufferPools", err) + } + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return fmt.Errorf("%s | %w", "Classes", err) + } + + case "gc": + if err := dec.Decode(&s.Gc); err != nil { + return fmt.Errorf("%s | %w", "Gc", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "threads": + if err := dec.Decode(&s.Threads); err != nil { + return fmt.Errorf("%s | %w", "Threads", err) + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "uptime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Uptime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uptime = &o + + case "uptime_in_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UptimeInMillis", err) + } + s.UptimeInMillis = &value + case float64: + f := int64(v) + s.UptimeInMillis = &f + } + + } + } + return nil +} + +// NewJvm returns a Jvm. +func NewJvm() *Jvm { + r := &Jvm{ + BufferPools: make(map[string]NodeBufferPool), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmclasses.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmclasses.go new file mode 100644 index 000000000..86d7b3033 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmclasses.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JvmClasses type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L986-L999 +type JvmClasses struct { + // CurrentLoadedCount Number of classes currently loaded by JVM. + CurrentLoadedCount *int64 `json:"current_loaded_count,omitempty"` + // TotalLoadedCount Total number of classes loaded since the JVM started. + TotalLoadedCount *int64 `json:"total_loaded_count,omitempty"` + // TotalUnloadedCount Total number of classes unloaded since the JVM started. + TotalUnloadedCount *int64 `json:"total_unloaded_count,omitempty"` +} + +func (s *JvmClasses) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_loaded_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentLoadedCount", err) + } + s.CurrentLoadedCount = &value + case float64: + f := int64(v) + s.CurrentLoadedCount = &f + } + + case "total_loaded_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalLoadedCount", err) + } + s.TotalLoadedCount = &value + case float64: + f := int64(v) + s.TotalLoadedCount = &f + } + + case "total_unloaded_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalUnloadedCount", err) + } + s.TotalUnloadedCount = &value + case float64: + f := int64(v) + s.TotalUnloadedCount = &f + } + + } + } + return nil +} + +// NewJvmClasses returns a JvmClasses. +func NewJvmClasses() *JvmClasses { + r := &JvmClasses{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmmemorystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmmemorystats.go new file mode 100644 index 000000000..997072d8d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmmemorystats.go @@ -0,0 +1,184 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JvmMemoryStats type. 
+ // + https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L920-L954 +type JvmMemoryStats struct { + // HeapCommittedInBytes Amount of memory, in bytes, available for use by the heap. + HeapCommittedInBytes *int64 `json:"heap_committed_in_bytes,omitempty"` + // HeapMax Maximum amount of memory available for use by the heap. + HeapMax ByteSize `json:"heap_max,omitempty"` + // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap. + HeapMaxInBytes *int64 `json:"heap_max_in_bytes,omitempty"` + // HeapUsedInBytes Memory, in bytes, currently in use by the heap. + HeapUsedInBytes *int64 `json:"heap_used_in_bytes,omitempty"` + // HeapUsedPercent Percentage of memory currently in use by the heap. + HeapUsedPercent *int64 `json:"heap_used_percent,omitempty"` + // NonHeapCommittedInBytes Amount of non-heap memory available, in bytes. + NonHeapCommittedInBytes *int64 `json:"non_heap_committed_in_bytes,omitempty"` + // NonHeapUsedInBytes Non-heap memory used, in bytes. + NonHeapUsedInBytes *int64 `json:"non_heap_used_in_bytes,omitempty"` + // Pools Contains statistics about heap memory usage for the node. + Pools map[string]Pool `json:"pools,omitempty"` +} + +func (s *JvmMemoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_committed_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapCommittedInBytes", err) + } + s.HeapCommittedInBytes = &value + case float64: + f := int64(v) + s.HeapCommittedInBytes = &f + } + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return fmt.Errorf("%s | %w", "HeapMax", err) + } + + case "heap_max_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapMaxInBytes", err) + } + s.HeapMaxInBytes = &value + case float64: + f := int64(v) + s.HeapMaxInBytes = &f + } + + case "heap_used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapUsedInBytes", err) + } + s.HeapUsedInBytes = &value + case float64: + f := int64(v) + s.HeapUsedInBytes = &f + } + + case "heap_used_percent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapUsedPercent", err) + } + s.HeapUsedPercent = &value + case float64: + f := int64(v) + s.HeapUsedPercent = &f + } + + case "non_heap_committed_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NonHeapCommittedInBytes", err) + } + s.NonHeapCommittedInBytes = &value + case float64: + f := int64(v) + s.NonHeapCommittedInBytes = &f + } + + case "non_heap_used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NonHeapUsedInBytes", err) + } + s.NonHeapUsedInBytes = &value + case float64: +
f := int64(v) + s.NonHeapUsedInBytes = &f + } + + case "pools": + if s.Pools == nil { + s.Pools = make(map[string]Pool, 0) + } + if err := dec.Decode(&s.Pools); err != nil { + return fmt.Errorf("%s | %w", "Pools", err) + } + + } + } + return nil +} + +// NewJvmMemoryStats returns a JvmMemoryStats. +func NewJvmMemoryStats() *JvmMemoryStats { + r := &JvmMemoryStats{ + Pools: make(map[string]Pool), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmstats.go new file mode 100644 index 000000000..5f365ab92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmstats.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JvmStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_memory_stats/types.ts#L50-L63 +type JvmStats struct { + // HeapMax Maximum amount of memory available for use by the heap. + HeapMax ByteSize `json:"heap_max,omitempty"` + // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap. + HeapMaxInBytes int `json:"heap_max_in_bytes"` + // JavaInference Amount of Java heap currently being used for caching inference models. + JavaInference ByteSize `json:"java_inference,omitempty"` + // JavaInferenceInBytes Amount of Java heap, in bytes, currently being used for caching inference + // models. + JavaInferenceInBytes int `json:"java_inference_in_bytes"` + // JavaInferenceMax Maximum amount of Java heap to be used for caching inference models. + JavaInferenceMax ByteSize `json:"java_inference_max,omitempty"` + // JavaInferenceMaxInBytes Maximum amount of Java heap, in bytes, to be used for caching inference + // models. 
+ JavaInferenceMaxInBytes int `json:"java_inference_max_in_bytes"` +} + +func (s *JvmStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return fmt.Errorf("%s | %w", "HeapMax", err) + } + + case "heap_max_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "HeapMaxInBytes", err) + } + s.HeapMaxInBytes = value + case float64: + f := int(v) + s.HeapMaxInBytes = f + } + + case "java_inference": + if err := dec.Decode(&s.JavaInference); err != nil { + return fmt.Errorf("%s | %w", "JavaInference", err) + } + + case "java_inference_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "JavaInferenceInBytes", err) + } + s.JavaInferenceInBytes = value + case float64: + f := int(v) + s.JavaInferenceInBytes = f + } + + case "java_inference_max": + if err := dec.Decode(&s.JavaInferenceMax); err != nil { + return fmt.Errorf("%s | %w", "JavaInferenceMax", err) + } + + case "java_inference_max_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "JavaInferenceMaxInBytes", err) + } + s.JavaInferenceMaxInBytes = value + case float64: + f := int(v) + s.JavaInferenceMaxInBytes = f + } + + } + } + return nil +} + +// NewJvmStats returns a JvmStats. +func NewJvmStats() *JvmStats { + r := &JvmStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmthreads.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmthreads.go new file mode 100644 index 000000000..59c20f2ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/jvmthreads.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// JvmThreads type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L975-L984 +type JvmThreads struct { + // Count Number of active threads in use by JVM. + Count *int64 `json:"count,omitempty"` + // PeakCount Highest number of threads used by JVM. 
+ PeakCount *int64 `json:"peak_count,omitempty"` +} + +func (s *JvmThreads) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "peak_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PeakCount", err) + } + s.PeakCount = &value + case float64: + f := int64(v) + s.PeakCount = &f + } + + } + } + return nil +} + +// NewJvmThreads returns a JvmThreads. +func NewJvmThreads() *JvmThreads { + r := &JvmThreads{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keeptypestokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keeptypestokenfilter.go new file mode 100644 index 000000000..89f334d96 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keeptypestokenfilter.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/keeptypesmode" +) + +// KeepTypesTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L287-L293 +type KeepTypesTokenFilter struct { + // Mode Indicates whether to keep or remove the specified token types. + Mode *keeptypesmode.KeepTypesMode `json:"mode,omitempty"` + Type string `json:"type,omitempty"` + // Types List of token types to keep or remove. 
+ Types []string `json:"types"` + Version *string `json:"version,omitempty"` +} + +func (s *KeepTypesTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "types": + if err := dec.Decode(&s.Types); err != nil { + return fmt.Errorf("%s | %w", "Types", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeepTypesTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeepTypesTokenFilter KeepTypesTokenFilter + tmp := innerKeepTypesTokenFilter{ + Mode: s.Mode, + Type: s.Type, + Types: s.Types, + Version: s.Version, + } + + tmp.Type = "keep_types" + + return json.Marshal(tmp) +} + +// NewKeepTypesTokenFilter returns a KeepTypesTokenFilter. +func NewKeepTypesTokenFilter() *KeepTypesTokenFilter { + r := &KeepTypesTokenFilter{} + + return r +} + +type KeepTypesTokenFilterVariant interface { + KeepTypesTokenFilterCaster() *KeepTypesTokenFilter +} + +func (s *KeepTypesTokenFilter) KeepTypesTokenFilterCaster() *KeepTypesTokenFilter { + return s +} + +func (s *KeepTypesTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keepwordstokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keepwordstokenfilter.go new file mode 100644 index 000000000..d8d2c9cf2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keepwordstokenfilter.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KeepWordsTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L295-L306 +type KeepWordsTokenFilter struct { + // KeepWords List of words to keep. Only tokens that match words in this list are included + // in the output. + // Either this parameter or `keep_words_path` must be specified. 
+ KeepWords []string `json:"keep_words,omitempty"` + // KeepWordsCase If `true`, lowercase all keep words. Defaults to `false`. + KeepWordsCase *bool `json:"keep_words_case,omitempty"` + // KeepWordsPath Path to a file that contains a list of words to keep. Only tokens that match + // words in this list are included in the output. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each word in the file must be separated by a line + // break. + // Either this parameter or `keep_words` must be specified. + KeepWordsPath *string `json:"keep_words_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KeepWordsTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keep_words": + if err := dec.Decode(&s.KeepWords); err != nil { + return fmt.Errorf("%s | %w", "KeepWords", err) + } + + case "keep_words_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepWordsCase", err) + } + s.KeepWordsCase = &value + case bool: + s.KeepWordsCase = &v + } + + case "keep_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeepWordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeepWordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeepWordsTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeepWordsTokenFilter KeepWordsTokenFilter + tmp := innerKeepWordsTokenFilter{ + KeepWords: s.KeepWords, + KeepWordsCase: s.KeepWordsCase, + KeepWordsPath: s.KeepWordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keep" + + return json.Marshal(tmp) +} + +// NewKeepWordsTokenFilter returns a KeepWordsTokenFilter. +func NewKeepWordsTokenFilter() *KeepWordsTokenFilter { + r := &KeepWordsTokenFilter{} + + return r +} + +type KeepWordsTokenFilterVariant interface { + KeepWordsTokenFilterCaster() *KeepWordsTokenFilter +} + +func (s *KeepWordsTokenFilter) KeepWordsTokenFilterCaster() *KeepWordsTokenFilter { + return s +} + +func (s *KeepWordsTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedpercentiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedpercentiles.go new file mode 100644 index 000000000..8e9106599 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedpercentiles.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "io" + + "bytes" + + "encoding/json" + + "errors" + + "fmt" +) + +// KeyedPercentiles type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L160-L160 +type KeyedPercentiles map[string]string + +func (s KeyedPercentiles) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + if key, ok := t.(string); ok { + + var tmp any + if err := dec.Decode(&tmp); err != nil { + return err + } + s[key] = fmt.Sprintf("%v", tmp) + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedprocessor.go new file mode 100644 index 000000000..9753c5739 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyedprocessor.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KeyedProcessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L417-L420 +type KeyedProcessor struct { + Stats *Processor `json:"stats,omitempty"` + Type *string `json:"type,omitempty"` +} + +func (s *KeyedProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewKeyedProcessor returns a KeyedProcessor. +func NewKeyedProcessor() *KeyedProcessor { + r := &KeyedProcessor{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyvalueprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyvalueprocessor.go new file mode 100644 index 000000000..8de5dc37f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keyvalueprocessor.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KeyValueProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1177-L1229 +type KeyValueProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // ExcludeKeys List of keys to exclude from document. + ExcludeKeys []string `json:"exclude_keys,omitempty"` + // Field The field to be parsed. + // Supports template snippets. + Field string `json:"field"` + // FieldSplit Regex pattern to use for splitting key-value pairs. + FieldSplit string `json:"field_split"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // IncludeKeys List of keys to filter and insert into document. + // Defaults to including all keys. + IncludeKeys []string `json:"include_keys,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Prefix Prefix to be added to extracted keys. + Prefix *string `json:"prefix,omitempty"` + // StripBrackets If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from + // extracted values. + StripBrackets *bool `json:"strip_brackets,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to insert the extracted keys into. + // Defaults to the root of the document. + // Supports template snippets. + TargetField *string `json:"target_field,omitempty"` + // TrimKey String of characters to trim from extracted keys. + TrimKey *string `json:"trim_key,omitempty"` + // TrimValue String of characters to trim from extracted values. + TrimValue *string `json:"trim_value,omitempty"` + // ValueSplit Regex pattern to use for splitting the key from the value within a key-value + // pair. + ValueSplit string `json:"value_split"` +} + +func (s *KeyValueProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "exclude_keys": + if err := dec.Decode(&s.ExcludeKeys); err != nil { + return fmt.Errorf("%s | %w", "ExcludeKeys", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "field_split": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FieldSplit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FieldSplit = o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "include_keys": + if err := dec.Decode(&s.IncludeKeys); err != nil { + return fmt.Errorf("%s | %w", "IncludeKeys", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = &o + + case "strip_brackets": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) {
+ case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StripBrackets", err) + } + s.StripBrackets = &value + case bool: + s.StripBrackets = &v + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "trim_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TrimKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrimKey = &o + + case "trim_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TrimValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrimValue = &o + + case "value_split": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueSplit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueSplit = o + + } + } + return nil +} + +// NewKeyValueProcessor returns a KeyValueProcessor. +func NewKeyValueProcessor() *KeyValueProcessor { + r := &KeyValueProcessor{} + + return r +} + +type KeyValueProcessorVariant interface { + KeyValueProcessorCaster() *KeyValueProcessor +} + +func (s *KeyValueProcessor) KeyValueProcessorCaster() *KeyValueProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordanalyzer.go new file mode 100644 index 000000000..fcfc7e404 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordanalyzer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// KeywordAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L66-L70 +type KeywordAnalyzer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KeywordAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordAnalyzer) MarshalJSON() ([]byte, error) { + type innerKeywordAnalyzer KeywordAnalyzer + tmp := innerKeywordAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword" + + return json.Marshal(tmp) +} + +// NewKeywordAnalyzer returns a KeywordAnalyzer. +func NewKeywordAnalyzer() *KeywordAnalyzer { + r := &KeywordAnalyzer{} + + return r +} + +type KeywordAnalyzerVariant interface { + KeywordAnalyzerCaster() *KeywordAnalyzer +} + +func (s *KeywordAnalyzer) KeywordAnalyzerCaster() *KeywordAnalyzer { + return s +} + +func (s *KeywordAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordmarkertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordmarkertokenfilter.go new file mode 100644 index 000000000..de654692a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordmarkertokenfilter.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KeywordMarkerTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L308-L322 +type KeywordMarkerTokenFilter struct { + // IgnoreCase If `true`, matching for the `keywords` and `keywords_path` parameters ignores + // letter case. Defaults to `false`. + IgnoreCase *bool `json:"ignore_case,omitempty"` + // Keywords Array of keywords. Tokens that match these keywords are not stemmed. + // This parameter, `keywords_path`, or `keywords_pattern` must be specified. 
You + // cannot specify this parameter and `keywords_pattern`. + Keywords []string `json:"keywords,omitempty"` + // KeywordsPath Path to a file that contains a list of keywords. Tokens that match these + // keywords are not stemmed. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each word in the file must be separated by a line + // break. + // This parameter, `keywords`, or `keywords_pattern` must be specified. You + // cannot specify this parameter and `keywords_pattern`. + KeywordsPath *string `json:"keywords_path,omitempty"` + // KeywordsPattern Java regular expression used to match tokens. Tokens that match this + // expression are marked as keywords and not stemmed. + // This parameter, `keywords`, or `keywords_path` must be specified. You cannot + // specify this parameter and `keywords` or `keywords_path`. + KeywordsPattern *string `json:"keywords_pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KeywordMarkerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreCase", err) + } + s.IgnoreCase = &value + case bool: + s.IgnoreCase = &v + } + + case "keywords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Keywords", err) + } + + s.Keywords = append(s.Keywords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Keywords); err != nil { + return fmt.Errorf("%s | %w", "Keywords", err) + } + } + + case "keywords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeywordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeywordsPath = &o + + case "keywords_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeywordsPattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeywordsPattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordMarkerTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeywordMarkerTokenFilter KeywordMarkerTokenFilter + tmp := innerKeywordMarkerTokenFilter{ + IgnoreCase: s.IgnoreCase, + Keywords: s.Keywords, + KeywordsPath: s.KeywordsPath, + KeywordsPattern: s.KeywordsPattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword_marker" + + return json.Marshal(tmp) +} + +// NewKeywordMarkerTokenFilter returns a KeywordMarkerTokenFilter. 
+func NewKeywordMarkerTokenFilter() *KeywordMarkerTokenFilter { + r := &KeywordMarkerTokenFilter{} + + return r +} + +type KeywordMarkerTokenFilterVariant interface { + KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter +} + +func (s *KeywordMarkerTokenFilter) KeywordMarkerTokenFilterCaster() *KeywordMarkerTokenFilter { + return s +} + +func (s *KeywordMarkerTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordproperty.go new file mode 100644 index 000000000..6586755d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordproperty.go @@ -0,0 +1,1037 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// KeywordProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L109-L128 +type KeywordProperty struct { + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Normalizer *string `json:"normalizer,omitempty"` + Norms *bool `json:"norms,omitempty"` + NullValue *string `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Similarity *string `json:"similarity,omitempty"` + SplitQueriesOnWhitespace *bool `json:"split_queries_on_whitespace,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *KeywordProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "eager_global_ordinals": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EagerGlobalOrdinals", err) + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] 
= oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "normalizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Normalizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Normalizer = &o + + case "norms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Norms", err) + } + s.Norms = &value + case bool: + s.Norms = &v + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + 
} + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": 
+ oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o + + case "split_queries_on_whitespace": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitQueriesOnWhitespace", err) + } + s.SplitQueriesOnWhitespace = &value + case bool: + s.SplitQueriesOnWhitespace = &v + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordProperty) MarshalJSON() ([]byte, error) { + type innerKeywordProperty KeywordProperty + tmp := innerKeywordProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: 
s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Normalizer: s.Normalizer, + Norms: s.Norms, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "keyword" + + return json.Marshal(tmp) +} + +// NewKeywordProperty returns a KeywordProperty. +func NewKeywordProperty() *KeywordProperty { + r := &KeywordProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type KeywordPropertyVariant interface { + KeywordPropertyCaster() *KeywordProperty +} + +func (s *KeywordProperty) KeywordPropertyCaster() *KeywordProperty { + return s +} + +func (s *KeywordProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordrepeattokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordrepeattokenfilter.go new file mode 100644 index 000000000..01fb64614 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordrepeattokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// KeywordRepeatTokenFilter type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L510-L512 +type KeywordRepeatTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KeywordRepeatTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordRepeatTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeywordRepeatTokenFilter KeywordRepeatTokenFilter + tmp := innerKeywordRepeatTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword_repeat" + + return json.Marshal(tmp) +} + +// NewKeywordRepeatTokenFilter returns a KeywordRepeatTokenFilter. +func NewKeywordRepeatTokenFilter() *KeywordRepeatTokenFilter { + r := &KeywordRepeatTokenFilter{} + + return r +} + +type KeywordRepeatTokenFilterVariant interface { + KeywordRepeatTokenFilterCaster() *KeywordRepeatTokenFilter +} + +func (s *KeywordRepeatTokenFilter) KeywordRepeatTokenFilterCaster() *KeywordRepeatTokenFilter { + return s +} + +func (s *KeywordRepeatTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordtokenizer.go new file mode 100644 index 000000000..9a9971844 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/keywordtokenizer.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KeywordTokenizer type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L69-L75
+type KeywordTokenizer struct {
+	BufferSize *int    `json:"buffer_size,omitempty"`
+	Type       string  `json:"type,omitempty"`
+	Version    *string `json:"version,omitempty"`
+}
+
+func (s *KeywordTokenizer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "buffer_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "BufferSize", err)
+				}
+				s.BufferSize = &value
+			case float64:
+				f := int(v)
+				s.BufferSize = &f
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s KeywordTokenizer) MarshalJSON() ([]byte, error) {
+	type innerKeywordTokenizer KeywordTokenizer
+	tmp := innerKeywordTokenizer{
+		BufferSize: s.BufferSize,
+		Type:       s.Type,
+		Version:    s.Version,
+	}
+
+	tmp.Type = "keyword"
+
+	return json.Marshal(tmp)
+}
+
+// NewKeywordTokenizer returns a KeywordTokenizer.
+func NewKeywordTokenizer() *KeywordTokenizer {
+	r := &KeywordTokenizer{}
+
+	return r
+}
+
+type KeywordTokenizerVariant interface {
+	KeywordTokenizerCaster() *KeywordTokenizer
+}
+
+func (s *KeywordTokenizer) KeywordTokenizerCaster() *KeywordTokenizer {
+	return s
+}
+
+func (s *KeywordTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition {
+	o := TokenizerDefinition(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kibanatoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kibanatoken.go
new file mode 100644
index 000000000..3a10c3769
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kibanatoken.go
@@ -0,0 +1,92 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// KibanaToken type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/enroll_kibana/Response.ts#L31-L41
+type KibanaToken struct {
+	// Name The name of the bearer token for the `elastic/kibana` service account.
+	Name string `json:"name"`
+	// Value The value of the bearer token for the `elastic/kibana` service account.
+	// Use this value to authenticate the service account with Elasticsearch.
+	Value string `json:"value"`
+}
+
+func (s *KibanaToken) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Name = o
+
+		case "value":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Value = o
+
+		}
+	}
+	return nil
+}
+
+// NewKibanaToken returns a KibanaToken.
+func NewKibanaToken() *KibanaToken {
+	r := &KibanaToken{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knncollectorresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knncollectorresult.go
new file mode 100644
index 000000000..c30b43a86
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knncollectorresult.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// KnnCollectorResult type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L222-L228 +type KnnCollectorResult struct { + Children []KnnCollectorResult `json:"children,omitempty"` + Name string `json:"name"` + Reason string `json:"reason"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` +} + +func (s *KnnCollectorResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + } + } + return nil +} + +// NewKnnCollectorResult returns a KnnCollectorResult. +func NewKnnCollectorResult() *KnnCollectorResult { + r := &KnnCollectorResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnquery.go new file mode 100644 index 000000000..9e9b87108 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnquery.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Knn.ts#L64-L87 +type KnnQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. 
+ // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field The name of the vector field to search against + Field string `json:"field"` + // Filter Filters for the kNN search query + Filter []Query `json:"filter,omitempty"` + // K The final number of nearest neighbors to return as top hits + K *int `json:"k,omitempty"` + // NumCandidates The number of nearest neighbor candidates to consider per shard + NumCandidates *int `json:"num_candidates,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // QueryVector The query vector + QueryVector []float32 `json:"query_vector,omitempty"` + // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or + // query_vector, but not both. + QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` + // Similarity The minimum similarity for a vector to be considered a match + Similarity *float32 `json:"similarity,omitempty"` +} + +func (s *KnnQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "num_candidates": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumCandidates", err) + } + s.NumCandidates = &value + case float64: + f := int(v) + s.NumCandidates = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "query_vector": + if err := dec.Decode(&s.QueryVector); err != nil { + return fmt.Errorf("%s | %w", "QueryVector", err) + } + + case "query_vector_builder": + if err := dec.Decode(&s.QueryVectorBuilder); err != nil { + return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) + } + + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + + case "similarity": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + f := float32(value) + s.Similarity = &f + case float64: + f := float32(v) + s.Similarity = &f + } + + } + } + return nil +} + +// NewKnnQuery returns a KnnQuery. +func NewKnnQuery() *KnnQuery { + r := &KnnQuery{} + + return r +} + +type KnnQueryVariant interface { + KnnQueryCaster() *KnnQuery +} + +func (s *KnnQuery) KnnQueryCaster() *KnnQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofilebreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofilebreakdown.go new file mode 100644 index 000000000..63d6d56d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofilebreakdown.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnQueryProfileBreakdown type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L199-L220 +type KnnQueryProfileBreakdown struct { + Advance int64 `json:"advance"` + AdvanceCount int64 `json:"advance_count"` + BuildScorer int64 `json:"build_scorer"` + BuildScorerCount int64 `json:"build_scorer_count"` + ComputeMaxScore int64 `json:"compute_max_score"` + ComputeMaxScoreCount int64 `json:"compute_max_score_count"` + CountWeight int64 `json:"count_weight"` + CountWeightCount int64 `json:"count_weight_count"` + CreateWeight int64 `json:"create_weight"` + CreateWeightCount int64 `json:"create_weight_count"` + Match int64 `json:"match"` + MatchCount int64 `json:"match_count"` + NextDoc int64 `json:"next_doc"` + NextDocCount int64 `json:"next_doc_count"` + Score int64 `json:"score"` + ScoreCount int64 `json:"score_count"` + SetMinCompetitiveScore int64 `json:"set_min_competitive_score"` + SetMinCompetitiveScoreCount int64 `json:"set_min_competitive_score_count"` + ShallowAdvance int64 `json:"shallow_advance"` + ShallowAdvanceCount int64 `json:"shallow_advance_count"` +} + +func (s *KnnQueryProfileBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Advance", err) + } + s.Advance = value + case float64: + f := int64(v) + s.Advance = f + } + + case "advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AdvanceCount", err) + } + s.AdvanceCount = value + case float64: + f := int64(v) + s.AdvanceCount = f + } + + case "build_scorer": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorer", err) + } + s.BuildScorer = value + case float64: + f := int64(v) + s.BuildScorer = f + } + + case "build_scorer_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorerCount", err) + } + s.BuildScorerCount = value + case float64: + f := int64(v) + s.BuildScorerCount = f + } + + case "compute_max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScore", err) + } + s.ComputeMaxScore = value + case float64: + f := int64(v) + s.ComputeMaxScore = f + } + + case "compute_max_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScoreCount", err) + } + s.ComputeMaxScoreCount = value + case float64: + f := int64(v) + s.ComputeMaxScoreCount = f + } + + case "count_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeight", err) + } + s.CountWeight = value + case float64: + f := int64(v) + s.CountWeight = f + } + + case 
"count_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeightCount", err) + } + s.CountWeightCount = value + case float64: + f := int64(v) + s.CountWeightCount = f + } + + case "create_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeight", err) + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeightCount", err) + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + s.Match = value + case float64: + f := int64(v) + s.Match = f + } + + case "match_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MatchCount", err) + } + s.MatchCount = value + case float64: + f := int64(v) + s.MatchCount = f + } + + case "next_doc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NextDoc", err) + } + s.NextDoc = value + case float64: + f := int64(v) + s.NextDoc = f + } + + case "next_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NextDocCount", err) + } + s.NextDocCount = value + case float64: + f := int64(v) + s.NextDocCount = f + } + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + s.Score = value + case float64: + f := int64(v) + s.Score = f + } + + case "score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScoreCount", err) + } + s.ScoreCount = value + case float64: + f := int64(v) + s.ScoreCount = f + } + + case "set_min_competitive_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScore", err) + } + s.SetMinCompetitiveScore = value + case float64: + f := int64(v) + s.SetMinCompetitiveScore = f + } + + case "set_min_competitive_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScoreCount", err) + } + s.SetMinCompetitiveScoreCount = value + case float64: + f := int64(v) + s.SetMinCompetitiveScoreCount = f + } + + case "shallow_advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", 
"ShallowAdvance", err) + } + s.ShallowAdvance = value + case float64: + f := int64(v) + s.ShallowAdvance = f + } + + case "shallow_advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShallowAdvanceCount", err) + } + s.ShallowAdvanceCount = value + case float64: + f := int64(v) + s.ShallowAdvanceCount = f + } + + } + } + return nil +} + +// NewKnnQueryProfileBreakdown returns a KnnQueryProfileBreakdown. +func NewKnnQueryProfileBreakdown() *KnnQueryProfileBreakdown { + r := &KnnQueryProfileBreakdown{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofileresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofileresult.go new file mode 100644 index 000000000..2cffc2f48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnqueryprofileresult.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnQueryProfileResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L188-L196 +type KnnQueryProfileResult struct { + Breakdown KnnQueryProfileBreakdown `json:"breakdown"` + Children []KnnQueryProfileResult `json:"children,omitempty"` + Debug map[string]json.RawMessage `json:"debug,omitempty"` + Description string `json:"description"` + Time Duration `json:"time,omitempty"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *KnnQueryProfileResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "debug": + if s.Debug == nil { + s.Debug = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Debug); err != nil { + return fmt.Errorf("%s | %w", "Debug", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewKnnQueryProfileResult returns a KnnQueryProfileResult. +func NewKnnQueryProfileResult() *KnnQueryProfileResult { + r := &KnnQueryProfileResult{ + Debug: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnretriever.go new file mode 100644 index 000000000..a59cdb9ac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnretriever.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L115-L133 +type KnnRetriever struct { + // Field The name of the vector field to search against. + Field string `json:"field"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // K Number of nearest neighbors to return as top hits. + K int `json:"k"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + // NumCandidates Number of nearest neighbor candidates to consider per shard. + NumCandidates int `json:"num_candidates"` + // QueryVector Query vector. Must have the same number of dimensions as the vector field you + // are searching against. You must provide a query_vector_builder or + // query_vector, but not both. + QueryVector []float32 `json:"query_vector,omitempty"` + // QueryVectorBuilder Defines a model to build a query vector. + QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` + // Similarity The minimum similarity required for a document to be considered a match. + Similarity *float32 `json:"similarity,omitempty"` +} + +func (s *KnnRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = value + case float64: + f := int(v) + s.K = f + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "num_candidates": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumCandidates", err) + } + s.NumCandidates = value + case float64: + f := int(v) + s.NumCandidates = f + } + + case "query_vector": + if err := dec.Decode(&s.QueryVector); err != nil { + return fmt.Errorf("%s | %w", "QueryVector", err) + } + + case "query_vector_builder": + if err := dec.Decode(&s.QueryVectorBuilder); err != nil { + return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) + } + + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + + case "similarity": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + f := float32(value) + s.Similarity = &f + case float64: + f := float32(v) + s.Similarity = &f + } + + } + } + return nil +} + +// NewKnnRetriever returns a KnnRetriever. +func NewKnnRetriever() *KnnRetriever { + r := &KnnRetriever{} + + return r +} + +type KnnRetrieverVariant interface { + KnnRetrieverCaster() *KnnRetriever +} + +func (s *KnnRetriever) KnnRetrieverCaster() *KnnRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnsearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnsearch.go new file mode 100644 index 000000000..e6f05a1f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/knnsearch.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KnnSearch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Knn.ts#L35-L62 +type KnnSearch struct { + // Boost Boost value to apply to kNN scores + Boost *float32 `json:"boost,omitempty"` + // Field The name of the vector field to search against + Field string `json:"field"` + // Filter Filters for the kNN search query + Filter []Query `json:"filter,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. 
+ InnerHits *InnerHits `json:"inner_hits,omitempty"` + // K The final number of nearest neighbors to return as top hits + K *int `json:"k,omitempty"` + // NumCandidates The number of nearest neighbor candidates to consider per shard + NumCandidates *int `json:"num_candidates,omitempty"` + // QueryVector The query vector + QueryVector []float32 `json:"query_vector,omitempty"` + // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or + // query_vector, but not both. + QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // RescoreVector Apply oversampling and rescoring to quantized vectors + RescoreVector *RescoreVector `json:"rescore_vector,omitempty"` + // Similarity The minimum similarity for a vector to be considered a match + Similarity *float32 `json:"similarity,omitempty"` +} + +func (s *KnnSearch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "num_candidates": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumCandidates", err) + } + s.NumCandidates = &value + case float64: + f := int(v) + s.NumCandidates = &f + } + + case "query_vector": + if err := dec.Decode(&s.QueryVector); err != nil { + return fmt.Errorf("%s | %w", "QueryVector", err) + } + + case "query_vector_builder": + if err := dec.Decode(&s.QueryVectorBuilder); err != nil { + return fmt.Errorf("%s | %w", "QueryVectorBuilder", err) + } + + case "rescore_vector": + if err := dec.Decode(&s.RescoreVector); err != nil { + return fmt.Errorf("%s | %w", "RescoreVector", err) + } + + case "similarity": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + f := float32(value) + s.Similarity = &f + case float64: + f := float32(v) + s.Similarity = &f + } + + } + } + return nil +} + +// NewKnnSearch returns a KnnSearch. 
+func NewKnnSearch() *KnnSearch { + r := &KnnSearch{} + + return r +} + +type KnnSearchVariant interface { + KnnSearchCaster() *KnnSearch +} + +func (s *KnnSearch) KnnSearchCaster() *KnnSearch { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kstemtokenfilter.go new file mode 100644 index 000000000..67bac279d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// KStemTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L324-L326 +type KStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerKStemTokenFilter KStemTokenFilter + tmp := innerKStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kstem" + + return json.Marshal(tmp) +} + +// NewKStemTokenFilter returns a KStemTokenFilter. 
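// ---- editor's note: illustrative sketch, not part of the vendored file ----
// MarshalJSON above pins Type to "kstem", so even a zero-value filter
// serializes to a usable token filter definition:
//
//	f := types.NewKStemTokenFilter()
//	b, _ := json.Marshal(f) // {"type":"kstem"}
// ----------------------------------------------------------------------------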
+func NewKStemTokenFilter() *KStemTokenFilter { + r := &KStemTokenFilter{} + + return r +} + +type KStemTokenFilterVariant interface { + KStemTokenFilterCaster() *KStemTokenFilter +} + +func (s *KStemTokenFilter) KStemTokenFilterCaster() *KStemTokenFilter { + return s +} + +func (s *KStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojianalyzer.go new file mode 100644 index 000000000..594015677 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojianalyzer.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode" +) + +// KuromojiAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L26-L30 +type KuromojiAnalyzer struct { + Mode kuromojitokenizationmode.KuromojiTokenizationMode `json:"mode"` + Type string `json:"type,omitempty"` + UserDictionary *string `json:"user_dictionary,omitempty"` +} + +func (s *KuromojiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UserDictionary", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiAnalyzer) MarshalJSON() ([]byte, error) { + type innerKuromojiAnalyzer KuromojiAnalyzer + tmp := innerKuromojiAnalyzer{ + Mode: s.Mode, + Type: s.Type, + UserDictionary: s.UserDictionary, + } + + tmp.Type = "kuromoji" + + return json.Marshal(tmp) +} + +// NewKuromojiAnalyzer returns a KuromojiAnalyzer. 
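// ---- editor's note: illustrative sketch, not part of the vendored file ----
// Configuring the analyzer; the dictionary path is hypothetical, and the
// enum constant assumes the generated kuromojitokenizationmode package
// exposes a Search value:
//
//	a := types.NewKuromojiAnalyzer()
//	a.Mode = kuromojitokenizationmode.Search
//	dict := "userdict_ja.txt" // hypothetical user dictionary
//	a.UserDictionary = &dict
//	b, _ := json.Marshal(a) // MarshalJSON forces type to "kuromoji"
// ----------------------------------------------------------------------------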
+func NewKuromojiAnalyzer() *KuromojiAnalyzer { + r := &KuromojiAnalyzer{} + + return r +} + +type KuromojiAnalyzerVariant interface { + KuromojiAnalyzerCaster() *KuromojiAnalyzer +} + +func (s *KuromojiAnalyzer) KuromojiAnalyzerCaster() *KuromojiAnalyzer { + return s +} + +func (s *KuromojiAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojiiterationmarkcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojiiterationmarkcharfilter.go new file mode 100644 index 000000000..416c71311 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojiiterationmarkcharfilter.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KuromojiIterationMarkCharFilter type. 
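// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the char filter type defined just below, both knobs are plain bools;
// a minimal configuration might look like:
//
//	cf := types.NewKuromojiIterationMarkCharFilter()
//	cf.NormalizeKana = true  // fold kana iteration marks (ゝ)
//	cf.NormalizeKanji = true // fold kanji iteration marks (々)
// ----------------------------------------------------------------------------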
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L37-L41 +type KuromojiIterationMarkCharFilter struct { + NormalizeKana bool `json:"normalize_kana"` + NormalizeKanji bool `json:"normalize_kanji"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KuromojiIterationMarkCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "normalize_kana": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NormalizeKana", err) + } + s.NormalizeKana = value + case bool: + s.NormalizeKana = v + } + + case "normalize_kanji": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NormalizeKanji", err) + } + s.NormalizeKanji = value + case bool: + s.NormalizeKanji = v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiIterationMarkCharFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiIterationMarkCharFilter KuromojiIterationMarkCharFilter + tmp := innerKuromojiIterationMarkCharFilter{ + NormalizeKana: s.NormalizeKana, + NormalizeKanji: s.NormalizeKanji, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_iteration_mark" + + return json.Marshal(tmp) +} + +// NewKuromojiIterationMarkCharFilter returns a KuromojiIterationMarkCharFilter. +func NewKuromojiIterationMarkCharFilter() *KuromojiIterationMarkCharFilter { + r := &KuromojiIterationMarkCharFilter{} + + return r +} + +type KuromojiIterationMarkCharFilterVariant interface { + KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter +} + +func (s *KuromojiIterationMarkCharFilter) KuromojiIterationMarkCharFilterCaster() *KuromojiIterationMarkCharFilter { + return s +} + +func (s *KuromojiIterationMarkCharFilter) CharFilterDefinitionCaster() *CharFilterDefinition { + o := CharFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojipartofspeechtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojipartofspeechtokenfilter.go new file mode 100644 index 000000000..44aae178b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojipartofspeechtokenfilter.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// KuromojiPartOfSpeechTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L43-L46 +type KuromojiPartOfSpeechTokenFilter struct { + Stoptags []string `json:"stoptags"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KuromojiPartOfSpeechTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return fmt.Errorf("%s | %w", "Stoptags", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiPartOfSpeechTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiPartOfSpeechTokenFilter KuromojiPartOfSpeechTokenFilter + tmp := innerKuromojiPartOfSpeechTokenFilter{ + Stoptags: s.Stoptags, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_part_of_speech" + + return json.Marshal(tmp) +} + +// NewKuromojiPartOfSpeechTokenFilter returns a KuromojiPartOfSpeechTokenFilter. +func NewKuromojiPartOfSpeechTokenFilter() *KuromojiPartOfSpeechTokenFilter { + r := &KuromojiPartOfSpeechTokenFilter{} + + return r +} + +type KuromojiPartOfSpeechTokenFilterVariant interface { + KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter +} + +func (s *KuromojiPartOfSpeechTokenFilter) KuromojiPartOfSpeechTokenFilterCaster() *KuromojiPartOfSpeechTokenFilter { + return s +} + +func (s *KuromojiPartOfSpeechTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojireadingformtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojireadingformtokenfilter.go new file mode 100644 index 000000000..7bd4948ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojireadingformtokenfilter.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KuromojiReadingFormTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L48-L51 +type KuromojiReadingFormTokenFilter struct { + Type string `json:"type,omitempty"` + UseRomaji bool `json:"use_romaji"` + Version *string `json:"version,omitempty"` +} + +func (s *KuromojiReadingFormTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "use_romaji": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UseRomaji", err) + } + s.UseRomaji = value + case bool: + s.UseRomaji = v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiReadingFormTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiReadingFormTokenFilter KuromojiReadingFormTokenFilter + tmp := innerKuromojiReadingFormTokenFilter{ + Type: s.Type, + UseRomaji: s.UseRomaji, + Version: s.Version, + } + + tmp.Type = "kuromoji_readingform" + + return json.Marshal(tmp) +} + +// NewKuromojiReadingFormTokenFilter returns a KuromojiReadingFormTokenFilter. +func NewKuromojiReadingFormTokenFilter() *KuromojiReadingFormTokenFilter { + r := &KuromojiReadingFormTokenFilter{} + + return r +} + +type KuromojiReadingFormTokenFilterVariant interface { + KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter +} + +func (s *KuromojiReadingFormTokenFilter) KuromojiReadingFormTokenFilterCaster() *KuromojiReadingFormTokenFilter { + return s +} + +func (s *KuromojiReadingFormTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojistemmertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojistemmertokenfilter.go new file mode 100644 index 000000000..a6f04b94a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojistemmertokenfilter.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// KuromojiStemmerTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L53-L56 +type KuromojiStemmerTokenFilter struct { + MinimumLength int `json:"minimum_length"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KuromojiStemmerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "minimum_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinimumLength", err) + } + s.MinimumLength = value + case float64: + f := int(v) + s.MinimumLength = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiStemmerTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiStemmerTokenFilter KuromojiStemmerTokenFilter + tmp := innerKuromojiStemmerTokenFilter{ + MinimumLength: s.MinimumLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_stemmer" + + return json.Marshal(tmp) +} + +// NewKuromojiStemmerTokenFilter returns a KuromojiStemmerTokenFilter. +func NewKuromojiStemmerTokenFilter() *KuromojiStemmerTokenFilter { + r := &KuromojiStemmerTokenFilter{} + + return r +} + +type KuromojiStemmerTokenFilterVariant interface { + KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter +} + +func (s *KuromojiStemmerTokenFilter) KuromojiStemmerTokenFilterCaster() *KuromojiStemmerTokenFilter { + return s +} + +func (s *KuromojiStemmerTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojitokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojitokenizer.go new file mode 100644 index 000000000..3ff69fbe8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/kuromojitokenizer.go @@ -0,0 +1,195 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode" +) + +// KuromojiTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/kuromoji-plugin.ts#L64-L73 +type KuromojiTokenizer struct { + DiscardCompoundToken *bool `json:"discard_compound_token,omitempty"` + DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` + Mode kuromojitokenizationmode.KuromojiTokenizationMode `json:"mode"` + NbestCost *int `json:"nbest_cost,omitempty"` + NbestExamples *string `json:"nbest_examples,omitempty"` + Type string `json:"type,omitempty"` + UserDictionary *string `json:"user_dictionary,omitempty"` + UserDictionaryRules []string `json:"user_dictionary_rules,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *KuromojiTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "discard_compound_token": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DiscardCompoundToken", err) + } + s.DiscardCompoundToken = &value + case bool: + s.DiscardCompoundToken = &v + } + + case "discard_punctuation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DiscardPunctuation", err) + } + s.DiscardPunctuation = &value + case bool: + s.DiscardPunctuation = &v + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "nbest_cost": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NbestCost", err) + } + s.NbestCost = &value + case float64: + f := int(v) + s.NbestCost = &f + } + + case "nbest_examples": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NbestExamples", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NbestExamples = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UserDictionary", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "user_dictionary_rules": + if err := dec.Decode(&s.UserDictionaryRules); err != nil { + return fmt.Errorf("%s | %w", "UserDictionaryRules", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiTokenizer) MarshalJSON() ([]byte, error) { + type innerKuromojiTokenizer KuromojiTokenizer + tmp := innerKuromojiTokenizer{ + DiscardCompoundToken: s.DiscardCompoundToken, + DiscardPunctuation: s.DiscardPunctuation, + Mode: s.Mode, + NbestCost: s.NbestCost, + NbestExamples: s.NbestExamples, + Type: s.Type, + UserDictionary: s.UserDictionary, + UserDictionaryRules: s.UserDictionaryRules, + Version: s.Version, + } + + tmp.Type = "kuromoji_tokenizer" + + return json.Marshal(tmp) +} + +// NewKuromojiTokenizer returns a KuromojiTokenizer. +func NewKuromojiTokenizer() *KuromojiTokenizer { + r := &KuromojiTokenizer{} + + return r +} + +type KuromojiTokenizerVariant interface { + KuromojiTokenizerCaster() *KuromojiTokenizer +} + +func (s *KuromojiTokenizer) KuromojiTokenizerCaster() *KuromojiTokenizer { + return s +} + +func (s *KuromojiTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/languagecontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/languagecontext.go new file mode 100644 index 000000000..ae5b984cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/languagecontext.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// LanguageContext type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/get_script_languages/types.ts#L22-L25 +type LanguageContext struct { + Contexts []string `json:"contexts"` + Language scriptlanguage.ScriptLanguage `json:"language"` +} + +// NewLanguageContext returns a LanguageContext. 
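// ---- editor's note: illustrative sketch, not part of the vendored file ----
// The hand-rolled UnmarshalJSON methods in this package deliberately accept
// numbers either as JSON numbers or as quoted strings; a sketch against the
// KuromojiTokenizer type vendored earlier in this diff:
//
//	var tok types.KuromojiTokenizer
//	_ = json.Unmarshal([]byte(`{"mode":"search","nbest_cost":"2000"}`), &tok)
//	// tok.NbestCost now points at 2000 despite the quoted input
// ----------------------------------------------------------------------------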
+func NewLanguageContext() *LanguageContext { + r := &LanguageContext{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/laplacesmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/laplacesmoothingmodel.go new file mode 100644 index 000000000..8d7792bdc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/laplacesmoothingmodel.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LaplaceSmoothingModel type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L431-L436 +type LaplaceSmoothingModel struct { + // Alpha A constant that is added to all counts to balance weights. + Alpha Float64 `json:"alpha"` +} + +func (s *LaplaceSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Alpha", err) + } + f := Float64(value) + s.Alpha = f + case float64: + f := Float64(v) + s.Alpha = f + } + + } + } + return nil +} + +// NewLaplaceSmoothingModel returns a LaplaceSmoothingModel. +func NewLaplaceSmoothingModel() *LaplaceSmoothingModel { + r := &LaplaceSmoothingModel{} + + return r +} + +type LaplaceSmoothingModelVariant interface { + LaplaceSmoothingModelCaster() *LaplaceSmoothingModel +} + +func (s *LaplaceSmoothingModel) LaplaceSmoothingModelCaster() *LaplaceSmoothingModel { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latest.go new file mode 100644 index 000000000..31bf4f78d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latest.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Latest type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L47-L52 +type Latest struct { + // Sort Specifies the date field that is used to identify the latest documents. + Sort string `json:"sort"` + // UniqueKey Specifies an array of one or more fields that are used to group the data. + UniqueKey []string `json:"unique_key"` +} + +func (s *Latest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "unique_key": + if err := dec.Decode(&s.UniqueKey); err != nil { + return fmt.Errorf("%s | %w", "UniqueKey", err) + } + + } + } + return nil +} + +// NewLatest returns a Latest. +func NewLatest() *Latest { + r := &Latest{} + + return r +} + +type LatestVariant interface { + LatestCaster() *Latest +} + +func (s *Latest) LatestCaster() *Latest { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latlongeolocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latlongeolocation.go new file mode 100644 index 000000000..a37699a4b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latlongeolocation.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LatLonGeoLocation type. 
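// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the type defined just below: the *Caster methods are how a concrete
// variant is funneled into a union type; coordinates are hypothetical:
//
//	loc := types.NewLatLonGeoLocation()
//	loc.Lat, loc.Lon = 52.52, 13.405
//	geo := loc.GeoLocationCaster() // usable wherever a GeoLocation is expected
// ----------------------------------------------------------------------------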
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L120-L129 +type LatLonGeoLocation struct { + // Lat Latitude + Lat Float64 `json:"lat"` + // Lon Longitude + Lon Float64 `json:"lon"` +} + +func (s *LatLonGeoLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lat": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lat", err) + } + f := Float64(value) + s.Lat = f + case float64: + f := Float64(v) + s.Lat = f + } + + case "lon": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lon", err) + } + f := Float64(value) + s.Lon = f + case float64: + f := Float64(v) + s.Lon = f + } + + } + } + return nil +} + +// NewLatLonGeoLocation returns a LatLonGeoLocation. +func NewLatLonGeoLocation() *LatLonGeoLocation { + r := &LatLonGeoLocation{} + + return r +} + +type LatLonGeoLocationVariant interface { + LatLonGeoLocationCaster() *LatLonGeoLocation +} + +func (s *LatLonGeoLocation) LatLonGeoLocationCaster() *LatLonGeoLocation { + return s +} + +func (s *LatLonGeoLocation) GeoLocationCaster() *GeoLocation { + o := GeoLocation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latviananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latviananalyzer.go new file mode 100644 index 000000000..f615c1ce6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/latviananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LatvianAnalyzer type. 
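// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the analyzer defined just below; the exclusion list is hypothetical:
//
//	a := types.NewLatvianAnalyzer()
//	a.StemExclusion = []string{"saule"}
//	b, _ := json.Marshal(a) // MarshalJSON pins type to "latvian"
// ----------------------------------------------------------------------------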
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L234-L239 +type LatvianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *LatvianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LatvianAnalyzer) MarshalJSON() ([]byte, error) { + type innerLatvianAnalyzer LatvianAnalyzer + tmp := innerLatvianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "latvian" + + return json.Marshal(tmp) +} + +// NewLatvianAnalyzer returns a LatvianAnalyzer. +func NewLatvianAnalyzer() *LatvianAnalyzer { + r := &LatvianAnalyzer{} + + return r +} + +type LatvianAnalyzerVariant interface { + LatvianAnalyzerCaster() *LatvianAnalyzer +} + +func (s *LatvianAnalyzer) LatvianAnalyzerCaster() *LatvianAnalyzer { + return s +} + +func (s *LatvianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorank.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorank.go new file mode 100644 index 000000000..e68ff7528 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorank.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LearningToRank type. 
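// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the rescorer defined just below; the model id and template parameter
// names are hypothetical:
//
//	ltr := types.NewLearningToRank()
//	ltr.ModelId = "my-ltr-model"
//	ltr.Params = map[string]json.RawMessage{
//		"keywords": json.RawMessage(`"user query text"`),
//	}
// ----------------------------------------------------------------------------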
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/rescoring.ts#L88-L97 +type LearningToRank struct { + // ModelId The unique identifier of the trained model uploaded to Elasticsearch + ModelId string `json:"model_id"` + // Params Named parameters to be passed to the query templates used for feature + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +func (s *LearningToRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + } + } + return nil +} + +// NewLearningToRank returns a LearningToRank. +func NewLearningToRank() *LearningToRank { + r := &LearningToRank{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type LearningToRankVariant interface { + LearningToRankCaster() *LearningToRank +} + +func (s *LearningToRank) LearningToRankCaster() *LearningToRank { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorankconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorankconfig.go new file mode 100644 index 000000000..63d0813e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/learningtorankconfig.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LearningToRankConfig type. 
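// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the config defined just below. FeatureExtractors decodes as a list of
// single-entry maps keyed by extractor name; the payload shape and key names
// here are assumptions for illustration:
//
//	raw := `{"num_top_feature_importance_values":0,
//	  "feature_extractors":[{"query_extractor":{"feature_name":"title_bm25","query":{"match_all":{}}}}]}`
//	var cfg types.LearningToRankConfig
//	_ = json.Unmarshal([]byte(raw), &cfg)
// ----------------------------------------------------------------------------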
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L87-L91 +type LearningToRankConfig struct { + DefaultParams map[string]json.RawMessage `json:"default_params,omitempty"` + FeatureExtractors []map[string]QueryFeatureExtractor `json:"feature_extractors,omitempty"` + NumTopFeatureImportanceValues int `json:"num_top_feature_importance_values"` +} + +func (s *LearningToRankConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_params": + if s.DefaultParams == nil { + s.DefaultParams = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.DefaultParams); err != nil { + return fmt.Errorf("%s | %w", "DefaultParams", err) + } + + case "feature_extractors": + if err := dec.Decode(&s.FeatureExtractors); err != nil { + return fmt.Errorf("%s | %w", "FeatureExtractors", err) + } + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = f + } + + } + } + return nil +} + +// NewLearningToRankConfig returns a LearningToRankConfig. +func NewLearningToRankConfig() *LearningToRankConfig { + r := &LearningToRankConfig{ + DefaultParams: make(map[string]json.RawMessage), + } + + return r +} + +type LearningToRankConfigVariant interface { + LearningToRankConfigCaster() *LearningToRankConfig +} + +func (s *LearningToRankConfig) LearningToRankConfigCaster() *LearningToRankConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lengthtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lengthtokenfilter.go new file mode 100644 index 000000000..19f48222f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lengthtokenfilter.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LengthTokenFilter type. 
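// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the filter defined just below; both bounds are optional pointers:
//
//	lo, hi := 3, 20
//	f := types.NewLengthTokenFilter()
//	f.Min, f.Max = &lo, &hi // drop tokens shorter than 3 or longer than 20 chars
// ----------------------------------------------------------------------------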
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L328-L334 +type LengthTokenFilter struct { + // Max Maximum character length of a token. Longer tokens are excluded from the + // output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. + Max *int `json:"max,omitempty"` + // Min Minimum character length of a token. Shorter tokens are excluded from the + // output. Defaults to `0`. + Min *int `json:"min,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LengthTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + s.Max = &value + case float64: + f := int(v) + s.Max = &f + } + + case "min": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + s.Min = &value + case float64: + f := int(v) + s.Min = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LengthTokenFilter) MarshalJSON() ([]byte, error) { + type innerLengthTokenFilter LengthTokenFilter + tmp := innerLengthTokenFilter{ + Max: s.Max, + Min: s.Min, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "length" + + return json.Marshal(tmp) +} + +// NewLengthTokenFilter returns a LengthTokenFilter. +func NewLengthTokenFilter() *LengthTokenFilter { + r := &LengthTokenFilter{} + + return r +} + +type LengthTokenFilterVariant interface { + LengthTokenFilterCaster() *LengthTokenFilter +} + +func (s *LengthTokenFilter) LengthTokenFilterCaster() *LengthTokenFilter { + return s +} + +func (s *LengthTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lessthanvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lessthanvalidation.go new file mode 100644 index 000000000..796d87d8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lessthanvalidation.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LessThanValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L58-L61 +type LessThanValidation struct { + Constraint Float64 `json:"constraint"` + Type string `json:"type,omitempty"` +} + +func (s *LessThanValidation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "constraint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Constraint", err) + } + f := Float64(value) + s.Constraint = f + case float64: + f := Float64(v) + s.Constraint = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LessThanValidation) MarshalJSON() ([]byte, error) { + type innerLessThanValidation LessThanValidation + tmp := innerLessThanValidation{ + Constraint: s.Constraint, + Type: s.Type, + } + + tmp.Type = "less_than" + + return json.Marshal(tmp) +} + +// NewLessThanValidation returns a LessThanValidation. +func NewLessThanValidation() *LessThanValidation { + r := &LessThanValidation{} + + return r +} + +type LessThanValidationVariant interface { + LessThanValidationCaster() *LessThanValidation +} + +func (s *LessThanValidation) LessThanValidationCaster() *LessThanValidation { + return s +} + +func (s *LessThanValidation) ValidationCaster() *Validation { + o := Validation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lettertokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lettertokenizer.go new file mode 100644 index 000000000..2ad6ad698 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lettertokenizer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// LetterTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L77-L79 +type LetterTokenizer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LetterTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LetterTokenizer) MarshalJSON() ([]byte, error) { + type innerLetterTokenizer LetterTokenizer + tmp := innerLetterTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "letter" + + return json.Marshal(tmp) +} + +// NewLetterTokenizer returns a LetterTokenizer. +func NewLetterTokenizer() *LetterTokenizer { + r := &LetterTokenizer{} + + return r +} + +type LetterTokenizerVariant interface { + LetterTokenizerCaster() *LetterTokenizer +} + +func (s *LetterTokenizer) LetterTokenizerCaster() *LetterTokenizer { + return s +} + +func (s *LetterTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/license.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/license.go new file mode 100644 index 000000000..d15b82b7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/license.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype" +) + +// License type. 
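// ---- editor's note: illustrative sketch, not part of the vendored file ----
// For the type defined just below; the uid is a placeholder and the enum
// value assumes the generated licensetype package exposes Basic:
//
//	lic := types.NewLicense()
//	lic.Uid = "00000000-0000-0000-0000-000000000000"
//	lic.Type = licensetype.Basic
// ----------------------------------------------------------------------------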
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/_types/License.ts#L42-L53 +type License struct { + ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` + IssueDateInMillis int64 `json:"issue_date_in_millis"` + IssuedTo string `json:"issued_to"` + Issuer string `json:"issuer"` + MaxNodes *int64 `json:"max_nodes,omitempty"` + MaxResourceUnits *int64 `json:"max_resource_units,omitempty"` + Signature string `json:"signature"` + StartDateInMillis *int64 `json:"start_date_in_millis,omitempty"` + Type licensetype.LicenseType `json:"type"` + Uid string `json:"uid"` +} + +func (s *License) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExpiryDateInMillis", err) + } + + case "issue_date_in_millis": + if err := dec.Decode(&s.IssueDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "IssueDateInMillis", err) + } + + case "issued_to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IssuedTo", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IssuedTo = o + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Issuer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = o + + case "max_nodes": + if err := dec.Decode(&s.MaxNodes); err != nil { + return fmt.Errorf("%s | %w", "MaxNodes", err) + } + + case "max_resource_units": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxResourceUnits", err) + } + s.MaxResourceUnits = &value + case float64: + f := int64(v) + s.MaxResourceUnits = &f + } + + case "signature": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Signature", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Signature = o + + case "start_date_in_millis": + if err := dec.Decode(&s.StartDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartDateInMillis", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Uid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uid = o + + } + } + return nil +} + +// NewLicense returns a License. +func NewLicense() *License { + r := &License{} + + return r +} + +type LicenseVariant interface { + LicenseCaster() *License +} + +func (s *License) LicenseCaster() *License { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/licenseinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/licenseinformation.go new file mode 100644 index 000000000..9f364abc5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/licenseinformation.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype" +) + +// LicenseInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/license/get/types.ts#L25-L38 +type LicenseInformation struct { + ExpiryDate DateTime `json:"expiry_date,omitempty"` + ExpiryDateInMillis *int64 `json:"expiry_date_in_millis,omitempty"` + IssueDate DateTime `json:"issue_date"` + IssueDateInMillis int64 `json:"issue_date_in_millis"` + IssuedTo string `json:"issued_to"` + Issuer string `json:"issuer"` + MaxNodes *int64 `json:"max_nodes,omitempty"` + MaxResourceUnits *int `json:"max_resource_units,omitempty"` + StartDateInMillis int64 `json:"start_date_in_millis"` + Status licensestatus.LicenseStatus `json:"status"` + Type licensetype.LicenseType `json:"type"` + Uid string `json:"uid"` +} + +func (s *LicenseInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date": + if err := dec.Decode(&s.ExpiryDate); err != nil { + return fmt.Errorf("%s | %w", "ExpiryDate", err) + } + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExpiryDateInMillis", err) + } + + case "issue_date": + if err := dec.Decode(&s.IssueDate); err != nil { + return fmt.Errorf("%s | %w", "IssueDate", err) + } + + case "issue_date_in_millis": + if err := dec.Decode(&s.IssueDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "IssueDateInMillis", err) + } + + case "issued_to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IssuedTo", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IssuedTo = o + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Issuer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = o + + case "max_nodes": + if err := dec.Decode(&s.MaxNodes); err != nil { + return fmt.Errorf("%s | %w", "MaxNodes", err) + } + + case "max_resource_units": + if err := dec.Decode(&s.MaxResourceUnits); err != nil { + return fmt.Errorf("%s | %w", 
"MaxResourceUnits", err) + } + + case "start_date_in_millis": + if err := dec.Decode(&s.StartDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartDateInMillis", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return fmt.Errorf("%s | %w", "Uid", err) + } + + } + } + return nil +} + +// NewLicenseInformation returns a LicenseInformation. +func NewLicenseInformation() *LicenseInformation { + r := &LicenseInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycle.go new file mode 100644 index 000000000..8d593ceb8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycle.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Lifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/get_lifecycle/types.ts#L24-L28 +type Lifecycle struct { + ModifiedDate DateTime `json:"modified_date"` + Policy IlmPolicy `json:"policy"` + Version int64 `json:"version"` +} + +func (s *Lifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDate", err) + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewLifecycle returns a Lifecycle. +func NewLifecycle() *Lifecycle { + r := &Lifecycle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplain.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplain.go new file mode 100644 index 000000000..7b8363db9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplain.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// LifecycleExplain holds the union for the following types: +// +// LifecycleExplainManaged +// LifecycleExplainUnmanaged +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/explain_lifecycle/types.ts#L65-L68 +type LifecycleExplain any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainmanaged.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainmanaged.go new file mode 100644 index 000000000..170e18eaa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainmanaged.go @@ -0,0 +1,329 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LifecycleExplainManaged type. 
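+// Returned by the ILM explain API for an index managed by a lifecycle policy; MarshalJSON below always emits "managed": true.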
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/explain_lifecycle/types.ts#L27-L58 +type LifecycleExplainManaged struct { + Action *string `json:"action,omitempty"` + ActionTime DateTime `json:"action_time,omitempty"` + ActionTimeMillis *int64 `json:"action_time_millis,omitempty"` + Age Duration `json:"age,omitempty"` + FailedStep *string `json:"failed_step,omitempty"` + FailedStepRetryCount *int `json:"failed_step_retry_count,omitempty"` + Index string `json:"index"` + IndexCreationDate DateTime `json:"index_creation_date,omitempty"` + IndexCreationDateMillis *int64 `json:"index_creation_date_millis,omitempty"` + IsAutoRetryableError *bool `json:"is_auto_retryable_error,omitempty"` + LifecycleDate DateTime `json:"lifecycle_date,omitempty"` + LifecycleDateMillis *int64 `json:"lifecycle_date_millis,omitempty"` + Managed bool `json:"managed,omitempty"` + Phase *string `json:"phase,omitempty"` + PhaseExecution *LifecycleExplainPhaseExecution `json:"phase_execution,omitempty"` + PhaseTime DateTime `json:"phase_time,omitempty"` + PhaseTimeMillis *int64 `json:"phase_time_millis,omitempty"` + Policy *string `json:"policy,omitempty"` + PreviousStepInfo map[string]json.RawMessage `json:"previous_step_info,omitempty"` + RepositoryName *string `json:"repository_name,omitempty"` + ShrinkIndexName *string `json:"shrink_index_name,omitempty"` + Skip bool `json:"skip"` + SnapshotName *string `json:"snapshot_name,omitempty"` + Step *string `json:"step,omitempty"` + StepInfo map[string]json.RawMessage `json:"step_info,omitempty"` + StepTime DateTime `json:"step_time,omitempty"` + StepTimeMillis *int64 `json:"step_time_millis,omitempty"` + TimeSinceIndexCreation Duration `json:"time_since_index_creation,omitempty"` +} + +func (s *LifecycleExplainManaged) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + if err := dec.Decode(&s.Action); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + + case "action_time": + if err := dec.Decode(&s.ActionTime); err != nil { + return fmt.Errorf("%s | %w", "ActionTime", err) + } + + case "action_time_millis": + if err := dec.Decode(&s.ActionTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "ActionTimeMillis", err) + } + + case "age": + if err := dec.Decode(&s.Age); err != nil { + return fmt.Errorf("%s | %w", "Age", err) + } + + case "failed_step": + if err := dec.Decode(&s.FailedStep); err != nil { + return fmt.Errorf("%s | %w", "FailedStep", err) + } + + case "failed_step_retry_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailedStepRetryCount", err) + } + s.FailedStepRetryCount = &value + case float64: + f := int(v) + s.FailedStepRetryCount = &f + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "index_creation_date": + if err := dec.Decode(&s.IndexCreationDate); err != nil { + return fmt.Errorf("%s | %w", "IndexCreationDate", err) + } + + case "index_creation_date_millis": + if err := dec.Decode(&s.IndexCreationDateMillis); err != nil { + return fmt.Errorf("%s | %w", "IndexCreationDateMillis", err) + } + + case "is_auto_retryable_error": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + 
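+ // Tolerant decoding: the value may arrive as a JSON bool or as its quoted string form; both cases are handled below.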
case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsAutoRetryableError", err) + } + s.IsAutoRetryableError = &value + case bool: + s.IsAutoRetryableError = &v + } + + case "lifecycle_date": + if err := dec.Decode(&s.LifecycleDate); err != nil { + return fmt.Errorf("%s | %w", "LifecycleDate", err) + } + + case "lifecycle_date_millis": + if err := dec.Decode(&s.LifecycleDateMillis); err != nil { + return fmt.Errorf("%s | %w", "LifecycleDateMillis", err) + } + + case "managed": + if err := dec.Decode(&s.Managed); err != nil { + return fmt.Errorf("%s | %w", "Managed", err) + } + + case "phase": + if err := dec.Decode(&s.Phase); err != nil { + return fmt.Errorf("%s | %w", "Phase", err) + } + + case "phase_execution": + if err := dec.Decode(&s.PhaseExecution); err != nil { + return fmt.Errorf("%s | %w", "PhaseExecution", err) + } + + case "phase_time": + if err := dec.Decode(&s.PhaseTime); err != nil { + return fmt.Errorf("%s | %w", "PhaseTime", err) + } + + case "phase_time_millis": + if err := dec.Decode(&s.PhaseTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "PhaseTimeMillis", err) + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "previous_step_info": + if s.PreviousStepInfo == nil { + s.PreviousStepInfo = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.PreviousStepInfo); err != nil { + return fmt.Errorf("%s | %w", "PreviousStepInfo", err) + } + + case "repository_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RepositoryName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RepositoryName = &o + + case "shrink_index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ShrinkIndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ShrinkIndexName = &o + + case "skip": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Skip", err) + } + s.Skip = value + case bool: + s.Skip = v + } + + case "snapshot_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SnapshotName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SnapshotName = &o + + case "step": + if err := dec.Decode(&s.Step); err != nil { + return fmt.Errorf("%s | %w", "Step", err) + } + + case "step_info": + if s.StepInfo == nil { + s.StepInfo = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.StepInfo); err != nil { + return fmt.Errorf("%s | %w", "StepInfo", err) + } + + case "step_time": + if err := dec.Decode(&s.StepTime); err != nil { + return fmt.Errorf("%s | %w", "StepTime", err) + } + + case "step_time_millis": + if err := dec.Decode(&s.StepTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StepTimeMillis", err) + } + + case "time_since_index_creation": + if err := dec.Decode(&s.TimeSinceIndexCreation); err != nil { + return fmt.Errorf("%s | %w", "TimeSinceIndexCreation", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { + type innerLifecycleExplainManaged LifecycleExplainManaged + tmp 
:= innerLifecycleExplainManaged{ + Action: s.Action, + ActionTime: s.ActionTime, + ActionTimeMillis: s.ActionTimeMillis, + Age: s.Age, + FailedStep: s.FailedStep, + FailedStepRetryCount: s.FailedStepRetryCount, + Index: s.Index, + IndexCreationDate: s.IndexCreationDate, + IndexCreationDateMillis: s.IndexCreationDateMillis, + IsAutoRetryableError: s.IsAutoRetryableError, + LifecycleDate: s.LifecycleDate, + LifecycleDateMillis: s.LifecycleDateMillis, + Managed: s.Managed, + Phase: s.Phase, + PhaseExecution: s.PhaseExecution, + PhaseTime: s.PhaseTime, + PhaseTimeMillis: s.PhaseTimeMillis, + Policy: s.Policy, + PreviousStepInfo: s.PreviousStepInfo, + RepositoryName: s.RepositoryName, + ShrinkIndexName: s.ShrinkIndexName, + Skip: s.Skip, + SnapshotName: s.SnapshotName, + Step: s.Step, + StepInfo: s.StepInfo, + StepTime: s.StepTime, + StepTimeMillis: s.StepTimeMillis, + TimeSinceIndexCreation: s.TimeSinceIndexCreation, + } + + tmp.Managed = true + + return json.Marshal(tmp) +} + +// NewLifecycleExplainManaged returns a LifecycleExplainManaged. +func NewLifecycleExplainManaged() *LifecycleExplainManaged { + r := &LifecycleExplainManaged{ + PreviousStepInfo: make(map[string]json.RawMessage), + StepInfo: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainphaseexecution.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainphaseexecution.go new file mode 100644 index 000000000..6975e0cc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainphaseexecution.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// LifecycleExplainPhaseExecution type. 
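+// Snapshot of the policy, version, and phase definition driving the current execution.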
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/explain_lifecycle/types.ts#L70-L75 +type LifecycleExplainPhaseExecution struct { + ModifiedDateInMillis int64 `json:"modified_date_in_millis"` + PhaseDefinition *Phase `json:"phase_definition,omitempty"` + Policy string `json:"policy"` + Version int64 `json:"version"` +} + +func (s *LifecycleExplainPhaseExecution) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "modified_date_in_millis": + if err := dec.Decode(&s.ModifiedDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateInMillis", err) + } + + case "phase_definition": + if err := dec.Decode(&s.PhaseDefinition); err != nil { + return fmt.Errorf("%s | %w", "PhaseDefinition", err) + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewLifecycleExplainPhaseExecution returns a LifecycleExplainPhaseExecution. +func NewLifecycleExplainPhaseExecution() *LifecycleExplainPhaseExecution { + r := &LifecycleExplainPhaseExecution{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainunmanaged.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainunmanaged.go new file mode 100644 index 000000000..1798f80b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lifecycleexplainunmanaged.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// LifecycleExplainUnmanaged type. 
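+// Returned by the ILM explain API for an index with no lifecycle policy; MarshalJSON below always emits "managed": false.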
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/explain_lifecycle/types.ts#L60-L63 +type LifecycleExplainUnmanaged struct { + Index string `json:"index"` + Managed bool `json:"managed,omitempty"` +} + +func (s *LifecycleExplainUnmanaged) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "managed": + if err := dec.Decode(&s.Managed); err != nil { + return fmt.Errorf("%s | %w", "Managed", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LifecycleExplainUnmanaged) MarshalJSON() ([]byte, error) { + type innerLifecycleExplainUnmanaged LifecycleExplainUnmanaged + tmp := innerLifecycleExplainUnmanaged{ + Index: s.Index, + Managed: s.Managed, + } + + tmp.Managed = false + + return json.Marshal(tmp) +} + +// NewLifecycleExplainUnmanaged returns a LifecycleExplainUnmanaged. +func NewLifecycleExplainUnmanaged() *LifecycleExplainUnmanaged { + r := &LifecycleExplainUnmanaged{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/like.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/like.go new file mode 100644 index 000000000..504521ce5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/like.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Like holds the union for the following types: +// +// string +// LikeDocument +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L198-L203 +type Like any + +type LikeVariant interface { + LikeCaster() *Like +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/likedocument.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/likedocument.go new file mode 100644 index 000000000..f041439c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/likedocument.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// LikeDocument type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L174-L196 +type LikeDocument struct { + // Doc A document not present in the index. + Doc json.RawMessage `json:"doc,omitempty"` + Fields []string `json:"fields,omitempty"` + // Id_ ID of a document. + Id_ *string `json:"_id,omitempty"` + // Index_ Index of a document. + Index_ *string `json:"_index,omitempty"` + // PerFieldAnalyzer Overrides the default analyzer. + PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *LikeDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "per_field_analyzer": + if s.PerFieldAnalyzer == nil { + s.PerFieldAnalyzer = make(map[string]string, 0) + } + if err := dec.Decode(&s.PerFieldAnalyzer); err != nil { + return fmt.Errorf("%s | %w", "PerFieldAnalyzer", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewLikeDocument returns a LikeDocument. 
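+// The PerFieldAnalyzer map is pre-allocated, so callers can add entries without a nil-map check.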
+func NewLikeDocument() *LikeDocument { + r := &LikeDocument{ + PerFieldAnalyzer: make(map[string]string), + } + + return r +} + +type LikeDocumentVariant interface { + LikeDocumentCaster() *LikeDocument +} + +func (s *LikeDocument) LikeDocumentCaster() *LikeDocument { + return s +} + +func (s *LikeDocument) LikeCaster() *Like { + o := Like(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limits.go new file mode 100644 index 000000000..d28ef5e4c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limits.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Limits type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/types.ts#L34-L40 +type Limits struct { + EffectiveMaxModelMemoryLimit ByteSize `json:"effective_max_model_memory_limit,omitempty"` + MaxModelMemoryLimit ByteSize `json:"max_model_memory_limit,omitempty"` + MaxSingleMlNodeProcessors *int `json:"max_single_ml_node_processors,omitempty"` + TotalMlMemory ByteSize `json:"total_ml_memory"` + TotalMlProcessors *int `json:"total_ml_processors,omitempty"` +} + +func (s *Limits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "effective_max_model_memory_limit": + if err := dec.Decode(&s.EffectiveMaxModelMemoryLimit); err != nil { + return fmt.Errorf("%s | %w", "EffectiveMaxModelMemoryLimit", err) + } + + case "max_model_memory_limit": + if err := dec.Decode(&s.MaxModelMemoryLimit); err != nil { + return fmt.Errorf("%s | %w", "MaxModelMemoryLimit", err) + } + + case "max_single_ml_node_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSingleMlNodeProcessors", err) + } + s.MaxSingleMlNodeProcessors = &value + case float64: + f := int(v) + s.MaxSingleMlNodeProcessors = &f + } + + case "total_ml_memory": + if err := dec.Decode(&s.TotalMlMemory); err != nil { + return fmt.Errorf("%s | %w", "TotalMlMemory", err) + } + + case "total_ml_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | 
%w", "TotalMlProcessors", err) + } + s.TotalMlProcessors = &value + case float64: + f := int(v) + s.TotalMlProcessors = &f + } + + } + } + return nil +} + +// NewLimits returns a Limits. +func NewLimits() *Limits { + r := &Limits{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limittokencounttokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limittokencounttokenfilter.go new file mode 100644 index 000000000..45a945c32 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/limittokencounttokenfilter.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LimitTokenCountTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L336-L342 +type LimitTokenCountTokenFilter struct { + // ConsumeAllTokens If `true`, the limit filter exhausts the token stream, even if the + // `max_token_count` has already been reached. Defaults to `false`. + ConsumeAllTokens *bool `json:"consume_all_tokens,omitempty"` + // MaxTokenCount Maximum number of tokens to keep. Once this limit is reached, any remaining + // tokens are excluded from the output. Defaults to `1`. 
+ MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LimitTokenCountTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "consume_all_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ConsumeAllTokens", err) + } + s.ConsumeAllTokens = &value + case bool: + s.ConsumeAllTokens = &v + } + + case "max_token_count": + if err := dec.Decode(&s.MaxTokenCount); err != nil { + return fmt.Errorf("%s | %w", "MaxTokenCount", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LimitTokenCountTokenFilter) MarshalJSON() ([]byte, error) { + type innerLimitTokenCountTokenFilter LimitTokenCountTokenFilter + tmp := innerLimitTokenCountTokenFilter{ + ConsumeAllTokens: s.ConsumeAllTokens, + MaxTokenCount: s.MaxTokenCount, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "limit" + + return json.Marshal(tmp) +} + +// NewLimitTokenCountTokenFilter returns a LimitTokenCountTokenFilter. +func NewLimitTokenCountTokenFilter() *LimitTokenCountTokenFilter { + r := &LimitTokenCountTokenFilter{} + + return r +} + +type LimitTokenCountTokenFilterVariant interface { + LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter +} + +func (s *LimitTokenCountTokenFilter) LimitTokenCountTokenFilterCaster() *LimitTokenCountTokenFilter { + return s +} + +func (s *LimitTokenCountTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearinterpolationsmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearinterpolationsmoothingmodel.go new file mode 100644 index 000000000..56bfe040f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearinterpolationsmoothingmodel.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LinearInterpolationSmoothingModel type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L438-L442 +type LinearInterpolationSmoothingModel struct { + BigramLambda Float64 `json:"bigram_lambda"` + TrigramLambda Float64 `json:"trigram_lambda"` + UnigramLambda Float64 `json:"unigram_lambda"` +} + +func (s *LinearInterpolationSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bigram_lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BigramLambda", err) + } + f := Float64(value) + s.BigramLambda = f + case float64: + f := Float64(v) + s.BigramLambda = f + } + + case "trigram_lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TrigramLambda", err) + } + f := Float64(value) + s.TrigramLambda = f + case float64: + f := Float64(v) + s.TrigramLambda = f + } + + case "unigram_lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UnigramLambda", err) + } + f := Float64(value) + s.UnigramLambda = f + case float64: + f := Float64(v) + s.UnigramLambda = f + } + + } + } + return nil +} + +// NewLinearInterpolationSmoothingModel returns a LinearInterpolationSmoothingModel. +func NewLinearInterpolationSmoothingModel() *LinearInterpolationSmoothingModel { + r := &LinearInterpolationSmoothingModel{} + + return r +} + +type LinearInterpolationSmoothingModelVariant interface { + LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel +} + +func (s *LinearInterpolationSmoothingModel) LinearInterpolationSmoothingModelCaster() *LinearInterpolationSmoothingModel { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearmovingaverageaggregation.go new file mode 100644 index 000000000..2f1959252 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearmovingaverageaggregation.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// LinearMovingAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L268-L271 +type LinearMovingAverageAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EmptyObject `json:"settings"` + Window *int `json:"window,omitempty"` +} + +func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "minimize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Minimize", err) + } + s.Minimize = &value + case bool: + s.Minimize = &v + } + + case "model": + if err := dec.Decode(&s.Model); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + + case "predict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Predict", err) + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LinearMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerLinearMovingAverageAggregation LinearMovingAverageAggregation + tmp := innerLinearMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + 
Minimize: s.Minimize, + Model: s.Model, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "linear" + + return json.Marshal(tmp) +} + +// NewLinearMovingAverageAggregation returns a LinearMovingAverageAggregation. +func NewLinearMovingAverageAggregation() *LinearMovingAverageAggregation { + r := &LinearMovingAverageAggregation{} + + return r +} + +type LinearMovingAverageAggregationVariant interface { + LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation +} + +func (s *LinearMovingAverageAggregation) LinearMovingAverageAggregationCaster() *LinearMovingAverageAggregation { + return s +} + +func (s *LinearMovingAverageAggregation) MovingAverageAggregationCaster() *MovingAverageAggregation { + o := MovingAverageAggregation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearretriever.go new file mode 100644 index 000000000..4b455fc15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/linearretriever.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer" +) + +// LinearRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L68-L75 +type LinearRetriever struct { + Fields []string `json:"fields,omitempty"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + Normalizer *scorenormalizer.ScoreNormalizer `json:"normalizer,omitempty"` + Query *string `json:"query,omitempty"` + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retrievers Inner retrievers. 
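+ // Scores from the inner retrievers are combined linearly, optionally normalized (see Normalizer).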
+ Retrievers []InnerRetriever `json:"retrievers,omitempty"` +} + +func (s *LinearRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "normalizer": + if err := dec.Decode(&s.Normalizer); err != nil { + return fmt.Errorf("%s | %w", "Normalizer", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retrievers": + if err := dec.Decode(&s.Retrievers); err != nil { + return fmt.Errorf("%s | %w", "Retrievers", err) + } + + } + } + return nil +} + +// NewLinearRetriever returns a LinearRetriever. +func NewLinearRetriever() *LinearRetriever { + r := &LinearRetriever{} + + return r +} + +type LinearRetrieverVariant interface { + LinearRetrieverCaster() *LinearRetriever +} + +func (s *LinearRetriever) LinearRetrieverCaster() *LinearRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/listtypevalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/listtypevalidation.go new file mode 100644 index 000000000..2808035c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/listtypevalidation.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ListTypeValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L68-L71 +type ListTypeValidation struct { + Constraint string `json:"constraint"` + Type string `json:"type,omitempty"` +} + +func (s *ListTypeValidation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "constraint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Constraint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Constraint = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ListTypeValidation) MarshalJSON() ([]byte, error) { + type innerListTypeValidation ListTypeValidation + tmp := innerListTypeValidation{ + Constraint: s.Constraint, + Type: s.Type, + } + + tmp.Type = "list_type" + + return json.Marshal(tmp) +} + +// NewListTypeValidation returns a ListTypeValidation. +func NewListTypeValidation() *ListTypeValidation { + r := &ListTypeValidation{} + + return r +} + +type ListTypeValidationVariant interface { + ListTypeValidationCaster() *ListTypeValidation +} + +func (s *ListTypeValidation) ListTypeValidationCaster() *ListTypeValidation { + return s +} + +func (s *ListTypeValidation) ValidationCaster() *Validation { + o := Validation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lithuaniananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lithuaniananalyzer.go new file mode 100644 index 000000000..510442c0e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lithuaniananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
+// See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// LithuanianAnalyzer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L241-L246
+type LithuanianAnalyzer struct {
+	StemExclusion []string  `json:"stem_exclusion,omitempty"`
+	Stopwords     StopWords `json:"stopwords,omitempty"`
+	StopwordsPath *string   `json:"stopwords_path,omitempty"`
+	Type          string    `json:"type,omitempty"`
+}
+
+func (s *LithuanianAnalyzer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "stem_exclusion":
+			if err := dec.Decode(&s.StemExclusion); err != nil {
+				return fmt.Errorf("%s | %w", "StemExclusion", err)
+			}
+
+		case "stopwords":
+			if err := dec.Decode(&s.Stopwords); err != nil {
+				return fmt.Errorf("%s | %w", "Stopwords", err)
+			}
+
+		case "stopwords_path":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "StopwordsPath", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.StopwordsPath = &o
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s LithuanianAnalyzer) MarshalJSON() ([]byte, error) {
+	type innerLithuanianAnalyzer LithuanianAnalyzer
+	tmp := innerLithuanianAnalyzer{
+		StemExclusion: s.StemExclusion,
+		Stopwords:     s.Stopwords,
+		StopwordsPath: s.StopwordsPath,
+		Type:          s.Type,
+	}
+
+	tmp.Type = "lithuanian"
+
+	return json.Marshal(tmp)
+}
+
+// NewLithuanianAnalyzer returns a LithuanianAnalyzer.
+func NewLithuanianAnalyzer() *LithuanianAnalyzer {
+	r := &LithuanianAnalyzer{}
+
+	return r
+}
+
+type LithuanianAnalyzerVariant interface {
+	LithuanianAnalyzerCaster() *LithuanianAnalyzer
+}
+
+func (s *LithuanianAnalyzer) LithuanianAnalyzerCaster() *LithuanianAnalyzer {
+	return s
+}
+
+func (s *LithuanianAnalyzer) AnalyzerCaster() *Analyzer {
+	o := Analyzer(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/local.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/local.go
new file mode 100644
index 000000000..419dda1d0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/local.go
@@ -0,0 +1,76 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Local type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L63-L65 +type Local struct { + Type string `json:"type"` +} + +func (s *Local) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewLocal returns a Local. +func NewLocal() *Local { + r := &Local{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingaction.go new file mode 100644 index 000000000..0eb0e8b4e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingaction.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LoggingAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L281-L285 +type LoggingAction struct { + Category *string `json:"category,omitempty"` + Level *string `json:"level,omitempty"` + Text string `json:"text"` +} + +func (s *LoggingAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "category": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Category", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Category = &o + + case "level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Level", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Level = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewLoggingAction returns a LoggingAction. +func NewLoggingAction() *LoggingAction { + r := &LoggingAction{} + + return r +} + +type LoggingActionVariant interface { + LoggingActionCaster() *LoggingAction +} + +func (s *LoggingAction) LoggingActionCaster() *LoggingAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingresult.go new file mode 100644 index 000000000..1166ae341 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/loggingresult.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LoggingResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L287-L289 +type LoggingResult struct { + LoggedText string `json:"logged_text"` +} + +func (s *LoggingResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "logged_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LoggedText", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LoggedText = o + + } + } + return nil +} + +// NewLoggingResult returns a LoggingResult. +func NewLoggingResult() *LoggingResult { + r := &LoggingResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/logstashpipeline.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/logstashpipeline.go new file mode 100644 index 000000000..c4e3e15fa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/logstashpipeline.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LogstashPipeline type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/logstash/_types/Pipeline.ts#L56-L87 +type LogstashPipeline struct { + // Description A description of the pipeline. + // This description is not used by Elasticsearch or Logstash. + Description string `json:"description"` + // LastModified The date the pipeline was last updated. + // It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. + LastModified DateTime `json:"last_modified"` + // Pipeline The configuration for the pipeline. + Pipeline string `json:"pipeline"` + // PipelineMetadata Optional metadata about the pipeline, which can have any contents. + // This metadata is not generated or used by Elasticsearch or Logstash. + PipelineMetadata PipelineMetadata `json:"pipeline_metadata"` + // PipelineSettings Settings for the pipeline. + // It supports only flat keys in dot notation. + PipelineSettings PipelineSettings `json:"pipeline_settings"` + // Username The user who last updated the pipeline. 
+ Username string `json:"username"` +} + +func (s *LogstashPipeline) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "last_modified": + if err := dec.Decode(&s.LastModified); err != nil { + return fmt.Errorf("%s | %w", "LastModified", err) + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = o + + case "pipeline_metadata": + if err := dec.Decode(&s.PipelineMetadata); err != nil { + return fmt.Errorf("%s | %w", "PipelineMetadata", err) + } + + case "pipeline_settings": + if err := dec.Decode(&s.PipelineSettings); err != nil { + return fmt.Errorf("%s | %w", "PipelineSettings", err) + } + + case "username": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Username = o + + } + } + return nil +} + +// NewLogstashPipeline returns a LogstashPipeline. +func NewLogstashPipeline() *LogstashPipeline { + r := &LogstashPipeline{} + + return r +} + +type LogstashPipelineVariant interface { + LogstashPipelineCaster() *LogstashPipeline +} + +func (s *LogstashPipeline) LogstashPipelineCaster() *LogstashPipeline { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longnumberproperty.go new file mode 100644 index 000000000..263434e4a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longnumberproperty.go @@ -0,0 +1,998 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype"
+)
+
+// LongNumberProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L177-L180
+type LongNumberProperty struct {
+	Boost           *Float64                       `json:"boost,omitempty"`
+	Coerce          *bool                          `json:"coerce,omitempty"`
+	CopyTo          []string                       `json:"copy_to,omitempty"`
+	DocValues       *bool                          `json:"doc_values,omitempty"`
+	Dynamic         *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields          map[string]Property            `json:"fields,omitempty"`
+	IgnoreAbove     *int                           `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool                          `json:"ignore_malformed,omitempty"`
+	Index           *bool                          `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta                map[string]string                                `json:"meta,omitempty"`
+	NullValue           *int64                                           `json:"null_value,omitempty"`
+	OnScriptError       *onscripterror.OnScriptError                     `json:"on_script_error,omitempty"`
+	Properties          map[string]Property                              `json:"properties,omitempty"`
+	Script              *Script                                          `json:"script,omitempty"`
+	Store               *bool                                            `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+	// Defaults to false.
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + s.NullValue = &value + case float64: + f := int64(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LongNumberProperty) MarshalJSON() ([]byte, error) { + type innerLongNumberProperty LongNumberProperty + tmp := innerLongNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "long" + + return json.Marshal(tmp) +} + +// NewLongNumberProperty returns a LongNumberProperty. +func NewLongNumberProperty() *LongNumberProperty { + r := &LongNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type LongNumberPropertyVariant interface { + LongNumberPropertyCaster() *LongNumberProperty +} + +func (s *LongNumberProperty) LongNumberPropertyCaster() *LongNumberProperty { + return s +} + +func (s *LongNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longrangeproperty.go new file mode 100644 index 000000000..2bcc6633b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longrangeproperty.go @@ -0,0 +1,922 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
+// See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum"
+)
+
+// LongRangeProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/range.ts#L50-L52
+type LongRangeProperty struct {
+	Boost       *Float64                       `json:"boost,omitempty"`
+	Coerce      *bool                          `json:"coerce,omitempty"`
+	CopyTo      []string                       `json:"copy_to,omitempty"`
+	DocValues   *bool                          `json:"doc_values,omitempty"`
+	Dynamic     *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields      map[string]Property            `json:"fields,omitempty"`
+	IgnoreAbove *int                           `json:"ignore_above,omitempty"`
+	Index       *bool                          `json:"index,omitempty"`
+	// Meta Metadata about the field.
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | 
%w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + 
oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", 
err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LongRangeProperty) MarshalJSON() ([]byte, error) { + type innerLongRangeProperty LongRangeProperty + tmp := innerLongRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "long_range" + + return json.Marshal(tmp) +} + +// NewLongRangeProperty returns a LongRangeProperty. +func NewLongRangeProperty() *LongRangeProperty { + r := &LongRangeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type LongRangePropertyVariant interface { + LongRangePropertyCaster() *LongRangeProperty +} + +func (s *LongRangeProperty) LongRangePropertyCaster() *LongRangeProperty { + return s +} + +func (s *LongRangeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsaggregate.go new file mode 100644 index 000000000..82f658f91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// LongRareTermsAggregate type. 
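
> **Editorial note.** The `fields`/`properties` decoders above all follow the same two-pass shape: first peek at the raw property to read its `type` (defaulting to `object` when the key is absent, as Elasticsearch omits it for plain object mappings), then decode the full message into the matching concrete property struct. A self-contained sketch of that sniff-then-decode idea, using a hypothetical two-type registry in place of the generated switch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// textProp and objectProp are hypothetical stand-ins for the generated
// property types (TextProperty, ObjectProperty, ...).
type textProp struct {
	Type     string `json:"type"`
	Analyzer string `json:"analyzer,omitempty"`
}

type objectProp struct {
	Type string `json:"type"`
}

// decodeProperty mirrors the vendored sniff: read only "type" first,
// default it to "object", then decode the whole message into the match.
func decodeProperty(msg json.RawMessage) (any, error) {
	kind := map[string]any{}
	if err := json.Unmarshal(msg, &kind); err != nil {
		return nil, err
	}
	if _, ok := kind["type"]; !ok {
		kind["type"] = "object" // mappings omit "type" for plain objects
	}
	switch kind["type"] {
	case "text":
		var p textProp
		return &p, json.Unmarshal(msg, &p)
	default:
		var p objectProp
		return &p, json.Unmarshal(msg, &p)
	}
}

func main() {
	p, _ := decodeProperty(json.RawMessage(`{"type":"text","analyzer":"std"}`))
	q, _ := decodeProperty(json.RawMessage(`{"properties":{}}`))
	fmt.Printf("%T %T\n", p, q) // *main.textProp *main.objectProp
}
```
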
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L471-L476 +type LongRareTermsAggregate struct { + Buckets BucketsLongRareTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *LongRareTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]LongRareTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []LongRareTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewLongRareTermsAggregate returns a LongRareTermsAggregate. +func NewLongRareTermsAggregate() *LongRareTermsAggregate { + r := &LongRareTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsbucket.go new file mode 100644 index 000000000..d6cf22929 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longraretermsbucket.go @@ -0,0 +1,661 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// LongRareTermsBucket type. 
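
> **Editorial note.** `BucketsLongRareTermsBucket` is a union: Elasticsearch returns buckets as an array by default, or as an object when the aggregation is requested with `keyed: true`. The generated decoder picks the Go shape by sniffing the first byte of the raw payload. A minimal standalone sketch of that dispatch, with a simplified `bucket` type standing in for the generated one:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type bucket struct {
	DocCount int64 `json:"doc_count"`
}

// decodeBuckets mirrors the vendored '{' vs '[' dispatch on the raw message.
func decodeBuckets(raw json.RawMessage) (any, error) {
	switch raw[0] {
	case '{': // keyed response: {"bucket_key": {...}, ...}
		m := map[string]bucket{}
		err := json.Unmarshal(raw, &m)
		return m, err
	case '[': // default response: [{...}, {...}]
		var s []bucket
		err := json.Unmarshal(raw, &s)
		return s, err
	}
	return nil, fmt.Errorf("unexpected buckets payload")
}

func main() {
	keyed, _ := decodeBuckets(json.RawMessage(`{"a":{"doc_count":3}}`))
	listed, _ := decodeBuckets(json.RawMessage(`[{"doc_count":3}]`))
	fmt.Printf("%T %T\n", keyed, listed) // map[string]main.bucket []main.bucket
}
```
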
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L478-L481 +type LongRareTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key int64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` +} + +func (s *LongRareTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + s.Key = value + case float64: + f := int64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if 
err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { 
+ return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s LongRareTermsBucket) MarshalJSON() ([]byte, error) { + type opt LongRareTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewLongRareTermsBucket returns a LongRareTermsBucket. +func NewLongRareTermsBucket() *LongRareTermsBucket { + r := &LongRareTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsaggregate.go new file mode 100644 index 000000000..f8dbc6883 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsaggregate.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LongTermsAggregate type. 
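
> **Editorial note.** The long `switch elems[0]` blocks in these bucket decoders exist because the typed API sends requests with `typed_keys`, so every sub-aggregation in the response arrives under a `<type>#<name>` key (for example `lterms#by_id`). The decoder splits on `#`, dispatches on the type prefix to a concrete aggregate struct, and stores the result under the bare name. A compact sketch of the split-and-dispatch idea, using a plain map in place of the generated switch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical typed_keys response body for a single bucket.
	data := []byte(`{"doc_count": 7, "avg#price": {"value": 9.5}}`)

	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		panic(err)
	}

	aggs := map[string]any{}
	for key, msg := range raw {
		typ, name, ok := strings.Cut(key, "#")
		if !ok {
			continue // plain field such as doc_count
		}
		// The vendored code switches on typ to pick a concrete aggregate
		// struct; a map[string]any stands in for that here.
		var v map[string]any
		if err := json.Unmarshal(msg, &v); err != nil {
			panic(err)
		}
		fmt.Printf("agg %q of type %q\n", name, typ)
		aggs[name] = v
	}
}
```
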
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L439-L444 +type LongTermsAggregate struct { + Buckets BucketsLongTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` +} + +func (s *LongTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]LongTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []LongTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "sum_other_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumOtherDocCount", err) + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f + } + + } + } + return nil +} + +// NewLongTermsAggregate returns a LongTermsAggregate. +func NewLongTermsAggregate() *LongTermsAggregate { + r := &LongTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsbucket.go new file mode 100644 index 000000000..d3ae5d258 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/longtermsbucket.go @@ -0,0 +1,677 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
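
> **Editorial note.** Counters such as `doc_count` and `doc_count_error_upper_bound` are decoded through an `any` plus a type switch because the server may serialize them either as JSON numbers or as strings. A standalone sketch of that tolerant int64 decode (the helper name is illustrative, not part of the vendored API):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// toInt64 accepts both 42 and "42", mirroring the vendored tmp/type-switch.
func toInt64(raw json.RawMessage) (int64, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return 0, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.ParseInt(v, 10, 64)
	case float64: // encoding/json decodes every JSON number into float64
		return int64(v), nil
	}
	return 0, fmt.Errorf("unsupported value %v", tmp)
}

func main() {
	a, _ := toInt64(json.RawMessage(`"42"`))
	b, _ := toInt64(json.RawMessage(`42`))
	fmt.Println(a, b) // 42 42
}
```
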
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// LongTermsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L446-L449 +type LongTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Key int64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` +} + +func (s *LongTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + s.Key = value + case float64: + f := int64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := 
NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s LongTermsBucket) MarshalJSON() ([]byte, error) { + type opt LongTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewLongTermsBucket returns a LongTermsBucket. +func NewLongTermsBucket() *LongTermsBucket { + r := &LongTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasenormalizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasenormalizer.go new file mode 100644 index 000000000..a16f17e24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasenormalizer.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// LowercaseNormalizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/normalizers.ts#L26-L28 +type LowercaseNormalizer struct { + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s LowercaseNormalizer) MarshalJSON() ([]byte, error) { + type innerLowercaseNormalizer LowercaseNormalizer + tmp := innerLowercaseNormalizer{ + Type: s.Type, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + +// NewLowercaseNormalizer returns a LowercaseNormalizer. +func NewLowercaseNormalizer() *LowercaseNormalizer { + r := &LowercaseNormalizer{} + + return r +} + +type LowercaseNormalizerVariant interface { + LowercaseNormalizerCaster() *LowercaseNormalizer +} + +func (s *LowercaseNormalizer) LowercaseNormalizerCaster() *LowercaseNormalizer { + return s +} + +func (s *LowercaseNormalizer) NormalizerCaster() *Normalizer { + o := Normalizer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercaseprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercaseprocessor.go new file mode 100644 index 000000000..c8f00c61f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercaseprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// LowercaseProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1231-L1247 +type LowercaseProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to make lowercase. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *LowercaseProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewLowercaseProcessor returns a LowercaseProcessor. +func NewLowercaseProcessor() *LowercaseProcessor { + r := &LowercaseProcessor{} + + return r +} + +type LowercaseProcessorVariant interface { + LowercaseProcessorCaster() *LowercaseProcessor +} + +func (s *LowercaseProcessor) LowercaseProcessorCaster() *LowercaseProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenfilter.go new file mode 100644 index 000000000..c1a91d0e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenfilter.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lowercasetokenfilterlanguages" +) + +// LowercaseTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L350-L354 +type LowercaseTokenFilter struct { + // Language Language-specific lowercase token filter to use. + Language *lowercasetokenfilterlanguages.LowercaseTokenFilterLanguages `json:"language,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LowercaseTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LowercaseTokenFilter) MarshalJSON() ([]byte, error) { + type innerLowercaseTokenFilter LowercaseTokenFilter + tmp := innerLowercaseTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + +// NewLowercaseTokenFilter returns a LowercaseTokenFilter. +func NewLowercaseTokenFilter() *LowercaseTokenFilter { + r := &LowercaseTokenFilter{} + + return r +} + +type LowercaseTokenFilterVariant interface { + LowercaseTokenFilterCaster() *LowercaseTokenFilter +} + +func (s *LowercaseTokenFilter) LowercaseTokenFilterCaster() *LowercaseTokenFilter { + return s +} + +func (s *LowercaseTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenizer.go new file mode 100644 index 000000000..f4c220cf4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/lowercasetokenizer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
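Review note: the analysis components in this batch all share the same `MarshalJSON` pattern — the struct is copied into an inner alias type and the `type` discriminator is overwritten with its literal value, so even a zero value serializes with the correct tag (the same trick reappears below for `MappingCharFilter`, pinned to `"mapping"`). A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Whatever Type held beforehand, marshalling pins the discriminator.
	f := types.NewLowercaseTokenFilter()
	b, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"type":"lowercase"}
}
```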
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// LowercaseTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L81-L83 +type LowercaseTokenizer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LowercaseTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LowercaseTokenizer) MarshalJSON() ([]byte, error) { + type innerLowercaseTokenizer LowercaseTokenizer + tmp := innerLowercaseTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + +// NewLowercaseTokenizer returns a LowercaseTokenizer. +func NewLowercaseTokenizer() *LowercaseTokenizer { + r := &LowercaseTokenizer{} + + return r +} + +type LowercaseTokenizerVariant interface { + LowercaseTokenizerCaster() *LowercaseTokenizer +} + +func (s *LowercaseTokenizer) LowercaseTokenizerCaster() *LowercaseTokenizer { + return s +} + +func (s *LowercaseTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/machinelearning.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/machinelearning.go new file mode 100644 index 000000000..5f6c2e37a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/machinelearning.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MachineLearning type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L378-L385 +type MachineLearning struct { + Available bool `json:"available"` + DataFrameAnalyticsJobs MlDataFrameAnalyticsJobs `json:"data_frame_analytics_jobs"` + Datafeeds map[string]XpackDatafeed `json:"datafeeds"` + Enabled bool `json:"enabled"` + Inference MlInference `json:"inference"` + // Jobs Job usage statistics. The `_all` entry is always present and gathers + // statistics for all jobs. + Jobs map[string]JobUsage `json:"jobs"` + NodeCount int `json:"node_count"` +} + +func (s *MachineLearning) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "data_frame_analytics_jobs": + if err := dec.Decode(&s.DataFrameAnalyticsJobs); err != nil { + return fmt.Errorf("%s | %w", "DataFrameAnalyticsJobs", err) + } + + case "datafeeds": + if s.Datafeeds == nil { + s.Datafeeds = make(map[string]XpackDatafeed, 0) + } + if err := dec.Decode(&s.Datafeeds); err != nil { + return fmt.Errorf("%s | %w", "Datafeeds", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "inference": + if err := dec.Decode(&s.Inference); err != nil { + return fmt.Errorf("%s | %w", "Inference", err) + } + + case "jobs": + if s.Jobs == nil { + s.Jobs = make(map[string]JobUsage, 0) + } + if err := dec.Decode(&s.Jobs); err != nil { + return fmt.Errorf("%s | %w", "Jobs", err) + } + + case "node_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NodeCount", err) + } + s.NodeCount = value + case float64: + f := int(v) + s.NodeCount = f + } + + } + } + return nil +} + +// NewMachineLearning returns a MachineLearning. +func NewMachineLearning() *MachineLearning { + r := &MachineLearning{ + Datafeeds: make(map[string]XpackDatafeed), + Jobs: make(map[string]JobUsage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/manageuserprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/manageuserprivileges.go new file mode 100644 index 000000000..3d2751c15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/manageuserprivileges.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
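Review note: for the `MachineLearning` usage payload above, the constructor pre-allocates the `datafeeds` and `jobs` maps, and the decoder again tolerates stringified scalars. A minimal decoding sketch (the JSON body is illustrative, not a real usage response):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	ml := types.NewMachineLearning()
	// "available" as a quoted string and "node_count" as a JSON number
	// are both accepted by the generated decoder.
	data := []byte(`{"available":"true","enabled":true,"node_count":3}`)
	if err := json.Unmarshal(data, ml); err != nil {
		panic(err)
	}
	fmt.Println(ml.Available, ml.Enabled, ml.NodeCount) // true true 3
}
```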
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ManageUserPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L440-L442 +type ManageUserPrivileges struct { + Applications []string `json:"applications"` +} + +// NewManageUserPrivileges returns a ManageUserPrivileges. +func NewManageUserPrivileges() *ManageUserPrivileges { + r := &ManageUserPrivileges{} + + return r +} + +type ManageUserPrivilegesVariant interface { + ManageUserPrivilegesCaster() *ManageUserPrivileges +} + +func (s *ManageUserPrivileges) ManageUserPrivilegesCaster() *ManageUserPrivileges { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mapboxvectortiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mapboxvectortiles.go new file mode 100644 index 000000000..d4820726c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mapboxvectortiles.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MapboxVectorTiles type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Binary.ts#L21-L21 +type MapboxVectorTiles []byte diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingcharfilter.go new file mode 100644 index 000000000..498b43257 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingcharfilter.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
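Review note: `ManageUserPrivileges` carries only the application patterns the privilege applies to, and since it defines no custom marshaller, plain struct literals serialize directly (the pattern string below is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.ManageUserPrivileges{Applications: []string{"myapp-*"}}
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // {"applications":["myapp-*"]}
}
```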
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingCharFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/char_filters.ts#L51-L55 +type MappingCharFilter struct { + Mappings []string `json:"mappings,omitempty"` + MappingsPath *string `json:"mappings_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *MappingCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "mappings_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MappingsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MappingsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s MappingCharFilter) MarshalJSON() ([]byte, error) { + type innerMappingCharFilter MappingCharFilter + tmp := innerMappingCharFilter{ + Mappings: s.Mappings, + MappingsPath: s.MappingsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "mapping" + + return json.Marshal(tmp) +} + +// NewMappingCharFilter returns a MappingCharFilter. +func NewMappingCharFilter() *MappingCharFilter { + r := &MappingCharFilter{} + + return r +} + +type MappingCharFilterVariant interface { + MappingCharFilterCaster() *MappingCharFilter +} + +func (s *MappingCharFilter) MappingCharFilterCaster() *MappingCharFilter { + return s +} + +func (s *MappingCharFilter) CharFilterDefinitionCaster() *CharFilterDefinition { + o := CharFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettings.go new file mode 100644 index 000000000..e4efd106b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettings.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L438-L452 +type MappingLimitSettings struct { + Coerce *bool `json:"coerce,omitempty"` + Depth *MappingLimitSettingsDepth `json:"depth,omitempty"` + DimensionFields *MappingLimitSettingsDimensionFields `json:"dimension_fields,omitempty"` + FieldNameLength *MappingLimitSettingsFieldNameLength `json:"field_name_length,omitempty"` + IgnoreMalformed *string `json:"ignore_malformed,omitempty"` + NestedFields *MappingLimitSettingsNestedFields `json:"nested_fields,omitempty"` + NestedObjects *MappingLimitSettingsNestedObjects `json:"nested_objects,omitempty"` + Source *MappingLimitSettingsSourceFields `json:"source,omitempty"` + TotalFields *MappingLimitSettingsTotalFields `json:"total_fields,omitempty"` +} + +func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "depth": + if err := dec.Decode(&s.Depth); err != nil { + return fmt.Errorf("%s | %w", "Depth", err) + } + + case "dimension_fields": + if err := dec.Decode(&s.DimensionFields); err != nil { + return fmt.Errorf("%s | %w", "DimensionFields", err) + } + + case "field_name_length": + if err := dec.Decode(&s.FieldNameLength); err != nil { + return fmt.Errorf("%s | %w", "FieldNameLength", err) + } + + case "ignore_malformed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreMalformed = &o + + case "nested_fields": + if err := dec.Decode(&s.NestedFields); err != nil { + return fmt.Errorf("%s | %w", "NestedFields", err) + } + + case "nested_objects": + if err := dec.Decode(&s.NestedObjects); err != nil { + return fmt.Errorf("%s | %w", "NestedObjects", err) + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "total_fields": + if err := dec.Decode(&s.TotalFields); err != nil { + return fmt.Errorf("%s | %w", "TotalFields", err) + } + + } + } + return nil +} + +// NewMappingLimitSettings returns a MappingLimitSettings. 
+func NewMappingLimitSettings() *MappingLimitSettings { + r := &MappingLimitSettings{} + + return r +} + +type MappingLimitSettingsVariant interface { + MappingLimitSettingsCaster() *MappingLimitSettings +} + +func (s *MappingLimitSettings) MappingLimitSettingsCaster() *MappingLimitSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdepth.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdepth.go new file mode 100644 index 000000000..8bed6306d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdepth.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsDepth type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L473-L480 +type MappingLimitSettingsDepth struct { + // Limit The maximum depth for a field, which is measured as the number of inner + // objects. For instance, if all fields are defined + // at the root object level, then the depth is 1. If there is one object + // mapping, then the depth is 2, etc. + Limit *int64 `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsDepth) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewMappingLimitSettingsDepth returns a MappingLimitSettingsDepth. 
+func NewMappingLimitSettingsDepth() *MappingLimitSettingsDepth { + r := &MappingLimitSettingsDepth{} + + return r +} + +type MappingLimitSettingsDepthVariant interface { + MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth +} + +func (s *MappingLimitSettingsDepth) MappingLimitSettingsDepthCaster() *MappingLimitSettingsDepth { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdimensionfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdimensionfields.go new file mode 100644 index 000000000..5e6a9eeef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsdimensionfields.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsDimensionFields type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L510-L516 +type MappingLimitSettingsDimensionFields struct { + // Limit [preview] This functionality is in technical preview and may be changed or + // removed in a future release. + // Elastic will work to fix any issues, but features in technical preview are + // not subject to the support SLA of official GA features. + Limit *int64 `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsDimensionFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewMappingLimitSettingsDimensionFields returns a MappingLimitSettingsDimensionFields. 
+func NewMappingLimitSettingsDimensionFields() *MappingLimitSettingsDimensionFields { + r := &MappingLimitSettingsDimensionFields{} + + return r +} + +type MappingLimitSettingsDimensionFieldsVariant interface { + MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields +} + +func (s *MappingLimitSettingsDimensionFields) MappingLimitSettingsDimensionFieldsCaster() *MappingLimitSettingsDimensionFields { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsfieldnamelength.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsfieldnamelength.go new file mode 100644 index 000000000..ad68e8d1d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsfieldnamelength.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsFieldNameLength type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L501-L508 +type MappingLimitSettingsFieldNameLength struct { + // Limit Setting for the maximum length of a field name. This setting isn’t really + // something that addresses mappings explosion but + // might still be useful if you want to limit the field length. It usually + // shouldn’t be necessary to set this setting. The + // default is okay unless a user starts to add a huge number of fields with + // really long names. Default is `Long.MAX_VALUE` (no limit). + Limit *int64 `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsFieldNameLength) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewMappingLimitSettingsFieldNameLength returns a MappingLimitSettingsFieldNameLength. 
+func NewMappingLimitSettingsFieldNameLength() *MappingLimitSettingsFieldNameLength { + r := &MappingLimitSettingsFieldNameLength{} + + return r +} + +type MappingLimitSettingsFieldNameLengthVariant interface { + MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength +} + +func (s *MappingLimitSettingsFieldNameLength) MappingLimitSettingsFieldNameLengthCaster() *MappingLimitSettingsFieldNameLength { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedfields.go new file mode 100644 index 000000000..23693b63f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedfields.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsNestedFields type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L482-L490 +type MappingLimitSettingsNestedFields struct { + // Limit The maximum number of distinct nested mappings in an index. The nested type + // should only be used in special cases, when + // arrays of objects need to be queried independently of each other. To + // safeguard against poorly designed mappings, this + // setting limits the number of unique nested types per index. + Limit *int64 `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsNestedFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewMappingLimitSettingsNestedFields returns a MappingLimitSettingsNestedFields. 
+func NewMappingLimitSettingsNestedFields() *MappingLimitSettingsNestedFields { + r := &MappingLimitSettingsNestedFields{} + + return r +} + +type MappingLimitSettingsNestedFieldsVariant interface { + MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields +} + +func (s *MappingLimitSettingsNestedFields) MappingLimitSettingsNestedFieldsCaster() *MappingLimitSettingsNestedFields { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedobjects.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedobjects.go new file mode 100644 index 000000000..385ffafef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingsnestedobjects.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsNestedObjects type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L492-L499 +type MappingLimitSettingsNestedObjects struct { + // Limit The maximum number of nested JSON objects that a single document can contain + // across all nested types. This limit helps + // to prevent out of memory errors when a document contains too many nested + // objects. + Limit *int64 `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsNestedObjects) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + +// NewMappingLimitSettingsNestedObjects returns a MappingLimitSettingsNestedObjects. 
+func NewMappingLimitSettingsNestedObjects() *MappingLimitSettingsNestedObjects { + r := &MappingLimitSettingsNestedObjects{} + + return r +} + +type MappingLimitSettingsNestedObjectsVariant interface { + MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects +} + +func (s *MappingLimitSettingsNestedObjects) MappingLimitSettingsNestedObjectsCaster() *MappingLimitSettingsNestedObjects { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingssourcefields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingssourcefields.go new file mode 100644 index 000000000..00075694a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingssourcefields.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcemode" +) + +// MappingLimitSettingsSourceFields type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L518-L520 +type MappingLimitSettingsSourceFields struct { + Mode sourcemode.SourceMode `json:"mode"` +} + +// NewMappingLimitSettingsSourceFields returns a MappingLimitSettingsSourceFields. +func NewMappingLimitSettingsSourceFields() *MappingLimitSettingsSourceFields { + r := &MappingLimitSettingsSourceFields{} + + return r +} + +type MappingLimitSettingsSourceFieldsVariant interface { + MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields +} + +func (s *MappingLimitSettingsSourceFields) MappingLimitSettingsSourceFieldsCaster() *MappingLimitSettingsSourceFields { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingstotalfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingstotalfields.go new file mode 100644 index 000000000..21fe3db36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappinglimitsettingstotalfields.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
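Review note: every `MappingLimitSettings*` sub-type above decodes its `limit` the same way — a quoted string goes through `strconv.ParseInt`, and a JSON number is truncated from `float64`, so both index-settings spellings land in the same `*int64` (`MappingLimitSettingsTotalFields`, next, instead keeps its values as strings). A minimal sketch using the depth limit:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var fromString, fromNumber types.MappingLimitSettingsDepth
	// Both spellings of the setting decode to the same value.
	_ = json.Unmarshal([]byte(`{"limit":"20"}`), &fromString) // string form
	_ = json.Unmarshal([]byte(`{"limit":20}`), &fromNumber)   // numeric form
	fmt.Println(*fromString.Limit, *fromNumber.Limit) // 20 20
}
```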
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingLimitSettingsTotalFields type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L454-L471 +type MappingLimitSettingsTotalFields struct { + // IgnoreDynamicBeyondLimit This setting determines what happens when a dynamically mapped field would + // exceed the total fields limit. When set + // to false (the default), the index request of the document that tries to add a + // dynamic field to the mapping will fail + // with the message Limit of total fields [X] has been exceeded. When set to + // true, the index request will not fail. + // Instead, fields that would exceed the limit are not added to the mapping, + // similar to dynamic: false. + // The fields that were not added to the mapping will be added to the _ignored + // field. + IgnoreDynamicBeyondLimit *string `json:"ignore_dynamic_beyond_limit,omitempty"` + // Limit The maximum number of fields in an index. Field and object mappings, as well + // as field aliases count towards this limit. + // The limit is in place to prevent mappings and searches from becoming too + // large. Higher values can lead to performance + // degradations and memory issues, especially in clusters with a high load or + // few resources. + Limit *string `json:"limit,omitempty"` +} + +func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_dynamic_beyond_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IgnoreDynamicBeyondLimit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IgnoreDynamicBeyondLimit = &o + + case "limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Limit = &o + + } + } + return nil +} + +// NewMappingLimitSettingsTotalFields returns a MappingLimitSettingsTotalFields. 
+func NewMappingLimitSettingsTotalFields() *MappingLimitSettingsTotalFields { + r := &MappingLimitSettingsTotalFields{} + + return r +} + +type MappingLimitSettingsTotalFieldsVariant interface { + MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields +} + +func (s *MappingLimitSettingsTotalFields) MappingLimitSettingsTotalFieldsCaster() *MappingLimitSettingsTotalFields { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingstats.go new file mode 100644 index 000000000..5dc94a75b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mappingstats.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MappingStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L186-L190 +type MappingStats struct { + TotalCount int64 `json:"total_count"` + TotalEstimatedOverhead ByteSize `json:"total_estimated_overhead,omitempty"` + TotalEstimatedOverheadInBytes int64 `json:"total_estimated_overhead_in_bytes"` +} + +func (s *MappingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCount", err) + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + case "total_estimated_overhead": + if err := dec.Decode(&s.TotalEstimatedOverhead); err != nil { + return fmt.Errorf("%s | %w", "TotalEstimatedOverhead", err) + } + + case "total_estimated_overhead_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalEstimatedOverheadInBytes", err) + } + s.TotalEstimatedOverheadInBytes = value + case float64: + f := int64(v) + s.TotalEstimatedOverheadInBytes = f + } + + } + } + return nil +} + +// NewMappingStats returns a MappingStats. 
+func NewMappingStats() *MappingStats { + r := &MappingStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicator.go new file mode 100644 index 000000000..a2779682f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// MasterIsStableIndicator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L82-L86 +type MasterIsStableIndicator struct { + Details *MasterIsStableIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *MasterIsStableIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewMasterIsStableIndicator returns a MasterIsStableIndicator. 
+func NewMasterIsStableIndicator() *MasterIsStableIndicator { + r := &MasterIsStableIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorclusterformationnode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorclusterformationnode.go new file mode 100644 index 000000000..3aa23da0f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorclusterformationnode.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MasterIsStableIndicatorClusterFormationNode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L101-L105 +type MasterIsStableIndicatorClusterFormationNode struct { + ClusterFormationMessage string `json:"cluster_formation_message"` + Name *string `json:"name,omitempty"` + NodeId string `json:"node_id"` +} + +func (s *MasterIsStableIndicatorClusterFormationNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_formation_message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClusterFormationMessage", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterFormationMessage = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "node_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeId = o + + } + } + return nil +} + +// NewMasterIsStableIndicatorClusterFormationNode returns a MasterIsStableIndicatorClusterFormationNode. 
+func NewMasterIsStableIndicatorClusterFormationNode() *MasterIsStableIndicatorClusterFormationNode { + r := &MasterIsStableIndicatorClusterFormationNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatordetails.go new file mode 100644 index 000000000..50a857285 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatordetails.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MasterIsStableIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L87-L92 +type MasterIsStableIndicatorDetails struct { + ClusterFormation []MasterIsStableIndicatorClusterFormationNode `json:"cluster_formation,omitempty"` + CurrentMaster IndicatorNode `json:"current_master"` + ExceptionFetchingHistory *MasterIsStableIndicatorExceptionFetchingHistory `json:"exception_fetching_history,omitempty"` + RecentMasters []IndicatorNode `json:"recent_masters"` +} + +// NewMasterIsStableIndicatorDetails returns a MasterIsStableIndicatorDetails. +func NewMasterIsStableIndicatorDetails() *MasterIsStableIndicatorDetails { + r := &MasterIsStableIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go new file mode 100644 index 000000000..57a6deb9a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MasterIsStableIndicatorExceptionFetchingHistory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L97-L100 +type MasterIsStableIndicatorExceptionFetchingHistory struct { + Message string `json:"message"` + StackTrace string `json:"stack_trace"` +} + +func (s *MasterIsStableIndicatorExceptionFetchingHistory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "stack_trace": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StackTrace", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StackTrace = o + + } + } + return nil +} + +// NewMasterIsStableIndicatorExceptionFetchingHistory returns a MasterIsStableIndicatorExceptionFetchingHistory. +func NewMasterIsStableIndicatorExceptionFetchingHistory() *MasterIsStableIndicatorExceptionFetchingHistory { + r := &MasterIsStableIndicatorExceptionFetchingHistory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterrecord.go new file mode 100644 index 000000000..07422b126 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/masterrecord.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MasterRecord type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/master/types.ts#L20-L39 +type MasterRecord struct { + // Host host name + Host *string `json:"host,omitempty"` + // Id node id + Id *string `json:"id,omitempty"` + // Ip ip address + Ip *string `json:"ip,omitempty"` + // Node node name + Node *string `json:"node,omitempty"` +} + +func (s *MasterRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + } + } + return nil +} + +// NewMasterRecord returns a MasterRecord. +func NewMasterRecord() *MasterRecord { + r := &MasterRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchallquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchallquery.go new file mode 100644 index 000000000..7394510ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchallquery.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatchAllQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/MatchAllQuery.ts#L22-L25 +type MatchAllQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. 
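Worth noting from the MasterRecord decoder above: the cat master API may return abbreviated column names, and the generated UnmarshalJSON maps the aliases "h" and "n" onto Host and Node. A minimal usage sketch — assuming the vendored github.com/elastic/go-elasticsearch/v9 module is on the import path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Abbreviated cat-API column names, handled by the alias cases
	// ("h" for host, "n" for node) in MasterRecord.UnmarshalJSON.
	raw := []byte(`{"h":"10.0.0.7","ip":"10.0.0.7","n":"es-master-0"}`)

	var rec types.MasterRecord
	if err := json.Unmarshal(raw, &rec); err != nil {
		panic(err)
	}
	fmt.Println(*rec.Host, *rec.Node) // 10.0.0.7 es-master-0
}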
+ // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *MatchAllQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewMatchAllQuery returns a MatchAllQuery. +func NewMatchAllQuery() *MatchAllQuery { + r := &MatchAllQuery{} + + return r +} + +type MatchAllQueryVariant interface { + MatchAllQueryCaster() *MatchAllQuery +} + +func (s *MatchAllQuery) MatchAllQueryCaster() *MatchAllQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchboolprefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchboolprefixquery.go new file mode 100644 index 000000000..3eaf24323 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchboolprefixquery.go @@ -0,0 +1,242 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +// MatchBoolPrefixQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L406-L463 +type MatchBoolPrefixQuery struct { + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
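With MatchAllQuery complete, a short sketch of how the boost semantics documented above look on the wire; the types.Query container is defined elsewhere in this vendored package and is assumed here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// A boost between 0 and 1.0 demotes match_all hits relative to
	// other query clauses, per the Boost field documentation.
	boost := float32(0.5)
	name := "all-docs"

	q := types.Query{
		MatchAll: &types.MatchAllQuery{Boost: &boost, QueryName_: &name},
	}

	body, err := json.Marshal(q)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"match_all":{"boost":0.5,"_name":"all-docs"}}
}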
+ Boost *float32 `json:"boost,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + // Can be applied to the term subqueries constructed for all terms but the final + // term. + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // MaxExpansions Maximum number of terms to which the query will expand. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + MaxExpansions *int `json:"max_expansions,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + // Applied to the constructed bool query. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + // Applied to the constructed bool query. + Operator *operator.Operator `json:"operator,omitempty"` + // PrefixLength Number of beginning characters left unchanged for fuzzy matching. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + PrefixLength *int `json:"prefix_length,omitempty"` + // Query Terms you wish to find in the provided field. + // The last term is used in a prefix query. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *MatchBoolPrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
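// NOTE: at this point the payload is known not to be a JSON object. The
// generated decoder treats such input as the shorthand form in which the
// whole value is the query text; a scalar that is not already quoted was
// wrapped in quotes above so the string decode below succeeds.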
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return fmt.Errorf("%s | %w", "FuzzyRewrite", err) + } + + case "fuzzy_transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyTranspositions", err) + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxExpansions", err) + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return fmt.Errorf("%s | %w", "Operator", err) + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewMatchBoolPrefixQuery returns a MatchBoolPrefixQuery. +func NewMatchBoolPrefixQuery() *MatchBoolPrefixQuery { + r := &MatchBoolPrefixQuery{} + + return r +} + +type MatchBoolPrefixQueryVariant interface { + MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery +} + +func (s *MatchBoolPrefixQuery) MatchBoolPrefixQueryCaster() *MatchBoolPrefixQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedfield.go new file mode 100644 index 000000000..a8dd2862a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedfield.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatchedField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/test_grok_pattern/types.ts#L23-L27 +type MatchedField struct { + Length int `json:"length"` + Match string `json:"match"` + Offset int `json:"offset"` +} + +func (s *MatchedField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "match": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Match = o + + case "offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + } + } + return nil +} + +// NewMatchedField returns a MatchedField. +func NewMatchedField() *MatchedField { + r := &MatchedField{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedtext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedtext.go new file mode 100644 index 000000000..d65b32447 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchedtext.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatchedText type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/test_grok_pattern/types.ts#L29-L32 +type MatchedText struct { + Fields map[string][]MatchedField `json:"fields,omitempty"` + Matched bool `json:"matched"` +} + +func (s *MatchedText) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]MatchedField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "matched": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Matched", err) + } + s.Matched = value + case bool: + s.Matched = v + } + + } + } + return nil +} + +// NewMatchedText returns a MatchedText. +func NewMatchedText() *MatchedText { + r := &MatchedText{ + Fields: make(map[string][]MatchedField), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchnonequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchnonequery.go new file mode 100644 index 000000000..84450cfcf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchnonequery.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatchNoneQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L25 +type MatchNoneQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
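The MatchedText/MatchedField pair above models the text_structure test-grok-pattern response. A small decoding sketch — the field names in the sample payload are illustrative, and the vendored module is assumed importable:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Illustrative test_grok_pattern-style payload: one captured field
	// with its matched substring, byte offset, and length.
	raw := []byte(`{
		"matched": true,
		"fields": {
			"timestamp": [{"match": "2025-01-31T12:00:00", "offset": 0, "length": 19}]
		}
	}`)

	var mt types.MatchedText
	if err := json.Unmarshal(raw, &mt); err != nil {
		panic(err)
	}
	for name, hits := range mt.Fields {
		for _, h := range hits {
			fmt.Printf("%s matched %q at offset %d (len %d)\n", name, h.Match, h.Offset, h.Length)
		}
	}
}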
+ Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *MatchNoneQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewMatchNoneQuery returns a MatchNoneQuery. +func NewMatchNoneQuery() *MatchNoneQuery { + r := &MatchNoneQuery{} + + return r +} + +type MatchNoneQueryVariant interface { + MatchNoneQueryCaster() *MatchNoneQuery +} + +func (s *MatchNoneQuery) MatchNoneQueryCaster() *MatchNoneQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchonlytextproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchonlytextproperty.go new file mode 100644 index 000000000..1eef9ea2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchonlytextproperty.go @@ -0,0 +1,470 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MatchOnlyTextProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L282-L307 +type MatchOnlyTextProperty struct { + // CopyTo Allows you to copy the values of multiple fields into a group + // field, which can then be queried as a single field. + CopyTo []string `json:"copy_to,omitempty"` + // Fields Multi-fields allow the same string value to be indexed in multiple ways for + // different purposes, such as one + // field for search and a multi-field for sorting and aggregations, or the same + // string value analyzed by different analyzers. + Fields map[string]Property `json:"fields,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s MatchOnlyTextProperty) MarshalJSON() ([]byte, error) { + type innerMatchOnlyTextProperty MatchOnlyTextProperty + tmp := innerMatchOnlyTextProperty{ + CopyTo: s.CopyTo, + Fields: s.Fields, + Meta: s.Meta, + Type: s.Type, + } + + tmp.Type = "match_only_text" + + return json.Marshal(tmp) +} + +// NewMatchOnlyTextProperty returns a 
MatchOnlyTextProperty. +func NewMatchOnlyTextProperty() *MatchOnlyTextProperty { + r := &MatchOnlyTextProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + } + + return r +} + +type MatchOnlyTextPropertyVariant interface { + MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty +} + +func (s *MatchOnlyTextProperty) MatchOnlyTextPropertyCaster() *MatchOnlyTextProperty { + return s +} + +func (s *MatchOnlyTextProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphraseprefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphraseprefixquery.go new file mode 100644 index 000000000..5f4e4f8ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphraseprefixquery.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery" +) + +// MatchPhrasePrefixQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L491-L520 +type MatchPhrasePrefixQuery struct { + // Analyzer Analyzer used to convert text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MaxExpansions Maximum number of terms to which the last provided term of the query value + // will expand. + MaxExpansions *int `json:"max_expansions,omitempty"` + // Query Text you wish to find in the provided field. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Maximum number of positions allowed between matching tokens. + Slop *int `json:"slop,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the analyzer removes all + // tokens, such as when using a `stop` filter. + ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` +} + +func (s *MatchPhrasePrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) 
+ data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxExpansions", err) + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Slop", err) + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return fmt.Errorf("%s | %w", "ZeroTermsQuery", err) + } + + } + } + return nil +} + +// NewMatchPhrasePrefixQuery returns a MatchPhrasePrefixQuery. +func NewMatchPhrasePrefixQuery() *MatchPhrasePrefixQuery { + r := &MatchPhrasePrefixQuery{} + + return r +} + +type MatchPhrasePrefixQueryVariant interface { + MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery +} + +func (s *MatchPhrasePrefixQuery) MatchPhrasePrefixQueryCaster() *MatchPhrasePrefixQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphrasequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphrasequery.go new file mode 100644 index 000000000..ecf7a8ca4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchphrasequery.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery" +) + +// MatchPhraseQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L465-L489 +type MatchPhraseQuery struct { + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Query Query terms that are analyzed and turned into a phrase query. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Maximum number of positions allowed between matching tokens. + Slop *int `json:"slop,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all + // tokens, such as when using a `stop` filter. + ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` +} + +func (s *MatchPhraseQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Slop", err) + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return fmt.Errorf("%s | %w", "ZeroTermsQuery", err) + } + + } + } + return nil +} + +// NewMatchPhraseQuery returns a MatchPhraseQuery. +func NewMatchPhraseQuery() *MatchPhraseQuery { + r := &MatchPhraseQuery{} + + return r +} + +type MatchPhraseQueryVariant interface { + MatchPhraseQueryCaster() *MatchPhraseQuery +} + +func (s *MatchPhraseQuery) MatchPhraseQueryCaster() *MatchPhraseQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchquery.go new file mode 100644 index 000000000..2151d6edc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matchquery.go @@ -0,0 +1,289 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
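One behavior of the MatchPhraseQuery decoder just shown that is easy to miss: input that is not a JSON object is treated as the bare query string, mirroring Elasticsearch's short query form. A sketch, assuming the vendored module is importable:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Full object form.
	var full types.MatchPhraseQuery
	if err := json.Unmarshal([]byte(`{"query":"quick brown fox","slop":2}`), &full); err != nil {
		panic(err)
	}
	fmt.Println(full.Query, *full.Slop) // quick brown fox 2

	// Bare-string shorthand, accepted because UnmarshalJSON falls back
	// to decoding non-object input straight into Query.
	var short types.MatchPhraseQuery
	if err := json.Unmarshal([]byte(`"quick brown fox"`), &short); err != nil {
		panic(err)
	}
	fmt.Println(short.Query) // quick brown fox
}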
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery" +) + +// MatchQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L336-L404 +type MatchQuery struct { + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text query value for a + // numeric field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MaxExpansions Maximum number of terms to which the query will expand. + MaxExpansions *int `json:"max_expansions,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + Operator *operator.Operator `json:"operator,omitempty"` + // PrefixLength Number of beginning characters left unchanged for fuzzy matching. + PrefixLength *int `json:"prefix_length,omitempty"` + // Query Text, number, boolean value or date you wish to find in the provided field. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all + // tokens, such as when using a `stop` filter. + ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` +} + +func (s *MatchQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoGenerateSynonymsPhraseQuery", err) + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "cutoff_frequency": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CutoffFrequency", err) + } + f := Float64(value) + s.CutoffFrequency = &f + case float64: + f := Float64(v) + s.CutoffFrequency = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return fmt.Errorf("%s | %w", "FuzzyRewrite", err) + } + + case "fuzzy_transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyTranspositions", err) + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "lenient": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxExpansions", err) + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return fmt.Errorf("%s | %w", "Operator", err) + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + 
return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return fmt.Errorf("%s | %w", "ZeroTermsQuery", err) + } + + } + } + return nil +} + +// NewMatchQuery returns a MatchQuery. +func NewMatchQuery() *MatchQuery { + r := &MatchQuery{} + + return r +} + +type MatchQueryVariant interface { + MatchQueryCaster() *MatchQuery +} + +func (s *MatchQuery) MatchQueryCaster() *MatchQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregate.go new file mode 100644 index 000000000..eebe9b23c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatrixStatsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L868-L875 +type MatrixStatsAggregate struct { + DocCount int64 `json:"doc_count"` + Fields []MatrixStatsFields `json:"fields,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *MatrixStatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewMatrixStatsAggregate returns a MatrixStatsAggregate. 
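With MatchQuery complete, a construction sketch; types.Query and the operator enum live elsewhere in this vendored package and are assumed here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator"
)

func main() {
	op := operator.And
	maxExpansions := 10

	q := types.Query{
		Match: map[string]types.MatchQuery{
			"message": {
				Query:         "error timeout",
				Operator:      &op,
				MaxExpansions: &maxExpansions,
			},
		},
	}

	body, err := json.Marshal(q)
	if err != nil {
		panic(err)
	}
	// Prints something like:
	// {"match":{"message":{"max_expansions":10,"operator":"and","query":"error timeout"}}}
	fmt.Println(string(body))
}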
+func NewMatrixStatsAggregate() *MatrixStatsAggregate { + r := &MatrixStatsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregation.go new file mode 100644 index 000000000..ceb27cf08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsaggregation.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode" +) + +// MatrixStatsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/matrix.ts#L38-L44 +type MatrixStatsAggregation struct { + // Fields An array of fields for computing the statistics. + Fields []string `json:"fields,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing map[string]Float64 `json:"missing,omitempty"` + // Mode Array value the aggregation will use for array or multi-valued fields. + Mode *sortmode.SortMode `json:"mode,omitempty"` +} + +func (s *MatrixStatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "missing": + if s.Missing == nil { + s.Missing = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + } + } + return nil +} + +// NewMatrixStatsAggregation returns a MatrixStatsAggregation. 
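A request-side sketch for the MatrixStatsAggregation above; the types.Aggregations container and the sortmode enum are assumed from elsewhere in the vendored package, and the field names are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode"
)

func main() {
	mode := sortmode.Avg

	aggs := types.Aggregations{
		MatrixStats: &types.MatrixStatsAggregation{
			// Statistics are computed pairwise across these fields.
			Fields: []string{"poverty", "income"},
			// Substitute 50000 for documents missing "income" instead
			// of ignoring them, per the Missing field documentation.
			Missing: map[string]types.Float64{"income": 50000},
			Mode:    &mode,
		},
	}

	body, err := json.Marshal(aggs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}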
+func NewMatrixStatsAggregation() *MatrixStatsAggregation { + r := &MatrixStatsAggregation{ + Missing: make(map[string]Float64), + } + + return r +} + +type MatrixStatsAggregationVariant interface { + MatrixStatsAggregationCaster() *MatrixStatsAggregation +} + +func (s *MatrixStatsAggregation) MatrixStatsAggregationCaster() *MatrixStatsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsfields.go new file mode 100644 index 000000000..4ba149fa6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/matrixstatsfields.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MatrixStatsFields type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L877-L886 +type MatrixStatsFields struct { + Correlation map[string]Float64 `json:"correlation"` + Count int64 `json:"count"` + Covariance map[string]Float64 `json:"covariance"` + Kurtosis Float64 `json:"kurtosis"` + Mean Float64 `json:"mean"` + Name string `json:"name"` + Skewness Float64 `json:"skewness"` + Variance Float64 `json:"variance"` +} + +func (s *MatrixStatsFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "correlation": + if s.Correlation == nil { + s.Correlation = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Correlation); err != nil { + return fmt.Errorf("%s | %w", "Correlation", err) + } + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "covariance": + if s.Covariance == nil { + s.Covariance = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Covariance); err != nil { + return fmt.Errorf("%s | %w", "Covariance", err) + } + + case "kurtosis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Kurtosis", err) + } + f := Float64(value) + s.Kurtosis = f + case float64: + f := Float64(v) + s.Kurtosis = f + } + + case "mean": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Mean", err) + } + f := Float64(value) + s.Mean = f + case float64: + f := Float64(v) + s.Mean = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "skewness": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Skewness", err) + } + f := Float64(value) + s.Skewness = f + case float64: + f := Float64(v) + s.Skewness = f + } + + case "variance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Variance", err) + } + f := Float64(value) + s.Variance = f + case float64: + f := Float64(v) + s.Variance = f + } + + } + } + return nil +} + +// NewMatrixStatsFields returns a MatrixStatsFields. +func NewMatrixStatsFields() *MatrixStatsFields { + r := &MatrixStatsFields{ + Correlation: make(map[string]Float64), + Covariance: make(map[string]Float64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregate.go new file mode 100644 index 000000000..a3877d69e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MaxAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L205-L209 +type MaxAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *MaxAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewMaxAggregate returns a MaxAggregate. +func NewMaxAggregate() *MaxAggregate { + r := &MaxAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregation.go new file mode 100644 index 000000000..7ba0c2cb9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MaxAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L171-L171 +type MaxAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *MaxAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewMaxAggregation returns a MaxAggregation. +func NewMaxAggregation() *MaxAggregation { + r := &MaxAggregation{} + + return r +} + +type MaxAggregationVariant interface { + MaxAggregationCaster() *MaxAggregation +} + +func (s *MaxAggregation) MaxAggregationCaster() *MaxAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxbucketaggregation.go new file mode 100644 index 000000000..2b6949c37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxbucketaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// MaxBucketAggregation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L244-L247 +type MaxBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *MaxBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewMaxBucketAggregation returns a MaxBucketAggregation. +func NewMaxBucketAggregation() *MaxBucketAggregation { + r := &MaxBucketAggregation{} + + return r +} + +type MaxBucketAggregationVariant interface { + MaxBucketAggregationCaster() *MaxBucketAggregation +} + +func (s *MaxBucketAggregation) MaxBucketAggregationCaster() *MaxBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxmind.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxmind.go new file mode 100644 index 000000000..1a7a90eae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/maxmind.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Maxmind type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L55-L57 +type Maxmind struct { + AccountId string `json:"account_id"` +} + +func (s *Maxmind) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account_id": + if err := dec.Decode(&s.AccountId); err != nil { + return fmt.Errorf("%s | %w", "AccountId", err) + } + + } + } + return nil +} + +// NewMaxmind returns a Maxmind. +func NewMaxmind() *Maxmind { + r := &Maxmind{} + + return r +} + +type MaxmindVariant interface { + MaxmindCaster() *Maxmind +} + +func (s *Maxmind) MaxmindCaster() *Maxmind { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregate.go new file mode 100644 index 000000000..1bb12dbb2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MedianAbsoluteDeviationAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L196-L197 +type MedianAbsoluteDeviationAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. 
+ Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *MedianAbsoluteDeviationAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewMedianAbsoluteDeviationAggregate returns a MedianAbsoluteDeviationAggregate. +func NewMedianAbsoluteDeviationAggregate() *MedianAbsoluteDeviationAggregate { + r := &MedianAbsoluteDeviationAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregation.go new file mode 100644 index 000000000..021290039 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/medianabsolutedeviationaggregation.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint" +) + +// MedianAbsoluteDeviationAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L173-L188 +type MedianAbsoluteDeviationAggregation struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. + Compression *Float64 `json:"compression,omitempty"` + // ExecutionHint The default implementation of TDigest is optimized for performance, scaling + // to millions or even billions of sample values while maintaining acceptable + // accuracy levels (close to 1% relative error for millions of samples in some + // cases). + // To use an implementation optimized for accuracy, set this parameter to + // high_accuracy instead. 
+ ExecutionHint *tdigestexecutionhint.TDigestExecutionHint `json:"execution_hint,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *MedianAbsoluteDeviationAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Compression", err) + } + f := Float64(value) + s.Compression = &f + case float64: + f := Float64(v) + s.Compression = &f + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewMedianAbsoluteDeviationAggregation returns a MedianAbsoluteDeviationAggregation. +func NewMedianAbsoluteDeviationAggregation() *MedianAbsoluteDeviationAggregation { + r := &MedianAbsoluteDeviationAggregation{} + + return r +} + +type MedianAbsoluteDeviationAggregationVariant interface { + MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation +} + +func (s *MedianAbsoluteDeviationAggregation) MedianAbsoluteDeviationAggregationCaster() *MedianAbsoluteDeviationAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memmlstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memmlstats.go new file mode 100644 index 000000000..1e2dc1204 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memmlstats.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MemMlStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_memory_stats/types.ts#L90-L111 +type MemMlStats struct { + // AnomalyDetectors Amount of native memory set aside for anomaly detection jobs. + AnomalyDetectors ByteSize `json:"anomaly_detectors,omitempty"` + // AnomalyDetectorsInBytes Amount of native memory, in bytes, set aside for anomaly detection jobs. + AnomalyDetectorsInBytes int `json:"anomaly_detectors_in_bytes"` + // DataFrameAnalytics Amount of native memory set aside for data frame analytics jobs. + DataFrameAnalytics ByteSize `json:"data_frame_analytics,omitempty"` + // DataFrameAnalyticsInBytes Amount of native memory, in bytes, set aside for data frame analytics jobs. + DataFrameAnalyticsInBytes int `json:"data_frame_analytics_in_bytes"` + // Max Maximum amount of native memory (separate to the JVM heap) that may be used + // by machine learning native processes. + Max ByteSize `json:"max,omitempty"` + // MaxInBytes Maximum amount of native memory (separate to the JVM heap), in bytes, that + // may be used by machine learning native processes. + MaxInBytes int `json:"max_in_bytes"` + // NativeCodeOverhead Amount of native memory set aside for loading machine learning native code + // shared libraries. + NativeCodeOverhead ByteSize `json:"native_code_overhead,omitempty"` + // NativeCodeOverheadInBytes Amount of native memory, in bytes, set aside for loading machine learning + // native code shared libraries. + NativeCodeOverheadInBytes int `json:"native_code_overhead_in_bytes"` + // NativeInference Amount of native memory set aside for trained models that have a PyTorch + // model_type. + NativeInference ByteSize `json:"native_inference,omitempty"` + // NativeInferenceInBytes Amount of native memory, in bytes, set aside for trained models that have a + // PyTorch model_type. 
+ NativeInferenceInBytes int `json:"native_inference_in_bytes"` +} + +func (s *MemMlStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_detectors": + if err := dec.Decode(&s.AnomalyDetectors); err != nil { + return fmt.Errorf("%s | %w", "AnomalyDetectors", err) + } + + case "anomaly_detectors_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnomalyDetectorsInBytes", err) + } + s.AnomalyDetectorsInBytes = value + case float64: + f := int(v) + s.AnomalyDetectorsInBytes = f + } + + case "data_frame_analytics": + if err := dec.Decode(&s.DataFrameAnalytics); err != nil { + return fmt.Errorf("%s | %w", "DataFrameAnalytics", err) + } + + case "data_frame_analytics_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DataFrameAnalyticsInBytes", err) + } + s.DataFrameAnalyticsInBytes = value + case float64: + f := int(v) + s.DataFrameAnalyticsInBytes = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + + case "max_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInBytes", err) + } + s.MaxInBytes = value + case float64: + f := int(v) + s.MaxInBytes = f + } + + case "native_code_overhead": + if err := dec.Decode(&s.NativeCodeOverhead); err != nil { + return fmt.Errorf("%s | %w", "NativeCodeOverhead", err) + } + + case "native_code_overhead_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NativeCodeOverheadInBytes", err) + } + s.NativeCodeOverheadInBytes = value + case float64: + f := int(v) + s.NativeCodeOverheadInBytes = f + } + + case "native_inference": + if err := dec.Decode(&s.NativeInference); err != nil { + return fmt.Errorf("%s | %w", "NativeInference", err) + } + + case "native_inference_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NativeInferenceInBytes", err) + } + s.NativeInferenceInBytes = value + case float64: + f := int(v) + s.NativeInferenceInBytes = f + } + + } + } + return nil +} + +// NewMemMlStats returns a MemMlStats. +func NewMemMlStats() *MemMlStats { + r := &MemMlStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memory.go new file mode 100644 index 000000000..e68c91e08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memory.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Memory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_memory_stats/types.ts#L25-L48 +type Memory struct { + Attributes map[string]string `json:"attributes"` + EphemeralId string `json:"ephemeral_id"` + // Jvm Contains Java Virtual Machine (JVM) statistics for the node. + Jvm JvmStats `json:"jvm"` + // Mem Contains statistics about memory usage for the node. + Mem MemStats `json:"mem"` + // Name Human-readable identifier for the node. Based on the Node name setting + // setting. + Name string `json:"name"` + // Roles Roles assigned to the node. + Roles []string `json:"roles"` + // TransportAddress The host and port where transport HTTP connections are accepted. + TransportAddress string `json:"transport_address"` +} + +func (s *Memory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return fmt.Errorf("%s | %w", "Jvm", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewMemory returns a Memory. +func NewMemory() *Memory { + r := &Memory{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memorystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memorystats.go new file mode 100644 index 000000000..2d92bc625 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memorystats.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MemoryStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L634-L658 +type MemoryStats struct { + // AdjustedTotalInBytes If the amount of physical memory has been overridden using the + // `es`.`total_memory_bytes` system property then this reports the overridden + // value in bytes. + // Otherwise it reports the same value as `total_in_bytes`. + AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` + // FreeInBytes Amount of free physical memory in bytes. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + Resident *string `json:"resident,omitempty"` + ResidentInBytes *int64 `json:"resident_in_bytes,omitempty"` + Share *string `json:"share,omitempty"` + ShareInBytes *int64 `json:"share_in_bytes,omitempty"` + // TotalInBytes Total amount of physical memory in bytes. + TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + TotalVirtual *string `json:"total_virtual,omitempty"` + TotalVirtualInBytes *int64 `json:"total_virtual_in_bytes,omitempty"` + // UsedInBytes Amount of used physical memory in bytes. 
+ UsedInBytes *int64 `json:"used_in_bytes,omitempty"` +} + +func (s *MemoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AdjustedTotalInBytes", err) + } + s.AdjustedTotalInBytes = &value + case float64: + f := int64(v) + s.AdjustedTotalInBytes = &f + } + + case "free_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeInBytes", err) + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "resident": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Resident", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Resident = &o + + case "resident_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ResidentInBytes", err) + } + s.ResidentInBytes = &value + case float64: + f := int64(v) + s.ResidentInBytes = &f + } + + case "share": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Share", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Share = &o + + case "share_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShareInBytes", err) + } + s.ShareInBytes = &value + case float64: + f := int64(v) + s.ShareInBytes = &f + } + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + case "total_virtual": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TotalVirtual", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalVirtual = &o + + case "total_virtual_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalVirtualInBytes", err) + } + s.TotalVirtualInBytes = &value + case float64: + f := int64(v) + s.TotalVirtualInBytes = &f + } + + case "used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UsedInBytes", err) + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + } + } + return nil +} + +// NewMemoryStats returns a MemoryStats. 
+func NewMemoryStats() *MemoryStats { + r := &MemoryStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memstats.go new file mode 100644 index 000000000..c7100af6c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/memstats.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MemStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/get_memory_stats/types.ts#L65-L88 +type MemStats struct { + // AdjustedTotal If the amount of physical memory has been overridden using the + // es.total_memory_bytes system property + // then this reports the overridden value. Otherwise it reports the same value + // as total. + AdjustedTotal ByteSize `json:"adjusted_total,omitempty"` + // AdjustedTotalInBytes If the amount of physical memory has been overridden using the + // `es.total_memory_bytes` system property + // then this reports the overridden value in bytes. Otherwise it reports the + // same value as `total_in_bytes`. + AdjustedTotalInBytes int `json:"adjusted_total_in_bytes"` + // Ml Contains statistics about machine learning use of native memory on the node. + Ml MemMlStats `json:"ml"` + // Total Total amount of physical memory. + Total ByteSize `json:"total,omitempty"` + // TotalInBytes Total amount of physical memory in bytes. 
+ TotalInBytes int `json:"total_in_bytes"` +} + +func (s *MemStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total": + if err := dec.Decode(&s.AdjustedTotal); err != nil { + return fmt.Errorf("%s | %w", "AdjustedTotal", err) + } + + case "adjusted_total_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AdjustedTotalInBytes", err) + } + s.AdjustedTotalInBytes = value + case float64: + f := int(v) + s.AdjustedTotalInBytes = f + } + + case "ml": + if err := dec.Decode(&s.Ml); err != nil { + return fmt.Errorf("%s | %w", "Ml", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "total_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = value + case float64: + f := int(v) + s.TotalInBytes = f + } + + } + } + return nil +} + +// NewMemStats returns a MemStats. +func NewMemStats() *MemStats { + r := &MemStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/merge.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/merge.go new file mode 100644 index 000000000..867918389 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/merge.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Merge type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L359-L361 +type Merge struct { + Scheduler *MergeScheduler `json:"scheduler,omitempty"` +} + +// NewMerge returns a Merge. +func NewMerge() *Merge { + r := &Merge{} + + return r +} + +type MergeVariant interface { + MergeCaster() *Merge +} + +func (s *Merge) MergeCaster() *Merge { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergescheduler.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergescheduler.go new file mode 100644 index 000000000..81adc33c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergescheduler.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MergeScheduler type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L363-L366 +type MergeScheduler struct { + MaxMergeCount Stringifiedinteger `json:"max_merge_count,omitempty"` + MaxThreadCount Stringifiedinteger `json:"max_thread_count,omitempty"` +} + +func (s *MergeScheduler) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_merge_count": + if err := dec.Decode(&s.MaxMergeCount); err != nil { + return fmt.Errorf("%s | %w", "MaxMergeCount", err) + } + + case "max_thread_count": + if err := dec.Decode(&s.MaxThreadCount); err != nil { + return fmt.Errorf("%s | %w", "MaxThreadCount", err) + } + + } + } + return nil +} + +// NewMergeScheduler returns a MergeScheduler. +func NewMergeScheduler() *MergeScheduler { + r := &MergeScheduler{} + + return r +} + +type MergeSchedulerVariant interface { + MergeSchedulerCaster() *MergeScheduler +} + +func (s *MergeScheduler) MergeSchedulerCaster() *MergeScheduler { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergesstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergesstats.go new file mode 100644 index 000000000..015fd1f91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mergesstats.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MergesStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L188-L205 +type MergesStats struct { + Current int64 `json:"current"` + CurrentDocs int64 `json:"current_docs"` + CurrentSize *string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes"` + Total int64 `json:"total"` + TotalAutoThrottle *string `json:"total_auto_throttle,omitempty"` + TotalAutoThrottleInBytes int64 `json:"total_auto_throttle_in_bytes"` + TotalDocs int64 `json:"total_docs"` + TotalSize *string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` + TotalStoppedTime Duration `json:"total_stopped_time,omitempty"` + TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"` + TotalThrottledTime Duration `json:"total_throttled_time,omitempty"` + TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *MergesStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "current_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentDocs", err) + } + s.CurrentDocs = value + case float64: + f := int64(v) + s.CurrentDocs = f + } + + case "current_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CurrentSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CurrentSize = &o + + case "current_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentSizeInBytes", err) + } + s.CurrentSizeInBytes = value + case float64: + f := int64(v) + s.CurrentSizeInBytes = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_auto_throttle": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TotalAutoThrottle", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalAutoThrottle = &o + + case "total_auto_throttle_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalAutoThrottleInBytes", err) + } + s.TotalAutoThrottleInBytes = value + case float64: + f 
:= int64(v) + s.TotalAutoThrottleInBytes = f + } + + case "total_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocs", err) + } + s.TotalDocs = value + case float64: + f := int64(v) + s.TotalDocs = f + } + + case "total_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalSize = &o + + case "total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeInBytes", err) + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + case "total_stopped_time": + if err := dec.Decode(&s.TotalStoppedTime); err != nil { + return fmt.Errorf("%s | %w", "TotalStoppedTime", err) + } + + case "total_stopped_time_in_millis": + if err := dec.Decode(&s.TotalStoppedTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalStoppedTimeInMillis", err) + } + + case "total_throttled_time": + if err := dec.Decode(&s.TotalThrottledTime); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledTime", err) + } + + case "total_throttled_time_in_millis": + if err := dec.Decode(&s.TotalThrottledTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledTimeInMillis", err) + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewMergesStats returns a MergesStats. +func NewMergesStats() *MergesStats { + r := &MergesStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/message.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/message.go new file mode 100644 index 000000000..0caa7ea59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/message.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Message type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L174-L228 +type Message struct { + // Content The content of the message. + // + // String example: + // ``` + // + // { + // "content": "Some string" + // } + // + // ``` + // + // Object example: + // ``` + // + // { + // "content": [ + // { + // "text": "Some text", + // "type": "text" + // } + // ] + // } + // + // ``` + Content MessageContent `json:"content,omitempty"` + // Role The role of the message author. Valid values are `user`, `assistant`, + // `system`, and `tool`. + Role string `json:"role"` + // ToolCallId Only for `tool` role messages. The tool call that this message is responding + // to. + ToolCallId *string `json:"tool_call_id,omitempty"` + // ToolCalls Only for `assistant` role messages. The tool calls generated by the model. If + // it's specified, the `content` field is optional. + // Example: + // ``` + // + // { + // "tool_calls": [ + // { + // "id": "call_KcAjWtAww20AihPHphUh46Gd", + // "type": "function", + // "function": { + // "name": "get_current_weather", + // "arguments": "{\"location\":\"Boston, MA\"}" + // } + // } + // ] + // } + // + // ``` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` +} + +func (s *Message) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content": + if err := dec.Decode(&s.Content); err != nil { + return fmt.Errorf("%s | %w", "Content", err) + } + + case "role": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Role", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Role = o + + case "tool_call_id": + if err := dec.Decode(&s.ToolCallId); err != nil { + return fmt.Errorf("%s | %w", "ToolCallId", err) + } + + case "tool_calls": + if err := dec.Decode(&s.ToolCalls); err != nil { + return fmt.Errorf("%s | %w", "ToolCalls", err) + } + + } + } + return nil +} + +// NewMessage returns a Message. +func NewMessage() *Message { + r := &Message{} + + return r +} + +type MessageVariant interface { + MessageCaster() *Message +} + +func (s *Message) MessageCaster() *Message { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/messagecontent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/messagecontent.go new file mode 100644 index 000000000..f6c48f13d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/messagecontent.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MessageContent holds the union for the following types: +// +// string +// []ContentObject +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L169-L172 +type MessageContent any + +type MessageContentVariant interface { + MessageContentCaster() *MessageContent +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metadata.go new file mode 100644 index 000000000..22eefcb20 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metadata.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// Metadata type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L93-L93 +type Metadata map[string]json.RawMessage + +type MetadataVariant interface { + MetadataCaster() *Metadata +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metrics.go new file mode 100644 index 000000000..769f03519 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/metrics.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Metrics type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L70-L70 +type Metrics []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetoperation.go new file mode 100644 index 000000000..23c6f27bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetoperation.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// MgetOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mget/types.ts#L32-L55 +type MgetOperation struct { + // Id_ The unique document ID. + Id_ string `json:"_id"` + // Index_ The index that contains the document. + Index_ *string `json:"_index,omitempty"` + // Routing The key for the primary shard the document resides on. Required if routing is + // used during indexing. + Routing *string `json:"routing,omitempty"` + // Source_ If `false`, excludes all _source fields. + Source_ SourceConfig `json:"_source,omitempty"` + // StoredFields The stored fields you want to retrieve. 
+ StoredFields []string `json:"stored_fields,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *MgetOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewMgetOperation returns a MgetOperation. +func NewMgetOperation() *MgetOperation { + r := &MgetOperation{} + + return r +} + +type MgetOperationVariant interface { + MgetOperationCaster() *MgetOperation +} + +func (s *MgetOperation) MgetOperationCaster() *MgetOperation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetresponseitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetresponseitem.go new file mode 100644 index 000000000..7d77180a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mgetresponseitem.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
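
// Illustrative sketch, assuming only the generated types shown above:
// describing one document for an mget request with MgetOperation. The
// index and field names are placeholders; unset optional fields are
// dropped on marshal via their omitempty tags. Not part of the vendored diff.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	op := types.NewMgetOperation()
	op.Id_ = "1"
	index := "my-index" // placeholder index name
	op.Index_ = &index
	op.StoredFields = []string{"title", "author"} // placeholder field names

	data, err := json.Marshal(op)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"_id":"1","_index":"my-index","stored_fields":["title","author"]}
}
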
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MgetResponseItem holds the union for the following types: +// +// GetResult +// MultiGetError +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mget/types.ts#L57-L60 +type MgetResponseItem any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrateaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrateaction.go new file mode 100644 index 000000000..25595573f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrateaction.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MigrateAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L141-L143 +type MigrateAction struct { + Enabled *bool `json:"enabled,omitempty"` +} + +func (s *MigrateAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + } + } + return nil +} + +// NewMigrateAction returns a MigrateAction. 
+func NewMigrateAction() *MigrateAction { + r := &MigrateAction{} + + return r +} + +type MigrateActionVariant interface { + MigrateActionCaster() *MigrateAction +} + +func (s *MigrateAction) MigrateActionCaster() *MigrateAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migratereindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migratereindex.go new file mode 100644 index 000000000..a473ca3f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migratereindex.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum" +) + +// MigrateReindex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L39-L48 +type MigrateReindex struct { + // Mode Reindex mode. Currently only 'upgrade' is supported. + Mode modeenum.ModeEnum `json:"mode"` + // Source The source index or data stream (only data streams are currently supported). + Source SourceIndex `json:"source"` +} + +// NewMigrateReindex returns a MigrateReindex. +func NewMigrateReindex() *MigrateReindex { + r := &MigrateReindex{} + + return r +} + +type MigrateReindexVariant interface { + MigrateReindexCaster() *MigrateReindex +} + +func (s *MigrateReindex) MigrateReindexCaster() *MigrateReindex { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrationfeatureindexinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrationfeatureindexinfo.go new file mode 100644 index 000000000..a965861fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/migrationfeatureindexinfo.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MigrationFeatureIndexInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 +type MigrationFeatureIndexInfo struct { + FailureCause *ErrorCause `json:"failure_cause,omitempty"` + Index string `json:"index"` + Version string `json:"version"` +} + +func (s *MigrationFeatureIndexInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failure_cause": + if err := dec.Decode(&s.FailureCause); err != nil { + return fmt.Errorf("%s | %w", "FailureCause", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewMigrationFeatureIndexInfo returns a MigrationFeatureIndexInfo. +func NewMigrationFeatureIndexInfo() *MigrationFeatureIndexInfo { + r := &MigrationFeatureIndexInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregate.go new file mode 100644 index 000000000..314d90739 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MinAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L199-L203 +type MinAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. 
+ Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *MinAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewMinAggregate returns a MinAggregate. +func NewMinAggregate() *MinAggregate { + r := &MinAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregation.go new file mode 100644 index 000000000..5a7cb8abf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MinAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L190-L190 +type MinAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *MinAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewMinAggregation returns a MinAggregation. +func NewMinAggregation() *MinAggregation { + r := &MinAggregation{} + + return r +} + +type MinAggregationVariant interface { + MinAggregationCaster() *MinAggregation +} + +func (s *MinAggregation) MinAggregationCaster() *MinAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minbucketaggregation.go new file mode 100644 index 000000000..a4496a426 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minbucketaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// MinBucketAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L249-L252 +type MinBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *MinBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewMinBucketAggregation returns a MinBucketAggregation. +func NewMinBucketAggregation() *MinBucketAggregation { + r := &MinBucketAggregation{} + + return r +} + +type MinBucketAggregationVariant interface { + MinBucketAggregationCaster() *MinBucketAggregation +} + +func (s *MinBucketAggregation) MinBucketAggregationCaster() *MinBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minhashtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minhashtokenfilter.go new file mode 100644 index 000000000..ae403901f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minhashtokenfilter.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MinHashTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L514-L525 +type MinHashTokenFilter struct { + // BucketCount Number of buckets to which hashes are assigned. Defaults to `512`. + BucketCount *int `json:"bucket_count,omitempty"` + // HashCount Number of ways to hash each token in the stream. Defaults to `1`. + HashCount *int `json:"hash_count,omitempty"` + // HashSetSize Number of hashes to keep from each bucket. Defaults to `1`. + // Hashes are retained by ascending size, starting with the bucket’s smallest + // hash first. 
+ HashSetSize *int `json:"hash_set_size,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` + // WithRotation If `true`, the filter fills empty buckets with the value of the first + // non-empty bucket to its circular right if the `hash_set_size` is `1`. If the + // `bucket_count` argument is greater than 1, this parameter defaults to `true`. + // Otherwise, this parameter defaults to `false`. + WithRotation *bool `json:"with_rotation,omitempty"` +} + +func (s *MinHashTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "BucketCount", err) + } + s.BucketCount = &value + case float64: + f := int(v) + s.BucketCount = &f + } + + case "hash_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "HashCount", err) + } + s.HashCount = &value + case float64: + f := int(v) + s.HashCount = &f + } + + case "hash_set_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "HashSetSize", err) + } + s.HashSetSize = &value + case float64: + f := int(v) + s.HashSetSize = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "with_rotation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WithRotation", err) + } + s.WithRotation = &value + case bool: + s.WithRotation = &v + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s MinHashTokenFilter) MarshalJSON() ([]byte, error) { + type innerMinHashTokenFilter MinHashTokenFilter + tmp := innerMinHashTokenFilter{ + BucketCount: s.BucketCount, + HashCount: s.HashCount, + HashSetSize: s.HashSetSize, + Type: s.Type, + Version: s.Version, + WithRotation: s.WithRotation, + } + + tmp.Type = "min_hash" + + return json.Marshal(tmp) +} + +// NewMinHashTokenFilter returns a MinHashTokenFilter. +func NewMinHashTokenFilter() *MinHashTokenFilter { + r := &MinHashTokenFilter{} + + return r +} + +type MinHashTokenFilterVariant interface { + MinHashTokenFilterCaster() *MinHashTokenFilter +} + +func (s *MinHashTokenFilter) MinHashTokenFilterCaster() *MinHashTokenFilter { + return s +} + +func (s *MinHashTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimallicenseinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimallicenseinformation.go new file mode 100644 index 000000000..4f3fbc17a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimallicenseinformation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype" +) + +// MinimalLicenseInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/types.ts#L34-L40 +type MinimalLicenseInformation struct { + ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` + Mode licensetype.LicenseType `json:"mode"` + Status licensestatus.LicenseStatus `json:"status"` + Type licensetype.LicenseType `json:"type"` + Uid string `json:"uid"` +} + +func (s *MinimalLicenseInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExpiryDateInMillis", err) + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Uid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uid = o + + } + } + return nil +} + +// NewMinimalLicenseInformation returns a MinimalLicenseInformation. +func NewMinimalLicenseInformation() *MinimalLicenseInformation { + r := &MinimalLicenseInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimumshouldmatch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimumshouldmatch.go new file mode 100644 index 000000000..ab0fc5574 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/minimumshouldmatch.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MinimumShouldMatch holds the union for the following types: +// +// int +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L160-L164 +type MinimumShouldMatch any + +type MinimumShouldMatchVariant interface { + MinimumShouldMatchCaster() *MinimumShouldMatch +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missing.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missing.go new file mode 100644 index 000000000..b31c0d88c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missing.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Missing holds the union for the following types: +// +// string +// int +// Float64 +// bool +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/AggregationContainer.ts#L535-L535 +type Missing any + +type MissingVariant interface { + MissingCaster() *Missing +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregate.go new file mode 100644 index 000000000..03cbc7a24 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
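
// Illustrative sketch, assuming only the generated types shown above:
// Missing is an open union (string | int | Float64 | bool), so a numeric
// default can be assigned directly to MinAggregation.Missing, giving
// documents without the field a stand-in value during aggregation. The
// field name "price" is a placeholder. Not part of the vendored diff.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	agg := types.NewMinAggregation()
	field := "price" // placeholder field name
	agg.Field = &field
	agg.Missing = types.Missing(0) // documents without "price" aggregate as 0

	data, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"field":"price","missing":0}
}
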
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// MissingAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L528-L532 +type MissingAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *MissingAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := 
NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + 
if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s MissingAggregate) MarshalJSON() ([]byte, error) { + type opt MissingAggregate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewMissingAggregate returns a MissingAggregate. +func NewMissingAggregate() *MissingAggregate { + r := &MissingAggregate{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregation.go new file mode 100644 index 000000000..dec976550 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/missingaggregation.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MissingAggregation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L593-L599 +type MissingAggregation struct { + // Field The name of the field. + Field *string `json:"field,omitempty"` + Missing Missing `json:"missing,omitempty"` +} + +func (s *MissingAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + } + } + return nil +} + +// NewMissingAggregation returns a MissingAggregation. +func NewMissingAggregation() *MissingAggregation { + r := &MissingAggregation{} + + return r +} + +type MissingAggregationVariant interface { + MissingAggregationCaster() *MissingAggregation +} + +func (s *MissingAggregation) MissingAggregationCaster() *MissingAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mistralservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mistralservicesettings.go new file mode 100644 index 000000000..344163c51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mistralservicesettings.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MistralServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1515-L1542 +type MistralServiceSettings struct { + // ApiKey A valid API key of your Mistral account. + // You can find your Mistral API keys or you can create a new one on the API + // Keys page. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // MaxInputTokens The maximum number of tokens per input before chunking occurs. 
+ MaxInputTokens *int `json:"max_input_tokens,omitempty"` + // Model The name of the model to use for the inference task. + // Refer to the Mistral models documentation for the list of available models. + Model string `json:"model"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // the Mistral API. + // By default, the `mistral` service sets the number of requests allowed per + // minute to 240. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` +} + +func (s *MistralServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "max_input_tokens": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInputTokens", err) + } + s.MaxInputTokens = &value + case float64: + f := int(v) + s.MaxInputTokens = &f + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + } + } + return nil +} + +// NewMistralServiceSettings returns a MistralServiceSettings. +func NewMistralServiceSettings() *MistralServiceSettings { + r := &MistralServiceSettings{} + + return r +} + +type MistralServiceSettingsVariant interface { + MistralServiceSettingsCaster() *MistralServiceSettings +} + +func (s *MistralServiceSettings) MistralServiceSettingsCaster() *MistralServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlcounter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlcounter.go new file mode 100644 index 000000000..b22f714bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlcounter.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlCounter type. 
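
mistralservicesettings.go also shows the generated "variant" plumbing: MistralServiceSettingsVariant is satisfied by anything that can cast itself to *MistralServiceSettings, which is how the typed request builders accept the plain struct directly. A hedged sketch of constructing the settings and consuming them through that interface; the describe helper is illustrative (not part of the library), the API key is a placeholder, and mistral-embed is only an example model name:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// describe accepts any MistralServiceSettingsVariant and recovers the
// concrete settings struct via the generated caster.
func describe(v types.MistralServiceSettingsVariant) string {
	s := v.MistralServiceSettingsCaster()
	b, _ := json.Marshal(s)
	return string(b)
}

func main() {
	maxTokens := 512
	settings := &types.MistralServiceSettings{
		ApiKey:         "YOUR_MISTRAL_API_KEY", // placeholder; only supplied at creation time
		Model:          "mistral-embed",        // example model name; see Mistral's docs
		MaxInputTokens: &maxTokens,             // chunk inputs longer than 512 tokens
	}
	fmt.Println(describe(settings))
	// {"api_key":"YOUR_MISTRAL_API_KEY","max_input_tokens":512,"model":"mistral-embed"}
}
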
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L265-L267 +type MlCounter struct { + Count int64 `json:"count"` +} + +func (s *MlCounter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + +// NewMlCounter returns a MlCounter. +func NewMlCounter() *MlCounter { + r := &MlCounter{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldatafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldatafeed.go new file mode 100644 index 000000000..1684578a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldatafeed.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MLDatafeed type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L37-L61 +type MLDatafeed struct { + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // Authorization The security privileges that the datafeed uses to run its queries. If Elastic + // Stack security features were disabled at the time of the most recent update + // to the datafeed, this property is omitted. 
+ Authorization *DatafeedAuthorization `json:"authorization,omitempty"` + ChunkingConfig *ChunkingConfig `json:"chunking_config,omitempty"` + DatafeedId string `json:"datafeed_id"` + DelayedDataCheckConfig DelayedDataCheckConfig `json:"delayed_data_check_config"` + Frequency Duration `json:"frequency,omitempty"` + Indexes []string `json:"indexes,omitempty"` + Indices []string `json:"indices"` + IndicesOptions *IndicesOptions `json:"indices_options,omitempty"` + JobId string `json:"job_id"` + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + Query Query `json:"query"` + QueryDelay Duration `json:"query_delay,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + ScrollSize *int `json:"scroll_size,omitempty"` +} + +func (s *MLDatafeed) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "authorization": + if err := dec.Decode(&s.Authorization); err != nil { + return fmt.Errorf("%s | %w", "Authorization", err) + } + + case "chunking_config": + if err := dec.Decode(&s.ChunkingConfig); err != nil { + return fmt.Errorf("%s | %w", "ChunkingConfig", err) + } + + case "datafeed_id": + if err := dec.Decode(&s.DatafeedId); err != nil { + return fmt.Errorf("%s | %w", "DatafeedId", err) + } + + case "delayed_data_check_config": + if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil { + return fmt.Errorf("%s | %w", "DelayedDataCheckConfig", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "indexes": + if err := dec.Decode(&s.Indexes); err != nil { + return fmt.Errorf("%s | %w", "Indexes", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return fmt.Errorf("%s | %w", "IndicesOptions", err) + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "max_empty_searches": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxEmptySearches", err) + } + s.MaxEmptySearches = &value + case float64: + f := int(v) + s.MaxEmptySearches = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "query_delay": + if err := dec.Decode(&s.QueryDelay); err != nil { + return fmt.Errorf("%s | %w", "QueryDelay", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "scroll_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err 
!= nil { + return fmt.Errorf("%s | %w", "ScrollSize", err) + } + s.ScrollSize = &value + case float64: + f := int(v) + s.ScrollSize = &f + } + + } + } + return nil +} + +// NewMLDatafeed returns a MLDatafeed. +func NewMLDatafeed() *MLDatafeed { + r := &MLDatafeed{ + Aggregations: make(map[string]Aggregations), + ScriptFields: make(map[string]ScriptField), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobs.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobs.go new file mode 100644 index 000000000..65a02b3fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobs.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MlDataFrameAnalyticsJobs type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L187-L192 +type MlDataFrameAnalyticsJobs struct { + All_ MlDataFrameAnalyticsJobsCount `json:"_all"` + AnalysisCounts *MlDataFrameAnalyticsJobsAnalysis `json:"analysis_counts,omitempty"` + MemoryUsage *MlDataFrameAnalyticsJobsMemory `json:"memory_usage,omitempty"` + Stopped *MlDataFrameAnalyticsJobsCount `json:"stopped,omitempty"` +} + +// NewMlDataFrameAnalyticsJobs returns a MlDataFrameAnalyticsJobs. +func NewMlDataFrameAnalyticsJobs() *MlDataFrameAnalyticsJobs { + r := &MlDataFrameAnalyticsJobs{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsanalysis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsanalysis.go new file mode 100644 index 000000000..7f8d92270 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsanalysis.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlDataFrameAnalyticsJobsAnalysis type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L194-L198 +type MlDataFrameAnalyticsJobsAnalysis struct { + Classification *int `json:"classification,omitempty"` + OutlierDetection *int `json:"outlier_detection,omitempty"` + Regression *int `json:"regression,omitempty"` +} + +func (s *MlDataFrameAnalyticsJobsAnalysis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Classification", err) + } + s.Classification = &value + case float64: + f := int(v) + s.Classification = &f + } + + case "outlier_detection": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutlierDetection", err) + } + s.OutlierDetection = &value + case float64: + f := int(v) + s.OutlierDetection = &f + } + + case "regression": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Regression", err) + } + s.Regression = &value + case float64: + f := int(v) + s.Regression = &f + } + + } + } + return nil +} + +// NewMlDataFrameAnalyticsJobsAnalysis returns a MlDataFrameAnalyticsJobsAnalysis. +func NewMlDataFrameAnalyticsJobsAnalysis() *MlDataFrameAnalyticsJobsAnalysis { + r := &MlDataFrameAnalyticsJobsAnalysis{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobscount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobscount.go new file mode 100644 index 000000000..19664f955 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobscount.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
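
MlDataFrameAnalyticsJobsAnalysis repeats the lenient numeric idiom used throughout these generated files: each counter is decoded through `any` and accepted either as a JSON number (which encoding/json surfaces as float64) or as a quoted numeric string, since Elasticsearch can emit both. A standalone sketch of the same idiom using only the standard library:

package main

import (
	"fmt"
	"strconv"
)

// parseCount tolerates the two encodings the generated decoders accept:
// a JSON number (float64 after decoding into `any`) or a quoted string.
func parseCount(tmp any) (int, error) {
	switch v := tmp.(type) {
	case string:
		return strconv.Atoi(v)
	case float64:
		return int(v), nil
	}
	return 0, fmt.Errorf("unsupported type %T", tmp)
}

func main() {
	a, _ := parseCount(float64(7)) // from `"classification": 7`
	b, _ := parseCount("7")        // from `"classification": "7"`
	fmt.Println(a, b)              // 7 7
}
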
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlDataFrameAnalyticsJobsCount type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L204-L206 +type MlDataFrameAnalyticsJobsCount struct { + Count int64 `json:"count"` +} + +func (s *MlDataFrameAnalyticsJobsCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + +// NewMlDataFrameAnalyticsJobsCount returns a MlDataFrameAnalyticsJobsCount. +func NewMlDataFrameAnalyticsJobsCount() *MlDataFrameAnalyticsJobsCount { + r := &MlDataFrameAnalyticsJobsCount{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsmemory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsmemory.go new file mode 100644 index 000000000..176e33b47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mldataframeanalyticsjobsmemory.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MlDataFrameAnalyticsJobsMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L200-L202 +type MlDataFrameAnalyticsJobsMemory struct { + PeakUsageBytes JobStatistics `json:"peak_usage_bytes"` +} + +// NewMlDataFrameAnalyticsJobsMemory returns a MlDataFrameAnalyticsJobsMemory. +func NewMlDataFrameAnalyticsJobsMemory() *MlDataFrameAnalyticsJobsMemory { + r := &MlDataFrameAnalyticsJobsMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlfilter.go new file mode 100644 index 000000000..1a58dcf02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlfilter.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MLFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Filter.ts#L22-L29 +type MLFilter struct { + // Description A description of the filter. + Description *string `json:"description,omitempty"` + // FilterId A string that uniquely identifies a filter. + FilterId string `json:"filter_id"` + // Items An array of strings which is the filter item list. + Items []string `json:"items"` +} + +func (s *MLFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "filter_id": + if err := dec.Decode(&s.FilterId); err != nil { + return fmt.Errorf("%s | %w", "FilterId", err) + } + + case "items": + if err := dec.Decode(&s.Items); err != nil { + return fmt.Errorf("%s | %w", "Items", err) + } + + } + } + return nil +} + +// NewMLFilter returns a MLFilter. +func NewMLFilter() *MLFilter { + r := &MLFilter{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinference.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinference.go new file mode 100644 index 000000000..8280486ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinference.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MlInference type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L208-L216 +type MlInference struct { + Deployments *MlInferenceDeployments `json:"deployments,omitempty"` + IngestProcessors map[string]MlInferenceIngestProcessor `json:"ingest_processors"` + TrainedModels MlInferenceTrainedModels `json:"trained_models"` +} + +// NewMlInference returns a MlInference. +func NewMlInference() *MlInference { + r := &MlInference{ + IngestProcessors: make(map[string]MlInferenceIngestProcessor), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeployments.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeployments.go new file mode 100644 index 000000000..98c54513a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeployments.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlInferenceDeployments type. 
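
MlInference is a plain container with no custom UnmarshalJSON, so a usage-stats fragment from the xpack usage API decodes with the default machinery; only its leaf types (MlCounter, MlInferenceIngestProcessorCount, and so on) carry custom decoders. A minimal sketch with an abbreviated, hand-written payload whose shape follows the structs in this file:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Abbreviated usage fragment; the field names match the JSON tags above.
	raw := []byte(`{
		"ingest_processors": {
			"inference": {
				"num_docs_processed": {"max": 12, "min": 0, "sum": 30},
				"num_failures":       {"max": 0, "min": 0, "sum": 0},
				"pipelines":          {"count": 2},
				"time_ms":            {"max": 9, "min": 1, "sum": 15}
			}
		},
		"trained_models": {"_all": {"count": 5}}
	}`)

	var mi types.MlInference
	if err := json.Unmarshal(raw, &mi); err != nil {
		panic(err)
	}
	fmt.Println(mi.IngestProcessors["inference"].Pipelines.Count) // 2
}
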
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L237-L242 +type MlInferenceDeployments struct { + Count int `json:"count"` + InferenceCounts JobStatistics `json:"inference_counts"` + ModelSizesBytes JobStatistics `json:"model_sizes_bytes"` + TimeMs MlInferenceDeploymentsTimeMs `json:"time_ms"` +} + +func (s *MlInferenceDeployments) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "inference_counts": + if err := dec.Decode(&s.InferenceCounts); err != nil { + return fmt.Errorf("%s | %w", "InferenceCounts", err) + } + + case "model_sizes_bytes": + if err := dec.Decode(&s.ModelSizesBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelSizesBytes", err) + } + + case "time_ms": + if err := dec.Decode(&s.TimeMs); err != nil { + return fmt.Errorf("%s | %w", "TimeMs", err) + } + + } + } + return nil +} + +// NewMlInferenceDeployments returns a MlInferenceDeployments. +func NewMlInferenceDeployments() *MlInferenceDeployments { + r := &MlInferenceDeployments{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeploymentstimems.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeploymentstimems.go new file mode 100644 index 000000000..6479b3bc1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencedeploymentstimems.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlInferenceDeploymentsTimeMs type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L244-L246 +type MlInferenceDeploymentsTimeMs struct { + Avg Float64 `json:"avg"` +} + +func (s *MlInferenceDeploymentsTimeMs) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + } + } + return nil +} + +// NewMlInferenceDeploymentsTimeMs returns a MlInferenceDeploymentsTimeMs. +func NewMlInferenceDeploymentsTimeMs() *MlInferenceDeploymentsTimeMs { + r := &MlInferenceDeploymentsTimeMs{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessor.go new file mode 100644 index 000000000..813178045 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessor.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MlInferenceIngestProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L218-L223 +type MlInferenceIngestProcessor struct { + NumDocsProcessed MlInferenceIngestProcessorCount `json:"num_docs_processed"` + NumFailures MlInferenceIngestProcessorCount `json:"num_failures"` + Pipelines MlCounter `json:"pipelines"` + TimeMs MlInferenceIngestProcessorCount `json:"time_ms"` +} + +// NewMlInferenceIngestProcessor returns a MlInferenceIngestProcessor. +func NewMlInferenceIngestProcessor() *MlInferenceIngestProcessor { + r := &MlInferenceIngestProcessor{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessorcount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessorcount.go new file mode 100644 index 000000000..fa081e297 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferenceingestprocessorcount.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlInferenceIngestProcessorCount type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L248-L252 +type MlInferenceIngestProcessorCount struct { + Max int64 `json:"max"` + Min int64 `json:"min"` + Sum int64 `json:"sum"` +} + +func (s *MlInferenceIngestProcessorCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + s.Max = value + case float64: + f := int64(v) + s.Max = f + } + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + s.Min = value + case float64: + f := int64(v) + s.Min = f + } + + case "sum": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Sum", err) + } + s.Sum = value + case float64: + f := int64(v) + s.Sum = f + } + + } + } + return nil +} + +// NewMlInferenceIngestProcessorCount returns a MlInferenceIngestProcessorCount. +func NewMlInferenceIngestProcessorCount() *MlInferenceIngestProcessorCount { + r := &MlInferenceIngestProcessorCount{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodels.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodels.go new file mode 100644 index 000000000..0b34f995a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodels.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MlInferenceTrainedModels type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L225-L235 +type MlInferenceTrainedModels struct { + All_ MlCounter `json:"_all"` + Count *MlInferenceTrainedModelsCount `json:"count,omitempty"` + EstimatedHeapMemoryUsageBytes *JobStatistics `json:"estimated_heap_memory_usage_bytes,omitempty"` + EstimatedOperations *JobStatistics `json:"estimated_operations,omitempty"` + ModelSizeBytes *JobStatistics `json:"model_size_bytes,omitempty"` +} + +// NewMlInferenceTrainedModels returns a MlInferenceTrainedModels. +func NewMlInferenceTrainedModels() *MlInferenceTrainedModels { + r := &MlInferenceTrainedModels{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodelscount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodelscount.go new file mode 100644 index 000000000..8d367831f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mlinferencetrainedmodelscount.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlInferenceTrainedModelsCount type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L254-L263 +type MlInferenceTrainedModelsCount struct { + Classification *int64 `json:"classification,omitempty"` + Ner *int64 `json:"ner,omitempty"` + Other int64 `json:"other"` + PassThrough *int64 `json:"pass_through,omitempty"` + Prepackaged int64 `json:"prepackaged"` + Regression *int64 `json:"regression,omitempty"` + TextEmbedding *int64 `json:"text_embedding,omitempty"` + Total int64 `json:"total"` +} + +func (s *MlInferenceTrainedModelsCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Classification", err) + } + s.Classification = &value + case float64: + f := int64(v) + s.Classification = &f + } + + case "ner": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Ner", err) + } + s.Ner = &value + case float64: + f := int64(v) + s.Ner = &f + } + + case "other": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Other", err) + } + s.Other = value + case float64: + f := int64(v) + s.Other = f + } + + case "pass_through": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PassThrough", err) + } + s.PassThrough = &value + case float64: + f := int64(v) + s.PassThrough = &f + } + + case "prepackaged": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Prepackaged", err) + } + s.Prepackaged = value + case float64: + f := int64(v) + s.Prepackaged = f + } + + case "regression": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Regression", err) + } + s.Regression = &value + case float64: + f := int64(v) + s.Regression = &f + } + + case "text_embedding": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TextEmbedding", err) + } + s.TextEmbedding = &value + case float64: + f := int64(v) + s.TextEmbedding = &f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewMlInferenceTrainedModelsCount returns a MlInferenceTrainedModelsCount. 
+func NewMlInferenceTrainedModelsCount() *MlInferenceTrainedModelsCount { + r := &MlInferenceTrainedModelsCount{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mljobforecasts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mljobforecasts.go new file mode 100644 index 000000000..d8ed48203 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mljobforecasts.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MlJobForecasts type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L182-L185 +type MlJobForecasts struct { + ForecastedJobs int64 `json:"forecasted_jobs"` + Total int64 `json:"total"` +} + +func (s *MlJobForecasts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "forecasted_jobs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ForecastedJobs", err) + } + s.ForecastedJobs = value + case float64: + f := int64(v) + s.ForecastedJobs = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewMlJobForecasts returns a MlJobForecasts. +func NewMlJobForecasts() *MlJobForecasts { + r := &MlJobForecasts{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelpackageconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelpackageconfig.go new file mode 100644 index 000000000..d22412d94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelpackageconfig.go @@ -0,0 +1,201 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ModelPackageConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L257-L272 +type ModelPackageConfig struct { + CreateTime *int64 `json:"create_time,omitempty"` + Description *string `json:"description,omitempty"` + InferenceConfig map[string]json.RawMessage `json:"inference_config,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + MinimumVersion *string `json:"minimum_version,omitempty"` + ModelRepository *string `json:"model_repository,omitempty"` + ModelType *string `json:"model_type,omitempty"` + PackagedModelId string `json:"packaged_model_id"` + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + Sha256 *string `json:"sha256,omitempty"` + Size ByteSize `json:"size,omitempty"` + Tags []string `json:"tags,omitempty"` + VocabularyFile *string `json:"vocabulary_file,omitempty"` +} + +func (s *ModelPackageConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "inference_config": + if s.InferenceConfig == nil { + s.InferenceConfig = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "minimum_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinimumVersion", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinimumVersion = &o + + case "model_repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelRepository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelRepository = &o + + case "model_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelType = &o + + case 
"packaged_model_id": + if err := dec.Decode(&s.PackagedModelId); err != nil { + return fmt.Errorf("%s | %w", "PackagedModelId", err) + } + + case "platform_architecture": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PlatformArchitecture", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PlatformArchitecture = &o + + case "prefix_strings": + if err := dec.Decode(&s.PrefixStrings); err != nil { + return fmt.Errorf("%s | %w", "PrefixStrings", err) + } + + case "sha256": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Sha256", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Sha256 = &o + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return fmt.Errorf("%s | %w", "Tags", err) + } + + case "vocabulary_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VocabularyFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VocabularyFile = &o + + } + } + return nil +} + +// NewModelPackageConfig returns a ModelPackageConfig. +func NewModelPackageConfig() *ModelPackageConfig { + r := &ModelPackageConfig{ + InferenceConfig: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelplotconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelplotconfig.go new file mode 100644 index 000000000..dc02a84c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelplotconfig.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ModelPlotConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/ModelPlot.ts#L23-L42 +type ModelPlotConfig struct { + // AnnotationsEnabled If true, enables calculation and storage of the model change annotations for + // each entity that is being analyzed. + AnnotationsEnabled *bool `json:"annotations_enabled,omitempty"` + // Enabled If true, enables calculation and storage of the model bounds for each entity + // that is being analyzed. 
+ Enabled *bool `json:"enabled,omitempty"` + // Terms Limits data collection to this comma separated list of partition or by field + // values. If terms are not specified or it is an empty string, no filtering is + // applied. Wildcards are not supported. Only the specified terms can be viewed + // when using the Single Metric Viewer. + Terms *string `json:"terms,omitempty"` +} + +func (s *ModelPlotConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "annotations_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnnotationsEnabled", err) + } + s.AnnotationsEnabled = &value + case bool: + s.AnnotationsEnabled = &v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return fmt.Errorf("%s | %w", "Terms", err) + } + + } + } + return nil +} + +// NewModelPlotConfig returns a ModelPlotConfig. +func NewModelPlotConfig() *ModelPlotConfig { + r := &ModelPlotConfig{} + + return r +} + +type ModelPlotConfigVariant interface { + ModelPlotConfigCaster() *ModelPlotConfig +} + +func (s *ModelPlotConfig) ModelPlotConfigCaster() *ModelPlotConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsizestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsizestats.go new file mode 100644 index 000000000..ce6b641b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsizestats.go @@ -0,0 +1,328 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus" +) + +// ModelSizeStats type. 
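
modelplotconfig.go and modelpackageconfig.go round out the lenient-decoding family: booleans may arrive as real JSON bools or as the strings "true"/"false", and free-form string fields are read as raw JSON and unquoted, falling back to the raw bytes when unquoting fails. Both idioms sketched standalone, with only the standard library:

package main

import (
	"fmt"
	"strconv"
)

// parseFlag mirrors the boolean arm in ModelPlotConfig.UnmarshalJSON:
// accept a JSON bool or the quoted forms "true"/"false".
func parseFlag(tmp any) (bool, error) {
	switch v := tmp.(type) {
	case string:
		return strconv.ParseBool(v)
	case bool:
		return v, nil
	}
	return false, fmt.Errorf("unsupported type %T", tmp)
}

// unquoteOrRaw mirrors the string-field idiom in ModelPackageConfig:
// strip JSON quotes when present, otherwise keep the raw bytes.
func unquoteOrRaw(raw []byte) string {
	o := string(raw)
	if u, err := strconv.Unquote(o); err == nil {
		return u
	}
	return o
}

func main() {
	on, _ := parseFlag("true") // from `"enabled": "true"`
	fmt.Println(on)            // true
	fmt.Println(unquoteOrRaw([]byte(`"s3://models/pkg"`))) // s3://models/pkg
}
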
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L59-L82 +type ModelSizeStats struct { + AssignmentMemoryBasis *string `json:"assignment_memory_basis,omitempty"` + BucketAllocationFailuresCount int64 `json:"bucket_allocation_failures_count"` + CategorizationStatus categorizationstatus.CategorizationStatus `json:"categorization_status"` + CategorizedDocCount int `json:"categorized_doc_count"` + DeadCategoryCount int `json:"dead_category_count"` + FailedCategoryCount int `json:"failed_category_count"` + FrequentCategoryCount int `json:"frequent_category_count"` + JobId string `json:"job_id"` + LogTime DateTime `json:"log_time"` + MemoryStatus memorystatus.MemoryStatus `json:"memory_status"` + ModelBytes ByteSize `json:"model_bytes"` + ModelBytesExceeded ByteSize `json:"model_bytes_exceeded,omitempty"` + ModelBytesMemoryLimit ByteSize `json:"model_bytes_memory_limit,omitempty"` + OutputMemoryAllocatorBytes ByteSize `json:"output_memory_allocator_bytes,omitempty"` + PeakModelBytes ByteSize `json:"peak_model_bytes,omitempty"` + RareCategoryCount int `json:"rare_category_count"` + ResultType string `json:"result_type"` + Timestamp *int64 `json:"timestamp,omitempty"` + TotalByFieldCount int64 `json:"total_by_field_count"` + TotalCategoryCount int `json:"total_category_count"` + TotalOverFieldCount int64 `json:"total_over_field_count"` + TotalPartitionFieldCount int64 `json:"total_partition_field_count"` +} + +func (s *ModelSizeStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_memory_basis": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentMemoryBasis", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentMemoryBasis = &o + + case "bucket_allocation_failures_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BucketAllocationFailuresCount", err) + } + s.BucketAllocationFailuresCount = value + case float64: + f := int64(v) + s.BucketAllocationFailuresCount = f + } + + case "categorization_status": + if err := dec.Decode(&s.CategorizationStatus); err != nil { + return fmt.Errorf("%s | %w", "CategorizationStatus", err) + } + + case "categorized_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CategorizedDocCount", err) + } + s.CategorizedDocCount = value + case float64: + f := int(v) + s.CategorizedDocCount = f + } + + case "dead_category_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DeadCategoryCount", err) + } + s.DeadCategoryCount = value + case float64: + f := int(v) + s.DeadCategoryCount = f + } + + case "failed_category_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailedCategoryCount", err) + } + s.FailedCategoryCount = value + case float64: + f := int(v) + s.FailedCategoryCount = f + } + + case 
"frequent_category_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FrequentCategoryCount", err) + } + s.FrequentCategoryCount = value + case float64: + f := int(v) + s.FrequentCategoryCount = f + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "log_time": + if err := dec.Decode(&s.LogTime); err != nil { + return fmt.Errorf("%s | %w", "LogTime", err) + } + + case "memory_status": + if err := dec.Decode(&s.MemoryStatus); err != nil { + return fmt.Errorf("%s | %w", "MemoryStatus", err) + } + + case "model_bytes": + if err := dec.Decode(&s.ModelBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelBytes", err) + } + + case "model_bytes_exceeded": + if err := dec.Decode(&s.ModelBytesExceeded); err != nil { + return fmt.Errorf("%s | %w", "ModelBytesExceeded", err) + } + + case "model_bytes_memory_limit": + if err := dec.Decode(&s.ModelBytesMemoryLimit); err != nil { + return fmt.Errorf("%s | %w", "ModelBytesMemoryLimit", err) + } + + case "output_memory_allocator_bytes": + if err := dec.Decode(&s.OutputMemoryAllocatorBytes); err != nil { + return fmt.Errorf("%s | %w", "OutputMemoryAllocatorBytes", err) + } + + case "peak_model_bytes": + if err := dec.Decode(&s.PeakModelBytes); err != nil { + return fmt.Errorf("%s | %w", "PeakModelBytes", err) + } + + case "rare_category_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RareCategoryCount", err) + } + s.RareCategoryCount = value + case float64: + f := int(v) + s.RareCategoryCount = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "total_by_field_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalByFieldCount", err) + } + s.TotalByFieldCount = value + case float64: + f := int64(v) + s.TotalByFieldCount = f + } + + case "total_category_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCategoryCount", err) + } + s.TotalCategoryCount = value + case float64: + f := int(v) + s.TotalCategoryCount = f + } + + case "total_over_field_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalOverFieldCount", err) + } + s.TotalOverFieldCount = value + case float64: + f := int64(v) + s.TotalOverFieldCount = f + } + + case "total_partition_field_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalPartitionFieldCount", err) + } + 
s.TotalPartitionFieldCount = value + case float64: + f := int64(v) + s.TotalPartitionFieldCount = f + } + + } + } + return nil +} + +// NewModelSizeStats returns a ModelSizeStats. +func NewModelSizeStats() *ModelSizeStats { + r := &ModelSizeStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshot.go new file mode 100644 index 000000000..6212e9525 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshot.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ModelSnapshot type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L25-L46 +type ModelSnapshot struct { + // Description An optional description of the job. + Description *string `json:"description,omitempty"` + // JobId A numerical character string that uniquely identifies the job that the + // snapshot was created for. + JobId string `json:"job_id"` + // LatestRecordTimeStamp The timestamp of the latest processed record. + LatestRecordTimeStamp *int `json:"latest_record_time_stamp,omitempty"` + // LatestResultTimeStamp The timestamp of the latest bucket result. + LatestResultTimeStamp *int `json:"latest_result_time_stamp,omitempty"` + // MinVersion The minimum version required to be able to restore the model snapshot. + MinVersion string `json:"min_version"` + // ModelSizeStats Summary information describing the model. + ModelSizeStats *ModelSizeStats `json:"model_size_stats,omitempty"` + // Retain If true, this snapshot will not be deleted during automatic cleanup of + // snapshots older than model_snapshot_retention_days. However, this snapshot + // will be deleted when the job is deleted. The default value is false. + Retain bool `json:"retain"` + // SnapshotDocCount For internal use only. + SnapshotDocCount int64 `json:"snapshot_doc_count"` + // SnapshotId A numerical character string that uniquely identifies the model snapshot. + SnapshotId string `json:"snapshot_id"` + // Timestamp The creation timestamp for the snapshot. 
+ Timestamp int64 `json:"timestamp"` +} + +func (s *ModelSnapshot) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "latest_record_time_stamp": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LatestRecordTimeStamp", err) + } + s.LatestRecordTimeStamp = &value + case float64: + f := int(v) + s.LatestRecordTimeStamp = &f + } + + case "latest_result_time_stamp": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LatestResultTimeStamp", err) + } + s.LatestResultTimeStamp = &value + case float64: + f := int(v) + s.LatestResultTimeStamp = &f + } + + case "min_version": + if err := dec.Decode(&s.MinVersion); err != nil { + return fmt.Errorf("%s | %w", "MinVersion", err) + } + + case "model_size_stats": + if err := dec.Decode(&s.ModelSizeStats); err != nil { + return fmt.Errorf("%s | %w", "ModelSizeStats", err) + } + + case "retain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Retain", err) + } + s.Retain = value + case bool: + s.Retain = v + } + + case "snapshot_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SnapshotDocCount", err) + } + s.SnapshotDocCount = value + case float64: + f := int64(v) + s.SnapshotDocCount = f + } + + case "snapshot_id": + if err := dec.Decode(&s.SnapshotId); err != nil { + return fmt.Errorf("%s | %w", "SnapshotId", err) + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = value + case float64: + f := int64(v) + s.Timestamp = f + } + + } + } + return nil +} + +// NewModelSnapshot returns a ModelSnapshot. +func NewModelSnapshot() *ModelSnapshot { + r := &ModelSnapshot{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshotupgrade.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshotupgrade.go new file mode 100644 index 000000000..a84eedb53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/modelsnapshotupgrade.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotupgradestate" +) + +// ModelSnapshotUpgrade type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Model.ts#L48-L57 +type ModelSnapshotUpgrade struct { + AssignmentExplanation string `json:"assignment_explanation"` + JobId string `json:"job_id"` + Node DiscoveryNode `json:"node"` + SnapshotId string `json:"snapshot_id"` + State snapshotupgradestate.SnapshotUpgradeState `json:"state"` +} + +func (s *ModelSnapshotUpgrade) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AssignmentExplanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "snapshot_id": + if err := dec.Decode(&s.SnapshotId); err != nil { + return fmt.Errorf("%s | %w", "SnapshotId", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + } + } + return nil +} + +// NewModelSnapshotUpgrade returns a ModelSnapshotUpgrade. +func NewModelSnapshotUpgrade() *ModelSnapshotUpgrade { + r := &ModelSnapshotUpgrade{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/monitoring.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/monitoring.go new file mode 100644 index 000000000..1dac33c80 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/monitoring.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Monitoring type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L387-L390 +type Monitoring struct { + Available bool `json:"available"` + CollectionEnabled bool `json:"collection_enabled"` + Enabled bool `json:"enabled"` + EnabledExporters map[string]int64 `json:"enabled_exporters"` +} + +func (s *Monitoring) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "collection_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollectionEnabled", err) + } + s.CollectionEnabled = value + case bool: + s.CollectionEnabled = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "enabled_exporters": + if s.EnabledExporters == nil { + s.EnabledExporters = make(map[string]int64, 0) + } + if err := dec.Decode(&s.EnabledExporters); err != nil { + return fmt.Errorf("%s | %w", "EnabledExporters", err) + } + + } + } + return nil +} + +// NewMonitoring returns a Monitoring. +func NewMonitoring() *Monitoring { + r := &Monitoring{ + EnabledExporters: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/morelikethisquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/morelikethisquery.go new file mode 100644 index 000000000..6e699fc60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/morelikethisquery.go @@ -0,0 +1,370 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
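
Editor's note: the Monitoring type above normalizes booleans that may arrive either as JSON booleans or as quoted strings. A minimal decoding sketch under that assumption; the payload literal is invented for illustration, not taken from a real xpack usage response:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "enabled" is deliberately a quoted string to exercise the lenient
	// string-or-bool branch in the generated UnmarshalJSON.
	payload := []byte(`{"available":true,"collection_enabled":false,"enabled":"true","enabled_exporters":{"local":1}}`)

	m := types.NewMonitoring()
	if err := json.Unmarshal(payload, m); err != nil {
		panic(err)
	}
	fmt.Println(m.Enabled, m.EnabledExporters["local"]) // true 1
}
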
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// MoreLikeThisQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L87-L172 +type MoreLikeThisQuery struct { + // Analyzer The analyzer that is used to analyze the free form text. + // Defaults to the analyzer associated with the first field in fields. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // BoostTerms Each term in the formed query could be further boosted by their tf-idf score. + // This sets the boost factor to use when using this feature. + // Defaults to deactivated (0). + BoostTerms *Float64 `json:"boost_terms,omitempty"` + // FailOnUnsupportedField Controls whether the query should fail (throw an exception) if any of the + // specified fields are not of the supported types (`text` or `keyword`). + FailOnUnsupportedField *bool `json:"fail_on_unsupported_field,omitempty"` + // Fields A list of fields to fetch and analyze the text from. + // Defaults to the `index.query.default_field` index setting, which has a + // default value of `*`. + Fields []string `json:"fields,omitempty"` + // Include Specifies whether the input documents should also be included in the search + // results returned. + Include *bool `json:"include,omitempty"` + // Like Specifies free form text and/or a single or multiple documents for which you + // want to find similar documents. + Like []Like `json:"like"` + // MaxDocFreq The maximum document frequency above which the terms are ignored from the + // input document. + MaxDocFreq *int `json:"max_doc_freq,omitempty"` + // MaxQueryTerms The maximum number of query terms that can be selected. + MaxQueryTerms *int `json:"max_query_terms,omitempty"` + // MaxWordLength The maximum word length above which the terms are ignored. + // Defaults to unbounded (`0`). + MaxWordLength *int `json:"max_word_length,omitempty"` + // MinDocFreq The minimum document frequency below which the terms are ignored from the + // input document. + MinDocFreq *int `json:"min_doc_freq,omitempty"` + // MinTermFreq The minimum term frequency below which the terms are ignored from the input + // document. + MinTermFreq *int `json:"min_term_freq,omitempty"` + // MinWordLength The minimum word length below which the terms are ignored. + MinWordLength *int `json:"min_word_length,omitempty"` + // MinimumShouldMatch After the disjunctive query has been formed, this parameter controls the + // number of terms that must match. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Routing *string `json:"routing,omitempty"` + // StopWords An array of stop words. + // Any word in this set is ignored. + StopWords StopWords `json:"stop_words,omitempty"` + // Unlike Used in combination with `like` to exclude documents that match a set of + // terms. 
+ Unlike []Like `json:"unlike,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *MoreLikeThisQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "boost_terms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BoostTerms", err) + } + f := Float64(value) + s.BoostTerms = &f + case float64: + f := Float64(v) + s.BoostTerms = &f + } + + case "fail_on_unsupported_field": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailOnUnsupportedField", err) + } + s.FailOnUnsupportedField = &value + case bool: + s.FailOnUnsupportedField = &v + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "include": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = &value + case bool: + s.Include = &v + } + + case "like": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(Like) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Like", err) + } + + s.Like = append(s.Like, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Like); err != nil { + return fmt.Errorf("%s | %w", "Like", err) + } + } + + case "max_doc_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocFreq", err) + } + s.MaxDocFreq = &value + case float64: + f := int(v) + s.MaxDocFreq = &f + } + + case "max_query_terms": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxQueryTerms", err) + } + s.MaxQueryTerms = &value + case float64: + f := int(v) + s.MaxQueryTerms = &f + } + + case "max_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWordLength", err) + } + s.MaxWordLength = &value + case float64: + f := int(v) + s.MaxWordLength = &f + } + + case "min_doc_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocFreq", err) + } + s.MinDocFreq = &value + case float64: + f := int(v) + s.MinDocFreq = &f + } + + 
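 // Editor's note (not part of the generated file): every numeric case in
 // this switch follows the same lenient pattern: decode into `any`, then
 // accept either a JSON string (parsed with strconv.Atoi/ParseInt/ParseFloat)
 // or a JSON number (which encoding/json delivers as float64), so
 // `"min_doc_freq": 5` and `"min_doc_freq": "5"` populate the same *int
 // field. String fields use the parallel json.RawMessage plus
 // strconv.Unquote fallback seen above.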
case "min_term_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinTermFreq", err) + } + s.MinTermFreq = &value + case float64: + f := int(v) + s.MinTermFreq = &f + } + + case "min_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordLength", err) + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "stop_words": + if err := dec.Decode(&s.StopWords); err != nil { + return fmt.Errorf("%s | %w", "StopWords", err) + } + + case "unlike": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(Like) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Unlike", err) + } + + s.Unlike = append(s.Unlike, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Unlike); err != nil { + return fmt.Errorf("%s | %w", "Unlike", err) + } + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewMoreLikeThisQuery returns a MoreLikeThisQuery. +func NewMoreLikeThisQuery() *MoreLikeThisQuery { + r := &MoreLikeThisQuery{} + + return r +} + +type MoreLikeThisQueryVariant interface { + MoreLikeThisQueryCaster() *MoreLikeThisQuery +} + +func (s *MoreLikeThisQuery) MoreLikeThisQueryCaster() *MoreLikeThisQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mountedsnapshot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mountedsnapshot.go new file mode 100644 index 000000000..5667814c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mountedsnapshot.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MountedSnapshot type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/mount/types.ts#L23-L27 +type MountedSnapshot struct { + Indices []string `json:"indices"` + Shards ShardStatistics `json:"shards"` + Snapshot string `json:"snapshot"` +} + +func (s *MountedSnapshot) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "snapshot": + if err := dec.Decode(&s.Snapshot); err != nil { + return fmt.Errorf("%s | %w", "Snapshot", err) + } + + } + } + return nil +} + +// NewMountedSnapshot returns a MountedSnapshot. +func NewMountedSnapshot() *MountedSnapshot { + r := &MountedSnapshot{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingaverageaggregation.go new file mode 100644 index 000000000..d09ca309f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingaverageaggregation.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
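
Editor's note: MountedSnapshot's decoder (above) accepts "indices" either as a single JSON string or as an array, via the bytes.HasPrefix check. A short sketch with an invented payload; the shards counts assume the usual failed/successful/total shape of ShardStatistics:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// A bare string for "indices" is normalized into a one-element slice.
	payload := []byte(`{"snapshot":"snap-1","indices":"my-index","shards":{"failed":0,"successful":1,"total":1}}`)

	ms := types.NewMountedSnapshot()
	if err := json.Unmarshal(payload, ms); err != nil {
		panic(err)
	}
	fmt.Println(ms.Snapshot, ms.Indices) // snap-1 [my-index]
}
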
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MovingAverageAggregation holds the union for the following types: +// +// LinearMovingAverageAggregation +// SimpleMovingAverageAggregation +// EwmaMovingAverageAggregation +// HoltMovingAverageAggregation +// HoltWintersMovingAverageAggregation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L254-L260 +type MovingAverageAggregation any + +type MovingAverageAggregationVariant interface { + MovingAverageAggregationCaster() *MovingAverageAggregation +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingfunctionaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingfunctionaggregation.go new file mode 100644 index 000000000..e0ce70137 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingfunctionaggregation.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// MovingFunctionAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L314-L332 +type MovingFunctionAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Script The script that should be executed on each window of data. + Script *string `json:"script,omitempty"` + // Shift By default, the window consists of the last n values excluding the current + // bucket. + // Increasing `shift` by 1, moves the starting window position by 1 to the + // right. + Shift *int `json:"shift,omitempty"` + // Window The size of window to "slide" across the histogram. 
+ Window *int `json:"window,omitempty"` +} + +func (s *MovingFunctionAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "script": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Script = &o + + case "shift": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shift", err) + } + s.Shift = &value + case float64: + f := int(v) + s.Shift = &f + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// NewMovingFunctionAggregation returns a MovingFunctionAggregation. +func NewMovingFunctionAggregation() *MovingFunctionAggregation { + r := &MovingFunctionAggregation{} + + return r +} + +type MovingFunctionAggregationVariant interface { + MovingFunctionAggregationCaster() *MovingFunctionAggregation +} + +func (s *MovingFunctionAggregation) MovingFunctionAggregationCaster() *MovingFunctionAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingpercentilesaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingpercentilesaggregation.go new file mode 100644 index 000000000..4e09098c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/movingpercentilesaggregation.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
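
Editor's note: a hedged sketch of populating the MovingFunctionAggregation defined above. It assumes BucketsPath, a union type defined elsewhere in this package, accepts a plain string path; the script, window, and shift values are illustrative, not defaults:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func strPtr(s string) *string { return &s }
func intPtr(i int) *int       { return &i }

func main() {
	agg := types.NewMovingFunctionAggregation()
	agg.BucketsPath = "the_sum" // metric to slide the window across
	agg.Script = strPtr("MovingFunctions.unweightedAvg(values)")
	agg.Window = intPtr(10)
	// Per the doc comment above, a shift of 1 moves the window one position
	// right, so it includes the current bucket.
	agg.Shift = intPtr(1)

	out, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
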
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// MovingPercentilesAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L334-L349 +type MovingPercentilesAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Keyed *bool `json:"keyed,omitempty"` + // Shift By default, the window consists of the last n values excluding the current + // bucket. + // Increasing `shift` by 1, moves the starting window position by 1 to the + // right. + Shift *int `json:"shift,omitempty"` + // Window The size of window to "slide" across the histogram. + Window *int `json:"window,omitempty"` +} + +func (s *MovingPercentilesAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "shift": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shift", err) + } + s.Shift = &value + case float64: + f := int(v) + s.Shift = &f + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// NewMovingPercentilesAggregation returns a MovingPercentilesAggregation. 
+func NewMovingPercentilesAggregation() *MovingPercentilesAggregation { + r := &MovingPercentilesAggregation{} + + return r +} + +type MovingPercentilesAggregationVariant interface { + MovingPercentilesAggregationCaster() *MovingPercentilesAggregation +} + +func (s *MovingPercentilesAggregation) MovingPercentilesAggregationCaster() *MovingPercentilesAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchrequestitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchrequestitem.go new file mode 100644 index 000000000..543c1dfa3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchrequestitem.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MsearchRequestItem holds the union for the following types: +// +// MultisearchHeader +// SearchRequestBody +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/types.ts#L26-L29 +type MsearchRequestItem any + +type MsearchRequestItemVariant interface { + MsearchRequestItemCaster() *MsearchRequestItem +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchresponseitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchresponseitem.go new file mode 100644 index 000000000..2452ff5c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/msearchresponseitem.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
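
Editor's note: MsearchRequestItem above (like MovingAverageAggregation earlier) is an open union declared as `any`, so either variant satisfies it and a consumer recovers the concrete type with a type switch. A sketch assuming the package's uniform New* constructors exist for both variants, which are defined elsewhere in this package:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// An msearch body alternates header and body items, one per NDJSON line.
	items := []types.MsearchRequestItem{
		types.NewMultisearchHeader(),
		types.NewSearchRequestBody(),
	}
	for _, it := range items {
		switch it.(type) {
		case *types.MultisearchHeader:
			fmt.Println("header item")
		case *types.SearchRequestBody:
			fmt.Println("body item")
		}
	}
}
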
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// MsearchResponseItem holds the union for the following types: +// +// MultiSearchItem +// ErrorResponseBase +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/types.ts#L53-L56 +type MsearchResponseItem any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mtermvectorsoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mtermvectorsoperation.go new file mode 100644 index 000000000..e97dd2c5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mtermvectorsoperation.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// MTermVectorsOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mtermvectors/types.ts#L35-L94 +type MTermVectorsOperation struct { + // Doc An artificial document (a document not present in the index) for which you + // want to retrieve term vectors. + Doc json.RawMessage `json:"doc,omitempty"` + // FieldStatistics If `true`, the response includes the document count, sum of document + // frequencies, and sum of total term frequencies. + FieldStatistics *bool `json:"field_statistics,omitempty"` + // Fields Comma-separated list or wildcard expressions of fields to include in the + // statistics. + // Used as the default list unless a specific field list is provided in the + // `completion_fields` or `fielddata_fields` parameters. + Fields []string `json:"fields,omitempty"` + // Filter Filter terms based on their tf-idf scores. + Filter *TermVectorsFilter `json:"filter,omitempty"` + // Id_ The ID of the document. + Id_ *string `json:"_id,omitempty"` + // Index_ The index of the document. + Index_ *string `json:"_index,omitempty"` + // Offsets If `true`, the response includes term offsets. + Offsets *bool `json:"offsets,omitempty"` + // Payloads If `true`, the response includes term payloads. + Payloads *bool `json:"payloads,omitempty"` + // Positions If `true`, the response includes term positions. + Positions *bool `json:"positions,omitempty"` + // Routing Custom value used to route operations to a specific shard. 
+ Routing *string `json:"routing,omitempty"` + // TermStatistics If true, the response includes term frequency and document frequency. + TermStatistics *bool `json:"term_statistics,omitempty"` + // Version If `true`, returns the document version as part of a hit. + Version *int64 `json:"version,omitempty"` + // VersionType Specific version type. + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *MTermVectorsOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "field_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FieldStatistics", err) + } + s.FieldStatistics = &value + case bool: + s.FieldStatistics = &v + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "offsets": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offsets", err) + } + s.Offsets = &value + case bool: + s.Offsets = &v + } + + case "payloads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Payloads", err) + } + s.Payloads = &value + case bool: + s.Payloads = &v + } + + case "positions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Positions", err) + } + s.Positions = &value + case bool: + s.Positions = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "term_statistics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TermStatistics", err) + } + s.TermStatistics = &value + case bool: + s.TermStatistics = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewMTermVectorsOperation returns a MTermVectorsOperation. 
+func NewMTermVectorsOperation() *MTermVectorsOperation { + r := &MTermVectorsOperation{} + + return r +} + +type MTermVectorsOperationVariant interface { + MTermVectorsOperationCaster() *MTermVectorsOperation +} + +func (s *MTermVectorsOperation) MTermVectorsOperationCaster() *MTermVectorsOperation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multigeterror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multigeterror.go new file mode 100644 index 000000000..3bdab3d83 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multigeterror.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// MultiGetError type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mget/types.ts#L62-L66 +type MultiGetError struct { + Error ErrorCause `json:"error"` + Id_ string `json:"_id"` + Index_ string `json:"_index"` +} + +func (s *MultiGetError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + } + } + return nil +} + +// NewMultiGetError returns a MultiGetError. +func NewMultiGetError() *MultiGetError { + r := &MultiGetError{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go new file mode 100644 index 000000000..6d05af28d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go
new file mode 100644
index 000000000..6d05af28d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multimatchquery.go
@@ -0,0 +1,344 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery"
+)
+
+// MultiMatchQuery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L522-L608
+type MultiMatchQuery struct {
+	// Analyzer Analyzer used to convert the text in the query value into tokens.
+	Analyzer *string `json:"analyzer,omitempty"`
+	// AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term
+	// synonyms.
+	AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"`
+	// Boost Floating point number used to decrease or increase the relevance scores of
+	// the query.
+	// Boost values are relative to the default value of 1.0.
+	// A boost value between 0 and 1.0 decreases the relevance score.
+	// A value greater than 1.0 increases the relevance score.
+	Boost *float32 `json:"boost,omitempty"`
+	CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"`
+	// Fields The fields to be queried.
+	// Defaults to the `index.query.default_field` index settings, which in turn
+	// defaults to `*`.
+	Fields []string `json:"fields,omitempty"`
+	// Fuzziness Maximum edit distance allowed for matching.
+	Fuzziness Fuzziness `json:"fuzziness,omitempty"`
+	// FuzzyRewrite Method used to rewrite the query.
+	FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"`
+	// FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent
+	// characters (for example, `ab` to `ba`).
+	// Can be applied to the term subqueries constructed for all terms but the final
+	// term.
+	FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"`
+	// Lenient If `true`, format-based errors, such as providing a text query value for a
+	// numeric field, are ignored.
+	Lenient *bool `json:"lenient,omitempty"`
+	// MaxExpansions Maximum number of terms to which the query will expand.
+	MaxExpansions *int `json:"max_expansions,omitempty"`
+	// MinimumShouldMatch Minimum number of clauses that must match for a document to be returned.
+	MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"`
+	// Operator Boolean logic used to interpret text in the query value.
+	Operator *operator.Operator `json:"operator,omitempty"`
+	// PrefixLength Number of beginning characters left unchanged for fuzzy matching.
+	PrefixLength *int `json:"prefix_length,omitempty"`
+	// Query Text, number, boolean value or date you wish to find in the provided field.
+	Query string `json:"query"`
+	QueryName_ *string `json:"_name,omitempty"`
+	// Slop Maximum number of positions allowed between matching tokens.
+	Slop *int `json:"slop,omitempty"`
+	// TieBreaker Determines how scores for each per-term blended query and scores across
+	// groups are combined.
+	TieBreaker *Float64 `json:"tie_breaker,omitempty"`
+	// Type How the `multi_match` query is executed internally.
+	Type *textquerytype.TextQueryType `json:"type,omitempty"`
+	// ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all
+	// tokens, such as when using a `stop` filter.
+	ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"`
+}
+
+func (s *MultiMatchQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "analyzer":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Analyzer", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Analyzer = &o
+
+		case "auto_generate_synonyms_phrase_query":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AutoGenerateSynonymsPhraseQuery", err)
+				}
+				s.AutoGenerateSynonymsPhraseQuery = &value
+			case bool:
+				s.AutoGenerateSynonymsPhraseQuery = &v
+			}
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "cutoff_frequency":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "CutoffFrequency", err)
+				}
+				f := Float64(value)
+				s.CutoffFrequency = &f
+			case float64:
+				f := Float64(v)
+				s.CutoffFrequency = &f
+			}
+
+		case "fields":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Fields", err)
+				}
+
+				s.Fields = append(s.Fields, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil {
+					return fmt.Errorf("%s | %w", "Fields", err)
+				}
+			}
+
+		case "fuzziness":
+			if err := dec.Decode(&s.Fuzziness); err != nil {
+				return fmt.Errorf("%s | %w", "Fuzziness", err)
+			}
+
+		case "fuzzy_rewrite":
+			if err := dec.Decode(&s.FuzzyRewrite); err != nil {
+				return fmt.Errorf("%s | %w", "FuzzyRewrite", err)
+			}
+
+		case "fuzzy_transpositions":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FuzzyTranspositions", err)
+				}
+				s.FuzzyTranspositions = &value
+			case bool:
+				s.FuzzyTranspositions = &v
+			}
+
+		case "lenient":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Lenient", err)
+				}
+				s.Lenient = &value
+			case bool:
+				s.Lenient = &v
+			}
+
+		case "max_expansions":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxExpansions", err)
+				}
+				s.MaxExpansions = &value
+			case float64:
+				f := int(v)
+				s.MaxExpansions = &f
+			}
+
+		case "minimum_should_match":
+			if err := dec.Decode(&s.MinimumShouldMatch); err != nil {
+				return fmt.Errorf("%s | %w", "MinimumShouldMatch", err)
+			}
+
+		case "operator":
+			if err := dec.Decode(&s.Operator); err != nil {
+				return fmt.Errorf("%s | %w", "Operator", err)
+			}
+
+		case "prefix_length":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PrefixLength", err)
+				}
+				s.PrefixLength = &value
+			case float64:
+				f := int(v)
+				s.PrefixLength = &f
+			}
+
+		case "query":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Query = o
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		case "slop":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Slop", err)
+				}
+				s.Slop = &value
+			case float64:
+				f := int(v)
+				s.Slop = &f
+			}
+
+		case "tie_breaker":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TieBreaker", err)
+				}
+				f := Float64(value)
+				s.TieBreaker = &f
+			case float64:
+				f := Float64(v)
+				s.TieBreaker = &f
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "zero_terms_query":
+			if err := dec.Decode(&s.ZeroTermsQuery); err != nil {
+				return fmt.Errorf("%s | %w", "ZeroTermsQuery", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultiMatchQuery returns a MultiMatchQuery.
+func NewMultiMatchQuery() *MultiMatchQuery {
+	r := &MultiMatchQuery{}
+
+	return r
+}
+
+type MultiMatchQueryVariant interface {
+	MultiMatchQueryCaster() *MultiMatchQuery
+}
+
+func (s *MultiMatchQuery) MultiMatchQueryCaster() *MultiMatchQuery {
+	return s
+}
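Editor's note: a minimal usage sketch for the type above, not part of the vendored diff. It assumes the v9 typedapi packages added by this update and that `types.Query` exposes a `MultiMatch` field, as in earlier client majors; the index and field names are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator"
)

func main() {
	boost := float32(1.2)
	q := types.Query{
		MultiMatch: &types.MultiMatchQuery{
			Query:    "quick brown fox",            // text to search for
			Fields:   []string{"title^2", "body"},  // per-field boosting via the ^ syntax
			Operator: &operator.And,                // every term must match
			Boost:    &boost,
		},
	}
	out, _ := json.Marshal(q)
	fmt.Println(string(out)) // the serialized multi_match clause
}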
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multiplexertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multiplexertokenfilter.go
new file mode 100644
index 000000000..17b24b46a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multiplexertokenfilter.go
@@ -0,0 +1,117 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// MultiplexerTokenFilter type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L356-L362
+type MultiplexerTokenFilter struct {
+	// Filters A list of token filters to apply to incoming tokens.
+	Filters []string `json:"filters"`
+	// PreserveOriginal If `true` (the default) then emit the original token in addition to the
+	// filtered tokens.
+	PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"`
+	Type string `json:"type,omitempty"`
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *MultiplexerTokenFilter) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "filters":
+			if err := dec.Decode(&s.Filters); err != nil {
+				return fmt.Errorf("%s | %w", "Filters", err)
+			}
+
+		case "preserve_original":
+			if err := dec.Decode(&s.PreserveOriginal); err != nil {
+				return fmt.Errorf("%s | %w", "PreserveOriginal", err)
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s MultiplexerTokenFilter) MarshalJSON() ([]byte, error) {
+	type innerMultiplexerTokenFilter MultiplexerTokenFilter
+	tmp := innerMultiplexerTokenFilter{
+		Filters:          s.Filters,
+		PreserveOriginal: s.PreserveOriginal,
+		Type:             s.Type,
+		Version:          s.Version,
+	}
+
+	tmp.Type = "multiplexer"
+
+	return json.Marshal(tmp)
+}
+
+// NewMultiplexerTokenFilter returns a MultiplexerTokenFilter.
+func NewMultiplexerTokenFilter() *MultiplexerTokenFilter {
+	r := &MultiplexerTokenFilter{}
+
+	return r
+}
+
+type MultiplexerTokenFilterVariant interface {
+	MultiplexerTokenFilterCaster() *MultiplexerTokenFilter
+}
+
+func (s *MultiplexerTokenFilter) MultiplexerTokenFilterCaster() *MultiplexerTokenFilter {
+	return s
+}
+
+func (s *MultiplexerTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition {
+	o := TokenFilterDefinition(s)
+	return &o
+}
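Editor's note: a short sketch, not part of the diff, showing the MarshalJSON override above in action. The `"type"` discriminator is always forced to `"multiplexer"`, so callers never set it themselves; the filter names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewMultiplexerTokenFilter()
	// Each entry is a comma-separated chain applied to a copy of the token.
	f.Filters = []string{"lowercase", "lowercase, porter_stem"}
	out, _ := json.Marshal(f)
	// Should print roughly: {"filters":["lowercase","lowercase, porter_stem"],"type":"multiplexer"}
	fmt.Println(string(out))
}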
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchheader.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchheader.go
new file mode 100644
index 000000000..95cd04aaf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchheader.go
@@ -0,0 +1,233 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype"
+)
+
+// MultisearchHeader type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/types.ts#L31-L46
+type MultisearchHeader struct {
+	AllowNoIndices *bool `json:"allow_no_indices,omitempty"`
+	AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"`
+	CcsMinimizeRoundtrips *bool `json:"ccs_minimize_roundtrips,omitempty"`
+	ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"`
+	IgnoreThrottled *bool `json:"ignore_throttled,omitempty"`
+	IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"`
+	Index []string `json:"index,omitempty"`
+	Preference *string `json:"preference,omitempty"`
+	RequestCache *bool `json:"request_cache,omitempty"`
+	Routing *string `json:"routing,omitempty"`
+	SearchType *searchtype.SearchType `json:"search_type,omitempty"`
+}
+
+func (s *MultisearchHeader) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_no_indices":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowNoIndices", err)
+				}
+				s.AllowNoIndices = &value
+			case bool:
+				s.AllowNoIndices = &v
+			}
+
+		case "allow_partial_search_results":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowPartialSearchResults", err)
+				}
+				s.AllowPartialSearchResults = &value
+			case bool:
+				s.AllowPartialSearchResults = &v
+			}
+
+		case "ccs_minimize_roundtrips":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "CcsMinimizeRoundtrips", err)
+				}
+				s.CcsMinimizeRoundtrips = &value
+			case bool:
+				s.CcsMinimizeRoundtrips = &v
+			}
+
+		case "expand_wildcards":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := &expandwildcard.ExpandWildcard{}
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "ExpandWildcards", err)
+				}
+
+				s.ExpandWildcards = append(s.ExpandWildcards, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil {
+					return fmt.Errorf("%s | %w", "ExpandWildcards", err)
+				}
+			}
+
+		case "ignore_throttled":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreThrottled", err)
+				}
+				s.IgnoreThrottled = &value
+			case bool:
+				s.IgnoreThrottled = &v
+			}
+
+		case "ignore_unavailable":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreUnavailable", err)
+				}
+				s.IgnoreUnavailable = &value
+			case bool:
+				s.IgnoreUnavailable = &v
+			}
+
+		case "index":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Index", err)
+				}
+
+				s.Index = append(s.Index, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil {
+					return fmt.Errorf("%s | %w", "Index", err)
+				}
+			}
+
+		case "preference":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Preference", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Preference = &o
+
+		case "request_cache":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RequestCache", err)
+				}
+				s.RequestCache = &value
+			case bool:
+				s.RequestCache = &v
+			}
+
+		case "routing":
+			if err := dec.Decode(&s.Routing); err != nil {
+				return fmt.Errorf("%s | %w", "Routing", err)
+			}
+
+		case "search_type":
+			if err := dec.Decode(&s.SearchType); err != nil {
+				return fmt.Errorf("%s | %w", "SearchType", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultisearchHeader returns a MultisearchHeader.
+func NewMultisearchHeader() *MultisearchHeader {
+	r := &MultisearchHeader{}
+
+	return r
+}
+
+type MultisearchHeaderVariant interface {
+	MultisearchHeaderCaster() *MultisearchHeader
+}
+
+func (s *MultisearchHeader) MultisearchHeaderCaster() *MultisearchHeader {
+	return s
+}
+
+func (s *MultisearchHeader) MsearchRequestItemCaster() *MsearchRequestItem {
+	o := MsearchRequestItem(s)
+	return &o
+}
+
+func (s *MultisearchHeader) RequestItemCaster() *RequestItem {
+	o := RequestItem(s)
+	return &o
+}
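Editor's note: a minimal sketch, not part of the diff, of the lenient decoding the UnmarshalJSON above implements: list-typed fields such as `index` accept a bare value or an array, and booleans may arrive as real booleans or quoted strings. The index name is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var h types.MultisearchHeader
	// "index" is a bare string rather than an array; "ignore_unavailable" is a quoted bool.
	raw := []byte(`{"index":"logs-2024","ignore_unavailable":"true"}`)
	if err := json.Unmarshal(raw, &h); err != nil {
		panic(err)
	}
	fmt.Println(h.Index, *h.IgnoreUnavailable) // should print: [logs-2024] true
}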
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchitem.go
new file mode 100644
index 000000000..29300ce24
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multisearchitem.go
@@ -0,0 +1,830 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// MultiSearchItem type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch/types.ts#L58-L61
+type MultiSearchItem struct {
+	Aggregations map[string]Aggregate `json:"aggregations,omitempty"`
+	Clusters_ *ClusterStatistics `json:"_clusters,omitempty"`
+	Fields map[string]json.RawMessage `json:"fields,omitempty"`
+	// Hits The returned documents and metadata.
+	Hits HitsMetadata `json:"hits"`
+	MaxScore *Float64 `json:"max_score,omitempty"`
+	NumReducePhases *int64 `json:"num_reduce_phases,omitempty"`
+	PitId *string `json:"pit_id,omitempty"`
+	Profile *Profile `json:"profile,omitempty"`
+	// ScrollId_ The identifier for the search and its search context.
+	// You can use this scroll ID with the scroll API to retrieve the next batch of
+	// search results for the request.
+	// This property is returned only if the `scroll` query parameter is specified
+	// in the request.
+	ScrollId_ *string `json:"_scroll_id,omitempty"`
+	// Shards_ A count of shards used for the request.
+	Shards_ ShardStatistics `json:"_shards"`
+	Status *int `json:"status,omitempty"`
+	Suggest map[string][]Suggest `json:"suggest,omitempty"`
+	TerminatedEarly *bool `json:"terminated_early,omitempty"`
+	// TimedOut If `true`, the request timed out before completion; returned results may be
+	// partial or empty.
+	TimedOut bool `json:"timed_out"`
+	// Took The number of milliseconds it took Elasticsearch to run the request.
+	// This value is calculated by measuring the time elapsed between receipt of a
+	// request on the coordinating node and the time at which the coordinating node
+	// is ready to send the response.
+	// It includes:
+	//
+	// * Communication time between the coordinating node and data nodes
+	// * Time the request spends in the search thread pool, queued for execution
+	// * Actual run time
+	//
+	// It does not include:
+	//
+	// * Time needed to send the request to Elasticsearch
+	// * Time needed to serialize the JSON response
+	// * Time needed to send the response to a client
+	Took int64 `json:"took"`
+}
+
+func (s *MultiSearchItem) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "aggregations":
+			if s.Aggregations == nil {
+				s.Aggregations = make(map[string]Aggregate, 0)
+			}
+
+			for dec.More() {
+				tt, err := dec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return err
+				}
+				if value, ok := tt.(string); ok {
+					if strings.Contains(value, "#") {
+						elems := strings.Split(value, "#")
+						if len(elems) == 2 {
+							if s.Aggregations == nil {
+								s.Aggregations = make(map[string]Aggregate, 0)
+							}
+							switch elems[0] {
+
+							case "cardinality":
+								o := NewCardinalityAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "hdr_percentiles":
+								o := NewHdrPercentilesAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "hdr_percentile_ranks":
+								o := NewHdrPercentileRanksAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "tdigest_percentiles":
+								o := NewTDigestPercentilesAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "tdigest_percentile_ranks":
+								o := NewTDigestPercentileRanksAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "percentiles_bucket":
+								o := NewPercentilesBucketAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "median_absolute_deviation":
+								o := NewMedianAbsoluteDeviationAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "min":
+								o := NewMinAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "max":
+								o := NewMaxAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "sum":
+								o := NewSumAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "avg":
+								o := NewAvgAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "weighted_avg":
+								o := NewWeightedAvgAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "value_count":
+								o := NewValueCountAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "simple_value":
+								o := NewSimpleValueAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "derivative":
+								o := NewDerivativeAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "bucket_metric_value":
+								o := NewBucketMetricValueAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "stats":
+								o := NewStatsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "stats_bucket":
+								o := NewStatsBucketAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "extended_stats":
+								o := NewExtendedStatsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "extended_stats_bucket":
+								o := NewExtendedStatsBucketAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geo_bounds":
+								o := NewGeoBoundsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geo_centroid":
+								o := NewGeoCentroidAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "histogram":
+								o := NewHistogramAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "date_histogram":
+								o := NewDateHistogramAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "auto_date_histogram":
+								o := NewAutoDateHistogramAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "variable_width_histogram":
+								o := NewVariableWidthHistogramAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "sterms":
+								o := NewStringTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "lterms":
+								o := NewLongTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "dterms":
+								o := NewDoubleTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "umterms":
+								o := NewUnmappedTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "lrareterms":
+								o := NewLongRareTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "srareterms":
+								o := NewStringRareTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "umrareterms":
+								o := NewUnmappedRareTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "multi_terms":
+								o := NewMultiTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "missing":
+								o := NewMissingAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "nested":
+								o := NewNestedAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "reverse_nested":
+								o := NewReverseNestedAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "global":
+								o := NewGlobalAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "filter":
+								o := NewFilterAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "children":
+								o := NewChildrenAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "parent":
+								o := NewParentAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "sampler":
+								o := NewSamplerAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "unmapped_sampler":
+								o := NewUnmappedSamplerAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geohash_grid":
+								o := NewGeoHashGridAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geotile_grid":
+								o := NewGeoTileGridAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geohex_grid":
+								o := NewGeoHexGridAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "range":
+								o := NewRangeAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "date_range":
+								o := NewDateRangeAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geo_distance":
+								o := NewGeoDistanceAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "ip_range":
+								o := NewIpRangeAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "ip_prefix":
+								o := NewIpPrefixAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "filters":
+								o := NewFiltersAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "adjacency_matrix":
+								o := NewAdjacencyMatrixAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "siglterms":
+								o := NewSignificantLongTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "sigsterms":
+								o := NewSignificantStringTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "umsigterms":
+								o := NewUnmappedSignificantTermsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "composite":
+								o := NewCompositeAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "frequent_item_sets":
+								o := NewFrequentItemSetsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "time_series":
+								o := NewTimeSeriesAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "scripted_metric":
+								o := NewScriptedMetricAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "top_hits":
+								o := NewTopHitsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "inference":
+								o := NewInferenceAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "string_stats":
+								o := NewStringStatsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "boxplot":
+								o := NewBoxPlotAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "top_metrics":
+								o := NewTopMetricsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "t_test":
+								o := NewTTestAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "rate":
+								o := NewRateAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "simple_long_value":
+								o := NewCumulativeCardinalityAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "matrix_stats":
+								o := NewMatrixStatsAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							case "geo_line":
+								o := NewGeoLineAggregate()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+
+							default:
+								o := make(map[string]any, 0)
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Aggregations", err)
+								}
+								s.Aggregations[elems[1]] = o
+							}
+						} else {
+							return errors.New("cannot decode JSON for field Aggregations")
+						}
+					} else {
+						o := make(map[string]any, 0)
+						if err := dec.Decode(&o); err != nil {
+							return fmt.Errorf("%s | %w", "Aggregations", err)
+						}
+						s.Aggregations[value] = o
+					}
+				}
+			}
+
+		case "_clusters":
+			if err := dec.Decode(&s.Clusters_); err != nil {
+				return fmt.Errorf("%s | %w", "Clusters_", err)
+			}
+
+		case "fields":
+			if s.Fields == nil {
+				s.Fields = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Fields); err != nil {
+				return fmt.Errorf("%s | %w", "Fields", err)
+			}
+
+		case "hits":
+			if err := dec.Decode(&s.Hits); err != nil {
+				return fmt.Errorf("%s | %w", "Hits", err)
+			}
+
+		case "max_score":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxScore", err)
+				}
+				f := Float64(value)
+				s.MaxScore = &f
+			case float64:
+				f := Float64(v)
+				s.MaxScore = &f
+			}
+
+		case "num_reduce_phases":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumReducePhases", err)
+				}
+				s.NumReducePhases = &value
+			case float64:
+				f := int64(v)
+				s.NumReducePhases = &f
+			}
+
+		case "pit_id":
+			if err := dec.Decode(&s.PitId); err != nil {
+				return fmt.Errorf("%s | %w", "PitId", err)
+			}
+
+		case "profile":
+			if err := dec.Decode(&s.Profile); err != nil {
+				return fmt.Errorf("%s | %w", "Profile", err)
+			}
+
+		case "_scroll_id":
+			if err := dec.Decode(&s.ScrollId_); err != nil {
+				return fmt.Errorf("%s | %w", "ScrollId_", err)
+			}
+
+		case "_shards":
+			if err := dec.Decode(&s.Shards_); err != nil {
+				return fmt.Errorf("%s | %w", "Shards_", err)
+			}
+
+		case "status":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Status", err)
+				}
+				s.Status = &value
+			case float64:
+				f := int(v)
+				s.Status = &f
+			}
+
+		case "suggest":
+			if s.Suggest == nil {
+				s.Suggest = make(map[string][]Suggest, 0)
+			}
+
+			for dec.More() {
+				tt, err := dec.Token()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return err
+				}
+				if value, ok := tt.(string); ok {
+					if strings.Contains(value, "#") {
+						elems := strings.Split(value, "#")
+						if len(elems) == 2 {
+							if s.Suggest == nil {
+								s.Suggest = make(map[string][]Suggest, 0)
+							}
+							switch elems[0] {
+
+							case "completion":
+								o := NewCompletionSuggest()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Suggest", err)
+								}
+								s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+
+							case "phrase":
+								o := NewPhraseSuggest()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Suggest", err)
+								}
+								s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+
+							case "term":
+								o := NewTermSuggest()
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Suggest", err)
+								}
+								s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+
+							default:
+								o := make(map[string]any, 0)
+								if err := dec.Decode(&o); err != nil {
+									return fmt.Errorf("%s | %w", "Suggest", err)
+								}
+								s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o)
+							}
+						} else {
+							return errors.New("cannot decode JSON for field Suggest")
+						}
+					} else {
+						o := make(map[string]any, 0)
+						if err := dec.Decode(&o); err != nil {
+							return fmt.Errorf("%s | %w", "Suggest", err)
+						}
+						s.Suggest[value] = append(s.Suggest[value], o)
+					}
+				}
+			}
+
+		case "terminated_early":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TerminatedEarly", err)
+				}
+				s.TerminatedEarly = &value
+			case bool:
+				s.TerminatedEarly = &v
+			}
+
+		case "timed_out":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TimedOut", err)
+				}
+				s.TimedOut = value
+			case bool:
+				s.TimedOut = v
+			}
+
+		case "took":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Took", err)
+				}
+				s.Took = value
+			case float64:
+				f := int64(v)
+				s.Took = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultiSearchItem returns a MultiSearchItem.
+func NewMultiSearchItem() *MultiSearchItem {
+	r := &MultiSearchItem{
+		Aggregations: make(map[string]Aggregate),
+		Fields:       make(map[string]json.RawMessage),
+		Suggest:      make(map[string][]Suggest),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermlookup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermlookup.go
new file mode 100644
index 000000000..91c25b119
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermlookup.go
@@ -0,0 +1,85 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// MultiTermLookup type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L643-L653
+type MultiTermLookup struct {
+	// Field A field from which to retrieve terms.
+	Field string `json:"field"`
+	// Missing The value to apply to documents that do not have a value.
+	// By default, documents without a value are ignored.
+	Missing Missing `json:"missing,omitempty"`
+}
+
+func (s *MultiTermLookup) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "missing":
+			if err := dec.Decode(&s.Missing); err != nil {
+				return fmt.Errorf("%s | %w", "Missing", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultiTermLookup returns a MultiTermLookup.
+func NewMultiTermLookup() *MultiTermLookup {
+	r := &MultiTermLookup{}
+
+	return r
+}
+
+type MultiTermLookupVariant interface {
+	MultiTermLookupCaster() *MultiTermLookup
+}
+
+func (s *MultiTermLookup) MultiTermLookupCaster() *MultiTermLookup {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregate.go
new file mode 100644
index 000000000..0810e7621
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregate.go
@@ -0,0 +1,123 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// MultiTermsAggregate type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L501-L506
+type MultiTermsAggregate struct {
+	Buckets BucketsMultiTermsBucket `json:"buckets"`
+	DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"`
+	Meta Metadata `json:"meta,omitempty"`
+	SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"`
+}
+
+func (s *MultiTermsAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "buckets":
+
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			source := bytes.NewReader(rawMsg)
+			localDec := json.NewDecoder(source)
+			switch rawMsg[0] {
+			case '{':
+				o := make(map[string]MultiTermsBucket, 0)
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			case '[':
+				o := []MultiTermsBucket{}
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			}
+
+		case "doc_count_error_upper_bound":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err)
+				}
+				s.DocCountErrorUpperBound = &value
+			case float64:
+				f := int64(v)
+				s.DocCountErrorUpperBound = &f
+			}
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		case "sum_other_doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SumOtherDocCount", err)
+				}
+				s.SumOtherDocCount = &value
+			case float64:
+				f := int64(v)
+				s.SumOtherDocCount = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultiTermsAggregate returns a MultiTermsAggregate.
+func NewMultiTermsAggregate() *MultiTermsAggregate {
+	r := &MultiTermsAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregation.go
new file mode 100644
index 000000000..34335cf65
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsaggregation.go
@@ -0,0 +1,201 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode"
+)
+
+// MultiTermsAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L601-L641
+type MultiTermsAggregation struct {
+	// CollectMode Specifies the strategy for data collection.
+	CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"`
+	// MinDocCount The minimum number of documents in a bucket for it to be returned.
+	MinDocCount *int64 `json:"min_doc_count,omitempty"`
+	// Order Specifies the sort order of the buckets.
+	// Defaults to sorting by descending document count.
+	Order AggregateOrder `json:"order,omitempty"`
+	// ShardMinDocCount The minimum number of documents in a bucket on each shard for it to be
+	// returned.
+	ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"`
+	// ShardSize The number of candidate terms produced by each shard.
+	// By default, `shard_size` will be automatically estimated based on the number
+	// of shards and the `size` parameter.
+	ShardSize *int `json:"shard_size,omitempty"`
+	// ShowTermDocCountError Calculates the doc count error on a per-term basis.
+	ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"`
+	// Size The number of term buckets that should be returned out of the overall terms list.
+	Size *int `json:"size,omitempty"`
+	// Terms The fields from which to generate sets of terms.
+	Terms []MultiTermLookup `json:"terms"`
+}
+
+func (s *MultiTermsAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "collect_mode":
+			if err := dec.Decode(&s.CollectMode); err != nil {
+				return fmt.Errorf("%s | %w", "CollectMode", err)
+			}
+
+		case "min_doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MinDocCount", err)
+				}
+				s.MinDocCount = &value
+			case float64:
+				f := int64(v)
+				s.MinDocCount = &f
+			}
+
+		case "order":
+
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			source := bytes.NewReader(rawMsg)
+			localDec := json.NewDecoder(source)
+			switch rawMsg[0] {
+			case '{':
+				o := make(map[string]sortorder.SortOrder, 0)
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Order", err)
+				}
+				s.Order = o
+			case '[':
+				o := make([]map[string]sortorder.SortOrder, 0)
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Order", err)
+				}
+				s.Order = o
+			}
+
+		case "shard_min_doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShardMinDocCount", err)
+				}
+				s.ShardMinDocCount = &value
+			case float64:
+				f := int64(v)
+				s.ShardMinDocCount = &f
+			}
+
+		case "shard_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShardSize", err)
+				}
+				s.ShardSize = &value
+			case float64:
+				f := int(v)
+				s.ShardSize = &f
+			}
+
+		case "show_term_doc_count_error":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShowTermDocCountError", err)
+				}
+				s.ShowTermDocCountError = &value
+			case bool:
+				s.ShowTermDocCountError = &v
+			}
+
+		case "size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Size", err)
+				}
+				s.Size = &value
+			case float64:
+				f := int(v)
+				s.Size = &f
+			}
+
+		case "terms":
+			if err := dec.Decode(&s.Terms); err != nil {
+				return fmt.Errorf("%s | %w", "Terms", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewMultiTermsAggregation returns a MultiTermsAggregation.
+func NewMultiTermsAggregation() *MultiTermsAggregation {
+	r := &MultiTermsAggregation{}
+
+	return r
+}
+
+type MultiTermsAggregationVariant interface {
+	MultiTermsAggregationCaster() *MultiTermsAggregation
+}
+
+func (s *MultiTermsAggregation) MultiTermsAggregationCaster() *MultiTermsAggregation {
+	return s
+}
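Editor's note: a hedged sketch, not part of the diff, composing a multi_terms aggregation from the two types above. It assumes the typedapi `types.Aggregations` container exposes a `MultiTerms` field, as in earlier client majors; the field names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	size := 10
	agg := types.Aggregations{
		MultiTerms: &types.MultiTermsAggregation{
			// Each MultiTermLookup contributes one component of the composite bucket key.
			Terms: []types.MultiTermLookup{
				{Field: "genre"},
				{Field: "product"},
			},
			Size: &size, // return at most ten buckets
		},
	}
	out, _ := json.Marshal(agg)
	fmt.Println(string(out)) // the serialized multi_terms aggregation body
}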
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsbucket.go
new file mode 100644
index 000000000..abff79533
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/multitermsbucket.go
@@ -0,0 +1,667 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// MultiTermsBucket type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L508-L512
+type MultiTermsBucket struct {
+	Aggregations map[string]Aggregate `json:"-"`
+	DocCount int64 `json:"doc_count"`
+	DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"`
+	Key []FieldValue `json:"key"`
+	KeyAsString *string `json:"key_as_string,omitempty"`
+}
+
+func (s *MultiTermsBucket) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCount", err)
+				}
+				s.DocCount = value
+			case float64:
+				f := int64(v)
+				s.DocCount = f
+			}
+
+		case "doc_count_error_upper_bound":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err)
+				}
+				s.DocCountErrorUpperBound = &value
+			case float64:
+				f := int64(v)
+				s.DocCountErrorUpperBound = &f
+			}
+
+		case "key":
+			if err := dec.Decode(&s.Key); err != nil {
+				return fmt.Errorf("%s | %w", "Key", err)
+			}
+
+		case "key_as_string":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "KeyAsString", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.KeyAsString = &o
+
+		default:
+
+			if value, ok := t.(string); ok {
+				if strings.Contains(value, "#") {
+					elems := strings.Split(value, "#")
+					if len(elems) == 2 {
+						if s.Aggregations == nil {
+							s.Aggregations = make(map[string]Aggregate, 0)
+						}
+						switch elems[0] {
+
+						case "cardinality":
+							o := NewCardinalityAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "hdr_percentiles":
+							o := NewHdrPercentilesAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "hdr_percentile_ranks":
+							o := NewHdrPercentileRanksAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "tdigest_percentiles":
+							o := NewTDigestPercentilesAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "tdigest_percentile_ranks":
+							o := NewTDigestPercentileRanksAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "percentiles_bucket":
+							o := NewPercentilesBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "median_absolute_deviation":
+							o := NewMedianAbsoluteDeviationAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "min":
+							o := NewMinAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "max":
+							o := NewMaxAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "sum":
+							o := NewSumAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "avg":
+							o := NewAvgAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "weighted_avg":
+							o := NewWeightedAvgAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "value_count":
+							o := NewValueCountAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "simple_value":
+							o := NewSimpleValueAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "derivative":
+							o := NewDerivativeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "bucket_metric_value":
+							o := NewBucketMetricValueAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "stats":
+							o := NewStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "stats_bucket":
+							o := NewStatsBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "extended_stats":
+							o := NewExtendedStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "extended_stats_bucket":
+							o := NewExtendedStatsBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_bounds":
+							o := NewGeoBoundsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_centroid":
+							o := NewGeoCentroidAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "histogram":
+							o := NewHistogramAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w",
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + 
case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + 
o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s MultiTermsBucket) MarshalJSON() ([]byte, error) { + type opt MultiTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewMultiTermsBucket returns a MultiTermsBucket. +func NewMultiTermsBucket() *MultiTermsBucket { + r := &MultiTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/murmur3hashproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/murmur3hashproperty.go new file mode 100644 index 000000000..3844a1604 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/murmur3hashproperty.go @@ -0,0 +1,872 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// Murmur3HashProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L90-L92 +type Murmur3HashProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := 
NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": 
+ oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := 
NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling to include the literal value +func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { + type innerMurmur3HashProperty Murmur3HashProperty + tmp := innerMurmur3HashProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "murmur3" + + return json.Marshal(tmp) +} + +// NewMurmur3HashProperty returns a Murmur3HashProperty. +func NewMurmur3HashProperty() *Murmur3HashProperty { + r := &Murmur3HashProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type Murmur3HashPropertyVariant interface { + Murmur3HashPropertyCaster() *Murmur3HashProperty +} + +func (s *Murmur3HashProperty) Murmur3HashPropertyCaster() *Murmur3HashProperty { + return s +} + +func (s *Murmur3HashProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mutualinformationheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mutualinformationheuristic.go new file mode 100644 index 000000000..e90cfd813 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/mutualinformationheuristic.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// MutualInformationHeuristic type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L800-L809 +type MutualInformationHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. + BackgroundIsSuperset *bool `json:"background_is_superset,omitempty"` + // IncludeNegatives Set to `false` to filter out the terms that appear less often in the subset + // than in documents outside the subset. + IncludeNegatives *bool `json:"include_negatives,omitempty"` +} + +func (s *MutualInformationHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BackgroundIsSuperset", err) + } + s.BackgroundIsSuperset = &value + case bool: + s.BackgroundIsSuperset = &v + } + + case "include_negatives": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeNegatives", err) + } + s.IncludeNegatives = &value + case bool: + s.IncludeNegatives = &v + } + + } + } + return nil +} + +// NewMutualInformationHeuristic returns a MutualInformationHeuristic. +func NewMutualInformationHeuristic() *MutualInformationHeuristic { + r := &MutualInformationHeuristic{} + + return r +} + +type MutualInformationHeuristicVariant interface { + MutualInformationHeuristicCaster() *MutualInformationHeuristic +} + +func (s *MutualInformationHeuristic) MutualInformationHeuristicCaster() *MutualInformationHeuristic { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/names.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/names.go new file mode 100644 index 000000000..9e7d5ffd5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/names.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
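Note for reviewers: the generated UnmarshalJSON methods in the bucket types vendored here (MultiTermsBucket above, NestedAggregate below) all decode sub-aggregations through the same typed_keys convention: a response key of the form "kind#name" is split on "#", the kind selects the concrete aggregate type, and the result is stored in the Aggregations map under the bare name; unrecognized kinds fall back to a plain map[string]any. A minimal consumer-side sketch of that behaviour against this vendored client follows; the bucket payload and the "mean_price" aggregation name are illustrative and not part of this diff, and it assumes types.Aggregate is the union type the generated dispatch stores concrete pointers into.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// A bucket as Elasticsearch returns it with typed_keys enabled:
	// "avg#mean_price" carries both the aggregate kind ("avg") and
	// the user-chosen aggregation name ("mean_price").
	raw := []byte(`{"key": ["red", 42], "doc_count": 7, "avg#mean_price": {"value": 19.5}}`)

	// The generated constructor pre-allocates the Aggregations map.
	bucket := types.NewMultiTermsBucket()
	if err := json.Unmarshal(raw, bucket); err != nil {
		panic(err)
	}

	// UnmarshalJSON split "avg#mean_price" and stored a concrete
	// aggregate under the bare name "mean_price".
	switch agg := bucket.Aggregations["mean_price"].(type) {
	case *types.AvgAggregate:
		fmt.Printf("mean_price decoded as %T\n", agg)
	default:
		fmt.Printf("unexpected aggregate kind: %T\n", agg)
	}
}

The property types in this diff (e.g. Murmur3HashProperty) apply the analogous pattern to their Fields and Properties maps, dispatching on the embedded "type" field of each sub-mapping instead of a key prefix.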
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Names type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L75-L75 +type Names []string + +type NamesVariant interface { + NamesCaster() *Names +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecode.go new file mode 100644 index 000000000..24bebb330 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecode.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NativeCode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/info/types.ts#L29-L32 +type NativeCode struct { + BuildHash string `json:"build_hash"` + Version string `json:"version"` +} + +func (s *NativeCode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewNativeCode returns a NativeCode. +func NewNativeCode() *NativeCode { + r := &NativeCode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecodeinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecodeinformation.go new file mode 100644 index 000000000..619c7ac92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nativecodeinformation.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NativeCodeInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/types.ts#L29-L32 +type NativeCodeInformation struct { + BuildHash string `json:"build_hash"` + Version string `json:"version"` +} + +func (s *NativeCodeInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewNativeCodeInformation returns a NativeCodeInformation. +func NewNativeCodeInformation() *NativeCodeInformation { + r := &NativeCodeInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceoptions.go new file mode 100644 index 000000000..4d3027c76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceoptions.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NerInferenceOptions type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L273-L282 +type NerInferenceOptions struct { + // ClassificationLabels The token classification labels. Must be IOB formatted tags + ClassificationLabels []string `json:"classification_labels,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary *Vocabulary `json:"vocabulary,omitempty"` +} + +func (s *NerInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return fmt.Errorf("%s | %w", "ClassificationLabels", err) + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + + } + } + return nil +} + +// NewNerInferenceOptions returns a NerInferenceOptions. +func NewNerInferenceOptions() *NerInferenceOptions { + r := &NerInferenceOptions{} + + return r +} + +type NerInferenceOptionsVariant interface { + NerInferenceOptionsCaster() *NerInferenceOptions +} + +func (s *NerInferenceOptions) NerInferenceOptionsCaster() *NerInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceupdateoptions.go new file mode 100644 index 000000000..9078370b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nerinferenceupdateoptions.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NerInferenceUpdateOptions type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L423-L428 +type NerInferenceUpdateOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *NerInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewNerInferenceUpdateOptions returns a NerInferenceUpdateOptions. +func NewNerInferenceUpdateOptions() *NerInferenceUpdateOptions { + r := &NerInferenceUpdateOptions{} + + return r +} + +type NerInferenceUpdateOptionsVariant interface { + NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions +} + +func (s *NerInferenceUpdateOptions) NerInferenceUpdateOptionsCaster() *NerInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregate.go new file mode 100644 index 000000000..21ff150c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// NestedAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L534-L538 +type NestedAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *NestedAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err 
!= nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + 
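+ // Typed-keys responses name each sub-aggregation as "<kind>#<name>", e.g. {"doc_count": 42, "sterms#genres": {...}};
+ // the cases above map each kind to its concrete Aggregate type and store it under the bare name,
+ // while unrecognized kinds fall back to a generic map[string]any below.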
+ default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s NestedAggregate) MarshalJSON() ([]byte, error) { + type opt NestedAggregate + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewNestedAggregate returns a NestedAggregate. +func NewNestedAggregate() *NestedAggregate { + r := &NestedAggregate{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregation.go new file mode 100644 index 000000000..40d4ea7a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedaggregation.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NestedAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L655-L660 +type NestedAggregation struct { + // Path The path to the field of type `nested`. + Path *string `json:"path,omitempty"` +} + +func (s *NestedAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + } + } + return nil +} + +// NewNestedAggregation returns a NestedAggregation.
+func NewNestedAggregation() *NestedAggregation { + r := &NestedAggregation{} + + return r +} + +type NestedAggregationVariant interface { + NestedAggregationCaster() *NestedAggregation +} + +func (s *NestedAggregation) NestedAggregationCaster() *NestedAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedidentity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedidentity.go new file mode 100644 index 000000000..18675bc15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedidentity.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NestedIdentity type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L89-L93 +type NestedIdentity struct { + Field string `json:"field"` + Nested_ *NestedIdentity `json:"_nested,omitempty"` + Offset int `json:"offset"` +} + +func (s *NestedIdentity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "_nested": + if err := dec.Decode(&s.Nested_); err != nil { + return fmt.Errorf("%s | %w", "Nested_", err) + } + + case "offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + } + } + return nil +} + +// NewNestedIdentity returns a NestedIdentity. +func NewNestedIdentity() *NestedIdentity { + r := &NestedIdentity{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedproperty.go new file mode 100644 index 000000000..416f84fa1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedproperty.go @@ -0,0 +1,904 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// NestedProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/complex.ts#L40-L45 +type NestedProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IncludeInParent *bool `json:"include_in_parent,omitempty"` + IncludeInRoot *bool `json:"include_in_root,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *NestedProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := 
NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "include_in_parent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeInParent", err) + } + s.IncludeInParent = &value + case bool: + s.IncludeInParent = &v + } + + case "include_in_root": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeInRoot", err) + } + s.IncludeInRoot = &value + case bool: + s.IncludeInRoot = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | 
%w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NestedProperty) MarshalJSON() ([]byte, error) { + type innerNestedProperty NestedProperty + tmp := innerNestedProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IncludeInParent: s.IncludeInParent, + IncludeInRoot: s.IncludeInRoot, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "nested" + + return json.Marshal(tmp) +} + +// NewNestedProperty returns a NestedProperty. +func NewNestedProperty() *NestedProperty { + r := &NestedProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type NestedPropertyVariant interface { + NestedPropertyCaster() *NestedProperty +} + +func (s *NestedProperty) NestedPropertyCaster() *NestedProperty { + return s +} + +func (s *NestedProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedquery.go new file mode 100644 index 000000000..7580359cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedquery.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode" +) + +// NestedQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/joining.ts#L112-L139 +type NestedQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped path and not return any documents + // instead of an error. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // Path Path to the nested object you wish to search. + Path string `json:"path"` + // Query Query you wish to run on nested objects in the path. + Query Query `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode How scores for matching child objects affect the root parent document’s + // relevance score. 
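+ // Valid values are avg (the default), max, min, none, and sum.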
+ ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` +} + +func (s *NestedQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return fmt.Errorf("%s | %w", "InnerHits", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return fmt.Errorf("%s | %w", "ScoreMode", err) + } + + } + } + return nil +} + +// NewNestedQuery returns a NestedQuery. +func NewNestedQuery() *NestedQuery { + r := &NestedQuery{} + + return r +} + +type NestedQueryVariant interface { + NestedQueryCaster() *NestedQuery +} + +func (s *NestedQuery) NestedQueryCaster() *NestedQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedsortvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedsortvalue.go new file mode 100644 index 000000000..df5d333d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nestedsortvalue.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NestedSortValue type. 
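+// It defines sorting on a field inside one or more nested objects, with an optional Filter over the matched objects.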
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L29-L34 +type NestedSortValue struct { + Filter *Query `json:"filter,omitempty"` + MaxChildren *int `json:"max_children,omitempty"` + Nested *NestedSortValue `json:"nested,omitempty"` + Path string `json:"path"` +} + +func (s *NestedSortValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "max_children": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChildren", err) + } + s.MaxChildren = &value + case float64: + f := int(v) + s.MaxChildren = &f + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return fmt.Errorf("%s | %w", "Nested", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + } + } + return nil +} + +// NewNestedSortValue returns a NestedSortValue. +func NewNestedSortValue() *NestedSortValue { + r := &NestedSortValue{} + + return r +} + +type NestedSortValueVariant interface { + NestedSortValueCaster() *NestedSortValue +} + +func (s *NestedSortValue) NestedSortValueCaster() *NestedSortValue { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/networkdirectionprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/networkdirectionprocessor.go new file mode 100644 index 000000000..038b50642 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/networkdirectionprocessor.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NetworkDirectionProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1249-L1283 +type NetworkDirectionProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // DestinationIp Field containing the destination IP address. 
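+ // Defaults to destination.ip.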
+ DestinationIp *string `json:"destination_ip,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // InternalNetworks List of internal networks. Supports IPv4 and IPv6 addresses and ranges in + // CIDR notation. Also supports the named ranges listed below. These may be + // constructed with template snippets. Must specify only one of + // internal_networks or internal_networks_field. + InternalNetworks []string `json:"internal_networks,omitempty"` + // InternalNetworksField A field on the given document to read the internal_networks configuration + // from. + InternalNetworksField *string `json:"internal_networks_field,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // SourceIp Field containing the source IP address. + SourceIp *string `json:"source_ip,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the network direction. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *NetworkDirectionProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "destination_ip": + if err := dec.Decode(&s.DestinationIp); err != nil { + return fmt.Errorf("%s | %w", "DestinationIp", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "internal_networks": + if err := dec.Decode(&s.InternalNetworks); err != nil { + return fmt.Errorf("%s | %w", "InternalNetworks", err) + } + + case "internal_networks_field": + if err := dec.Decode(&s.InternalNetworksField); err != nil { + return fmt.Errorf("%s | %w", "InternalNetworksField", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "source_ip": + if err := dec.Decode(&s.SourceIp); err != nil { + return fmt.Errorf("%s | %w", "SourceIp", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case 
"target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewNetworkDirectionProcessor returns a NetworkDirectionProcessor. +func NewNetworkDirectionProcessor() *NetworkDirectionProcessor { + r := &NetworkDirectionProcessor{} + + return r +} + +type NetworkDirectionProcessorVariant interface { + NetworkDirectionProcessorCaster() *NetworkDirectionProcessor +} + +func (s *NetworkDirectionProcessor) NetworkDirectionProcessorCaster() *NetworkDirectionProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nevercondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nevercondition.go new file mode 100644 index 000000000..f750c896b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nevercondition.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NeverCondition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L73-L73 +type NeverCondition struct { +} + +// NewNeverCondition returns a NeverCondition. +func NewNeverCondition() *NeverCondition { + r := &NeverCondition{} + + return r +} + +type NeverConditionVariant interface { + NeverConditionCaster() *NeverCondition +} + +func (s *NeverCondition) NeverConditionCaster() *NeverCondition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenfilter.go new file mode 100644 index 000000000..a024bae6f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenfilter.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NGramTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L364-L372 +type NGramTokenFilter struct { + // MaxGram Maximum length of characters in a gram. Defaults to `2`. + MaxGram *int `json:"max_gram,omitempty"` + // MinGram Minimum length of characters in a gram. Defaults to `1`. + MinGram *int `json:"min_gram,omitempty"` + // PreserveOriginal Emits original token when set to `true`. Defaults to `false`. + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NGramTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGram", err) + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinGram", err) + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NGramTokenFilter) MarshalJSON() ([]byte, error) { + type innerNGramTokenFilter NGramTokenFilter + tmp := innerNGramTokenFilter{ + MaxGram: s.MaxGram, + MinGram: s.MinGram, + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "ngram" + + return json.Marshal(tmp) +} + +// NewNGramTokenFilter returns a NGramTokenFilter. +func NewNGramTokenFilter() *NGramTokenFilter { + r := &NGramTokenFilter{} + + return r +} + +type NGramTokenFilterVariant interface { + NGramTokenFilterCaster() *NGramTokenFilter +} + +func (s *NGramTokenFilter) NGramTokenFilterCaster() *NGramTokenFilter { + return s +} + +func (s *NGramTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenizer.go new file mode 100644 index 000000000..d1035c6e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ngramtokenizer.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar" +) + +// NGramTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L85-L95 +type NGramTokenizer struct { + CustomTokenChars *string `json:"custom_token_chars,omitempty"` + MaxGram *int `json:"max_gram,omitempty"` + MinGram *int `json:"min_gram,omitempty"` + TokenChars []tokenchar.TokenChar `json:"token_chars,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NGramTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_token_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CustomTokenChars", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CustomTokenChars = &o + + case "max_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxGram", err) + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinGram", err) + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "token_chars": + if err := dec.Decode(&s.TokenChars); err != nil { + return fmt.Errorf("%s | %w", "TokenChars", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NGramTokenizer) MarshalJSON() ([]byte, error) { + type innerNGramTokenizer NGramTokenizer + tmp := innerNGramTokenizer{ + CustomTokenChars: s.CustomTokenChars, + MaxGram: s.MaxGram, + MinGram: s.MinGram, + TokenChars: s.TokenChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "ngram" + + return json.Marshal(tmp) +} + +// NewNGramTokenizer returns a NGramTokenizer. 
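// Editor's sketch (illustrative, not part of the vendored diff): the custom
// MarshalJSON above injects the literal "type":"ngram", so callers only set
// the gram bounds. Assumes the vendored import path shown in this diff; the
// values 2 and 4 are arbitrary examples.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	minGram, maxGram := 2, 4
	tok := types.NewNGramTokenizer()
	tok.MinGram = &minGram
	tok.MaxGram = &maxGram

	out, err := json.Marshal(tok)
	if err != nil {
		panic(err)
	}
	// Prints {"max_gram":4,"min_gram":2,"type":"ngram"}; Type was never set by hand.
	fmt.Println(string(out))
}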
+func NewNGramTokenizer() *NGramTokenizer { + r := &NGramTokenizer{} + + return r +} + +type NGramTokenizerVariant interface { + NGramTokenizerCaster() *NGramTokenizer +} + +func (s *NGramTokenizer) NGramTokenizerCaster() *NGramTokenizer { + return s +} + +func (s *NGramTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlpberttokenizationconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlpberttokenizationconfig.go new file mode 100644 index 000000000..241456eaf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlpberttokenizationconfig.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate" +) + +// NlpBertTokenizationConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L188-L189 +type NlpBertTokenizationConfig struct { + // DoLowerCase Should the tokenizer lower case the text + DoLowerCase *bool `json:"do_lower_case,omitempty"` + // MaxSequenceLength Maximum input sequence length for the model + MaxSequenceLength *int `json:"max_sequence_length,omitempty"` + // Span Tokenization spanning options. 
Special value of -1 indicates no spanning + // takes place + Span *int `json:"span,omitempty"` + // Truncate Should tokenization input be automatically truncated before sending to the + // model for inference + Truncate *tokenizationtruncate.TokenizationTruncate `json:"truncate,omitempty"` + // WithSpecialTokens Is tokenization completed with special tokens + WithSpecialTokens *bool `json:"with_special_tokens,omitempty"` +} + +func (s *NlpBertTokenizationConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "do_lower_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DoLowerCase", err) + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + + case "max_sequence_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSequenceLength", err) + } + s.MaxSequenceLength = &value + case float64: + f := int(v) + s.MaxSequenceLength = &f + } + + case "span": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Span", err) + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return fmt.Errorf("%s | %w", "Truncate", err) + } + + case "with_special_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WithSpecialTokens", err) + } + s.WithSpecialTokens = &value + case bool: + s.WithSpecialTokens = &v + } + + } + } + return nil +} + +// NewNlpBertTokenizationConfig returns a NlpBertTokenizationConfig. +func NewNlpBertTokenizationConfig() *NlpBertTokenizationConfig { + r := &NlpBertTokenizationConfig{} + + return r +} + +type NlpBertTokenizationConfigVariant interface { + NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig +} + +func (s *NlpBertTokenizationConfig) NlpBertTokenizationConfigCaster() *NlpBertTokenizationConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlprobertatokenizationconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlprobertatokenizationconfig.go new file mode 100644 index 000000000..2c137eb5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlprobertatokenizationconfig.go @@ -0,0 +1,166 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate" +) + +// NlpRobertaTokenizationConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L191-L198 +type NlpRobertaTokenizationConfig struct { + // AddPrefixSpace Should the tokenizer prefix input with a space character + AddPrefixSpace *bool `json:"add_prefix_space,omitempty"` + // DoLowerCase Should the tokenizer lower case the text + DoLowerCase *bool `json:"do_lower_case,omitempty"` + // MaxSequenceLength Maximum input sequence length for the model + MaxSequenceLength *int `json:"max_sequence_length,omitempty"` + // Span Tokenization spanning options. Special value of -1 indicates no spanning + // takes place + Span *int `json:"span,omitempty"` + // Truncate Should tokenization input be automatically truncated before sending to the + // model for inference + Truncate *tokenizationtruncate.TokenizationTruncate `json:"truncate,omitempty"` + // WithSpecialTokens Is tokenization completed with special tokens + WithSpecialTokens *bool `json:"with_special_tokens,omitempty"` +} + +func (s *NlpRobertaTokenizationConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "add_prefix_space": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AddPrefixSpace", err) + } + s.AddPrefixSpace = &value + case bool: + s.AddPrefixSpace = &v + } + + case "do_lower_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DoLowerCase", err) + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + + case "max_sequence_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSequenceLength", err) + } + s.MaxSequenceLength = &value + case float64: + f := int(v) + s.MaxSequenceLength = &f + } + + case "span": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Span", err) + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return fmt.Errorf("%s | %w", "Truncate", err) + } + + case "with_special_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WithSpecialTokens", err) + } + s.WithSpecialTokens = &value + case bool: + s.WithSpecialTokens = &v + } + + } + } + return nil +} + +// NewNlpRobertaTokenizationConfig returns a NlpRobertaTokenizationConfig. 
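// Editor's sketch (illustrative, outside the vendored code): boolean options
// in these tokenization configs decode from either JSON booleans or quoted
// strings, via the strconv.ParseBool branch in the UnmarshalJSON above.
// Assumes the vendored import path from this diff; the payload is made up.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var c types.NlpRobertaTokenizationConfig
	payload := []byte(`{"add_prefix_space":"true","do_lower_case":false}`)
	if err := json.Unmarshal(payload, &c); err != nil {
		panic(err)
	}
	fmt.Println(*c.AddPrefixSpace, *c.DoLowerCase) // true false
}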
+func NewNlpRobertaTokenizationConfig() *NlpRobertaTokenizationConfig { + r := &NlpRobertaTokenizationConfig{} + + return r +} + +type NlpRobertaTokenizationConfigVariant interface { + NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig +} + +func (s *NlpRobertaTokenizationConfig) NlpRobertaTokenizationConfigCaster() *NlpRobertaTokenizationConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlptokenizationupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlptokenizationupdateoptions.go new file mode 100644 index 000000000..9d054ef0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nlptokenizationupdateoptions.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate" +) + +// NlpTokenizationUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L375-L380 +type NlpTokenizationUpdateOptions struct { + // Span Span options to apply + Span *int `json:"span,omitempty"` + // Truncate Truncate options to apply + Truncate *tokenizationtruncate.TokenizationTruncate `json:"truncate,omitempty"` +} + +func (s *NlpTokenizationUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "span": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Span", err) + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return fmt.Errorf("%s | %w", "Truncate", err) + } + + } + } + return nil +} + +// NewNlpTokenizationUpdateOptions returns a NlpTokenizationUpdateOptions. 
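// Editor's sketch (illustrative, outside the vendored code): integer fields
// such as span are equally lenient and accept a JSON number or a quoted
// string, via the strconv.Atoi branch above. Assumes the vendored import
// path from this diff; 128 is an arbitrary example value.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a, b types.NlpTokenizationUpdateOptions
	if err := json.Unmarshal([]byte(`{"span":128}`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"span":"128"}`), &b); err != nil {
		panic(err)
	}
	fmt.Println(*a.Span, *b.Span) // 128 128
}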
+func NewNlpTokenizationUpdateOptions() *NlpTokenizationUpdateOptions { + r := &NlpTokenizationUpdateOptions{} + + return r +} + +type NlpTokenizationUpdateOptionsVariant interface { + NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions +} + +func (s *NlpTokenizationUpdateOptions) NlpTokenizationUpdateOptionsCaster() *NlpTokenizationUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/node.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/node.go new file mode 100644 index 000000000..a85693ef7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/node.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Node type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 +type Node struct { + SharedCache Shared `json:"shared_cache"` +} + +// NewNode returns a Node. +func NewNode() *Node { + r := &Node{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeallocationexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeallocationexplanation.go new file mode 100644 index 000000000..eaeb8f6ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeallocationexplanation.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// NodeAllocationExplanation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L103-L117 +type NodeAllocationExplanation struct { + Deciders []AllocationDecision `json:"deciders"` + NodeAttributes map[string]string `json:"node_attributes"` + NodeDecision decision.Decision `json:"node_decision"` + NodeId string `json:"node_id"` + NodeName string `json:"node_name"` + Roles []noderole.NodeRole `json:"roles"` + Store *AllocationStore `json:"store,omitempty"` + TransportAddress string `json:"transport_address"` + WeightRanking int `json:"weight_ranking"` +} + +func (s *NodeAllocationExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deciders": + if err := dec.Decode(&s.Deciders); err != nil { + return fmt.Errorf("%s | %w", "Deciders", err) + } + + case "node_attributes": + if s.NodeAttributes == nil { + s.NodeAttributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.NodeAttributes); err != nil { + return fmt.Errorf("%s | %w", "NodeAttributes", err) + } + + case "node_decision": + if err := dec.Decode(&s.NodeDecision); err != nil { + return fmt.Errorf("%s | %w", "NodeDecision", err) + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "node_name": + if err := dec.Decode(&s.NodeName); err != nil { + return fmt.Errorf("%s | %w", "NodeName", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "weight_ranking": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "WeightRanking", err) + } + s.WeightRanking = value + case float64: + f := int(v) + s.WeightRanking = f + } + + } + } + return nil +} + +// NewNodeAllocationExplanation returns a NodeAllocationExplanation. +func NewNodeAllocationExplanation() *NodeAllocationExplanation { + r := &NodeAllocationExplanation{ + NodeAttributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributes.go new file mode 100644 index 000000000..ba8046687 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributes.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeAttributes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Node.ts#L41-L52 +type NodeAttributes struct { + // Attributes Lists node attributes. + Attributes map[string]string `json:"attributes"` + // EphemeralId The ephemeral ID of the node. + EphemeralId string `json:"ephemeral_id"` + // Id The unique identifier of the node. + Id *string `json:"id,omitempty"` + // Name The unique identifier of the node. + Name string `json:"name"` + // TransportAddress The host and port where transport HTTP connections are accepted. + TransportAddress string `json:"transport_address"` +} + +func (s *NodeAttributes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewNodeAttributes returns a NodeAttributes. +func NewNodeAttributes() *NodeAttributes { + r := &NodeAttributes{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributesrecord.go new file mode 100644 index 000000000..08e4b72e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeattributesrecord.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
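// Editor's sketch (illustrative, outside the vendored code): NewNodeAttributes
// pre-allocates the Attributes map and UnmarshalJSON re-creates it when
// decoding into a zero value, so the map is never nil on either path.
// Assumes the vendored import path from this diff; the payload is made up.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var n types.NodeAttributes // zero value: Attributes starts out nil
	payload := []byte(`{"attributes":{"zone":"a"},"ephemeral_id":"eph-1",` +
		`"name":"node-1","transport_address":"127.0.0.1:9300"}`)
	if err := json.Unmarshal(payload, &n); err != nil {
		panic(err)
	}
	fmt.Println(n.Attributes["zone"], n.Name) // a node-1
}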
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeAttributesRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/nodeattrs/types.ts#L20-L55 +type NodeAttributesRecord struct { + // Attr The attribute name. + Attr *string `json:"attr,omitempty"` + // Host The host name. + Host *string `json:"host,omitempty"` + // Id The unique node identifier. + Id *string `json:"id,omitempty"` + // Ip The IP address. + Ip *string `json:"ip,omitempty"` + // Node The node name. + Node *string `json:"node,omitempty"` + // Pid The process identifier. + Pid *string `json:"pid,omitempty"` + // Port The bound transport port. + Port *string `json:"port,omitempty"` + // Value The attribute value. + Value *string `json:"value,omitempty"` +} + +func (s *NodeAttributesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Attr", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Attr = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "pid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "port": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o 
:= string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = &o + + } + } + return nil +} + +// NewNodeAttributesRecord returns a NodeAttributesRecord. +func NewNodeAttributesRecord() *NodeAttributesRecord { + r := &NodeAttributesRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodebufferpool.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodebufferpool.go new file mode 100644 index 000000000..cfda4b5ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodebufferpool.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeBufferPool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L861-L882 +type NodeBufferPool struct { + // Count Number of buffer pools. + Count *int64 `json:"count,omitempty"` + // TotalCapacity Total capacity of buffer pools. + TotalCapacity *string `json:"total_capacity,omitempty"` + // TotalCapacityInBytes Total capacity of buffer pools in bytes. + TotalCapacityInBytes *int64 `json:"total_capacity_in_bytes,omitempty"` + // Used Size of buffer pools. + Used *string `json:"used,omitempty"` + // UsedInBytes Size of buffer pools in bytes. 
+ UsedInBytes *int64 `json:"used_in_bytes,omitempty"` +} + +func (s *NodeBufferPool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "total_capacity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TotalCapacity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalCapacity = &o + + case "total_capacity_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCapacityInBytes", err) + } + s.TotalCapacityInBytes = &value + case float64: + f := int64(v) + s.TotalCapacityInBytes = &f + } + + case "used": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Used", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Used = &o + + case "used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UsedInBytes", err) + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + } + } + return nil +} + +// NewNodeBufferPool returns a NodeBufferPool. +func NewNodeBufferPool() *NodeBufferPool { + r := &NodeBufferPool{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodediskusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodediskusage.go new file mode 100644 index 000000000..2e12ac50d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodediskusage.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeDiskUsage type. 
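// Editor's sketch (illustrative, outside the vendored code): NodeBufferPool
// carries each size twice, human-readable ("1mb") and as a byte count, and
// the decoder above accepts the counters as numbers or quoted strings.
// Assumes the vendored import path from this diff; the payload is made up.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var p types.NodeBufferPool
	payload := []byte(`{"count":"2","total_capacity":"1mb","total_capacity_in_bytes":1048576}`)
	if err := json.Unmarshal(payload, &p); err != nil {
		panic(err)
	}
	fmt.Println(*p.Count, *p.TotalCapacity, *p.TotalCapacityInBytes) // 2 1mb 1048576
}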
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L57-L61 +type NodeDiskUsage struct { + LeastAvailable DiskUsage `json:"least_available"` + MostAvailable DiskUsage `json:"most_available"` + NodeName string `json:"node_name"` +} + +func (s *NodeDiskUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "least_available": + if err := dec.Decode(&s.LeastAvailable); err != nil { + return fmt.Errorf("%s | %w", "LeastAvailable", err) + } + + case "most_available": + if err := dec.Decode(&s.MostAvailable); err != nil { + return fmt.Errorf("%s | %w", "MostAvailable", err) + } + + case "node_name": + if err := dec.Decode(&s.NodeName); err != nil { + return fmt.Errorf("%s | %w", "NodeName", err) + } + + } + } + return nil +} + +// NewNodeDiskUsage returns a NodeDiskUsage. +func NewNodeDiskUsage() *NodeDiskUsage { + r := &NodeDiskUsage{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeids.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeids.go new file mode 100644 index 000000000..d6ba1882b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeids.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeIds type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L58-L58 +type NodeIds []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfo.go new file mode 100644 index 000000000..eced2b780 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfo.go @@ -0,0 +1,285 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// NodeInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L32-L72 +type NodeInfo struct { + Aggregations map[string]NodeInfoAggregation `json:"aggregations,omitempty"` + Attributes map[string]string `json:"attributes"` + BuildFlavor string `json:"build_flavor"` + // BuildHash Short hash of the last git commit in this release. + BuildHash string `json:"build_hash"` + BuildType string `json:"build_type"` + ComponentVersions map[string]int `json:"component_versions"` + // Host The node’s host name. + Host string `json:"host"` + Http *NodeInfoHttp `json:"http,omitempty"` + IndexVersion int64 `json:"index_version"` + Ingest *NodeInfoIngest `json:"ingest,omitempty"` + // Ip The node’s IP address. + Ip string `json:"ip"` + Jvm *NodeJvmInfo `json:"jvm,omitempty"` + Modules []PluginStats `json:"modules,omitempty"` + // Name The node's name + Name string `json:"name"` + Os *NodeOperatingSystemInfo `json:"os,omitempty"` + Plugins []PluginStats `json:"plugins,omitempty"` + Process *NodeProcessInfo `json:"process,omitempty"` + RemoteClusterServer *RemoveClusterServer `json:"remote_cluster_server,omitempty"` + Roles []noderole.NodeRole `json:"roles"` + Settings *NodeInfoSettings `json:"settings,omitempty"` + ThreadPool map[string]NodeThreadPoolInfo `json:"thread_pool,omitempty"` + // TotalIndexingBuffer Total heap allowed to be used to hold recently indexed documents before they + // must be written to disk. This size is a shared pool across all shards on this + // node, and is controlled by Indexing Buffer settings. + TotalIndexingBuffer *int64 `json:"total_indexing_buffer,omitempty"` + // TotalIndexingBufferInBytes Same as total_indexing_buffer, but expressed in bytes. + TotalIndexingBufferInBytes ByteSize `json:"total_indexing_buffer_in_bytes,omitempty"` + Transport *NodeInfoTransport `json:"transport,omitempty"` + // TransportAddress Host and port where transport HTTP connections are accepted. + TransportAddress string `json:"transport_address"` + TransportVersion int64 `json:"transport_version"` + // Version Elasticsearch version running on this node. 
+ Version string `json:"version"` +} + +func (s *NodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]NodeInfoAggregation, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "build_flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildFlavor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildFlavor = o + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildHash", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "build_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BuildType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildType = o + + case "component_versions": + if s.ComponentVersions == nil { + s.ComponentVersions = make(map[string]int, 0) + } + if err := dec.Decode(&s.ComponentVersions); err != nil { + return fmt.Errorf("%s | %w", "ComponentVersions", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return fmt.Errorf("%s | %w", "Http", err) + } + + case "index_version": + if err := dec.Decode(&s.IndexVersion); err != nil { + return fmt.Errorf("%s | %w", "IndexVersion", err) + } + + case "ingest": + if err := dec.Decode(&s.Ingest); err != nil { + return fmt.Errorf("%s | %w", "Ingest", err) + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return fmt.Errorf("%s | %w", "Jvm", err) + } + + case "modules": + if err := dec.Decode(&s.Modules); err != nil { + return fmt.Errorf("%s | %w", "Modules", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "os": + if err := dec.Decode(&s.Os); err != nil { + return fmt.Errorf("%s | %w", "Os", err) + } + + case "plugins": + if err := dec.Decode(&s.Plugins); err != nil { + return fmt.Errorf("%s | %w", "Plugins", err) + } + + case "process": + if err := dec.Decode(&s.Process); err != nil { + return fmt.Errorf("%s | %w", "Process", err) + } + + case "remote_cluster_server": + if err := dec.Decode(&s.RemoteClusterServer); err != nil { + return fmt.Errorf("%s | %w", "RemoteClusterServer", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "thread_pool": + if s.ThreadPool == nil { + s.ThreadPool = make(map[string]NodeThreadPoolInfo, 0) + } + if err := dec.Decode(&s.ThreadPool); err != nil { + return 
fmt.Errorf("%s | %w", "ThreadPool", err) + } + + case "total_indexing_buffer": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalIndexingBuffer", err) + } + s.TotalIndexingBuffer = &value + case float64: + f := int64(v) + s.TotalIndexingBuffer = &f + } + + case "total_indexing_buffer_in_bytes": + if err := dec.Decode(&s.TotalIndexingBufferInBytes); err != nil { + return fmt.Errorf("%s | %w", "TotalIndexingBufferInBytes", err) + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "transport_version": + if err := dec.Decode(&s.TransportVersion); err != nil { + return fmt.Errorf("%s | %w", "TransportVersion", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewNodeInfo returns a NodeInfo. +func NewNodeInfo() *NodeInfo { + r := &NodeInfo{ + Aggregations: make(map[string]NodeInfoAggregation), + Attributes: make(map[string]string), + ComponentVersions: make(map[string]int), + ThreadPool: make(map[string]NodeThreadPoolInfo), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaction.go new file mode 100644 index 000000000..51eae5ec5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaction.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L194-L196 +type NodeInfoAction struct { + DestructiveRequiresName string `json:"destructive_requires_name"` +} + +func (s *NodeInfoAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "destructive_requires_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DestructiveRequiresName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DestructiveRequiresName = o + + } + } + return nil +} + +// NewNodeInfoAction returns a NodeInfoAction. +func NewNodeInfoAction() *NodeInfoAction { + r := &NodeInfoAction{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaggregation.go new file mode 100644 index 000000000..09183de52 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoaggregation.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L250-L252 +type NodeInfoAggregation struct { + Types []string `json:"types"` +} + +// NewNodeInfoAggregation returns a NodeInfoAggregation. +func NewNodeInfoAggregation() *NodeInfoAggregation { + r := &NodeInfoAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfobootstrap.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfobootstrap.go new file mode 100644 index 000000000..634d9b190 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfobootstrap.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoBootstrap type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L214-L216 +type NodeInfoBootstrap struct { + MemoryLock string `json:"memory_lock"` +} + +func (s *NodeInfoBootstrap) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_lock": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MemoryLock", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemoryLock = o + + } + } + return nil +} + +// NewNodeInfoBootstrap returns a NodeInfoBootstrap. +func NewNodeInfoBootstrap() *NodeInfoBootstrap { + r := &NodeInfoBootstrap{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoclient.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoclient.go new file mode 100644 index 000000000..ec033981e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoclient.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoClient type. 
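// Editor's sketch (illustrative, stdlib only): string fields in these types
// are read through the Unquote fallback seen above; the raw message is
// unquoted when it is a JSON string and kept verbatim otherwise, so a
// string-typed field tolerates non-string input rather than failing.
package main

import (
	"fmt"
	"strconv"
)

func asString(raw []byte) string {
	o := string(raw)
	if u, err := strconv.Unquote(o); err == nil {
		return u // raw was a quoted JSON string
	}
	return o // raw was a number, bool, etc.: keep it verbatim
}

func main() {
	fmt.Println(asString([]byte(`"local"`))) // local
	fmt.Println(asString([]byte(`42`)))      // 42
}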
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L198-L200 +type NodeInfoClient struct { + Type string `json:"type"` +} + +func (s *NodeInfoClient) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewNodeInfoClient returns a NodeInfoClient. +func NewNodeInfoClient() *NodeInfoClient { + r := &NodeInfoClient{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfodiscover.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfodiscover.go new file mode 100644 index 000000000..8429640c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfodiscover.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoDiscover type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L183-L192
+type NodeInfoDiscover struct {
+	NodeInfoDiscover map[string]json.RawMessage `json:"-"`
+	SeedHosts        []string                   `json:"seed_hosts,omitempty"`
+	SeedProviders    []string                   `json:"seed_providers,omitempty"`
+	Type             *string                    `json:"type,omitempty"`
+}
+
+func (s *NodeInfoDiscover) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "seed_hosts":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "SeedHosts", err)
+				}
+
+				s.SeedHosts = append(s.SeedHosts, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SeedHosts); err != nil {
+					return fmt.Errorf("%s | %w", "SeedHosts", err)
+				}
+			}
+
+		case "seed_providers":
+			if err := dec.Decode(&s.SeedProviders); err != nil {
+				return fmt.Errorf("%s | %w", "SeedProviders", err)
+			}
+
+		case "type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Type = &o
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.NodeInfoDiscover == nil {
+					s.NodeInfoDiscover = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "NodeInfoDiscover", err)
+				}
+				s.NodeInfoDiscover[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s NodeInfoDiscover) MarshalJSON() ([]byte, error) {
+	type opt NodeInfoDiscover
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.NodeInfoDiscover {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "NodeInfoDiscover")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewNodeInfoDiscover returns a NodeInfoDiscover.
+func NewNodeInfoDiscover() *NodeInfoDiscover {
+	r := &NodeInfoDiscover{
+		NodeInfoDiscover: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfohttp.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfohttp.go
new file mode 100644
index 000000000..935eb8b2d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfohttp.go
@@ -0,0 +1,104 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
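The NodeInfoDiscover decoder above accepts seed_hosts as either a single string or an array, and any discovery setting it does not recognize is kept in the embedded NodeInfoDiscover map, which MarshalJSON re-inlines. A minimal round-trip sketch against the vendored package; the payload values and the custom.setting key are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// seed_hosts arrives here as a bare string; the decoder also accepts an
	// array. The unrecognized "custom.setting" key is kept in the embedded map.
	payload := []byte(`{"type":"zen","seed_hosts":"127.0.0.1:9300","custom.setting":true}`)

	d := types.NewNodeInfoDiscover()
	if err := json.Unmarshal(payload, d); err != nil {
		panic(err)
	}
	fmt.Println(*d.Type, d.SeedHosts, string(d.NodeInfoDiscover["custom.setting"]))
	// Prints: zen [127.0.0.1:9300] true

	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // "custom.setting" is re-inlined alongside the typed fields
}
```

The single-string seed_hosts form lands in a one-element slice, so callers can treat both wire shapes the same way.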
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoHttp type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L326-L331 +type NodeInfoHttp struct { + BoundAddress []string `json:"bound_address"` + MaxContentLength ByteSize `json:"max_content_length,omitempty"` + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` + PublishAddress string `json:"publish_address"` +} + +func (s *NodeInfoHttp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bound_address": + if err := dec.Decode(&s.BoundAddress); err != nil { + return fmt.Errorf("%s | %w", "BoundAddress", err) + } + + case "max_content_length": + if err := dec.Decode(&s.MaxContentLength); err != nil { + return fmt.Errorf("%s | %w", "MaxContentLength", err) + } + + case "max_content_length_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxContentLengthInBytes", err) + } + s.MaxContentLengthInBytes = value + case float64: + f := int64(v) + s.MaxContentLengthInBytes = f + } + + case "publish_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PublishAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PublishAddress = o + + } + } + return nil +} + +// NewNodeInfoHttp returns a NodeInfoHttp. +func NewNodeInfoHttp() *NodeInfoHttp { + r := &NodeInfoHttp{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingest.go new file mode 100644 index 000000000..4d6231827 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingest.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoIngest type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L242-L244 +type NodeInfoIngest struct { + Processors []NodeInfoIngestProcessor `json:"processors"` +} + +// NewNodeInfoIngest returns a NodeInfoIngest. +func NewNodeInfoIngest() *NodeInfoIngest { + r := &NodeInfoIngest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestdownloader.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestdownloader.go new file mode 100644 index 000000000..b78def3e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestdownloader.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoIngestDownloader type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L138-L140 +type NodeInfoIngestDownloader struct { + Enabled string `json:"enabled"` +} + +func (s *NodeInfoIngestDownloader) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + +// NewNodeInfoIngestDownloader returns a NodeInfoIngestDownloader. +func NewNodeInfoIngestDownloader() *NodeInfoIngestDownloader { + r := &NodeInfoIngestDownloader{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestinfo.go new file mode 100644 index 000000000..df78a90f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestinfo.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoIngestInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L134-L136 +type NodeInfoIngestInfo struct { + Downloader NodeInfoIngestDownloader `json:"downloader"` +} + +// NewNodeInfoIngestInfo returns a NodeInfoIngestInfo. +func NewNodeInfoIngestInfo() *NodeInfoIngestInfo { + r := &NodeInfoIngestInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestprocessor.go new file mode 100644 index 000000000..f721a114b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoingestprocessor.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoIngestProcessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L246-L248 +type NodeInfoIngestProcessor struct { + Type string `json:"type"` +} + +func (s *NodeInfoIngestProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewNodeInfoIngestProcessor returns a NodeInfoIngestProcessor. +func NewNodeInfoIngestProcessor() *NodeInfoIngestProcessor { + r := &NodeInfoIngestProcessor{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfojvmmemory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfojvmmemory.go new file mode 100644 index 000000000..2631a8c3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfojvmmemory.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoJvmMemory type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L333-L344 +type NodeInfoJvmMemory struct { + DirectMax ByteSize `json:"direct_max,omitempty"` + DirectMaxInBytes int64 `json:"direct_max_in_bytes"` + HeapInit ByteSize `json:"heap_init,omitempty"` + HeapInitInBytes int64 `json:"heap_init_in_bytes"` + HeapMax ByteSize `json:"heap_max,omitempty"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + NonHeapInit ByteSize `json:"non_heap_init,omitempty"` + NonHeapInitInBytes int64 `json:"non_heap_init_in_bytes"` + NonHeapMax ByteSize `json:"non_heap_max,omitempty"` + NonHeapMaxInBytes int64 `json:"non_heap_max_in_bytes"` +} + +func (s *NodeInfoJvmMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "direct_max": + if err := dec.Decode(&s.DirectMax); err != nil { + return fmt.Errorf("%s | %w", "DirectMax", err) + } + + case "direct_max_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DirectMaxInBytes", err) + } + s.DirectMaxInBytes = value + case float64: + f := int64(v) + s.DirectMaxInBytes = f + } + + case "heap_init": + if err := dec.Decode(&s.HeapInit); err != nil { + return fmt.Errorf("%s | %w", "HeapInit", err) + } + + case "heap_init_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapInitInBytes", err) + } + s.HeapInitInBytes = value + case float64: + f := int64(v) + s.HeapInitInBytes = f + } + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return fmt.Errorf("%s | %w", "HeapMax", err) + } + + case "heap_max_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HeapMaxInBytes", err) + } + s.HeapMaxInBytes = value + case float64: + f := int64(v) + s.HeapMaxInBytes = f + } + + case "non_heap_init": + if err := dec.Decode(&s.NonHeapInit); err != nil { + return fmt.Errorf("%s | %w", "NonHeapInit", err) + } + + case "non_heap_init_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NonHeapInitInBytes", err) + } + s.NonHeapInitInBytes = value + case float64: + f := int64(v) + s.NonHeapInitInBytes = f + } + + case "non_heap_max": + if err := dec.Decode(&s.NonHeapMax); err != nil { + return fmt.Errorf("%s | %w", "NonHeapMax", err) + } + + case "non_heap_max_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NonHeapMaxInBytes", err) + } + s.NonHeapMaxInBytes = value + case float64: + f := int64(v) + s.NonHeapMaxInBytes = f + } + + } + } + return nil +} + +// NewNodeInfoJvmMemory returns a NodeInfoJvmMemory. 
+func NewNodeInfoJvmMemory() *NodeInfoJvmMemory { + r := &NodeInfoJvmMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfomemory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfomemory.go new file mode 100644 index 000000000..3a9330ef5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfomemory.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L346-L349 +type NodeInfoMemory struct { + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` +} + +func (s *NodeInfoMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = o + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = value + case float64: + f := int64(v) + s.TotalInBytes = f + } + + } + } + return nil +} + +// NewNodeInfoMemory returns a NodeInfoMemory. +func NewNodeInfoMemory() *NodeInfoMemory { + r := &NodeInfoMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfooscpu.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfooscpu.go new file mode 100644 index 000000000..4a988dc68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfooscpu.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
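NodeInfoJvmMemory and NodeInfoMemory, added above, decode each *_in_bytes field leniently: a JSON number and a quoted string both parse into the int64 field. A short sketch with made-up values, assuming the vendored module resolves on the build path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Whichever spelling a node emits, total_in_bytes decodes to the same int64.
	payloads := []string{
		`{"total":"31gb","total_in_bytes":33285996544}`,
		`{"total":"31gb","total_in_bytes":"33285996544"}`,
	}
	for _, p := range payloads {
		m := types.NewNodeInfoMemory()
		if err := json.Unmarshal([]byte(p), m); err != nil {
			panic(err)
		}
		fmt.Println(m.Total, m.TotalInBytes) // both print: 31gb 33285996544
	}
}
```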
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoOSCPU type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L351-L360 +type NodeInfoOSCPU struct { + CacheSize string `json:"cache_size"` + CacheSizeInBytes int `json:"cache_size_in_bytes"` + CoresPerSocket int `json:"cores_per_socket"` + Mhz int `json:"mhz"` + Model string `json:"model"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + Vendor string `json:"vendor"` +} + +func (s *NodeInfoOSCPU) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CacheSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CacheSize = o + + case "cache_size_in_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CacheSizeInBytes", err) + } + s.CacheSizeInBytes = value + case float64: + f := int(v) + s.CacheSizeInBytes = f + } + + case "cores_per_socket": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CoresPerSocket", err) + } + s.CoresPerSocket = value + case float64: + f := int(v) + s.CoresPerSocket = f + } + + case "mhz": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Mhz", err) + } + s.Mhz = value + case float64: + f := int(v) + s.Mhz = f + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = o + + case "total_cores": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCores", err) + } + s.TotalCores = value + case float64: + f := int(v) + s.TotalCores = f + } + + case "total_sockets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSockets", err) + } + s.TotalSockets = value + case float64: + f := int(v) + s.TotalSockets = f + } + + case "vendor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Vendor", err) + } + o := string(tmp[:]) + o, err 
= strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Vendor = o + + } + } + return nil +} + +// NewNodeInfoOSCPU returns a NodeInfoOSCPU. +func NewNodeInfoOSCPU() *NodeInfoOSCPU { + r := &NodeInfoOSCPU{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfopath.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfopath.go new file mode 100644 index 000000000..2b2e5d270 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfopath.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoPath type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L168-L173 +type NodeInfoPath struct { + Data []string `json:"data,omitempty"` + Home *string `json:"home,omitempty"` + Logs *string `json:"logs,omitempty"` + Repo []string `json:"repo,omitempty"` +} + +func (s *NodeInfoPath) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + s.Data = append(s.Data, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + } + + case "home": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Home", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Home = &o + + case "logs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Logs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Logs = &o + + case "repo": + if err := dec.Decode(&s.Repo); err != nil { + return fmt.Errorf("%s | %w", "Repo", err) + } + + } + } + return nil +} + +// NewNodeInfoPath returns a NodeInfoPath. 
+func NewNodeInfoPath() *NodeInfoPath { + r := &NodeInfoPath{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositories.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositories.go new file mode 100644 index 000000000..bbd41bee0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositories.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoRepositories type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L175-L177 +type NodeInfoRepositories struct { + Url NodeInfoRepositoriesUrl `json:"url"` +} + +// NewNodeInfoRepositories returns a NodeInfoRepositories. +func NewNodeInfoRepositories() *NodeInfoRepositories { + r := &NodeInfoRepositories{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositoriesurl.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositoriesurl.go new file mode 100644 index 000000000..bd7c3a6c9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinforepositoriesurl.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoRepositoriesUrl type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L179-L181 +type NodeInfoRepositoriesUrl struct { + AllowedUrls string `json:"allowed_urls"` +} + +func (s *NodeInfoRepositoriesUrl) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allowed_urls": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AllowedUrls", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllowedUrls = o + + } + } + return nil +} + +// NewNodeInfoRepositoriesUrl returns a NodeInfoRepositoriesUrl. +func NewNodeInfoRepositoriesUrl() *NodeInfoRepositoriesUrl { + r := &NodeInfoRepositoriesUrl{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoscript.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoscript.go new file mode 100644 index 000000000..4c189d639 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoscript.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoScript type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L304-L307 +type NodeInfoScript struct { + AllowedTypes string `json:"allowed_types"` + DisableMaxCompilationsRate *string `json:"disable_max_compilations_rate,omitempty"` +} + +func (s *NodeInfoScript) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allowed_types": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AllowedTypes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllowedTypes = o + + case "disable_max_compilations_rate": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DisableMaxCompilationsRate", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DisableMaxCompilationsRate = &o + + } + } + return nil +} + +// NewNodeInfoScript returns a NodeInfoScript. +func NewNodeInfoScript() *NodeInfoScript { + r := &NodeInfoScript{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearch.go new file mode 100644 index 000000000..154503de6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearch.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoSearch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L309-L311 +type NodeInfoSearch struct { + Remote NodeInfoSearchRemote `json:"remote"` +} + +// NewNodeInfoSearch returns a NodeInfoSearch. +func NewNodeInfoSearch() *NodeInfoSearch { + r := &NodeInfoSearch{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearchremote.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearchremote.go new file mode 100644 index 000000000..9ede1ca08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosearchremote.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSearchRemote type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L313-L315 +type NodeInfoSearchRemote struct { + Connect string `json:"connect"` +} + +func (s *NodeInfoSearchRemote) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connect": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Connect", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Connect = o + + } + } + return nil +} + +// NewNodeInfoSearchRemote returns a NodeInfoSearchRemote. +func NewNodeInfoSearchRemote() *NodeInfoSearchRemote { + r := &NodeInfoSearchRemote{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettings.go new file mode 100644 index 000000000..9628ccd9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettings.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L79-L95 +type NodeInfoSettings struct { + Action *NodeInfoAction `json:"action,omitempty"` + Bootstrap *NodeInfoBootstrap `json:"bootstrap,omitempty"` + Client *NodeInfoClient `json:"client,omitempty"` + Cluster NodeInfoSettingsCluster `json:"cluster"` + Discovery *NodeInfoDiscover `json:"discovery,omitempty"` + Http NodeInfoSettingsHttp `json:"http"` + Ingest *NodeInfoSettingsIngest `json:"ingest,omitempty"` + Network *NodeInfoSettingsNetwork `json:"network,omitempty"` + Node NodeInfoSettingsNode `json:"node"` + Path *NodeInfoPath `json:"path,omitempty"` + Repositories *NodeInfoRepositories `json:"repositories,omitempty"` + Script *NodeInfoScript `json:"script,omitempty"` + Search *NodeInfoSearch `json:"search,omitempty"` + Transport NodeInfoSettingsTransport `json:"transport"` + Xpack *NodeInfoXpack `json:"xpack,omitempty"` +} + +// NewNodeInfoSettings returns a NodeInfoSettings. +func NewNodeInfoSettings() *NodeInfoSettings { + r := &NodeInfoSettings{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingscluster.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingscluster.go new file mode 100644 index 000000000..6e62d20d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingscluster.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeInfoSettingsCluster type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L142-L152 +type NodeInfoSettingsCluster struct { + DeprecationIndexing *DeprecationIndexing `json:"deprecation_indexing,omitempty"` + Election NodeInfoSettingsClusterElection `json:"election"` + InitialMasterNodes []string `json:"initial_master_nodes,omitempty"` + Name string `json:"name"` + Routing *IndexRouting `json:"routing,omitempty"` +} + +func (s *NodeInfoSettingsCluster) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecation_indexing": + if err := dec.Decode(&s.DeprecationIndexing); err != nil { + return fmt.Errorf("%s | %w", "DeprecationIndexing", err) + } + + case "election": + if err := dec.Decode(&s.Election); err != nil { + return fmt.Errorf("%s | %w", "Election", err) + } + + case "initial_master_nodes": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "InitialMasterNodes", err) + } + + s.InitialMasterNodes = append(s.InitialMasterNodes, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.InitialMasterNodes); err != nil { + return fmt.Errorf("%s | %w", "InitialMasterNodes", err) + } + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + } + } + return nil +} + +// NewNodeInfoSettingsCluster returns a NodeInfoSettingsCluster. +func NewNodeInfoSettingsCluster() *NodeInfoSettingsCluster { + r := &NodeInfoSettingsCluster{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsclusterelection.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsclusterelection.go new file mode 100644 index 000000000..9915aeec7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsclusterelection.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeInfoSettingsClusterElection type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L158-L160 +type NodeInfoSettingsClusterElection struct { + Strategy string `json:"strategy"` +} + +func (s *NodeInfoSettingsClusterElection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "strategy": + if err := dec.Decode(&s.Strategy); err != nil { + return fmt.Errorf("%s | %w", "Strategy", err) + } + + } + } + return nil +} + +// NewNodeInfoSettingsClusterElection returns a NodeInfoSettingsClusterElection. +func NewNodeInfoSettingsClusterElection() *NodeInfoSettingsClusterElection { + r := &NodeInfoSettingsClusterElection{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttp.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttp.go new file mode 100644 index 000000000..ed2c986bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttp.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsHttp type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L202-L207 +type NodeInfoSettingsHttp struct { + Compression *string `json:"compression,omitempty"` + Port *string `json:"port,omitempty"` + Type NodeInfoSettingsHttpType `json:"type"` + TypeDefault *string `json:"type.default,omitempty"` +} + +func (s *NodeInfoSettingsHttp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Compression", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Compression = &o + + case "port": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "type.default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TypeDefault", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeDefault = &o + + } + } + return nil +} + +// NewNodeInfoSettingsHttp returns a NodeInfoSettingsHttp. +func NewNodeInfoSettingsHttp() *NodeInfoSettingsHttp { + r := &NodeInfoSettingsHttp{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttptype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttptype.go new file mode 100644 index 000000000..48d8853ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingshttptype.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsHttpType type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L209-L212 +type NodeInfoSettingsHttpType struct { + Default string `json:"default"` +} + +func (s *NodeInfoSettingsHttpType) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Default) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Default", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Default = o + + } + } + return nil +} + +// NewNodeInfoSettingsHttpType returns a NodeInfoSettingsHttpType. +func NewNodeInfoSettingsHttpType() *NodeInfoSettingsHttpType { + r := &NodeInfoSettingsHttpType{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsingest.go new file mode 100644 index 000000000..964527f1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsingest.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoSettingsIngest type. 
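NodeInfoSettingsHttpType, defined above, is polymorphic on the wire: a bare string and an object carrying a "default" key both decode into the Default field. A small sketch with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both shapes land in the same struct field.
	for _, p := range []string{
		`"security4"`,
		`{"default":"netty4"}`,
	} {
		ht := types.NewNodeInfoSettingsHttpType()
		if err := json.Unmarshal([]byte(p), ht); err != nil {
			panic(err)
		}
		fmt.Println(ht.Default)
	}
}
```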
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L97-L132 +type NodeInfoSettingsIngest struct { + Append *NodeInfoIngestInfo `json:"append,omitempty"` + Attachment *NodeInfoIngestInfo `json:"attachment,omitempty"` + Bytes *NodeInfoIngestInfo `json:"bytes,omitempty"` + Circle *NodeInfoIngestInfo `json:"circle,omitempty"` + Convert *NodeInfoIngestInfo `json:"convert,omitempty"` + Csv *NodeInfoIngestInfo `json:"csv,omitempty"` + Date *NodeInfoIngestInfo `json:"date,omitempty"` + DateIndexName *NodeInfoIngestInfo `json:"date_index_name,omitempty"` + Dissect *NodeInfoIngestInfo `json:"dissect,omitempty"` + DotExpander *NodeInfoIngestInfo `json:"dot_expander,omitempty"` + Drop *NodeInfoIngestInfo `json:"drop,omitempty"` + Enrich *NodeInfoIngestInfo `json:"enrich,omitempty"` + Fail *NodeInfoIngestInfo `json:"fail,omitempty"` + Foreach *NodeInfoIngestInfo `json:"foreach,omitempty"` + Geoip *NodeInfoIngestInfo `json:"geoip,omitempty"` + Grok *NodeInfoIngestInfo `json:"grok,omitempty"` + Gsub *NodeInfoIngestInfo `json:"gsub,omitempty"` + Inference *NodeInfoIngestInfo `json:"inference,omitempty"` + Join *NodeInfoIngestInfo `json:"join,omitempty"` + Json *NodeInfoIngestInfo `json:"json,omitempty"` + Kv *NodeInfoIngestInfo `json:"kv,omitempty"` + Lowercase *NodeInfoIngestInfo `json:"lowercase,omitempty"` + Pipeline *NodeInfoIngestInfo `json:"pipeline,omitempty"` + Remove *NodeInfoIngestInfo `json:"remove,omitempty"` + Rename *NodeInfoIngestInfo `json:"rename,omitempty"` + Script *NodeInfoIngestInfo `json:"script,omitempty"` + Set *NodeInfoIngestInfo `json:"set,omitempty"` + SetSecurityUser *NodeInfoIngestInfo `json:"set_security_user,omitempty"` + Sort *NodeInfoIngestInfo `json:"sort,omitempty"` + Split *NodeInfoIngestInfo `json:"split,omitempty"` + Trim *NodeInfoIngestInfo `json:"trim,omitempty"` + Uppercase *NodeInfoIngestInfo `json:"uppercase,omitempty"` + Urldecode *NodeInfoIngestInfo `json:"urldecode,omitempty"` + UserAgent *NodeInfoIngestInfo `json:"user_agent,omitempty"` +} + +// NewNodeInfoSettingsIngest returns a NodeInfoSettingsIngest. +func NewNodeInfoSettingsIngest() *NodeInfoSettingsIngest { + r := &NodeInfoSettingsIngest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnetwork.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnetwork.go new file mode 100644 index 000000000..517ae2ded --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnetwork.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeInfoSettingsNetwork type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L238-L240 +type NodeInfoSettingsNetwork struct { + Host []string `json:"host,omitempty"` +} + +func (s *NodeInfoSettingsNetwork) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + s.Host = append(s.Host, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + } + + } + } + return nil +} + +// NewNodeInfoSettingsNetwork returns a NodeInfoSettingsNetwork. +func NewNodeInfoSettingsNetwork() *NodeInfoSettingsNetwork { + r := &NodeInfoSettingsNetwork{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnode.go new file mode 100644 index 000000000..4ccb215b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingsnode.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsNode type. 
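
NodeInfoSettingsNetwork.Host illustrates the scalar-or-array normalization used throughout these decoders: a lone string is appended as a one-element slice, while a JSON array decodes directly. A short sketch with assumed inputs:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a, b types.NodeInfoSettingsNetwork
	// "network.host" may be reported as a scalar or a list; both normalize
	// to []string. Errors are ignored here only for brevity of the sketch.
	_ = json.Unmarshal([]byte(`{"host":"0.0.0.0"}`), &a)
	_ = json.Unmarshal([]byte(`{"host":["10.0.0.1","10.0.0.2"]}`), &b)
	fmt.Println(a.Host) // [0.0.0.0]
	fmt.Println(b.Host) // [10.0.0.1 10.0.0.2]
}
```
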
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L162-L166 +type NodeInfoSettingsNode struct { + Attr map[string]json.RawMessage `json:"attr"` + MaxLocalStorageNodes *string `json:"max_local_storage_nodes,omitempty"` + Name string `json:"name"` +} + +func (s *NodeInfoSettingsNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attr": + if s.Attr == nil { + s.Attr = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Attr); err != nil { + return fmt.Errorf("%s | %w", "Attr", err) + } + + case "max_local_storage_nodes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxLocalStorageNodes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxLocalStorageNodes = &o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewNodeInfoSettingsNode returns a NodeInfoSettingsNode. +func NewNodeInfoSettingsNode() *NodeInfoSettingsNode { + r := &NodeInfoSettingsNode{ + Attr: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransport.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransport.go new file mode 100644 index 000000000..6e902b388 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransport.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsTransport type. 
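
NodeInfoSettingsNode shows the strconv.Unquote fallback that recurs in these files: fields typed as strings in the spec still decode when a node reports a bare number, because the failed unquote leaves the raw token in place. A sketch with an assumed payload:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var n types.NodeInfoSettingsNode
	// max_local_storage_nodes arrives as a bare number in this assumed input.
	data := []byte(`{"name":"node-1","attr":{},"max_local_storage_nodes":2}`)
	if err := json.Unmarshal(data, &n); err != nil {
		panic(err)
	}
	// strconv.Unquote fails on the token `2`, so the raw text "2" is kept.
	fmt.Println(n.Name, *n.MaxLocalStorageNodes) // node-1 2
}
```
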
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L218-L227 +type NodeInfoSettingsTransport struct { + Features *NodeInfoSettingsTransportFeatures `json:"features,omitempty"` + // IgnoreDeserializationErrors Only used in unit tests + IgnoreDeserializationErrors Stringifiedboolean `json:"ignore_deserialization_errors,omitempty"` + Type NodeInfoSettingsTransportType `json:"type"` + TypeDefault *string `json:"type.default,omitempty"` +} + +func (s *NodeInfoSettingsTransport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "features": + if err := dec.Decode(&s.Features); err != nil { + return fmt.Errorf("%s | %w", "Features", err) + } + + case "ignore_deserialization_errors": + if err := dec.Decode(&s.IgnoreDeserializationErrors); err != nil { + return fmt.Errorf("%s | %w", "IgnoreDeserializationErrors", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "type.default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TypeDefault", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeDefault = &o + + } + } + return nil +} + +// NewNodeInfoSettingsTransport returns a NodeInfoSettingsTransport. +func NewNodeInfoSettingsTransport() *NodeInfoSettingsTransport { + r := &NodeInfoSettingsTransport{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransportfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransportfeatures.go new file mode 100644 index 000000000..afc23d4e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransportfeatures.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsTransportFeatures type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L234-L236 +type NodeInfoSettingsTransportFeatures struct { + XPack string `json:"x-pack"` +} + +func (s *NodeInfoSettingsTransportFeatures) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "x-pack": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "XPack", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XPack = o + + } + } + return nil +} + +// NewNodeInfoSettingsTransportFeatures returns a NodeInfoSettingsTransportFeatures. +func NewNodeInfoSettingsTransportFeatures() *NodeInfoSettingsTransportFeatures { + r := &NodeInfoSettingsTransportFeatures{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransporttype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransporttype.go new file mode 100644 index 000000000..d9cfebb48 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfosettingstransporttype.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoSettingsTransportType type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L229-L232 +type NodeInfoSettingsTransportType struct { + Default string `json:"default"` +} + +func (s *NodeInfoSettingsTransportType) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Default) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Default", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Default = o + + } + } + return nil +} + +// NewNodeInfoSettingsTransportType returns a NodeInfoSettingsTransportType. +func NewNodeInfoSettingsTransportType() *NodeInfoSettingsTransportType { + r := &NodeInfoSettingsTransportType{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfotransport.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfotransport.go new file mode 100644 index 000000000..61c746ba4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfotransport.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoTransport type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L362-L366 +type NodeInfoTransport struct { + BoundAddress []string `json:"bound_address"` + Profiles map[string]string `json:"profiles"` + PublishAddress string `json:"publish_address"` +} + +func (s *NodeInfoTransport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bound_address": + if err := dec.Decode(&s.BoundAddress); err != nil { + return fmt.Errorf("%s | %w", "BoundAddress", err) + } + + case "profiles": + if s.Profiles == nil { + s.Profiles = make(map[string]string, 0) + } + if err := dec.Decode(&s.Profiles); err != nil { + return fmt.Errorf("%s | %w", "Profiles", err) + } + + case "publish_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PublishAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PublishAddress = o + + } + } + return nil +} + +// NewNodeInfoTransport returns a NodeInfoTransport. 
+func NewNodeInfoTransport() *NodeInfoTransport { + r := &NodeInfoTransport{ + Profiles: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpack.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpack.go new file mode 100644 index 000000000..731996da4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpack.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// NodeInfoXpack type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L254-L259 +type NodeInfoXpack struct { + License *NodeInfoXpackLicense `json:"license,omitempty"` + Ml *NodeInfoXpackMl `json:"ml,omitempty"` + Notification map[string]json.RawMessage `json:"notification,omitempty"` + Security NodeInfoXpackSecurity `json:"security"` +} + +// NewNodeInfoXpack returns a NodeInfoXpack. +func NewNodeInfoXpack() *NodeInfoXpack { + r := &NodeInfoXpack{ + Notification: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicense.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicense.go new file mode 100644 index 000000000..93b89accc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicense.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoXpackLicense type. 
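
NodeInfoXpack keeps `notification` as `map[string]json.RawMessage` because its shape is not pinned down by the spec; callers decode the entries they care about on demand. A hedged sketch with an assumed payload:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var x types.NodeInfoXpack
	data := []byte(`{"security":{"enabled":"true"},"notification":{"email":{"account":"ops"}}}`)
	if err := json.Unmarshal(data, &x); err != nil {
		panic(err)
	}
	// Decode one raw notification entry on demand; the "email" shape here
	// is assumed for illustration, not taken from the spec.
	var email struct {
		Account string `json:"account"`
	}
	_ = json.Unmarshal(x.Notification["email"], &email)
	fmt.Println(x.Security.Enabled, email.Account) // true ops
}
```
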
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L296-L298 +type NodeInfoXpackLicense struct { + SelfGenerated NodeInfoXpackLicenseType `json:"self_generated"` +} + +// NewNodeInfoXpackLicense returns a NodeInfoXpackLicense. +func NewNodeInfoXpackLicense() *NodeInfoXpackLicense { + r := &NodeInfoXpackLicense{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicensetype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicensetype.go new file mode 100644 index 000000000..8b3bb723b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacklicensetype.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoXpackLicenseType type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L300-L302 +type NodeInfoXpackLicenseType struct { + Type string `json:"type"` +} + +func (s *NodeInfoXpackLicenseType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewNodeInfoXpackLicenseType returns a NodeInfoXpackLicenseType. +func NewNodeInfoXpackLicenseType() *NodeInfoXpackLicenseType { + r := &NodeInfoXpackLicenseType{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpackml.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpackml.go new file mode 100644 index 000000000..22d883d81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpackml.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoXpackMl type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L268-L270 +type NodeInfoXpackMl struct { + UseAutoMachineMemoryPercent *bool `json:"use_auto_machine_memory_percent,omitempty"` +} + +func (s *NodeInfoXpackMl) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "use_auto_machine_memory_percent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UseAutoMachineMemoryPercent", err) + } + s.UseAutoMachineMemoryPercent = &value + case bool: + s.UseAutoMachineMemoryPercent = &v + } + + } + } + return nil +} + +// NewNodeInfoXpackMl returns a NodeInfoXpackMl. +func NewNodeInfoXpackMl() *NodeInfoXpackMl { + r := &NodeInfoXpackMl{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurity.go new file mode 100644 index 000000000..f2ccb4b68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurity.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoXpackSecurity type. 
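
NodeInfoXpackMl above shows the boolean variant of the same leniency: flags arrive either as JSON booleans or as their string forms (as when settings round-trip through elasticsearch.yml), and both are accepted. A sketch with assumed inputs:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, raw := range []string{
		`{"use_auto_machine_memory_percent":true}`,   // JSON bool
		`{"use_auto_machine_memory_percent":"true"}`, // stringified bool
	} {
		var ml types.NodeInfoXpackMl
		if err := json.Unmarshal([]byte(raw), &ml); err != nil {
			panic(err)
		}
		fmt.Println(*ml.UseAutoMachineMemoryPercent) // true in both cases
	}
}
```
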
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L261-L266 +type NodeInfoXpackSecurity struct { + Authc *NodeInfoXpackSecurityAuthc `json:"authc,omitempty"` + Enabled string `json:"enabled"` + Http *NodeInfoXpackSecuritySsl `json:"http,omitempty"` + Transport *NodeInfoXpackSecuritySsl `json:"transport,omitempty"` +} + +func (s *NodeInfoXpackSecurity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "authc": + if err := dec.Decode(&s.Authc); err != nil { + return fmt.Errorf("%s | %w", "Authc", err) + } + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return fmt.Errorf("%s | %w", "Http", err) + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + + } + } + return nil +} + +// NewNodeInfoXpackSecurity returns a NodeInfoXpackSecurity. +func NewNodeInfoXpackSecurity() *NodeInfoXpackSecurity { + r := &NodeInfoXpackSecurity{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthc.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthc.go new file mode 100644 index 000000000..8894d2ee2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthc.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoXpackSecurityAuthc type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L276-L279 +type NodeInfoXpackSecurityAuthc struct { + Realms *NodeInfoXpackSecurityAuthcRealms `json:"realms,omitempty"` + Token *NodeInfoXpackSecurityAuthcToken `json:"token,omitempty"` +} + +// NewNodeInfoXpackSecurityAuthc returns a NodeInfoXpackSecurityAuthc. 
+func NewNodeInfoXpackSecurityAuthc() *NodeInfoXpackSecurityAuthc { + r := &NodeInfoXpackSecurityAuthc{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealms.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealms.go new file mode 100644 index 000000000..98cfb3629 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealms.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoXpackSecurityAuthcRealms type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L281-L285 +type NodeInfoXpackSecurityAuthcRealms struct { + File map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"file,omitempty"` + Native map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"native,omitempty"` + Pki map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"pki,omitempty"` +} + +// NewNodeInfoXpackSecurityAuthcRealms returns a NodeInfoXpackSecurityAuthcRealms. +func NewNodeInfoXpackSecurityAuthcRealms() *NodeInfoXpackSecurityAuthcRealms { + r := &NodeInfoXpackSecurityAuthcRealms{ + File: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Native: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + Pki: make(map[string]NodeInfoXpackSecurityAuthcRealmsStatus), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go new file mode 100644 index 000000000..1d2aa1645 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoXpackSecurityAuthcRealmsStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L291-L294 +type NodeInfoXpackSecurityAuthcRealmsStatus struct { + Enabled *string `json:"enabled,omitempty"` + Order string `json:"order"` +} + +func (s *NodeInfoXpackSecurityAuthcRealmsStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = &o + + case "order": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Order = o + + } + } + return nil +} + +// NewNodeInfoXpackSecurityAuthcRealmsStatus returns a NodeInfoXpackSecurityAuthcRealmsStatus. +func NewNodeInfoXpackSecurityAuthcRealmsStatus() *NodeInfoXpackSecurityAuthcRealmsStatus { + r := &NodeInfoXpackSecurityAuthcRealmsStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthctoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthctoken.go new file mode 100644 index 000000000..36820e844 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityauthctoken.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeInfoXpackSecurityAuthcToken type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L287-L289 +type NodeInfoXpackSecurityAuthcToken struct { + Enabled string `json:"enabled"` +} + +func (s *NodeInfoXpackSecurityAuthcToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + +// NewNodeInfoXpackSecurityAuthcToken returns a NodeInfoXpackSecurityAuthcToken. +func NewNodeInfoXpackSecurityAuthcToken() *NodeInfoXpackSecurityAuthcToken { + r := &NodeInfoXpackSecurityAuthcToken{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityssl.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityssl.go new file mode 100644 index 000000000..778860be5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeinfoxpacksecurityssl.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodeInfoXpackSecuritySsl type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L272-L274 +type NodeInfoXpackSecuritySsl struct { + Ssl map[string]string `json:"ssl"` +} + +// NewNodeInfoXpackSecuritySsl returns a NodeInfoXpackSecuritySsl. +func NewNodeInfoXpackSecuritySsl() *NodeInfoXpackSecuritySsl { + r := &NodeInfoXpackSecuritySsl{ + Ssl: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodejvminfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodejvminfo.go new file mode 100644 index 000000000..42dc7b112 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodejvminfo.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeJvmInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L368-L381 +type NodeJvmInfo struct { + GcCollectors []string `json:"gc_collectors"` + InputArguments []string `json:"input_arguments"` + Mem NodeInfoJvmMemory `json:"mem"` + MemoryPools []string `json:"memory_pools"` + Pid int `json:"pid"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + UsingBundledJdk bool `json:"using_bundled_jdk"` + UsingCompressedOrdinaryObjectPointers *string `json:"using_compressed_ordinary_object_pointers,omitempty"` + Version string `json:"version"` + VmName string `json:"vm_name"` + VmVendor string `json:"vm_vendor"` + VmVersion string `json:"vm_version"` +} + +func (s *NodeJvmInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "gc_collectors": + if err := dec.Decode(&s.GcCollectors); err != nil { + return fmt.Errorf("%s | %w", "GcCollectors", err) + } + + case "input_arguments": + if err := dec.Decode(&s.InputArguments); err != nil { + return fmt.Errorf("%s | %w", "InputArguments", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "memory_pools": + if err := dec.Decode(&s.MemoryPools); err != nil { + return fmt.Errorf("%s | %w", "MemoryPools", err) + } + + case "pid": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pid", err) + } + s.Pid = value + case float64: + f := int(v) + s.Pid = f + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "using_bundled_jdk": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UsingBundledJdk", err) + } + s.UsingBundledJdk = value + case bool: + s.UsingBundledJdk = v + } + + case "using_compressed_ordinary_object_pointers": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UsingCompressedOrdinaryObjectPointers", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UsingCompressedOrdinaryObjectPointers = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "vm_name": + if err := dec.Decode(&s.VmName); err != nil { + return fmt.Errorf("%s | %w", "VmName", err) + } + + case "vm_vendor": + var tmp json.RawMessage + 
if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "VmVendor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmVendor = o + + case "vm_version": + if err := dec.Decode(&s.VmVersion); err != nil { + return fmt.Errorf("%s | %w", "VmVersion", err) + } + + } + } + return nil +} + +// NewNodeJvmInfo returns a NodeJvmInfo. +func NewNodeJvmInfo() *NodeJvmInfo { + r := &NodeJvmInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeoperatingsysteminfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeoperatingsysteminfo.go new file mode 100644 index 000000000..c52545334 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeoperatingsysteminfo.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeOperatingSystemInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L383-L400 +type NodeOperatingSystemInfo struct { + // AllocatedProcessors The number of processors actually used to calculate thread pool size. This + // number can be set with the node.processors setting of a node and defaults to + // the number of processors reported by the OS. 
+ AllocatedProcessors *int `json:"allocated_processors,omitempty"` + // Arch Name of the JVM architecture (ex: amd64, x86) + Arch string `json:"arch"` + // AvailableProcessors Number of processors available to the Java virtual machine + AvailableProcessors int `json:"available_processors"` + Cpu *NodeInfoOSCPU `json:"cpu,omitempty"` + Mem *NodeInfoMemory `json:"mem,omitempty"` + // Name Name of the operating system (ex: Linux, Windows, Mac OS X) + Name string `json:"name"` + PrettyName string `json:"pretty_name"` + // RefreshIntervalInMillis Refresh interval for the OS statistics + RefreshIntervalInMillis int64 `json:"refresh_interval_in_millis"` + Swap *NodeInfoMemory `json:"swap,omitempty"` + // Version Version of the operating system + Version string `json:"version"` +} + +func (s *NodeOperatingSystemInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocated_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllocatedProcessors", err) + } + s.AllocatedProcessors = &value + case float64: + f := int(v) + s.AllocatedProcessors = &f + } + + case "arch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Arch", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arch = o + + case "available_processors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "AvailableProcessors", err) + } + s.AvailableProcessors = value + case float64: + f := int(v) + s.AvailableProcessors = f + } + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return fmt.Errorf("%s | %w", "Cpu", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "pretty_name": + if err := dec.Decode(&s.PrettyName); err != nil { + return fmt.Errorf("%s | %w", "PrettyName", err) + } + + case "refresh_interval_in_millis": + if err := dec.Decode(&s.RefreshIntervalInMillis); err != nil { + return fmt.Errorf("%s | %w", "RefreshIntervalInMillis", err) + } + + case "swap": + if err := dec.Decode(&s.Swap); err != nil { + return fmt.Errorf("%s | %w", "Swap", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewNodeOperatingSystemInfo returns a NodeOperatingSystemInfo. +func NewNodeOperatingSystemInfo() *NodeOperatingSystemInfo { + r := &NodeOperatingSystemInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodepackagingtype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodepackagingtype.go new file mode 100644 index 000000000..c29e5260d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodepackagingtype.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodePackagingType type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L700-L713 +type NodePackagingType struct { + // Count Number of selected nodes using the distribution flavor and file type. + Count int `json:"count"` + // Flavor Type of Elasticsearch distribution. This is always `default`. + Flavor string `json:"flavor"` + // Type File type (such as `tar` or `zip`) used for the distribution package. + Type string `json:"type"` +} + +func (s *NodePackagingType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flavor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flavor = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewNodePackagingType returns a NodePackagingType. +func NewNodePackagingType() *NodePackagingType { + r := &NodePackagingType{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeprocessinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeprocessinfo.go new file mode 100644 index 000000000..a8be4872a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeprocessinfo.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
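
NodeOperatingSystemInfo and NodePackagingType above both coerce integer counters from either a JSON number (arriving as encoding/json's default float64) or a numeric string. A sketch with an assumed document:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var osInfo types.NodeOperatingSystemInfo
	// One count as a string, one as a number; both decode to Go ints.
	data := []byte(`{"arch":"amd64","name":"Linux","available_processors":"8","allocated_processors":4}`)
	if err := json.Unmarshal(data, &osInfo); err != nil {
		panic(err)
	}
	fmt.Println(osInfo.AvailableProcessors, *osInfo.AllocatedProcessors) // 8 4
}
```
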
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeProcessInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L402-L409 +type NodeProcessInfo struct { + // Id Process identifier (PID) + Id int64 `json:"id"` + // Mlockall Indicates if the process address space has been successfully locked in memory + Mlockall bool `json:"mlockall"` + // RefreshIntervalInMillis Refresh interval for the process statistics + RefreshIntervalInMillis int64 `json:"refresh_interval_in_millis"` +} + +func (s *NodeProcessInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "mlockall": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Mlockall", err) + } + s.Mlockall = value + case bool: + s.Mlockall = v + } + + case "refresh_interval_in_millis": + if err := dec.Decode(&s.RefreshIntervalInMillis); err != nil { + return fmt.Errorf("%s | %w", "RefreshIntervalInMillis", err) + } + + } + } + return nil +} + +// NewNodeProcessInfo returns a NodeProcessInfo. +func NewNodeProcessInfo() *NodeProcessInfo { + r := &NodeProcessInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodereloadresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodereloadresult.go new file mode 100644 index 000000000..e0e3ad246 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodereloadresult.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeReloadResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/NodeReloadResult.ts#L23-L26 +type NodeReloadResult struct { + Name string `json:"name"` + ReloadException *ErrorCause `json:"reload_exception,omitempty"` +} + +func (s *NodeReloadResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "reload_exception": + if err := dec.Decode(&s.ReloadException); err != nil { + return fmt.Errorf("%s | %w", "ReloadException", err) + } + + } + } + return nil +} + +// NewNodeReloadResult returns a NodeReloadResult. +func NewNodeReloadResult() *NodeReloadResult { + r := &NodeReloadResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescontext.go new file mode 100644 index 000000000..0f396c7a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescontext.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodesContext type. 
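Note: every UnmarshalJSON in these generated files shares one skeleton: stream tokens from a json.Decoder, treat io.EOF as the normal end of input, and dispatch on key tokens in a switch. Tokens that match no case (object delimiters, values of unknown keys) simply fall through, which is how unknown fields are skipped. A stripped-down sketch of just the loop:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

func main() {
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"name":"n1","unknown":1}`)))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break // normal termination, mirrors the generated code
			}
			panic(err)
		}
		// The generated decoders switch on t here; unmatched tokens
		// (delimiters, unknown keys and their values) are ignored.
		fmt.Printf("token: %v\n", t)
	}
}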
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1075-L1080 +type NodesContext struct { + CacheEvictions *int64 `json:"cache_evictions,omitempty"` + CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` + Compilations *int64 `json:"compilations,omitempty"` + Context *string `json:"context,omitempty"` +} + +func (s *NodesContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheEvictions", err) + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CompilationLimitTriggered", err) + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Compilations", err) + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "context": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Context", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Context = &o + + } + } + return nil +} + +// NewNodesContext returns a NodesContext. +func NewNodesContext() *NodesContext { + r := &NodesContext{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentials.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentials.go new file mode 100644 index 000000000..7f4c7b239 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentials.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodesCredentials type. 
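Note: string-valued fields such as NodesContext.Context are captured as raw JSON and passed through strconv.Unquote; when unquoting fails (the value was not a quoted string) the raw bytes are kept as-is. Illustrative stand-alone sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, raw := range []string{`"ingest"`, `ingest`} {
		o := raw
		if unquoted, err := strconv.Unquote(o); err == nil {
			o = unquoted // quoted input: strip the surrounding quotes
		}
		// bare input: Unquote returns an error and the original bytes survive
		fmt.Println(o) // prints ingest both times
	}
}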
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_service_credentials/types.ts#L23-L28 +type NodesCredentials struct { + // FileTokens File-backed tokens collected from all nodes + FileTokens map[string]NodesCredentialsFileToken `json:"file_tokens"` + // NodeStats General status showing how nodes respond to the above collection request + NodeStats NodeStatistics `json:"_nodes"` +} + +// NewNodesCredentials returns a NodesCredentials. +func NewNodesCredentials() *NodesCredentials { + r := &NodesCredentials{ + FileTokens: make(map[string]NodesCredentialsFileToken), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentialsfiletoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentialsfiletoken.go new file mode 100644 index 000000000..2cebf36fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodescredentialsfiletoken.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodesCredentialsFileToken type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_service_credentials/types.ts#L30-L32 +type NodesCredentialsFileToken struct { + Nodes []string `json:"nodes"` +} + +// NewNodesCredentialsFileToken returns a NodesCredentialsFileToken. +func NewNodesCredentialsFileToken() *NodesCredentialsFileToken { + r := &NodesCredentialsFileToken{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshard.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshard.go new file mode 100644 index 000000000..97e67530e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshard.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
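Note: the generated constructors pre-allocate map-typed fields (here FileTokens) because assigning into a nil Go map panics at runtime; NewNodesCredentials hands back a struct that is safe to populate immediately. Hypothetical usage sketch, assuming only the types added in this diff:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	creds := types.NewNodesCredentials()
	// Safe: the constructor already ran make(map[string]NodesCredentialsFileToken).
	creds.FileTokens["node-1"] = types.NodesCredentialsFileToken{
		Nodes: []string{"node-1"},
	}
	fmt.Println(len(creds.FileTokens)) // 1

	// A bare zero value would panic on the same assignment:
	//   bare := types.NodesCredentials{}
	//   bare.FileTokens["node-1"] = ... // panic: assignment to entry in nil map
}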
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate" +) + +// NodeShard type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Node.ts#L54-L65 +type NodeShard struct { + AllocationId map[string]string `json:"allocation_id,omitempty"` + Index string `json:"index"` + Node *string `json:"node,omitempty"` + Primary bool `json:"primary"` + RecoverySource map[string]string `json:"recovery_source,omitempty"` + RelocatingNode *string `json:"relocating_node,omitempty"` + RelocationFailureInfo *RelocationFailureInfo `json:"relocation_failure_info,omitempty"` + Shard int `json:"shard"` + State shardroutingstate.ShardRoutingState `json:"state"` + UnassignedInfo *UnassignedInformation `json:"unassigned_info,omitempty"` +} + +func (s *NodeShard) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_id": + if s.AllocationId == nil { + s.AllocationId = make(map[string]string, 0) + } + if err := dec.Decode(&s.AllocationId); err != nil { + return fmt.Errorf("%s | %w", "AllocationId", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + s.Primary = value + case bool: + s.Primary = v + } + + case "recovery_source": + if s.RecoverySource == nil { + s.RecoverySource = make(map[string]string, 0) + } + if err := dec.Decode(&s.RecoverySource); err != nil { + return fmt.Errorf("%s | %w", "RecoverySource", err) + } + + case "relocating_node": + if err := dec.Decode(&s.RelocatingNode); err != nil { + return fmt.Errorf("%s | %w", "RelocatingNode", err) + } + + case "relocation_failure_info": + if err := dec.Decode(&s.RelocationFailureInfo); err != nil { + return fmt.Errorf("%s | %w", "RelocationFailureInfo", err) + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "unassigned_info": + if err := dec.Decode(&s.UnassignedInfo); err != nil { + return fmt.Errorf("%s | %w", "UnassignedInfo", err) + } + + } + } + return nil +} + +// NewNodeShard returns a NodeShard. 
+func NewNodeShard() *NodeShard { + r := &NodeShard{ + AllocationId: make(map[string]string), + RecoverySource: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshutdownstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshutdownstatus.go new file mode 100644 index 000000000..6a7ba8ab1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeshutdownstatus.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdowntype" +) + +// NodeShutdownStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 +type NodeShutdownStatus struct { + NodeId string `json:"node_id"` + PersistentTasks PersistentTaskStatus `json:"persistent_tasks"` + Plugins PluginsStatus `json:"plugins"` + Reason string `json:"reason"` + ShardMigration ShardMigrationStatus `json:"shard_migration"` + ShutdownStartedmillis int64 `json:"shutdown_startedmillis"` + Status shutdownstatus.ShutdownStatus `json:"status"` + Type shutdowntype.ShutdownType `json:"type"` +} + +func (s *NodeShutdownStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "persistent_tasks": + if err := dec.Decode(&s.PersistentTasks); err != nil { + return fmt.Errorf("%s | %w", "PersistentTasks", err) + } + + case "plugins": + if err := dec.Decode(&s.Plugins); err != nil { + return fmt.Errorf("%s | %w", "Plugins", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "shard_migration": + if err := dec.Decode(&s.ShardMigration); err != nil { + return fmt.Errorf("%s | %w", "ShardMigration", err) + } + + case "shutdown_startedmillis": + if err := dec.Decode(&s.ShutdownStartedmillis); err != nil { + return fmt.Errorf("%s | %w", "ShutdownStartedmillis", 
err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewNodeShutdownStatus returns a NodeShutdownStatus. +func NewNodeShutdownStatus() *NodeShutdownStatus { + r := &NodeShutdownStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressure.go new file mode 100644 index 000000000..645974176 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressure.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodesIndexingPressure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L116-L121 +type NodesIndexingPressure struct { + // Memory Contains statistics for memory consumption from indexing load. + Memory *NodesIndexingPressureMemory `json:"memory,omitempty"` +} + +// NewNodesIndexingPressure returns a NodesIndexingPressure. +func NewNodesIndexingPressure() *NodesIndexingPressure { + r := &NodesIndexingPressure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressurememory.go new file mode 100644 index 000000000..80ff70550 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesindexingpressurememory.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
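Note: parse failures throughout these files are wrapped as fmt.Errorf("%s | %w", "FieldName", err), which prefixes the Go field name for context while %w keeps the underlying error reachable through errors.Is / errors.As. Stand-alone sketch of the idiom:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	_, cause := strconv.ParseInt("not-a-number", 10, 64)
	wrapped := fmt.Errorf("%s | %w", "ShutdownStartedmillis", cause)

	fmt.Println(wrapped) // ShutdownStartedmillis | strconv.ParseInt: ...

	var numErr *strconv.NumError
	fmt.Println(errors.As(wrapped, &numErr)) // true: the cause is still inspectable
}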
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodesIndexingPressureMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L123-L142 +type NodesIndexingPressureMemory struct { + // Current Contains statistics for current indexing load. + Current *PressureMemory `json:"current,omitempty"` + // Limit Configured memory limit for the indexing requests. + // Replica requests have an automatic limit that is 1.5x this value. + Limit ByteSize `json:"limit,omitempty"` + // LimitInBytes Configured memory limit, in bytes, for the indexing requests. + // Replica requests have an automatic limit that is 1.5x this value. + LimitInBytes *int64 `json:"limit_in_bytes,omitempty"` + // Total Contains statistics for the cumulative indexing load since the node started. + Total *PressureMemory `json:"total,omitempty"` +} + +func (s *NodesIndexingPressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + if err := dec.Decode(&s.Current); err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + + case "limit": + if err := dec.Decode(&s.Limit); err != nil { + return fmt.Errorf("%s | %w", "Limit", err) + } + + case "limit_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LimitInBytes", err) + } + s.LimitInBytes = &value + case float64: + f := int64(v) + s.LimitInBytes = &f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + } + } + return nil +} + +// NewNodesIndexingPressureMemory returns a NodesIndexingPressureMemory. +func NewNodesIndexingPressureMemory() *NodesIndexingPressureMemory { + r := &NodesIndexingPressureMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesingest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesingest.go new file mode 100644 index 000000000..8a667fc92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesingest.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
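Note: per the Limit / LimitInBytes doc comments above, replica requests get an automatic limit of 1.5x the configured indexing-pressure limit. A small worked example; the 10%-of-heap starting figure is an assumption about Elasticsearch's default indexing-pressure setting, not something this diff states:

package main

import "fmt"

func main() {
	// Hypothetical: limit_in_bytes reported as ~10% of a 1 GiB heap.
	limitInBytes := int64(107374182)
	replicaLimit := limitInBytes * 3 / 2 // 1.5x, in integer arithmetic
	fmt.Println(replicaLimit)            // 161061273
}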
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// NodesIngest type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L347-L356 +type NodesIngest struct { + // Pipelines Contains statistics about ingest pipelines for the node. + Pipelines map[string]IngestStats `json:"pipelines,omitempty"` + // Total Contains statistics about ingest operations for the node. + Total *IngestTotal `json:"total,omitempty"` +} + +// NewNodesIngest returns a NodesIngest. +func NewNodesIngest() *NodesIngest { + r := &NodesIngest{ + Pipelines: make(map[string]IngestStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesrecord.go new file mode 100644 index 000000000..10a39fed1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodesrecord.go @@ -0,0 +1,1344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodesRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/nodes/types.ts#L23-L542 +type NodesRecord struct { + // Build The Elasticsearch build hash. + Build *string `json:"build,omitempty"` + // BulkAvgSizeInBytes The average size in bytes of shard bulk. + BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` + // BulkAvgTime The average time spend in shard bulk. + BulkAvgTime *string `json:"bulk.avg_time,omitempty"` + // BulkTotalOperations The number of bulk shard operations. + BulkTotalOperations *string `json:"bulk.total_operations,omitempty"` + // BulkTotalSizeInBytes The total size in bytes of shard bulk. + BulkTotalSizeInBytes *string `json:"bulk.total_size_in_bytes,omitempty"` + // BulkTotalTime The time spend in shard bulk. + BulkTotalTime *string `json:"bulk.total_time,omitempty"` + // CompletionSize The size of completion. + CompletionSize *string `json:"completion.size,omitempty"` + // Cpu The recent system CPU usage as a percentage. + Cpu *string `json:"cpu,omitempty"` + // DiskAvail The available disk space. + DiskAvail ByteSize `json:"disk.avail,omitempty"` + // DiskTotal The total disk space. + DiskTotal ByteSize `json:"disk.total,omitempty"` + // DiskUsed The used disk space. 
+ DiskUsed ByteSize `json:"disk.used,omitempty"` + // DiskUsedPercent The used disk space percentage. + DiskUsedPercent Percentage `json:"disk.used_percent,omitempty"` + // FielddataEvictions The fielddata evictions. + FielddataEvictions *string `json:"fielddata.evictions,omitempty"` + // FielddataMemorySize The used fielddata cache. + FielddataMemorySize *string `json:"fielddata.memory_size,omitempty"` + // FileDescCurrent The used file descriptors. + FileDescCurrent *string `json:"file_desc.current,omitempty"` + // FileDescMax The maximum number of file descriptors. + FileDescMax *string `json:"file_desc.max,omitempty"` + // FileDescPercent The used file descriptor ratio. + FileDescPercent Percentage `json:"file_desc.percent,omitempty"` + // Flavor The Elasticsearch distribution flavor. + Flavor *string `json:"flavor,omitempty"` + // FlushTotal The number of flushes. + FlushTotal *string `json:"flush.total,omitempty"` + // FlushTotalTime The time spent in flush. + FlushTotalTime *string `json:"flush.total_time,omitempty"` + // GetCurrent The number of current get ops. + GetCurrent *string `json:"get.current,omitempty"` + // GetExistsTime The time spent in successful gets. + GetExistsTime *string `json:"get.exists_time,omitempty"` + // GetExistsTotal The number of successful get operations. + GetExistsTotal *string `json:"get.exists_total,omitempty"` + // GetMissingTime The time spent in failed gets. + GetMissingTime *string `json:"get.missing_time,omitempty"` + // GetMissingTotal The number of failed gets. + GetMissingTotal *string `json:"get.missing_total,omitempty"` + // GetTime The time spent in get. + GetTime *string `json:"get.time,omitempty"` + // GetTotal The number of get ops. + GetTotal *string `json:"get.total,omitempty"` + // HeapCurrent The used heap. + HeapCurrent *string `json:"heap.current,omitempty"` + // HeapMax The maximum configured heap. + HeapMax *string `json:"heap.max,omitempty"` + // HeapPercent The used heap ratio. + HeapPercent Percentage `json:"heap.percent,omitempty"` + // HttpAddress The bound HTTP address. + HttpAddress *string `json:"http_address,omitempty"` + // Id The unique node identifier. + Id *string `json:"id,omitempty"` + // IndexingDeleteCurrent The number of current deletions. + IndexingDeleteCurrent *string `json:"indexing.delete_current,omitempty"` + // IndexingDeleteTime The time spent in deletions. + IndexingDeleteTime *string `json:"indexing.delete_time,omitempty"` + // IndexingDeleteTotal The number of delete operations. + IndexingDeleteTotal *string `json:"indexing.delete_total,omitempty"` + // IndexingIndexCurrent The number of current indexing operations. + IndexingIndexCurrent *string `json:"indexing.index_current,omitempty"` + // IndexingIndexFailed The number of failed indexing operations. + IndexingIndexFailed *string `json:"indexing.index_failed,omitempty"` + // IndexingIndexTime The time spent in indexing. + IndexingIndexTime *string `json:"indexing.index_time,omitempty"` + // IndexingIndexTotal The number of indexing operations. + IndexingIndexTotal *string `json:"indexing.index_total,omitempty"` + // Ip The IP address. + Ip *string `json:"ip,omitempty"` + // Jdk The Java version. + Jdk *string `json:"jdk,omitempty"` + // Load15M The load average for the last fifteen minutes. + Load15M *string `json:"load_15m,omitempty"` + // Load1M The load average for the most recent minute. + Load1M *string `json:"load_1m,omitempty"` + // Load5M The load average for the last five minutes. 
+ Load5M *string `json:"load_5m,omitempty"` + // Master Indicates whether the node is the elected master node. + // Returned values include `*`(elected master) and `-`(not elected master). + Master *string `json:"master,omitempty"` + // MergesCurrent The number of current merges. + MergesCurrent *string `json:"merges.current,omitempty"` + // MergesCurrentDocs The number of current merging docs. + MergesCurrentDocs *string `json:"merges.current_docs,omitempty"` + // MergesCurrentSize The size of current merges. + MergesCurrentSize *string `json:"merges.current_size,omitempty"` + // MergesTotal The number of completed merge operations. + MergesTotal *string `json:"merges.total,omitempty"` + // MergesTotalDocs The docs merged. + MergesTotalDocs *string `json:"merges.total_docs,omitempty"` + // MergesTotalSize The size merged. + MergesTotalSize *string `json:"merges.total_size,omitempty"` + // MergesTotalTime The time spent in merges. + MergesTotalTime *string `json:"merges.total_time,omitempty"` + // Name The node name. + Name *string `json:"name,omitempty"` + // NodeRole The roles of the node. + // Returned values include `c`(cold node), `d`(data node), `f`(frozen node), + // `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master + // eligible node), `r`(remote cluster client node), `s`(content node), + // `t`(transform node), `v`(voting-only node), `w`(warm node),and + // `-`(coordinating node only). + NodeRole *string `json:"node.role,omitempty"` + // Pid The process identifier. + Pid *string `json:"pid,omitempty"` + // Port The bound transport port. + Port *string `json:"port,omitempty"` + // QueryCacheEvictions The query cache evictions. + QueryCacheEvictions *string `json:"query_cache.evictions,omitempty"` + // QueryCacheHitCount The query cache hit counts. + QueryCacheHitCount *string `json:"query_cache.hit_count,omitempty"` + // QueryCacheMemorySize The used query cache. + QueryCacheMemorySize *string `json:"query_cache.memory_size,omitempty"` + // QueryCacheMissCount The query cache miss counts. + QueryCacheMissCount *string `json:"query_cache.miss_count,omitempty"` + // RamCurrent The used machine memory. + RamCurrent *string `json:"ram.current,omitempty"` + // RamMax The total machine memory. + RamMax *string `json:"ram.max,omitempty"` + // RamPercent The used machine memory ratio. + RamPercent Percentage `json:"ram.percent,omitempty"` + // RefreshExternalTime The time spent in external refreshes. + RefreshExternalTime *string `json:"refresh.external_time,omitempty"` + // RefreshExternalTotal The total external refreshes. + RefreshExternalTotal *string `json:"refresh.external_total,omitempty"` + // RefreshListeners The number of pending refresh listeners. + RefreshListeners *string `json:"refresh.listeners,omitempty"` + // RefreshTime The time spent in refreshes. + RefreshTime *string `json:"refresh.time,omitempty"` + // RefreshTotal The total refreshes. + RefreshTotal *string `json:"refresh.total,omitempty"` + // RequestCacheEvictions The request cache evictions. + RequestCacheEvictions *string `json:"request_cache.evictions,omitempty"` + // RequestCacheHitCount The request cache hit counts. + RequestCacheHitCount *string `json:"request_cache.hit_count,omitempty"` + // RequestCacheMemorySize The used request cache. + RequestCacheMemorySize *string `json:"request_cache.memory_size,omitempty"` + // RequestCacheMissCount The request cache miss counts. 
+ RequestCacheMissCount *string `json:"request_cache.miss_count,omitempty"` + // ScriptCacheEvictions The total compiled scripts evicted from the cache. + ScriptCacheEvictions *string `json:"script.cache_evictions,omitempty"` + // ScriptCompilationLimitTriggered The script cache compilation limit triggered. + ScriptCompilationLimitTriggered *string `json:"script.compilation_limit_triggered,omitempty"` + // ScriptCompilations The total script compilations. + ScriptCompilations *string `json:"script.compilations,omitempty"` + // SearchFetchCurrent The current fetch phase operations. + SearchFetchCurrent *string `json:"search.fetch_current,omitempty"` + // SearchFetchTime The time spent in fetch phase. + SearchFetchTime *string `json:"search.fetch_time,omitempty"` + // SearchFetchTotal The total fetch operations. + SearchFetchTotal *string `json:"search.fetch_total,omitempty"` + // SearchOpenContexts The open search contexts. + SearchOpenContexts *string `json:"search.open_contexts,omitempty"` + // SearchQueryCurrent The current query phase operations. + SearchQueryCurrent *string `json:"search.query_current,omitempty"` + // SearchQueryTime The time spent in query phase. + SearchQueryTime *string `json:"search.query_time,omitempty"` + // SearchQueryTotal The total query phase operations. + SearchQueryTotal *string `json:"search.query_total,omitempty"` + // SearchScrollCurrent The open scroll contexts. + SearchScrollCurrent *string `json:"search.scroll_current,omitempty"` + // SearchScrollTime The time scroll contexts held open. + SearchScrollTime *string `json:"search.scroll_time,omitempty"` + // SearchScrollTotal The completed scroll contexts. + SearchScrollTotal *string `json:"search.scroll_total,omitempty"` + // SegmentsCount The number of segments. + SegmentsCount *string `json:"segments.count,omitempty"` + // SegmentsFixedBitsetMemory The memory used by fixed bit sets for nested object field types and export + // type filters for types referred in _parent fields. + SegmentsFixedBitsetMemory *string `json:"segments.fixed_bitset_memory,omitempty"` + // SegmentsIndexWriterMemory The memory used by the index writer. + SegmentsIndexWriterMemory *string `json:"segments.index_writer_memory,omitempty"` + // SegmentsMemory The memory used by segments. + SegmentsMemory *string `json:"segments.memory,omitempty"` + // SegmentsVersionMapMemory The memory used by the version map. + SegmentsVersionMapMemory *string `json:"segments.version_map_memory,omitempty"` + // SuggestCurrent The number of current suggest operations. + SuggestCurrent *string `json:"suggest.current,omitempty"` + // SuggestTime The time spend in suggest. + SuggestTime *string `json:"suggest.time,omitempty"` + // SuggestTotal The number of suggest operations. + SuggestTotal *string `json:"suggest.total,omitempty"` + // Type The Elasticsearch distribution type. + Type *string `json:"type,omitempty"` + // Uptime The node uptime. + Uptime *string `json:"uptime,omitempty"` + // Version The Elasticsearch version. 
+ Version *string `json:"version,omitempty"` +} + +func (s *NodesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build", "b": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Build", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Build = &o + + case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkAvgSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgSizeInBytes = &o + + case "bulk.avg_time", "bati", "bulkAvgTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkAvgTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgTime = &o + + case "bulk.total_operations", "bto", "bulkTotalOperations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalOperations", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalOperations = &o + + case "bulk.total_size_in_bytes", "btsi", "bulkTotalSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompletionSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "cpu": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cpu", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cpu = &o + + case "disk.avail", "d", "da", "disk", "diskAvail": + if err := dec.Decode(&s.DiskAvail); err != nil { + return fmt.Errorf("%s | %w", "DiskAvail", err) + } + + case "disk.total", "dt", "diskTotal": + if err := dec.Decode(&s.DiskTotal); err != nil { + return fmt.Errorf("%s | %w", "DiskTotal", err) + } + + case "disk.used", "du", "diskUsed": + if err := dec.Decode(&s.DiskUsed); err != nil { + return fmt.Errorf("%s | %w", "DiskUsed", err) + } + + case "disk.used_percent", "dup", "diskUsedPercent": + if err := dec.Decode(&s.DiskUsedPercent); err != nil { + return fmt.Errorf("%s | %w", "DiskUsedPercent", err) + } + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = 
&o + + case "fielddata.memory_size", "fm", "fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "file_desc.current", "fdc", "fileDescriptorCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FileDescCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FileDescCurrent = &o + + case "file_desc.max", "fdm", "fileDescriptorMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FileDescMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FileDescMax = &o + + case "file_desc.percent", "fdp", "fileDescriptorPercent": + if err := dec.Decode(&s.FileDescPercent); err != nil { + return fmt.Errorf("%s | %w", "FileDescPercent", err) + } + + case "flavor", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flavor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flavor = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTotal = &o + + case "get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s 
| %w", "GetTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "heap.current", "hc", "heapCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HeapCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HeapCurrent = &o + + case "heap.max", "hm", "heapMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HeapMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HeapMax = &o + + case "heap.percent", "hp", "heapPercent": + if err := dec.Decode(&s.HeapPercent); err != nil { + return fmt.Errorf("%s | %w", "HeapPercent", err) + } + + case "http_address", "http": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HttpAddress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HttpAddress = &o + + case "id", "nodeId": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", "indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexFailed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); 
err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "jdk", "j": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Jdk", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Jdk = &o + + case "load_15m", "l": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Load15M", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load15M = &o + + case "load_1m": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Load1M", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load1M = &o + + case "load_5m": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Load5M", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load5M = &o + + case "master", "m": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Master", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Master = &o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case "merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", 
"mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalTime = &o + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "node.role", "r", "role", "nodeRole": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeRole", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeRole = &o + + case "pid", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.hit_count", "qchc", "queryCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheHitCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheHitCount = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "query_cache.miss_count", "qcmc", "queryCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheMissCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMissCount = &o + + case "ram.current", "rc", "ramCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RamCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RamCurrent = &o + + case "ram.max", "rn", "ramMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RamMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RamMax = &o + + case "ram.percent", "rp", "ramPercent": + if err := dec.Decode(&s.RamPercent); err != nil { + return fmt.Errorf("%s | %w", "RamPercent", err) + } + + case "refresh.external_time", "rti", "refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTime = &o + + case "refresh.external_total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshListeners", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "request_cache.evictions", "rce", "requestCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheEvictions = &o + + case "request_cache.hit_count", "rchc", "requestCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheHitCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheHitCount = &o + + case "request_cache.memory_size", "rcm", "requestCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMemorySize = &o + + case "request_cache.miss_count", "rcmc", "requestCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RequestCacheMissCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMissCount = &o + + case "script.cache_evictions", "scrce", "scriptCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ScriptCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCacheEvictions = &o + + case "script.compilation_limit_triggered", "scrclt", "scriptCacheCompilationLimitTriggered": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ScriptCompilationLimitTriggered", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCompilationLimitTriggered = &o + + case "script.compilations", "scrcc", "scriptCompilations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ScriptCompilations", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCompilations = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", 
"SearchFetchCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchOpenContexts", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", "searchQueryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", "sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsFixedBitsetMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsIndexWriterMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsVersionMapMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "suggest.current", "suc", "suggestCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestCurrent = &o + + case "suggest.time", "suti", "suggestTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTime = &o + + case "suggest.total", "suto", "suggestTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SuggestTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTotal = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "uptime", "u": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Uptime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uptime = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewNodesRecord returns a NodesRecord. +func NewNodesRecord() *NodesRecord { + r := &NodesRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodestatistics.go new file mode 100644 index 000000000..768dd08b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodestatistics.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Node.ts#L28-L39 +type NodeStatistics struct { + // Failed Number of nodes that rejected the request or failed to respond. If this value + // is not 0, a reason for the rejection or failure is included in the response. + Failed int `json:"failed"` + Failures []ErrorCause `json:"failures,omitempty"` + // Successful Number of nodes that responded successfully to the request. + Successful int `json:"successful"` + // Total Total number of nodes selected by the request. + Total int `json:"total"` +} + +func (s *NodeStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = value + case float64: + f := int(v) + s.Failed = f + } + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return fmt.Errorf("%s | %w", "Failures", err) + } + + case "successful": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Successful", err) + } + s.Successful = value + case float64: + f := int(v) + s.Successful = f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewNodeStatistics returns a NodeStatistics. +func NewNodeStatistics() *NodeStatistics { + r := &NodeStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodetasks.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodetasks.go new file mode 100644 index 000000000..2a2a07b76 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodetasks.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeTasks type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 +type NodeTasks struct { + Attributes map[string]string `json:"attributes,omitempty"` + Host *string `json:"host,omitempty"` + Ip *string `json:"ip,omitempty"` + Name *string `json:"name,omitempty"` + Roles []string `json:"roles,omitempty"` + Tasks map[string]TaskInfo `json:"tasks"` + TransportAddress *string `json:"transport_address,omitempty"` +} + +func (s *NodeTasks) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = make(map[string]TaskInfo, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewNodeTasks returns a NodeTasks. +func NewNodeTasks() *NodeTasks { + r := &NodeTasks{ + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodethreadpoolinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodethreadpoolinfo.go new file mode 100644 index 000000000..ccc5f91f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodethreadpoolinfo.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NodeThreadPoolInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L317-L324 +type NodeThreadPoolInfo struct { + Core *int `json:"core,omitempty"` + KeepAlive Duration `json:"keep_alive,omitempty"` + Max *int `json:"max,omitempty"` + QueueSize int `json:"queue_size"` + Size *int `json:"size,omitempty"` + Type string `json:"type"` +} + +func (s *NodeThreadPoolInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "core": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Core", err) + } + s.Core = &value + case float64: + f := int(v) + s.Core = &f + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + + case "max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + s.Max = &value + case float64: + f := int(v) + s.Max = &f + } + + case "queue_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "QueueSize", err) + } + s.QueueSize = value + case float64: + f := int(v) + s.QueueSize = f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewNodeThreadPoolInfo returns a NodeThreadPoolInfo. +func NewNodeThreadPoolInfo() *NodeThreadPoolInfo { + r := &NodeThreadPoolInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeusage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeusage.go new file mode 100644 index 000000000..21ed3a2bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nodeusage.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NodeUsage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/usage/types.ts#L25-L30 +type NodeUsage struct { + Aggregations map[string]json.RawMessage `json:"aggregations"` + RestActions map[string]int `json:"rest_actions"` + Since int64 `json:"since"` + Timestamp int64 `json:"timestamp"` +} + +func (s *NodeUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "rest_actions": + if s.RestActions == nil { + s.RestActions = make(map[string]int, 0) + } + if err := dec.Decode(&s.RestActions); err != nil { + return fmt.Errorf("%s | %w", "RestActions", err) + } + + case "since": + if err := dec.Decode(&s.Since); err != nil { + return fmt.Errorf("%s | %w", "Since", err) + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewNodeUsage returns a NodeUsage. +func NewNodeUsage() *NodeUsage { + r := &NodeUsage{ + Aggregations: make(map[string]json.RawMessage), + RestActions: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norianalyzer.go new file mode 100644 index 000000000..a31b09344 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norianalyzer.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode" +) + +// NoriAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L323-L330 +type NoriAnalyzer struct { + DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` + Stoptags []string `json:"stoptags,omitempty"` + Type string `json:"type,omitempty"` + UserDictionary *string `json:"user_dictionary,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NoriAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decompound_mode": + if err := dec.Decode(&s.DecompoundMode); err != nil { + return fmt.Errorf("%s | %w", "DecompoundMode", err) + } + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return fmt.Errorf("%s | %w", "Stoptags", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UserDictionary", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NoriAnalyzer) MarshalJSON() ([]byte, error) { + type innerNoriAnalyzer NoriAnalyzer + tmp := innerNoriAnalyzer{ + DecompoundMode: s.DecompoundMode, + Stoptags: s.Stoptags, + Type: s.Type, + UserDictionary: s.UserDictionary, + Version: s.Version, + } + + tmp.Type = "nori" + + return json.Marshal(tmp) +} + +// NewNoriAnalyzer returns a NoriAnalyzer. +func NewNoriAnalyzer() *NoriAnalyzer { + r := &NoriAnalyzer{} + + return r +} + +type NoriAnalyzerVariant interface { + NoriAnalyzerCaster() *NoriAnalyzer +} + +func (s *NoriAnalyzer) NoriAnalyzerCaster() *NoriAnalyzer { + return s +} + +func (s *NoriAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noripartofspeechtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noripartofspeechtokenfilter.go new file mode 100644 index 000000000..0f1b532b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noripartofspeechtokenfilter.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// NoriPartOfSpeechTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/nori-plugin.ts#L37-L41 +type NoriPartOfSpeechTokenFilter struct { + // Stoptags An array of part-of-speech tags that should be removed. + Stoptags []string `json:"stoptags,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NoriPartOfSpeechTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return fmt.Errorf("%s | %w", "Stoptags", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NoriPartOfSpeechTokenFilter) MarshalJSON() ([]byte, error) { + type innerNoriPartOfSpeechTokenFilter NoriPartOfSpeechTokenFilter + tmp := innerNoriPartOfSpeechTokenFilter{ + Stoptags: s.Stoptags, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "nori_part_of_speech" + + return json.Marshal(tmp) +} + +// NewNoriPartOfSpeechTokenFilter returns a NoriPartOfSpeechTokenFilter. +func NewNoriPartOfSpeechTokenFilter() *NoriPartOfSpeechTokenFilter { + r := &NoriPartOfSpeechTokenFilter{} + + return r +} + +type NoriPartOfSpeechTokenFilterVariant interface { + NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter +} + +func (s *NoriPartOfSpeechTokenFilter) NoriPartOfSpeechTokenFilterCaster() *NoriPartOfSpeechTokenFilter { + return s +} + +func (s *NoriPartOfSpeechTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noritokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noritokenizer.go new file mode 100644 index 000000000..d7f118c4f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/noritokenizer.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode" +) + +// NoriTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/nori-plugin.ts#L29-L35 +type NoriTokenizer struct { + DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` + DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` + Type string `json:"type,omitempty"` + UserDictionary *string `json:"user_dictionary,omitempty"` + UserDictionaryRules []string `json:"user_dictionary_rules,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NoriTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decompound_mode": + if err := dec.Decode(&s.DecompoundMode); err != nil { + return fmt.Errorf("%s | %w", "DecompoundMode", err) + } + + case "discard_punctuation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DiscardPunctuation", err) + } + s.DiscardPunctuation = &value + case bool: + s.DiscardPunctuation = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UserDictionary", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "user_dictionary_rules": + if err := dec.Decode(&s.UserDictionaryRules); err != nil { + return fmt.Errorf("%s | %w", "UserDictionaryRules", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NoriTokenizer) MarshalJSON() ([]byte, error) { + type innerNoriTokenizer NoriTokenizer + tmp := innerNoriTokenizer{ + DecompoundMode: s.DecompoundMode, + DiscardPunctuation: s.DiscardPunctuation, + Type: s.Type, + UserDictionary: s.UserDictionary, + UserDictionaryRules: s.UserDictionaryRules, + Version: s.Version, + } + + tmp.Type = "nori_tokenizer" + + return json.Marshal(tmp) +} + +// NewNoriTokenizer returns a NoriTokenizer. 
+func NewNoriTokenizer() *NoriTokenizer { + r := &NoriTokenizer{} + + return r +} + +type NoriTokenizerVariant interface { + NoriTokenizerCaster() *NoriTokenizer +} + +func (s *NoriTokenizer) NoriTokenizerCaster() *NoriTokenizer { + return s +} + +func (s *NoriTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizeaggregation.go new file mode 100644 index 000000000..f278a091f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizeaggregation.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalizemethod" +) + +// NormalizeAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L351-L359 +type NormalizeAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Method The specific method to apply. 
+ Method *normalizemethod.NormalizeMethod `json:"method,omitempty"` +} + +func (s *NormalizeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + } + } + return nil +} + +// NewNormalizeAggregation returns a NormalizeAggregation. +func NewNormalizeAggregation() *NormalizeAggregation { + r := &NormalizeAggregation{} + + return r +} + +type NormalizeAggregationVariant interface { + NormalizeAggregationCaster() *NormalizeAggregation +} + +func (s *NormalizeAggregation) NormalizeAggregationCaster() *NormalizeAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizer.go new file mode 100644 index 000000000..6c28abd5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/normalizer.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Normalizer holds the union for the following types: +// +// LowercaseNormalizer +// CustomNormalizer +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/normalizers.ts#L20-L24 +type Normalizer any + +type NormalizerVariant interface { + NormalizerCaster() *Normalizer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norwegiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norwegiananalyzer.go new file mode 100644 index 000000000..996da4967 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/norwegiananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NorwegianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L248-L253 +type NorwegianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *NorwegianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NorwegianAnalyzer) MarshalJSON() ([]byte, error) { + type innerNorwegianAnalyzer NorwegianAnalyzer + tmp := innerNorwegianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "norwegian" + + return json.Marshal(tmp) +} + +// NewNorwegianAnalyzer returns a NorwegianAnalyzer. +func NewNorwegianAnalyzer() *NorwegianAnalyzer { + r := &NorwegianAnalyzer{} + + return r +} + +type NorwegianAnalyzerVariant interface { + NorwegianAnalyzerCaster() *NorwegianAnalyzer +} + +func (s *NorwegianAnalyzer) NorwegianAnalyzerCaster() *NorwegianAnalyzer { + return s +} + +func (s *NorwegianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/notfoundaliases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/notfoundaliases.go new file mode 100644 index 000000000..f307e6a5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/notfoundaliases.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// NotFoundAliases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_alias/_types/response.ts#L28-L36 +type NotFoundAliases struct { + Error string `json:"error"` + NotFoundAliases map[string]IndexAliases `json:"-"` + Status int `json:"status"` +} + +func (s *NotFoundAliases) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = o + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + default: + + if key, ok := t.(string); ok { + if s.NotFoundAliases == nil { + s.NotFoundAliases = make(map[string]IndexAliases, 0) + } + raw := NewIndexAliases() + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "NotFoundAliases", err) + } + s.NotFoundAliases[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s NotFoundAliases) MarshalJSON() ([]byte, error) { + type opt NotFoundAliases + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.NotFoundAliases { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "NotFoundAliases") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewNotFoundAliases returns a NotFoundAliases. 
+func NewNotFoundAliases() *NotFoundAliases { + r := &NotFoundAliases{ + NotFoundAliases: make(map[string]IndexAliases), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nullvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nullvalue.go new file mode 100644 index 000000000..17bfbd935 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/nullvalue.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +// NullValue is a custom type used to represent the concept of a null or missing value. +// It can be used as a placeholder for variables or fields that are not initialized, +// or to indicate that a specific piece of data is intentionally absent. +type NullValue struct{} + +// MarshalJSON converts the NullValue to JSON format. +// It always returns a "null" value as per JSON standard for null values. +func (n NullValue) MarshalJSON() ([]byte, error) { + return []byte("null"), nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numberrangequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numberrangequery.go new file mode 100644 index 000000000..e4842b4c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numberrangequery.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation" +) + +// NumberRangeQuery type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L168-L168 +type NumberRangeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Gt Greater than. + Gt *Float64 `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte *Float64 `json:"gte,omitempty"` + // Lt Less than. + Lt *Float64 `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte *Float64 `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. + Relation *rangerelation.RangeRelation `json:"relation,omitempty"` +} + +func (s *NumberRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "gt": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Gt", err) + } + f := Float64(value) + s.Gt = &f + case float64: + f := Float64(v) + s.Gt = &f + } + + case "gte": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Gte", err) + } + f := Float64(value) + s.Gte = &f + case float64: + f := Float64(v) + s.Gte = &f + } + + case "lt": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lt", err) + } + f := Float64(value) + s.Lt = &f + case float64: + f := Float64(v) + s.Lt = &f + } + + case "lte": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lte", err) + } + f := Float64(value) + s.Lte = &f + case float64: + f := Float64(v) + s.Lte = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + } + } + return nil +} + +// NewNumberRangeQuery returns a NumberRangeQuery. 
+func NewNumberRangeQuery() *NumberRangeQuery { + r := &NumberRangeQuery{} + + return r +} + +type NumberRangeQueryVariant interface { + NumberRangeQueryCaster() *NumberRangeQuery +} + +func (s *NumberRangeQuery) NumberRangeQueryCaster() *NumberRangeQuery { + return s +} + +func (s *NumberRangeQuery) RangeQueryCaster() *RangeQuery { + o := RangeQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericdecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericdecayfunction.go new file mode 100644 index 000000000..ad76aa1bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericdecayfunction.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode" +) + +// NumericDecayFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L208-L208 +type NumericDecayFunction struct { + DecayFunctionBasedoubledouble map[string]DecayPlacementdoubledouble `json:"-"` + // MultiValueMode Determines how the distance is calculated when a field used for computing the + // decay contains multiple values. + MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s NumericDecayFunction) MarshalJSON() ([]byte, error) { + type opt NumericDecayFunction + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.DecayFunctionBasedoubledouble { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "DecayFunctionBasedoubledouble") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewNumericDecayFunction returns a NumericDecayFunction. 
+func NewNumericDecayFunction() *NumericDecayFunction { + r := &NumericDecayFunction{ + DecayFunctionBasedoubledouble: make(map[string]DecayPlacementdoubledouble), + } + + return r +} + +type NumericDecayFunctionVariant interface { + NumericDecayFunctionCaster() *NumericDecayFunction +} + +func (s *NumericDecayFunction) NumericDecayFunctionCaster() *NumericDecayFunction { + return s +} + +func (s *NumericDecayFunction) DecayFunctionCaster() *DecayFunction { + o := DecayFunction(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericfielddata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericfielddata.go new file mode 100644 index 000000000..9b30a50bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/numericfielddata.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/numericfielddataformat" +) + +// NumericFielddata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/NumericFielddata.ts#L22-L24 +type NumericFielddata struct { + Format numericfielddataformat.NumericFielddataFormat `json:"format"` +} + +// NewNumericFielddata returns a NumericFielddata. +func NewNumericFielddata() *NumericFielddata { + r := &NumericFielddata{} + + return r +} + +type NumericFielddataVariant interface { + NumericFielddataCaster() *NumericFielddata +} + +func (s *NumericFielddata) NumericFielddataCaster() *NumericFielddata { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/objectproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/objectproperty.go new file mode 100644 index 000000000..3c5036ded --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/objectproperty.go @@ -0,0 +1,880 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// ObjectProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/complex.ts#L47-L51 +type ObjectProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ObjectProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "subobjects": + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ObjectProperty) MarshalJSON() ([]byte, error) { + type innerObjectProperty ObjectProperty + tmp := innerObjectProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + Subobjects: s.Subobjects, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "object" + + return json.Marshal(tmp) +} + +// NewObjectProperty returns a ObjectProperty. +func NewObjectProperty() *ObjectProperty { + r := &ObjectProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ObjectPropertyVariant interface { + ObjectPropertyCaster() *ObjectProperty +} + +func (s *ObjectProperty) ObjectPropertyCaster() *ObjectProperty { + return s +} + +func (s *ObjectProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/onehotencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/onehotencodingpreprocessor.go new file mode 100644 index 000000000..2ca573c9f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/onehotencodingpreprocessor.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OneHotEncodingPreprocessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L44-L47 +type OneHotEncodingPreprocessor struct { + Field string `json:"field"` + HotMap map[string]string `json:"hot_map"` +} + +func (s *OneHotEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "hot_map": + if s.HotMap == nil { + s.HotMap = make(map[string]string, 0) + } + if err := dec.Decode(&s.HotMap); err != nil { + return fmt.Errorf("%s | %w", "HotMap", err) + } + + } + } + return nil +} + +// NewOneHotEncodingPreprocessor returns a OneHotEncodingPreprocessor. +func NewOneHotEncodingPreprocessor() *OneHotEncodingPreprocessor { + r := &OneHotEncodingPreprocessor{ + HotMap: make(map[string]string), + } + + return r +} + +type OneHotEncodingPreprocessorVariant interface { + OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor +} + +func (s *OneHotEncodingPreprocessor) OneHotEncodingPreprocessorCaster() *OneHotEncodingPreprocessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaiservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaiservicesettings.go new file mode 100644 index 000000000..47b885a26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaiservicesettings.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1554-L1596 +type OpenAIServiceSettings struct { + // ApiKey A valid API key of your OpenAI account. + // You can find your OpenAI API keys in your OpenAI account under the API keys + // section. + // + // IMPORTANT: You need to provide the API key only once, during the inference + // model creation. + // The get inference endpoint API does not retrieve your API key. + // After creating the inference model, you cannot change the associated API key. + // If you want to use a different API key, delete the inference model and + // recreate it with the same name and the updated API key. + ApiKey string `json:"api_key"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // It is supported only in `text-embedding-3` and later models. + // If it is not set, the OpenAI defined default for the model is used. + Dimensions *int `json:"dimensions,omitempty"` + // ModelId The name of the model to use for the inference task. + // Refer to the OpenAI documentation for the list of available text embedding + // models. + ModelId string `json:"model_id"` + // OrganizationId The unique identifier for your organization. + // You can find the Organization ID in your OpenAI account under *Settings > + // Organizations*. + OrganizationId *string `json:"organization_id,omitempty"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // OpenAI. + // The `openai` service sets a default number of requests allowed per minute + // depending on the task type. + // For `text_embedding`, it is set to `3000`. + // For `completion`, it is set to `500`. + RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` + // Url The URL endpoint to use for the requests. + // It can be changed for testing purposes. 
+ Url *string `json:"url,omitempty"` +} + +func (s *OpenAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ApiKey = o + + case "dimensions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dimensions", err) + } + s.Dimensions = &value + case float64: + f := int(v) + s.Dimensions = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "organization_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "OrganizationId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OrganizationId = &o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewOpenAIServiceSettings returns a OpenAIServiceSettings. +func NewOpenAIServiceSettings() *OpenAIServiceSettings { + r := &OpenAIServiceSettings{} + + return r +} + +type OpenAIServiceSettingsVariant interface { + OpenAIServiceSettingsCaster() *OpenAIServiceSettings +} + +func (s *OpenAIServiceSettings) OpenAIServiceSettingsCaster() *OpenAIServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaitasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaitasksettings.go new file mode 100644 index 000000000..958d9d044 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/openaitasksettings.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OpenAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1598-L1604 +type OpenAITaskSettings struct { + // User For a `completion` or `text_embedding` task, specify the user issuing the + // request. + // This information can be used for abuse detection. + User *string `json:"user,omitempty"` +} + +func (s *OpenAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "user": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.User = &o + + } + } + return nil +} + +// NewOpenAITaskSettings returns a OpenAITaskSettings. +func NewOpenAITaskSettings() *OpenAITaskSettings { + r := &OpenAITaskSettings{} + + return r +} + +type OpenAITaskSettingsVariant interface { + OpenAITaskSettingsCaster() *OpenAITaskSettings +} + +func (s *OpenAITaskSettings) OpenAITaskSettingsCaster() *OpenAITaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystem.go new file mode 100644 index 000000000..8b2e28d14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystem.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OperatingSystem type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1023-L1029 +type OperatingSystem struct { + Cgroup *Cgroup `json:"cgroup,omitempty"` + Cpu *Cpu `json:"cpu,omitempty"` + Mem *ExtendedMemoryStats `json:"mem,omitempty"` + Swap *MemoryStats `json:"swap,omitempty"` + Timestamp *int64 `json:"timestamp,omitempty"` +} + +func (s *OperatingSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cgroup": + if err := dec.Decode(&s.Cgroup); err != nil { + return fmt.Errorf("%s | %w", "Cgroup", err) + } + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return fmt.Errorf("%s | %w", "Cpu", err) + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "swap": + if err := dec.Decode(&s.Swap); err != nil { + return fmt.Errorf("%s | %w", "Swap", err) + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + } + } + return nil +} + +// NewOperatingSystem returns a OperatingSystem. +func NewOperatingSystem() *OperatingSystem { + r := &OperatingSystem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystemmemoryinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystemmemoryinfo.go new file mode 100644 index 000000000..49ca88283 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operatingsystemmemoryinfo.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OperatingSystemMemoryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L715-L763 +type OperatingSystemMemoryInfo struct { + // AdjustedTotal Total amount of memory across all selected nodes, but using the value + // specified using the `es.total_memory_bytes` system property instead of + // measured total memory for those nodes where that system property was set. 
+ AdjustedTotal ByteSize `json:"adjusted_total,omitempty"` + // AdjustedTotalInBytes Total amount, in bytes, of memory across all selected nodes, but using the + // value specified using the `es.total_memory_bytes` system property instead of + // measured total memory for those nodes where that system property was set. + AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` + // Free Amount of free physical memory across all selected nodes. + Free ByteSize `json:"free,omitempty"` + // FreeInBytes Amount, in bytes, of free physical memory across all selected nodes. + FreeInBytes int64 `json:"free_in_bytes"` + // FreePercent Percentage of free physical memory across all selected nodes. + FreePercent int `json:"free_percent"` + // Total Total amount of physical memory across all selected nodes. + Total ByteSize `json:"total,omitempty"` + // TotalInBytes Total amount, in bytes, of physical memory across all selected nodes. + TotalInBytes int64 `json:"total_in_bytes"` + // Used Amount of physical memory in use across all selected nodes. + Used ByteSize `json:"used,omitempty"` + // UsedInBytes Amount, in bytes, of physical memory in use across all selected nodes. + UsedInBytes int64 `json:"used_in_bytes"` + // UsedPercent Percentage of physical memory in use across all selected nodes. + UsedPercent int `json:"used_percent"` +} + +func (s *OperatingSystemMemoryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total": + if err := dec.Decode(&s.AdjustedTotal); err != nil { + return fmt.Errorf("%s | %w", "AdjustedTotal", err) + } + + case "adjusted_total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AdjustedTotalInBytes", err) + } + s.AdjustedTotalInBytes = &value + case float64: + f := int64(v) + s.AdjustedTotalInBytes = &f + } + + case "free": + if err := dec.Decode(&s.Free); err != nil { + return fmt.Errorf("%s | %w", "Free", err) + } + + case "free_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FreeInBytes", err) + } + s.FreeInBytes = value + case float64: + f := int64(v) + s.FreeInBytes = f + } + + case "free_percent": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FreePercent", err) + } + s.FreePercent = value + case float64: + f := int(v) + s.FreePercent = f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + s.TotalInBytes = value + case float64: + f := int64(v) + s.TotalInBytes = f + } + + case "used": + if err := dec.Decode(&s.Used); err != nil { + return fmt.Errorf("%s | %w", "Used", err) + } + + case "used_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UsedInBytes", err) + } + s.UsedInBytes = value + 
case float64: + f := int64(v) + s.UsedInBytes = f + } + + case "used_percent": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UsedPercent", err) + } + s.UsedPercent = value + case float64: + f := int(v) + s.UsedPercent = f + } + + } + } + return nil +} + +// NewOperatingSystemMemoryInfo returns a OperatingSystemMemoryInfo. +func NewOperatingSystemMemoryInfo() *OperatingSystemMemoryInfo { + r := &OperatingSystemMemoryInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operationcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operationcontainer.go new file mode 100644 index 000000000..d4cdb8dd6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/operationcontainer.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// OperationContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L158-L180 +type OperationContainer struct { + AdditionalOperationContainerProperty map[string]json.RawMessage `json:"-"` + // Create Index the specified document if it does not already exist. + // The following line must contain the source data to be indexed. + Create *CreateOperation `json:"create,omitempty"` + // Delete Remove the specified document from the index. + Delete *DeleteOperation `json:"delete,omitempty"` + // Index Index the specified document. + // If the document exists, it replaces the document and increments the version. + // The following line must contain the source data to be indexed. + Index *IndexOperation `json:"index,omitempty"` + // Update Perform a partial document update. + // The following line must contain the partial document and update options. 
+ Update *UpdateOperation `json:"update,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s OperationContainer) MarshalJSON() ([]byte, error) { + type opt OperationContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalOperationContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalOperationContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewOperationContainer returns an OperationContainer. +func NewOperationContainer() *OperationContainer { + r := &OperationContainer{ + AdditionalOperationContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type OperationContainerVariant interface { + OperationContainerCaster() *OperationContainer +} + +func (s *OperationContainer) OperationContainerCaster() *OperationContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/outlierdetectionparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/outlierdetectionparameters.go new file mode 100644 index 000000000..785569d1f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/outlierdetectionparameters.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OutlierDetectionParameters type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L528-L562 +type OutlierDetectionParameters struct { + // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. + ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` + // FeatureInfluenceThreshold The minimum outlier score that a document needs to have in order to calculate + // its feature influence score. + // Value range: 0-1 + FeatureInfluenceThreshold *Float64 `json:"feature_influence_threshold,omitempty"` + // Method The method that outlier detection uses. + // Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and + // `ensemble`.
+ // The default value is ensemble, which means that outlier detection uses an + // ensemble of different methods and normalises and combines their individual + // outlier scores to obtain the overall outlier score. + Method *string `json:"method,omitempty"` + // NNeighbors Defines the value for how many nearest neighbors each method of outlier + // detection uses to calculate its outlier score. + // When the value is not set, different values are used for different ensemble + // members. + // This default behavior helps improve the diversity in the ensemble; only + // override it if you are confident that the value you choose is appropriate for + // the data set. + NNeighbors *int `json:"n_neighbors,omitempty"` + // OutlierFraction The proportion of the data set that is assumed to be outlying prior to + // outlier detection. + // For example, 0.05 means it is assumed that 5% of values are real outliers and + // 95% are inliers. + OutlierFraction *Float64 `json:"outlier_fraction,omitempty"` + // StandardizationEnabled If `true`, the following operation is performed on the columns before + // computing outlier scores: (x_i - mean(x_i)) / sd(x_i). + StandardizationEnabled *bool `json:"standardization_enabled,omitempty"` +} + +func (s *OutlierDetectionParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compute_feature_influence": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeFeatureInfluence", err) + } + s.ComputeFeatureInfluence = &value + case bool: + s.ComputeFeatureInfluence = &v + } + + case "feature_influence_threshold": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FeatureInfluenceThreshold", err) + } + f := Float64(value) + s.FeatureInfluenceThreshold = &f + case float64: + f := Float64(v) + s.FeatureInfluenceThreshold = &f + } + + case "method": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Method = &o + + case "n_neighbors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NNeighbors", err) + } + s.NNeighbors = &value + case float64: + f := int(v) + s.NNeighbors = &f + } + + case "outlier_fraction": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OutlierFraction", err) + } + f := Float64(value) + s.OutlierFraction = &f + case float64: + f := Float64(v) + s.OutlierFraction = &f + } + + case "standardization_enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StandardizationEnabled", err) + } + s.StandardizationEnabled = &value + case bool: + s.StandardizationEnabled = &v + } + + } + } + return nil +} + +// NewOutlierDetectionParameters returns a OutlierDetectionParameters. 
+func NewOutlierDetectionParameters() *OutlierDetectionParameters { + r := &OutlierDetectionParameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucket.go new file mode 100644 index 000000000..3fe3157dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucket.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OverallBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Bucket.ts#L129-L144 +type OverallBucket struct { + // BucketSpan The length of the bucket in seconds. Matches the job with the longest + // bucket_span value. + BucketSpan int64 `json:"bucket_span"` + // IsInterim If true, this is an interim result. In other words, the results are + // calculated based on partial input data. + IsInterim bool `json:"is_interim"` + // Jobs An array of objects that contain the max_anomaly_score per job_id. + Jobs []OverallBucketJob `json:"jobs"` + // OverallScore The top_n average of the maximum bucket anomaly_score per job. + OverallScore Float64 `json:"overall_score"` + // ResultType Internal. This is always set to overall_bucket. + ResultType string `json:"result_type"` + // Timestamp The start time of the bucket for which these results were calculated. + Timestamp int64 `json:"timestamp"` + // TimestampString The start time of the bucket for which these results were calculated. 
+ TimestampString DateTime `json:"timestamp_string,omitempty"` +} + +func (s *OverallBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return fmt.Errorf("%s | %w", "BucketSpan", err) + } + + case "is_interim": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsInterim", err) + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "jobs": + if err := dec.Decode(&s.Jobs); err != nil { + return fmt.Errorf("%s | %w", "Jobs", err) + } + + case "overall_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OverallScore", err) + } + f := Float64(value) + s.OverallScore = f + case float64: + f := Float64(v) + s.OverallScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return fmt.Errorf("%s | %w", "TimestampString", err) + } + + } + } + return nil +} + +// NewOverallBucket returns a OverallBucket. +func NewOverallBucket() *OverallBucket { + r := &OverallBucket{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucketjob.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucketjob.go new file mode 100644 index 000000000..ef58fa0d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overallbucketjob.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// OverallBucketJob type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Bucket.ts#L145-L148 +type OverallBucketJob struct { + JobId string `json:"job_id"` + MaxAnomalyScore Float64 `json:"max_anomaly_score"` +} + +func (s *OverallBucketJob) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "max_anomaly_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnomalyScore", err) + } + f := Float64(value) + s.MaxAnomalyScore = f + case float64: + f := Float64(v) + s.MaxAnomalyScore = f + } + + } + } + return nil +} + +// NewOverallBucketJob returns a OverallBucketJob. +func NewOverallBucketJob() *OverallBucketJob { + r := &OverallBucketJob{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overlapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overlapping.go new file mode 100644 index 000000000..50f05916e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/overlapping.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Overlapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 +type Overlapping struct { + IndexPatterns []string `json:"index_patterns"` + Name string `json:"name"` +} + +func (s *Overlapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_patterns": + if err := dec.Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewOverlapping returns a Overlapping. 
+func NewOverlapping() *Overlapping { + r := &Overlapping{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/page.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/page.go new file mode 100644 index 000000000..218f1f64b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/page.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Page type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Page.ts#L22-L33 +type Page struct { + // From Skips the specified number of items. + From *int `json:"from,omitempty"` + // Size Specifies the maximum number of items to obtain. + Size *int `json:"size,omitempty"` +} + +func (s *Page) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewPage returns a Page. +func NewPage() *Page { + r := &Page{} + + return r +} + +type PageVariant interface { + PageCaster() *Page +} + +func (s *Page) PageCaster() *Page { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyaction.go new file mode 100644 index 000000000..12a1c3158 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyaction.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
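// ============================================================================
// A small usage sketch for the optional-field convention seen in Page above:
// pointer fields let callers distinguish "unset" (nil, dropped by omitempty)
// from an explicit zero. The local `page` copy and `ptr` helper are
// illustrative; they are not part of the library.
package main

import (
	"encoding/json"
	"fmt"
)

type page struct {
	From *int `json:"from,omitempty"`
	Size *int `json:"size,omitempty"`
}

// ptr returns a pointer to any value, keeping call sites readable.
func ptr[T any](v T) *T { return &v }

func main() {
	out, err := json.Marshal(page{From: ptr(0), Size: ptr(25)})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"from":0,"size":25}; the explicit zero survives
}
// ============================================================================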
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype" +) + +// PagerDutyAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L54-L54 +type PagerDutyAction struct { + Account *string `json:"account,omitempty"` + AttachPayload bool `json:"attach_payload"` + Client *string `json:"client,omitempty"` + ClientUrl *string `json:"client_url,omitempty"` + Contexts []PagerDutyContext `json:"contexts,omitempty"` + Description string `json:"description"` + EventType *pagerdutyeventtype.PagerDutyEventType `json:"event_type,omitempty"` + IncidentKey string `json:"incident_key"` + Proxy *PagerDutyEventProxy `json:"proxy,omitempty"` +} + +func (s *PagerDutyAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Account", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "attach_payload": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AttachPayload", err) + } + s.AttachPayload = value + case bool: + s.AttachPayload = v + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Client", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "client_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClientUrl", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClientUrl = &o + + case "contexts", "context": + if err := dec.Decode(&s.Contexts); err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "event_type": + if err := dec.Decode(&s.EventType); err != nil { + return fmt.Errorf("%s | %w", "EventType", err) + } + + case "incident_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IncidentKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncidentKey = o + + case "proxy": + 
if err := dec.Decode(&s.Proxy); err != nil { + return fmt.Errorf("%s | %w", "Proxy", err) + } + + } + } + return nil +} + +// NewPagerDutyAction returns a PagerDutyAction. +func NewPagerDutyAction() *PagerDutyAction { + r := &PagerDutyAction{} + + return r +} + +type PagerDutyActionVariant interface { + PagerDutyActionCaster() *PagerDutyAction +} + +func (s *PagerDutyAction) PagerDutyActionCaster() *PagerDutyAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutycontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutycontext.go new file mode 100644 index 000000000..d4033e823 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutycontext.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutycontexttype" +) + +// PagerDutyContext type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L61-L65 +type PagerDutyContext struct { + Href *string `json:"href,omitempty"` + Src *string `json:"src,omitempty"` + Type pagerdutycontexttype.PagerDutyContextType `json:"type"` +} + +func (s *PagerDutyContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "href": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Href", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Href = &o + + case "src": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Src", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Src = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewPagerDutyContext returns a PagerDutyContext. 
+func NewPagerDutyContext() *PagerDutyContext { + r := &PagerDutyContext{} + + return r +} + +type PagerDutyContextVariant interface { + PagerDutyContextCaster() *PagerDutyContext +} + +func (s *PagerDutyContext) PagerDutyContextCaster() *PagerDutyContext { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyevent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyevent.go new file mode 100644 index 000000000..019a0908f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyevent.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype" +) + +// PagerDutyEvent type. 
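// ============================================================================
// A reduced sketch of the raw-capture-then-Unquote pattern that the PagerDuty
// types above use for free-form string fields: the token is decoded as raw
// JSON, strconv.Unquote strips the quotes, and on failure the raw text is
// kept verbatim. `decodeLooseString` is an illustrative helper name.
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func decodeLooseString(raw json.RawMessage) string {
	o := string(raw)
	if unquoted, err := strconv.Unquote(o); err == nil {
		return unquoted
	}
	return o // not a quoted string (number, object, ...): keep the raw text
}

func main() {
	fmt.Println(decodeLooseString(json.RawMessage(`"ops-account"`))) // ops-account
	fmt.Println(decodeLooseString(json.RawMessage(`42`)))            // 42
}
// ============================================================================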
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L40-L52 +type PagerDutyEvent struct { + Account *string `json:"account,omitempty"` + AttachPayload bool `json:"attach_payload"` + Client *string `json:"client,omitempty"` + ClientUrl *string `json:"client_url,omitempty"` + Contexts []PagerDutyContext `json:"contexts,omitempty"` + Description string `json:"description"` + EventType *pagerdutyeventtype.PagerDutyEventType `json:"event_type,omitempty"` + IncidentKey string `json:"incident_key"` + Proxy *PagerDutyEventProxy `json:"proxy,omitempty"` +} + +func (s *PagerDutyEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Account", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "attach_payload": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AttachPayload", err) + } + s.AttachPayload = value + case bool: + s.AttachPayload = v + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Client", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "client_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClientUrl", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClientUrl = &o + + case "contexts", "context": + if err := dec.Decode(&s.Contexts); err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "event_type": + if err := dec.Decode(&s.EventType); err != nil { + return fmt.Errorf("%s | %w", "EventType", err) + } + + case "incident_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IncidentKey", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncidentKey = o + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return fmt.Errorf("%s | %w", "Proxy", err) + } + + } + } + return nil +} + +// NewPagerDutyEvent returns a PagerDutyEvent. +func NewPagerDutyEvent() *PagerDutyEvent { + r := &PagerDutyEvent{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyeventproxy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyeventproxy.go new file mode 100644 index 000000000..f75d05cda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyeventproxy.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PagerDutyEventProxy type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L56-L59 +type PagerDutyEventProxy struct { + Host *string `json:"host,omitempty"` + Port *int `json:"port,omitempty"` +} + +func (s *PagerDutyEventProxy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "port": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + s.Port = &value + case float64: + f := int(v) + s.Port = &f + } + + } + } + return nil +} + +// NewPagerDutyEventProxy returns a PagerDutyEventProxy. +func NewPagerDutyEventProxy() *PagerDutyEventProxy { + r := &PagerDutyEventProxy{} + + return r +} + +type PagerDutyEventProxyVariant interface { + PagerDutyEventProxyCaster() *PagerDutyEventProxy +} + +func (s *PagerDutyEventProxy) PagerDutyEventProxyCaster() *PagerDutyEventProxy { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyresult.go new file mode 100644 index 000000000..74c4cb91f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pagerdutyresult.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PagerDutyResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L78-L83 +type PagerDutyResult struct { + Event PagerDutyEvent `json:"event"` + Reason *string `json:"reason,omitempty"` + Request *HttpInputRequestResult `json:"request,omitempty"` + Response *HttpInputResponseResult `json:"response,omitempty"` +} + +func (s *PagerDutyResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "event": + if err := dec.Decode(&s.Event); err != nil { + return fmt.Errorf("%s | %w", "Event", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + case "response": + if err := dec.Decode(&s.Response); err != nil { + return fmt.Errorf("%s | %w", "Response", err) + } + + } + } + return nil +} + +// NewPagerDutyResult returns a PagerDutyResult. +func NewPagerDutyResult() *PagerDutyResult { + r := &PagerDutyResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/painlesscontextsetup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/painlesscontextsetup.go new file mode 100644 index 000000000..1f7328d93 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/painlesscontextsetup.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PainlessContextSetup type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/scripts_painless_execute/types.ts#L27-L46 +type PainlessContextSetup struct { + // Document Document that's temporarily indexed in-memory and accessible from the script. + Document json.RawMessage `json:"document,omitempty"` + // Index Index containing a mapping that's compatible with the indexed document. 
+ // You may specify a remote index by prefixing the index with the remote cluster + // alias. + // For example, `remote1:my_index` indicates that you want to run the painless + // script against the "my_index" index on the "remote1" cluster. + // This request will be forwarded to the "remote1" cluster if you have + // configured a connection to that remote cluster. + // + // NOTE: Wildcards are not accepted in the index expression for this endpoint. + // The expression `*:myindex` will return the error "No such remote cluster" and + // the expression `logs*` or `remote1:logs*` will return the error "index not + // found". + Index string `json:"index"` + // Query Use this parameter to specify a query for computing a score. + Query *Query `json:"query,omitempty"` +} + +func (s *PainlessContextSetup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "document": + if err := dec.Decode(&s.Document); err != nil { + return fmt.Errorf("%s | %w", "Document", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + } + } + return nil +} + +// NewPainlessContextSetup returns a PainlessContextSetup. +func NewPainlessContextSetup() *PainlessContextSetup { + r := &PainlessContextSetup{} + + return r +} + +type PainlessContextSetupVariant interface { + PainlessContextSetupCaster() *PainlessContextSetup +} + +func (s *PainlessContextSetup) PainlessContextSetupCaster() *PainlessContextSetup { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregate.go new file mode 100644 index 000000000..cd1b0e707 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ParentAggregate type. 
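// ============================================================================
// ParentAggregate below stores sub-aggregations in a map[string]Aggregate,
// while the wire format (with typed keys enabled) encodes each key as
// "kind#name". A miniature of the dispatch performed by the decoder that
// follows; decodeTypedKeys and the two concrete types are illustrative
// stand-ins for the generated switch over dozens of aggregate kinds.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type Aggregate any

type MinAggregate struct {
	Value float64 `json:"value"`
}

type StringTermsAggregate struct {
	Buckets json.RawMessage `json:"buckets"`
}

func decodeTypedKeys(data []byte) (map[string]Aggregate, error) {
	raw := map[string]json.RawMessage{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return nil, err
	}
	out := make(map[string]Aggregate, len(raw))
	for key, msg := range raw {
		kind, name, ok := strings.Cut(key, "#")
		if !ok {
			continue // plain keys (doc_count, meta, ...) are handled separately
		}
		switch kind {
		case "min":
			var a MinAggregate
			if err := json.Unmarshal(msg, &a); err != nil {
				return nil, err
			}
			out[name] = a
		case "sterms":
			var a StringTermsAggregate
			if err := json.Unmarshal(msg, &a); err != nil {
				return nil, err
			}
			out[name] = a
		default:
			var a map[string]any // unknown kind: fall back to a generic map
			if err := json.Unmarshal(msg, &a); err != nil {
				return nil, err
			}
			out[name] = a
		}
	}
	return out, nil
}

func main() {
	aggs, err := decodeTypedKeys([]byte(`{"doc_count": 7, "min#oldest": {"value": 3}}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", aggs["oldest"]) // main.MinAggregate{Value:3}
}
// ============================================================================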
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L896-L900 +type ParentAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *ParentAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err 
!= nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + 
+			default:
+				o := make(map[string]any, 0)
+				if err := dec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Aggregations", err)
+				}
+				s.Aggregations[elems[1]] = o
+			}
+		} else {
+			return errors.New("cannot decode JSON for field Aggregations")
+		}
+	} else {
+		o := make(map[string]any, 0)
+		if err := dec.Decode(&o); err != nil {
+			return fmt.Errorf("%s | %w", "Aggregations", err)
+		}
+		s.Aggregations[value] = o
+	}
+}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ParentAggregate) MarshalJSON() ([]byte, error) {
+	type opt ParentAggregate
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewParentAggregate returns a ParentAggregate.
+func NewParentAggregate() *ParentAggregate {
+	r := &ParentAggregate{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregation.go
new file mode 100644
index 000000000..1af8bcf2c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentaggregation.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ParentAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L662-L667
+type ParentAggregation struct {
+	// Type The child type that should be selected.
+	Type *string `json:"type,omitempty"`
+}
+
+func (s *ParentAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewParentAggregation returns a ParentAggregation.
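// ============================================================================
// A self-contained miniature of the alias-type trick used by
// ParentAggregate.MarshalJSON above: marshalling through a method-less alias
// avoids infinite recursion, and round-tripping through a map lets the
// additional-properties map be inlined at the top level. `Bucket` and `Extra`
// are illustrative names.
package main

import (
	"encoding/json"
	"fmt"
)

type Bucket struct {
	DocCount int64          `json:"doc_count"`
	Extra    map[string]any `json:"-"` // inlined by MarshalJSON below
}

func (b Bucket) MarshalJSON() ([]byte, error) {
	type plain Bucket // plain has no methods, so this marshal does not recurse
	data, err := json.Marshal(plain(b))
	if err != nil {
		return nil, err
	}
	tmp := map[string]any{}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	for k, v := range b.Extra { // hoist the additional properties to the top level
		tmp[k] = v
	}
	return json.Marshal(tmp)
}

func main() {
	b := Bucket{DocCount: 2, Extra: map[string]any{"min#oldest": map[string]any{"value": 3.0}}}
	out, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"doc_count":2,"min#oldest":{"value":3}}
}
// ============================================================================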
+func NewParentAggregation() *ParentAggregation { + r := &ParentAggregation{} + + return r +} + +type ParentAggregationVariant interface { + ParentAggregationCaster() *ParentAggregation +} + +func (s *ParentAggregation) ParentAggregationCaster() *ParentAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentidquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentidquery.go new file mode 100644 index 000000000..8917268b8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parentidquery.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ParentIdQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/joining.ts#L141-L158 +type ParentIdQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Id ID of the parent document. + Id *string `json:"id,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `type` and not return any documents + // instead of an error. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Type Name of the child relationship mapped for the `join` field. 
+ Type *string `json:"type,omitempty"` +} + +func (s *ParentIdQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewParentIdQuery returns a ParentIdQuery. +func NewParentIdQuery() *ParentIdQuery { + r := &ParentIdQuery{} + + return r +} + +type ParentIdQueryVariant interface { + ParentIdQueryCaster() *ParentIdQuery +} + +func (s *ParentIdQuery) ParentIdQueryCaster() *ParentIdQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parenttaskinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parenttaskinfo.go new file mode 100644 index 000000000..b299a04a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/parenttaskinfo.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ParentTaskInfo type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 +type ParentTaskInfo struct { + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + Children []TaskInfo `json:"children,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. + // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. + // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. + Description *string `json:"description,omitempty"` + Headers map[string]string `json:"headers"` + Id int64 `json:"id"` + Node string `json:"node"` + ParentTaskId *string `json:"parent_task_id,omitempty"` + RunningTime Duration `json:"running_time,omitempty"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. + Status json.RawMessage `json:"status,omitempty"` + Type string `json:"type"` +} + +func (s *ParentTaskInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cancellable", err) + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "cancelled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cancelled", err) + } + s.Cancelled = &value + case bool: + s.Cancelled = &v + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err 
:= strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "parent_task_id": + if err := dec.Decode(&s.ParentTaskId); err != nil { + return fmt.Errorf("%s | %w", "ParentTaskId", err) + } + + case "running_time": + if err := dec.Decode(&s.RunningTime); err != nil { + return fmt.Errorf("%s | %w", "RunningTime", err) + } + + case "running_time_in_nanos": + if err := dec.Decode(&s.RunningTimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "RunningTimeInNanos", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewParentTaskInfo returns a ParentTaskInfo. +func NewParentTaskInfo() *ParentTaskInfo { + r := &ParentTaskInfo{ + Headers: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceoptions.go new file mode 100644 index 000000000..3537890f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceoptions.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PassThroughInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L239-L246 +type PassThroughInferenceOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary *Vocabulary `json:"vocabulary,omitempty"` +} + +func (s *PassThroughInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + + } + } + return nil +} + +// NewPassThroughInferenceOptions returns a PassThroughInferenceOptions. +func NewPassThroughInferenceOptions() *PassThroughInferenceOptions { + r := &PassThroughInferenceOptions{} + + return r +} + +type PassThroughInferenceOptionsVariant interface { + PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions +} + +func (s *PassThroughInferenceOptions) PassThroughInferenceOptionsCaster() *PassThroughInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceupdateoptions.go new file mode 100644 index 000000000..886f4c357 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughinferenceupdateoptions.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PassThroughInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L404-L409 +type PassThroughInferenceUpdateOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *PassThroughInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewPassThroughInferenceUpdateOptions returns a PassThroughInferenceUpdateOptions. +func NewPassThroughInferenceUpdateOptions() *PassThroughInferenceUpdateOptions { + r := &PassThroughInferenceUpdateOptions{} + + return r +} + +type PassThroughInferenceUpdateOptionsVariant interface { + PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions +} + +func (s *PassThroughInferenceUpdateOptions) PassThroughInferenceUpdateOptionsCaster() *PassThroughInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughobjectproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughobjectproperty.go new file mode 100644 index 000000000..171f83a61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/passthroughobjectproperty.go @@ -0,0 +1,906 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// PassthroughObjectProperty type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/complex.ts#L53-L58 +type PassthroughObjectProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Priority *int `json:"priority,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PassthroughObjectProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + 
case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + 
oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "priority": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int(v) + s.Priority = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := 
new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PassthroughObjectProperty) MarshalJSON() ([]byte, error) { + type innerPassthroughObjectProperty PassthroughObjectProperty + tmp := innerPassthroughObjectProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Priority: s.Priority, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "passthrough" + + return json.Marshal(tmp) +} + +// NewPassthroughObjectProperty returns a PassthroughObjectProperty. +func NewPassthroughObjectProperty() *PassthroughObjectProperty { + r := &PassthroughObjectProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type PassthroughObjectPropertyVariant interface { + PassthroughObjectPropertyCaster() *PassthroughObjectProperty +} + +func (s *PassthroughObjectProperty) PassthroughObjectPropertyCaster() *PassthroughObjectProperty { + return s +} + +func (s *PassthroughObjectProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pathhierarchytokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pathhierarchytokenizer.go new file mode 100644 index 000000000..00dc6a7b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pathhierarchytokenizer.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PathHierarchyTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L97-L104 +type PathHierarchyTokenizer struct { + BufferSize Stringifiedinteger `json:"buffer_size,omitempty"` + Delimiter *string `json:"delimiter,omitempty"` + Replacement *string `json:"replacement,omitempty"` + Reverse Stringifiedboolean `json:"reverse,omitempty"` + Skip Stringifiedinteger `json:"skip,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PathHierarchyTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buffer_size": + if err := dec.Decode(&s.BufferSize); err != nil { + return fmt.Errorf("%s | %w", "BufferSize", err) + } + + case "delimiter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Delimiter", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delimiter = &o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Replacement", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = &o + + case "reverse": + if err := dec.Decode(&s.Reverse); err != nil { + return fmt.Errorf("%s | %w", "Reverse", err) + } + + case "skip": + if err := dec.Decode(&s.Skip); err != nil { + return fmt.Errorf("%s | %w", "Skip", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PathHierarchyTokenizer) MarshalJSON() ([]byte, error) { + type innerPathHierarchyTokenizer PathHierarchyTokenizer + tmp := innerPathHierarchyTokenizer{ + BufferSize: s.BufferSize, + Delimiter: s.Delimiter, + Replacement: s.Replacement, + Reverse: s.Reverse, + Skip: s.Skip, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "path_hierarchy" + + return json.Marshal(tmp) +} + +// NewPathHierarchyTokenizer returns a PathHierarchyTokenizer. +func NewPathHierarchyTokenizer() *PathHierarchyTokenizer { + r := &PathHierarchyTokenizer{} + + return r +} + +type PathHierarchyTokenizerVariant interface { + PathHierarchyTokenizerCaster() *PathHierarchyTokenizer +} + +func (s *PathHierarchyTokenizer) PathHierarchyTokenizerCaster() *PathHierarchyTokenizer { + return s +} + +func (s *PathHierarchyTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternanalyzer.go new file mode 100644 index 000000000..ab74bf637 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternanalyzer.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PatternAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L332-L365 +type PatternAnalyzer struct { + // Flags Java regular expression flags. Flags should be pipe-separated, eg + // "CASE_INSENSITIVE|COMMENTS". + Flags *string `json:"flags,omitempty"` + // Lowercase Should terms be lowercased or not. + // Defaults to `true`. + Lowercase *bool `json:"lowercase,omitempty"` + // Pattern A Java regular expression. + // Defaults to `\W+`. + Pattern *string `json:"pattern,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. + Stopwords StopWords `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. 
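+	// The path is usually given relative to the Elasticsearch config
+	// directory and is an alternative to listing `stopwords` inline.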
+ StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "lowercase": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lowercase", err) + } + s.Lowercase = &value + case bool: + s.Lowercase = &v + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternAnalyzer) MarshalJSON() ([]byte, error) { + type innerPatternAnalyzer PatternAnalyzer + tmp := innerPatternAnalyzer{ + Flags: s.Flags, + Lowercase: s.Lowercase, + Pattern: s.Pattern, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern" + + return json.Marshal(tmp) +} + +// NewPatternAnalyzer returns a PatternAnalyzer. +func NewPatternAnalyzer() *PatternAnalyzer { + r := &PatternAnalyzer{} + + return r +} + +type PatternAnalyzerVariant interface { + PatternAnalyzerCaster() *PatternAnalyzer +} + +func (s *PatternAnalyzer) PatternAnalyzerCaster() *PatternAnalyzer { + return s +} + +func (s *PatternAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterncapturetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterncapturetokenfilter.go new file mode 100644 index 000000000..77d893cc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterncapturetokenfilter.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PatternCaptureTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L374-L380 +type PatternCaptureTokenFilter struct { + // Patterns A list of regular expressions to match. + Patterns []string `json:"patterns"` + // PreserveOriginal If set to `true` (the default) it will emit the original token. + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternCaptureTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return fmt.Errorf("%s | %w", "Patterns", err) + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternCaptureTokenFilter) MarshalJSON() ([]byte, error) { + type innerPatternCaptureTokenFilter PatternCaptureTokenFilter + tmp := innerPatternCaptureTokenFilter{ + Patterns: s.Patterns, + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_capture" + + return json.Marshal(tmp) +} + +// NewPatternCaptureTokenFilter returns a PatternCaptureTokenFilter. +func NewPatternCaptureTokenFilter() *PatternCaptureTokenFilter { + r := &PatternCaptureTokenFilter{} + + return r +} + +type PatternCaptureTokenFilterVariant interface { + PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter +} + +func (s *PatternCaptureTokenFilter) PatternCaptureTokenFilterCaster() *PatternCaptureTokenFilter { + return s +} + +func (s *PatternCaptureTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacecharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacecharfilter.go new file mode 100644 index 000000000..58251e484 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacecharfilter.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PatternReplaceCharFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/char_filters.ts#L57-L62 +type PatternReplaceCharFilter struct { + Flags *string `json:"flags,omitempty"` + Pattern string `json:"pattern"` + Replacement *string `json:"replacement,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternReplaceCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Replacement", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternReplaceCharFilter) MarshalJSON() ([]byte, error) { + type innerPatternReplaceCharFilter PatternReplaceCharFilter + tmp := innerPatternReplaceCharFilter{ + Flags: s.Flags, + Pattern: s.Pattern, + Replacement: s.Replacement, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_replace" + + return json.Marshal(tmp) +} + +// NewPatternReplaceCharFilter returns a PatternReplaceCharFilter. 
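+//
+// A minimal usage sketch (illustrative, not generated from the spec):
+//
+//	f := NewPatternReplaceCharFilter()
+//	f.Pattern = "[0-9]+"
+//	repl := "#"
+//	f.Replacement = &repl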
+func NewPatternReplaceCharFilter() *PatternReplaceCharFilter { + r := &PatternReplaceCharFilter{} + + return r +} + +type PatternReplaceCharFilterVariant interface { + PatternReplaceCharFilterCaster() *PatternReplaceCharFilter +} + +func (s *PatternReplaceCharFilter) PatternReplaceCharFilterCaster() *PatternReplaceCharFilter { + return s +} + +func (s *PatternReplaceCharFilter) CharFilterDefinitionCaster() *CharFilterDefinition { + o := CharFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacetokenfilter.go new file mode 100644 index 000000000..641aa11b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patternreplacetokenfilter.go @@ -0,0 +1,166 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PatternReplaceTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L382-L391 +type PatternReplaceTokenFilter struct { + // All If `true`, all substrings matching the pattern parameter’s regular expression + // are replaced. If `false`, the filter replaces only the first matching + // substring in each token. Defaults to `true`. + All *bool `json:"all,omitempty"` + Flags *string `json:"flags,omitempty"` + // Pattern Regular expression, written in Java’s regular expression syntax. The filter + // replaces token substrings matching this pattern with the substring in the + // `replacement` parameter. + Pattern string `json:"pattern"` + // Replacement Replacement substring. Defaults to an empty substring (`""`). 
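+	// Java capture groups from `pattern` may be referenced here; e.g. pattern
+	// `(dog)` with replacement `watch$1` rewrites "dog" to "watchdog"
+	// (illustrative).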
+ Replacement *string `json:"replacement,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternReplaceTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "All", err) + } + s.All = &value + case bool: + s.All = &v + } + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Replacement", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternReplaceTokenFilter) MarshalJSON() ([]byte, error) { + type innerPatternReplaceTokenFilter PatternReplaceTokenFilter + tmp := innerPatternReplaceTokenFilter{ + All: s.All, + Flags: s.Flags, + Pattern: s.Pattern, + Replacement: s.Replacement, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_replace" + + return json.Marshal(tmp) +} + +// NewPatternReplaceTokenFilter returns a PatternReplaceTokenFilter. +func NewPatternReplaceTokenFilter() *PatternReplaceTokenFilter { + r := &PatternReplaceTokenFilter{} + + return r +} + +type PatternReplaceTokenFilterVariant interface { + PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter +} + +func (s *PatternReplaceTokenFilter) PatternReplaceTokenFilterCaster() *PatternReplaceTokenFilter { + return s +} + +func (s *PatternReplaceTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterntokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterntokenizer.go new file mode 100644 index 000000000..782f86079 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/patterntokenizer.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PatternTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L106-L111 +type PatternTokenizer struct { + Flags *string `json:"flags,omitempty"` + Group *int `json:"group,omitempty"` + Pattern *string `json:"pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "group": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Group", err) + } + s.Group = &value + case float64: + f := int(v) + s.Group = &f + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternTokenizer) MarshalJSON() ([]byte, error) { + type innerPatternTokenizer PatternTokenizer + tmp := innerPatternTokenizer{ + Flags: s.Flags, + Group: s.Group, + Pattern: s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern" + + return json.Marshal(tmp) +} + +// NewPatternTokenizer returns a PatternTokenizer. 
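+//
+// MarshalJSON always injects the literal type, so even a fresh value
+// serializes with it (illustrative):
+//
+//	data, _ := json.Marshal(NewPatternTokenizer())
+//	// string(data) == `{"type":"pattern"}`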
+func NewPatternTokenizer() *PatternTokenizer { + r := &PatternTokenizer{} + + return r +} + +type PatternTokenizerVariant interface { + PatternTokenizerCaster() *PatternTokenizer +} + +func (s *PatternTokenizer) PatternTokenizerCaster() *PatternTokenizer { + return s +} + +func (s *PatternTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtask.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtask.go new file mode 100644 index 000000000..a96d0b633 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtask.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PendingTask type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/pending_tasks/types.ts#L23-L47 +type PendingTask struct { + // Executing Indicates whether the pending tasks are currently executing or not. + Executing bool `json:"executing"` + // InsertOrder The number that represents when the task has been inserted into the task + // queue. + InsertOrder int `json:"insert_order"` + // Priority The priority of the pending task. + // The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` + // > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. + Priority string `json:"priority"` + // Source A general description of the cluster task that may include a reason and + // origin. + Source string `json:"source"` + // TimeInQueue The time since the task is waiting for being performed. + TimeInQueue Duration `json:"time_in_queue,omitempty"` + // TimeInQueueMillis The time expressed in milliseconds since the task is waiting for being + // performed. 
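+	// This is the numeric counterpart of `time_in_queue`; a `time_in_queue`
+	// of "86ms", for example, corresponds to a value of 86 here
+	// (illustrative).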
+ TimeInQueueMillis int64 `json:"time_in_queue_millis"` +} + +func (s *PendingTask) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Executing", err) + } + s.Executing = value + case bool: + s.Executing = v + } + + case "insert_order": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "InsertOrder", err) + } + s.InsertOrder = value + case float64: + f := int(v) + s.InsertOrder = f + } + + case "priority": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = o + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + case "time_in_queue": + if err := dec.Decode(&s.TimeInQueue); err != nil { + return fmt.Errorf("%s | %w", "TimeInQueue", err) + } + + case "time_in_queue_millis": + if err := dec.Decode(&s.TimeInQueueMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInQueueMillis", err) + } + + } + } + return nil +} + +// NewPendingTask returns a PendingTask. +func NewPendingTask() *PendingTask { + r := &PendingTask{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtasksrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtasksrecord.go new file mode 100644 index 000000000..c4d8df2c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pendingtasksrecord.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PendingTasksRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/pending_tasks/types.ts#L20-L41 +type PendingTasksRecord struct { + // InsertOrder The task insertion order. 
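+	// The cat API may abbreviate this column as "o"; UnmarshalJSON below
+	// accepts either name.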
+ InsertOrder *string `json:"insertOrder,omitempty"` + // Priority The task priority. + Priority *string `json:"priority,omitempty"` + // Source The task source. + Source *string `json:"source,omitempty"` + // TimeInQueue Indicates how long the task has been in queue. + TimeInQueue *string `json:"timeInQueue,omitempty"` +} + +func (s *PendingTasksRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "insertOrder", "o": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InsertOrder", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InsertOrder = &o + + case "priority", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = &o + + case "source", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + case "timeInQueue", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TimeInQueue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TimeInQueue = &o + + } + } + return nil +} + +// NewPendingTasksRecord returns a PendingTasksRecord. +func NewPendingTasksRecord() *PendingTasksRecord { + r := &PendingTasksRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentage.go new file mode 100644 index 000000000..6bf6f25d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentage.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
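// Illustrative usage, not part of the vendored file: the generated
// PendingTasksRecord.UnmarshalJSON accepts both the long cat-API column
// names and their short aliases ("o", "p", "s", "t"). A minimal sketch
// decoding a hypothetical /_cat/pending_tasks row (sample values are
// made up):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"o":"1685","p":"HIGH","s":"update-mapping","t":"855ms"}`)

	rec := types.NewPendingTasksRecord()
	if err := json.Unmarshal(raw, rec); err != nil {
		panic(err)
	}
	// The short aliases populate the same fields as the long names.
	fmt.Println(*rec.InsertOrder, *rec.Priority, *rec.Source, *rec.TimeInQueue)
}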
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Percentage holds the union for the following types: +// +// string +// float32 +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Numeric.ts#L28-L28 +type Percentage any + +type PercentageVariant interface { + PercentageCaster() *Percentage +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentagescoreheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentagescoreheuristic.go new file mode 100644 index 000000000..892e05497 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentagescoreheuristic.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// PercentageScoreHeuristic type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L811-L811 +type PercentageScoreHeuristic struct { +} + +// NewPercentageScoreHeuristic returns a PercentageScoreHeuristic. +func NewPercentageScoreHeuristic() *PercentageScoreHeuristic { + r := &PercentageScoreHeuristic{} + + return r +} + +type PercentageScoreHeuristicVariant interface { + PercentageScoreHeuristicCaster() *PercentageScoreHeuristic +} + +func (s *PercentageScoreHeuristic) PercentageScoreHeuristicCaster() *PercentageScoreHeuristic { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentileranksaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentileranksaggregation.go new file mode 100644 index 000000000..27bf2f5e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentileranksaggregation.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
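// Sketch only, not generated code: Percentage is an open union (any),
// so a caller assigns either the string or the float32 form directly.
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var p types.Percentage = "65%"          // string form
	var q types.Percentage = float32(0.65) // numeric form
	fmt.Println(p, q)
}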
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PercentileRanksAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L192-L214 +type PercentileRanksAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate + // percentile ranks. + Hdr *HdrMethod `json:"hdr,omitempty"` + // Keyed By default, the aggregation associates a unique string key with each bucket + // and returns the ranges as a hash rather than an array. + // Set to `false` to disable this behavior. + Keyed *bool `json:"keyed,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` + // Tdigest Sets parameters for the default TDigest algorithm used to calculate + // percentile ranks. + Tdigest *TDigest `json:"tdigest,omitempty"` + // Values An array of values for which to calculate the percentile ranks. + Values *[]Float64 `json:"values,omitempty"` +} + +func (s *PercentileRanksAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hdr": + if err := dec.Decode(&s.Hdr); err != nil { + return fmt.Errorf("%s | %w", "Hdr", err) + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "tdigest": + if err := dec.Decode(&s.Tdigest); err != nil { + return fmt.Errorf("%s | %w", "Tdigest", err) + } + + case "values": + if err := dec.Decode(&s.Values); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + + } + } + return nil +} + +// NewPercentileRanksAggregation returns a PercentileRanksAggregation. 
+func NewPercentileRanksAggregation() *PercentileRanksAggregation { + r := &PercentileRanksAggregation{} + + return r +} + +type PercentileRanksAggregationVariant interface { + PercentileRanksAggregationCaster() *PercentileRanksAggregation +} + +func (s *PercentileRanksAggregation) PercentileRanksAggregationCaster() *PercentileRanksAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentiles.go new file mode 100644 index 000000000..e4ccfe5bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentiles.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Percentiles holds the union for the following types: +// +// KeyedPercentiles +// []ArrayPercentilesItem +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L152-L153 +type Percentiles any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesaggregation.go new file mode 100644 index 000000000..241ad6ee2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesaggregation.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PercentilesAggregation type. 
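// Illustrative only, not part of the vendored file: building a
// percentile_ranks aggregation body with the typed struct and checking
// the JSON it produces. The field name "load_time" and the values are
// hypothetical; the JSON tags come from the struct definition above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	field := "load_time"
	keyed := false
	values := []types.Float64{500, 600}

	agg := types.NewPercentileRanksAggregation()
	agg.Field = &field
	agg.Keyed = &keyed
	agg.Values = &values

	out, _ := json.Marshal(agg)
	fmt.Println(string(out))
	// expected shape: {"field":"load_time","keyed":false,"values":[500,600]}
}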
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L216-L235 +type PercentilesAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate + // percentiles. + Hdr *HdrMethod `json:"hdr,omitempty"` + // Keyed By default, the aggregation associates a unique string key with each bucket + // and returns the ranges as a hash rather than an array. + // Set to `false` to disable this behavior. + Keyed *bool `json:"keyed,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Percents The percentiles to calculate. + Percents []Float64 `json:"percents,omitempty"` + Script *Script `json:"script,omitempty"` + // Tdigest Sets parameters for the default TDigest algorithm used to calculate + // percentiles. + Tdigest *TDigest `json:"tdigest,omitempty"` +} + +func (s *PercentilesAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hdr": + if err := dec.Decode(&s.Hdr); err != nil { + return fmt.Errorf("%s | %w", "Hdr", err) + } + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "percents": + if err := dec.Decode(&s.Percents); err != nil { + return fmt.Errorf("%s | %w", "Percents", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "tdigest": + if err := dec.Decode(&s.Tdigest); err != nil { + return fmt.Errorf("%s | %w", "Tdigest", err) + } + + } + } + return nil +} + +// NewPercentilesAggregation returns a PercentilesAggregation. +func NewPercentilesAggregation() *PercentilesAggregation { + r := &PercentilesAggregation{} + + return r +} + +type PercentilesAggregationVariant interface { + PercentilesAggregationCaster() *PercentilesAggregation +} + +func (s *PercentilesAggregation) PercentilesAggregationCaster() *PercentilesAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregate.go new file mode 100644 index 000000000..62a2b0840 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PercentilesBucketAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L180-L181 +type PercentilesBucketAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *PercentilesBucketAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + } + + } + } + return nil +} + +// NewPercentilesBucketAggregate returns a PercentilesBucketAggregate. +func NewPercentilesBucketAggregate() *PercentilesBucketAggregate { + r := &PercentilesBucketAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregation.go new file mode 100644 index 000000000..77038f186 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percentilesbucketaggregation.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
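// Illustrative sketch, not vendored code: PercentilesBucketAggregate's
// custom UnmarshalJSON shown earlier inspects the first byte of
// "values", so a keyed response ('{') lands as map[string]any while an
// array ('[') lands as []types.ArrayPercentilesItem. Decoding a
// hypothetical keyed response:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"values":{"25.0":20.0,"50.0":35.5,"75.0":60.0}}`)

	agg := types.NewPercentilesBucketAggregate()
	if err := json.Unmarshal(raw, agg); err != nil {
		panic(err)
	}
	if keyed, ok := agg.Values.(map[string]any); ok {
		fmt.Println(keyed["50.0"]) // 35.5
	}
}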
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// PercentilesBucketAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L389-L397 +type PercentilesBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Percents The list of percentiles to calculate. + Percents []Float64 `json:"percents,omitempty"` +} + +func (s *PercentilesBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "percents": + if err := dec.Decode(&s.Percents); err != nil { + return fmt.Errorf("%s | %w", "Percents", err) + } + + } + } + return nil +} + +// NewPercentilesBucketAggregation returns a PercentilesBucketAggregation. +func NewPercentilesBucketAggregation() *PercentilesBucketAggregation { + r := &PercentilesBucketAggregation{} + + return r +} + +type PercentilesBucketAggregationVariant interface { + PercentilesBucketAggregationCaster() *PercentilesBucketAggregation +} + +func (s *PercentilesBucketAggregation) PercentilesBucketAggregationCaster() *PercentilesBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatequery.go new file mode 100644 index 000000000..c069bc25e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatequery.go @@ -0,0 +1,185 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
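// Illustrative only: a minimal percentiles_bucket pipeline-aggregation
// body using the PercentilesBucketAggregation struct above. The sibling
// aggregation name "the_sum" is hypothetical; BucketsPath is an open
// union, so a plain string selects a single path.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	format := "0.00"

	agg := types.NewPercentilesBucketAggregation()
	agg.BucketsPath = "the_sum"
	agg.Format = &format
	agg.Percents = []types.Float64{25, 50, 75}

	out, _ := json.Marshal(agg)
	fmt.Println(string(out))
	// expected shape: {"buckets_path":"the_sum","format":"0.00","percents":[25,50,75]}
}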
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PercolateQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L205-L245 +type PercolateQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Document The source of the document being percolated. + Document json.RawMessage `json:"document,omitempty"` + // Documents An array of sources of the documents being percolated. + Documents []json.RawMessage `json:"documents,omitempty"` + // Field Field that holds the indexed queries. The field must use the `percolator` + // mapping type. + Field string `json:"field"` + // Id The ID of a stored document to percolate. + Id *string `json:"id,omitempty"` + // Index The index of a stored document to percolate. + Index *string `json:"index,omitempty"` + // Name The suffix used for the `_percolator_document_slot` field when multiple + // `percolate` queries are specified. + Name *string `json:"name,omitempty"` + // Preference Preference used to fetch document to percolate. + Preference *string `json:"preference,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Routing Routing used to fetch document to percolate. + Routing *string `json:"routing,omitempty"` + // Version The expected version of a stored document to percolate. 
+ Version *int64 `json:"version,omitempty"` +} + +func (s *PercolateQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "document": + if err := dec.Decode(&s.Document); err != nil { + return fmt.Errorf("%s | %w", "Document", err) + } + + case "documents": + if err := dec.Decode(&s.Documents); err != nil { + return fmt.Errorf("%s | %w", "Documents", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "preference": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Preference", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Preference = &o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewPercolateQuery returns a PercolateQuery. +func NewPercolateQuery() *PercolateQuery { + r := &PercolateQuery{} + + return r +} + +type PercolateQueryVariant interface { + PercolateQueryCaster() *PercolateQuery +} + +func (s *PercolateQuery) PercolateQueryCaster() *PercolateQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatorproperty.go new file mode 100644 index 000000000..4b57ee9ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/percolatorproperty.go @@ -0,0 +1,822 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
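// Not part of the generated file: a sketch of percolating a single
// in-memory document with the typed PercolateQuery. The mapping field
// name "query_field" and the document body are assumptions.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	q := types.NewPercolateQuery()
	q.Field = "query_field" // must be mapped with the `percolator` type
	q.Document = json.RawMessage(`{"message":"out of disk space"}`)

	out, _ := json.Marshal(q)
	fmt.Println(string(out))
	// expected shape: {"document":{"message":"out of disk space"},"field":"query_field"}
}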
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// PercolatorProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L203-L205 +type PercolatorProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if 
err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) 
+ } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PercolatorProperty) MarshalJSON() ([]byte, error) { + type innerPercolatorProperty PercolatorProperty + tmp := 
innerPercolatorProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "percolator" + + return json.Marshal(tmp) +} + +// NewPercolatorProperty returns a PercolatorProperty. +func NewPercolatorProperty() *PercolatorProperty { + r := &PercolatorProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type PercolatorPropertyVariant interface { + PercolatorPropertyCaster() *PercolatorProperty +} + +func (s *PercolatorProperty) PercolatorPropertyCaster() *PercolatorProperty { + return s +} + +func (s *PercolatorProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perpartitioncategorization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perpartitioncategorization.go new file mode 100644 index 000000000..249939f15 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perpartitioncategorization.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PerPartitionCategorization type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Analysis.ts#L150-L159 +type PerPartitionCategorization struct { + // Enabled To enable this setting, you must also set the `partition_field_name` property + // to the same value in every detector that uses the keyword `mlcategory`. + // Otherwise, job creation fails. + Enabled *bool `json:"enabled,omitempty"` + // StopOnWarn This setting can be set to true only if per-partition categorization is + // enabled. If true, both categorization and subsequent anomaly detection stops + // for partitions where the categorization status changes to warn. This setting + // makes it viable to have a job where it is expected that categorization works + // well for some partitions but not others; you do not pay the cost of bad + // categorization forever in the partitions where it works badly. 
+ StopOnWarn *bool `json:"stop_on_warn,omitempty"` +} + +func (s *PerPartitionCategorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "stop_on_warn": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StopOnWarn", err) + } + s.StopOnWarn = &value + case bool: + s.StopOnWarn = &v + } + + } + } + return nil +} + +// NewPerPartitionCategorization returns a PerPartitionCategorization. +func NewPerPartitionCategorization() *PerPartitionCategorization { + r := &PerPartitionCategorization{} + + return r +} + +type PerPartitionCategorizationVariant interface { + PerPartitionCategorizationCaster() *PerPartitionCategorization +} + +func (s *PerPartitionCategorization) PerPartitionCategorizationCaster() *PerPartitionCategorization { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perrepositorystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perrepositorystats.go new file mode 100644 index 000000000..e33801c03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/perrepositorystats.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PerRepositoryStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L665-L670 +type PerRepositoryStats struct { + CurrentCounts RepositoryStatsCurrentCounts `json:"current_counts"` + OldestStartTime *string `json:"oldest_start_time,omitempty"` + OldestStartTimeMillis int64 `json:"oldest_start_time_millis"` + Type string `json:"type"` +} + +func (s *PerRepositoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_counts": + if err := dec.Decode(&s.CurrentCounts); err != nil { + return fmt.Errorf("%s | %w", "CurrentCounts", err) + } + + case "oldest_start_time": + if err := dec.Decode(&s.OldestStartTime); err != nil { + return fmt.Errorf("%s | %w", "OldestStartTime", err) + } + + case "oldest_start_time_millis": + if err := dec.Decode(&s.OldestStartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "OldestStartTimeMillis", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewPerRepositoryStats returns a PerRepositoryStats. +func NewPerRepositoryStats() *PerRepositoryStats { + r := &PerRepositoryStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiananalyzer.go new file mode 100644 index 000000000..f5a354dd2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiananalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PersianAnalyzer type. 
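// Sketch only: decoding one hypothetical entry of the cluster stats
// repositories section into PerRepositoryStats. The sample JSON shape
// and values are assumptions; "type" is unquoted by the custom
// unmarshaller shown earlier.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"type":"fs","oldest_start_time_millis":1712000000000}`)

	st := types.NewPerRepositoryStats()
	if err := json.Unmarshal(raw, st); err != nil {
		panic(err)
	}
	fmt.Println(st.Type, st.OldestStartTimeMillis) // fs 1712000000000
}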
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L255-L259 +type PersianAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PersianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PersianAnalyzer) MarshalJSON() ([]byte, error) { + type innerPersianAnalyzer PersianAnalyzer + tmp := innerPersianAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "persian" + + return json.Marshal(tmp) +} + +// NewPersianAnalyzer returns a PersianAnalyzer. +func NewPersianAnalyzer() *PersianAnalyzer { + r := &PersianAnalyzer{} + + return r +} + +type PersianAnalyzerVariant interface { + PersianAnalyzerCaster() *PersianAnalyzer +} + +func (s *PersianAnalyzer) PersianAnalyzerCaster() *PersianAnalyzer { + return s +} + +func (s *PersianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiannormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiannormalizationtokenfilter.go new file mode 100644 index 000000000..3eec82143 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persiannormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PersianNormalizationTokenFilter type. 
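+//
+// Because MarshalJSON injects the literal type name, serialising even the
+// zero value yields a usable filter definition (sketch):
+//
+//	f := NewPersianNormalizationTokenFilter()
+//	b, _ := json.Marshal(f) // {"type":"persian_normalization"}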
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L527-L529 +type PersianNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PersianNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PersianNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerPersianNormalizationTokenFilter PersianNormalizationTokenFilter + tmp := innerPersianNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "persian_normalization" + + return json.Marshal(tmp) +} + +// NewPersianNormalizationTokenFilter returns a PersianNormalizationTokenFilter. +func NewPersianNormalizationTokenFilter() *PersianNormalizationTokenFilter { + r := &PersianNormalizationTokenFilter{} + + return r +} + +type PersianNormalizationTokenFilterVariant interface { + PersianNormalizationTokenFilterCaster() *PersianNormalizationTokenFilter +} + +func (s *PersianNormalizationTokenFilter) PersianNormalizationTokenFilterCaster() *PersianNormalizationTokenFilter { + return s +} + +func (s *PersianNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persianstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persianstemtokenfilter.go new file mode 100644 index 000000000..06b0e2955 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persianstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PersianStemTokenFilter type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L571-L573 +type PersianStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PersianStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PersianStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerPersianStemTokenFilter PersianStemTokenFilter + tmp := innerPersianStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "persian_stem" + + return json.Marshal(tmp) +} + +// NewPersianStemTokenFilter returns a PersianStemTokenFilter. +func NewPersianStemTokenFilter() *PersianStemTokenFilter { + r := &PersianStemTokenFilter{} + + return r +} + +type PersianStemTokenFilterVariant interface { + PersianStemTokenFilterCaster() *PersianStemTokenFilter +} + +func (s *PersianStemTokenFilter) PersianStemTokenFilterCaster() *PersianStemTokenFilter { + return s +} + +func (s *PersianStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persistenttaskstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persistenttaskstatus.go new file mode 100644 index 000000000..7471214bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/persistenttaskstatus.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus" +) + +// PersistentTaskStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 +type PersistentTaskStatus struct { + Status shutdownstatus.ShutdownStatus `json:"status"` +} + +// NewPersistentTaskStatus returns a PersistentTaskStatus. 
+func NewPersistentTaskStatus() *PersistentTaskStatus { + r := &PersistentTaskStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phase.go new file mode 100644 index 000000000..da218e2a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phase.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Phase type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L26-L29 +type Phase struct { + Actions *IlmActions `json:"actions,omitempty"` + MinAge Duration `json:"min_age,omitempty"` +} + +func (s *Phase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + } + } + return nil +} + +// NewPhase returns a Phase. +func NewPhase() *Phase { + r := &Phase{} + + return r +} + +type PhaseVariant interface { + PhaseCaster() *Phase +} + +func (s *Phase) PhaseCaster() *Phase { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phases.go new file mode 100644 index 000000000..9528e6c18 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phases.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Phases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L31-L37 +type Phases struct { + Cold *Phase `json:"cold,omitempty"` + Delete *Phase `json:"delete,omitempty"` + Frozen *Phase `json:"frozen,omitempty"` + Hot *Phase `json:"hot,omitempty"` + Warm *Phase `json:"warm,omitempty"` +} + +// NewPhases returns a Phases. +func NewPhases() *Phases { + r := &Phases{} + + return r +} + +type PhasesVariant interface { + PhasesCaster() *Phases +} + +func (s *Phases) PhasesCaster() *Phases { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phonetictokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phonetictokenfilter.go new file mode 100644 index 000000000..89c52039e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phonetictokenfilter.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticencoder" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticlanguage" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticruletype" +) + +// PhoneticTokenFilter type. 
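+//
+// UnmarshalJSON accepts "languageset" either as a single value or as an
+// array, so both of the following decode to the same one-element slice
+// (encoder choice is illustrative):
+//
+//	{"type": "phonetic", "encoder": "beider_morse", "languageset": "english"}
+//	{"type": "phonetic", "encoder": "beider_morse", "languageset": ["english"]}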
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/phonetic-plugin.ts#L64-L72 +type PhoneticTokenFilter struct { + Encoder phoneticencoder.PhoneticEncoder `json:"encoder"` + Languageset []phoneticlanguage.PhoneticLanguage `json:"languageset,omitempty"` + MaxCodeLen *int `json:"max_code_len,omitempty"` + NameType *phoneticnametype.PhoneticNameType `json:"name_type,omitempty"` + Replace *bool `json:"replace,omitempty"` + RuleType *phoneticruletype.PhoneticRuleType `json:"rule_type,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PhoneticTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "encoder": + if err := dec.Decode(&s.Encoder); err != nil { + return fmt.Errorf("%s | %w", "Encoder", err) + } + + case "languageset": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &phoneticlanguage.PhoneticLanguage{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Languageset", err) + } + + s.Languageset = append(s.Languageset, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Languageset); err != nil { + return fmt.Errorf("%s | %w", "Languageset", err) + } + } + + case "max_code_len": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxCodeLen", err) + } + s.MaxCodeLen = &value + case float64: + f := int(v) + s.MaxCodeLen = &f + } + + case "name_type": + if err := dec.Decode(&s.NameType); err != nil { + return fmt.Errorf("%s | %w", "NameType", err) + } + + case "replace": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Replace", err) + } + s.Replace = &value + case bool: + s.Replace = &v + } + + case "rule_type": + if err := dec.Decode(&s.RuleType); err != nil { + return fmt.Errorf("%s | %w", "RuleType", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PhoneticTokenFilter) MarshalJSON() ([]byte, error) { + type innerPhoneticTokenFilter PhoneticTokenFilter + tmp := innerPhoneticTokenFilter{ + Encoder: s.Encoder, + Languageset: s.Languageset, + MaxCodeLen: s.MaxCodeLen, + NameType: s.NameType, + Replace: s.Replace, + RuleType: s.RuleType, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "phonetic" + + return json.Marshal(tmp) +} + +// NewPhoneticTokenFilter returns a PhoneticTokenFilter. 
+func NewPhoneticTokenFilter() *PhoneticTokenFilter { + r := &PhoneticTokenFilter{} + + return r +} + +type PhoneticTokenFilterVariant interface { + PhoneticTokenFilterCaster() *PhoneticTokenFilter +} + +func (s *PhoneticTokenFilter) PhoneticTokenFilterCaster() *PhoneticTokenFilter { + return s +} + +func (s *PhoneticTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggest.go new file mode 100644 index 000000000..7e36a892a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggest.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PhraseSuggest type. 
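+//
+// One entry of a phrase-suggest response, of roughly this shape, decodes
+// into it (values are illustrative):
+//
+//	{
+//	  "text": "noble prize",
+//	  "offset": 0,
+//	  "length": 11,
+//	  "options": [{"text": "nobel prize", "score": 0.5962314}]
+//	}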
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L58-L63 +type PhraseSuggest struct { + Length int `json:"length"` + Offset int `json:"offset"` + Options []PhraseSuggestOption `json:"options"` + Text string `json:"text"` +} + +func (s *PhraseSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewPhraseSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewPhraseSuggest returns a PhraseSuggest. +func NewPhraseSuggest() *PhraseSuggest { + r := &PhraseSuggest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollate.go new file mode 100644 index 000000000..ad683f26c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollate.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PhraseSuggestCollate type. 
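+//
+// A typical collate block templates a match query over each suggestion
+// (field name and template are illustrative):
+//
+//	{
+//	  "query": {"source": "{\"match\": {\"{{field_name}}\": \"{{suggestion}}\"}}"},
+//	  "params": {"field_name": "title"},
+//	  "prune": true
+//	}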
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L334-L347 +type PhraseSuggestCollate struct { + // Params Parameters to use if the query is templated. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Prune Returns all suggestions with an extra `collate_match` option indicating + // whether the generated phrase matched any document. + Prune *bool `json:"prune,omitempty"` + // Query A collate query that is run once for every suggestion. + Query PhraseSuggestCollateQuery `json:"query"` +} + +func (s *PhraseSuggestCollate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "prune": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Prune", err) + } + s.Prune = &value + case bool: + s.Prune = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + } + } + return nil +} + +// NewPhraseSuggestCollate returns a PhraseSuggestCollate. +func NewPhraseSuggestCollate() *PhraseSuggestCollate { + r := &PhraseSuggestCollate{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type PhraseSuggestCollateVariant interface { + PhraseSuggestCollateCaster() *PhraseSuggestCollate +} + +func (s *PhraseSuggestCollate) PhraseSuggestCollateCaster() *PhraseSuggestCollate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollatequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollatequery.go new file mode 100644 index 000000000..ec782e050 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestcollatequery.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PhraseSuggestCollateQuery type. 
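+//
+// "source" is polymorphic: UnmarshalJSON scans the keys of an object value
+// and, when they look like a search request (query, sort, size, and so on),
+// decodes it as an inline search request body; otherwise it falls back to a
+// script/template source (both examples illustrative):
+//
+//	{"source": "{\"match\": {\"{{field_name}}\": \"{{suggestion}}\"}}"}
+//	{"source": {"query": {"match_all": {}}}}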
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L349-L358 +type PhraseSuggestCollateQuery struct { + // Id The search template ID. + Id *string `json:"id,omitempty"` + // Source The query source. + Source ScriptSource `json:"source,omitempty"` +} + +func (s *PhraseSuggestCollateQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewPhraseSuggestCollateQuery returns a PhraseSuggestCollateQuery. +func NewPhraseSuggestCollateQuery() *PhraseSuggestCollateQuery { + r := &PhraseSuggestCollateQuery{} + + return r +} + +type PhraseSuggestCollateQueryVariant interface { + PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery +} + +func (s *PhraseSuggestCollateQuery) PhraseSuggestCollateQueryCaster() *PhraseSuggestCollateQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggester.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggester.go new file mode 100644 index 000000000..16e647054 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggester.go @@ -0,0 +1,309 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PhraseSuggester type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L360-L418 +type PhraseSuggester struct { + // Analyzer The analyzer to analyze the suggest text with. + // Defaults to the search analyzer of the suggest field. + Analyzer *string `json:"analyzer,omitempty"` + // Collate Checks each suggestion against the specified query to prune suggestions for + // which no matching docs exist in the index. + Collate *PhraseSuggestCollate `json:"collate,omitempty"` + // Confidence Defines a factor applied to the input phrases score, which is used as a + // threshold for other suggest candidates. + // Only candidates that score higher than the threshold will be included in the + // result. + Confidence *Float64 `json:"confidence,omitempty"` + // DirectGenerator A list of candidate generators that produce a list of possible terms per term + // in the given text. + DirectGenerator []DirectGenerator `json:"direct_generator,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + ForceUnigrams *bool `json:"force_unigrams,omitempty"` + // GramSize Sets max size of the n-grams (shingles) in the field. + // If the field doesn’t contain n-grams (shingles), this should be omitted or + // set to `1`. + // If the field uses a shingle filter, the `gram_size` is set to the + // `max_shingle_size` if not explicitly set. + GramSize *int `json:"gram_size,omitempty"` + // Highlight Sets up suggestion highlighting. + // If not provided, no highlighted field is returned. + Highlight *PhraseSuggestHighlight `json:"highlight,omitempty"` + // MaxErrors The maximum percentage of the terms considered to be misspellings in order to + // form a correction. + // This method accepts a float value in the range `[0..1)` as a fraction of the + // actual query terms or a number `>=1` as an absolute number of query terms. + MaxErrors *Float64 `json:"max_errors,omitempty"` + // RealWordErrorLikelihood The likelihood of a term being misspelled even if the term exists in the + // dictionary. + RealWordErrorLikelihood *Float64 `json:"real_word_error_likelihood,omitempty"` + // Separator The separator that is used to separate terms in the bigram field. + // If not set, the whitespace character is used as a separator. + Separator *string `json:"separator,omitempty"` + // ShardSize Sets the maximum number of suggested terms to be retrieved from each + // individual shard. + ShardSize *int `json:"shard_size,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // Smoothing The smoothing model used to balance weight between infrequent grams (grams + // (shingles) are not existing in the index) and frequent grams (appear at least + // once in the index). + // The default model is Stupid Backoff. + Smoothing *SmoothingModelContainer `json:"smoothing,omitempty"` + // Text The text/query to provide suggestions for. 
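+	// For example: "text": "tring out Elasticsearch" (illustrative).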
+ Text *string `json:"text,omitempty"` + TokenLimit *int `json:"token_limit,omitempty"` +} + +func (s *PhraseSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "collate": + if err := dec.Decode(&s.Collate); err != nil { + return fmt.Errorf("%s | %w", "Collate", err) + } + + case "confidence": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Confidence", err) + } + f := Float64(value) + s.Confidence = &f + case float64: + f := Float64(v) + s.Confidence = &f + } + + case "direct_generator": + if err := dec.Decode(&s.DirectGenerator); err != nil { + return fmt.Errorf("%s | %w", "DirectGenerator", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "force_unigrams": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForceUnigrams", err) + } + s.ForceUnigrams = &value + case bool: + s.ForceUnigrams = &v + } + + case "gram_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "GramSize", err) + } + s.GramSize = &value + case float64: + f := int(v) + s.GramSize = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "max_errors": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxErrors", err) + } + f := Float64(value) + s.MaxErrors = &f + case float64: + f := Float64(v) + s.MaxErrors = &f + } + + case "real_word_error_likelihood": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RealWordErrorLikelihood", err) + } + f := Float64(value) + s.RealWordErrorLikelihood = &f + case float64: + f := Float64(v) + s.RealWordErrorLikelihood = &f + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "smoothing": + if err := dec.Decode(&s.Smoothing); err != nil { + return fmt.Errorf("%s | %w", "Smoothing", err) + 
} + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + case "token_limit": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TokenLimit", err) + } + s.TokenLimit = &value + case float64: + f := int(v) + s.TokenLimit = &f + } + + } + } + return nil +} + +// NewPhraseSuggester returns a PhraseSuggester. +func NewPhraseSuggester() *PhraseSuggester { + r := &PhraseSuggester{} + + return r +} + +type PhraseSuggesterVariant interface { + PhraseSuggesterCaster() *PhraseSuggester +} + +func (s *PhraseSuggester) PhraseSuggesterCaster() *PhraseSuggester { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggesthighlight.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggesthighlight.go new file mode 100644 index 000000000..9cdd143fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggesthighlight.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PhraseSuggestHighlight type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L420-L429 +type PhraseSuggestHighlight struct { + // PostTag Use in conjunction with `pre_tag` to define the HTML tags to use for the + // highlighted text. + PostTag string `json:"post_tag"` + // PreTag Use in conjunction with `post_tag` to define the HTML tags to use for the + // highlighted text. 
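+	// A typical pair is "pre_tag": "<em>" with "post_tag": "</em>"
+	// (illustrative).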
+ PreTag string `json:"pre_tag"` +} + +func (s *PhraseSuggestHighlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "post_tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PostTag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PostTag = o + + case "pre_tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PreTag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreTag = o + + } + } + return nil +} + +// NewPhraseSuggestHighlight returns a PhraseSuggestHighlight. +func NewPhraseSuggestHighlight() *PhraseSuggestHighlight { + r := &PhraseSuggestHighlight{} + + return r +} + +type PhraseSuggestHighlightVariant interface { + PhraseSuggestHighlightCaster() *PhraseSuggestHighlight +} + +func (s *PhraseSuggestHighlight) PhraseSuggestHighlightCaster() *PhraseSuggestHighlight { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestoption.go new file mode 100644 index 000000000..1b9a16270 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/phrasesuggestoption.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PhraseSuggestOption type. 
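+//
+// "collate_match" is only present when the suggester ran with collate
+// pruning enabled, and "highlighted" only when highlighting was configured,
+// so a fully populated option looks roughly like this (values illustrative):
+//
+//	{"text": "nobel prize", "highlighted": "<em>nobel</em> prize", "score": 0.48, "collate_match": true}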
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L87-L92 +type PhraseSuggestOption struct { + CollateMatch *bool `json:"collate_match,omitempty"` + Highlighted *string `json:"highlighted,omitempty"` + Score Float64 `json:"score"` + Text string `json:"text"` +} + +func (s *PhraseSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollateMatch", err) + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "highlighted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Highlighted", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Highlighted = &o + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewPhraseSuggestOption returns a PhraseSuggestOption. +func NewPhraseSuggestOption() *PhraseSuggestOption { + r := &PhraseSuggestOption{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinneddoc.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinneddoc.go new file mode 100644 index 000000000..74237c124 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinneddoc.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PinnedDoc type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L269-L278 +type PinnedDoc struct { + // Id_ The unique document ID. 
+ Id_ string `json:"_id"` + // Index_ The index that contains the document. + Index_ *string `json:"_index,omitempty"` +} + +func (s *PinnedDoc) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + } + } + return nil +} + +// NewPinnedDoc returns a PinnedDoc. +func NewPinnedDoc() *PinnedDoc { + r := &PinnedDoc{} + + return r +} + +type PinnedDocVariant interface { + PinnedDocCaster() *PinnedDoc +} + +func (s *PinnedDoc) PinnedDocCaster() *PinnedDoc { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedquery.go new file mode 100644 index 000000000..b75da71d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedquery.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PinnedQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L247-L267 +type PinnedQuery struct { + AdditionalPinnedQueryProperty map[string]json.RawMessage `json:"-"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Docs Documents listed in the order they are to appear in results. + // Required if `ids` is not specified. + Docs []PinnedDoc `json:"docs,omitempty"` + // Ids Document IDs listed in the order they are to appear in results. + // Required if `docs` is not specified. + Ids []string `json:"ids,omitempty"` + // Organic Any choice of query used to rank documents which will be ranked below the + // "pinned" documents. 
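+	// For example, pinning two IDs ahead of an organic match (illustrative):
+	//
+	//	{"ids": ["1", "4"], "organic": {"match": {"description": "iphone"}}}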
+ Organic Query `json:"organic"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *PinnedQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + case "organic": + if err := dec.Decode(&s.Organic); err != nil { + return fmt.Errorf("%s | %w", "Organic", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + if key, ok := t.(string); ok { + if s.AdditionalPinnedQueryProperty == nil { + s.AdditionalPinnedQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalPinnedQueryProperty", err) + } + s.AdditionalPinnedQueryProperty[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s PinnedQuery) MarshalJSON() ([]byte, error) { + type opt PinnedQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPinnedQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPinnedQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewPinnedQuery returns a PinnedQuery. +func NewPinnedQuery() *PinnedQuery { + r := &PinnedQuery{ + AdditionalPinnedQueryProperty: make(map[string]json.RawMessage), + } + + return r +} + +type PinnedQueryVariant interface { + PinnedQueryCaster() *PinnedQuery +} + +func (s *PinnedQuery) PinnedQueryCaster() *PinnedQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedretriever.go new file mode 100644 index 000000000..bc0823031 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pinnedretriever.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PinnedRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L77-L83 +type PinnedRetriever struct { + Docs []SpecifiedDocument `json:"docs,omitempty"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + Ids []string `json:"ids,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retriever Inner retriever. + Retriever RetrieverContainer `json:"retriever"` +} + +func (s *PinnedRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + } + } + return nil +} + +// NewPinnedRetriever returns a PinnedRetriever. 
+func NewPinnedRetriever() *PinnedRetriever { + r := &PinnedRetriever{} + + return r +} + +type PinnedRetrieverVariant interface { + PinnedRetrieverCaster() *PinnedRetriever +} + +func (s *PinnedRetriever) PinnedRetrieverCaster() *PinnedRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineconfig.go new file mode 100644 index 000000000..7e8181a26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineconfig.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PipelineConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Pipeline.ts#L67-L81 +type PipelineConfig struct { + // Description Description of the ingest pipeline. + Description *string `json:"description,omitempty"` + // Processors Processors used to perform transformations on documents before indexing. + // Processors run sequentially in the order specified. + Processors []ProcessorContainer `json:"processors"` + // Version Version number used by external systems to track ingest pipelines. + Version *int64 `json:"version,omitempty"` +} + +func (s *PipelineConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return fmt.Errorf("%s | %w", "Processors", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewPipelineConfig returns a PipelineConfig. 
+func NewPipelineConfig() *PipelineConfig { + r := &PipelineConfig{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinemetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinemetadata.go new file mode 100644 index 000000000..ec78cd513 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinemetadata.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PipelineMetadata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/logstash/_types/Pipeline.ts#L23-L26 +type PipelineMetadata struct { + Type string `json:"type"` + Version string `json:"version"` +} + +func (s *PipelineMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + case "version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Version = o + + } + } + return nil +} + +// NewPipelineMetadata returns a PipelineMetadata. +func NewPipelineMetadata() *PipelineMetadata { + r := &PipelineMetadata{} + + return r +} + +type PipelineMetadataVariant interface { + PipelineMetadataCaster() *PipelineMetadata +} + +func (s *PipelineMetadata) PipelineMetadataCaster() *PipelineMetadata { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessor.go new file mode 100644 index 000000000..02306e383 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessor.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PipelineProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1285-L1296 +type PipelineProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissingPipeline Whether to ignore missing pipelines instead of failing. + IgnoreMissingPipeline *bool `json:"ignore_missing_pipeline,omitempty"` + // Name The name of the pipeline to execute. + // Supports template snippets. + Name string `json:"name"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` +} + +func (s *PipelineProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing_pipeline": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissingPipeline", err) + } + s.IgnoreMissingPipeline = &value + case bool: + s.IgnoreMissingPipeline = &v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewPipelineProcessor returns a PipelineProcessor. +func NewPipelineProcessor() *PipelineProcessor { + r := &PipelineProcessor{} + + return r +} + +type PipelineProcessorVariant interface { + PipelineProcessorCaster() *PipelineProcessor +} + +func (s *PipelineProcessor) PipelineProcessorCaster() *PipelineProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessorresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessorresult.go new file mode 100644 index 000000000..d5105d771 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelineprocessorresult.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions" +) + +// PipelineProcessorResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L59-L67 +type PipelineProcessorResult struct { + Description *string `json:"description,omitempty"` + Doc *DocumentSimulation `json:"doc,omitempty"` + Error *ErrorCause `json:"error,omitempty"` + IgnoredError *ErrorCause `json:"ignored_error,omitempty"` + ProcessorType *string `json:"processor_type,omitempty"` + Status *pipelinesimulationstatusoptions.PipelineSimulationStatusOptions `json:"status,omitempty"` + Tag *string `json:"tag,omitempty"` +} + +func (s *PipelineProcessorResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "ignored_error": + if err := dec.Decode(&s.IgnoredError); err != nil { + return fmt.Errorf("%s | %w", "IgnoredError", err) + } + + case "processor_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProcessorType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProcessorType = &o + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewPipelineProcessorResult returns a PipelineProcessorResult. +func NewPipelineProcessorResult() *PipelineProcessorResult { + r := &PipelineProcessorResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinesettings.go new file mode 100644 index 000000000..a98c251f1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipelinesettings.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PipelineSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/logstash/_types/Pipeline.ts#L28-L55 +type PipelineSettings struct { + // PipelineBatchDelay When creating pipeline event batches, how long in milliseconds to wait for + // each event before dispatching an undersized batch to pipeline workers. + PipelineBatchDelay int `json:"pipeline.batch.delay"` + // PipelineBatchSize The maximum number of events an individual worker thread will collect from + // inputs before attempting to execute its filters and outputs. + PipelineBatchSize int `json:"pipeline.batch.size"` + // PipelineWorkers The number of workers that will, in parallel, execute the filter and output + // stages of the pipeline. + PipelineWorkers int `json:"pipeline.workers"` + // QueueCheckpointWrites The maximum number of written events before forcing a checkpoint when + // persistent queues are enabled (`queue.type: persisted`). + QueueCheckpointWrites int `json:"queue.checkpoint.writes"` + // QueueMaxBytes The total capacity of the queue (`queue.type: persisted`) in number of bytes. + QueueMaxBytes string `json:"queue.max_bytes"` + // QueueType The internal queuing model to use for event buffering. 
+ QueueType string `json:"queue.type"` +} + +func (s *PipelineSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pipeline.batch.delay": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PipelineBatchDelay", err) + } + s.PipelineBatchDelay = value + case float64: + f := int(v) + s.PipelineBatchDelay = f + } + + case "pipeline.batch.size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PipelineBatchSize", err) + } + s.PipelineBatchSize = value + case float64: + f := int(v) + s.PipelineBatchSize = f + } + + case "pipeline.workers": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PipelineWorkers", err) + } + s.PipelineWorkers = value + case float64: + f := int(v) + s.PipelineWorkers = f + } + + case "queue.checkpoint.writes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "QueueCheckpointWrites", err) + } + s.QueueCheckpointWrites = value + case float64: + f := int(v) + s.QueueCheckpointWrites = f + } + + case "queue.max_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueueMaxBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueMaxBytes = o + + case "queue.type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueueType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueType = o + + } + } + return nil +} + +// NewPipelineSettings returns a PipelineSettings. +func NewPipelineSettings() *PipelineSettings { + r := &PipelineSettings{} + + return r +} + +type PipelineSettingsVariant interface { + PipelineSettingsCaster() *PipelineSettings +} + +func (s *PipelineSettings) PipelineSettingsCaster() *PipelineSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipeseparatedflagssimplequerystringflag.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipeseparatedflagssimplequerystringflag.go new file mode 100644 index 000000000..542d271cd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pipeseparatedflagssimplequerystringflag.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// PipeSeparatedFlagsSimpleQueryStringFlag holds the union for the following types: +// +// simplequerystringflag.SimpleQueryStringFlag +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 +type PipeSeparatedFlagsSimpleQueryStringFlag any + +type PipeSeparatedFlagsSimpleQueryStringFlagVariant interface { + PipeSeparatedFlagsSimpleQueryStringFlagCaster() *PipeSeparatedFlagsSimpleQueryStringFlag +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivot.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivot.go new file mode 100644 index 000000000..349480bb9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivot.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Pivot type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L54-L68 +type Pivot struct { + // Aggregations Defines how to aggregate the grouped data. The following aggregations are + // currently supported: average, bucket + // script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo + // line, max, median absolute deviation, + // min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, + // top metrics, value count, weighted + // average. + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // GroupBy Defines how to group the data. More than one grouping can be defined per + // pivot. The following groupings are + // currently supported: date histogram, geotile grid, histogram, terms. + GroupBy map[string]PivotGroupByContainer `json:"group_by,omitempty"` +} + +// NewPivot returns a Pivot. 
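+//
+// A construction sketch (the TermsAggregation value is shown empty for
+// brevity; its fields are defined elsewhere in this package):
+//
+//	p := NewPivot()
+//	p.GroupBy["by_field"] = PivotGroupByContainer{Terms: &TermsAggregation{}}
+//	p.Aggregations["total"] = Aggregations{}
+//	data, _ := json.Marshal(p)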
+func NewPivot() *Pivot {
+	r := &Pivot{
+		Aggregations: make(map[string]Aggregations),
+		GroupBy: make(map[string]PivotGroupByContainer),
+	}
+
+	return r
+}
+
+type PivotVariant interface {
+	PivotCaster() *Pivot
+}
+
+func (s *Pivot) PivotCaster() *Pivot {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivotgroupbycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivotgroupbycontainer.go
new file mode 100644
index 000000000..a4805943d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pivotgroupbycontainer.go
@@ -0,0 +1,83 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// PivotGroupByContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L70-L78
+type PivotGroupByContainer struct {
+	AdditionalPivotGroupByContainerProperty map[string]json.RawMessage `json:"-"`
+	DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"`
+	GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"`
+	Histogram *HistogramAggregation `json:"histogram,omitempty"`
+	Terms *TermsAggregation `json:"terms,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s PivotGroupByContainer) MarshalJSON() ([]byte, error) {
+	type opt PivotGroupByContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalPivotGroupByContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalPivotGroupByContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewPivotGroupByContainer returns a PivotGroupByContainer.
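+//
+// A sketch of how the MarshalJSON override above inlines additional
+// properties (the "my_group_by" key is an assumed example):
+//
+//	c := NewPivotGroupByContainer()
+//	c.AdditionalPivotGroupByContainerProperty["my_group_by"] = json.RawMessage(`{"field":"x"}`)
+//	data, _ := json.Marshal(c)
+//	// data: {"my_group_by":{"field":"x"}}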
+func NewPivotGroupByContainer() *PivotGroupByContainer { + r := &PivotGroupByContainer{ + AdditionalPivotGroupByContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type PivotGroupByContainerVariant interface { + PivotGroupByContainerCaster() *PivotGroupByContainer +} + +func (s *PivotGroupByContainer) PivotGroupByContainerCaster() *PivotGroupByContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsrecord.go new file mode 100644 index 000000000..69f4b05e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsrecord.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PluginsRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/plugins/types.ts#L22-L52 +type PluginsRecord struct { + // Component The component name. + Component *string `json:"component,omitempty"` + // Description The plugin details. + Description *string `json:"description,omitempty"` + // Id The unique node identifier. + Id *string `json:"id,omitempty"` + // Name The node name. + Name *string `json:"name,omitempty"` + // Type The plugin type. + Type *string `json:"type,omitempty"` + // Version The component version. 
+ Version *string `json:"version,omitempty"` +} + +func (s *PluginsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "component", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Component", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Component = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewPluginsRecord returns a PluginsRecord. +func NewPluginsRecord() *PluginsRecord { + r := &PluginsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsstatus.go new file mode 100644 index 000000000..6fb58e1ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginsstatus.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus" +) + +// PluginsStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 +type PluginsStatus struct { + Status shutdownstatus.ShutdownStatus `json:"status"` +} + +// NewPluginsStatus returns a PluginsStatus. 
+func NewPluginsStatus() *PluginsStatus { + r := &PluginsStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginstats.go new file mode 100644 index 000000000..4947de5b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pluginstats.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PluginStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L207-L217 +type PluginStats struct { + Classname string `json:"classname"` + Description string `json:"description"` + ElasticsearchVersion string `json:"elasticsearch_version"` + ExtendedPlugins []string `json:"extended_plugins"` + HasNativeController bool `json:"has_native_controller"` + JavaVersion string `json:"java_version"` + Licensed bool `json:"licensed"` + Name string `json:"name"` + Version string `json:"version"` +} + +func (s *PluginStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Classname", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Classname = o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "elasticsearch_version": + if err := dec.Decode(&s.ElasticsearchVersion); err != nil { + return fmt.Errorf("%s | %w", "ElasticsearchVersion", err) + } + + case "extended_plugins": + if err := dec.Decode(&s.ExtendedPlugins); err != nil { + return fmt.Errorf("%s | %w", "ExtendedPlugins", err) + } + + case "has_native_controller": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "HasNativeController", err) + } + s.HasNativeController = value + case bool: + s.HasNativeController = v + } + + case "java_version": + if err := dec.Decode(&s.JavaVersion); err != nil { + return fmt.Errorf("%s | 
%w", "JavaVersion", err) + } + + case "licensed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Licensed", err) + } + s.Licensed = value + case bool: + s.Licensed = v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewPluginStats returns a PluginStats. +func NewPluginStats() *PluginStats { + r := &PluginStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointintimereference.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointintimereference.go new file mode 100644 index 000000000..ff59c76db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointintimereference.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PointInTimeReference type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 +type PointInTimeReference struct { + Id string `json:"id"` + KeepAlive Duration `json:"keep_alive,omitempty"` +} + +func (s *PointInTimeReference) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + + } + } + return nil +} + +// NewPointInTimeReference returns a PointInTimeReference. 
+func NewPointInTimeReference() *PointInTimeReference { + r := &PointInTimeReference{} + + return r +} + +type PointInTimeReferenceVariant interface { + PointInTimeReferenceCaster() *PointInTimeReference +} + +func (s *PointInTimeReference) PointInTimeReferenceCaster() *PointInTimeReference { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointproperty.go new file mode 100644 index 000000000..c16538edf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pointproperty.go @@ -0,0 +1,918 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// PointProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L74-L79 +type PointProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreZValue *bool `json:"ignore_z_value,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PointProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "ignore_z_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreZValue", err) + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { 
+ return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := 
new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PointProperty) MarshalJSON() ([]byte, error) { + type innerPointProperty PointProperty + tmp := innerPointProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "point" + + return json.Marshal(tmp) +} + +// NewPointProperty returns a PointProperty. +func NewPointProperty() *PointProperty { + r := &PointProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type PointPropertyVariant interface { + PointPropertyCaster() *PointProperty +} + +func (s *PointProperty) PointPropertyCaster() *PointProperty { + return s +} + +func (s *PointProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pool.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pool.go new file mode 100644 index 000000000..c165e3791 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pool.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Pool type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L956-L973 +type Pool struct { + // MaxInBytes Maximum amount of memory, in bytes, available for use by the heap. 
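+	// Like the other byte counters here, it is decoded leniently: UnmarshalJSON below
+	// accepts both JSON numbers and quoted numeric strings (for example "1073741824").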
+	MaxInBytes *int64 `json:"max_in_bytes,omitempty"`
+	// PeakMaxInBytes Largest amount of memory, in bytes, historically available for use by the heap.
+	PeakMaxInBytes *int64 `json:"peak_max_in_bytes,omitempty"`
+	// PeakUsedInBytes Largest amount of memory, in bytes, historically used by the heap.
+	PeakUsedInBytes *int64 `json:"peak_used_in_bytes,omitempty"`
+	// UsedInBytes Memory, in bytes, used by the heap.
+	UsedInBytes *int64 `json:"used_in_bytes,omitempty"`
+}
+
+func (s *Pool) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "max_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxInBytes", err)
+				}
+				s.MaxInBytes = &value
+			case float64:
+				f := int64(v)
+				s.MaxInBytes = &f
+			}
+
+		case "peak_max_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PeakMaxInBytes", err)
+				}
+				s.PeakMaxInBytes = &value
+			case float64:
+				f := int64(v)
+				s.PeakMaxInBytes = &f
+			}
+
+		case "peak_used_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PeakUsedInBytes", err)
+				}
+				s.PeakUsedInBytes = &value
+			case float64:
+				f := int64(v)
+				s.PeakUsedInBytes = &f
+			}
+
+		case "used_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UsedInBytes", err)
+				}
+				s.UsedInBytes = &value
+			case float64:
+				f := int64(v)
+				s.UsedInBytes = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewPool returns a Pool.
+func NewPool() *Pool {
+	r := &Pool{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/porterstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/porterstemtokenfilter.go
new file mode 100644
index 000000000..6cc7033a5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/porterstemtokenfilter.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PorterStemTokenFilter type.
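+// A zero value still serializes as a valid filter definition, since MarshalJSON
+// below pins Type to "porter_stem".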
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L393-L395 +type PorterStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PorterStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PorterStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerPorterStemTokenFilter PorterStemTokenFilter + tmp := innerPorterStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "porter_stem" + + return json.Marshal(tmp) +} + +// NewPorterStemTokenFilter returns a PorterStemTokenFilter. +func NewPorterStemTokenFilter() *PorterStemTokenFilter { + r := &PorterStemTokenFilter{} + + return r +} + +type PorterStemTokenFilterVariant interface { + PorterStemTokenFilterCaster() *PorterStemTokenFilter +} + +func (s *PorterStemTokenFilter) PorterStemTokenFilterCaster() *PorterStemTokenFilter { + return s +} + +func (s *PorterStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/portugueseanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/portugueseanalyzer.go new file mode 100644 index 000000000..59c5f2197 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/portugueseanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PortugueseAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L261-L266 +type PortugueseAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *PortugueseAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PortugueseAnalyzer) MarshalJSON() ([]byte, error) { + type innerPortugueseAnalyzer PortugueseAnalyzer + tmp := innerPortugueseAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "portuguese" + + return json.Marshal(tmp) +} + +// NewPortugueseAnalyzer returns a PortugueseAnalyzer. +func NewPortugueseAnalyzer() *PortugueseAnalyzer { + r := &PortugueseAnalyzer{} + + return r +} + +type PortugueseAnalyzerVariant interface { + PortugueseAnalyzerCaster() *PortugueseAnalyzer +} + +func (s *PortugueseAnalyzer) PortugueseAnalyzerCaster() *PortugueseAnalyzer { + return s +} + +func (s *PortugueseAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/postmigrationfeature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/postmigrationfeature.go new file mode 100644 index 000000000..59f9fa5f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/postmigrationfeature.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PostMigrationFeature type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L28-L30 +type PostMigrationFeature struct { + FeatureName string `json:"feature_name"` +} + +func (s *PostMigrationFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + } + } + return nil +} + +// NewPostMigrationFeature returns a PostMigrationFeature. +func NewPostMigrationFeature() *PostMigrationFeature { + r := &PostMigrationFeature{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predicatetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predicatetokenfilter.go new file mode 100644 index 000000000..23681aeab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predicatetokenfilter.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// PredicateTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L397-L401 +type PredicateTokenFilter struct { + // Script Script containing a condition used to filter incoming tokens. Only tokens + // that match this script are included in the output. 
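+	// For example, a hypothetical Painless predicate such as "token.term.length() > 5"
+	// would keep only tokens longer than five characters.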
+ Script Script `json:"script"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PredicateTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PredicateTokenFilter) MarshalJSON() ([]byte, error) { + type innerPredicateTokenFilter PredicateTokenFilter + tmp := innerPredicateTokenFilter{ + Script: s.Script, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "predicate_token_filter" + + return json.Marshal(tmp) +} + +// NewPredicateTokenFilter returns a PredicateTokenFilter. +func NewPredicateTokenFilter() *PredicateTokenFilter { + r := &PredicateTokenFilter{} + + return r +} + +type PredicateTokenFilterVariant interface { + PredicateTokenFilterCaster() *PredicateTokenFilter +} + +func (s *PredicateTokenFilter) PredicateTokenFilterCaster() *PredicateTokenFilter { + return s +} + +func (s *PredicateTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predictedvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predictedvalue.go new file mode 100644 index 000000000..c84f7b196 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/predictedvalue.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// PredictedValue type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L476-L476 +type PredictedValue []ScalarValue diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/prefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/prefixquery.go new file mode 100644 index 000000000..356bb23a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/prefixquery.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PrefixQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L98-L120 +type PrefixQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows ASCII case insensitive matching of the value with the indexed field + // values when set to `true`. + // Default is `false` which means the case sensitivity of matching depends on + // the underlying field’s mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Value Beginning characters of terms you wish to find in the provided field. + Value string `json:"value"` +} + +func (s *PrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
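+			// data held a bare scalar (the shortcut form of the query) rather than an
+			// object; it was quoted above so it can be decoded directly into Value.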
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseInsensitive", err) + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + +// NewPrefixQuery returns a PrefixQuery. +func NewPrefixQuery() *PrefixQuery { + r := &PrefixQuery{} + + return r +} + +type PrefixQueryVariant interface { + PrefixQueryCaster() *PrefixQuery +} + +func (s *PrefixQuery) PrefixQueryCaster() *PrefixQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/preprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/preprocessor.go new file mode 100644 index 000000000..ec810d2d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/preprocessor.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// Preprocessor type. 
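+// Only one of the encoding variants below is expected to be set at a time; any
+// unknown keys are kept in AdditionalPreprocessorProperty and inlined again by
+// MarshalJSON.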
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L31-L36 +type Preprocessor struct { + AdditionalPreprocessorProperty map[string]json.RawMessage `json:"-"` + FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` + OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` + TargetMeanEncoding *TargetMeanEncodingPreprocessor `json:"target_mean_encoding,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s Preprocessor) MarshalJSON() ([]byte, error) { + type opt Preprocessor + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalPreprocessorProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalPreprocessorProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewPreprocessor returns a Preprocessor. +func NewPreprocessor() *Preprocessor { + r := &Preprocessor{ + AdditionalPreprocessorProperty: make(map[string]json.RawMessage), + } + + return r +} + +type PreprocessorVariant interface { + PreprocessorCaster() *Preprocessor +} + +func (s *Preprocessor) PreprocessorCaster() *Preprocessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pressurememory.go new file mode 100644 index 000000000..53e48e72c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/pressurememory.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PressureMemory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L144-L201 +type PressureMemory struct { + // All Memory consumed by indexing requests in the coordinating, primary, or replica + // stage. + All ByteSize `json:"all,omitempty"` + // AllInBytes Memory consumed, in bytes, by indexing requests in the coordinating, primary, + // or replica stage. 
+ AllInBytes *int64 `json:"all_in_bytes,omitempty"` + // CombinedCoordinatingAndPrimary Memory consumed by indexing requests in the coordinating or primary stage. + // This value is not the sum of coordinating and primary as a node can reuse the + // coordinating memory if the primary stage is executed locally. + CombinedCoordinatingAndPrimary ByteSize `json:"combined_coordinating_and_primary,omitempty"` + // CombinedCoordinatingAndPrimaryInBytes Memory consumed, in bytes, by indexing requests in the coordinating or + // primary stage. + // This value is not the sum of coordinating and primary as a node can reuse the + // coordinating memory if the primary stage is executed locally. + CombinedCoordinatingAndPrimaryInBytes *int64 `json:"combined_coordinating_and_primary_in_bytes,omitempty"` + // Coordinating Memory consumed by indexing requests in the coordinating stage. + Coordinating ByteSize `json:"coordinating,omitempty"` + // CoordinatingInBytes Memory consumed, in bytes, by indexing requests in the coordinating stage. + CoordinatingInBytes *int64 `json:"coordinating_in_bytes,omitempty"` + // CoordinatingRejections Number of indexing requests rejected in the coordinating stage. + CoordinatingRejections *int64 `json:"coordinating_rejections,omitempty"` + LargeOperationRejections *int64 `json:"large_operation_rejections,omitempty"` + // Primary Memory consumed by indexing requests in the primary stage. + Primary ByteSize `json:"primary,omitempty"` + PrimaryDocumentRejections *int64 `json:"primary_document_rejections,omitempty"` + // PrimaryInBytes Memory consumed, in bytes, by indexing requests in the primary stage. + PrimaryInBytes *int64 `json:"primary_in_bytes,omitempty"` + // PrimaryRejections Number of indexing requests rejected in the primary stage. + PrimaryRejections *int64 `json:"primary_rejections,omitempty"` + // Replica Memory consumed by indexing requests in the replica stage. + Replica ByteSize `json:"replica,omitempty"` + // ReplicaInBytes Memory consumed, in bytes, by indexing requests in the replica stage. + ReplicaInBytes *int64 `json:"replica_in_bytes,omitempty"` + // ReplicaRejections Number of indexing requests rejected in the replica stage. 
+ ReplicaRejections *int64 `json:"replica_rejections,omitempty"` +} + +func (s *PressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all": + if err := dec.Decode(&s.All); err != nil { + return fmt.Errorf("%s | %w", "All", err) + } + + case "all_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AllInBytes", err) + } + s.AllInBytes = &value + case float64: + f := int64(v) + s.AllInBytes = &f + } + + case "combined_coordinating_and_primary": + if err := dec.Decode(&s.CombinedCoordinatingAndPrimary); err != nil { + return fmt.Errorf("%s | %w", "CombinedCoordinatingAndPrimary", err) + } + + case "combined_coordinating_and_primary_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CombinedCoordinatingAndPrimaryInBytes", err) + } + s.CombinedCoordinatingAndPrimaryInBytes = &value + case float64: + f := int64(v) + s.CombinedCoordinatingAndPrimaryInBytes = &f + } + + case "coordinating": + if err := dec.Decode(&s.Coordinating); err != nil { + return fmt.Errorf("%s | %w", "Coordinating", err) + } + + case "coordinating_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CoordinatingInBytes", err) + } + s.CoordinatingInBytes = &value + case float64: + f := int64(v) + s.CoordinatingInBytes = &f + } + + case "coordinating_rejections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CoordinatingRejections", err) + } + s.CoordinatingRejections = &value + case float64: + f := int64(v) + s.CoordinatingRejections = &f + } + + case "large_operation_rejections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LargeOperationRejections", err) + } + s.LargeOperationRejections = &value + case float64: + f := int64(v) + s.LargeOperationRejections = &f + } + + case "primary": + if err := dec.Decode(&s.Primary); err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + + case "primary_document_rejections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryDocumentRejections", err) + } + s.PrimaryDocumentRejections = &value + case float64: + f := int64(v) + s.PrimaryDocumentRejections = &f + } + + case "primary_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryInBytes", err) + } + s.PrimaryInBytes = &value + case float64: + f := int64(v) + s.PrimaryInBytes = &f + } + + case "primary_rejections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryRejections", err) + } + s.PrimaryRejections = &value + case float64: + f := int64(v) + 
s.PrimaryRejections = &f + } + + case "replica": + if err := dec.Decode(&s.Replica); err != nil { + return fmt.Errorf("%s | %w", "Replica", err) + } + + case "replica_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReplicaInBytes", err) + } + s.ReplicaInBytes = &value + case float64: + f := int64(v) + s.ReplicaInBytes = &f + } + + case "replica_rejections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReplicaRejections", err) + } + s.ReplicaRejections = &value + case float64: + f := int64(v) + s.ReplicaRejections = &f + } + + } + } + return nil +} + +// NewPressureMemory returns a PressureMemory. +func NewPressureMemory() *PressureMemory { + r := &PressureMemory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privileges.go new file mode 100644 index 000000000..05f3d99d9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privileges.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Privileges type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/types.ts#L49-L49 +type Privileges map[string]bool diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegesactions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegesactions.go new file mode 100644 index 000000000..1268f4410 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegesactions.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PrivilegesActions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/put_privileges/types.ts#L22-L27 +type PrivilegesActions struct { + Actions []string `json:"actions"` + Application *string `json:"application,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *PrivilegesActions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Application", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = &o + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewPrivilegesActions returns a PrivilegesActions. +func NewPrivilegesActions() *PrivilegesActions { + r := &PrivilegesActions{} + + return r +} + +type PrivilegesActionsVariant interface { + PrivilegesActionsCaster() *PrivilegesActions +} + +func (s *PrivilegesActions) PrivilegesActionsCaster() *PrivilegesActions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegescheck.go new file mode 100644 index 000000000..2847a14c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/privilegescheck.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" +) + +// PrivilegesCheck type. 
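+// Mirrors the body of the security has_privileges_user_profile API; each list is
+// optional, but at least one privilege to check is presumably required for the
+// call to be meaningful.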
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges_user_profile/types.ts#L30-L37 +type PrivilegesCheck struct { + Application []ApplicationPrivilegesCheck `json:"application,omitempty"` + // Cluster A list of the cluster privileges that you want to check. + Cluster []clusterprivilege.ClusterPrivilege `json:"cluster,omitempty"` + Index []IndexPrivilegesCheck `json:"index,omitempty"` +} + +// NewPrivilegesCheck returns a PrivilegesCheck. +func NewPrivilegesCheck() *PrivilegesCheck { + r := &PrivilegesCheck{} + + return r +} + +type PrivilegesCheckVariant interface { + PrivilegesCheckCaster() *PrivilegesCheck +} + +func (s *PrivilegesCheck) PrivilegesCheckCaster() *PrivilegesCheck { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/process.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/process.go new file mode 100644 index 000000000..e659b4c7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/process.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Process type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1031-L1053 +type Process struct { + // Cpu Contains CPU statistics for the node. + Cpu *Cpu `json:"cpu,omitempty"` + // MaxFileDescriptors Maximum number of file descriptors allowed on the system, or `-1` if not + // supported. + MaxFileDescriptors *int `json:"max_file_descriptors,omitempty"` + // Mem Contains virtual memory statistics for the node. + Mem *MemoryStats `json:"mem,omitempty"` + // OpenFileDescriptors Number of opened file descriptors associated with the current or `-1` if not + // supported. + OpenFileDescriptors *int `json:"open_file_descriptors,omitempty"` + // Timestamp Last time the statistics were refreshed. + // Recorded in milliseconds since the Unix Epoch. 
+ Timestamp *int64 `json:"timestamp,omitempty"` +} + +func (s *Process) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return fmt.Errorf("%s | %w", "Cpu", err) + } + + case "max_file_descriptors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxFileDescriptors", err) + } + s.MaxFileDescriptors = &value + case float64: + f := int(v) + s.MaxFileDescriptors = &f + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return fmt.Errorf("%s | %w", "Mem", err) + } + + case "open_file_descriptors": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "OpenFileDescriptors", err) + } + s.OpenFileDescriptors = &value + case float64: + f := int(v) + s.OpenFileDescriptors = &f + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + } + } + return nil +} + +// NewProcess returns a Process. +func NewProcess() *Process { + r := &Process{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processor.go new file mode 100644 index 000000000..182b38575 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processor.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Processor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L422-L439 +type Processor struct { + // Count Number of documents transformed by the processor. + Count *int64 `json:"count,omitempty"` + // Current Number of documents currently being transformed by the processor. + Current *int64 `json:"current,omitempty"` + // Failed Number of failed operations for the processor. 
+ Failed *int64 `json:"failed,omitempty"` + // TimeInMillis Time, in milliseconds, spent by the processor transforming documents. + TimeInMillis *int64 `json:"time_in_millis,omitempty"` +} + +func (s *Processor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = &value + case float64: + f := int64(v) + s.Current = &f + } + + case "failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = &value + case float64: + f := int64(v) + s.Failed = &f + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + } + } + return nil +} + +// NewProcessor returns a Processor. +func NewProcessor() *Processor { + r := &Processor{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processorcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processorcontainer.go new file mode 100644 index 000000000..4855371b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/processorcontainer.go @@ -0,0 +1,264 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// ProcessorContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L28-L302 +type ProcessorContainer struct { + AdditionalProcessorContainerProperty map[string]json.RawMessage `json:"-"` + // Append Appends one or more values to an existing array if the field already exists + // and it is an array. + // Converts a scalar to an array and appends one or more values to it if the + // field exists and it is a scalar. + // Creates an array containing the provided values if the field doesn’t exist. 
+	// Accepts a single value or an array of values.
+	Append *AppendProcessor `json:"append,omitempty"`
+	// Attachment The attachment processor lets Elasticsearch extract file attachments in
+	// common formats (such as PPT, XLS, and PDF) by using the Apache text
+	// extraction library Tika.
+	Attachment *AttachmentProcessor `json:"attachment,omitempty"`
+	// Bytes Converts a human readable byte value (for example `1kb`) to its value in
+	// bytes (for example `1024`).
+	// If the field is an array of strings, all members of the array will be
+	// converted.
+	// Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case
+	// insensitive.
+	// An error will occur if the field is not a supported format or the resultant
+	// value exceeds 2^63.
+	Bytes *BytesProcessor `json:"bytes,omitempty"`
+	// Circle Converts circle definitions of shapes to regular polygons which approximate
+	// them.
+	Circle *CircleProcessor `json:"circle,omitempty"`
+	// CommunityId Computes the Community ID for network flow data as defined in the
+	// Community ID Specification. You can use a community ID to correlate network
+	// events related to a single flow.
+	CommunityId *CommunityIDProcessor `json:"community_id,omitempty"`
+	// Convert Converts a field in the currently ingested document to a different type, such
+	// as converting a string to an integer.
+	// If the field value is an array, all members will be converted.
+	Convert *ConvertProcessor `json:"convert,omitempty"`
+	// Csv Extracts fields from a CSV line out of a single text field within a document.
+	// Any empty field in the CSV will be skipped.
+	Csv *CsvProcessor `json:"csv,omitempty"`
+	// Date Parses dates from fields, and then uses the date or timestamp as the
+	// timestamp for the document.
+	Date *DateProcessor `json:"date,omitempty"`
+	// DateIndexName The purpose of this processor is to point documents to the right time-based
+	// index, based on a date or timestamp field in a document, by using the date
+	// math index name support.
+	DateIndexName *DateIndexNameProcessor `json:"date_index_name,omitempty"`
+	// Dissect Extracts structured fields out of a single text field by matching the text
+	// field against a delimiter-based pattern.
+	Dissect *DissectProcessor `json:"dissect,omitempty"`
+	// DotExpander Expands a field with dots into an object field.
+	// This processor allows fields with dots in the name to be accessible by other
+	// processors in the pipeline.
+	// Otherwise these fields can’t be accessed by any processor.
+	DotExpander *DotExpanderProcessor `json:"dot_expander,omitempty"`
+	// Drop Drops the document without raising any errors.
+	// This is useful to prevent the document from getting indexed based on some
+	// condition.
+	Drop *DropProcessor `json:"drop,omitempty"`
+	// Enrich The `enrich` processor can enrich documents with data from another index.
+	Enrich *EnrichProcessor `json:"enrich,omitempty"`
+	// Fail Raises an exception.
+	// This is useful for when you expect a pipeline to fail and want to relay a
+	// specific message to the requester.
+	Fail *FailProcessor `json:"fail,omitempty"`
+	// Fingerprint Computes a hash of the document’s content. You can use this hash for
+	// content fingerprinting.
+	Fingerprint *FingerprintProcessor `json:"fingerprint,omitempty"`
+	// Foreach Runs an ingest processor on each element of an array or object.
+	Foreach *ForeachProcessor `json:"foreach,omitempty"`
+	// GeoGrid Converts geo-grid definitions of grid tiles or cells to regular bounding
+	// boxes or polygons which describe their shape.
+	// This is useful if there is a need to interact with the tile shapes as
+	// spatially indexable fields.
+	GeoGrid *GeoGridProcessor `json:"geo_grid,omitempty"`
+	// Geoip The `geoip` processor adds information about the geographical location of an
+	// IPv4 or IPv6 address.
+	Geoip *GeoIpProcessor `json:"geoip,omitempty"`
+	// Grok Extracts structured fields out of a single text field within a document.
+	// You choose which field to extract matched fields from, as well as the grok
+	// pattern you expect will match.
+	// A grok pattern is like a regular expression that supports aliased expressions
+	// that can be reused.
+	Grok *GrokProcessor `json:"grok,omitempty"`
+	// Gsub Converts a string field by applying a regular expression and a replacement.
+	// If the field is an array of strings, all members of the array will be
+	// converted.
+	// If any non-string values are encountered, the processor will throw an
+	// exception.
+	Gsub *GsubProcessor `json:"gsub,omitempty"`
+	// HtmlStrip Removes HTML tags from the field.
+	// If the field is an array of strings, HTML tags will be removed from all
+	// members of the array.
+	HtmlStrip *HtmlStripProcessor `json:"html_strip,omitempty"`
+	// Inference Uses a pre-trained data frame analytics model or a model deployed for natural
+	// language processing tasks to infer against the data that is being ingested in
+	// the pipeline.
+	Inference *InferenceProcessor `json:"inference,omitempty"`
+	// IpLocation Currently an undocumented alias for GeoIP Processor.
+	IpLocation *IpLocationProcessor `json:"ip_location,omitempty"`
+	// Join Joins each element of an array into a single string using a separator
+	// character between each element.
+	// Throws an error when the field is not an array.
+	Join *JoinProcessor `json:"join,omitempty"`
+	// Json Converts a JSON string into a structured JSON object.
+	Json *JsonProcessor `json:"json,omitempty"`
+	// Kv This processor helps automatically parse messages (or specific event fields)
+	// which are of the `foo=bar` variety.
+	Kv *KeyValueProcessor `json:"kv,omitempty"`
+	// Lowercase Converts a string to its lowercase equivalent.
+	// If the field is an array of strings, all members of the array will be
+	// converted.
+	Lowercase *LowercaseProcessor `json:"lowercase,omitempty"`
+	// NetworkDirection Calculates the network direction given a source IP address, destination IP
+	// address, and a list of internal networks.
+	NetworkDirection *NetworkDirectionProcessor `json:"network_direction,omitempty"`
+	// Pipeline Executes another pipeline.
+	Pipeline *PipelineProcessor `json:"pipeline,omitempty"`
+	// Redact The Redact processor uses the Grok rules engine to obscure text in the input
+	// document matching the given Grok patterns.
+	// The processor can be used to obscure Personal Identifying Information (PII)
+	// by configuring it to detect known patterns such as email or IP addresses.
+	// Text that matches a Grok pattern is replaced with a configurable string such
+	// as `<EMAIL>` where an email address is matched or simply replace all matches
+	// with the text `<REDACTED>` if preferred.
+ Redact *RedactProcessor `json:"redact,omitempty"` + // RegisteredDomain Extracts the registered domain (also known as the effective top-level + // domain or eTLD), sub-domain, and top-level domain from a fully qualified + // domain name (FQDN). Uses the registered domains defined in the Mozilla + // Public Suffix List. + RegisteredDomain *RegisteredDomainProcessor `json:"registered_domain,omitempty"` + // Remove Removes existing fields. + // If one field doesn’t exist, an exception will be thrown. + Remove *RemoveProcessor `json:"remove,omitempty"` + // Rename Renames an existing field. + // If the field doesn’t exist or the new name is already used, an exception will + // be thrown. + Rename *RenameProcessor `json:"rename,omitempty"` + // Reroute Routes a document to another target index or data stream. + // When setting the `destination` option, the target is explicitly specified and + // the dataset and namespace options can’t be set. + // When the `destination` option is not set, this processor is in a data stream + // mode. Note that in this mode, the reroute processor can only be used on data + // streams that follow the data stream naming scheme. + Reroute *RerouteProcessor `json:"reroute,omitempty"` + // Script Runs an inline or stored script on incoming documents. + // The script runs in the `ingest` context. + Script *ScriptProcessor `json:"script,omitempty"` + // Set Adds a field with the specified value. + // If the field already exists, its value will be replaced with the provided + // one. + Set *SetProcessor `json:"set,omitempty"` + // SetSecurityUser Sets user-related details (such as `username`, `roles`, `email`, `full_name`, + // `metadata`, `api_key`, `realm` and `authentication_type`) from the current + // authenticated user to the current document by pre-processing the ingest. + SetSecurityUser *SetSecurityUserProcessor `json:"set_security_user,omitempty"` + // Sort Sorts the elements of an array ascending or descending. + // Homogeneous arrays of numbers will be sorted numerically, while arrays of + // strings or heterogeneous arrays of strings + numbers will be sorted + // lexicographically. + // Throws an error when the field is not an array. + Sort *SortProcessor `json:"sort,omitempty"` + // Split Splits a field into an array using a separator character. + // Only works on string fields. + Split *SplitProcessor `json:"split,omitempty"` + // Terminate Terminates the current ingest pipeline, causing no further processors to be + // run. + // This will normally be executed conditionally, using the `if` option. + Terminate *TerminateProcessor `json:"terminate,omitempty"` + // Trim Trims whitespace from a field. + // If the field is an array of strings, all members of the array will be + // trimmed. + // This only works on leading and trailing whitespace. + Trim *TrimProcessor `json:"trim,omitempty"` + // Uppercase Converts a string to its uppercase equivalent. + // If the field is an array of strings, all members of the array will be + // converted. + Uppercase *UppercaseProcessor `json:"uppercase,omitempty"` + // UriParts Parses a Uniform Resource Identifier (URI) string and extracts its components + // as an object. + // This URI object includes properties for the URI’s domain, path, fragment, + // port, query, scheme, user info, username, and password. + UriParts *UriPartsProcessor `json:"uri_parts,omitempty"` + // Urldecode URL-decodes a string. + // If the field is an array of strings, all members of the array will be + // decoded. 
+	Urldecode *UrlDecodeProcessor `json:"urldecode,omitempty"`
+	// UserAgent The `user_agent` processor extracts details from the user agent string a
+	// browser sends with its web requests.
+	// This processor adds this information by default under the `user_agent` field.
+	UserAgent *UserAgentProcessor `json:"user_agent,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ProcessorContainer) MarshalJSON() ([]byte, error) {
+	type opt ProcessorContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalProcessorContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalProcessorContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewProcessorContainer returns a ProcessorContainer.
+func NewProcessorContainer() *ProcessorContainer {
+	r := &ProcessorContainer{
+		AdditionalProcessorContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type ProcessorContainerVariant interface {
+	ProcessorContainerCaster() *ProcessorContainer
+}
+
+func (s *ProcessorContainer) ProcessorContainerCaster() *ProcessorContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/profile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/profile.go
new file mode 100644
index 000000000..dc2a4a042
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/profile.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// Profile type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L101-L103
+type Profile struct {
+	Shards []ShardProfile `json:"shards"`
+}
+
+// NewProfile returns a Profile.
+func NewProfile() *Profile {
+	r := &Profile{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/property.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/property.go
new file mode 100644
index 000000000..df9eb049c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/property.go
@@ -0,0 +1,83 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Property holds the union for the following types: +// +// BinaryProperty +// BooleanProperty +// DynamicProperty +// JoinProperty +// KeywordProperty +// MatchOnlyTextProperty +// PercolatorProperty +// RankFeatureProperty +// RankFeaturesProperty +// SearchAsYouTypeProperty +// TextProperty +// VersionProperty +// WildcardProperty +// DateNanosProperty +// DateProperty +// AggregateMetricDoubleProperty +// DenseVectorProperty +// FlattenedProperty +// NestedProperty +// ObjectProperty +// PassthroughObjectProperty +// RankVectorProperty +// SemanticTextProperty +// SparseVectorProperty +// CompletionProperty +// ConstantKeywordProperty +// CountedKeywordProperty +// FieldAliasProperty +// HistogramProperty +// IpProperty +// Murmur3HashProperty +// TokenCountProperty +// GeoPointProperty +// GeoShapeProperty +// PointProperty +// ShapeProperty +// ByteNumberProperty +// DoubleNumberProperty +// FloatNumberProperty +// HalfFloatNumberProperty +// IntegerNumberProperty +// LongNumberProperty +// ScaledFloatNumberProperty +// ShortNumberProperty +// UnsignedLongNumberProperty +// DateRangeProperty +// DoubleRangeProperty +// FloatRangeProperty +// IntegerRangeProperty +// IpRangeProperty +// LongRangeProperty +// IcuCollationProperty +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/Property.ts#L120-L191 +type Property any + +type PropertyVariant interface { + PropertyCaster() *Property +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/publishedclusterstates.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/publishedclusterstates.go new file mode 100644 index 000000000..e03de42d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/publishedclusterstates.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// PublishedClusterStates type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L265-L278 +type PublishedClusterStates struct { + // CompatibleDiffs Number of compatible differences between published cluster states. + CompatibleDiffs *int64 `json:"compatible_diffs,omitempty"` + // FullStates Number of published cluster states. + FullStates *int64 `json:"full_states,omitempty"` + // IncompatibleDiffs Number of incompatible differences between published cluster states. + IncompatibleDiffs *int64 `json:"incompatible_diffs,omitempty"` +} + +func (s *PublishedClusterStates) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compatible_diffs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CompatibleDiffs", err) + } + s.CompatibleDiffs = &value + case float64: + f := int64(v) + s.CompatibleDiffs = &f + } + + case "full_states": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FullStates", err) + } + s.FullStates = &value + case float64: + f := int64(v) + s.FullStates = &f + } + + case "incompatible_diffs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IncompatibleDiffs", err) + } + s.IncompatibleDiffs = &value + case float64: + f := int64(v) + s.IncompatibleDiffs = &f + } + + } + } + return nil +} + +// NewPublishedClusterStates returns a PublishedClusterStates. +func NewPublishedClusterStates() *PublishedClusterStates { + r := &PublishedClusterStates{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queries.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queries.go new file mode 100644 index 000000000..a3ee2410b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queries.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
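+
+// Editor's note, not part of the generated file: the UnmarshalJSON methods in
+// this package (for example PublishedClusterStates above) accept int64 fields
+// encoded either as JSON numbers or as quoted numeric strings. A minimal,
+// hedged sketch of that behavior, using only the API shown in this diff:
+//
+//	s := types.NewPublishedClusterStates()
+//	// Both payloads decode to the same value: strings are parsed with
+//	// strconv.ParseInt, numbers are truncated from float64.
+//	_ = json.Unmarshal([]byte(`{"full_states":"42"}`), s)
+//	_ = json.Unmarshal([]byte(`{"full_states":42}`), s)
+//	fmt.Println(*s.FullStates) // 42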
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Queries type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L430-L432 +type Queries struct { + Cache *CacheQueries `json:"cache,omitempty"` +} + +// NewQueries returns a Queries. +func NewQueries() *Queries { + r := &Queries{} + + return r +} + +type QueriesVariant interface { + QueriesCaster() *Queries +} + +func (s *Queries) QueriesCaster() *Queries { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/query.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/query.go new file mode 100644 index 000000000..ad307c006 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/query.go @@ -0,0 +1,687 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Query type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L103-L434 +type Query struct { + AdditionalQueryProperty map[string]json.RawMessage `json:"-"` + // Bool matches documents matching boolean combinations of other queries. + Bool *BoolQuery `json:"bool,omitempty"` + // Boosting Returns documents matching a `positive` query while reducing the relevance + // score of documents that also match a `negative` query. + Boosting *BoostingQuery `json:"boosting,omitempty"` + // CombinedFields The `combined_fields` query supports searching multiple text fields as if + // their contents had been indexed into one combined field. + CombinedFields *CombinedFieldsQuery `json:"combined_fields,omitempty"` + Common map[string]CommonTermsQuery `json:"common,omitempty"` + // ConstantScore Wraps a filter query and returns every matching document with a relevance + // score equal to the `boost` parameter value. + ConstantScore *ConstantScoreQuery `json:"constant_score,omitempty"` + // DisMax Returns documents matching one or more wrapped queries, called query clauses + // or clauses. + // If a returned document matches multiple query clauses, the `dis_max` query + // assigns the document the highest relevance score from any matching clause, + // plus a tie breaking increment for any additional matching subqueries. 
+ DisMax *DisMaxQuery `json:"dis_max,omitempty"` + // DistanceFeature Boosts the relevance score of documents closer to a provided origin date or + // point. + // For example, you can use this query to give more weight to documents closer + // to a certain date or location. + DistanceFeature DistanceFeatureQuery `json:"distance_feature,omitempty"` + // Exists Returns documents that contain an indexed value for a field. + Exists *ExistsQuery `json:"exists,omitempty"` + // FunctionScore The `function_score` enables you to modify the score of documents that are + // retrieved by a query. + FunctionScore *FunctionScoreQuery `json:"function_score,omitempty"` + // Fuzzy Returns documents that contain terms similar to the search term, as measured + // by a Levenshtein edit distance. + Fuzzy map[string]FuzzyQuery `json:"fuzzy,omitempty"` + // GeoBoundingBox Matches geo_point and geo_shape values that intersect a bounding box. + GeoBoundingBox *GeoBoundingBoxQuery `json:"geo_bounding_box,omitempty"` + // GeoDistance Matches `geo_point` and `geo_shape` values within a given distance of a + // geopoint. + GeoDistance *GeoDistanceQuery `json:"geo_distance,omitempty"` + // GeoGrid Matches `geo_point` and `geo_shape` values that intersect a grid cell from a + // GeoGrid aggregation. + GeoGrid map[string]GeoGridQuery `json:"geo_grid,omitempty"` + GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` + // GeoShape Filter documents indexed using either the `geo_shape` or the `geo_point` + // type. + GeoShape *GeoShapeQuery `json:"geo_shape,omitempty"` + // HasChild Returns parent documents whose joined child documents match a provided query. + HasChild *HasChildQuery `json:"has_child,omitempty"` + // HasParent Returns child documents whose joined parent document matches a provided + // query. + HasParent *HasParentQuery `json:"has_parent,omitempty"` + // Ids Returns documents based on their IDs. + // This query uses document IDs stored in the `_id` field. + Ids *IdsQuery `json:"ids,omitempty"` + // Intervals Returns documents based on the order and proximity of matching terms. + Intervals map[string]IntervalsQuery `json:"intervals,omitempty"` + // Knn Finds the k nearest vectors to a query vector, as measured by a similarity + // metric. knn query finds nearest vectors through approximate search on indexed + // dense_vectors. + Knn *KnnQuery `json:"knn,omitempty"` + // Match Returns documents that match a provided text, number, date or boolean value. + // The provided text is analyzed before matching. + Match map[string]MatchQuery `json:"match,omitempty"` + // MatchAll Matches all documents, giving them all a `_score` of 1.0. + MatchAll *MatchAllQuery `json:"match_all,omitempty"` + // MatchBoolPrefix Analyzes its input and constructs a `bool` query from the terms. + // Each term except the last is used in a `term` query. + // The last term is used in a prefix query. + MatchBoolPrefix map[string]MatchBoolPrefixQuery `json:"match_bool_prefix,omitempty"` + // MatchNone Matches no documents. + MatchNone *MatchNoneQuery `json:"match_none,omitempty"` + // MatchPhrase Analyzes the text and creates a phrase query out of the analyzed text. + MatchPhrase map[string]MatchPhraseQuery `json:"match_phrase,omitempty"` + // MatchPhrasePrefix Returns documents that contain the words of a provided text, in the same + // order as provided. + // The last term of the provided text is treated as a prefix, matching any words + // that begin with that term. 
+ MatchPhrasePrefix map[string]MatchPhrasePrefixQuery `json:"match_phrase_prefix,omitempty"` + // MoreLikeThis Returns documents that are "like" a given set of documents. + MoreLikeThis *MoreLikeThisQuery `json:"more_like_this,omitempty"` + // MultiMatch Enables you to search for a provided text, number, date or boolean value + // across multiple fields. + // The provided text is analyzed before matching. + MultiMatch *MultiMatchQuery `json:"multi_match,omitempty"` + // Nested Wraps another query to search nested fields. + // If an object matches the search, the nested query returns the root parent + // document. + Nested *NestedQuery `json:"nested,omitempty"` + // ParentId Returns child documents joined to a specific parent document. + ParentId *ParentIdQuery `json:"parent_id,omitempty"` + // Percolate Matches queries stored in an index. + Percolate *PercolateQuery `json:"percolate,omitempty"` + // Pinned Promotes selected documents to rank higher than those matching a given query. + Pinned *PinnedQuery `json:"pinned,omitempty"` + // Prefix Returns documents that contain a specific prefix in a provided field. + Prefix map[string]PrefixQuery `json:"prefix,omitempty"` + // QueryString Returns documents based on a provided query string, using a parser with a + // strict syntax. + QueryString *QueryStringQuery `json:"query_string,omitempty"` + // Range Returns documents that contain terms within a provided range. + Range map[string]RangeQuery `json:"range,omitempty"` + // RankFeature Boosts the relevance score of documents based on the numeric value of a + // `rank_feature` or `rank_features` field. + RankFeature *RankFeatureQuery `json:"rank_feature,omitempty"` + // Regexp Returns documents that contain terms matching a regular expression. + Regexp map[string]RegexpQuery `json:"regexp,omitempty"` + Rule *RuleQuery `json:"rule,omitempty"` + // Script Filters documents based on a provided script. + // The script query is typically used in a filter context. + Script *ScriptQuery `json:"script,omitempty"` + // ScriptScore Uses a script to provide a custom score for returned documents. + ScriptScore *ScriptScoreQuery `json:"script_score,omitempty"` + // Semantic A semantic query to semantic_text field types + Semantic *SemanticQuery `json:"semantic,omitempty"` + // Shape Queries documents that contain fields indexed using the `shape` type. + Shape *ShapeQuery `json:"shape,omitempty"` + // SimpleQueryString Returns documents based on a provided query string, using a parser with a + // limited but fault-tolerant syntax. + SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` + // SpanContaining Returns matches which enclose another span query. + SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` + // SpanFieldMasking Wrapper to allow span queries to participate in composite single-field span + // queries by _lying_ about their search field. + SpanFieldMasking *SpanFieldMaskingQuery `json:"span_field_masking,omitempty"` + // SpanFirst Matches spans near the beginning of a field. + SpanFirst *SpanFirstQuery `json:"span_first,omitempty"` + // SpanMulti Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, + // `range`, or `regexp` query) as a `span` query, so it can be nested. + SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"` + // SpanNear Matches spans which are near one another. 
+ // You can specify `slop`, the maximum number of intervening unmatched + // positions, as well as whether matches are required to be in-order. + SpanNear *SpanNearQuery `json:"span_near,omitempty"` + // SpanNot Removes matches which overlap with another span query or which are within x + // tokens before (controlled by the parameter `pre`) or y tokens after + // (controlled by the parameter `post`) another span query. + SpanNot *SpanNotQuery `json:"span_not,omitempty"` + // SpanOr Matches the union of its span clauses. + SpanOr *SpanOrQuery `json:"span_or,omitempty"` + // SpanTerm Matches spans containing a term. + SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"` + // SpanWithin Returns matches which are enclosed inside another span query. + SpanWithin *SpanWithinQuery `json:"span_within,omitempty"` + // SparseVector Using input query vectors or a natural language processing model to convert a + // query into a list of token-weight pairs, queries against a sparse vector + // field. + SparseVector *SparseVectorQuery `json:"sparse_vector,omitempty"` + // Term Returns documents that contain an exact term in a provided field. + // To return a document, the query term must exactly match the queried field's + // value, including whitespace and capitalization. + Term map[string]TermQuery `json:"term,omitempty"` + // Terms Returns documents that contain one or more exact terms in a provided field. + // To return a document, one or more terms must exactly match a field value, + // including whitespace and capitalization. + Terms *TermsQuery `json:"terms,omitempty"` + // TermsSet Returns documents that contain a minimum number of exact terms in a provided + // field. + // To return a document, a required number of terms must exactly match the field + // values, including whitespace and capitalization. + TermsSet map[string]TermsSetQuery `json:"terms_set,omitempty"` + // TextExpansion Uses a natural language processing model to convert the query text into a + // list of token-weight pairs which are then used in a query against a sparse + // vector or rank features field. + TextExpansion map[string]TextExpansionQuery `json:"text_expansion,omitempty"` + Type *TypeQuery `json:"type,omitempty"` + // WeightedTokens Supports returning text_expansion query results by sending in precomputed + // tokens with the query. + WeightedTokens map[string]WeightedTokensQuery `json:"weighted_tokens,omitempty"` + // Wildcard Returns documents that contain terms matching a wildcard pattern. + Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"` + // Wrapper A query that accepts any other query as base64 encoded string. 
+	Wrapper *WrapperQuery `json:"wrapper,omitempty"`
+}
+
+func (s *Query) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bool":
+			if err := dec.Decode(&s.Bool); err != nil {
+				return fmt.Errorf("%s | %w", "Bool", err)
+			}
+
+		case "boosting":
+			if err := dec.Decode(&s.Boosting); err != nil {
+				return fmt.Errorf("%s | %w", "Boosting", err)
+			}
+
+		case "combined_fields":
+			if err := dec.Decode(&s.CombinedFields); err != nil {
+				return fmt.Errorf("%s | %w", "CombinedFields", err)
+			}
+
+		case "common":
+			if s.Common == nil {
+				s.Common = make(map[string]CommonTermsQuery, 0)
+			}
+			if err := dec.Decode(&s.Common); err != nil {
+				return fmt.Errorf("%s | %w", "Common", err)
+			}
+
+		case "constant_score":
+			if err := dec.Decode(&s.ConstantScore); err != nil {
+				return fmt.Errorf("%s | %w", "ConstantScore", err)
+			}
+
+		case "dis_max":
+			if err := dec.Decode(&s.DisMax); err != nil {
+				return fmt.Errorf("%s | %w", "DisMax", err)
+			}
+
+		case "distance_feature":
+			var message json.RawMessage
+			err := dec.Decode(&message)
+			if err != nil {
+				return fmt.Errorf("%s | %w", "DistanceFeature", err)
+			}
+
+			untyped := NewUntypedDistanceFeatureQuery()
+			err = json.Unmarshal(message, &untyped)
+			if err != nil {
+				return fmt.Errorf("%s | %w", "DistanceFeature", err)
+			}
+			s.DistanceFeature = untyped
+
+		case "exists":
+			if err := dec.Decode(&s.Exists); err != nil {
+				return fmt.Errorf("%s | %w", "Exists", err)
+			}
+
+		case "function_score":
+			if err := dec.Decode(&s.FunctionScore); err != nil {
+				return fmt.Errorf("%s | %w", "FunctionScore", err)
+			}
+
+		case "fuzzy":
+			if s.Fuzzy == nil {
+				s.Fuzzy = make(map[string]FuzzyQuery, 0)
+			}
+			if err := dec.Decode(&s.Fuzzy); err != nil {
+				return fmt.Errorf("%s | %w", "Fuzzy", err)
+			}
+
+		case "geo_bounding_box":
+			if err := dec.Decode(&s.GeoBoundingBox); err != nil {
+				return fmt.Errorf("%s | %w", "GeoBoundingBox", err)
+			}
+
+		case "geo_distance":
+			if err := dec.Decode(&s.GeoDistance); err != nil {
+				return fmt.Errorf("%s | %w", "GeoDistance", err)
+			}
+
+		case "geo_grid":
+			if s.GeoGrid == nil {
+				s.GeoGrid = make(map[string]GeoGridQuery, 0)
+			}
+			if err := dec.Decode(&s.GeoGrid); err != nil {
+				return fmt.Errorf("%s | %w", "GeoGrid", err)
+			}
+
+		case "geo_polygon":
+			if err := dec.Decode(&s.GeoPolygon); err != nil {
+				return fmt.Errorf("%s | %w", "GeoPolygon", err)
+			}
+
+		case "geo_shape":
+			if err := dec.Decode(&s.GeoShape); err != nil {
+				return fmt.Errorf("%s | %w", "GeoShape", err)
+			}
+
+		case "has_child":
+			if err := dec.Decode(&s.HasChild); err != nil {
+				return fmt.Errorf("%s | %w", "HasChild", err)
+			}
+
+		case "has_parent":
+			if err := dec.Decode(&s.HasParent); err != nil {
+				return fmt.Errorf("%s | %w", "HasParent", err)
+			}
+
+		case "ids":
+			if err := dec.Decode(&s.Ids); err != nil {
+				return fmt.Errorf("%s | %w", "Ids", err)
+			}
+
+		case "intervals":
+			if s.Intervals == nil {
+				s.Intervals = make(map[string]IntervalsQuery, 0)
+			}
+			if err := dec.Decode(&s.Intervals); err != nil {
+				return fmt.Errorf("%s | %w", "Intervals", err)
+			}
+
+		case "knn":
+			if err := dec.Decode(&s.Knn); err != nil {
+				return fmt.Errorf("%s | %w", "Knn", err)
+			}
+
+		case "match":
+			if s.Match == nil {
+				s.Match = make(map[string]MatchQuery, 0)
+			}
+			if err := dec.Decode(&s.Match); err != nil {
+				return fmt.Errorf("%s | %w", "Match", err)
+			}
+
+		case "match_all":
+			if err := dec.Decode(&s.MatchAll); err != 
nil { + return fmt.Errorf("%s | %w", "MatchAll", err) + } + + case "match_bool_prefix": + if s.MatchBoolPrefix == nil { + s.MatchBoolPrefix = make(map[string]MatchBoolPrefixQuery, 0) + } + if err := dec.Decode(&s.MatchBoolPrefix); err != nil { + return fmt.Errorf("%s | %w", "MatchBoolPrefix", err) + } + + case "match_none": + if err := dec.Decode(&s.MatchNone); err != nil { + return fmt.Errorf("%s | %w", "MatchNone", err) + } + + case "match_phrase": + if s.MatchPhrase == nil { + s.MatchPhrase = make(map[string]MatchPhraseQuery, 0) + } + if err := dec.Decode(&s.MatchPhrase); err != nil { + return fmt.Errorf("%s | %w", "MatchPhrase", err) + } + + case "match_phrase_prefix": + if s.MatchPhrasePrefix == nil { + s.MatchPhrasePrefix = make(map[string]MatchPhrasePrefixQuery, 0) + } + if err := dec.Decode(&s.MatchPhrasePrefix); err != nil { + return fmt.Errorf("%s | %w", "MatchPhrasePrefix", err) + } + + case "more_like_this": + if err := dec.Decode(&s.MoreLikeThis); err != nil { + return fmt.Errorf("%s | %w", "MoreLikeThis", err) + } + + case "multi_match": + if err := dec.Decode(&s.MultiMatch); err != nil { + return fmt.Errorf("%s | %w", "MultiMatch", err) + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return fmt.Errorf("%s | %w", "Nested", err) + } + + case "parent_id": + if err := dec.Decode(&s.ParentId); err != nil { + return fmt.Errorf("%s | %w", "ParentId", err) + } + + case "percolate": + if err := dec.Decode(&s.Percolate); err != nil { + return fmt.Errorf("%s | %w", "Percolate", err) + } + + case "pinned": + if err := dec.Decode(&s.Pinned); err != nil { + return fmt.Errorf("%s | %w", "Pinned", err) + } + + case "prefix": + if s.Prefix == nil { + s.Prefix = make(map[string]PrefixQuery, 0) + } + if err := dec.Decode(&s.Prefix); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + + case "query_string": + if err := dec.Decode(&s.QueryString); err != nil { + return fmt.Errorf("%s | %w", "QueryString", err) + } + + case "range": + if s.Range == nil { + s.Range = make(map[string]RangeQuery, 0) + } + messages := make(map[string]json.RawMessage) + err := dec.Decode(&messages) + if err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + + untyped := NewUntypedRangeQuery() + for key, message := range messages { + err := json.Unmarshal(message, &untyped) + if err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + s.Range[key] = untyped + } + + case "rank_feature": + if err := dec.Decode(&s.RankFeature); err != nil { + return fmt.Errorf("%s | %w", "RankFeature", err) + } + + case "regexp": + if s.Regexp == nil { + s.Regexp = make(map[string]RegexpQuery, 0) + } + if err := dec.Decode(&s.Regexp); err != nil { + return fmt.Errorf("%s | %w", "Regexp", err) + } + + case "rule": + if err := dec.Decode(&s.Rule); err != nil { + return fmt.Errorf("%s | %w", "Rule", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "script_score": + if err := dec.Decode(&s.ScriptScore); err != nil { + return fmt.Errorf("%s | %w", "ScriptScore", err) + } + + case "semantic": + if err := dec.Decode(&s.Semantic); err != nil { + return fmt.Errorf("%s | %w", "Semantic", err) + } + + case "shape": + if err := dec.Decode(&s.Shape); err != nil { + return fmt.Errorf("%s | %w", "Shape", err) + } + + case "simple_query_string": + if err := dec.Decode(&s.SimpleQueryString); err != nil { + return fmt.Errorf("%s | %w", "SimpleQueryString", err) + } + + case "span_containing": + if err := 
dec.Decode(&s.SpanContaining); err != nil {
+				return fmt.Errorf("%s | %w", "SpanContaining", err)
+			}
+
+		case "span_field_masking":
+			if err := dec.Decode(&s.SpanFieldMasking); err != nil {
+				return fmt.Errorf("%s | %w", "SpanFieldMasking", err)
+			}
+
+		case "span_first":
+			if err := dec.Decode(&s.SpanFirst); err != nil {
+				return fmt.Errorf("%s | %w", "SpanFirst", err)
+			}
+
+		case "span_multi":
+			if err := dec.Decode(&s.SpanMulti); err != nil {
+				return fmt.Errorf("%s | %w", "SpanMulti", err)
+			}
+
+		case "span_near":
+			if err := dec.Decode(&s.SpanNear); err != nil {
+				return fmt.Errorf("%s | %w", "SpanNear", err)
+			}
+
+		case "span_not":
+			if err := dec.Decode(&s.SpanNot); err != nil {
+				return fmt.Errorf("%s | %w", "SpanNot", err)
+			}
+
+		case "span_or":
+			if err := dec.Decode(&s.SpanOr); err != nil {
+				return fmt.Errorf("%s | %w", "SpanOr", err)
+			}
+
+		case "span_term":
+			if s.SpanTerm == nil {
+				s.SpanTerm = make(map[string]SpanTermQuery, 0)
+			}
+			if err := dec.Decode(&s.SpanTerm); err != nil {
+				return fmt.Errorf("%s | %w", "SpanTerm", err)
+			}
+
+		case "span_within":
+			if err := dec.Decode(&s.SpanWithin); err != nil {
+				return fmt.Errorf("%s | %w", "SpanWithin", err)
+			}
+
+		case "sparse_vector":
+			if err := dec.Decode(&s.SparseVector); err != nil {
+				return fmt.Errorf("%s | %w", "SparseVector", err)
+			}
+
+		case "term":
+			if s.Term == nil {
+				s.Term = make(map[string]TermQuery, 0)
+			}
+			if err := dec.Decode(&s.Term); err != nil {
+				return fmt.Errorf("%s | %w", "Term", err)
+			}
+
+		case "terms":
+			if err := dec.Decode(&s.Terms); err != nil {
+				return fmt.Errorf("%s | %w", "Terms", err)
+			}
+
+		case "terms_set":
+			if s.TermsSet == nil {
+				s.TermsSet = make(map[string]TermsSetQuery, 0)
+			}
+			if err := dec.Decode(&s.TermsSet); err != nil {
+				return fmt.Errorf("%s | %w", "TermsSet", err)
+			}
+
+		case "text_expansion":
+			if s.TextExpansion == nil {
+				s.TextExpansion = make(map[string]TextExpansionQuery, 0)
+			}
+			if err := dec.Decode(&s.TextExpansion); err != nil {
+				return fmt.Errorf("%s | %w", "TextExpansion", err)
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "weighted_tokens":
+			if s.WeightedTokens == nil {
+				s.WeightedTokens = make(map[string]WeightedTokensQuery, 0)
+			}
+			if err := dec.Decode(&s.WeightedTokens); err != nil {
+				return fmt.Errorf("%s | %w", "WeightedTokens", err)
+			}
+
+		case "wildcard":
+			if s.Wildcard == nil {
+				s.Wildcard = make(map[string]WildcardQuery, 0)
+			}
+			if err := dec.Decode(&s.Wildcard); err != nil {
+				return fmt.Errorf("%s | %w", "Wildcard", err)
+			}
+
+		case "wrapper":
+			if err := dec.Decode(&s.Wrapper); err != nil {
+				return fmt.Errorf("%s | %w", "Wrapper", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalQueryProperty == nil {
+					s.AdditionalQueryProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalQueryProperty", err)
+				}
+				s.AdditionalQueryProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s Query) MarshalJSON() ([]byte, error) {
+	type opt Query
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields 
from the underlying map + for key, value := range s.AdditionalQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewQuery returns a Query. +func NewQuery() *Query { + r := &Query{ + AdditionalQueryProperty: make(map[string]json.RawMessage), + Common: make(map[string]CommonTermsQuery), + Fuzzy: make(map[string]FuzzyQuery), + GeoGrid: make(map[string]GeoGridQuery), + Intervals: make(map[string]IntervalsQuery), + Match: make(map[string]MatchQuery), + MatchBoolPrefix: make(map[string]MatchBoolPrefixQuery), + MatchPhrase: make(map[string]MatchPhraseQuery), + MatchPhrasePrefix: make(map[string]MatchPhrasePrefixQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Regexp: make(map[string]RegexpQuery), + SpanTerm: make(map[string]SpanTermQuery), + Term: make(map[string]TermQuery), + TermsSet: make(map[string]TermsSetQuery), + TextExpansion: make(map[string]TextExpansionQuery), + WeightedTokens: make(map[string]WeightedTokensQuery), + Wildcard: make(map[string]WildcardQuery), + } + + return r +} + +type QueryVariant interface { + QueryCaster() *Query +} + +func (s *Query) QueryCaster() *Query { + return s +} + +func (s *Query) IndicesPrivilegesQueryCaster() *IndicesPrivilegesQuery { + o := IndicesPrivilegesQuery(s) + return &o +} + +func (s *Query) RoleTemplateInlineQueryCaster() *RoleTemplateInlineQuery { + o := RoleTemplateInlineQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querybreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querybreakdown.go new file mode 100644 index 000000000..acba87bc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querybreakdown.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryBreakdown type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L105-L126 +type QueryBreakdown struct { + Advance int64 `json:"advance"` + AdvanceCount int64 `json:"advance_count"` + BuildScorer int64 `json:"build_scorer"` + BuildScorerCount int64 `json:"build_scorer_count"` + ComputeMaxScore int64 `json:"compute_max_score"` + ComputeMaxScoreCount int64 `json:"compute_max_score_count"` + CountWeight int64 `json:"count_weight"` + CountWeightCount int64 `json:"count_weight_count"` + CreateWeight int64 `json:"create_weight"` + CreateWeightCount int64 `json:"create_weight_count"` + Match int64 `json:"match"` + MatchCount int64 `json:"match_count"` + NextDoc int64 `json:"next_doc"` + NextDocCount int64 `json:"next_doc_count"` + Score int64 `json:"score"` + ScoreCount int64 `json:"score_count"` + SetMinCompetitiveScore int64 `json:"set_min_competitive_score"` + SetMinCompetitiveScoreCount int64 `json:"set_min_competitive_score_count"` + ShallowAdvance int64 `json:"shallow_advance"` + ShallowAdvanceCount int64 `json:"shallow_advance_count"` +} + +func (s *QueryBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Advance", err) + } + s.Advance = value + case float64: + f := int64(v) + s.Advance = f + } + + case "advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AdvanceCount", err) + } + s.AdvanceCount = value + case float64: + f := int64(v) + s.AdvanceCount = f + } + + case "build_scorer": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorer", err) + } + s.BuildScorer = value + case float64: + f := int64(v) + s.BuildScorer = f + } + + case "build_scorer_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BuildScorerCount", err) + } + s.BuildScorerCount = value + case float64: + f := int64(v) + s.BuildScorerCount = f + } + + case "compute_max_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScore", err) + } + s.ComputeMaxScore = value + case float64: + f := int64(v) + s.ComputeMaxScore = f + } + + case "compute_max_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ComputeMaxScoreCount", err) + } + s.ComputeMaxScoreCount = value + case float64: + f := int64(v) + s.ComputeMaxScoreCount = f + } + + case "count_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeight", err) + } + s.CountWeight = value + case float64: + f := int64(v) + s.CountWeight = f + } + + case 
"count_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CountWeightCount", err) + } + s.CountWeightCount = value + case float64: + f := int64(v) + s.CountWeightCount = f + } + + case "create_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeight", err) + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreateWeightCount", err) + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + s.Match = value + case float64: + f := int64(v) + s.Match = f + } + + case "match_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MatchCount", err) + } + s.MatchCount = value + case float64: + f := int64(v) + s.MatchCount = f + } + + case "next_doc": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NextDoc", err) + } + s.NextDoc = value + case float64: + f := int64(v) + s.NextDoc = f + } + + case "next_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NextDocCount", err) + } + s.NextDocCount = value + case float64: + f := int64(v) + s.NextDocCount = f + } + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + s.Score = value + case float64: + f := int64(v) + s.Score = f + } + + case "score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScoreCount", err) + } + s.ScoreCount = value + case float64: + f := int64(v) + s.ScoreCount = f + } + + case "set_min_competitive_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScore", err) + } + s.SetMinCompetitiveScore = value + case float64: + f := int64(v) + s.SetMinCompetitiveScore = f + } + + case "set_min_competitive_score_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SetMinCompetitiveScoreCount", err) + } + s.SetMinCompetitiveScoreCount = value + case float64: + f := int64(v) + s.SetMinCompetitiveScoreCount = f + } + + case "shallow_advance": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", 
"ShallowAdvance", err) + } + s.ShallowAdvance = value + case float64: + f := int64(v) + s.ShallowAdvance = f + } + + case "shallow_advance_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShallowAdvanceCount", err) + } + s.ShallowAdvanceCount = value + case float64: + f := int64(v) + s.ShallowAdvanceCount = f + } + + } + } + return nil +} + +// NewQueryBreakdown returns a QueryBreakdown. +func NewQueryBreakdown() *QueryBreakdown { + r := &QueryBreakdown{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querycachestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querycachestats.go new file mode 100644 index 000000000..442a01d60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querycachestats.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryCacheStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L219-L253 +type QueryCacheStats struct { + // CacheCount Total number of entries added to the query cache across all shards assigned + // to selected nodes. + // This number includes current and evicted entries. + CacheCount int64 `json:"cache_count"` + // CacheSize Total number of entries currently in the query cache across all shards + // assigned to selected nodes. + CacheSize int64 `json:"cache_size"` + // Evictions Total number of query cache evictions across all shards assigned to selected + // nodes. + Evictions int64 `json:"evictions"` + // HitCount Total count of query cache hits across all shards assigned to selected nodes. + HitCount int64 `json:"hit_count"` + // MemorySize Total amount of memory used for the query cache across all shards assigned to + // selected nodes. + MemorySize ByteSize `json:"memory_size,omitempty"` + // MemorySizeInBytes Total amount, in bytes, of memory used for the query cache across all shards + // assigned to selected nodes. + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + // MissCount Total count of query cache misses across all shards assigned to selected + // nodes. + MissCount int64 `json:"miss_count"` + // TotalCount Total count of hits and misses in the query cache across all shards assigned + // to selected nodes. 
+ TotalCount int64 `json:"total_count"` +} + +func (s *QueryCacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheCount", err) + } + s.CacheCount = value + case float64: + f := int64(v) + s.CacheCount = f + } + + case "cache_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheSize", err) + } + s.CacheSize = value + case float64: + f := int64(v) + s.CacheSize = f + } + + case "evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "hit_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HitCount", err) + } + s.HitCount = value + case float64: + f := int64(v) + s.HitCount = f + } + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return fmt.Errorf("%s | %w", "MemorySize", err) + } + + case "memory_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemorySizeInBytes", err) + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MissCount", err) + } + s.MissCount = value + case float64: + f := int64(v) + s.MissCount = f + } + + case "total_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCount", err) + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + } + } + return nil +} + +// NewQueryCacheStats returns a QueryCacheStats. +func NewQueryCacheStats() *QueryCacheStats { + r := &QueryCacheStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryfeatureextractor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryfeatureextractor.go new file mode 100644 index 000000000..32ea1221b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryfeatureextractor.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryFeatureExtractor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L98-L105 +type QueryFeatureExtractor struct { + DefaultScore *float32 `json:"default_score,omitempty"` + FeatureName string `json:"feature_name"` + Query Query `json:"query"` +} + +func (s *QueryFeatureExtractor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "DefaultScore", err) + } + f := float32(value) + s.DefaultScore = &f + case float64: + f := float32(v) + s.DefaultScore = &f + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + } + } + return nil +} + +// NewQueryFeatureExtractor returns a QueryFeatureExtractor. +func NewQueryFeatureExtractor() *QueryFeatureExtractor { + r := &QueryFeatureExtractor{} + + return r +} + +type QueryFeatureExtractorVariant interface { + QueryFeatureExtractorCaster() *QueryFeatureExtractor +} + +func (s *QueryFeatureExtractor) QueryFeatureExtractorCaster() *QueryFeatureExtractor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryprofile.go new file mode 100644 index 000000000..021db903b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryprofile.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L128-L134 +type QueryProfile struct { + Breakdown QueryBreakdown `json:"breakdown"` + Children []QueryProfile `json:"children,omitempty"` + Description string `json:"description"` + TimeInNanos int64 `json:"time_in_nanos"` + Type string `json:"type"` +} + +func (s *QueryProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return fmt.Errorf("%s | %w", "Breakdown", err) + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return fmt.Errorf("%s | %w", "Children", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "TimeInNanos", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewQueryProfile returns a QueryProfile. +func NewQueryProfile() *QueryProfile { + r := &QueryProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrole.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrole.go new file mode 100644 index 000000000..056394e57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrole.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege" +) + +// QueryRole type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_role/types.ts#L103-L109
+type QueryRole struct {
+	// Applications A list of application privilege entries
+	Applications []ApplicationPrivileges `json:"applications,omitempty"`
+	// Cluster A list of cluster privileges. These privileges define the cluster level
+	// actions that API keys are able to execute.
+	Cluster []clusterprivilege.ClusterPrivilege `json:"cluster,omitempty"`
+	// Description Optional description of the role descriptor
+	Description *string `json:"description,omitempty"`
+	// Global An object defining global privileges. A global privilege is a form of cluster
+	// privilege that is request-aware. Support for global privileges is currently
+	// limited to the management of application privileges.
+	Global []GlobalPrivilege `json:"global,omitempty"`
+	// Indices A list of indices permissions entries.
+	Indices []IndicesPrivileges `json:"indices,omitempty"`
+	// Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are
+	// reserved for system usage.
+	Metadata Metadata `json:"metadata,omitempty"`
+	// Name Name of the role.
+	Name string `json:"name"`
+	// RemoteCluster A list of cluster permissions for remote clusters.
+	// NOTE: This is limited to a subset of the cluster permissions.
+	RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"`
+	// RemoteIndices A list of indices permissions for remote clusters.
+	RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"`
+	// Restriction Restriction for when the role descriptor is allowed to be effective.
+	Restriction *Restriction `json:"restriction,omitempty"`
+	// RunAs A list of users that the API keys can impersonate.
+	// NOTE: In Elastic Cloud Serverless, the run-as feature is disabled.
+	// For API compatibility, you can still specify an empty `run_as` field, but a
+	// non-empty list will be rejected.
+ RunAs []string `json:"run_as,omitempty"` + Sort_ []FieldValue `json:"_sort,omitempty"` + TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` +} + +func (s *QueryRole) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return fmt.Errorf("%s | %w", "Applications", err) + } + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "global": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewGlobalPrivilege() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + + s.Global = append(s.Global, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Global); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + } + + case "indices", "index": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return fmt.Errorf("%s | %w", "RunAs", err) + } + + case "_sort": + if err := dec.Decode(&s.Sort_); err != nil { + return fmt.Errorf("%s | %w", "Sort_", err) + } + + case "transient_metadata": + if s.TransientMetadata == nil { + s.TransientMetadata = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.TransientMetadata); err != nil { + return fmt.Errorf("%s | %w", "TransientMetadata", err) + } + + } + } + return nil +} + +// NewQueryRole returns a QueryRole. +func NewQueryRole() *QueryRole { + r := &QueryRole{ + TransientMetadata: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrule.go new file mode 100644 index 000000000..6faf1b7d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrule.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype" +) + +// QueryRule type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/_types/QueryRuleset.ts#L36-L58 +type QueryRule struct { + // Actions The actions to take when the rule is matched. + // The format of this action depends on the rule type. + Actions QueryRuleActions `json:"actions"` + // Criteria The criteria that must be met for the rule to be applied. + // If multiple criteria are specified for a rule, all criteria must be met for + // the rule to be applied. + Criteria []QueryRuleCriteria `json:"criteria"` + Priority *int `json:"priority,omitempty"` + // RuleId A unique identifier for the rule. + RuleId string `json:"rule_id"` + // Type The type of rule. + // `pinned` will identify and pin specific documents to the top of search + // results. + // `exclude` will exclude specific documents from search results. + Type queryruletype.QueryRuleType `json:"type"` +} + +func (s *QueryRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "criteria": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQueryRuleCriteria() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Criteria", err) + } + + s.Criteria = append(s.Criteria, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Criteria); err != nil { + return fmt.Errorf("%s | %w", "Criteria", err) + } + } + + case "priority": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + s.Priority = &value + case float64: + f := int(v) + s.Priority = &f + } + + case "rule_id": + if err := dec.Decode(&s.RuleId); err != nil { + return fmt.Errorf("%s | %w", "RuleId", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewQueryRule returns a QueryRule. 
+func NewQueryRule() *QueryRule { + r := &QueryRule{} + + return r +} + +type QueryRuleVariant interface { + QueryRuleCaster() *QueryRule +} + +func (s *QueryRule) QueryRuleCaster() *QueryRule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryruleactions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryruleactions.go new file mode 100644 index 000000000..51e22f237 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryruleactions.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// QueryRuleActions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/_types/QueryRuleset.ts#L110-L126 +type QueryRuleActions struct { + // Docs The documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + // There is a maximum value of 100 documents in a rule. + // You can specify the following attributes for each document: + // + // * `_index`: The index of the document to pin. + // * `_id`: The unique document ID. + Docs []PinnedDoc `json:"docs,omitempty"` + // Ids The unique document IDs of the documents to apply the rule to. + // Only one of `ids` or `docs` may be specified and at least one must be + // specified. + Ids []string `json:"ids,omitempty"` +} + +// NewQueryRuleActions returns a QueryRuleActions. +func NewQueryRuleActions() *QueryRuleActions { + r := &QueryRuleActions{} + + return r +} + +type QueryRuleActionsVariant interface { + QueryRuleActionsCaster() *QueryRuleActions +} + +func (s *QueryRuleActions) QueryRuleActionsCaster() *QueryRuleActions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulecriteria.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulecriteria.go new file mode 100644 index 000000000..19987d3b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulecriteria.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryrulecriteriatype" +) + +// QueryRuleCriteria type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/_types/QueryRuleset.ts#L65-L93 +type QueryRuleCriteria struct { + // Metadata The metadata field to match against. + // This metadata will be used to match against `match_criteria` sent in the + // rule. + // It is required for all criteria types except `always`. + Metadata *string `json:"metadata,omitempty"` + // Type The type of criteria. The following criteria types are supported: + // + // * `always`: Matches all queries, regardless of input. + // * `contains`: Matches that contain this value anywhere in the field meet the + // criteria defined by the rule. Only applicable for string values. + // * `exact`: Only exact matches meet the criteria defined by the rule. + // Applicable for string or numerical values. + // * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit + // Distance meet the criteria defined by the rule. Only applicable for string + // values. + // * `gt`: Matches with a value greater than this value meet the criteria + // defined by the rule. Only applicable for numerical values. + // * `gte`: Matches with a value greater than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. + // * `lt`: Matches with a value less than this value meet the criteria defined + // by the rule. Only applicable for numerical values. + // * `lte`: Matches with a value less than or equal to this value meet the + // criteria defined by the rule. Only applicable for numerical values. + // * `prefix`: Matches that start with this value meet the criteria defined by + // the rule. Only applicable for string values. + // * `suffix`: Matches that end with this value meet the criteria defined by the + // rule. Only applicable for string values. + Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` + // Values The values to match against the `metadata` field. + // Only one value must match for the criteria to be met. + // It is required for all criteria types except `always`. 
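+	// As an illustrative (hypothetical) example: a criteria of type `exact`
+	// with `metadata` set to `user_country` and `values` set to `["us", "ca"]`
+	// is met when the query's `user_country` match criteria is `us` or `ca`.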
+ Values []json.RawMessage `json:"values,omitempty"` +} + +func (s *QueryRuleCriteria) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "metadata": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Metadata = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "values": + if err := dec.Decode(&s.Values); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + + } + } + return nil +} + +// NewQueryRuleCriteria returns a QueryRuleCriteria. +func NewQueryRuleCriteria() *QueryRuleCriteria { + r := &QueryRuleCriteria{} + + return r +} + +type QueryRuleCriteriaVariant interface { + QueryRuleCriteriaCaster() *QueryRuleCriteria +} + +func (s *QueryRuleCriteria) QueryRuleCriteriaCaster() *QueryRuleCriteria { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetlistitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetlistitem.go new file mode 100644 index 000000000..76fc9ac0a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetlistitem.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryRulesetListItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/list_rulesets/types.ts#L23-L44 +type QueryRulesetListItem struct { + // RuleCriteriaTypesCounts A map of criteria type (for example, `exact`) to the number of rules of that + // type. + // + // NOTE: The counts in `rule_criteria_types_counts` may be larger than the value + // of `rule_total_count` because a rule may have multiple criteria. + RuleCriteriaTypesCounts map[string]int `json:"rule_criteria_types_counts"` + // RuleTotalCount The number of rules associated with the ruleset. + RuleTotalCount int `json:"rule_total_count"` + // RuleTypeCounts A map of rule type (for example, `pinned`) to the number of rules of that + // type. + RuleTypeCounts map[string]int `json:"rule_type_counts"` + // RulesetId A unique identifier for the ruleset. 
+ RulesetId string `json:"ruleset_id"` +} + +func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rule_criteria_types_counts": + if s.RuleCriteriaTypesCounts == nil { + s.RuleCriteriaTypesCounts = make(map[string]int, 0) + } + if err := dec.Decode(&s.RuleCriteriaTypesCounts); err != nil { + return fmt.Errorf("%s | %w", "RuleCriteriaTypesCounts", err) + } + + case "rule_total_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RuleTotalCount", err) + } + s.RuleTotalCount = value + case float64: + f := int(v) + s.RuleTotalCount = f + } + + case "rule_type_counts": + if s.RuleTypeCounts == nil { + s.RuleTypeCounts = make(map[string]int, 0) + } + if err := dec.Decode(&s.RuleTypeCounts); err != nil { + return fmt.Errorf("%s | %w", "RuleTypeCounts", err) + } + + case "ruleset_id": + if err := dec.Decode(&s.RulesetId); err != nil { + return fmt.Errorf("%s | %w", "RulesetId", err) + } + + } + } + return nil +} + +// NewQueryRulesetListItem returns a QueryRulesetListItem. +func NewQueryRulesetListItem() *QueryRulesetListItem { + r := &QueryRulesetListItem{ + RuleCriteriaTypesCounts: make(map[string]int), + RuleTypeCounts: make(map[string]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetmatchedrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetmatchedrule.go new file mode 100644 index 000000000..9c18d9dd1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryrulesetmatchedrule.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// QueryRulesetMatchedRule type. 
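+// It identifies a single rule that matched during a query ruleset test
+// (inferred from the QueryRulesetTestResponse.ts spec path below).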
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/query_rules/test/QueryRulesetTestResponse.ts#L30-L39 +type QueryRulesetMatchedRule struct { + // RuleId Rule unique identifier within that ruleset + RuleId string `json:"rule_id"` + // RulesetId Ruleset unique identifier + RulesetId string `json:"ruleset_id"` +} + +func (s *QueryRulesetMatchedRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rule_id": + if err := dec.Decode(&s.RuleId); err != nil { + return fmt.Errorf("%s | %w", "RuleId", err) + } + + case "ruleset_id": + if err := dec.Decode(&s.RulesetId); err != nil { + return fmt.Errorf("%s | %w", "RulesetId", err) + } + + } + } + return nil +} + +// NewQueryRulesetMatchedRule returns a QueryRulesetMatchedRule. +func NewQueryRulesetMatchedRule() *QueryRulesetMatchedRule { + r := &QueryRulesetMatchedRule{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querystringquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querystringquery.go new file mode 100644 index 000000000..2c3b12ccf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querystringquery.go @@ -0,0 +1,443 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype" +) + +// QueryStringQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L649-L772 +type QueryStringQuery struct { + // AllowLeadingWildcard If `true`, the wildcard characters `*` and `?` are allowed as the first + // character of the query string. + AllowLeadingWildcard *bool `json:"allow_leading_wildcard,omitempty"` + // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. + AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` + // Analyzer Analyzer used to convert text in the query string into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term + // synonyms. 
+ AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // DefaultField Default field to search if no field is provided in the query string. + // Supports wildcards (`*`). + // Defaults to the `index.query.default_field` index setting, which has a + // default value of `*`. + DefaultField *string `json:"default_field,omitempty"` + // DefaultOperator Default boolean logic used to interpret text in the query string if no + // operators are specified. + DefaultOperator *operator.Operator `json:"default_operator,omitempty"` + // EnablePositionIncrements If `true`, enable position increments in queries constructed from a + // `query_string` search. + EnablePositionIncrements *bool `json:"enable_position_increments,omitempty"` + Escape *bool `json:"escape,omitempty"` + // Fields Array of fields to search. Supports wildcards (`*`). + Fields []string `json:"fields,omitempty"` + // Fuzziness Maximum edit distance allowed for fuzzy matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyMaxExpansions Maximum number of terms to which the query expands for fuzzy matching. + FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` + // FuzzyPrefixLength Number of beginning characters left unchanged for fuzzy matching. + FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text value for a numeric + // field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. + MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // PhraseSlop Maximum number of positions allowed between matching tokens for phrases. + PhraseSlop *Float64 `json:"phrase_slop,omitempty"` + // Query Query string you wish to parse and use for search. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // QuoteAnalyzer Analyzer used to convert quoted text in the query string into tokens. + // For quoted text, this parameter overrides the analyzer specified in the + // `analyzer` parameter. + QuoteAnalyzer *string `json:"quote_analyzer,omitempty"` + // QuoteFieldSuffix Suffix appended to quoted text in the query string. + // You can use this suffix to use a different analysis method for exact matches. + QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // TieBreaker How to combine the queries generated from the individual search terms in the + // resulting `dis_max` query. 
+ TieBreaker *Float64 `json:"tie_breaker,omitempty"` + // TimeZone Coordinated Universal Time (UTC) offset or IANA time zone used to convert + // date values in the query string to UTC. + TimeZone *string `json:"time_zone,omitempty"` + // Type Determines how the query matches and scores documents. + Type *textquerytype.TextQueryType `json:"type,omitempty"` +} + +func (s *QueryStringQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_leading_wildcard": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowLeadingWildcard", err) + } + s.AllowLeadingWildcard = &value + case bool: + s.AllowLeadingWildcard = &v + } + + case "analyze_wildcard": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnalyzeWildcard", err) + } + s.AnalyzeWildcard = &value + case bool: + s.AnalyzeWildcard = &v + } + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoGenerateSynonymsPhraseQuery", err) + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "default_field": + if err := dec.Decode(&s.DefaultField); err != nil { + return fmt.Errorf("%s | %w", "DefaultField", err) + } + + case "default_operator": + if err := dec.Decode(&s.DefaultOperator); err != nil { + return fmt.Errorf("%s | %w", "DefaultOperator", err) + } + + case "enable_position_increments": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EnablePositionIncrements", err) + } + s.EnablePositionIncrements = &value + case bool: + s.EnablePositionIncrements = &v + } + + case "escape": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Escape", err) + } + s.Escape = &value + case bool: + s.Escape = &v + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "fuzzy_max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyMaxExpansions", err) + } + s.FuzzyMaxExpansions = &value + case float64: + f := int(v) + s.FuzzyMaxExpansions = &f + } + + case "fuzzy_prefix_length": 
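+			// Like the other numeric fields in these generated decoders, the
+			// value is first decoded into `any` so that both JSON numbers and
+			// quoted numeric strings (for example, "2") are accepted.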
+ + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyPrefixLength", err) + } + s.FuzzyPrefixLength = &value + case float64: + f := int(v) + s.FuzzyPrefixLength = &f + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return fmt.Errorf("%s | %w", "FuzzyRewrite", err) + } + + case "fuzzy_transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyTranspositions", err) + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "lenient": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "max_determinized_states": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDeterminizedStates", err) + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "phrase_slop": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PhraseSlop", err) + } + f := Float64(value) + s.PhraseSlop = &f + case float64: + f := Float64(v) + s.PhraseSlop = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "quote_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QuoteAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteAnalyzer = &o + + case "quote_field_suffix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QuoteFieldSuffix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteFieldSuffix = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + + case "tie_breaker": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TieBreaker", err) + } + f := Float64(value) + s.TieBreaker = &f + case float64: + f := Float64(v) + s.TieBreaker = &f + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewQueryStringQuery returns 
a QueryStringQuery. +func NewQueryStringQuery() *QueryStringQuery { + r := &QueryStringQuery{} + + return r +} + +type QueryStringQueryVariant interface { + QueryStringQueryCaster() *QueryStringQuery +} + +func (s *QueryStringQuery) QueryStringQueryCaster() *QueryStringQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryuser.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryuser.go new file mode 100644 index 000000000..569aee497 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryuser.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryUser type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_user/types.ts#L103-L105 +type QueryUser struct { + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + Metadata Metadata `json:"metadata"` + ProfileUid *string `json:"profile_uid,omitempty"` + Roles []string `json:"roles"` + Sort_ []FieldValue `json:"_sort,omitempty"` + Username string `json:"username"` +} + +func (s *QueryUser) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "profile_uid": + if err := dec.Decode(&s.ProfileUid); err != nil { + return fmt.Errorf("%s | %w", "ProfileUid", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "_sort": + if err := dec.Decode(&s.Sort_); err != nil { + return fmt.Errorf("%s | %w", 
"Sort_", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewQueryUser returns a QueryUser. +func NewQueryUser() *QueryUser { + r := &QueryUser{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryvectorbuilder.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryvectorbuilder.go new file mode 100644 index 000000000..5464bf47f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/queryvectorbuilder.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// QueryVectorBuilder type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Knn.ts#L89-L92 +type QueryVectorBuilder struct { + AdditionalQueryVectorBuilderProperty map[string]json.RawMessage `json:"-"` + TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s QueryVectorBuilder) MarshalJSON() ([]byte, error) { + type opt QueryVectorBuilder + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalQueryVectorBuilderProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalQueryVectorBuilderProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewQueryVectorBuilder returns a QueryVectorBuilder. 
+func NewQueryVectorBuilder() *QueryVectorBuilder { + r := &QueryVectorBuilder{ + AdditionalQueryVectorBuilderProperty: make(map[string]json.RawMessage), + } + + return r +} + +type QueryVectorBuilderVariant interface { + QueryVectorBuilderCaster() *QueryVectorBuilder +} + +func (s *QueryVectorBuilder) QueryVectorBuilderCaster() *QueryVectorBuilder { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querywatch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querywatch.go new file mode 100644 index 000000000..c2753cef4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/querywatch.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QueryWatch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Watch.ts#L58-L64 +type QueryWatch struct { + Id_ string `json:"_id"` + PrimaryTerm_ *int `json:"_primary_term,omitempty"` + SeqNo_ *int64 `json:"_seq_no,omitempty"` + Status *WatchStatus `json:"status,omitempty"` + Watch *Watch `json:"watch,omitempty"` +} + +func (s *QueryWatch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_primary_term": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = &value + case float64: + f := int(v) + s.PrimaryTerm_ = &f + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "watch": + if err := dec.Decode(&s.Watch); err != nil { + return fmt.Errorf("%s | %w", "Watch", err) + } + + } + } + return nil +} + +// NewQueryWatch returns a QueryWatch. 
+func NewQueryWatch() *QueryWatch { + r := &QueryWatch{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceoptions.go new file mode 100644 index 000000000..8029f482b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceoptions.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QuestionAnsweringInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L301-L311 +type QuestionAnsweringInferenceOptions struct { + // MaxAnswerLength The maximum answer length to consider + MaxAnswerLength *int `json:"max_answer_length,omitempty"` + // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` +} + +func (s *QuestionAnsweringInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_answer_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnswerLength", err) + } + s.MaxAnswerLength = &value + case float64: + f := int(v) + s.MaxAnswerLength = &f + } + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewQuestionAnsweringInferenceOptions returns a QuestionAnsweringInferenceOptions. +func NewQuestionAnsweringInferenceOptions() *QuestionAnsweringInferenceOptions { + r := &QuestionAnsweringInferenceOptions{} + + return r +} + +type QuestionAnsweringInferenceOptionsVariant interface { + QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions +} + +func (s *QuestionAnsweringInferenceOptions) QuestionAnsweringInferenceOptionsCaster() *QuestionAnsweringInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceupdateoptions.go new file mode 100644 index 000000000..0d2cdbf13 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/questionansweringinferenceupdateoptions.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// QuestionAnsweringInferenceUpdateOptions type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L439-L450 +type QuestionAnsweringInferenceUpdateOptions struct { + // MaxAnswerLength The maximum answer length to consider for extraction + MaxAnswerLength *int `json:"max_answer_length,omitempty"` + // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // Question The question to answer given the inference context + Question string `json:"question"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *QuestionAnsweringInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_answer_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnswerLength", err) + } + s.MaxAnswerLength = &value + case float64: + f := int(v) + s.MaxAnswerLength = &f + } + + case "num_top_classes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopClasses", err) + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "question": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Question", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Question = o + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewQuestionAnsweringInferenceUpdateOptions returns a QuestionAnsweringInferenceUpdateOptions. +func NewQuestionAnsweringInferenceUpdateOptions() *QuestionAnsweringInferenceUpdateOptions { + r := &QuestionAnsweringInferenceUpdateOptions{} + + return r +} + +type QuestionAnsweringInferenceUpdateOptionsVariant interface { + QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions +} + +func (s *QuestionAnsweringInferenceUpdateOptions) QuestionAnsweringInferenceUpdateOptionsCaster() *QuestionAnsweringInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomsampleraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomsampleraggregation.go new file mode 100644 index 000000000..33ada2381 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomsampleraggregation.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RandomSamplerAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L749-L769 +type RandomSamplerAggregation struct { + // Probability The probability that a document will be included in the aggregated data. + // Must be greater than 0, less than 0.5, or exactly 1. + // The lower the probability, the fewer documents are matched. + Probability Float64 `json:"probability"` + // Seed The seed to generate the random sampling of documents. + // When a seed is provided, the random subset of documents is the same between + // calls. + Seed *int `json:"seed,omitempty"` + // ShardSeed When combined with seed, setting shard_seed ensures 100% consistent sampling + // over shards where data is exactly the same. + ShardSeed *int `json:"shard_seed,omitempty"` +} + +func (s *RandomSamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Probability", err) + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Seed", err) + } + s.Seed = &value + case float64: + f := int(v) + s.Seed = &f + } + + case "shard_seed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSeed", err) + } + s.ShardSeed = &value + case float64: + f := int(v) + s.ShardSeed = &f + } + + } + } + return nil +} + +// NewRandomSamplerAggregation returns a RandomSamplerAggregation. 
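+//
+// A minimal usage sketch (values are illustrative; per the field docs above,
+// probability must be greater than 0, less than 0.5, or exactly 1):
+//
+//	agg := NewRandomSamplerAggregation()
+//	agg.Probability = Float64(0.1)
+//	seed := 42
+//	agg.Seed = &seed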
+func NewRandomSamplerAggregation() *RandomSamplerAggregation { + r := &RandomSamplerAggregation{} + + return r +} + +type RandomSamplerAggregationVariant interface { + RandomSamplerAggregationCaster() *RandomSamplerAggregation +} + +func (s *RandomSamplerAggregation) RandomSamplerAggregationCaster() *RandomSamplerAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomscorefunction.go new file mode 100644 index 000000000..4cafe4ace --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/randomscorefunction.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RandomScoreFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L144-L147 +type RandomScoreFunction struct { + Field *string `json:"field,omitempty"` + Seed *string `json:"seed,omitempty"` +} + +func (s *RandomScoreFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "seed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Seed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Seed = &o + + } + } + return nil +} + +// NewRandomScoreFunction returns a RandomScoreFunction. +func NewRandomScoreFunction() *RandomScoreFunction { + r := &RandomScoreFunction{} + + return r +} + +type RandomScoreFunctionVariant interface { + RandomScoreFunctionCaster() *RandomScoreFunction +} + +func (s *RandomScoreFunction) RandomScoreFunctionCaster() *RandomScoreFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregate.go new file mode 100644 index 000000000..c7ef03890 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RangeAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L594-L598 +type RangeAggregate struct { + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *RangeAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]RangeBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []RangeBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewRangeAggregate returns a RangeAggregate. +func NewRangeAggregate() *RangeAggregate { + r := &RangeAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregation.go new file mode 100644 index 000000000..85e7207af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangeaggregation.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// RangeAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L669-L689
+type RangeAggregation struct {
+	// Field The date field whose values are used to build ranges.
+	Field  *string `json:"field,omitempty"`
+	Format *string `json:"format,omitempty"`
+	// Keyed Set to `true` to associate a unique string key with each bucket and return
+	// the ranges as a hash rather than an array.
+	Keyed *bool `json:"keyed,omitempty"`
+	// Missing The value to apply to documents that do not have a value.
+	// By default, documents without a value are ignored.
+	Missing *int `json:"missing,omitempty"`
+	// Ranges An array of ranges used to bucket documents.
+	Ranges []AggregationRange `json:"ranges,omitempty"`
+	Script *Script            `json:"script,omitempty"`
+}
+
+func (s *RangeAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "format":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Format", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Format = &o
+
+		case "keyed":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Keyed", err)
+				}
+				s.Keyed = &value
+			case bool:
+				s.Keyed = &v
+			}
+
+		case "missing":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Missing", err)
+				}
+				s.Missing = &value
+			case float64:
+				f := int(v)
+				s.Missing = &f
+			}
+
+		case "ranges":
+			if err := dec.Decode(&s.Ranges); err != nil {
+				return fmt.Errorf("%s | %w", "Ranges", err)
+			}
+
+		case "script":
+			if err := dec.Decode(&s.Script); err != nil {
+				return fmt.Errorf("%s | %w", "Script", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRangeAggregation returns a RangeAggregation.
+func NewRangeAggregation() *RangeAggregation {
+	r := &RangeAggregation{}
+
+	return r
+}
+
+type RangeAggregationVariant interface {
+	RangeAggregationCaster() *RangeAggregation
+}
+
+func (s *RangeAggregation) RangeAggregationCaster() *RangeAggregation {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangebucket.go
new file mode 100644
index 000000000..1321069ee
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangebucket.go
@@ -0,0 +1,706 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// RangeBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L600-L607 +type RangeBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + From *Float64 `json:"from,omitempty"` + FromAsString *string `json:"from_as_string,omitempty"` + // Key The bucket key. Present if the aggregation is _not_ keyed + Key *string `json:"key,omitempty"` + To *Float64 `json:"to,omitempty"` + ToAsString *string `json:"to_as_string,omitempty"` +} + +func (s *RangeBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "from": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + f := Float64(value) + s.From = &f + case float64: + f := Float64(v) + s.From = &f + } + + case "from_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FromAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FromAsString = &o + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "To", err) + } + f := Float64(value) + s.To = &f + case float64: + f := Float64(v) + s.To = &f + } + + case "to_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ToAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ToAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + 
} + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s RangeBucket) MarshalJSON() ([]byte, error) { + type opt RangeBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewRangeBucket returns a RangeBucket. 
+func NewRangeBucket() *RangeBucket { + r := &RangeBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangequery.go new file mode 100644 index 000000000..ef913f24f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rangequery.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RangeQuery holds the union for the following types: +// +// UntypedRangeQuery +// DateRangeQuery +// NumberRangeQuery +// TermRangeQuery +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L172-L182 +type RangeQuery any + +type RangeQueryVariant interface { + RangeQueryCaster() *RangeQuery +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankcontainer.go new file mode 100644 index 000000000..a3244e507 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankcontainer.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// RankContainer type. 
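+//
+// A minimal usage sketch (assumes the package's usual New* constructor for
+// RrfRank; any rrf parameters would be set on the returned value):
+//
+//	rc := NewRankContainer()
+//	rc.Rrf = NewRrfRank()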
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Rank.ts#L22-L28
+type RankContainer struct {
+	AdditionalRankContainerProperty map[string]json.RawMessage `json:"-"`
+	// Rrf The reciprocal rank fusion parameters
+	Rrf *RrfRank `json:"rrf,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s RankContainer) MarshalJSON() ([]byte, error) {
+	type opt RankContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalRankContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalRankContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewRankContainer returns a RankContainer.
+func NewRankContainer() *RankContainer {
+	r := &RankContainer{
+		AdditionalRankContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type RankContainerVariant interface {
+	RankContainerCaster() *RankContainer
+}
+
+func (s *RankContainer) RankContainerCaster() *RankContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankeddocument.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankeddocument.go
new file mode 100644
index 000000000..eab9113a1
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankeddocument.go
@@ -0,0 +1,110 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// RankedDocument type.
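+//
+// A minimal decoding sketch (the payload is an illustrative rerank-style
+// result, not actual API output):
+//
+//	var d RankedDocument
+//	if err := json.Unmarshal([]byte(`{"index":0,"relevance_score":0.97}`), &d); err != nil {
+//		// handle the decoding error
+//	}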
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L91-L101 +type RankedDocument struct { + Index int `json:"index"` + RelevanceScore float32 `json:"relevance_score"` + Text *string `json:"text,omitempty"` +} + +func (s *RankedDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = value + case float64: + f := int(v) + s.Index = f + } + + case "relevance_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "RelevanceScore", err) + } + f := float32(value) + s.RelevanceScore = f + case float64: + f := float32(v) + s.RelevanceScore = f + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil +} + +// NewRankedDocument returns a RankedDocument. +func NewRankedDocument() *RankedDocument { + r := &RankedDocument{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhit.go new file mode 100644 index 000000000..3c35777f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhit.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalHit type. 
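+//
+// A minimal decoding sketch (values are illustrative):
+//
+//	var h RankEvalHit
+//	if err := json.Unmarshal([]byte(`{"_id":"1","_index":"my-index","_score":1.5}`), &h); err != nil {
+//		// handle the decoding error
+//	}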
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L144-L148 +type RankEvalHit struct { + Id_ string `json:"_id"` + Index_ string `json:"_index"` + Score_ Float64 `json:"_score"` +} + +func (s *RankEvalHit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score_", err) + } + f := Float64(value) + s.Score_ = f + case float64: + f := Float64(v) + s.Score_ = f + } + + } + } + return nil +} + +// NewRankEvalHit returns a RankEvalHit. +func NewRankEvalHit() *RankEvalHit { + r := &RankEvalHit{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhititem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhititem.go new file mode 100644 index 000000000..f01dcc034 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalhititem.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RankEvalHitItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L139-L142 +type RankEvalHitItem struct { + Hit RankEvalHit `json:"hit"` + Rating *Float64 `json:"rating,omitempty"` +} + +func (s *RankEvalHitItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hit": + if err := dec.Decode(&s.Hit); err != nil { + return fmt.Errorf("%s | %w", "Hit", err) + } + + case "rating": + if err := dec.Decode(&s.Rating); err != nil { + return fmt.Errorf("%s | %w", "Rating", err) + } + + } + } + return nil +} + +// NewRankEvalHitItem returns a RankEvalHitItem. 
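+//
+// A minimal usage sketch (the rating value is illustrative):
+//
+//	item := NewRankEvalHitItem()
+//	rating := Float64(3)
+//	item.Rating = &rating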
+func NewRankEvalHitItem() *RankEvalHitItem { + r := &RankEvalHitItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetric.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetric.go new file mode 100644 index 000000000..4f3f5ebdc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetric.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RankEvalMetric type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L90-L96 +type RankEvalMetric struct { + Dcg *RankEvalMetricDiscountedCumulativeGain `json:"dcg,omitempty"` + ExpectedReciprocalRank *RankEvalMetricExpectedReciprocalRank `json:"expected_reciprocal_rank,omitempty"` + MeanReciprocalRank *RankEvalMetricMeanReciprocalRank `json:"mean_reciprocal_rank,omitempty"` + Precision *RankEvalMetricPrecision `json:"precision,omitempty"` + Recall *RankEvalMetricRecall `json:"recall,omitempty"` +} + +// NewRankEvalMetric returns a RankEvalMetric. +func NewRankEvalMetric() *RankEvalMetric { + r := &RankEvalMetric{} + + return r +} + +type RankEvalMetricVariant interface { + RankEvalMetricCaster() *RankEvalMetric +} + +func (s *RankEvalMetric) RankEvalMetricCaster() *RankEvalMetric { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdetail.go new file mode 100644 index 000000000..3fe2e0888 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdetail.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricDetail type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L128-L137 +type RankEvalMetricDetail struct { + // Hits The hits section shows a grouping of the search results with their supplied + // ratings + Hits []RankEvalHitItem `json:"hits"` + // MetricDetails The metric_details give additional information about the calculated quality + // metric (e.g. how many of the retrieved documents were relevant). The content + // varies for each metric but allows for better interpretation of the results + MetricDetails map[string]map[string]json.RawMessage `json:"metric_details"` + // MetricScore The metric_score in the details section shows the contribution of this query + // to the global quality metric score + MetricScore Float64 `json:"metric_score"` + // UnratedDocs The unrated_docs section contains an _index and _id entry for each document + // in the search result for this query that didn’t have a ratings value. This + // can be used to ask the user to supply ratings for these documents + UnratedDocs []UnratedDocument `json:"unrated_docs"` +} + +func (s *RankEvalMetricDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "metric_details": + if s.MetricDetails == nil { + s.MetricDetails = make(map[string]map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.MetricDetails); err != nil { + return fmt.Errorf("%s | %w", "MetricDetails", err) + } + + case "metric_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MetricScore", err) + } + f := Float64(value) + s.MetricScore = f + case float64: + f := Float64(v) + s.MetricScore = f + } + + case "unrated_docs": + if err := dec.Decode(&s.UnratedDocs); err != nil { + return fmt.Errorf("%s | %w", "UnratedDocs", err) + } + + } + } + return nil +} + +// NewRankEvalMetricDetail returns a RankEvalMetricDetail. +func NewRankEvalMetricDetail() *RankEvalMetricDetail { + r := &RankEvalMetricDetail{ + MetricDetails: make(map[string]map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdiscountedcumulativegain.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdiscountedcumulativegain.go new file mode 100644 index 000000000..32c8ccbff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricdiscountedcumulativegain.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricDiscountedCumulativeGain type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L66-L77 +type RankEvalMetricDiscountedCumulativeGain struct { + // K Sets the maximum number of documents retrieved per query. This value will act + // in place of the usual size parameter in the query. + K *int `json:"k,omitempty"` + // Normalize If set to true, this metric will calculate the Normalized DCG. + Normalize *bool `json:"normalize,omitempty"` +} + +func (s *RankEvalMetricDiscountedCumulativeGain) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "normalize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Normalize", err) + } + s.Normalize = &value + case bool: + s.Normalize = &v + } + + } + } + return nil +} + +// NewRankEvalMetricDiscountedCumulativeGain returns a RankEvalMetricDiscountedCumulativeGain. +func NewRankEvalMetricDiscountedCumulativeGain() *RankEvalMetricDiscountedCumulativeGain { + r := &RankEvalMetricDiscountedCumulativeGain{} + + return r +} + +type RankEvalMetricDiscountedCumulativeGainVariant interface { + RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain +} + +func (s *RankEvalMetricDiscountedCumulativeGain) RankEvalMetricDiscountedCumulativeGainCaster() *RankEvalMetricDiscountedCumulativeGain { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricexpectedreciprocalrank.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricexpectedreciprocalrank.go new file mode 100644 index 000000000..744a51dca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricexpectedreciprocalrank.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricExpectedReciprocalRank type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L79-L88 +type RankEvalMetricExpectedReciprocalRank struct { + // K Sets the maximum number of documents retrieved per query. This value will act + // in place of the usual size parameter in the query. + K *int `json:"k,omitempty"` + // MaximumRelevance The highest relevance grade used in the user-supplied relevance judgments. + MaximumRelevance int `json:"maximum_relevance"` +} + +func (s *RankEvalMetricExpectedReciprocalRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "maximum_relevance": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaximumRelevance", err) + } + s.MaximumRelevance = value + case float64: + f := int(v) + s.MaximumRelevance = f + } + + } + } + return nil +} + +// NewRankEvalMetricExpectedReciprocalRank returns a RankEvalMetricExpectedReciprocalRank. +func NewRankEvalMetricExpectedReciprocalRank() *RankEvalMetricExpectedReciprocalRank { + r := &RankEvalMetricExpectedReciprocalRank{} + + return r +} + +type RankEvalMetricExpectedReciprocalRankVariant interface { + RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank +} + +func (s *RankEvalMetricExpectedReciprocalRank) RankEvalMetricExpectedReciprocalRankCaster() *RankEvalMetricExpectedReciprocalRank { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricmeanreciprocalrank.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricmeanreciprocalrank.go new file mode 100644 index 000000000..377fc33a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricmeanreciprocalrank.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricMeanReciprocalRank type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L60-L64 +type RankEvalMetricMeanReciprocalRank struct { + // K Sets the maximum number of documents retrieved per query. This value will act + // in place of the usual size parameter in the query. + K *int `json:"k,omitempty"` + // RelevantRatingThreshold Sets the rating threshold above which documents are considered to be + // "relevant". + RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` +} + +func (s *RankEvalMetricMeanReciprocalRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RelevantRatingThreshold", err) + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + +// NewRankEvalMetricMeanReciprocalRank returns a RankEvalMetricMeanReciprocalRank. +func NewRankEvalMetricMeanReciprocalRank() *RankEvalMetricMeanReciprocalRank { + r := &RankEvalMetricMeanReciprocalRank{} + + return r +} + +type RankEvalMetricMeanReciprocalRankVariant interface { + RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank +} + +func (s *RankEvalMetricMeanReciprocalRank) RankEvalMetricMeanReciprocalRankCaster() *RankEvalMetricMeanReciprocalRank { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricprecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricprecision.go new file mode 100644 index 000000000..8bec79e3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricprecision.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricPrecision type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L42-L52 +type RankEvalMetricPrecision struct { + // IgnoreUnlabeled Controls how unlabeled documents in the search results are counted. If set to + // true, unlabeled documents are ignored and neither count as relevant or + // irrelevant. Set to false (the default), they are treated as irrelevant. + IgnoreUnlabeled *bool `json:"ignore_unlabeled,omitempty"` + // K Sets the maximum number of documents retrieved per query. This value will act + // in place of the usual size parameter in the query. + K *int `json:"k,omitempty"` + // RelevantRatingThreshold Sets the rating threshold above which documents are considered to be + // "relevant". + RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` +} + +func (s *RankEvalMetricPrecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_unlabeled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnlabeled", err) + } + s.IgnoreUnlabeled = &value + case bool: + s.IgnoreUnlabeled = &v + } + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RelevantRatingThreshold", err) + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + +// NewRankEvalMetricPrecision returns a RankEvalMetricPrecision. +func NewRankEvalMetricPrecision() *RankEvalMetricPrecision { + r := &RankEvalMetricPrecision{} + + return r +} + +type RankEvalMetricPrecisionVariant interface { + RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision +} + +func (s *RankEvalMetricPrecision) RankEvalMetricPrecisionCaster() *RankEvalMetricPrecision { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricrecall.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricrecall.go new file mode 100644 index 000000000..8513f2f11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalmetricrecall.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalMetricRecall type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L54-L58 +type RankEvalMetricRecall struct { + // K Sets the maximum number of documents retrieved per query. This value will act + // in place of the usual size parameter in the query. + K *int `json:"k,omitempty"` + // RelevantRatingThreshold Sets the rating threshold above which documents are considered to be + // "relevant". + RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` +} + +func (s *RankEvalMetricRecall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "K", err) + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RelevantRatingThreshold", err) + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + +// NewRankEvalMetricRecall returns a RankEvalMetricRecall. +func NewRankEvalMetricRecall() *RankEvalMetricRecall { + r := &RankEvalMetricRecall{} + + return r +} + +type RankEvalMetricRecallVariant interface { + RankEvalMetricRecallCaster() *RankEvalMetricRecall +} + +func (s *RankEvalMetricRecall) RankEvalMetricRecallCaster() *RankEvalMetricRecall { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalquery.go new file mode 100644 index 000000000..77b8e10e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalquery.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankEvalQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L111-L117 +type RankEvalQuery struct { + Query Query `json:"query"` + Size *int `json:"size,omitempty"` +} + +func (s *RankEvalQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewRankEvalQuery returns a RankEvalQuery. +func NewRankEvalQuery() *RankEvalQuery { + r := &RankEvalQuery{} + + return r +} + +type RankEvalQueryVariant interface { + RankEvalQueryCaster() *RankEvalQuery +} + +func (s *RankEvalQuery) RankEvalQueryCaster() *RankEvalQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalrequestitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalrequestitem.go new file mode 100644 index 000000000..a2bff2c91 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankevalrequestitem.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RankEvalRequestItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L98-L109 +type RankEvalRequestItem struct { + // Id The search request’s ID, used to group result details later. + Id string `json:"id"` + // Params The search template parameters. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Ratings List of document ratings + Ratings []DocumentRating `json:"ratings"` + // Request The query being evaluated. + Request *RankEvalQuery `json:"request,omitempty"` + // TemplateId The search template Id + TemplateId *string `json:"template_id,omitempty"` +} + +func (s *RankEvalRequestItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "ratings": + if err := dec.Decode(&s.Ratings); err != nil { + return fmt.Errorf("%s | %w", "Ratings", err) + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + case "template_id": + if err := dec.Decode(&s.TemplateId); err != nil { + return fmt.Errorf("%s | %w", "TemplateId", err) + } + + } + } + return nil +} + +// NewRankEvalRequestItem returns a RankEvalRequestItem. +func NewRankEvalRequestItem() *RankEvalRequestItem { + r := &RankEvalRequestItem{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type RankEvalRequestItemVariant interface { + RankEvalRequestItemCaster() *RankEvalRequestItem +} + +func (s *RankEvalRequestItem) RankEvalRequestItemCaster() *RankEvalRequestItem { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlinear.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlinear.go new file mode 100644 index 000000000..74fbe2c5d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlinear.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RankFeatureFunctionLinear type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L282-L282 +type RankFeatureFunctionLinear struct { +} + +// NewRankFeatureFunctionLinear returns a RankFeatureFunctionLinear. +func NewRankFeatureFunctionLinear() *RankFeatureFunctionLinear { + r := &RankFeatureFunctionLinear{} + + return r +} + +type RankFeatureFunctionLinearVariant interface { + RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear +} + +func (s *RankFeatureFunctionLinear) RankFeatureFunctionLinearCaster() *RankFeatureFunctionLinear { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlogarithm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlogarithm.go new file mode 100644 index 000000000..83933e779 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionlogarithm.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankFeatureFunctionLogarithm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L284-L289 +type RankFeatureFunctionLogarithm struct { + // ScalingFactor Configurable scaling factor. + ScalingFactor float32 `json:"scaling_factor"` +} + +func (s *RankFeatureFunctionLogarithm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scaling_factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "ScalingFactor", err) + } + f := float32(value) + s.ScalingFactor = f + case float64: + f := float32(v) + s.ScalingFactor = f + } + + } + } + return nil +} + +// NewRankFeatureFunctionLogarithm returns a RankFeatureFunctionLogarithm. 
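+//
+// A minimal, hedged usage sketch: the log function is attached to a
+// rank_feature query via RankFeatureQuery.Log. The Query wrapper and the
+// field name "pagerank" are illustrative assumptions, not part of this file.
+//
+//	fn := types.NewRankFeatureFunctionLogarithm()
+//	fn.ScalingFactor = 4 // score = log(scaling_factor + feature_value)
+//	q := types.Query{RankFeature: &types.RankFeatureQuery{
+//		Field: "pagerank",
+//		Log:   fn,
+//	}}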
+func NewRankFeatureFunctionLogarithm() *RankFeatureFunctionLogarithm { + r := &RankFeatureFunctionLogarithm{} + + return r +} + +type RankFeatureFunctionLogarithmVariant interface { + RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm +} + +func (s *RankFeatureFunctionLogarithm) RankFeatureFunctionLogarithmCaster() *RankFeatureFunctionLogarithm { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsaturation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsaturation.go new file mode 100644 index 000000000..1e249b229 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsaturation.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankFeatureFunctionSaturation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L291-L296 +type RankFeatureFunctionSaturation struct { + // Pivot Configurable pivot value so that the result will be less than 0.5. + Pivot *float32 `json:"pivot,omitempty"` +} + +func (s *RankFeatureFunctionSaturation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pivot": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + f := float32(value) + s.Pivot = &f + case float64: + f := float32(v) + s.Pivot = &f + } + + } + } + return nil +} + +// NewRankFeatureFunctionSaturation returns a RankFeatureFunctionSaturation. 
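+//
+// A minimal, hedged usage sketch via RankFeatureQuery.Saturation; the field
+// name "pagerank" is an illustrative assumption. With a pivot, the score is
+// S / (S + pivot), so it stays below 0.5 for feature values under the pivot.
+//
+//	pivot := float32(8)
+//	q := types.Query{RankFeature: &types.RankFeatureQuery{
+//		Field:      "pagerank",
+//		Saturation: &types.RankFeatureFunctionSaturation{Pivot: &pivot},
+//	}}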
+func NewRankFeatureFunctionSaturation() *RankFeatureFunctionSaturation { + r := &RankFeatureFunctionSaturation{} + + return r +} + +type RankFeatureFunctionSaturationVariant interface { + RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation +} + +func (s *RankFeatureFunctionSaturation) RankFeatureFunctionSaturationCaster() *RankFeatureFunctionSaturation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsigmoid.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsigmoid.go new file mode 100644 index 000000000..b765511c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturefunctionsigmoid.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankFeatureFunctionSigmoid type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L298-L307 +type RankFeatureFunctionSigmoid struct { + // Exponent Configurable Exponent. + Exponent float32 `json:"exponent"` + // Pivot Configurable pivot value so that the result will be less than 0.5. + Pivot float32 `json:"pivot"` +} + +func (s *RankFeatureFunctionSigmoid) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exponent": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Exponent", err) + } + f := float32(value) + s.Exponent = f + case float64: + f := float32(v) + s.Exponent = f + } + + case "pivot": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + f := float32(value) + s.Pivot = f + case float64: + f := float32(v) + s.Pivot = f + } + + } + } + return nil +} + +// NewRankFeatureFunctionSigmoid returns a RankFeatureFunctionSigmoid. 
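+//
+// A minimal, hedged usage sketch via RankFeatureQuery.Sigmoid; the field name
+// "pagerank" is an illustrative assumption. The score follows
+// S^exp / (S^exp + pivot^exp).
+//
+//	q := types.Query{RankFeature: &types.RankFeatureQuery{
+//		Field:   "pagerank",
+//		Sigmoid: &types.RankFeatureFunctionSigmoid{Pivot: 7, Exponent: 0.6},
+//	}}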
+func NewRankFeatureFunctionSigmoid() *RankFeatureFunctionSigmoid { + r := &RankFeatureFunctionSigmoid{} + + return r +} + +type RankFeatureFunctionSigmoidVariant interface { + RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid +} + +func (s *RankFeatureFunctionSigmoid) RankFeatureFunctionSigmoidCaster() *RankFeatureFunctionSigmoid { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeatureproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeatureproperty.go new file mode 100644 index 000000000..906534564 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeatureproperty.go @@ -0,0 +1,838 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// RankFeatureProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L207-L210 +type RankFeatureProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "positive_score_impact": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositiveScoreImpact", err) + } + s.PositiveScoreImpact = &value + case bool: + s.PositiveScoreImpact = &v + } + + case "properties": + if 
s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := 
NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { + type innerRankFeatureProperty RankFeatureProperty + tmp := innerRankFeatureProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + PositiveScoreImpact: s.PositiveScoreImpact, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "rank_feature" + + return json.Marshal(tmp) +} + +// NewRankFeatureProperty returns a RankFeatureProperty. 
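+//
+// MarshalJSON pins Type to "rank_feature", so a zero-value instance already
+// serializes as a valid mapping entry. A hedged sketch (the field name
+// "url_length" is an illustrative assumption); PositiveScoreImpact is set to
+// false for features that correlate negatively with relevance:
+//
+//	impact := false
+//	prop := types.NewRankFeatureProperty()
+//	prop.PositiveScoreImpact = &impact
+//	mappings := map[string]types.Property{"url_length": prop}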
+func NewRankFeatureProperty() *RankFeatureProperty { + r := &RankFeatureProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type RankFeaturePropertyVariant interface { + RankFeaturePropertyCaster() *RankFeatureProperty +} + +func (s *RankFeatureProperty) RankFeaturePropertyCaster() *RankFeatureProperty { + return s +} + +func (s *RankFeatureProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturequery.go new file mode 100644 index 000000000..6ea25caf5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturequery.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RankFeatureQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L309-L335 +type RankFeatureQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field `rank_feature` or `rank_features` field used to boost relevance scores. + Field string `json:"field"` + // Linear Linear function used to boost relevance scores based on the value of the rank + // feature `field`. + Linear *RankFeatureFunctionLinear `json:"linear,omitempty"` + // Log Logarithmic function used to boost relevance scores based on the value of the + // rank feature `field`. + Log *RankFeatureFunctionLogarithm `json:"log,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Saturation Saturation function used to boost relevance scores based on the value of the + // rank feature `field`. + Saturation *RankFeatureFunctionSaturation `json:"saturation,omitempty"` + // Sigmoid Sigmoid function used to boost relevance scores based on the value of the + // rank feature `field`. 
+ Sigmoid *RankFeatureFunctionSigmoid `json:"sigmoid,omitempty"` +} + +func (s *RankFeatureQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "linear": + if err := dec.Decode(&s.Linear); err != nil { + return fmt.Errorf("%s | %w", "Linear", err) + } + + case "log": + if err := dec.Decode(&s.Log); err != nil { + return fmt.Errorf("%s | %w", "Log", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "saturation": + if err := dec.Decode(&s.Saturation); err != nil { + return fmt.Errorf("%s | %w", "Saturation", err) + } + + case "sigmoid": + if err := dec.Decode(&s.Sigmoid); err != nil { + return fmt.Errorf("%s | %w", "Sigmoid", err) + } + + } + } + return nil +} + +// NewRankFeatureQuery returns a RankFeatureQuery. +func NewRankFeatureQuery() *RankFeatureQuery { + r := &RankFeatureQuery{} + + return r +} + +type RankFeatureQueryVariant interface { + RankFeatureQueryCaster() *RankFeatureQuery +} + +func (s *RankFeatureQuery) RankFeatureQueryCaster() *RankFeatureQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturesproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturesproperty.go new file mode 100644 index 000000000..846d322b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankfeaturesproperty.go @@ -0,0 +1,838 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// RankFeaturesProperty type. 
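+//
+// A rank_features field indexes a map of string feature names to numeric
+// feature values, each of which can later be targeted by a rank_feature
+// query.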
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L212-L215 +type RankFeaturesProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + PositiveScoreImpact *bool `json:"positive_score_impact,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != 
nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "positive_score_impact": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositiveScoreImpact", err) + } + s.PositiveScoreImpact = &value + case bool: + s.PositiveScoreImpact = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = 
oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { + type innerRankFeaturesProperty RankFeaturesProperty + tmp := innerRankFeaturesProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + PositiveScoreImpact: s.PositiveScoreImpact, + Properties: s.Properties, + SyntheticSourceKeep: 
s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "rank_features" + + return json.Marshal(tmp) +} + +// NewRankFeaturesProperty returns a RankFeaturesProperty. +func NewRankFeaturesProperty() *RankFeaturesProperty { + r := &RankFeaturesProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type RankFeaturesPropertyVariant interface { + RankFeaturesPropertyCaster() *RankFeaturesProperty +} + +func (s *RankFeaturesProperty) RankFeaturesPropertyCaster() *RankFeaturesProperty { + return s +} + +func (s *RankFeaturesProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankvectorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankvectorproperty.go new file mode 100644 index 000000000..48796a599 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rankvectorproperty.go @@ -0,0 +1,848 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rankvectorelementtype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// RankVectorProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L217-L224 +type RankVectorProperty struct { + Dims *int `json:"dims,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + ElementType *rankvectorelementtype.RankVectorElementType `json:"element_type,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. 
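Two conventions of this generated code are worth calling out. On input, UnmarshalJSON peeks at each sub-property's `type` discriminator (defaulting to `object` when absent) and dispatches to the matching concrete Property implementation; on output, MarshalJSON pins the literal `"type": "rank_features"` even when the struct was built by hand with an empty Type. A round-trip sketch, with the same imports as above and assuming ObjectProperty shares this generated dispatch (it appears as a case in the switch):

	raw := []byte(`{"properties":{"topics":{"type":"rank_features"},"title":{"type":"text"}}}`)
	obj := types.NewObjectProperty()
	if err := json.Unmarshal(raw, obj); err != nil {
		panic(err)
	}
	_, isRankFeatures := obj.Properties["topics"].(*types.RankFeaturesProperty)
	fmt.Println(isRankFeatures) // true: the "type" discriminator selected the concrete struct

	// On the way out, MarshalJSON re-injects the type literal even on a zero value:
	out, _ := json.Marshal(types.RankFeaturesProperty{})
	fmt.Println(string(out)) // {"type":"rank_features"}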
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RankVectorProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dims": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dims", err) + } + s.Dims = &value + case float64: + f := int(v) + s.Dims = &f + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "element_type": + if err := dec.Decode(&s.ElementType); err != nil { + return fmt.Errorf("%s | %w", "ElementType", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + 
s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case 
"properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + 
oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RankVectorProperty) MarshalJSON() ([]byte, error) { + type innerRankVectorProperty RankVectorProperty + tmp := innerRankVectorProperty{ + Dims: s.Dims, + Dynamic: s.Dynamic, + ElementType: s.ElementType, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "rank_vectors" + + return json.Marshal(tmp) +} + +// NewRankVectorProperty returns a RankVectorProperty. 
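Like the other numeric fields in this package, `dims` and `ignore_above` are decoded leniently: a quoted string is run through strconv before being rejected, so `"dims": "3"` and `"dims": 3` are equivalent on the wire. A small sketch of that tolerance, with the same imports as above:

	p := types.NewRankVectorProperty()
	if err := json.Unmarshal([]byte(`{"type":"rank_vectors","dims":"3"}`), p); err != nil {
		panic(err)
	}
	fmt.Println(*p.Dims) // 3: the quoted value was parsed via strconv.Atoi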
+func NewRankVectorProperty() *RankVectorProperty { + r := &RankVectorProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type RankVectorPropertyVariant interface { + RankVectorPropertyCaster() *RankVectorProperty +} + +func (s *RankVectorProperty) RankVectorPropertyCaster() *RankVectorProperty { + return s +} + +func (s *RankVectorProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/raretermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/raretermsaggregation.go new file mode 100644 index 000000000..24cfccded --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/raretermsaggregation.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RareTermsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L706-L739 +type RareTermsAggregation struct { + // Exclude Terms that should be excluded from the aggregation. + Exclude []string `json:"exclude,omitempty"` + // Field The field from which to return rare terms. + Field *string `json:"field,omitempty"` + // Include Terms that should be included in the aggregation. + Include TermsInclude `json:"include,omitempty"` + // MaxDocCount The maximum number of documents a term should appear in. + MaxDocCount *int64 `json:"max_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Precision The precision of the internal CuckooFilters. + // Smaller precision leads to better approximation, but higher memory usage. 
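The `rare_terms` bucket aggregation inverts the usual terms ordering: it surfaces values whose document count is at or below `max_doc_count` (default 1), and because it tracks candidates in CuckooFilters, `precision` trades memory for approximation quality. A construction sketch, assuming `types.Aggregations` exposes a `RareTerms` field as in earlier typedapi releases (`genre` is an illustrative field name):

	field := "genre"
	maxDocCount := int64(2)
	aggs := map[string]types.Aggregations{
		"rare_genres": {
			RareTerms: &types.RareTermsAggregation{
				Field:       &field,
				MaxDocCount: &maxDocCount,
			},
		},
	}
	body, _ := json.Marshal(aggs)
	fmt.Println(string(body)) // {"rare_genres":{"rare_terms":{"field":"genre","max_doc_count":2}}}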
+ Precision *Float64 `json:"precision,omitempty"` + ValueType *string `json:"value_type,omitempty"` +} + +func (s *RareTermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "include": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + include_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Include", err) + } + + switch t { + + case "num_partitions", "partition": + o := NewTermsPartition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = o + break include_field + + } + } + if s.Include == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + } + + case "max_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocCount", err) + } + s.MaxDocCount = &value + case float64: + f := int64(v) + s.MaxDocCount = &f + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "precision": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Precision", err) + } + f := Float64(value) + s.Precision = &f + case float64: + f := Float64(v) + s.Precision = &f + } + + case "value_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueType = &o + + } + } + return nil +} + +// NewRareTermsAggregation returns a RareTermsAggregation. +func NewRareTermsAggregation() *RareTermsAggregation { + r := &RareTermsAggregation{} + + return r +} + +type RareTermsAggregationVariant interface { + RareTermsAggregationCaster() *RareTermsAggregation +} + +func (s *RareTermsAggregation) RareTermsAggregationCaster() *RareTermsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregate.go new file mode 100644 index 000000000..981abafbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregate.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RateAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L849-L856 +type RateAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Value Float64 `json:"value"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *RateAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewRateAggregate returns a RateAggregate. +func NewRateAggregate() *RateAggregate { + r := &RateAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregation.go new file mode 100644 index 000000000..ce7a9f1e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rateaggregation.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ratemode" +) + +// RateAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L262-L273 +type RateAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Mode How the rate is calculated. + Mode *ratemode.RateMode `json:"mode,omitempty"` + Script *Script `json:"script,omitempty"` + // Unit The interval used to calculate the rate. + // By default, the interval of the `date_histogram` is used. + Unit *calendarinterval.CalendarInterval `json:"unit,omitempty"` +} + +func (s *RateAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return fmt.Errorf("%s | %w", "Unit", err) + } + + } + } + return nil +} + +// NewRateAggregation returns a RateAggregation. +func NewRateAggregation() *RateAggregation { + r := &RateAggregation{} + + return r +} + +type RateAggregationVariant interface { + RateAggregationCaster() *RateAggregation +} + +func (s *RateAggregation) RateAggregationCaster() *RateAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ratelimitsetting.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ratelimitsetting.go new file mode 100644 index 000000000..073315fa9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ratelimitsetting.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
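RateAggregation is only meaningful nested under a `date_histogram`: `unit` rescales each bucket's rate to the given calendar interval regardless of the bucket width, and `mode` switches between summing field values and counting them. A sketch under the assumption that the generated enum values are addressable package-level variables, as in earlier typedapi releases:

	import (
		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
		"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval"
	)

	field := "price"
	rate := &types.RateAggregation{
		Field: &field,
		Unit:  &calendarinterval.Month, // report a per-month rate even from weekly buckets
	}
	_ = rate // attach under a date_histogram's sub-aggregations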
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RateLimitSetting type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Services.ts#L323-L349 +type RateLimitSetting struct { + // RequestsPerMinute The number of requests allowed per minute. + // By default, the number of requests allowed per minute is set by each service + // as follows: + // + // * `alibabacloud-ai-search` service: `1000` + // * `anthropic` service: `50` + // * `azureaistudio` service: `240` + // * `azureopenai` service and task type `text_embedding`: `1440` + // * `azureopenai` service and task type `completion`: `120` + // * `cohere` service: `10000` + // * `elastic` service and task type `chat_completion`: `240` + // * `googleaistudio` service: `360` + // * `googlevertexai` service: `30000` + // * `hugging_face` service: `3000` + // * `jinaai` service: `2000` + // * `mistral` service: `240` + // * `openai` service and task type `text_embedding`: `3000` + // * `openai` service and task type `completion`: `500` + // * `voyageai` service: `2000` + // * `watsonxai` service: `120` + RequestsPerMinute *int `json:"requests_per_minute,omitempty"` +} + +func (s *RateLimitSetting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "requests_per_minute": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequestsPerMinute", err) + } + s.RequestsPerMinute = &value + case float64: + f := int(v) + s.RequestsPerMinute = &f + } + + } + } + return nil +} + +// NewRateLimitSetting returns a RateLimitSetting. +func NewRateLimitSetting() *RateLimitSetting { + r := &RateLimitSetting{} + + return r +} + +type RateLimitSettingVariant interface { + RateLimitSettingCaster() *RateLimitSetting +} + +func (s *RateLimitSetting) RateLimitSettingCaster() *RateLimitSetting { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readblobdetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readblobdetails.go new file mode 100644 index 000000000..5ba218c3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readblobdetails.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadBlobDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L204-L248 +type ReadBlobDetails struct { + // BeforeWriteComplete Indicates whether the read operation may have started before the write + // operation was complete. + BeforeWriteComplete *bool `json:"before_write_complete,omitempty"` + // Elapsed The length of time spent reading the blob. + // If the blob was not found, this detail is omitted. + Elapsed Duration `json:"elapsed,omitempty"` + // ElapsedNanos The length of time spent reading the blob, in nanoseconds. + // If the blob was not found, this detail is omitted. + ElapsedNanos *int64 `json:"elapsed_nanos,omitempty"` + // FirstByteTime The length of time waiting for the first byte of the read operation to be + // received. + // If the blob was not found, this detail is omitted. + FirstByteTime Duration `json:"first_byte_time,omitempty"` + // FirstByteTimeNanos The length of time waiting for the first byte of the read operation to be + // received, in nanoseconds. + // If the blob was not found, this detail is omitted. + FirstByteTimeNanos int64 `json:"first_byte_time_nanos"` + // Found Indicates whether the blob was found by the read operation. + // If the read was started before the write completed or the write was ended + // before completion, it might be false. + Found bool `json:"found"` + // Node The node that performed the read operation. + Node SnapshotNodeInfo `json:"node"` + // Throttled The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + // If the blob was not found, this detail is omitted. + Throttled Duration `json:"throttled,omitempty"` + // ThrottledNanos The length of time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, + // in nanoseconds. + // If the blob was not found, this detail is omitted. 
+ ThrottledNanos *int64 `json:"throttled_nanos,omitempty"` +} + +func (s *ReadBlobDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "before_write_complete": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BeforeWriteComplete", err) + } + s.BeforeWriteComplete = &value + case bool: + s.BeforeWriteComplete = &v + } + + case "elapsed": + if err := dec.Decode(&s.Elapsed); err != nil { + return fmt.Errorf("%s | %w", "Elapsed", err) + } + + case "elapsed_nanos": + if err := dec.Decode(&s.ElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "ElapsedNanos", err) + } + + case "first_byte_time": + if err := dec.Decode(&s.FirstByteTime); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTime", err) + } + + case "first_byte_time_nanos": + if err := dec.Decode(&s.FirstByteTimeNanos); err != nil { + return fmt.Errorf("%s | %w", "FirstByteTimeNanos", err) + } + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = value + case bool: + s.Found = v + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "throttled": + if err := dec.Decode(&s.Throttled); err != nil { + return fmt.Errorf("%s | %w", "Throttled", err) + } + + case "throttled_nanos": + if err := dec.Decode(&s.ThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "ThrottledNanos", err) + } + + } + } + return nil +} + +// NewReadBlobDetails returns a ReadBlobDetails. +func NewReadBlobDetails() *ReadBlobDetails { + r := &ReadBlobDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readexception.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readexception.go new file mode 100644 index 000000000..ff572275b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readexception.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadException type. 
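+//
+// A minimal decoding sketch (sample payload assumed, not from the spec):
+// `retries` may arrive as a JSON number or as a quoted string, and the
+// custom decoder below accepts both forms:
+//
+//	var ex ReadException
+//	_ = json.Unmarshal([]byte(`{"exception":{"type":"illegal_state_exception","reason":"shard closed"},"from_seq_no":7,"retries":"3"}`), &ex)
+//	// ex.FromSeqNo == 7, ex.Retries == 3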
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ccr/_types/FollowIndexStats.ts#L111-L118 +type ReadException struct { + // Exception The exception that caused the read to fail. + Exception ErrorCause `json:"exception"` + // FromSeqNo The starting sequence number of the batch requested from the leader. + FromSeqNo int64 `json:"from_seq_no"` + // Retries The number of times the batch has been retried. + Retries int `json:"retries"` +} + +func (s *ReadException) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exception": + if err := dec.Decode(&s.Exception); err != nil { + return fmt.Errorf("%s | %w", "Exception", err) + } + + case "from_seq_no": + if err := dec.Decode(&s.FromSeqNo); err != nil { + return fmt.Errorf("%s | %w", "FromSeqNo", err) + } + + case "retries": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Retries", err) + } + s.Retries = value + case float64: + f := int(v) + s.Retries = f + } + + } + } + return nil +} + +// NewReadException returns a ReadException. +func NewReadException() *ReadException { + r := &ReadException{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepository.go new file mode 100644 index 000000000..d6402f665 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepository.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ReadOnlyUrlRepository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L92-L102 +type ReadOnlyUrlRepository struct { + // Settings The repository settings. + Settings ReadOnlyUrlRepositorySettings `json:"settings"` + // Type The read-only URL repository type. 
+ Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *ReadOnlyUrlRepository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ReadOnlyUrlRepository) MarshalJSON() ([]byte, error) { + type innerReadOnlyUrlRepository ReadOnlyUrlRepository + tmp := innerReadOnlyUrlRepository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "url" + + return json.Marshal(tmp) +} + +// NewReadOnlyUrlRepository returns a ReadOnlyUrlRepository. +func NewReadOnlyUrlRepository() *ReadOnlyUrlRepository { + r := &ReadOnlyUrlRepository{} + + return r +} + +type ReadOnlyUrlRepositoryVariant interface { + ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository +} + +func (s *ReadOnlyUrlRepository) ReadOnlyUrlRepositoryCaster() *ReadOnlyUrlRepository { + return s +} + +func (s *ReadOnlyUrlRepository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepositorysettings.go new file mode 100644 index 000000000..3eeb94912 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readonlyurlrepositorysettings.go @@ -0,0 +1,201 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadOnlyUrlRepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L377-L412 +type ReadOnlyUrlRepositorySettings struct { + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. 
+ // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // HttpMaxRetries The maximum number of retries for HTTP and HTTPS URLs. + HttpMaxRetries *int `json:"http_max_retries,omitempty"` + // HttpSocketTimeout The maximum wait time for data transfers over a connection. + HttpSocketTimeout Duration `json:"http_socket_timeout,omitempty"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. + // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. + MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` + // Url The URL location of the root of the shared filesystem repository. + // The following protocols are supported: + // + // * `file` + // * `ftp` + // * `http` + // * `https` + // * `jar` + // + // URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with + // the `repositories.url.allowed_urls` cluster setting. + // This setting supports wildcards in the place of a host, path, query, or + // fragment in the URL. + // + // URLs using the file protocol must point to the location of a shared + // filesystem accessible to all master and data nodes in the cluster. + // This location must be registered in the `path.repo` setting. + // You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols + // in the `path.repo` setting. 
+ Url string `json:"url"` +} + +func (s *ReadOnlyUrlRepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunk_size": + if err := dec.Decode(&s.ChunkSize); err != nil { + return fmt.Errorf("%s | %w", "ChunkSize", err) + } + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "http_max_retries": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "HttpMaxRetries", err) + } + s.HttpMaxRetries = &value + case float64: + f := int(v) + s.HttpMaxRetries = &f + } + + case "http_socket_timeout": + if err := dec.Decode(&s.HttpSocketTimeout); err != nil { + return fmt.Errorf("%s | %w", "HttpSocketTimeout", err) + } + + case "max_number_of_snapshots": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfSnapshots", err) + } + s.MaxNumberOfSnapshots = &value + case float64: + f := int(v) + s.MaxNumberOfSnapshots = &f + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewReadOnlyUrlRepositorySettings returns a ReadOnlyUrlRepositorySettings. +func NewReadOnlyUrlRepositorySettings() *ReadOnlyUrlRepositorySettings { + r := &ReadOnlyUrlRepositorySettings{} + + return r +} + +type ReadOnlyUrlRepositorySettingsVariant interface { + ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings +} + +func (s *ReadOnlyUrlRepositorySettings) ReadOnlyUrlRepositorySettingsCaster() *ReadOnlyUrlRepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readsummaryinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readsummaryinfo.go new file mode 100644 index 000000000..fe66b7f19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/readsummaryinfo.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReadSummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L115-L160 +type ReadSummaryInfo struct { + // Count The number of read operations performed in the test. + Count int `json:"count"` + // MaxWait The maximum time spent waiting for the first byte of any read request to be + // received. + MaxWait Duration `json:"max_wait"` + // MaxWaitNanos The maximum time spent waiting for the first byte of any read request to be + // received, in nanoseconds. + MaxWaitNanos int64 `json:"max_wait_nanos"` + // TotalElapsed The total elapsed time spent on reading blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on reading blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs or partial blobs read in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs or partial blobs read in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_restore_bytes_per_sec` or + // `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. + TotalThrottledNanos int64 `json:"total_throttled_nanos"` + // TotalWait The total time spent waiting for the first byte of each read request to be + // received. + TotalWait Duration `json:"total_wait"` + // TotalWaitNanos The total time spent waiting for the first byte of each read request to be + // received, in nanoseconds. 
+ TotalWaitNanos int64 `json:"total_wait_nanos"` +} + +func (s *ReadSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "max_wait": + if err := dec.Decode(&s.MaxWait); err != nil { + return fmt.Errorf("%s | %w", "MaxWait", err) + } + + case "max_wait_nanos": + if err := dec.Decode(&s.MaxWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "MaxWaitNanos", err) + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + if err := dec.Decode(&s.TotalThrottledNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + } + + case "total_wait": + if err := dec.Decode(&s.TotalWait); err != nil { + return fmt.Errorf("%s | %w", "TotalWait", err) + } + + case "total_wait_nanos": + if err := dec.Decode(&s.TotalWaitNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalWaitNanos", err) + } + + } + } + return nil +} + +// NewReadSummaryInfo returns a ReadSummaryInfo. +func NewReadSummaryInfo() *ReadSummaryInfo { + r := &ReadSummaryInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realmcache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realmcache.go new file mode 100644 index 000000000..3299ef367 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realmcache.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RealmCache type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L276-L278 +type RealmCache struct { + Size int64 `json:"size"` +} + +func (s *RealmCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + +// NewRealmCache returns a RealmCache. +func NewRealmCache() *RealmCache { + r := &RealmCache{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realminfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realminfo.go new file mode 100644 index 000000000..e57855fa8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/realminfo.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RealmInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RealmInfo.ts#L22-L25 +type RealmInfo struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *RealmInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewRealmInfo returns a RealmInfo. 
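+//
+// A minimal round-trip sketch (sample values assumed): the custom decoder
+// above unquotes `type` from its raw JSON form, so a plain json.Unmarshal
+// call is enough:
+//
+//	var realm RealmInfo
+//	_ = json.Unmarshal([]byte(`{"name":"default_native","type":"native"}`), &realm)
+//	// realm.Name == "default_native", realm.Type == "native"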
+func NewRealmInfo() *RealmInfo { + r := &RealmInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recording.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recording.go new file mode 100644 index 000000000..edc215184 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recording.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Recording type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L227-L232 +type Recording struct { + CumulativeExecutionCount *int64 `json:"cumulative_execution_count,omitempty"` + CumulativeExecutionTime Duration `json:"cumulative_execution_time,omitempty"` + CumulativeExecutionTimeMillis *int64 `json:"cumulative_execution_time_millis,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *Recording) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cumulative_execution_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CumulativeExecutionCount", err) + } + s.CumulativeExecutionCount = &value + case float64: + f := int64(v) + s.CumulativeExecutionCount = &f + } + + case "cumulative_execution_time": + if err := dec.Decode(&s.CumulativeExecutionTime); err != nil { + return fmt.Errorf("%s | %w", "CumulativeExecutionTime", err) + } + + case "cumulative_execution_time_millis": + if err := dec.Decode(&s.CumulativeExecutionTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "CumulativeExecutionTimeMillis", err) + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil +} + +// NewRecording returns a Recording. 
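+//
+// A minimal decoding sketch (sample values assumed): the counter fields
+// tolerate both JSON numbers and quoted strings, as handled by the decoder
+// above:
+//
+//	var rec Recording
+//	_ = json.Unmarshal([]byte(`{"name":"retention_leases","cumulative_execution_count":"42"}`), &rec)
+//	// rec.Name != nil && *rec.CumulativeExecutionCount == 42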
+func NewRecording() *Recording { + r := &Recording{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverybytes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverybytes.go new file mode 100644 index 000000000..94ee7be29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverybytes.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RecoveryBytes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L38-L48 +type RecoveryBytes struct { + Percent Percentage `json:"percent"` + Recovered ByteSize `json:"recovered,omitempty"` + RecoveredFromSnapshot ByteSize `json:"recovered_from_snapshot,omitempty"` + RecoveredFromSnapshotInBytes ByteSize `json:"recovered_from_snapshot_in_bytes,omitempty"` + RecoveredInBytes ByteSize `json:"recovered_in_bytes"` + Reused ByteSize `json:"reused,omitempty"` + ReusedInBytes ByteSize `json:"reused_in_bytes"` + Total ByteSize `json:"total,omitempty"` + TotalInBytes ByteSize `json:"total_in_bytes"` +} + +func (s *RecoveryBytes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return fmt.Errorf("%s | %w", "Percent", err) + } + + case "recovered": + if err := dec.Decode(&s.Recovered); err != nil { + return fmt.Errorf("%s | %w", "Recovered", err) + } + + case "recovered_from_snapshot": + if err := dec.Decode(&s.RecoveredFromSnapshot); err != nil { + return fmt.Errorf("%s | %w", "RecoveredFromSnapshot", err) + } + + case "recovered_from_snapshot_in_bytes": + if err := dec.Decode(&s.RecoveredFromSnapshotInBytes); err != nil { + return fmt.Errorf("%s | %w", "RecoveredFromSnapshotInBytes", err) + } + + case "recovered_in_bytes": + if err := dec.Decode(&s.RecoveredInBytes); err != nil { + return fmt.Errorf("%s | %w", "RecoveredInBytes", err) + } + + case "reused": + if err := dec.Decode(&s.Reused); err != nil { + return fmt.Errorf("%s | %w", "Reused", err) + } + + case "reused_in_bytes": + if err := dec.Decode(&s.ReusedInBytes); err != nil { + return fmt.Errorf("%s | %w", "ReusedInBytes", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case 
"total_in_bytes": + if err := dec.Decode(&s.TotalInBytes); err != nil { + return fmt.Errorf("%s | %w", "TotalInBytes", err) + } + + } + } + return nil +} + +// NewRecoveryBytes returns a RecoveryBytes. +func NewRecoveryBytes() *RecoveryBytes { + r := &RecoveryBytes{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryfiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryfiles.go new file mode 100644 index 000000000..47ba6493e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryfiles.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RecoveryFiles type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L56-L62 +type RecoveryFiles struct { + Details []FileDetails `json:"details,omitempty"` + Percent Percentage `json:"percent"` + Recovered int64 `json:"recovered"` + Reused int64 `json:"reused"` + Total int64 `json:"total"` +} + +func (s *RecoveryFiles) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return fmt.Errorf("%s | %w", "Percent", err) + } + + case "recovered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Recovered", err) + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + case "reused": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Reused", err) + } + s.Reused = value + case float64: + f := int64(v) + s.Reused = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewRecoveryFiles returns a RecoveryFiles. 
+func NewRecoveryFiles() *RecoveryFiles { + r := &RecoveryFiles{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryindexstatus.go new file mode 100644 index 000000000..8fd844f95 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryindexstatus.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RecoveryIndexStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L64-L74 +type RecoveryIndexStatus struct { + Bytes *RecoveryBytes `json:"bytes,omitempty"` + Files RecoveryFiles `json:"files"` + Size RecoveryBytes `json:"size"` + SourceThrottleTime Duration `json:"source_throttle_time,omitempty"` + SourceThrottleTimeInMillis int64 `json:"source_throttle_time_in_millis"` + TargetThrottleTime Duration `json:"target_throttle_time,omitempty"` + TargetThrottleTimeInMillis int64 `json:"target_throttle_time_in_millis"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *RecoveryIndexStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes": + if err := dec.Decode(&s.Bytes); err != nil { + return fmt.Errorf("%s | %w", "Bytes", err) + } + + case "files": + if err := dec.Decode(&s.Files); err != nil { + return fmt.Errorf("%s | %w", "Files", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "source_throttle_time": + if err := dec.Decode(&s.SourceThrottleTime); err != nil { + return fmt.Errorf("%s | %w", "SourceThrottleTime", err) + } + + case "source_throttle_time_in_millis": + if err := dec.Decode(&s.SourceThrottleTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "SourceThrottleTimeInMillis", err) + } + + case "target_throttle_time": + if err := dec.Decode(&s.TargetThrottleTime); err != nil { + return fmt.Errorf("%s | %w", "TargetThrottleTime", err) + } + + case "target_throttle_time_in_millis": + if err := dec.Decode(&s.TargetThrottleTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TargetThrottleTimeInMillis", err) + } + + case "total_time": + if err := 
dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewRecoveryIndexStatus returns a RecoveryIndexStatus. +func NewRecoveryIndexStatus() *RecoveryIndexStatus { + r := &RecoveryIndexStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryorigin.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryorigin.go new file mode 100644 index 000000000..fc2d9332a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryorigin.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RecoveryOrigin type. 
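+//
+// A minimal decoding sketch (sample payload assumed, not from the spec):
+// boolean flags such as `bootstrap_new_history_uuid` may be encoded as JSON
+// booleans or as strings; the decoder below accepts either form:
+//
+//	var origin RecoveryOrigin
+//	_ = json.Unmarshal([]byte(`{"bootstrap_new_history_uuid":"true","host":"10.0.0.1"}`), &origin)
+//	// *origin.BootstrapNewHistoryUuid == true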
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L76-L89 +type RecoveryOrigin struct { + BootstrapNewHistoryUuid *bool `json:"bootstrap_new_history_uuid,omitempty"` + Host *string `json:"host,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Id *string `json:"id,omitempty"` + Index *string `json:"index,omitempty"` + Ip *string `json:"ip,omitempty"` + Name *string `json:"name,omitempty"` + Repository *string `json:"repository,omitempty"` + RestoreUUID *string `json:"restoreUUID,omitempty"` + Snapshot *string `json:"snapshot,omitempty"` + TransportAddress *string `json:"transport_address,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *RecoveryOrigin) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bootstrap_new_history_uuid": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "BootstrapNewHistoryUuid", err) + } + s.BootstrapNewHistoryUuid = &value + case bool: + s.BootstrapNewHistoryUuid = &v + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "hostname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Hostname", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hostname = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "repository": + if err := dec.Decode(&s.Repository); err != nil { + return fmt.Errorf("%s | %w", "Repository", err) + } + + case "restoreUUID": + if err := dec.Decode(&s.RestoreUUID); err != nil { + return fmt.Errorf("%s | %w", "RestoreUUID", err) + } + + case "snapshot": + if err := dec.Decode(&s.Snapshot); err != nil { + return fmt.Errorf("%s | %w", "Snapshot", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewRecoveryOrigin returns a RecoveryOrigin. +func NewRecoveryOrigin() *RecoveryOrigin { + r := &RecoveryOrigin{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryrecord.go new file mode 100644 index 000000000..40c8f54c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoveryrecord.go @@ -0,0 +1,364 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RecoveryRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/recovery/types.ts#L24-L155 +type RecoveryRecord struct { + // Bytes The number of bytes to recover. + Bytes *string `json:"bytes,omitempty"` + // BytesPercent The ratio of bytes recovered. + BytesPercent Percentage `json:"bytes_percent,omitempty"` + // BytesRecovered The bytes recovered. + BytesRecovered *string `json:"bytes_recovered,omitempty"` + // BytesTotal The total number of bytes. + BytesTotal *string `json:"bytes_total,omitempty"` + // Files The number of files to recover. + Files *string `json:"files,omitempty"` + // FilesPercent The ratio of files recovered. + FilesPercent Percentage `json:"files_percent,omitempty"` + // FilesRecovered The files recovered. + FilesRecovered *string `json:"files_recovered,omitempty"` + // FilesTotal The total number of files. + FilesTotal *string `json:"files_total,omitempty"` + // Index The index name. + Index *string `json:"index,omitempty"` + // Repository The repository name. + Repository *string `json:"repository,omitempty"` + // Shard The shard name. + Shard *string `json:"shard,omitempty"` + // Snapshot The snapshot name. + Snapshot *string `json:"snapshot,omitempty"` + // SourceHost The source host. + SourceHost *string `json:"source_host,omitempty"` + // SourceNode The source node name. + SourceNode *string `json:"source_node,omitempty"` + // Stage The recovery stage. + Stage *string `json:"stage,omitempty"` + // StartTime The recovery start time. + StartTime DateTime `json:"start_time,omitempty"` + // StartTimeMillis The recovery start time in epoch milliseconds. + StartTimeMillis *int64 `json:"start_time_millis,omitempty"` + // StopTime The recovery stop time. + StopTime DateTime `json:"stop_time,omitempty"` + // StopTimeMillis The recovery stop time in epoch milliseconds. + StopTimeMillis *int64 `json:"stop_time_millis,omitempty"` + // TargetHost The target host. + TargetHost *string `json:"target_host,omitempty"` + // TargetNode The target node name. + TargetNode *string `json:"target_node,omitempty"` + // Time The recovery time. + Time Duration `json:"time,omitempty"` + // TranslogOps The number of translog operations to recover. + TranslogOps *string `json:"translog_ops,omitempty"` + // TranslogOpsPercent The ratio of translog operations recovered. + TranslogOpsPercent Percentage `json:"translog_ops_percent,omitempty"` + // TranslogOpsRecovered The translog operations recovered. + TranslogOpsRecovered *string `json:"translog_ops_recovered,omitempty"` + // Type The recovery type. 
+ Type *string `json:"type,omitempty"` +} + +func (s *RecoveryRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes", "b": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Bytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bytes = &o + + case "bytes_percent", "bp": + if err := dec.Decode(&s.BytesPercent); err != nil { + return fmt.Errorf("%s | %w", "BytesPercent", err) + } + + case "bytes_recovered", "br": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BytesRecovered", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BytesRecovered = &o + + case "bytes_total", "tb": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BytesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BytesTotal = &o + + case "files", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Files", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Files = &o + + case "files_percent", "fp": + if err := dec.Decode(&s.FilesPercent); err != nil { + return fmt.Errorf("%s | %w", "FilesPercent", err) + } + + case "files_recovered", "fr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FilesRecovered", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FilesRecovered = &o + + case "files_total", "tf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FilesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FilesTotal = &o + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "repository", "rep": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Repository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = &o + + case "shard", "s", "sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "snapshot", "snap": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Snapshot", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = &o + + case "source_host", "shost": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SourceHost", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceHost = &o + + case "source_node", "snode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SourceNode", err) + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceNode = &o + + case "stage", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Stage", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Stage = &o + + case "start_time", "start": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_millis", "start_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeMillis", err) + } + + case "stop_time", "stop": + if err := dec.Decode(&s.StopTime); err != nil { + return fmt.Errorf("%s | %w", "StopTime", err) + } + + case "stop_time_millis", "stop_millis": + if err := dec.Decode(&s.StopTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "StopTimeMillis", err) + } + + case "target_host", "thost": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetHost", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetHost = &o + + case "target_node", "tnode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetNode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetNode = &o + + case "time", "t", "ti": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "translog_ops", "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TranslogOps", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TranslogOps = &o + + case "translog_ops_percent", "top": + if err := dec.Decode(&s.TranslogOpsPercent); err != nil { + return fmt.Errorf("%s | %w", "TranslogOpsPercent", err) + } + + case "translog_ops_recovered", "tor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TranslogOpsRecovered", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TranslogOpsRecovered = &o + + case "type", "ty": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewRecoveryRecord returns a RecoveryRecord. +func NewRecoveryRecord() *RecoveryRecord { + r := &RecoveryRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystartstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystartstatus.go new file mode 100644 index 000000000..938111485 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystartstatus.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
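Usage sketch for the RecoveryRecord decoder above: it accepts both the long cat-API column names and their short aliases ("i" for "index", "st" for "stage", "b" for "bytes"), unquoting string payloads as it goes. A minimal, self-contained example; the index name and byte count are invented values, not taken from this diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        // Short cat aliases populate the same fields as the long names:
        // "i" -> Index, "st" -> Stage, "b" -> Bytes.
        row := []byte(`{"i":"logs-2024","st":"done","b":"1024"}`)

        var rec types.RecoveryRecord
        if err := json.Unmarshal(row, &rec); err != nil {
            panic(err)
        }
        fmt.Println(*rec.Index, *rec.Stage, *rec.Bytes) // logs-2024 done 1024
    }

The same alias handling applies to every column of the cat recovery response, which is why the generated switch carries multiple case labels per field.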
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RecoveryStartStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L91-L96 +type RecoveryStartStatus struct { + CheckIndexTime Duration `json:"check_index_time,omitempty"` + CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *RecoveryStartStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_index_time": + if err := dec.Decode(&s.CheckIndexTime); err != nil { + return fmt.Errorf("%s | %w", "CheckIndexTime", err) + } + + case "check_index_time_in_millis": + if err := dec.Decode(&s.CheckIndexTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "CheckIndexTimeInMillis", err) + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewRecoveryStartStatus returns a RecoveryStartStatus. +func NewRecoveryStartStatus() *RecoveryStartStatus { + r := &RecoveryStartStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystats.go new file mode 100644 index 000000000..0f0b78ce0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystats.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RecoveryStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L255-L260 +type RecoveryStats struct { + CurrentAsSource int64 `json:"current_as_source"` + CurrentAsTarget int64 `json:"current_as_target"` + ThrottleTime Duration `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +func (s *RecoveryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_as_source": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentAsSource", err) + } + s.CurrentAsSource = value + case float64: + f := int64(v) + s.CurrentAsSource = f + } + + case "current_as_target": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentAsTarget", err) + } + s.CurrentAsTarget = value + case float64: + f := int64(v) + s.CurrentAsTarget = f + } + + case "throttle_time": + if err := dec.Decode(&s.ThrottleTime); err != nil { + return fmt.Errorf("%s | %w", "ThrottleTime", err) + } + + case "throttle_time_in_millis": + if err := dec.Decode(&s.ThrottleTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottleTimeInMillis", err) + } + + } + } + return nil +} + +// NewRecoveryStats returns a RecoveryStats. +func NewRecoveryStats() *RecoveryStats { + r := &RecoveryStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystatus.go new file mode 100644 index 000000000..46dd815dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/recoverystatus.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RecoveryStatus type. 
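The RecoveryStats decoder above illustrates the leniency pattern used throughout these generated types: int64 counters are accepted either as JSON numbers or as quoted strings. A minimal sketch with invented counter values:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        // Both payloads decode to the same struct; the string form goes
        // through strconv.ParseInt, the number form through a float64 cast.
        inputs := []string{
            `{"current_as_source":3,"current_as_target":1,"throttle_time_in_millis":0}`,
            `{"current_as_source":"3","current_as_target":"1","throttle_time_in_millis":0}`,
        }
        for _, raw := range inputs {
            var st types.RecoveryStats
            if err := json.Unmarshal([]byte(raw), &st); err != nil {
                panic(err)
            }
            fmt.Println(st.CurrentAsSource, st.CurrentAsTarget) // 3 1
        }
    }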
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L98-L100 +type RecoveryStatus struct { + Shards []ShardRecovery `json:"shards"` +} + +// NewRecoveryStatus returns a RecoveryStatus. +func NewRecoveryStatus() *RecoveryStatus { + r := &RecoveryStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redact.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redact.go new file mode 100644 index 000000000..5bcc108c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redact.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Redact type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L38-L43 +type Redact struct { + // IsRedacted_ indicates if document has been redacted + IsRedacted_ bool `json:"_is_redacted"` +} + +func (s *Redact) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_is_redacted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsRedacted_", err) + } + s.IsRedacted_ = value + case bool: + s.IsRedacted_ = v + } + + } + } + return nil +} + +// NewRedact returns a Redact. +func NewRedact() *Redact { + r := &Redact{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redactprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redactprocessor.go new file mode 100644 index 000000000..960e16e44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/redactprocessor.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RedactProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1298-L1339 +type RedactProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be redacted + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + PatternDefinitions map[string]string `json:"pattern_definitions,omitempty"` + // Patterns A list of grok expressions to match and redact named captures with + Patterns []string `json:"patterns"` + // Prefix Start a redacted section with this token + Prefix *string `json:"prefix,omitempty"` + // SkipIfUnlicensed If `true` and the current license does not support running redact processors, + // then the processor quietly exits without modifying the document + SkipIfUnlicensed *bool `json:"skip_if_unlicensed,omitempty"` + // Suffix End a redacted section with this token + Suffix *string `json:"suffix,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` + // TraceRedact If `true` then ingest metadata `_ingest._redact._is_redacted` is set to + // `true` if the document has been redacted + TraceRedact *bool `json:"trace_redact,omitempty"` +} + +func (s *RedactProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "pattern_definitions": + if s.PatternDefinitions == nil { + s.PatternDefinitions = make(map[string]string, 0) + } + if err := dec.Decode(&s.PatternDefinitions); err != nil { + return fmt.Errorf("%s | %w", "PatternDefinitions", err) + } + + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return fmt.Errorf("%s | %w", "Patterns", err) + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = &o + + case "skip_if_unlicensed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipIfUnlicensed", err) + } + s.SkipIfUnlicensed = &value + case bool: + s.SkipIfUnlicensed = &v + } + + case "suffix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Suffix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Suffix = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "trace_redact": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TraceRedact", err) + } + s.TraceRedact = &value + case bool: + s.TraceRedact = &v + } + + } + } + return nil +} + +// NewRedactProcessor returns a RedactProcessor. 
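For the RedactProcessor above, construction goes through the generated constructor and plain struct assignment; optional fields are pointers, so unset options are omitted from the marshalled pipeline definition. A minimal sketch, with an invented field name and grok pattern:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        // Only Field and Patterns are required; Prefix/Suffix override the
        // default redaction markers.
        p := types.NewRedactProcessor()
        p.Field = "message"
        p.Patterns = []string{"%{EMAILADDRESS:EMAIL}"}
        prefix, suffix := "[", "]"
        p.Prefix, p.Suffix = &prefix, &suffix

        out, err := json.Marshal(p)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
        // {"field":"message","patterns":["%{EMAILADDRESS:EMAIL}"],"prefix":"[","suffix":"]"}
    }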
+func NewRedactProcessor() *RedactProcessor { + r := &RedactProcessor{ + PatternDefinitions: make(map[string]string), + } + + return r +} + +type RedactProcessorVariant interface { + RedactProcessorCaster() *RedactProcessor +} + +func (s *RedactProcessor) RedactProcessorCaster() *RedactProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/refreshstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/refreshstats.go new file mode 100644 index 000000000..6be8ab5e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/refreshstats.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RefreshStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L262-L269 +type RefreshStats struct { + ExternalTotal int64 `json:"external_total"` + ExternalTotalTimeInMillis int64 `json:"external_total_time_in_millis"` + Listeners int64 `json:"listeners"` + Total int64 `json:"total"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *RefreshStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "external_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ExternalTotal", err) + } + s.ExternalTotal = value + case float64: + f := int64(v) + s.ExternalTotal = f + } + + case "external_total_time_in_millis": + if err := dec.Decode(&s.ExternalTotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "ExternalTotalTimeInMillis", err) + } + + case "listeners": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Listeners", err) + } + s.Listeners = value + case float64: + f := int64(v) + s.Listeners = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := 
dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewRefreshStats returns a RefreshStats. +func NewRefreshStats() *RefreshStats { + r := &RefreshStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexoptions.go new file mode 100644 index 000000000..06c5017fa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexoptions.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegexOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L184-L195 +type RegexOptions struct { + // Flags Optional operators for the regular expression. + Flags *string `json:"flags,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. + MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` +} + +func (s *RegexOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "max_determinized_states": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDeterminizedStates", err) + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + } + } + return nil +} + +// NewRegexOptions returns a RegexOptions. 
+func NewRegexOptions() *RegexOptions { + r := &RegexOptions{} + + return r +} + +type RegexOptionsVariant interface { + RegexOptionsCaster() *RegexOptions +} + +func (s *RegexOptions) RegexOptionsCaster() *RegexOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexpquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexpquery.go new file mode 100644 index 000000000..5e8c851b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexpquery.go @@ -0,0 +1,190 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegexpQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L199-L232 +type RegexpQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows case insensitive matching of the regular expression value with the + // indexed field values when set to `true`. + // When `false`, case sensitivity of matching depends on the underlying field’s + // mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + // Flags Enables optional operators for the regular expression. + Flags *string `json:"flags,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. + MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Value Regular expression for terms you wish to find in the provided field. + Value string `json:"value"` +} + +func (s *RegexpQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) 
+ } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseInsensitive", err) + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "max_determinized_states": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDeterminizedStates", err) + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + +// NewRegexpQuery returns a RegexpQuery. +func NewRegexpQuery() *RegexpQuery { + r := &RegexpQuery{} + + return r +} + +type RegexpQueryVariant interface { + RegexpQueryCaster() *RegexpQuery +} + +func (s *RegexpQuery) RegexpQueryCaster() *RegexpQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexvalidation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexvalidation.go new file mode 100644 index 000000000..87da2848d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regexvalidation.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
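The non-object branch at the top of RegexpQuery's UnmarshalJSON accepts a bare string as shorthand for the full object form, assigning it directly to Value. A minimal sketch showing both spellings decode identically; the pattern is illustrative:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        // `"user.*"` is treated as {"value":"user.*"}; the object form can
        // additionally carry boost, flags, case_insensitive, and so on.
        for _, raw := range []string{
            `"user.*"`,
            `{"value":"user.*","case_insensitive":true}`,
        } {
            var q types.RegexpQuery
            if err := json.Unmarshal([]byte(raw), &q); err != nil {
                panic(err)
            }
            fmt.Println(q.Value) // user.* both times
        }
    }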
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegexValidation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L78-L81 +type RegexValidation struct { + Constraint string `json:"constraint"` + Type string `json:"type,omitempty"` +} + +func (s *RegexValidation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "constraint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Constraint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Constraint = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RegexValidation) MarshalJSON() ([]byte, error) { + type innerRegexValidation RegexValidation + tmp := innerRegexValidation{ + Constraint: s.Constraint, + Type: s.Type, + } + + tmp.Type = "regex" + + return json.Marshal(tmp) +} + +// NewRegexValidation returns a RegexValidation. +func NewRegexValidation() *RegexValidation { + r := &RegexValidation{} + + return r +} + +type RegexValidationVariant interface { + RegexValidationCaster() *RegexValidation +} + +func (s *RegexValidation) RegexValidationCaster() *RegexValidation { + return s +} + +func (s *RegexValidation) ValidationCaster() *Validation { + o := Validation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/registereddomainprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/registereddomainprocessor.go new file mode 100644 index 000000000..3aaf8bea4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/registereddomainprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegisteredDomainProcessor type. 
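The custom MarshalJSON on RegexValidation above pins the discriminator, so the serialized form always carries "type":"regex" even when the caller leaves Type unset. A minimal sketch with an invented constraint:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
    )

    func main() {
        v := types.NewRegexValidation()
        v.Constraint = "^https?://"

        out, err := json.Marshal(v)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"constraint":"^https?://","type":"regex"}
    }

This is the same literal-value pattern the connector validation variants use so that the server can dispatch on "type" without the caller managing it.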
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1341-L1357 +type RegisteredDomainProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing the source FQDN. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If true and any required fields are missing, the processor quietly exits + // without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Object field containing extracted domain components. If an empty string, + // the processor adds components to the document’s root. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *RegisteredDomainProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewRegisteredDomainProcessor returns a RegisteredDomainProcessor. 
+func NewRegisteredDomainProcessor() *RegisteredDomainProcessor { + r := &RegisteredDomainProcessor{} + + return r +} + +type RegisteredDomainProcessorVariant interface { + RegisteredDomainProcessorCaster() *RegisteredDomainProcessor +} + +func (s *RegisteredDomainProcessor) RegisteredDomainProcessorCaster() *RegisteredDomainProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regressioninferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regressioninferenceoptions.go new file mode 100644 index 000000000..4b6555fd2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/regressioninferenceoptions.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RegressionInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L107-L116 +type RegressionInferenceOptions struct { + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` +} + +func (s *RegressionInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_feature_importance_values": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumTopFeatureImportanceValues", err) + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + + } + } + return nil +} + +// NewRegressionInferenceOptions returns a RegressionInferenceOptions. 
+func NewRegressionInferenceOptions() *RegressionInferenceOptions { + r := &RegressionInferenceOptions{} + + return r +} + +type RegressionInferenceOptionsVariant interface { + RegressionInferenceOptionsCaster() *RegressionInferenceOptions +} + +func (s *RegressionInferenceOptions) RegressionInferenceOptionsCaster() *RegressionInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexdestination.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexdestination.go new file mode 100644 index 000000000..f8b48dbb0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexdestination.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// ReindexDestination type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex/types.ts#L39-L67 +type ReindexDestination struct { + // Index The name of the data stream, index, or index alias you are copying to. + Index string `json:"index"` + // OpType If it is `create`, the operation will only index documents that do not + // already exist (also known as "put if absent"). + // + // IMPORTANT: To reindex to a data stream destination, this argument must be + // `create`. + OpType *optype.OpType `json:"op_type,omitempty"` + // Pipeline The name of the pipeline to use. + Pipeline *string `json:"pipeline,omitempty"` + // Routing By default, a document's routing is preserved unless it's changed by the + // script. + // If it is `keep`, the routing on the bulk request sent for each match is set + // to the routing on the match. + // If it is `discard`, the routing on the bulk request sent for each match is + // set to `null`. + // If it is `=value`, the routing on the bulk request sent for each match is set + // to all value specified after the equals sign (`=`). + Routing *string `json:"routing,omitempty"` + // VersionType The versioning to use for the indexing operation. 
+ VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *ReindexDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "op_type": + if err := dec.Decode(&s.OpType); err != nil { + return fmt.Errorf("%s | %w", "OpType", err) + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewReindexDestination returns a ReindexDestination. +func NewReindexDestination() *ReindexDestination { + r := &ReindexDestination{} + + return r +} + +type ReindexDestinationVariant interface { + ReindexDestinationCaster() *ReindexDestination +} + +func (s *ReindexDestination) ReindexDestinationCaster() *ReindexDestination { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexnode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexnode.go new file mode 100644 index 000000000..d70ade0f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexnode.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// ReindexNode type. 
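Per the ReindexDestination field documentation above, reindexing into a data stream requires op_type "create". A minimal sketch, assuming the vendored optype enum exports Create as its other enum packages do; the index name is invented (ReindexSource, defined later in this diff, pairs with it and accepts either a single index name or a list):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/elastic/go-elasticsearch/v9/typedapi/types"
        "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype"
    )

    func main() {
        // op_type create ("put if absent") is mandatory for data stream
        // destinations; assumes optype.Create exists in the vendored enum.
        dest := types.NewReindexDestination()
        dest.Index = "logs-app-default"
        op := optype.Create
        dest.OpType = &op

        out, err := json.Marshal(dest)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"index":"logs-app-default","op_type":"create"}
    }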
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex_rethrottle/types.ts#L33-L35 +type ReindexNode struct { + Attributes map[string]string `json:"attributes"` + Host string `json:"host"` + Ip string `json:"ip"` + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles,omitempty"` + Tasks map[string]ReindexTask `json:"tasks"` + TransportAddress string `json:"transport_address"` +} + +func (s *ReindexNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = make(map[string]ReindexTask, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewReindexNode returns a ReindexNode. +func NewReindexNode() *ReindexNode { + r := &ReindexNode{ + Attributes: make(map[string]string), + Tasks: make(map[string]ReindexTask), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexsource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexsource.go new file mode 100644 index 000000000..544b5a819 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexsource.go @@ -0,0 +1,185 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReindexSource type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex/types.ts#L69-L110 +type ReindexSource struct { + // Index The name of the data stream, index, or alias you are copying from. + // It accepts a comma-separated list to reindex from multiple sources. + Index []string `json:"index"` + // Query The documents to reindex, which is defined with Query DSL. + Query *Query `json:"query,omitempty"` + // Remote A remote instance of Elasticsearch that you want to index from. + Remote *RemoteSource `json:"remote,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + // Size The number of documents to index per batch. + // Use it when you are indexing from remote to ensure that the batches fit + // within the on-heap buffer, which defaults to a maximum size of 100 MB. + Size *int `json:"size,omitempty"` + // Slice Slice the reindex request manually using the provided slice ID and total + // number of slices. + Slice *SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of `:` pairs to sort by before + // indexing. + // Use it in conjunction with `max_docs` to control what documents are + // reindexed. + // + // WARNING: Sort in reindex is deprecated. + // Sorting in reindex was never guaranteed to index documents in order and + // prevents further development of reindex such as resilience and performance + // improvements. + // If used in combination with `max_docs`, consider using a query filter + // instead. + Sort []SortCombinations `json:"sort,omitempty"` + // SourceFields_ If `true`, reindex all source fields. + // Set it to a list to reindex select fields. + SourceFields_ []string `json:"_source,omitempty"` +} + +func (s *ReindexSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "remote": + if err := dec.Decode(&s.Remote); err != nil { + return fmt.Errorf("%s | %w", "Remote", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "slice": + if err := dec.Decode(&s.Slice); err != nil { + return fmt.Errorf("%s | %w", "Slice", err) + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "SourceFields_", err) + } + + s.SourceFields_ = append(s.SourceFields_, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SourceFields_); err != nil { + return fmt.Errorf("%s | %w", "SourceFields_", err) + } + } + + } + } + return nil +} + +// NewReindexSource returns a ReindexSource. +func NewReindexSource() *ReindexSource { + r := &ReindexSource{} + + return r +} + +type ReindexSourceVariant interface { + ReindexSourceCaster() *ReindexSource +} + +func (s *ReindexSource) ReindexSourceCaster() *ReindexSource { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexstatus.go new file mode 100644 index 000000000..f84ee4769 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindexstatus.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReindexStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex_rethrottle/types.ts#L37-L85 +type ReindexStatus struct { + // Batches The number of scroll responses pulled back by the reindex. + Batches int64 `json:"batches"` + // Created The number of documents that were successfully created. + Created int64 `json:"created"` + // Deleted The number of documents that were successfully deleted. + Deleted int64 `json:"deleted"` + // Noops The number of documents that were ignored because the script used for the + // reindex returned a `noop` value for `ctx.op`. + Noops int64 `json:"noops"` + // RequestsPerSecond The number of requests per second effectively executed during the reindex. + RequestsPerSecond float32 `json:"requests_per_second"` + // Retries The number of retries attempted by reindex. `bulk` is the number of bulk + // actions retried and `search` is the number of search actions retried. + Retries Retries `json:"retries"` + Throttled Duration `json:"throttled,omitempty"` + // ThrottledMillis Number of milliseconds the request slept to conform to `requests_per_second`. 
+ ThrottledMillis int64 `json:"throttled_millis"` + ThrottledUntil Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a `_reindex` response. + // It only has meaning when using the Task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be executed again in + // order to conform to `requests_per_second`. + ThrottledUntilMillis int64 `json:"throttled_until_millis"` + // Total The number of documents that were successfully processed. + Total int64 `json:"total"` + // Updated The number of documents that were successfully updated, for example, a + // document with same ID already existed prior to reindex updating it. + Updated int64 `json:"updated"` + // VersionConflicts The number of version conflicts that reindex hits. + VersionConflicts int64 `json:"version_conflicts"` +} + +func (s *ReindexStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "batches": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Batches", err) + } + s.Batches = value + case float64: + f := int64(v) + s.Batches = f + } + + case "created": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Created", err) + } + s.Created = value + case float64: + f := int64(v) + s.Created = f + } + + case "deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Deleted", err) + } + s.Deleted = value + case float64: + f := int64(v) + s.Deleted = f + } + + case "noops": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Noops", err) + } + s.Noops = value + case float64: + f := int64(v) + s.Noops = f + } + + case "requests_per_second": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "RequestsPerSecond", err) + } + f := float32(value) + s.RequestsPerSecond = f + case float64: + f := float32(v) + s.RequestsPerSecond = f + } + + case "retries": + if err := dec.Decode(&s.Retries); err != nil { + return fmt.Errorf("%s | %w", "Retries", err) + } + + case "throttled": + if err := dec.Decode(&s.Throttled); err != nil { + return fmt.Errorf("%s | %w", "Throttled", err) + } + + case "throttled_millis": + if err := dec.Decode(&s.ThrottledMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottledMillis", err) + } + + case "throttled_until": + if err := dec.Decode(&s.ThrottledUntil); err != nil { + return fmt.Errorf("%s | %w", "ThrottledUntil", err) + } + + case "throttled_until_millis": + if err := dec.Decode(&s.ThrottledUntilMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottledUntilMillis", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + 
case "updated": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Updated", err) + } + s.Updated = value + case float64: + f := int64(v) + s.Updated = f + } + + case "version_conflicts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "VersionConflicts", err) + } + s.VersionConflicts = value + case float64: + f := int64(v) + s.VersionConflicts = f + } + + } + } + return nil +} + +// NewReindexStatus returns a ReindexStatus. +func NewReindexStatus() *ReindexStatus { + r := &ReindexStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindextask.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindextask.go new file mode 100644 index 000000000..a6af4d1d5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reindextask.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReindexTask type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex_rethrottle/types.ts#L87-L98 +type ReindexTask struct { + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Description string `json:"description"` + Headers HttpHeaders `json:"headers"` + Id int64 `json:"id"` + Node string `json:"node"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + Status ReindexStatus `json:"status"` + Type string `json:"type"` +} + +func (s *ReindexTask) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cancellable", err) + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "running_time_in_nanos": + if err := dec.Decode(&s.RunningTimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "RunningTimeInNanos", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewReindexTask returns a ReindexTask. +func NewReindexTask() *ReindexTask { + r := &ReindexTask{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloaddetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloaddetails.go new file mode 100644 index 000000000..ac63bd4ab --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloaddetails.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReloadDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/reload_search_analyzers/types.ts#L27-L31 +type ReloadDetails struct { + Index string `json:"index"` + ReloadedAnalyzers []string `json:"reloaded_analyzers"` + ReloadedNodeIds []string `json:"reloaded_node_ids"` +} + +func (s *ReloadDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o + + case "reloaded_analyzers": + if err := dec.Decode(&s.ReloadedAnalyzers); err != nil { + return fmt.Errorf("%s | %w", "ReloadedAnalyzers", err) + } + + case "reloaded_node_ids": + if err := dec.Decode(&s.ReloadedNodeIds); err != nil { + return fmt.Errorf("%s | %w", "ReloadedNodeIds", err) + } + + } + } + return nil +} + +// NewReloadDetails returns a ReloadDetails. +func NewReloadDetails() *ReloadDetails { + r := &ReloadDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloadresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloadresult.go new file mode 100644 index 000000000..d496a730d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reloadresult.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ReloadResult type. 
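// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// ReloadResult mirrors the body of an indices reload-search-analyzers response.
// A hedged decode example; the payload below is invented:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	payload := []byte(`{
		"_shards": {"total": 2, "successful": 2, "failed": 0},
		"reload_details": [{
			"index": "my-index",
			"reloaded_analyzers": ["my_synonym_analyzer"],
			"reloaded_node_ids": ["node-1"]
		}]
	}`)

	var result types.ReloadResult
	if err := json.Unmarshal(payload, &result); err != nil {
		panic(err)
	}
	fmt.Println(result.ReloadDetails[0].Index, result.ReloadDetails[0].ReloadedAnalyzers)
}
// ----- end editor's sketch -----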
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/reload_search_analyzers/types.ts#L22-L25 +type ReloadResult struct { + ReloadDetails []ReloadDetails `json:"reload_details"` + Shards_ ShardStatistics `json:"_shards"` +} + +// NewReloadResult returns a ReloadResult. +func NewReloadResult() *ReloadResult { + r := &ReloadResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/relocationfailureinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/relocationfailureinfo.go new file mode 100644 index 000000000..d22fe1380 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/relocationfailureinfo.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RelocationFailureInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Node.ts#L67-L69 +type RelocationFailureInfo struct { + FailedAttempts int `json:"failed_attempts"` +} + +func (s *RelocationFailureInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failed_attempts": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailedAttempts", err) + } + s.FailedAttempts = value + case float64: + f := int(v) + s.FailedAttempts = f + } + + } + } + return nil +} + +// NewRelocationFailureInfo returns a RelocationFailureInfo. +func NewRelocationFailureInfo() *RelocationFailureInfo { + r := &RelocationFailureInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterinfo.go new file mode 100644 index 000000000..bfd9d647a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterinfo.go @@ -0,0 +1,290 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// RemoteClusterInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L786-L817 +type RemoteClusterInfo struct { + // ClusterUuid The UUID of the remote cluster. + ClusterUuid string `json:"cluster_uuid"` + // IndicesCount The total number of indices in the remote cluster. + IndicesCount int `json:"indices_count"` + // IndicesTotalSize Total data set size of all shards assigned to selected nodes, as a + // human-readable string. + IndicesTotalSize *string `json:"indices_total_size,omitempty"` + // IndicesTotalSizeInBytes Total data set size, in bytes, of all shards assigned to selected nodes. + IndicesTotalSizeInBytes int64 `json:"indices_total_size_in_bytes"` + // MaxHeap Maximum amount of memory available for use by the heap across the nodes of + // the remote cluster, as a human-readable string. + MaxHeap *string `json:"max_heap,omitempty"` + // MaxHeapInBytes Maximum amount of memory, in bytes, available for use by the heap across the + // nodes of the remote cluster. + MaxHeapInBytes int64 `json:"max_heap_in_bytes"` + // MemTotal Total amount of physical memory across the nodes of the remote cluster, as a + // human-readable string. + MemTotal *string `json:"mem_total,omitempty"` + // MemTotalInBytes Total amount, in bytes, of physical memory across the nodes of the remote + // cluster. + MemTotalInBytes int64 `json:"mem_total_in_bytes"` + // Mode The connection mode used to communicate with the remote cluster. + Mode string `json:"mode"` + // NodesCount The total count of nodes in the remote cluster. + NodesCount int `json:"nodes_count"` + // ShardsCount The total number of shards in the remote cluster. + ShardsCount int `json:"shards_count"` + // SkipUnavailable The `skip_unavailable` setting used for this remote cluster. + SkipUnavailable bool `json:"skip_unavailable"` + // Status Health status of the cluster, based on the state of its primary and replica + // shards. + Status healthstatus.HealthStatus `json:"status"` + // TransportCompress Transport compression setting used for this remote cluster. + TransportCompress string `json:"transport.compress"` + // Version The list of Elasticsearch versions used by the nodes on the remote cluster. 
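// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// RemoteClusterInfo is reported by the cluster stats API for each configured
// remote. The *_in_bytes fields are always present, while their human-readable
// twins are optional pointers. A hedged decode example; the payload is invented
// and heavily trimmed:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	payload := []byte(`{
		"cluster_uuid": "abc123", "mode": "sniff", "skip_unavailable": false,
		"status": "green", "nodes_count": 3, "indices_count": 12,
		"indices_total_size_in_bytes": 1048576, "max_heap_in_bytes": 0,
		"mem_total_in_bytes": 0, "shards_count": 24,
		"transport.compress": "true", "version": ["8.17.0"]
	}`)

	var info types.RemoteClusterInfo
	if err := json.Unmarshal(payload, &info); err != nil {
		panic(err)
	}
	fmt.Println(info.Mode, info.Status, info.IndicesTotalSizeInBytes)
}
// ----- end editor's sketch -----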
+ Version []string `json:"version"` +} + +func (s *RemoteClusterInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_uuid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClusterUuid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterUuid = o + + case "indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesCount", err) + } + s.IndicesCount = value + case float64: + f := int(v) + s.IndicesCount = f + } + + case "indices_total_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndicesTotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndicesTotalSize = &o + + case "indices_total_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesTotalSizeInBytes", err) + } + s.IndicesTotalSizeInBytes = value + case float64: + f := int64(v) + s.IndicesTotalSizeInBytes = f + } + + case "max_heap": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxHeap", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxHeap = &o + + case "max_heap_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxHeapInBytes", err) + } + s.MaxHeapInBytes = value + case float64: + f := int64(v) + s.MaxHeapInBytes = f + } + + case "mem_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MemTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemTotal = &o + + case "mem_total_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemTotalInBytes", err) + } + s.MemTotalInBytes = value + case float64: + f := int64(v) + s.MemTotalInBytes = f + } + + case "mode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mode = o + + case "nodes_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NodesCount", err) + } + s.NodesCount = value + case float64: + f := int(v) + s.NodesCount = f + } + + case "shards_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardsCount", err) + } + s.ShardsCount = value + case float64: + f := int(v) + s.ShardsCount = f + } + + case "skip_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if 
err != nil { + return fmt.Errorf("%s | %w", "SkipUnavailable", err) + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "transport.compress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TransportCompress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TransportCompress = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewRemoteClusterInfo returns a RemoteClusterInfo. +func NewRemoteClusterInfo() *RemoteClusterInfo { + r := &RemoteClusterInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterprivileges.go new file mode 100644 index 000000000..9441014af --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteclusterprivileges.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege" +) + +// RemoteClusterPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L280-L292 +type RemoteClusterPrivileges struct { + // Clusters A list of cluster aliases to which the permissions in this entry apply. + Clusters []string `json:"clusters"` + // Privileges The cluster level privileges that owners of the role have on the remote + // cluster. 
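// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// RemoteClusterPrivileges decodes one entry of a role's remote-cluster section.
// The fragment below is invented; note that "clusters" may be a bare string,
// which the generated decoder for this type normalizes into the slice:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	fragment := []byte(`{"clusters": "my-remote-*", "privileges": ["monitor_enrich"]}`)

	var rcp types.RemoteClusterPrivileges
	if err := json.Unmarshal(fragment, &rcp); err != nil {
		panic(err)
	}
	fmt.Println(rcp.Clusters, rcp.Privileges)
}
// ----- end editor's sketch -----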
+ Privileges []remoteclusterprivilege.RemoteClusterPrivilege `json:"privileges"` +} + +func (s *RemoteClusterPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clusters": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + s.Clusters = append(s.Clusters, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + } + } + return nil +} + +// NewRemoteClusterPrivileges returns a RemoteClusterPrivileges. +func NewRemoteClusterPrivileges() *RemoteClusterPrivileges { + r := &RemoteClusterPrivileges{} + + return r +} + +type RemoteClusterPrivilegesVariant interface { + RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges +} + +func (s *RemoteClusterPrivileges) RemoteClusterPrivilegesCaster() *RemoteClusterPrivileges { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteindicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteindicesprivileges.go new file mode 100644 index 000000000..f30658d37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteindicesprivileges.go @@ -0,0 +1,195 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege" +) + +// RemoteIndicesPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L246-L278 +type RemoteIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. 
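// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// RemoteIndicesPrivileges carries a "query" union (a Query DSL object or a role
// template). A hedged decode example taking the Query-DSL branch; the fragment
// below is invented:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	fragment := []byte(`{
		"clusters": ["remote-a"],
		"names": ["logs-*"],
		"privileges": ["read"],
		"query": {"term": {"department": {"value": "engineering"}}}
	}`)

	var rip types.RemoteIndicesPrivileges
	if err := json.Unmarshal(fragment, &rip); err != nil {
		panic(err)
	}
	// The union resolved to a *types.Query because "term" is a known query kind.
	q, ok := rip.Query.(*types.Query)
	fmt.Println(rip.Names, ok, q != nil)
}
// ----- end editor's sketch -----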
+ AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // Clusters A list of cluster aliases to which the permissions in this entry apply. + Clusters []string `json:"clusters"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity *FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. + Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query A search query that defines the documents the owners of the role have access + // to. A document within the specified indices must match this query for it to + // be accessible by the owners of the role. + Query IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *RemoteIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "clusters": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + s.Clusters = append(s.Clusters, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + query_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", 
"rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + } + } + if s.Query == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + } + + } + } + return nil +} + +// NewRemoteIndicesPrivileges returns a RemoteIndicesPrivileges. +func NewRemoteIndicesPrivileges() *RemoteIndicesPrivileges { + r := &RemoteIndicesPrivileges{} + + return r +} + +type RemoteIndicesPrivilegesVariant interface { + RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges +} + +func (s *RemoteIndicesPrivileges) RemoteIndicesPrivilegesCaster() *RemoteIndicesPrivileges { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remotesource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remotesource.go new file mode 100644 index 000000000..c357c53ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remotesource.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RemoteSource type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/reindex/types.ts#L112-L140 +type RemoteSource struct { + // ConnectTimeout The remote connection timeout. + ConnectTimeout Duration `json:"connect_timeout,omitempty"` + // Headers An object containing the headers of the request. + Headers map[string]string `json:"headers,omitempty"` + // Host The URL for the remote instance of Elasticsearch that you want to index from. + // This information is required when you're indexing from remote. + Host string `json:"host"` + // Password The password to use for authentication with the remote host. 
+ Password *string `json:"password,omitempty"` + // SocketTimeout The remote socket read timeout. + SocketTimeout Duration `json:"socket_timeout,omitempty"` + // Username The username to use for authentication with the remote host. + Username *string `json:"username,omitempty"` +} + +func (s *RemoteSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connect_timeout": + if err := dec.Decode(&s.ConnectTimeout); err != nil { + return fmt.Errorf("%s | %w", "ConnectTimeout", err) + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return fmt.Errorf("%s | %w", "Password", err) + } + + case "socket_timeout": + if err := dec.Decode(&s.SocketTimeout); err != nil { + return fmt.Errorf("%s | %w", "SocketTimeout", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewRemoteSource returns a RemoteSource. +func NewRemoteSource() *RemoteSource { + r := &RemoteSource{ + Headers: make(map[string]string), + } + + return r +} + +type RemoteSourceVariant interface { + RemoteSourceCaster() *RemoteSource +} + +func (s *RemoteSource) RemoteSourceCaster() *RemoteSource { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteuserindicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteuserindicesprivileges.go new file mode 100644 index 000000000..af5c3a0c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/remoteuserindicesprivileges.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege" +) + +// RemoteUserIndicesPrivileges type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L318-L341 +type RemoteUserIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices bool `json:"allow_restricted_indices"` + Clusters []string `json:"clusters"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity []FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. + Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query Search queries that define the documents the user has access to. A document + // within the specified indices must match these queries for it to be accessible + // by the owners of the role. + Query []IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *RemoteUserIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = value + case bool: + s.AllowRestrictedIndices = v + } + + case "clusters": + if err := dec.Decode(&s.Clusters); err != nil { + return fmt.Errorf("%s | %w", "Clusters", err) + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + query_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", 
"has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + } + + } + } + return nil +} + +// NewRemoteUserIndicesPrivileges returns a RemoteUserIndicesPrivileges. +func NewRemoteUserIndicesPrivileges() *RemoteUserIndicesPrivileges { + r := &RemoteUserIndicesPrivileges{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeaction.go new file mode 100644 index 000000000..4165cce26 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeaction.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RemoveAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/types.ts#L97-L122 +type RemoveAction struct { + // Alias Alias for the action. + // Index alias names support date math. + Alias *string `json:"alias,omitempty"` + // Aliases Aliases for the action. + // Index alias names support date math. + Aliases []string `json:"aliases,omitempty"` + // Index Data stream or index for the action. + // Supports wildcards (`*`). 
+ Index *string `json:"index,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. + MustExist *bool `json:"must_exist,omitempty"` +} + +func (s *RemoveAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + if err := dec.Decode(&s.Alias); err != nil { + return fmt.Errorf("%s | %w", "Alias", err) + } + + case "aliases": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + s.Aliases = append(s.Aliases, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "must_exist": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MustExist", err) + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + } + } + return nil +} + +// NewRemoveAction returns a RemoveAction. +func NewRemoveAction() *RemoveAction { + r := &RemoveAction{} + + return r +} + +type RemoveActionVariant interface { + RemoveActionCaster() *RemoveAction +} + +func (s *RemoveAction) RemoveActionCaster() *RemoveAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeclusterserver.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeclusterserver.go new file mode 100644 index 000000000..b0dc6b8f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeclusterserver.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RemoveClusterServer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/info/types.ts#L74-L77 +type RemoveClusterServer struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +func (s *RemoveClusterServer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bound_address": + if err := dec.Decode(&s.BoundAddress); err != nil { + return fmt.Errorf("%s | %w", "BoundAddress", err) + } + + case "publish_address": + if err := dec.Decode(&s.PublishAddress); err != nil { + return fmt.Errorf("%s | %w", "PublishAddress", err) + } + + } + } + return nil +} + +// NewRemoveClusterServer returns a RemoveClusterServer. +func NewRemoveClusterServer() *RemoveClusterServer { + r := &RemoveClusterServer{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeduplicatestokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeduplicatestokenfilter.go new file mode 100644 index 000000000..037463ac0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeduplicatestokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RemoveDuplicatesTokenFilter type. 
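// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// RemoveDuplicatesTokenFilter pins its "type" in MarshalJSON, so a freshly
// constructed value already serializes as a valid analysis filter:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewRemoveDuplicatesTokenFilter()
	b, _ := json.Marshal(f)
	fmt.Println(string(b)) // {"type":"remove_duplicates"}
}
// ----- end editor's sketch -----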
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L403-L405 +type RemoveDuplicatesTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *RemoveDuplicatesTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RemoveDuplicatesTokenFilter) MarshalJSON() ([]byte, error) { + type innerRemoveDuplicatesTokenFilter RemoveDuplicatesTokenFilter + tmp := innerRemoveDuplicatesTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "remove_duplicates" + + return json.Marshal(tmp) +} + +// NewRemoveDuplicatesTokenFilter returns a RemoveDuplicatesTokenFilter. +func NewRemoveDuplicatesTokenFilter() *RemoveDuplicatesTokenFilter { + r := &RemoveDuplicatesTokenFilter{} + + return r +} + +type RemoveDuplicatesTokenFilterVariant interface { + RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter +} + +func (s *RemoveDuplicatesTokenFilter) RemoveDuplicatesTokenFilterCaster() *RemoveDuplicatesTokenFilter { + return s +} + +func (s *RemoveDuplicatesTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindexaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindexaction.go new file mode 100644 index 000000000..396204c5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindexaction.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RemoveIndexAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/update_aliases/types.ts#L124-L139 +type RemoveIndexAction struct { + // Index Data stream or index for the action. + // Supports wildcards (`*`). 
+ Index *string `json:"index,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. + MustExist *bool `json:"must_exist,omitempty"` +} + +func (s *RemoveIndexAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "must_exist": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MustExist", err) + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + } + } + return nil +} + +// NewRemoveIndexAction returns a RemoveIndexAction. +func NewRemoveIndexAction() *RemoveIndexAction { + r := &RemoveIndexAction{} + + return r +} + +type RemoveIndexActionVariant interface { + RemoveIndexActionCaster() *RemoveIndexAction +} + +func (s *RemoveIndexAction) RemoveIndexActionCaster() *RemoveIndexAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindicesblockstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindicesblockstatus.go new file mode 100644 index 000000000..73a2ccccf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeindicesblockstatus.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RemoveIndicesBlockStatus type. 
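// ----- editor's sketch (illustrative example; not part of the vendored diff) -----
// RemoveIndicesBlockStatus is one per-index entry in a remove-block response.
// A hedged decode example; the entry below is invented:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	entry := []byte(`{"name": "my-index", "unblocked": true}`)

	var st types.RemoveIndicesBlockStatus
	if err := json.Unmarshal(entry, &st); err != nil {
		panic(err)
	}
	fmt.Println(st.Name, *st.Unblocked) // my-index true
}
// ----- end editor's sketch -----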
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/remove_block/IndicesRemoveBlockResponse.ts#L30-L34 +type RemoveIndicesBlockStatus struct { + Exception *ErrorCause `json:"exception,omitempty"` + Name string `json:"name"` + Unblocked *bool `json:"unblocked,omitempty"` +} + +func (s *RemoveIndicesBlockStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exception": + if err := dec.Decode(&s.Exception); err != nil { + return fmt.Errorf("%s | %w", "Exception", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "unblocked": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Unblocked", err) + } + s.Unblocked = &value + case bool: + s.Unblocked = &v + } + + } + } + return nil +} + +// NewRemoveIndicesBlockStatus returns a RemoveIndicesBlockStatus. +func NewRemoveIndicesBlockStatus() *RemoveIndicesBlockStatus { + r := &RemoveIndicesBlockStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeprocessor.go new file mode 100644 index 000000000..4b70231e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/removeprocessor.go @@ -0,0 +1,185 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RemoveProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1359-L1373 +type RemoveProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Fields to be removed. Supports template snippets. + Field []string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // Keep Fields to be kept. When set, all fields other than those specified are + // removed. + Keep []string `json:"keep,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *RemoveProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + s.Field = append(s.Field, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "keep": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Keep", err) + } + + s.Keep = append(s.Keep, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Keep); err != nil { + return fmt.Errorf("%s | %w", "Keep", err) + } + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewRemoveProcessor returns a RemoveProcessor. 
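Only decoding is specialised for the RemoveProcessor above; constructing one for an ingest pipeline is ordinary struct work, as in this brief sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewRemoveProcessor()
	p.Field = []string{"temp", "debug"}
	ignore := true
	p.IgnoreMissing = &ignore

	b, _ := json.Marshal(p)
	fmt.Println(string(b))
	// {"field":["temp","debug"],"ignore_missing":true}
}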
+func NewRemoveProcessor() *RemoveProcessor { + r := &RemoveProcessor{} + + return r +} + +type RemoveProcessorVariant interface { + RemoveProcessorCaster() *RemoveProcessor +} + +func (s *RemoveProcessor) RemoveProcessorCaster() *RemoveProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/renameprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/renameprocessor.go new file mode 100644 index 000000000..4f53726e4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/renameprocessor.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RenameProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1375-L1391 +type RenameProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be renamed. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The new name of the field. + // Supports template snippets. 
+ TargetField string `json:"target_field"` +} + +func (s *RenameProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewRenameProcessor returns a RenameProcessor. +func NewRenameProcessor() *RenameProcessor { + r := &RenameProcessor{} + + return r +} + +type RenameProcessorVariant interface { + RenameProcessorCaster() *RenameProcessor +} + +func (s *RenameProcessor) RenameProcessorCaster() *RenameProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/replicationaccess.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/replicationaccess.go new file mode 100644 index 000000000..eeea7fbbe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/replicationaccess.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
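A recurring pattern in these generated decoders handles free-text fields such as `description` and `tag`: the raw token is unquoted when it is a JSON string and kept verbatim otherwise. A standalone sketch of the pattern (the helper name is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// unquoteOrRaw mirrors the generated decoders: JSON strings are
// unquoted, anything else is kept as raw text.
func unquoteOrRaw(raw json.RawMessage) string {
	s := string(raw)
	if u, err := strconv.Unquote(s); err == nil {
		return u
	}
	return s
}

func main() {
	fmt.Println(unquoteOrRaw(json.RawMessage(`"quoted text"`))) // quoted text
	fmt.Println(unquoteOrRaw(json.RawMessage(`42`)))            // 42
}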
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReplicationAccess type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L444-L454 +type ReplicationAccess struct { + // AllowRestrictedIndices This needs to be set to true if the patterns in the names field should cover + // system indices. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` +} + +func (s *ReplicationAccess) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + } + } + return nil +} + +// NewReplicationAccess returns a ReplicationAccess. +func NewReplicationAccess() *ReplicationAccess { + r := &ReplicationAccess{} + + return r +} + +type ReplicationAccessVariant interface { + ReplicationAccessCaster() *ReplicationAccess +} + +func (s *ReplicationAccess) ReplicationAccessCaster() *ReplicationAccess { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reportingemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reportingemailattachment.go new file mode 100644 index 000000000..5ee5851ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reportingemailattachment.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
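ReplicationAccess above is a plain data carrier for role definitions; building and serialising one needs no custom code, for example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	r := types.NewReplicationAccess()
	r.Names = []string{"logs-*", "metrics-*"}
	allow := false
	r.AllowRestrictedIndices = &allow

	b, _ := json.Marshal(r)
	fmt.Println(string(b))
	// {"allow_restricted_indices":false,"names":["logs-*","metrics-*"]}
}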
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReportingEmailAttachment type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L224-L232 +type ReportingEmailAttachment struct { + Inline *bool `json:"inline,omitempty"` + Interval Duration `json:"interval,omitempty"` + Request *HttpInputRequestDefinition `json:"request,omitempty"` + Retries *int `json:"retries,omitempty"` + Url string `json:"url"` +} + +func (s *ReportingEmailAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "inline": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Inline", err) + } + s.Inline = &value + case bool: + s.Inline = &v + } + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return fmt.Errorf("%s | %w", "Interval", err) + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + case "retries": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Retries", err) + } + s.Retries = &value + case float64: + f := int(v) + s.Retries = &f + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + +// NewReportingEmailAttachment returns a ReportingEmailAttachment. +func NewReportingEmailAttachment() *ReportingEmailAttachment { + r := &ReportingEmailAttachment{} + + return r +} + +type ReportingEmailAttachmentVariant interface { + ReportingEmailAttachmentCaster() *ReportingEmailAttachment +} + +func (s *ReportingEmailAttachment) ReportingEmailAttachmentCaster() *ReportingEmailAttachment { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoriesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoriesrecord.go new file mode 100644 index 000000000..cdc652a89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoriesrecord.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
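The ReportingEmailAttachment decoder above coerces `retries` from either a JSON number or a quoted string via strconv.Atoi; for example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	att := types.NewReportingEmailAttachment()
	// "retries" arrives as a quoted number here; the decoder
	// falls back to strconv.Atoi for the string case.
	data := []byte(`{"url":"https://kibana.example.com/report","retries":"3","inline":true}`)
	if err := json.Unmarshal(data, att); err != nil {
		panic(err)
	}
	fmt.Println(att.Url, *att.Retries, *att.Inline) // ... 3 true
}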
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RepositoriesRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/repositories/types.ts#L20-L31 +type RepositoriesRecord struct { + // Id The unique repository identifier. + Id *string `json:"id,omitempty"` + // Type The repository type. + Type *string `json:"type,omitempty"` +} + +func (s *RepositoriesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id", "repoId": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewRepositoriesRecord returns a RepositoriesRecord. +func NewRepositoriesRecord() *RepositoriesRecord { + r := &RepositoriesRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repository.go new file mode 100644 index 000000000..ea43e439f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repository.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
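Note the alias keys in the RepositoriesRecord decoder above: the cat API's short column names decode into the same fields as the long ones. For instance:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	rec := types.NewRepositoriesRecord()
	// Short cat column names ("repoId", "t") map to the same fields
	// as "id" and "type".
	data := []byte(`{"repoId":"my_backups","t":"s3"}`)
	if err := json.Unmarshal(data, rec); err != nil {
		panic(err)
	}
	fmt.Println(*rec.Id, *rec.Type) // my_backups s3
}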
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Repository holds the union for the following types: +// +// AzureRepository +// GcsRepository +// S3Repository +// SharedFileSystemRepository +// ReadOnlyUrlRepository +// SourceOnlyRepository +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L24-L34 +type Repository any + +type RepositoryVariant interface { + RepositoryCaster() *Repository +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicator.go new file mode 100644 index 000000000..0c623f8de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// RepositoryIntegrityIndicator type. 
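Because Repository is an open union (`any`), callers narrow it with an ordinary type switch. A sketch under the assumption that decoded values surface as pointers to the variant structs named in the union comment:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// describe narrows the Repository union with a plain type switch.
func describe(repo types.Repository) string {
	switch repo.(type) {
	case *types.S3Repository:
		return "S3-backed snapshot repository"
	case *types.AzureRepository:
		return "Azure-backed snapshot repository"
	case *types.GcsRepository:
		return "GCS-backed snapshot repository"
	default:
		return fmt.Sprintf("other repository type: %T", repo)
	}
}

func main() {
	fmt.Println(describe(&types.S3Repository{}))
}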
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L138-L142 +type RepositoryIntegrityIndicator struct { + Details *RepositoryIntegrityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *RepositoryIntegrityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewRepositoryIntegrityIndicator returns a RepositoryIntegrityIndicator. +func NewRepositoryIntegrityIndicator() *RepositoryIntegrityIndicator { + r := &RepositoryIntegrityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicatordetails.go new file mode 100644 index 000000000..0958d6f09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositoryintegrityindicatordetails.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RepositoryIntegrityIndicatorDetails type. 
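Decoding a health-report fragment into the indicator above exercises the enum-typed `status` field; this sketch assumes `yellow` is a valid indicatorhealthstatus value:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	ind := types.NewRepositoryIntegrityIndicator()
	data := []byte(`{"status":"yellow","symptom":"2 corrupted repositories"}`)
	if err := json.Unmarshal(data, ind); err != nil {
		panic(err)
	}
	fmt.Println(ind.Status, ind.Symptom)
}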
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L143-L147 +type RepositoryIntegrityIndicatorDetails struct { + Corrupted []string `json:"corrupted,omitempty"` + CorruptedRepositories *int64 `json:"corrupted_repositories,omitempty"` + TotalRepositories *int64 `json:"total_repositories,omitempty"` +} + +func (s *RepositoryIntegrityIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "corrupted": + if err := dec.Decode(&s.Corrupted); err != nil { + return fmt.Errorf("%s | %w", "Corrupted", err) + } + + case "corrupted_repositories": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CorruptedRepositories", err) + } + s.CorruptedRepositories = &value + case float64: + f := int64(v) + s.CorruptedRepositories = &f + } + + case "total_repositories": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalRepositories", err) + } + s.TotalRepositories = &value + case float64: + f := int64(v) + s.TotalRepositories = &f + } + + } + } + return nil +} + +// NewRepositoryIntegrityIndicatorDetails returns a RepositoryIntegrityIndicatorDetails. +func NewRepositoryIntegrityIndicatorDetails() *RepositoryIntegrityIndicatorDetails { + r := &RepositoryIntegrityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorylocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorylocation.go new file mode 100644 index 000000000..e8ff0c7e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorylocation.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RepositoryLocation type. 
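The string branch in the counter decoding above matters for 64-bit values: a bare JSON number passes through float64 and loses precision beyond 2^53, while a quoted counter survives intact via strconv.ParseInt. For example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	d := types.NewRepositoryIntegrityIndicatorDetails()
	// A quoted 64-bit counter is parsed exactly; as a bare JSON
	// number it would pass through float64 first.
	data := []byte(`{"total_repositories":"9007199254740995"}`)
	if err := json.Unmarshal(data, d); err != nil {
		panic(err)
	}
	fmt.Println(*d.TotalRepositories) // 9007199254740995
}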
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 +type RepositoryLocation struct { + BasePath string `json:"base_path"` + // Bucket Bucket name (GCP, S3) + Bucket *string `json:"bucket,omitempty"` + // Container Container name (Azure) + Container *string `json:"container,omitempty"` +} + +func (s *RepositoryLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "base_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BasePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BasePath = o + + case "bucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Bucket", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bucket = &o + + case "container": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Container", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Container = &o + + } + } + return nil +} + +// NewRepositoryLocation returns a RepositoryLocation. +func NewRepositoryLocation() *RepositoryLocation { + r := &RepositoryLocation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorymeteringinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorymeteringinformation.go new file mode 100644 index 000000000..0bdd2a01d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorymeteringinformation.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RepositoryMeteringInformation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 +type RepositoryMeteringInformation struct { + // Archived A flag that tells whether or not this object has been archived. When a + // repository is closed or updated the + // repository metering information is archived and kept for a certain period of + // time. 
This allows retrieving the + // repository metering information of previous repository instantiations. + Archived bool `json:"archived"` + // ClusterVersion The cluster state version when this object was archived, this field can be + // used as a logical timestamp to delete + // all the archived metrics up to an observed version. This field is only + // present for archived repository metering + // information objects. The main purpose of this field is to avoid possible race + // conditions during repository metering + // information deletions, i.e. deleting archived repositories metering + // information that we haven’t observed yet. + ClusterVersion *int64 `json:"cluster_version,omitempty"` + // RepositoryEphemeralId An identifier that changes every time the repository is updated. + RepositoryEphemeralId string `json:"repository_ephemeral_id"` + // RepositoryLocation Represents an unique location within the repository. + RepositoryLocation RepositoryLocation `json:"repository_location"` + // RepositoryName Repository name. + RepositoryName string `json:"repository_name"` + // RepositoryStartedAt Time the repository was created or updated. Recorded in milliseconds since + // the Unix Epoch. + RepositoryStartedAt int64 `json:"repository_started_at"` + // RepositoryStoppedAt Time the repository was deleted or updated. Recorded in milliseconds since + // the Unix Epoch. + RepositoryStoppedAt *int64 `json:"repository_stopped_at,omitempty"` + // RepositoryType Repository type. + RepositoryType string `json:"repository_type"` + // RequestCounts An object with the number of request performed against the repository grouped + // by request type. + RequestCounts RequestCounts `json:"request_counts"` +} + +func (s *RepositoryMeteringInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "archived": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Archived", err) + } + s.Archived = value + case bool: + s.Archived = v + } + + case "cluster_version": + if err := dec.Decode(&s.ClusterVersion); err != nil { + return fmt.Errorf("%s | %w", "ClusterVersion", err) + } + + case "repository_ephemeral_id": + if err := dec.Decode(&s.RepositoryEphemeralId); err != nil { + return fmt.Errorf("%s | %w", "RepositoryEphemeralId", err) + } + + case "repository_location": + if err := dec.Decode(&s.RepositoryLocation); err != nil { + return fmt.Errorf("%s | %w", "RepositoryLocation", err) + } + + case "repository_name": + if err := dec.Decode(&s.RepositoryName); err != nil { + return fmt.Errorf("%s | %w", "RepositoryName", err) + } + + case "repository_started_at": + if err := dec.Decode(&s.RepositoryStartedAt); err != nil { + return fmt.Errorf("%s | %w", "RepositoryStartedAt", err) + } + + case "repository_stopped_at": + if err := dec.Decode(&s.RepositoryStoppedAt); err != nil { + return fmt.Errorf("%s | %w", "RepositoryStoppedAt", err) + } + + case "repository_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RepositoryType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RepositoryType = o + + case "request_counts": + if err := dec.Decode(&s.RequestCounts); err != nil { + return fmt.Errorf("%s | %w", 
"RequestCounts", err) + } + + } + } + return nil +} + +// NewRepositoryMeteringInformation returns a RepositoryMeteringInformation. +func NewRepositoryMeteringInformation() *RepositoryMeteringInformation { + r := &RepositoryMeteringInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatscurrentcounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatscurrentcounts.go new file mode 100644 index 000000000..941661696 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatscurrentcounts.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RepositoryStatsCurrentCounts type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L672-L680 +type RepositoryStatsCurrentCounts struct { + ActiveDeletions int `json:"active_deletions"` + Clones int `json:"clones"` + Deletions int `json:"deletions"` + Finalizations int `json:"finalizations"` + Shards RepositoryStatsShards `json:"shards"` + SnapshotDeletions int `json:"snapshot_deletions"` + Snapshots int `json:"snapshots"` +} + +func (s *RepositoryStatsCurrentCounts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_deletions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ActiveDeletions", err) + } + s.ActiveDeletions = value + case float64: + f := int(v) + s.ActiveDeletions = f + } + + case "clones": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Clones", err) + } + s.Clones = value + case float64: + f := int(v) + s.Clones = f + } + + case "deletions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Deletions", err) + } + s.Deletions = value + case float64: + f := int(v) + s.Deletions = f + } + + case "finalizations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Finalizations", err) + 
} + s.Finalizations = value + case float64: + f := int(v) + s.Finalizations = f + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "snapshot_deletions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SnapshotDeletions", err) + } + s.SnapshotDeletions = value + case float64: + f := int(v) + s.SnapshotDeletions = f + } + + case "snapshots": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Snapshots", err) + } + s.Snapshots = value + case float64: + f := int(v) + s.Snapshots = f + } + + } + } + return nil +} + +// NewRepositoryStatsCurrentCounts returns a RepositoryStatsCurrentCounts. +func NewRepositoryStatsCurrentCounts() *RepositoryStatsCurrentCounts { + r := &RepositoryStatsCurrentCounts{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatsshards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatsshards.go new file mode 100644 index 000000000..edd212ff9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/repositorystatsshards.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstate" +) + +// RepositoryStatsShards type. 
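A quick decode of a cluster-stats fragment into the current-counts type above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewRepositoryStatsCurrentCounts()
	data := []byte(`{"snapshots":2,"clones":0,"finalizations":1,
		"deletions":0,"snapshot_deletions":0,"active_deletions":0,
		"shards":{"total":12,"complete":9,"incomplete":3,"states":{}}}`)
	if err := json.Unmarshal(data, c); err != nil {
		panic(err)
	}
	fmt.Printf("in flight: %d snapshot(s), %d shard(s) incomplete\n",
		c.Snapshots, c.Shards.Incomplete)
}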
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L682-L687 +type RepositoryStatsShards struct { + Complete int `json:"complete"` + Incomplete int `json:"incomplete"` + States map[shardstate.ShardState]int `json:"states"` + Total int `json:"total"` +} + +func (s *RepositoryStatsShards) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "complete": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Complete", err) + } + s.Complete = value + case float64: + f := int(v) + s.Complete = f + } + + case "incomplete": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Incomplete", err) + } + s.Incomplete = value + case float64: + f := int(v) + s.Incomplete = f + } + + case "states": + if s.States == nil { + s.States = make(map[shardstate.ShardState]int, 0) + } + if err := dec.Decode(&s.States); err != nil { + return fmt.Errorf("%s | %w", "States", err) + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + +// NewRepositoryStatsShards returns a RepositoryStatsShards. +func NewRepositoryStatsShards() *RepositoryStatsShards { + r := &RepositoryStatsShards{ + States: make(map[shardstate.ShardState]int), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcachestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcachestats.go new file mode 100644 index 000000000..2b54388de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcachestats.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RequestCacheStats type. 
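`States` in the shards type above is keyed by the shardstate enum, and both the constructor and the decoder guard against a nil map. A sketch, assuming `STARTED` is a valid shardstate value and that the enum supports text-based map keys:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	sh := types.NewRepositoryStatsShards() // States map pre-allocated
	data := []byte(`{"total":10,"complete":7,"incomplete":3,
		"states":{"STARTED":3}}`)
	if err := json.Unmarshal(data, sh); err != nil {
		panic(err)
	}
	for state, n := range sh.States {
		fmt.Println(state, n)
	}
}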
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L271-L277 +type RequestCacheStats struct { + Evictions int64 `json:"evictions"` + HitCount int64 `json:"hit_count"` + MemorySize *string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + MissCount int64 `json:"miss_count"` +} + +func (s *RequestCacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "hit_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HitCount", err) + } + s.HitCount = value + case float64: + f := int64(v) + s.HitCount = f + } + + case "memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemorySize = &o + + case "memory_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemorySizeInBytes", err) + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MissCount", err) + } + s.MissCount = value + case float64: + f := int64(v) + s.MissCount = f + } + + } + } + return nil +} + +// NewRequestCacheStats returns a RequestCacheStats. +func NewRequestCacheStats() *RequestCacheStats { + r := &RequestCacheStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestchatcompletion.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestchatcompletion.go new file mode 100644 index 000000000..c526aa0d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestchatcompletion.go @@ -0,0 +1,247 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
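The request-cache counters above make hit-ratio computation a one-liner; for example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	rc := types.NewRequestCacheStats()
	data := []byte(`{"evictions":0,"hit_count":740,
		"memory_size_in_bytes":1048576,"miss_count":260}`)
	if err := json.Unmarshal(data, rc); err != nil {
		panic(err)
	}
	if total := rc.HitCount + rc.MissCount; total > 0 {
		fmt.Printf("hit ratio: %.0f%%\n",
			100*float64(rc.HitCount)/float64(total)) // 74%
	}
}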
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RequestChatCompletion type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L25-L97 +type RequestChatCompletion struct { + // MaxCompletionTokens The upper bound limit for the number of tokens that can be generated for a + // completion request. + MaxCompletionTokens *int64 `json:"max_completion_tokens,omitempty"` + // Messages A list of objects representing the conversation. + // Requests should generally only add new messages from the user (role `user`). + // The other message roles (`assistant`, `system`, or `tool`) should generally + // only be copied from the response to a previous completion request, such that + // the messages array is built up throughout a conversation. + Messages []Message `json:"messages"` + // Model The ID of the model to use. + Model *string `json:"model,omitempty"` + // Stop A sequence of strings to control when the model should stop generating + // additional tokens. + Stop []string `json:"stop,omitempty"` + // Temperature The sampling temperature to use. + Temperature *float32 `json:"temperature,omitempty"` + // ToolChoice Controls which tool is called by the model. + // String representation: One of `auto`, `none`, or `requrired`. `auto` allows + // the model to choose between calling tools and generating a message. `none` + // causes the model to not call any tools. `required` forces the model to call + // one or more tools. + // Example (object representation): + // ``` + // + // { + // "tool_choice": { + // "type": "function", + // "function": { + // "name": "get_current_weather" + // } + // } + // } + // + // ``` + ToolChoice CompletionToolType `json:"tool_choice,omitempty"` + // Tools A list of tools that the model can call. + // Example: + // ``` + // + // { + // "tools": [ + // { + // "type": "function", + // "function": { + // "name": "get_price_of_item", + // "description": "Get the current price of an item", + // "parameters": { + // "type": "object", + // "properties": { + // "item": { + // "id": "12345" + // }, + // "unit": { + // "type": "currency" + // } + // } + // } + // } + // } + // ] + // } + // + // ``` + Tools []CompletionTool `json:"tools,omitempty"` + // TopP Nucleus sampling, an alternative to sampling with temperature. 
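The RequestChatCompletion payload above tolerates quoted numerics and a string-form `tool_choice`; a small decode check (the model id below is a placeholder):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	req := types.NewRequestChatCompletion()
	// Quoted "max_completion_tokens" and "temperature" are coerced;
	// a bare string "tool_choice" lands in the union as-is.
	data := []byte(`{"model":"my-chat-model","messages":[],
		"max_completion_tokens":"512","temperature":"0.2","tool_choice":"auto"}`)
	if err := json.Unmarshal(data, req); err != nil {
		panic(err)
	}
	fmt.Println(*req.Model, *req.MaxCompletionTokens, *req.Temperature)
}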
+ TopP *float32 `json:"top_p,omitempty"` +} + +func (s *RequestChatCompletion) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_completion_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxCompletionTokens", err) + } + s.MaxCompletionTokens = &value + case float64: + f := int64(v) + s.MaxCompletionTokens = &f + } + + case "messages": + if err := dec.Decode(&s.Messages); err != nil { + return fmt.Errorf("%s | %w", "Messages", err) + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = &o + + case "stop": + if err := dec.Decode(&s.Stop); err != nil { + return fmt.Errorf("%s | %w", "Stop", err) + } + + case "temperature": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Temperature", err) + } + f := float32(value) + s.Temperature = &f + case float64: + f := float32(v) + s.Temperature = &f + } + + case "tool_choice": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + toolchoice_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + + switch t { + + case "function", "type": + o := NewCompletionToolChoice() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + s.ToolChoice = o + break toolchoice_field + + } + } + if s.ToolChoice == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.ToolChoice); err != nil { + return fmt.Errorf("%s | %w", "ToolChoice", err) + } + } + + case "tools": + if err := dec.Decode(&s.Tools); err != nil { + return fmt.Errorf("%s | %w", "Tools", err) + } + + case "top_p": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TopP", err) + } + f := float32(value) + s.TopP = &f + case float64: + f := float32(v) + s.TopP = &f + } + + } + } + return nil +} + +// NewRequestChatCompletion returns a RequestChatCompletion. +func NewRequestChatCompletion() *RequestChatCompletion { + r := &RequestChatCompletion{} + + return r +} + +type RequestChatCompletionVariant interface { + RequestChatCompletionCaster() *RequestChatCompletion +} + +func (s *RequestChatCompletion) RequestChatCompletionCaster() *RequestChatCompletion { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcounts.go new file mode 100644 index 000000000..ebe645177 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestcounts.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RequestCounts type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 +type RequestCounts struct { + // GetBlob Number of Get Blob requests (Azure) + GetBlob *int64 `json:"GetBlob,omitempty"` + // GetBlobProperties Number of Get Blob Properties requests (Azure) + GetBlobProperties *int64 `json:"GetBlobProperties,omitempty"` + // GetObject Number of get object requests (GCP, S3) + GetObject *int64 `json:"GetObject,omitempty"` + // InsertObject Number of insert object requests, including simple, multipart and resumable + // uploads. Resumable uploads + // can perform multiple http requests to insert a single object but they are + // considered as a single request + // since they are billed as an individual operation. 
(GCP)
+ InsertObject *int64 `json:"InsertObject,omitempty"`
+ // ListBlobs Number of List Blobs requests (Azure)
+ ListBlobs *int64 `json:"ListBlobs,omitempty"`
+ // ListObjects Number of list objects requests (GCP, S3)
+ ListObjects *int64 `json:"ListObjects,omitempty"`
+ // PutBlob Number of Put Blob requests (Azure)
+ PutBlob *int64 `json:"PutBlob,omitempty"`
+ // PutBlock Number of Put Block requests (Azure)
+ PutBlock *int64 `json:"PutBlock,omitempty"`
+ // PutBlockList Number of Put Block List requests (Azure)
+ PutBlockList *int64 `json:"PutBlockList,omitempty"`
+ // PutMultipartObject Number of Multipart requests, including CreateMultipartUpload, UploadPart and
+ // CompleteMultipartUpload requests (S3)
+ PutMultipartObject *int64 `json:"PutMultipartObject,omitempty"`
+ // PutObject Number of PutObject requests (S3)
+ PutObject *int64 `json:"PutObject,omitempty"`
+}
+
+func (s *RequestCounts) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "GetBlob":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "GetBlob", err)
+ }
+ s.GetBlob = &value
+ case float64:
+ f := int64(v)
+ s.GetBlob = &f
+ }
+
+ case "GetBlobProperties":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "GetBlobProperties", err)
+ }
+ s.GetBlobProperties = &value
+ case float64:
+ f := int64(v)
+ s.GetBlobProperties = &f
+ }
+
+ case "GetObject":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "GetObject", err)
+ }
+ s.GetObject = &value
+ case float64:
+ f := int64(v)
+ s.GetObject = &f
+ }
+
+ case "InsertObject":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "InsertObject", err)
+ }
+ s.InsertObject = &value
+ case float64:
+ f := int64(v)
+ s.InsertObject = &f
+ }
+
+ case "ListBlobs":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "ListBlobs", err)
+ }
+ s.ListBlobs = &value
+ case float64:
+ f := int64(v)
+ s.ListBlobs = &f
+ }
+
+ case "ListObjects":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "ListObjects", err)
+ }
+ s.ListObjects = &value
+ case float64:
+ f := int64(v)
+ s.ListObjects = &f
+ }
+
+ case "PutBlob":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "PutBlob", err)
+ }
+ s.PutBlob = &value
+ case float64:
+ f := int64(v)
+ s.PutBlob = &f
+ }
+
+ case "PutBlock":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "PutBlock", err)
+ }
+ s.PutBlock = &value
+ case float64:
+ f := int64(v)
+ s.PutBlock = &f
+ }
+
+ case "PutBlockList":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PutBlockList", err) + } + s.PutBlockList = &value + case float64: + f := int64(v) + s.PutBlockList = &f + } + + case "PutMultipartObject": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PutMultipartObject", err) + } + s.PutMultipartObject = &value + case float64: + f := int64(v) + s.PutMultipartObject = &f + } + + case "PutObject": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PutObject", err) + } + s.PutObject = &value + case float64: + f := int64(v) + s.PutObject = &f + } + + } + } + return nil +} + +// NewRequestCounts returns a RequestCounts. +func NewRequestCounts() *RequestCounts { + r := &RequestCounts{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestitem.go new file mode 100644 index 000000000..bd317e4f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/requestitem.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RequestItem holds the union for the following types: +// +// MultisearchHeader +// TemplateConfig +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch_template/types.ts#L26-L27 +type RequestItem any + +type RequestItemVariant interface { + RequestItemCaster() *RequestItem +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reroutedecision.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reroutedecision.go new file mode 100644 index 000000000..b97312f40 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reroutedecision.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RerouteDecision type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L86-L90 +type RerouteDecision struct { + Decider string `json:"decider"` + Decision string `json:"decision"` + Explanation string `json:"explanation"` +} + +func (s *RerouteDecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Decider", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decider = o + + case "decision": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Decision", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decision = o + + case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Explanation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = o + + } + } + return nil +} + +// NewRerouteDecision returns a RerouteDecision. +func NewRerouteDecision() *RerouteDecision { + r := &RerouteDecision{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteexplanation.go new file mode 100644 index 000000000..6f4cdbcf4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteexplanation.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RerouteExplanation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L92-L96 +type RerouteExplanation struct { + Command string `json:"command"` + Decisions []RerouteDecision `json:"decisions"` + Parameters RerouteParameters `json:"parameters"` +} + +func (s *RerouteExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "command": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Command", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Command = o + + case "decisions": + if err := dec.Decode(&s.Decisions); err != nil { + return fmt.Errorf("%s | %w", "Decisions", err) + } + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return fmt.Errorf("%s | %w", "Parameters", err) + } + + } + } + return nil +} + +// NewRerouteExplanation returns a RerouteExplanation. +func NewRerouteExplanation() *RerouteExplanation { + r := &RerouteExplanation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteparameters.go new file mode 100644 index 000000000..417a4ad67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteparameters.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RerouteParameters type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/reroute/types.ts#L98-L105 +type RerouteParameters struct { + AllowPrimary bool `json:"allow_primary"` + FromNode *string `json:"from_node,omitempty"` + Index string `json:"index"` + Node string `json:"node"` + Shard int `json:"shard"` + ToNode *string `json:"to_node,omitempty"` +} + +func (s *RerouteParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowPrimary", err) + } + s.AllowPrimary = value + case bool: + s.AllowPrimary = v + } + + case "from_node": + if err := dec.Decode(&s.FromNode); err != nil { + return fmt.Errorf("%s | %w", "FromNode", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "to_node": + if err := dec.Decode(&s.ToNode); err != nil { + return fmt.Errorf("%s | %w", "ToNode", err) + } + + } + } + return nil +} + +// NewRerouteParameters returns a RerouteParameters. +func NewRerouteParameters() *RerouteParameters { + r := &RerouteParameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteprocessor.go new file mode 100644 index 000000000..51a158d88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rerouteprocessor.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RerouteProcessor type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1393-L1421
+type RerouteProcessor struct {
+ // Dataset Field references or a static value for the dataset part of the data stream
+ // name.
+ // In addition to the criteria for index names, cannot contain - and must be no
+ // longer than 100 characters.
+ // Example values are nginx.access and nginx.error.
+ //
+ // Supports field references with a mustache-like syntax (denoted as {{double}}
+ // or {{{triple}}} curly braces).
+ // When resolving field references, the processor replaces invalid characters
+ // with _. Uses the <dataset> part
+ // of the index name as a fallback if all field references resolve to a null,
+ // missing, or non-string value.
+ //
+ // default {{data_stream.dataset}}
+ Dataset []string `json:"dataset,omitempty"`
+ // Description Description of the processor.
+ // Useful for describing the purpose of the processor or its configuration.
+ Description *string `json:"description,omitempty"`
+ // Destination A static value for the target. Can’t be set when the dataset or namespace
+ // option is set.
+ Destination *string `json:"destination,omitempty"`
+ // If Conditionally execute the processor.
+ If *Script `json:"if,omitempty"`
+ // IgnoreFailure Ignore failures for the processor.
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+ // Namespace Field references or a static value for the namespace part of the data stream
+ // name. See the criteria for
+ // index names for allowed characters. Must be no longer than 100 characters.
+ //
+ // Supports field references with a mustache-like syntax (denoted as {{double}}
+ // or {{{triple}}} curly braces).
+ // When resolving field references, the processor replaces invalid characters
+ // with _. Uses the <namespace> part
+ // of the index name as a fallback if all field references resolve to a null,
+ // missing, or non-string value.
+ //
+ // default {{data_stream.namespace}}
+ Namespace []string `json:"namespace,omitempty"`
+ // OnFailure Handle failures for the processor.
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+ // Tag Identifier for the processor.
+ // Useful for debugging and metrics.
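+ //
+ // Editor's note: a hedged configuration sketch, not from the upstream
+ // spec; the dataset/namespace values are illustrative. Dataset takes a
+ // field reference, Namespace a static value, per the comments above:
+ //
+ //  p := NewRerouteProcessor()
+ //  p.Dataset = []string{"{{data_stream.dataset}}"}
+ //  p.Namespace = []string{"default"}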
+ Tag *string `json:"tag,omitempty"` +} + +func (s *RerouteProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dataset": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Dataset", err) + } + + s.Dataset = append(s.Dataset, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Dataset); err != nil { + return fmt.Errorf("%s | %w", "Dataset", err) + } + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "destination": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Destination", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Destination = &o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "namespace": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Namespace", err) + } + + s.Namespace = append(s.Namespace, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Namespace); err != nil { + return fmt.Errorf("%s | %w", "Namespace", err) + } + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewRerouteProcessor returns a RerouteProcessor. +func NewRerouteProcessor() *RerouteProcessor { + r := &RerouteProcessor{} + + return r +} + +type RerouteProcessorVariant interface { + RerouteProcessorCaster() *RerouteProcessor +} + +func (s *RerouteProcessor) RerouteProcessorCaster() *RerouteProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescore.go new file mode 100644 index 000000000..dee6b6998 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescore.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// Rescore type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/rescoring.ts#L25-L38
+type Rescore struct {
+ AdditionalRescoreProperty map[string]json.RawMessage `json:"-"`
+ LearningToRank *LearningToRank `json:"learning_to_rank,omitempty"`
+ Query *RescoreQuery `json:"query,omitempty"`
+ WindowSize *int `json:"window_size,omitempty"`
+}
+
+func (s *Rescore) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "learning_to_rank":
+ if err := dec.Decode(&s.LearningToRank); err != nil {
+ return fmt.Errorf("%s | %w", "LearningToRank", err)
+ }
+
+ case "query":
+ if err := dec.Decode(&s.Query); err != nil {
+ return fmt.Errorf("%s | %w", "Query", err)
+ }
+
+ case "window_size":
+
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "WindowSize", err)
+ }
+ s.WindowSize = &value
+ case float64:
+ f := int(v)
+ s.WindowSize = &f
+ }
+
+ default:
+
+ if key, ok := t.(string); ok {
+ if s.AdditionalRescoreProperty == nil {
+ s.AdditionalRescoreProperty = make(map[string]json.RawMessage, 0)
+ }
+ raw := new(json.RawMessage)
+ if err := dec.Decode(&raw); err != nil {
+ return fmt.Errorf("%s | %w", "AdditionalRescoreProperty", err)
+ }
+ s.AdditionalRescoreProperty[key] = *raw
+ }
+
+ }
+ }
+ return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s Rescore) MarshalJSON() ([]byte, error) {
+ type opt Rescore
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalRescoreProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalRescoreProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewRescore returns a Rescore.
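+//
+// Editor's note: a brief sketch of the additional-properties pattern above
+// (the "my_custom_rescorer" key is hypothetical). MarshalJSON inlines map
+// entries alongside the named fields, so the extra key serializes as a
+// sibling of window_size:
+//
+//  r := NewRescore()
+//  ws := 50
+//  r.WindowSize = &ws
+//  r.AdditionalRescoreProperty["my_custom_rescorer"] = json.RawMessage(`{}`)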
+func NewRescore() *Rescore { + r := &Rescore{ + AdditionalRescoreProperty: make(map[string]json.RawMessage), + } + + return r +} + +type RescoreVariant interface { + RescoreCaster() *Rescore +} + +func (s *Rescore) RescoreCaster() *Rescore { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorequery.go new file mode 100644 index 000000000..030f8ccd5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorequery.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode" +) + +// RescoreQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/rescoring.ts#L40-L62 +type RescoreQuery struct { + // Query The query to use for rescoring. + // This query is only run on the Top-K results returned by the `query` and + // `post_filter` phases. + Query Query `json:"rescore_query"` + // QueryWeight Relative importance of the original query versus the rescore query. + QueryWeight *Float64 `json:"query_weight,omitempty"` + // RescoreQueryWeight Relative importance of the rescore query versus the original query. + RescoreQueryWeight *Float64 `json:"rescore_query_weight,omitempty"` + // ScoreMode Determines how scores are combined. 
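+ //
+ // Editor's note: an illustrative weighting sketch (values hypothetical,
+ // not from the upstream spec). With QueryWeight 1 and RescoreQueryWeight
+ // 2, the rescore query counts twice as much in the combined score:
+ //
+ //  rq := NewRescoreQuery()
+ //  qw := Float64(1)
+ //  rqw := Float64(2)
+ //  rq.QueryWeight = &qw
+ //  rq.RescoreQueryWeight = &rqw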
+ ScoreMode *scoremode.ScoreMode `json:"score_mode,omitempty"` +} + +func (s *RescoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rescore_query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "query_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "QueryWeight", err) + } + f := Float64(value) + s.QueryWeight = &f + case float64: + f := Float64(v) + s.QueryWeight = &f + } + + case "rescore_query_weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RescoreQueryWeight", err) + } + f := Float64(value) + s.RescoreQueryWeight = &f + case float64: + f := Float64(v) + s.RescoreQueryWeight = &f + } + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return fmt.Errorf("%s | %w", "ScoreMode", err) + } + + } + } + return nil +} + +// NewRescoreQuery returns a RescoreQuery. +func NewRescoreQuery() *RescoreQuery { + r := &RescoreQuery{} + + return r +} + +type RescoreQueryVariant interface { + RescoreQueryCaster() *RescoreQuery +} + +func (s *RescoreQuery) RescoreQueryCaster() *RescoreQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorerretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorerretriever.go new file mode 100644 index 000000000..9d9acb3cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorerretriever.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RescorerRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L62-L66 +type RescorerRetriever struct { + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. 
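+ //
+ // Editor's note: a hedged composition sketch; the inner retriever and the
+ // rescore contents are elided/hypothetical:
+ //
+ //  rr := NewRescorerRetriever()
+ //  min := float32(0.5)
+ //  rr.MinScore = &min
+ //  rr.Rescore = []Rescore{*NewRescore()}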
+ Name_ *string `json:"_name,omitempty"` + Rescore []Rescore `json:"rescore"` + // Retriever Inner retriever. + Retriever RetrieverContainer `json:"retriever"` +} + +func (s *RescorerRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "rescore": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewRescore() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + + s.Rescore = append(s.Rescore, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + } + } + return nil +} + +// NewRescorerRetriever returns a RescorerRetriever. +func NewRescorerRetriever() *RescorerRetriever { + r := &RescorerRetriever{} + + return r +} + +type RescorerRetrieverVariant interface { + RescorerRetrieverCaster() *RescorerRetriever +} + +func (s *RescorerRetriever) RescorerRetrieverCaster() *RescorerRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorevector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorevector.go new file mode 100644 index 000000000..3432da68e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rescorevector.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RescoreVector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Knn.ts#L30-L33 +type RescoreVector struct { + // Oversample Applies the specified oversample factor to k on the approximate kNN search + Oversample float32 `json:"oversample"` +} + +func (s *RescoreVector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "oversample": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Oversample", err) + } + f := float32(value) + s.Oversample = f + case float64: + f := float32(v) + s.Oversample = f + } + + } + } + return nil +} + +// NewRescoreVector returns a RescoreVector. +func NewRescoreVector() *RescoreVector { + r := &RescoreVector{} + + return r +} + +type RescoreVectorVariant interface { + RescoreVectorCaster() *RescoreVector +} + +func (s *RescoreVector) RescoreVectorCaster() *RescoreVector { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reservedsize.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reservedsize.go new file mode 100644 index 000000000..ef55b2be1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reservedsize.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ReservedSize type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L72-L77 +type ReservedSize struct { + NodeId string `json:"node_id"` + Path string `json:"path"` + Shards []string `json:"shards"` + Total int64 `json:"total"` +} + +func (s *ReservedSize) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewReservedSize returns a ReservedSize. +func NewReservedSize() *ReservedSize { + r := &ReservedSize{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveclusterinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveclusterinfo.go new file mode 100644 index 000000000..d0429a958 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveclusterinfo.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ResolveClusterInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55 +type ResolveClusterInfo struct { + // Connected Whether the remote cluster is connected to the local (querying) cluster. + Connected bool `json:"connected"` + // Error Provides error messages that are likely to occur if you do a search with this + // index expression + // on the specified cluster (for example, lack of security privileges to query + // an index). 
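+ //
+ // Editor's note: a read-side sketch; `info` is a hypothetical
+ // ResolveClusterInfo taken from a resolve-cluster response. Per the
+ // fields here, a disconnected cluster or a populated Error suggests a
+ // cross-cluster search against it would fail:
+ //
+ //  if !info.Connected || info.Error != nil {
+ //      // skip this remote cluster or surface *info.Error
+ //  }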
+ Error *string `json:"error,omitempty"` + // MatchingIndices Whether the index expression provided in the request matches any indices, + // aliases or data streams + // on the cluster. + MatchingIndices *bool `json:"matching_indices,omitempty"` + // SkipUnavailable The `skip_unavailable` setting for a remote cluster. + SkipUnavailable bool `json:"skip_unavailable"` + // Version Provides version information about the cluster. + Version *ElasticsearchVersionMinInfo `json:"version,omitempty"` +} + +func (s *ResolveClusterInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connected": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Connected", err) + } + s.Connected = value + case bool: + s.Connected = v + } + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "matching_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MatchingIndices", err) + } + s.MatchingIndices = &value + case bool: + s.MatchingIndices = &v + } + + case "skip_unavailable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SkipUnavailable", err) + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewResolveClusterInfo returns a ResolveClusterInfo. +func NewResolveClusterInfo() *ResolveClusterInfo { + r := &ResolveClusterInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexaliasitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexaliasitem.go new file mode 100644 index 000000000..6aaf1b6fa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexaliasitem.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ResolveIndexAliasItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 +type ResolveIndexAliasItem struct { + Indices []string `json:"indices"` + Name string `json:"name"` +} + +func (s *ResolveIndexAliasItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewResolveIndexAliasItem returns a ResolveIndexAliasItem. +func NewResolveIndexAliasItem() *ResolveIndexAliasItem { + r := &ResolveIndexAliasItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexdatastreamsitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexdatastreamsitem.go new file mode 100644 index 000000000..478f7afa9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexdatastreamsitem.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ResolveIndexDataStreamsItem type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 +type ResolveIndexDataStreamsItem struct { + BackingIndices []string `json:"backing_indices"` + Name string `json:"name"` + TimestampField string `json:"timestamp_field"` +} + +func (s *ResolveIndexDataStreamsItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "backing_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "BackingIndices", err) + } + + s.BackingIndices = append(s.BackingIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.BackingIndices); err != nil { + return fmt.Errorf("%s | %w", "BackingIndices", err) + } + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "timestamp_field": + if err := dec.Decode(&s.TimestampField); err != nil { + return fmt.Errorf("%s | %w", "TimestampField", err) + } + + } + } + return nil +} + +// NewResolveIndexDataStreamsItem returns a ResolveIndexDataStreamsItem. +func NewResolveIndexDataStreamsItem() *ResolveIndexDataStreamsItem { + r := &ResolveIndexDataStreamsItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexitem.go new file mode 100644 index 000000000..7a08e0de7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resolveindexitem.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ResolveIndexItem type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 +type ResolveIndexItem struct { + Aliases []string `json:"aliases,omitempty"` + Attributes []string `json:"attributes"` + DataStream *string `json:"data_stream,omitempty"` + Name string `json:"name"` +} + +func (s *ResolveIndexItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "attributes": + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return fmt.Errorf("%s | %w", "DataStream", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewResolveIndexItem returns a ResolveIndexItem. +func NewResolveIndexItem() *ResolveIndexItem { + r := &ResolveIndexItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resourceprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resourceprivileges.go new file mode 100644 index 000000000..03533aced --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/resourceprivileges.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ResourcePrivileges type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/has_privileges/types.ts#L48-L48 +type ResourcePrivileges map[string]Privileges diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/responseitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/responseitem.go new file mode 100644 index 000000000..0b824fb73 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/responseitem.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/failurestorestatus" +) + +// ResponseItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L37-L84 +type ResponseItem struct { + // Error Additional information about the failed operation. + // The property is returned only for failed operations. + Error *ErrorCause `json:"error,omitempty"` + FailureStore *failurestorestatus.FailureStoreStatus `json:"failure_store,omitempty"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *InlineGetDictUserDefined `json:"get,omitempty"` + // Id_ The document ID associated with the operation. + Id_ *string `json:"_id,omitempty"` + // Index_ The name of the index associated with the operation. + // If the operation targeted a data stream, this is the backing index into which + // the document was written. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the operation. + // This property is returned only for successful operations. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the operation. + // Successful values are `created`, `deleted`, and `updated`. + Result *string `json:"result,omitempty"` + // SeqNo_ The sequence number assigned to the document for the operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Shard information for the operation. + Shards_ *ShardStatistics `json:"_shards,omitempty"` + // Status The HTTP status code returned for the operation. + Status int `json:"status"` + // Version_ The document version associated with the operation. + // The document version is incremented each time the document is updated. + // This property is returned only for successful actions. 
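+ //
+ // Editor's note: a read-side sketch for one bulk item; `item` is a
+ // hypothetical ResponseItem. Per the fields above, a failed operation
+ // carries a non-2xx Status and a populated Error:
+ //
+ //  if item.Status >= 400 && item.Error != nil {
+ //      fmt.Printf("bulk op on %s failed: %v\n", item.Index_, *item.Error)
+ //  }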
+ Version_ *int64 `json:"_version,omitempty"` +} + +func (s *ResponseItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "failure_store": + if err := dec.Decode(&s.FailureStore); err != nil { + return fmt.Errorf("%s | %w", "FailureStore", err) + } + + case "forced_refresh": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForcedRefresh", err) + } + s.ForcedRefresh = &value + case bool: + s.ForcedRefresh = &v + } + + case "get": + if err := dec.Decode(&s.Get); err != nil { + return fmt.Errorf("%s | %w", "Get", err) + } + + case "_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id_ = &o + + case "_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index_ = o + + case "_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "result": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Result", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Result = &o + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "status": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + } + } + return nil +} + +// NewResponseItem returns a ResponseItem. +func NewResponseItem() *ResponseItem { + r := &ResponseItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go new file mode 100644 index 000000000..ed2e604fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
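ResponseItem's decoder coerces quoted numbers for fields like `status` and `_primary_term`, so a bulk item parses the same whether or not an intermediary stringified the values. A sketch with illustrative payloads:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, raw := range []string{
		`{"_index":"my-index","status":201,"result":"created"}`,
		`{"_index":"my-index","status":"201","result":"created"}`, // quoted number, still accepted
	} {
		item := types.NewResponseItem()
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			panic(err)
		}
		fmt.Println(item.Index_, item.Status, *item.Result) // my-index 201 created
	}
}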
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go
new file mode 100644
index 000000000..ed2e604fe
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/restriction.go
@@ -0,0 +1,50 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/restrictionworkflow"
+)
+
+// Restriction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleDescriptor.ts#L135-L141
+type Restriction struct {
+	// Workflows A list of workflows to which the API key is restricted.
+	// NOTE: In order to use a role restriction, an API key must be created with a
+	// single role descriptor.
+	Workflows []restrictionworkflow.RestrictionWorkflow `json:"workflows"`
+}
+
+// NewRestriction returns a Restriction.
+func NewRestriction() *Restriction {
+	r := &Restriction{}
+
+	return r
+}
+
+type RestrictionVariant interface {
+	RestrictionCaster() *Restriction
+}
+
+func (s *Restriction) RestrictionCaster() *Restriction {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retention.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retention.go
new file mode 100644
index 000000000..87baad5b3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retention.go
@@ -0,0 +1,117 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Retention type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L94-L107
+type Retention struct {
+	// ExpireAfter Time period after which a snapshot is considered expired and eligible for
+	// deletion. SLM deletes expired snapshots based on the slm.retention_schedule.
+	ExpireAfter Duration `json:"expire_after"`
+	// MaxCount Maximum number of snapshots to retain, even if the snapshots have not yet
+	// expired. If the number of snapshots in the repository exceeds this limit, the
+	// policy retains the most recent snapshots and deletes older snapshots.
+	MaxCount int `json:"max_count"`
+	// MinCount Minimum number of snapshots to retain, even if the snapshots have expired.
+	MinCount int `json:"min_count"`
+}
+
+func (s *Retention) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "expire_after":
+			if err := dec.Decode(&s.ExpireAfter); err != nil {
+				return fmt.Errorf("%s | %w", "ExpireAfter", err)
+			}
+
+		case "max_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxCount", err)
+				}
+				s.MaxCount = value
+			case float64:
+				f := int(v)
+				s.MaxCount = f
+			}
+
+		case "min_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MinCount", err)
+				}
+				s.MinCount = value
+			case float64:
+				f := int(v)
+				s.MinCount = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRetention returns a Retention.
+func NewRetention() *Retention {
+	r := &Retention{}
+
+	return r
+}
+
+type RetentionVariant interface {
+	RetentionCaster() *Retention
+}
+
+func (s *Retention) RetentionCaster() *Retention {
+	return s
+}
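Only decoding is customized for Retention; marshalling goes through plain `encoding/json`. A sketch of building an SLM retention block, assuming the usual string form of the `Duration` union type (the policy values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	ret := types.NewRetention()
	ret.ExpireAfter = "30d" // Duration is a union; a time-unit string is assumed valid here
	ret.MaxCount = 50
	ret.MinCount = 5

	out, err := json.Marshal(ret)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"expire_after":"30d","max_count":50,"min_count":5}
}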
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionlease.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionlease.go
new file mode 100644
index 000000000..fe7e6a8c7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionlease.go
@@ -0,0 +1,76 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// RetentionLease type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L66-L68
+type RetentionLease struct {
+	Period Duration `json:"period"`
+}
+
+func (s *RetentionLease) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "period":
+			if err := dec.Decode(&s.Period); err != nil {
+				return fmt.Errorf("%s | %w", "Period", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRetentionLease returns a RetentionLease.
+func NewRetentionLease() *RetentionLease {
+	r := &RetentionLease{}
+
+	return r
+}
+
+type RetentionLeaseVariant interface {
+	RetentionLeaseCaster() *RetentionLease
+}
+
+func (s *RetentionLease) RetentionLeaseCaster() *RetentionLease {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicy.go
new file mode 100644
index 000000000..ad925a476
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicy.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// RetentionPolicy type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L88-L96
+type RetentionPolicy struct {
+	// Field The date field that is used to calculate the age of the document.
+	Field string `json:"field"`
+	// MaxAge Specifies the maximum age of a document in the destination index. Documents
+	// that are older than the configured
+	// value are removed from the destination index.
+	MaxAge Duration `json:"max_age"`
+}
+
+func (s *RetentionPolicy) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "max_age":
+			if err := dec.Decode(&s.MaxAge); err != nil {
+				return fmt.Errorf("%s | %w", "MaxAge", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRetentionPolicy returns a RetentionPolicy.
+func NewRetentionPolicy() *RetentionPolicy {
+	r := &RetentionPolicy{}
+
+	return r
+}
+
+type RetentionPolicyVariant interface {
+	RetentionPolicyCaster() *RetentionPolicy
+}
+
+func (s *RetentionPolicy) RetentionPolicyCaster() *RetentionPolicy {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicycontainer.go
new file mode 100644
index 000000000..becee5417
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retentionpolicycontainer.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// RetentionPolicyContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L80-L86
+type RetentionPolicyContainer struct {
+	AdditionalRetentionPolicyContainerProperty map[string]json.RawMessage `json:"-"`
+	// Time Specifies that the transform uses a time field to set the retention policy.
+	Time *RetentionPolicy `json:"time,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s RetentionPolicyContainer) MarshalJSON() ([]byte, error) {
+	type opt RetentionPolicyContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalRetentionPolicyContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalRetentionPolicyContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewRetentionPolicyContainer returns a RetentionPolicyContainer.
+func NewRetentionPolicyContainer() *RetentionPolicyContainer {
+	r := &RetentionPolicyContainer{
+		AdditionalRetentionPolicyContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type RetentionPolicyContainerVariant interface {
+	RetentionPolicyContainerCaster() *RetentionPolicyContainer
+}
+
+func (s *RetentionPolicyContainer) RetentionPolicyContainerCaster() *RetentionPolicyContainer {
+	return s
+}
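The additional-properties map plus the custom MarshalJSON is how these container types stay open-ended: raw JSON under unknown keys is inlined next to the typed fields. A sketch, where `custom_policy` is an invented key standing in for a policy variant the typed API does not model:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewRetentionPolicyContainer()
	// Raw JSON placed in the additional-properties map is inlined on marshal.
	c.AdditionalRetentionPolicyContainerProperty["custom_policy"] = json.RawMessage(`{"max_docs":1000000}`)

	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"custom_policy":{"max_docs":1000000}}
}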
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retries.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retries.go
new file mode 100644
index 000000000..8355c4b01
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retries.go
@@ -0,0 +1,97 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Retries type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retries.ts#L22-L31
+type Retries struct {
+	// Bulk The number of bulk actions retried.
+	Bulk int64 `json:"bulk"`
+	// Search The number of search actions retried.
+	Search int64 `json:"search"`
+}
+
+func (s *Retries) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bulk":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Bulk", err)
+				}
+				s.Bulk = value
+			case float64:
+				f := int64(v)
+				s.Bulk = f
+			}
+
+		case "search":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Search", err)
+				}
+				s.Search = value
+			case float64:
+				f := int64(v)
+				s.Search = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRetries returns a Retries.
+func NewRetries() *Retries {
+	r := &Retries{}
+
+	return r
+}
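Retries applies the same leniency to its int64 counters, so both payloads below decode to the same struct (a sketch with made-up counts):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, raw := range []string{
		`{"bulk":12,"search":3}`,
		`{"bulk":"12","search":"3"}`, // quoted numbers are coerced via strconv
	} {
		r := types.NewRetries()
		if err := json.Unmarshal([]byte(raw), r); err != nil {
			panic(err)
		}
		fmt.Println(r.Bulk, r.Search) // 12 3
	}
}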
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retrievercontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retrievercontainer.go
new file mode 100644
index 000000000..87af7846d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/retrievercontainer.go
@@ -0,0 +1,98 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// RetrieverContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L28-L51
+type RetrieverContainer struct {
+	AdditionalRetrieverContainerProperty map[string]json.RawMessage `json:"-"`
+	// Knn A retriever that replaces the functionality of a knn search.
+	Knn *KnnRetriever `json:"knn,omitempty"`
+	// Linear A retriever that supports the combination of different retrievers through a
+	// weighted linear combination.
+	Linear *LinearRetriever `json:"linear,omitempty"`
+	// Pinned A pinned retriever applies pinned documents to the underlying retriever.
+	// This retriever will rewrite to a PinnedQueryBuilder.
+	Pinned *PinnedRetriever `json:"pinned,omitempty"`
+	// Rescorer A retriever that re-scores only the results produced by its child retriever.
+	Rescorer *RescorerRetriever `json:"rescorer,omitempty"`
+	// Rrf A retriever that produces top documents from reciprocal rank fusion (RRF).
+	Rrf *RRFRetriever `json:"rrf,omitempty"`
+	// Rule A retriever that replaces the functionality of a rule query.
+	Rule *RuleRetriever `json:"rule,omitempty"`
+	// Standard A retriever that replaces the functionality of a traditional query.
+	Standard *StandardRetriever `json:"standard,omitempty"`
+	// TextSimilarityReranker A retriever that reranks the top documents based on a reranking model using
+	// the InferenceAPI
+	TextSimilarityReranker *TextSimilarityReranker `json:"text_similarity_reranker,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s RetrieverContainer) MarshalJSON() ([]byte, error) {
+	type opt RetrieverContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalRetrieverContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalRetrieverContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewRetrieverContainer returns a RetrieverContainer.
+func NewRetrieverContainer() *RetrieverContainer {
+	r := &RetrieverContainer{
+		AdditionalRetrieverContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type RetrieverContainerVariant interface {
+	RetrieverContainerCaster() *RetrieverContainer
+}
+
+func (s *RetrieverContainer) RetrieverContainerCaster() *RetrieverContainer {
+	return s
+}
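The same container pattern lets a caller send a retriever type this client version does not yet model. A sketch; `my_future_retriever` is an invented key, not a real Elasticsearch retriever:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewRetrieverContainer()
	c.AdditionalRetrieverContainerProperty["my_future_retriever"] = json.RawMessage(`{"field":"title"}`)

	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"my_future_retriever":{"field":"title"}}
}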
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregate.go
new file mode 100644
index 000000000..d72eabe3b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregate.go
@@ -0,0 +1,638 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// ReverseNestedAggregate type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L540-L544
+type ReverseNestedAggregate struct {
+	Aggregations map[string]Aggregate `json:"-"`
+	DocCount int64 `json:"doc_count"`
+	Meta Metadata `json:"meta,omitempty"`
+}
+
+func (s *ReverseNestedAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCount", err)
+				}
+				s.DocCount = value
+			case float64:
+				f := int64(v)
+				s.DocCount = f
+			}
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		default:
+
+			if value, ok := t.(string); ok {
+				if strings.Contains(value, "#") {
+					elems := strings.Split(value, "#")
+					if len(elems) == 2 {
+						if s.Aggregations == nil {
+							s.Aggregations = make(map[string]Aggregate, 0)
+						}
+						switch elems[0] {
+
+						case "cardinality":
+							o := NewCardinalityAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "hdr_percentiles":
+							o := NewHdrPercentilesAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "hdr_percentile_ranks":
+							o := NewHdrPercentileRanksAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "tdigest_percentiles":
+							o := NewTDigestPercentilesAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "tdigest_percentile_ranks":
+							o := NewTDigestPercentileRanksAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "percentiles_bucket":
+							o := NewPercentilesBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "median_absolute_deviation":
+							o := NewMedianAbsoluteDeviationAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "min":
+							o := NewMinAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "max":
+							o := NewMaxAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "sum":
+							o := NewSumAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "avg":
+							o := NewAvgAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "weighted_avg":
+							o := NewWeightedAvgAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "value_count":
+							o := NewValueCountAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "simple_value":
+							o := NewSimpleValueAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "derivative":
+							o := NewDerivativeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "bucket_metric_value":
+							o := NewBucketMetricValueAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "stats":
+							o := NewStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "stats_bucket":
+							o := NewStatsBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "extended_stats":
+							o := NewExtendedStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "extended_stats_bucket":
+							o := NewExtendedStatsBucketAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_bounds":
+							o := NewGeoBoundsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_centroid":
+							o := NewGeoCentroidAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "histogram":
+							o := NewHistogramAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "date_histogram":
+							o := NewDateHistogramAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "auto_date_histogram":
+							o := NewAutoDateHistogramAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "variable_width_histogram":
+							o := NewVariableWidthHistogramAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "sterms":
+							o := NewStringTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "lterms":
+							o := NewLongTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "dterms":
+							o := NewDoubleTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "umterms":
+							o := NewUnmappedTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "lrareterms":
+							o := NewLongRareTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "srareterms":
+							o := NewStringRareTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "umrareterms":
+							o := NewUnmappedRareTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "multi_terms":
+							o := NewMultiTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "missing":
+							o := NewMissingAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "nested":
+							o := NewNestedAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "reverse_nested":
+							o := NewReverseNestedAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "global":
+							o := NewGlobalAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "filter":
+							o := NewFilterAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "children":
+							o := NewChildrenAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "parent":
+							o := NewParentAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "sampler":
+							o := NewSamplerAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "unmapped_sampler":
+							o := NewUnmappedSamplerAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geohash_grid":
+							o := NewGeoHashGridAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geotile_grid":
+							o := NewGeoTileGridAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geohex_grid":
+							o := NewGeoHexGridAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "range":
+							o := NewRangeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "date_range":
+							o := NewDateRangeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_distance":
+							o := NewGeoDistanceAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "ip_range":
+							o := NewIpRangeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "ip_prefix":
+							o := NewIpPrefixAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "filters":
+							o := NewFiltersAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "adjacency_matrix":
+							o := NewAdjacencyMatrixAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "siglterms":
+							o := NewSignificantLongTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "sigsterms":
+							o := NewSignificantStringTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "umsigterms":
+							o := NewUnmappedSignificantTermsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "composite":
+							o := NewCompositeAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "frequent_item_sets":
+							o := NewFrequentItemSetsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "time_series":
+							o := NewTimeSeriesAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "scripted_metric":
+							o := NewScriptedMetricAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "top_hits":
+							o := NewTopHitsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "inference":
+							o := NewInferenceAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "string_stats":
+							o := NewStringStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "boxplot":
+							o := NewBoxPlotAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "top_metrics":
+							o := NewTopMetricsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "t_test":
+							o := NewTTestAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "rate":
+							o := NewRateAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "simple_long_value":
+							o := NewCumulativeCardinalityAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "matrix_stats":
+							o := NewMatrixStatsAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						case "geo_line":
+							o := NewGeoLineAggregate()
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+
+						default:
+							o := make(map[string]any, 0)
+							if err := dec.Decode(&o); err != nil {
+								return fmt.Errorf("%s | %w", "Aggregations", err)
+							}
+							s.Aggregations[elems[1]] = o
+						}
+					} else {
+						return errors.New("cannot decode JSON for field Aggregations")
+					}
+				} else {
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[value] = o
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ReverseNestedAggregate) MarshalJSON() ([]byte, error) {
+	type opt ReverseNestedAggregate
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewReverseNestedAggregate returns a ReverseNestedAggregate.
+func NewReverseNestedAggregate() *ReverseNestedAggregate {
+	r := &ReverseNestedAggregate{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
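The long switch above exists because search responses encode sub-aggregations with typed keys of the form `"<type>#<name>"`. A sketch of what the decoder does with such a payload, assuming the usual `Value` field on MaxAggregate; the aggregation name and numbers are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "max#max_price" carries the concrete aggregate type before the '#'.
	raw := []byte(`{"doc_count":42,"max#max_price":{"value":19.99}}`)

	agg := types.NewReverseNestedAggregate()
	if err := json.Unmarshal(raw, agg); err != nil {
		panic(err)
	}

	// The key's suffix becomes the map key; the prefix picked the Go type.
	if max, ok := agg.Aggregations["max_price"].(*types.MaxAggregate); ok {
		fmt.Println(agg.DocCount, *max.Value) // 42 19.99
	}
}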
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregation.go
new file mode 100644
index 000000000..8f7c41088
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversenestedaggregation.go
@@ -0,0 +1,79 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ReverseNestedAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L741-L747
+type ReverseNestedAggregation struct {
+	// Path Defines the nested object field that should be joined back to.
+	// The default is empty, which means that it joins back to the root/main
+	// document level.
+	Path *string `json:"path,omitempty"`
+}
+
+func (s *ReverseNestedAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "path":
+			if err := dec.Decode(&s.Path); err != nil {
+				return fmt.Errorf("%s | %w", "Path", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewReverseNestedAggregation returns a ReverseNestedAggregation.
+func NewReverseNestedAggregation() *ReverseNestedAggregation {
+	r := &ReverseNestedAggregation{}
+
+	return r
+}
+
+type ReverseNestedAggregationVariant interface {
+	ReverseNestedAggregationCaster() *ReverseNestedAggregation
+}
+
+func (s *ReverseNestedAggregation) ReverseNestedAggregationCaster() *ReverseNestedAggregation {
+	return s
+}
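On the request side the matching aggregation type is tiny; leaving `Path` nil joins back to the root document. A sketch with an illustrative nested path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	agg := types.NewReverseNestedAggregation()
	path := "comments" // illustrative nested path to join back to
	agg.Path = &path

	out, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"path":"comments"}
}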
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversetokenfilter.go
new file mode 100644
index 000000000..00c69fd1f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/reversetokenfilter.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// ReverseTokenFilter type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L407-L409
+type ReverseTokenFilter struct {
+	Type string `json:"type,omitempty"`
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *ReverseTokenFilter) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s ReverseTokenFilter) MarshalJSON() ([]byte, error) {
+	type innerReverseTokenFilter ReverseTokenFilter
+	tmp := innerReverseTokenFilter{
+		Type: s.Type,
+		Version: s.Version,
+	}
+
+	tmp.Type = "reverse"
+
+	return json.Marshal(tmp)
+}
+
+// NewReverseTokenFilter returns a ReverseTokenFilter.
+func NewReverseTokenFilter() *ReverseTokenFilter {
+	r := &ReverseTokenFilter{}
+
+	return r
+}
+
+type ReverseTokenFilterVariant interface {
+	ReverseTokenFilterCaster() *ReverseTokenFilter
+}
+
+func (s *ReverseTokenFilter) ReverseTokenFilterCaster() *ReverseTokenFilter {
+	return s
+}
+
+func (s *ReverseTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition {
+	o := TokenFilterDefinition(s)
+	return &o
+}
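The MarshalJSON override pins the discriminator, so even the zero value serializes as a valid filter definition. A sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewReverseTokenFilter()
	out, err := json.Marshal(f) // Type is forced to "reverse" even if left empty
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"type":"reverse"}
}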
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/role.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/role.go
new file mode 100644
index 000000000..9cc48cdcb
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/role.go
@@ -0,0 +1,147 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege"
+)
+
+// Role type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_role/types.ts#L32-L54
+type Role struct {
+	Applications []ApplicationPrivileges `json:"applications"`
+	Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"`
+	Description *string `json:"description,omitempty"`
+	Global map[string]map[string]map[string][]string `json:"global,omitempty"`
+	Indices []IndicesPrivileges `json:"indices"`
+	Metadata Metadata `json:"metadata"`
+	RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"`
+	RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"`
+	RoleTemplates []RoleTemplate `json:"role_templates,omitempty"`
+	RunAs []string `json:"run_as,omitempty"`
+	TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"`
+}
+
+func (s *Role) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "applications":
+			if err := dec.Decode(&s.Applications); err != nil {
+				return fmt.Errorf("%s | %w", "Applications", err)
+			}
+
+		case "cluster":
+			if err := dec.Decode(&s.Cluster); err != nil {
+				return fmt.Errorf("%s | %w", "Cluster", err)
+			}
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "global":
+			if s.Global == nil {
+				s.Global = make(map[string]map[string]map[string][]string, 0)
+			}
+			if err := dec.Decode(&s.Global); err != nil {
+				return fmt.Errorf("%s | %w", "Global", err)
+			}
+
+		case "indices":
+			if err := dec.Decode(&s.Indices); err != nil {
+				return fmt.Errorf("%s | %w", "Indices", err)
+			}
+
+		case "metadata":
+			if err := dec.Decode(&s.Metadata); err != nil {
+				return fmt.Errorf("%s | %w", "Metadata", err)
+			}
+
+		case "remote_cluster":
+			if err := dec.Decode(&s.RemoteCluster); err != nil {
+				return fmt.Errorf("%s | %w", "RemoteCluster", err)
+			}
+
+		case "remote_indices":
+			if err := dec.Decode(&s.RemoteIndices); err != nil {
+				return fmt.Errorf("%s | %w", "RemoteIndices", err)
+			}
+
+		case "role_templates":
+			if err := dec.Decode(&s.RoleTemplates); err != nil {
+				return fmt.Errorf("%s | %w", "RoleTemplates", err)
+			}
+
+		case "run_as":
+			if err := dec.Decode(&s.RunAs); err != nil {
+				return fmt.Errorf("%s | %w", "RunAs", err)
+			}
+
+		case "transient_metadata":
+			if s.TransientMetadata == nil {
+				s.TransientMetadata = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.TransientMetadata); err != nil {
+				return fmt.Errorf("%s | %w", "TransientMetadata", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRole returns a Role.
+func NewRole() *Role {
+	r := &Role{
+		Global: make(map[string]map[string]map[string][]string),
+		TransientMetadata: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
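Role's `global` section is a three-level map (category, then privilege, then application to names). A sketch of decoding a get-role style payload; the values are invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{
	  "applications": [],
	  "cluster": [],
	  "indices": [],
	  "metadata": {},
	  "global": {"application": {"manage": {"applications": ["kibana-*"]}}}
	}`)

	role := types.NewRole()
	if err := json.Unmarshal(raw, role); err != nil {
		panic(err)
	}
	fmt.Println(role.Global["application"]["manage"]["applications"]) // [kibana-*]
}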
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptor.go
new file mode 100644
index 000000000..c7bbfb1dd
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptor.go
@@ -0,0 +1,180 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege"
+)
+
+// RoleDescriptor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleDescriptor.ts#L33-L83
+type RoleDescriptor struct {
+	// Applications A list of application privilege entries
+	Applications []ApplicationPrivileges `json:"applications,omitempty"`
+	// Cluster A list of cluster privileges. These privileges define the cluster level
+	// actions that API keys are able to execute.
+	Cluster []clusterprivilege.ClusterPrivilege `json:"cluster,omitempty"`
+	// Description Optional description of the role descriptor
+	Description *string `json:"description,omitempty"`
+	// Global An object defining global privileges. A global privilege is a form of cluster
+	// privilege that is request-aware. Support for global privileges is currently
+	// limited to the management of application privileges.
+	Global []GlobalPrivilege `json:"global,omitempty"`
+	// Indices A list of indices permissions entries.
+	Indices []IndicesPrivileges `json:"indices,omitempty"`
+	// Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are
+	// reserved for system usage.
+	Metadata Metadata `json:"metadata,omitempty"`
+	// RemoteCluster A list of cluster permissions for remote clusters.
+	// NOTE: This is limited to a subset of the cluster permissions.
+	RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"`
+	// RemoteIndices A list of indices permissions for remote clusters.
+	RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"`
+	// Restriction Restriction for when the role descriptor is allowed to be effective.
+	Restriction *Restriction `json:"restriction,omitempty"`
+	// RunAs A list of users that the API keys can impersonate.
+	// NOTE: In Elastic Cloud Serverless, the run-as feature is disabled.
+	// For API compatibility, you can still specify an empty `run_as` field, but a
+	// non-empty list will be rejected.
+	RunAs []string `json:"run_as,omitempty"`
+	TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"`
+}
+
+func (s *RoleDescriptor) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "applications":
+			if err := dec.Decode(&s.Applications); err != nil {
+				return fmt.Errorf("%s | %w", "Applications", err)
+			}
+
+		case "cluster":
+			if err := dec.Decode(&s.Cluster); err != nil {
+				return fmt.Errorf("%s | %w", "Cluster", err)
+			}
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "global":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := NewGlobalPrivilege()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Global", err)
+				}
+
+				s.Global = append(s.Global, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Global); err != nil {
+					return fmt.Errorf("%s | %w", "Global", err)
+				}
+			}
+
+		case "indices", "index":
+			if err := dec.Decode(&s.Indices); err != nil {
+				return fmt.Errorf("%s | %w", "Indices", err)
+			}
+
+		case "metadata":
+			if err := dec.Decode(&s.Metadata); err != nil {
+				return fmt.Errorf("%s | %w", "Metadata", err)
+			}
+
+		case "remote_cluster":
+			if err := dec.Decode(&s.RemoteCluster); err != nil {
+				return fmt.Errorf("%s | %w", "RemoteCluster", err)
+			}
+
+		case "remote_indices":
+			if err := dec.Decode(&s.RemoteIndices); err != nil {
+				return fmt.Errorf("%s | %w", "RemoteIndices", err)
+			}
+
+		case "restriction":
+			if err := dec.Decode(&s.Restriction); err != nil {
+				return fmt.Errorf("%s | %w", "Restriction", err)
+			}
+
+		case "run_as":
+			if err := dec.Decode(&s.RunAs); err != nil {
+				return fmt.Errorf("%s | %w", "RunAs", err)
+			}
+
+		case "transient_metadata":
+			if s.TransientMetadata == nil {
+				s.TransientMetadata = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.TransientMetadata); err != nil {
+				return fmt.Errorf("%s | %w", "TransientMetadata", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewRoleDescriptor returns a RoleDescriptor.
+func NewRoleDescriptor() *RoleDescriptor {
+	r := &RoleDescriptor{
+		TransientMetadata: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type RoleDescriptorVariant interface {
+	RoleDescriptorCaster() *RoleDescriptor
+}
+
+func (s *RoleDescriptor) RoleDescriptorCaster() *RoleDescriptor {
+	return s
+}
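Note the `global` case in the decoder above: a bare object is wrapped into a one-element slice, so both encodings land in the same field. A sketch, assuming the security API's usual global-privilege object shape (the application name is invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	object := []byte(`{"global":{"application":{"manage":{"applications":["app-*"]}}}}`)
	array := []byte(`{"global":[{"application":{"manage":{"applications":["app-*"]}}}]}`)

	for _, raw := range [][]byte{object, array} {
		rd := types.NewRoleDescriptor()
		if err := json.Unmarshal(raw, rd); err != nil {
			panic(err)
		}
		fmt.Println(len(rd.Global)) // 1 in both cases
	}
}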
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege"
+)
+
+// RoleDescriptorRead type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleDescriptor.ts#L85-L133
+type RoleDescriptorRead struct {
+	// Applications A list of application privilege entries
+	Applications []ApplicationPrivileges `json:"applications,omitempty"`
+	// Cluster A list of cluster privileges. These privileges define the cluster level
+	// actions that API keys are able to execute.
+	Cluster []clusterprivilege.ClusterPrivilege `json:"cluster"`
+	// Description An optional description of the role descriptor.
+	Description *string `json:"description,omitempty"`
+	// Global An object defining global privileges. A global privilege is a form of cluster
+	// privilege that is request-aware. Support for global privileges is currently
+	// limited to the management of application privileges.
+	Global []GlobalPrivilege `json:"global,omitempty"`
+	// Indices A list of indices permissions entries.
+	Indices []IndicesPrivileges `json:"indices"`
+	// Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are
+	// reserved for system usage.
+	Metadata Metadata `json:"metadata,omitempty"`
+	// RemoteCluster A list of cluster permissions for remote clusters.
+	// NOTE: This is limited to a subset of the cluster permissions.
+	RemoteCluster []RemoteClusterPrivileges `json:"remote_cluster,omitempty"`
+	// RemoteIndices A list of indices permissions for remote clusters.
+	RemoteIndices []RemoteIndicesPrivileges `json:"remote_indices,omitempty"`
+	// Restriction A restriction for when the role descriptor is allowed to be effective.
+	Restriction *Restriction `json:"restriction,omitempty"`
+	// RunAs A list of users that the API keys can impersonate.
+ RunAs []string `json:"run_as,omitempty"` + TransientMetadata map[string]json.RawMessage `json:"transient_metadata,omitempty"` +} + +func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return fmt.Errorf("%s | %w", "Applications", err) + } + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "global": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewGlobalPrivilege() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + + s.Global = append(s.Global, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Global); err != nil { + return fmt.Errorf("%s | %w", "Global", err) + } + } + + case "indices", "index": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return fmt.Errorf("%s | %w", "RemoteCluster", err) + } + + case "remote_indices": + if err := dec.Decode(&s.RemoteIndices); err != nil { + return fmt.Errorf("%s | %w", "RemoteIndices", err) + } + + case "restriction": + if err := dec.Decode(&s.Restriction); err != nil { + return fmt.Errorf("%s | %w", "Restriction", err) + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return fmt.Errorf("%s | %w", "RunAs", err) + } + + case "transient_metadata": + if s.TransientMetadata == nil { + s.TransientMetadata = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.TransientMetadata); err != nil { + return fmt.Errorf("%s | %w", "TransientMetadata", err) + } + + } + } + return nil +} + +// NewRoleDescriptorRead returns a RoleDescriptorRead. +func NewRoleDescriptorRead() *RoleDescriptorRead { + r := &RoleDescriptorRead{ + TransientMetadata: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptorwrapper.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptorwrapper.go new file mode 100644 index 000000000..bd41ad061 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roledescriptorwrapper.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
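RoleDescriptorRead mirrors RoleDescriptor but drops omitempty on cluster and indices, since role reads always return them. A hedged sketch of decoding one role from a get-role style body; the payload shape is illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"cluster":["all"],"indices":[{"names":["logs-*"],"privileges":["read"]}]}`)

	var role types.RoleDescriptorRead
	if err := json.Unmarshal(raw, &role); err != nil {
		panic(err)
	}
	fmt.Println(len(role.Cluster), len(role.Indices)) // 1 1
}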
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RoleDescriptorWrapper type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_service_accounts/types.ts#L22-L24 +type RoleDescriptorWrapper struct { + RoleDescriptor RoleDescriptorRead `json:"role_descriptor"` +} + +// NewRoleDescriptorWrapper returns a RoleDescriptorWrapper. +func NewRoleDescriptorWrapper() *RoleDescriptorWrapper { + r := &RoleDescriptorWrapper{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolemappingrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolemappingrule.go new file mode 100644 index 000000000..86d08497d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolemappingrule.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// RoleMappingRule type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleMappingRule.ts#L23-L31
+type RoleMappingRule struct {
+	AdditionalRoleMappingRuleProperty map[string]json.RawMessage `json:"-"`
+	All []RoleMappingRule `json:"all,omitempty"`
+	Any []RoleMappingRule `json:"any,omitempty"`
+	Except *RoleMappingRule `json:"except,omitempty"`
+	Field map[string][]FieldValue `json:"field,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s RoleMappingRule) MarshalJSON() ([]byte, error) {
+	type opt RoleMappingRule
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalRoleMappingRuleProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalRoleMappingRuleProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewRoleMappingRule returns a RoleMappingRule.
+func NewRoleMappingRule() *RoleMappingRule {
+	r := &RoleMappingRule{
+		AdditionalRoleMappingRuleProperty: make(map[string]json.RawMessage),
+		Field: make(map[string][]FieldValue),
+	}
+
+	return r
+}
+
+type RoleMappingRuleVariant interface {
+	RoleMappingRuleCaster() *RoleMappingRule
+}
+
+func (s *RoleMappingRule) RoleMappingRuleCaster() *RoleMappingRule {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolequerycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolequerycontainer.go
new file mode 100644
index 000000000..fc85f8161
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolequerycontainer.go
@@ -0,0 +1,230 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// RoleQueryContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_role/types.ts#L37-L101
+type RoleQueryContainer struct {
+	AdditionalRoleQueryContainerProperty map[string]json.RawMessage `json:"-"`
+	// Bool matches roles matching boolean combinations of other queries.
+ Bool *BoolQuery `json:"bool,omitempty"` + // Exists Returns roles that contain an indexed value for a field. + Exists *ExistsQuery `json:"exists,omitempty"` + // Ids Returns roles based on their IDs. + // This query uses role document IDs stored in the `_id` field. + Ids *IdsQuery `json:"ids,omitempty"` + // Match Returns roles that match a provided text, number, date or boolean value. + // The provided text is analyzed before matching. + Match map[string]MatchQuery `json:"match,omitempty"` + // MatchAll Matches all roles, giving them all a `_score` of 1.0. + MatchAll *MatchAllQuery `json:"match_all,omitempty"` + // Prefix Returns roles that contain a specific prefix in a provided field. + Prefix map[string]PrefixQuery `json:"prefix,omitempty"` + // Range Returns roles that contain terms within a provided range. + Range map[string]RangeQuery `json:"range,omitempty"` + // SimpleQueryString Returns roles based on a provided query string, using a parser with a limited + // but fault-tolerant syntax. + SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` + // Term Returns roles that contain an exact term in a provided field. + // To return a document, the query term must exactly match the queried field's + // value, including whitespace and capitalization. + Term map[string]TermQuery `json:"term,omitempty"` + // Terms Returns roles that contain one or more exact terms in a provided field. + // To return a document, one or more terms must exactly match a field value, + // including whitespace and capitalization. + Terms *TermsQuery `json:"terms,omitempty"` + // Wildcard Returns roles that contain terms matching a wildcard pattern. + Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"` +} + +func (s *RoleQueryContainer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bool": + if err := dec.Decode(&s.Bool); err != nil { + return fmt.Errorf("%s | %w", "Bool", err) + } + + case "exists": + if err := dec.Decode(&s.Exists); err != nil { + return fmt.Errorf("%s | %w", "Exists", err) + } + + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return fmt.Errorf("%s | %w", "Ids", err) + } + + case "match": + if s.Match == nil { + s.Match = make(map[string]MatchQuery, 0) + } + if err := dec.Decode(&s.Match); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + + case "match_all": + if err := dec.Decode(&s.MatchAll); err != nil { + return fmt.Errorf("%s | %w", "MatchAll", err) + } + + case "prefix": + if s.Prefix == nil { + s.Prefix = make(map[string]PrefixQuery, 0) + } + if err := dec.Decode(&s.Prefix); err != nil { + return fmt.Errorf("%s | %w", "Prefix", err) + } + + case "range": + if s.Range == nil { + s.Range = make(map[string]RangeQuery, 0) + } + messages := make(map[string]json.RawMessage) + err := dec.Decode(&messages) + if err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + + untyped := NewUntypedRangeQuery() + for key, message := range messages { + err := json.Unmarshal(message, &untyped) + if err != nil { + return fmt.Errorf("%s | %w", "Range", err) + } + s.Range[key] = untyped + } + + case "simple_query_string": + if err := dec.Decode(&s.SimpleQueryString); err != nil { + return fmt.Errorf("%s | %w", "SimpleQueryString", err) + } + + case "term": + if s.Term == nil { + s.Term = make(map[string]TermQuery, 0) + } + if err := dec.Decode(&s.Term); err != nil { 
+				return fmt.Errorf("%s | %w", "Term", err)
+			}
+
+		case "terms":
+			if err := dec.Decode(&s.Terms); err != nil {
+				return fmt.Errorf("%s | %w", "Terms", err)
+			}
+
+		case "wildcard":
+			if s.Wildcard == nil {
+				s.Wildcard = make(map[string]WildcardQuery, 0)
+			}
+			if err := dec.Decode(&s.Wildcard); err != nil {
+				return fmt.Errorf("%s | %w", "Wildcard", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalRoleQueryContainerProperty == nil {
+					s.AdditionalRoleQueryContainerProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalRoleQueryContainerProperty", err)
+				}
+				s.AdditionalRoleQueryContainerProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s RoleQueryContainer) MarshalJSON() ([]byte, error) {
+	type opt RoleQueryContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalRoleQueryContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalRoleQueryContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewRoleQueryContainer returns a RoleQueryContainer.
+func NewRoleQueryContainer() *RoleQueryContainer {
+	r := &RoleQueryContainer{
+		AdditionalRoleQueryContainerProperty: make(map[string]json.RawMessage),
+		Match: make(map[string]MatchQuery),
+		Prefix: make(map[string]PrefixQuery),
+		Range: make(map[string]RangeQuery),
+		Term: make(map[string]TermQuery),
+		Wildcard: make(map[string]WildcardQuery),
+	}
+
+	return r
+}
+
+type RoleQueryContainerVariant interface {
+	RoleQueryContainerCaster() *RoleQueryContainer
+}
+
+func (s *RoleQueryContainer) RoleQueryContainerCaster() *RoleQueryContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplate.go
new file mode 100644
index 000000000..bcffe876c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplate.go
@@ -0,0 +1,48 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
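A hedged sketch of building a RoleQueryContainer for the Query Role API and marshalling it; the field name and query value are illustrative. The MarshalJSON above also inlines anything placed in AdditionalRoleQueryContainerProperty, which is how unknown query variants round-trip.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	q := types.NewRoleQueryContainer()
	// The map key is the field to match on; "name" is an illustrative choice.
	q.Match["name"] = types.MatchQuery{Query: "admin"}

	out, err := json.Marshal(q)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"match":{"name":{"query":"admin"}}}
}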
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/templateformat" +) + +// RoleTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleTemplate.ts#L28-L31 +type RoleTemplate struct { + Format *templateformat.TemplateFormat `json:"format,omitempty"` + Template Script `json:"template"` +} + +// NewRoleTemplate returns a RoleTemplate. +func NewRoleTemplate() *RoleTemplate { + r := &RoleTemplate{} + + return r +} + +type RoleTemplateVariant interface { + RoleTemplateCaster() *RoleTemplate +} + +func (s *RoleTemplate) RoleTemplateCaster() *RoleTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplateinlinequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplateinlinequery.go new file mode 100644 index 000000000..18db80893 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplateinlinequery.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RoleTemplateInlineQuery holds the union for the following types: +// +// string +// Query +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L385-L386 +type RoleTemplateInlineQuery any + +type RoleTemplateInlineQueryVariant interface { + RoleTemplateInlineQueryCaster() *RoleTemplateInlineQuery +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatequery.go new file mode 100644 index 000000000..59c0b386a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatequery.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RoleTemplateQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L353-L363 +type RoleTemplateQuery struct { + // Template When you create a role, you can specify a query that defines the document + // level security permissions. You can optionally + // use Mustache templates in the role query to insert the username of the + // current authenticated user into the role. + // Like other places in Elasticsearch that support templating or scripting, you + // can specify inline, stored, or file-based + // templates and define custom parameters. You access the details for the + // current authenticated user through the _user parameter. + Template *RoleTemplateScript `json:"template,omitempty"` +} + +// NewRoleTemplateQuery returns a RoleTemplateQuery. +func NewRoleTemplateQuery() *RoleTemplateQuery { + r := &RoleTemplateQuery{} + + return r +} + +type RoleTemplateQueryVariant interface { + RoleTemplateQueryCaster() *RoleTemplateQuery +} + +func (s *RoleTemplateQuery) RoleTemplateQueryCaster() *RoleTemplateQuery { + return s +} + +func (s *RoleTemplateQuery) IndicesPrivilegesQueryCaster() *IndicesPrivilegesQuery { + o := IndicesPrivilegesQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatescript.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatescript.go new file mode 100644 index 000000000..3895a626a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/roletemplatescript.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// RoleTemplateScript type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L365-L383 +type RoleTemplateScript struct { + // Id The `id` for a stored script. + Id *string `json:"id,omitempty"` + // Lang Specifies the language the script is written in. + Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` + Options map[string]string `json:"options,omitempty"` + // Params Specifies any named parameters that are passed into the script as variables. + // Use parameters instead of hard-coded values to decrease compile time. + Params map[string]json.RawMessage `json:"params,omitempty"` + Source RoleTemplateInlineQuery `json:"source,omitempty"` +} + +func (s *RoleTemplateScript) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Source) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewRoleTemplateScript returns a RoleTemplateScript. 
+func NewRoleTemplateScript() *RoleTemplateScript { + r := &RoleTemplateScript{ + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), + } + + return r +} + +type RoleTemplateScriptVariant interface { + RoleTemplateScriptCaster() *RoleTemplateScript +} + +func (s *RoleTemplateScript) RoleTemplateScriptCaster() *RoleTemplateScript { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloveraction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloveraction.go new file mode 100644 index 000000000..ffcc610ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloveraction.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RolloverAction type. 
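Source on RoleTemplateScript is the RoleTemplateInlineQuery union (string or Query), so callers type-switch after decoding. A hedged sketch under that assumption; both payloads are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	for _, raw := range []string{
		`"{\"term\":{\"acl\":\"{{_user.username}}\"}}"`, // inline Mustache string template
		`{"source":{"term":{"acl":{"value":"ops"}}}}`,   // structured query object
	} {
		var s types.RoleTemplateScript
		if err := json.Unmarshal([]byte(raw), &s); err != nil {
			panic(err)
		}
		// The generated decoder stores either a plain string or a *types.Query.
		switch src := s.Source.(type) {
		case string:
			fmt.Println("string template:", src)
		case *types.Query:
			fmt.Println("structured query, has term clause:", src.Term != nil)
		}
	}
}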
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L99-L110 +type RolloverAction struct { + MaxAge Duration `json:"max_age,omitempty"` + MaxDocs *int64 `json:"max_docs,omitempty"` + MaxPrimaryShardDocs *int64 `json:"max_primary_shard_docs,omitempty"` + MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` + MaxSize ByteSize `json:"max_size,omitempty"` + MinAge Duration `json:"min_age,omitempty"` + MinDocs *int64 `json:"min_docs,omitempty"` + MinPrimaryShardDocs *int64 `json:"min_primary_shard_docs,omitempty"` + MinPrimaryShardSize ByteSize `json:"min_primary_shard_size,omitempty"` + MinSize ByteSize `json:"min_size,omitempty"` +} + +func (s *RolloverAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_age": + if err := dec.Decode(&s.MaxAge); err != nil { + return fmt.Errorf("%s | %w", "MaxAge", err) + } + + case "max_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocs", err) + } + s.MaxDocs = &value + case float64: + f := int64(v) + s.MaxDocs = &f + } + + case "max_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardDocs", err) + } + s.MaxPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MaxPrimaryShardDocs = &f + } + + case "max_primary_shard_size": + if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardSize", err) + } + + case "max_size": + if err := dec.Decode(&s.MaxSize); err != nil { + return fmt.Errorf("%s | %w", "MaxSize", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + case "min_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocs", err) + } + s.MinDocs = &value + case float64: + f := int64(v) + s.MinDocs = &f + } + + case "min_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardDocs", err) + } + s.MinPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MinPrimaryShardDocs = &f + } + + case "min_primary_shard_size": + if err := dec.Decode(&s.MinPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardSize", err) + } + + case "min_size": + if err := dec.Decode(&s.MinSize); err != nil { + return fmt.Errorf("%s | %w", "MinSize", err) + } + + } + } + return nil +} + +// NewRolloverAction returns a RolloverAction. 
+func NewRolloverAction() *RolloverAction { + r := &RolloverAction{} + + return r +} + +type RolloverActionVariant interface { + RolloverActionCaster() *RolloverAction +} + +func (s *RolloverAction) RolloverActionCaster() *RolloverAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloverconditions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloverconditions.go new file mode 100644 index 000000000..ea81b8174 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rolloverconditions.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RolloverConditions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/rollover/types.ts#L24-L40 +type RolloverConditions struct { + MaxAge Duration `json:"max_age,omitempty"` + MaxAgeMillis *int64 `json:"max_age_millis,omitempty"` + MaxDocs *int64 `json:"max_docs,omitempty"` + MaxPrimaryShardDocs *int64 `json:"max_primary_shard_docs,omitempty"` + MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` + MaxPrimaryShardSizeBytes *int64 `json:"max_primary_shard_size_bytes,omitempty"` + MaxSize ByteSize `json:"max_size,omitempty"` + MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"` + MinAge Duration `json:"min_age,omitempty"` + MinDocs *int64 `json:"min_docs,omitempty"` + MinPrimaryShardDocs *int64 `json:"min_primary_shard_docs,omitempty"` + MinPrimaryShardSize ByteSize `json:"min_primary_shard_size,omitempty"` + MinPrimaryShardSizeBytes *int64 `json:"min_primary_shard_size_bytes,omitempty"` + MinSize ByteSize `json:"min_size,omitempty"` + MinSizeBytes *int64 `json:"min_size_bytes,omitempty"` +} + +func (s *RolloverConditions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_age": + if err := dec.Decode(&s.MaxAge); err != nil { + return fmt.Errorf("%s | %w", "MaxAge", err) + } + + case "max_age_millis": + if err := dec.Decode(&s.MaxAgeMillis); err != nil { + return fmt.Errorf("%s | %w", "MaxAgeMillis", err) + } + + case "max_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocs", err) + } + s.MaxDocs = &value + case float64: + f := 
int64(v) + s.MaxDocs = &f + } + + case "max_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardDocs", err) + } + s.MaxPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MaxPrimaryShardDocs = &f + } + + case "max_primary_shard_size": + if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardSize", err) + } + + case "max_primary_shard_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxPrimaryShardSizeBytes", err) + } + s.MaxPrimaryShardSizeBytes = &value + case float64: + f := int64(v) + s.MaxPrimaryShardSizeBytes = &f + } + + case "max_size": + if err := dec.Decode(&s.MaxSize); err != nil { + return fmt.Errorf("%s | %w", "MaxSize", err) + } + + case "max_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSizeBytes", err) + } + s.MaxSizeBytes = &value + case float64: + f := int64(v) + s.MaxSizeBytes = &f + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + case "min_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocs", err) + } + s.MinDocs = &value + case float64: + f := int64(v) + s.MinDocs = &f + } + + case "min_primary_shard_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardDocs", err) + } + s.MinPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MinPrimaryShardDocs = &f + } + + case "min_primary_shard_size": + if err := dec.Decode(&s.MinPrimaryShardSize); err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardSize", err) + } + + case "min_primary_shard_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinPrimaryShardSizeBytes", err) + } + s.MinPrimaryShardSizeBytes = &value + case float64: + f := int64(v) + s.MinPrimaryShardSizeBytes = &f + } + + case "min_size": + if err := dec.Decode(&s.MinSize); err != nil { + return fmt.Errorf("%s | %w", "MinSize", err) + } + + case "min_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinSizeBytes", err) + } + s.MinSizeBytes = &value + case float64: + f := int64(v) + s.MinSizeBytes = &f + } + + } + } + return nil +} + +// NewRolloverConditions returns a RolloverConditions. 
+func NewRolloverConditions() *RolloverConditions { + r := &RolloverConditions{} + + return r +} + +type RolloverConditionsVariant interface { + RolloverConditionsCaster() *RolloverConditions +} + +func (s *RolloverConditions) RolloverConditionsCaster() *RolloverConditions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilities.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilities.go new file mode 100644 index 000000000..b89e9a4eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilities.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RollupCapabilities type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_caps/types.ts#L24-L29 +type RollupCapabilities struct { + // RollupJobs There can be multiple, independent jobs configured for a single index or + // index pattern. Each of these jobs may have different configurations, so the + // API returns a list of all the various configurations available. + RollupJobs []RollupCapabilitySummary `json:"rollup_jobs"` +} + +// NewRollupCapabilities returns a RollupCapabilities. +func NewRollupCapabilities() *RollupCapabilities { + r := &RollupCapabilities{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilitysummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilitysummary.go new file mode 100644 index 000000000..12ac3172c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupcapabilitysummary.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
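The rollover decoders above deliberately coerce numeric thresholds from either JSON numbers or quoted strings. A minimal hedged sketch of that leniency; the values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// max_docs arrives quoted, min_docs as a bare number: both land in *int64.
	raw := []byte(`{"max_age":"30d","max_docs":"7000000","min_docs":1000}`)

	var c types.RolloverConditions
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.MaxAge, *c.MaxDocs, *c.MinDocs) // 30d 7000000 1000
}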
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupCapabilitySummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_caps/types.ts#L31-L36 +type RollupCapabilitySummary struct { + Fields map[string][]RollupFieldSummary `json:"fields"` + IndexPattern string `json:"index_pattern"` + JobId string `json:"job_id"` + RollupIndex string `json:"rollup_index"` +} + +func (s *RollupCapabilitySummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]RollupFieldSummary, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexPattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobId = o + + case "rollup_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RollupIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RollupIndex = o + + } + } + return nil +} + +// NewRollupCapabilitySummary returns a RollupCapabilitySummary. +func NewRollupCapabilitySummary() *RollupCapabilitySummary { + r := &RollupCapabilitySummary{ + Fields: make(map[string][]RollupFieldSummary), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupfieldsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupfieldsummary.go new file mode 100644 index 000000000..296a35163 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupfieldsummary.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
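A hedged sketch of walking a get-rollup-caps style payload with the two capability types above; the JSON follows the struct tags, while the job and index names are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"rollup_jobs":[{"fields":{"timestamp":[{"agg":"date_histogram","calendar_interval":"1h"}]},"index_pattern":"logs-*","job_id":"hourly-logs","rollup_index":"logs_rollup"}]}`)

	var caps types.RollupCapabilities
	if err := json.Unmarshal(raw, &caps); err != nil {
		panic(err)
	}
	for _, job := range caps.RollupJobs {
		fmt.Println(job.JobId, "->", job.RollupIndex, "fields:", len(job.Fields))
	}
}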
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupFieldSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_caps/types.ts#L38-L42 +type RollupFieldSummary struct { + Agg string `json:"agg"` + CalendarInterval Duration `json:"calendar_interval,omitempty"` + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *RollupFieldSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Agg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agg = o + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return fmt.Errorf("%s | %w", "CalendarInterval", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewRollupFieldSummary returns a RollupFieldSummary. +func NewRollupFieldSummary() *RollupFieldSummary { + r := &RollupFieldSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjob.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjob.go new file mode 100644 index 000000000..59021f5db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjob.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RollupJob type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/types.ts#L28-L43 +type RollupJob struct { + // Config The rollup job configuration. + Config RollupJobConfiguration `json:"config"` + // Stats Transient statistics about the rollup job, such as how many documents have + // been processed and how many rollup summary docs have been indexed. + // These stats are not persisted. + // If a node is restarted, these stats are reset. + Stats RollupJobStats `json:"stats"` + // Status The current status of the indexer for the rollup job. + Status RollupJobStatus `json:"status"` +} + +// NewRollupJob returns a RollupJob. 
+func NewRollupJob() *RollupJob { + r := &RollupJob{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobconfiguration.go new file mode 100644 index 000000000..e3a600666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobconfiguration.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupJobConfiguration type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/types.ts#L45-L54 +type RollupJobConfiguration struct { + Cron string `json:"cron"` + Groups Groupings `json:"groups"` + Id string `json:"id"` + IndexPattern string `json:"index_pattern"` + Metrics []FieldMetric `json:"metrics"` + PageSize int64 `json:"page_size"` + RollupIndex string `json:"rollup_index"` + Timeout Duration `json:"timeout"` +} + +func (s *RollupJobConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cron": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cron", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cron = o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexPattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + + case "page_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PageSize", err) + } + s.PageSize = value + case float64: + f := int64(v) + s.PageSize = f + } + + case "rollup_index": + if err := dec.Decode(&s.RollupIndex); err != nil { + return fmt.Errorf("%s | %w", "RollupIndex", err) + } + + case 
"timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} + +// NewRollupJobConfiguration returns a RollupJobConfiguration. +func NewRollupJobConfiguration() *RollupJobConfiguration { + r := &RollupJobConfiguration{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstats.go new file mode 100644 index 000000000..87a0fae3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstats.go @@ -0,0 +1,225 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupJobStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/types.ts#L56-L69 +type RollupJobStats struct { + DocumentsProcessed int64 `json:"documents_processed"` + IndexFailures int64 `json:"index_failures"` + IndexTimeInMs int64 `json:"index_time_in_ms"` + IndexTotal int64 `json:"index_total"` + PagesProcessed int64 `json:"pages_processed"` + ProcessingTimeInMs int64 `json:"processing_time_in_ms"` + ProcessingTotal int64 `json:"processing_total"` + RollupsIndexed int64 `json:"rollups_indexed"` + SearchFailures int64 `json:"search_failures"` + SearchTimeInMs int64 `json:"search_time_in_ms"` + SearchTotal int64 `json:"search_total"` + TriggerCount int64 `json:"trigger_count"` +} + +func (s *RollupJobStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "documents_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocumentsProcessed", err) + } + s.DocumentsProcessed = value + case float64: + f := int64(v) + s.DocumentsProcessed = f + } + + case "index_failures": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexFailures", err) + } + s.IndexFailures = value + case float64: + f := int64(v) + s.IndexFailures = f + } + + case "index_time_in_ms": + if err := dec.Decode(&s.IndexTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "IndexTimeInMs", err) + } + + case 
"index_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexTotal", err) + } + s.IndexTotal = value + case float64: + f := int64(v) + s.IndexTotal = f + } + + case "pages_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PagesProcessed", err) + } + s.PagesProcessed = value + case float64: + f := int64(v) + s.PagesProcessed = f + } + + case "processing_time_in_ms": + if err := dec.Decode(&s.ProcessingTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "ProcessingTimeInMs", err) + } + + case "processing_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ProcessingTotal", err) + } + s.ProcessingTotal = value + case float64: + f := int64(v) + s.ProcessingTotal = f + } + + case "rollups_indexed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RollupsIndexed", err) + } + s.RollupsIndexed = value + case float64: + f := int64(v) + s.RollupsIndexed = f + } + + case "search_failures": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SearchFailures", err) + } + s.SearchFailures = value + case float64: + f := int64(v) + s.SearchFailures = f + } + + case "search_time_in_ms": + if err := dec.Decode(&s.SearchTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "SearchTimeInMs", err) + } + + case "search_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SearchTotal", err) + } + s.SearchTotal = value + case float64: + f := int64(v) + s.SearchTotal = f + } + + case "trigger_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TriggerCount", err) + } + s.TriggerCount = value + case float64: + f := int64(v) + s.TriggerCount = f + } + + } + } + return nil +} + +// NewRollupJobStats returns a RollupJobStats. +func NewRollupJobStats() *RollupJobStats { + r := &RollupJobStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstatus.go new file mode 100644 index 000000000..f75820a96 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobstatus.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexingjobstate" +) + +// RollupJobStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_jobs/types.ts#L71-L75 +type RollupJobStatus struct { + CurrentPosition map[string]json.RawMessage `json:"current_position,omitempty"` + JobState indexingjobstate.IndexingJobState `json:"job_state"` + UpgradedDocId *bool `json:"upgraded_doc_id,omitempty"` +} + +func (s *RollupJobStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_position": + if s.CurrentPosition == nil { + s.CurrentPosition = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.CurrentPosition); err != nil { + return fmt.Errorf("%s | %w", "CurrentPosition", err) + } + + case "job_state": + if err := dec.Decode(&s.JobState); err != nil { + return fmt.Errorf("%s | %w", "JobState", err) + } + + case "upgraded_doc_id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UpgradedDocId", err) + } + s.UpgradedDocId = &value + case bool: + s.UpgradedDocId = &v + } + + } + } + return nil +} + +// NewRollupJobStatus returns a RollupJobStatus. +func NewRollupJobStatus() *RollupJobStatus { + r := &RollupJobStatus{ + CurrentPosition: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummary.go new file mode 100644 index 000000000..a77326a95 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummary.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupJobSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 +type RollupJobSummary struct { + Fields map[string][]RollupJobSummaryField `json:"fields"` + IndexPattern string `json:"index_pattern"` + JobId string `json:"job_id"` + RollupIndex string `json:"rollup_index"` +} + +func (s *RollupJobSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]RollupJobSummaryField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexPattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return fmt.Errorf("%s | %w", "JobId", err) + } + + case "rollup_index": + if err := dec.Decode(&s.RollupIndex); err != nil { + return fmt.Errorf("%s | %w", "RollupIndex", err) + } + + } + } + return nil +} + +// NewRollupJobSummary returns a RollupJobSummary. +func NewRollupJobSummary() *RollupJobSummary { + r := &RollupJobSummary{ + Fields: make(map[string][]RollupJobSummaryField), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummaryfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummaryfield.go new file mode 100644 index 000000000..c35c73597 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rollupjobsummaryfield.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RollupJobSummaryField type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 +type RollupJobSummaryField struct { + Agg string `json:"agg"` + CalendarInterval Duration `json:"calendar_interval,omitempty"` + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *RollupJobSummaryField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Agg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agg = o + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return fmt.Errorf("%s | %w", "CalendarInterval", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewRollupJobSummaryField returns a RollupJobSummaryField. +func NewRollupJobSummaryField() *RollupJobSummaryField { + r := &RollupJobSummaryField{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/romaniananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/romaniananalyzer.go new file mode 100644 index 000000000..e69c12778 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/romaniananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RomanianAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L268-L273 +type RomanianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RomanianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RomanianAnalyzer) MarshalJSON() ([]byte, error) { + type innerRomanianAnalyzer RomanianAnalyzer + tmp := innerRomanianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "romanian" + + return json.Marshal(tmp) +} + +// NewRomanianAnalyzer returns a RomanianAnalyzer. +func NewRomanianAnalyzer() *RomanianAnalyzer { + r := &RomanianAnalyzer{} + + return r +} + +type RomanianAnalyzerVariant interface { + RomanianAnalyzerCaster() *RomanianAnalyzer +} + +func (s *RomanianAnalyzer) RomanianAnalyzerCaster() *RomanianAnalyzer { + return s +} + +func (s *RomanianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/routingfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/routingfield.go new file mode 100644 index 000000000..05bc1c082 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/routingfield.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RoutingField type. 
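+//
+// When `required` is true, CRUD operations that target the index must
+// supply a routing value.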
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L50-L52 +type RoutingField struct { + Required bool `json:"required"` +} + +func (s *RoutingField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "required": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Required", err) + } + s.Required = value + case bool: + s.Required = v + } + + } + } + return nil +} + +// NewRoutingField returns a RoutingField. +func NewRoutingField() *RoutingField { + r := &RoutingField{} + + return r +} + +type RoutingFieldVariant interface { + RoutingFieldCaster() *RoutingField +} + +func (s *RoutingField) RoutingFieldCaster() *RoutingField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfrank.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfrank.go new file mode 100644 index 000000000..8354fc19e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfrank.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RrfRank type. 
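+//
+// Informally, reciprocal rank fusion scores a document as (roughly) the sum
+// of 1 / (rank_constant + rank) across the result sets being merged, so a
+// larger rank_constant dampens the advantage of top-ranked documents.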
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Rank.ts#L32-L37 +type RrfRank struct { + // RankConstant How much influence documents in individual result sets per query have over + // the final ranked result set + RankConstant *int64 `json:"rank_constant,omitempty"` + // RankWindowSize Size of the individual result sets per query + RankWindowSize *int64 `json:"rank_window_size,omitempty"` +} + +func (s *RrfRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rank_constant": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RankConstant", err) + } + s.RankConstant = &value + case float64: + f := int64(v) + s.RankConstant = &f + } + + case "rank_window_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int64(v) + s.RankWindowSize = &f + } + + } + } + return nil +} + +// NewRrfRank returns a RrfRank. +func NewRrfRank() *RrfRank { + r := &RrfRank{} + + return r +} + +type RrfRankVariant interface { + RrfRankCaster() *RrfRank +} + +func (s *RrfRank) RrfRankCaster() *RrfRank { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfretriever.go new file mode 100644 index 000000000..400ed6574 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rrfretriever.go @@ -0,0 +1,186 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RRFRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L135-L144 +type RRFRetriever struct { + Fields []string `json:"fields,omitempty"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. 
+ Name_ *string `json:"_name,omitempty"` + Query *string `json:"query,omitempty"` + // RankConstant This value determines how much influence documents in individual result sets + // per query have over the final ranked result set. + RankConstant *int `json:"rank_constant,omitempty"` + // RankWindowSize This value determines the size of the individual result sets per query. + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retrievers A list of child retrievers to specify which sets of returned top documents + // will have the RRF formula applied to them. + Retrievers []RetrieverContainer `json:"retrievers"` +} + +func (s *RRFRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "rank_constant": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankConstant", err) + } + s.RankConstant = &value + case float64: + f := int(v) + s.RankConstant = &f + } + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retrievers": + if err := dec.Decode(&s.Retrievers); err != nil { + return fmt.Errorf("%s | %w", "Retrievers", err) + } + + } + } + return nil +} + +// NewRRFRetriever returns a RRFRetriever. 
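+//
+// A minimal, illustrative sketch (the values are placeholders, not
+// defaults):
+//
+//	r := NewRRFRetriever()
+//	k, window := 60, 100
+//	r.RankConstant = &k
+//	r.RankWindowSize = &window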
+func NewRRFRetriever() *RRFRetriever { + r := &RRFRetriever{} + + return r +} + +type RRFRetrieverVariant interface { + RRFRetrieverCaster() *RRFRetriever +} + +func (s *RRFRetriever) RRFRetrieverCaster() *RRFRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulecondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulecondition.go new file mode 100644 index 000000000..aba389454 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulecondition.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/appliesto" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionoperator" +) + +// RuleCondition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Rule.ts#L52-L65 +type RuleCondition struct { + // AppliesTo Specifies the result property to which the condition applies. If your + // detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can + // only specify conditions that apply to time. + AppliesTo appliesto.AppliesTo `json:"applies_to"` + // Operator Specifies the condition operator. The available options are greater than, + // greater than or equals, less than, and less than or equals. + Operator conditionoperator.ConditionOperator `json:"operator"` + // Value The value that is compared against the `applies_to` field using the operator. + Value Float64 `json:"value"` +} + +func (s *RuleCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applies_to": + if err := dec.Decode(&s.AppliesTo); err != nil { + return fmt.Errorf("%s | %w", "AppliesTo", err) + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return fmt.Errorf("%s | %w", "Operator", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + +// NewRuleCondition returns a RuleCondition. 
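+//
+// For illustration, a rule condition meaning "actual value greater than 30"
+// would set AppliesTo to the actual result property, Operator to the
+// greater-than operator, and Value to 30.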
+func NewRuleCondition() *RuleCondition { + r := &RuleCondition{} + + return r +} + +type RuleConditionVariant interface { + RuleConditionCaster() *RuleCondition +} + +func (s *RuleCondition) RuleConditionCaster() *RuleCondition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulequery.go new file mode 100644 index 000000000..1e1e6995f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/rulequery.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RuleQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L398-L406 +type RuleQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` + Organic Query `json:"organic"` + QueryName_ *string `json:"_name,omitempty"` + RulesetId *string `json:"ruleset_id,omitempty"` + RulesetIds []string `json:"ruleset_ids,omitempty"` +} + +func (s *RuleQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "match_criteria": + if err := dec.Decode(&s.MatchCriteria); err != nil { + return fmt.Errorf("%s | %w", "MatchCriteria", err) + } + + case "organic": + if err := dec.Decode(&s.Organic); err != nil { + return fmt.Errorf("%s | %w", "Organic", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "ruleset_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RulesetId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RulesetId = &o + + case "ruleset_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "RulesetIds", err) + } + + s.RulesetIds = append(s.RulesetIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.RulesetIds); err != nil { + return fmt.Errorf("%s | %w", "RulesetIds", err) + } + } + + } + } + return nil +} + +// NewRuleQuery returns a RuleQuery. +func NewRuleQuery() *RuleQuery { + r := &RuleQuery{} + + return r +} + +type RuleQueryVariant interface { + RuleQueryCaster() *RuleQuery +} + +func (s *RuleQuery) RuleQueryCaster() *RuleQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ruleretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ruleretriever.go new file mode 100644 index 000000000..ddea9c6df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ruleretriever.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RuleRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L159-L168 +type RuleRetriever struct { + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MatchCriteria The match criteria that will determine if a rule in the provided rulesets + // should be applied. + MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + // RankWindowSize This value determines the size of the individual result set. + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retriever The retriever whose results rules should be applied to. + Retriever RetrieverContainer `json:"retriever"` + // RulesetIds The ruleset IDs containing the rules this retriever is evaluating against. + RulesetIds []string `json:"ruleset_ids"` +} + +func (s *RuleRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "match_criteria": + if err := dec.Decode(&s.MatchCriteria); err != nil { + return fmt.Errorf("%s | %w", "MatchCriteria", err) + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + case "ruleset_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "RulesetIds", err) + } + + s.RulesetIds = append(s.RulesetIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.RulesetIds); err != nil { 
+ return fmt.Errorf("%s | %w", "RulesetIds", err) + } + } + + } + } + return nil +} + +// NewRuleRetriever returns a RuleRetriever. +func NewRuleRetriever() *RuleRetriever { + r := &RuleRetriever{} + + return r +} + +type RuleRetrieverVariant interface { + RuleRetrieverCaster() *RuleRetriever +} + +func (s *RuleRetriever) RuleRetrieverCaster() *RuleRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runningstatesearchinterval.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runningstatesearchinterval.go new file mode 100644 index 000000000..9af54f803 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runningstatesearchinterval.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RunningStateSearchInterval type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Datafeed.ts#L226-L243 +type RunningStateSearchInterval struct { + // End The end time. + End Duration `json:"end,omitempty"` + // EndMs The end time as an epoch in milliseconds. + EndMs int64 `json:"end_ms"` + // Start The start time. + Start Duration `json:"start,omitempty"` + // StartMs The start time as an epoch in milliseconds. + StartMs int64 `json:"start_ms"` +} + +func (s *RunningStateSearchInterval) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end": + if err := dec.Decode(&s.End); err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + + case "end_ms": + if err := dec.Decode(&s.EndMs); err != nil { + return fmt.Errorf("%s | %w", "EndMs", err) + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + case "start_ms": + if err := dec.Decode(&s.StartMs); err != nil { + return fmt.Errorf("%s | %w", "StartMs", err) + } + + } + } + return nil +} + +// NewRunningStateSearchInterval returns a RunningStateSearchInterval. 
+func NewRunningStateSearchInterval() *RunningStateSearchInterval { + r := &RunningStateSearchInterval{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefield.go new file mode 100644 index 000000000..05399f8c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefield.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype" +) + +// RuntimeField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/RuntimeFields.ts#L26-L50 +type RuntimeField struct { + // FetchFields For type `lookup` + FetchFields []RuntimeFieldFetchFields `json:"fetch_fields,omitempty"` + // Fields For type `composite` + Fields map[string]CompositeSubField `json:"fields,omitempty"` + // Format A custom format for `date` type runtime fields. + Format *string `json:"format,omitempty"` + // InputField For type `lookup` + InputField *string `json:"input_field,omitempty"` + // Script Painless script executed at query time. + Script *Script `json:"script,omitempty"` + // TargetField For type `lookup` + TargetField *string `json:"target_field,omitempty"` + // TargetIndex For type `lookup` + TargetIndex *string `json:"target_index,omitempty"` + // Type Field type, which can be: `boolean`, `composite`, `date`, `double`, + // `geo_point`, `ip`,`keyword`, `long`, or `lookup`. 
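+ // The `composite` and `lookup` variants additionally use the `fields`,
+ // `input_field`, `target_field`, and `target_index` properties described
+ // above.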
+ Type runtimefieldtype.RuntimeFieldType `json:"type"` +} + +func (s *RuntimeField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fetch_fields": + if err := dec.Decode(&s.FetchFields); err != nil { + return fmt.Errorf("%s | %w", "FetchFields", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]CompositeSubField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "input_field": + if err := dec.Decode(&s.InputField); err != nil { + return fmt.Errorf("%s | %w", "InputField", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + case "target_index": + if err := dec.Decode(&s.TargetIndex); err != nil { + return fmt.Errorf("%s | %w", "TargetIndex", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewRuntimeField returns a RuntimeField. +func NewRuntimeField() *RuntimeField { + r := &RuntimeField{ + Fields: make(map[string]CompositeSubField), + } + + return r +} + +type RuntimeFieldVariant interface { + RuntimeFieldCaster() *RuntimeField +} + +func (s *RuntimeField) RuntimeFieldCaster() *RuntimeField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldfetchfields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldfetchfields.go new file mode 100644 index 000000000..e36598a17 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldfetchfields.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RuntimeFieldFetchFields type. 
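+//
+// In JSON this accepts either a bare field-name string or an object with
+// `field` and an optional `format`; the custom UnmarshalJSON below handles
+// both shapes.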
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/RuntimeFields.ts#L56-L60 +type RuntimeFieldFetchFields struct { + Field string `json:"field"` + Format *string `json:"format,omitempty"` +} + +func (s *RuntimeFieldFetchFields) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Field) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + } + } + return nil +} + +// NewRuntimeFieldFetchFields returns a RuntimeFieldFetchFields. +func NewRuntimeFieldFetchFields() *RuntimeFieldFetchFields { + r := &RuntimeFieldFetchFields{} + + return r +} + +type RuntimeFieldFetchFieldsVariant interface { + RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields +} + +func (s *RuntimeFieldFetchFields) RuntimeFieldFetchFieldsCaster() *RuntimeFieldFetchFields { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefields.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefields.go new file mode 100644 index 000000000..f7b471bdc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefields.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// RuntimeFields type alias. 
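+//
+// A RuntimeFields value maps each runtime field name to its RuntimeField
+// definition.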
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/RuntimeFields.ts#L24-L24 +type RuntimeFields map[string]RuntimeField + +type RuntimeFieldsVariant interface { + RuntimeFieldsCaster() *RuntimeFields +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldstype.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldstype.go new file mode 100644 index 000000000..1c86011cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/runtimefieldstype.go @@ -0,0 +1,267 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RuntimeFieldsType type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L289-L304 +type RuntimeFieldsType struct { + CharsMax int64 `json:"chars_max"` + CharsTotal int64 `json:"chars_total"` + Count int64 `json:"count"` + DocMax int64 `json:"doc_max"` + DocTotal int64 `json:"doc_total"` + IndexCount int64 `json:"index_count"` + Lang []string `json:"lang"` + LinesMax int64 `json:"lines_max"` + LinesTotal int64 `json:"lines_total"` + Name string `json:"name"` + ScriptlessCount int64 `json:"scriptless_count"` + ShadowedCount int64 `json:"shadowed_count"` + SourceMax int64 `json:"source_max"` + SourceTotal int64 `json:"source_total"` +} + +func (s *RuntimeFieldsType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chars_max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CharsMax", err) + } + s.CharsMax = value + case float64: + f := int64(v) + s.CharsMax = f + } + + case "chars_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CharsTotal", err) + } + s.CharsTotal = value + case float64: + f := int64(v) + s.CharsTotal = f + } + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + 
case "doc_max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocMax", err) + } + s.DocMax = value + case float64: + f := int64(v) + s.DocMax = f + } + + case "doc_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocTotal", err) + } + s.DocTotal = value + case float64: + f := int64(v) + s.DocTotal = f + } + + case "index_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int64(v) + s.IndexCount = f + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "lines_max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LinesMax", err) + } + s.LinesMax = value + case float64: + f := int64(v) + s.LinesMax = f + } + + case "lines_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LinesTotal", err) + } + s.LinesTotal = value + case float64: + f := int64(v) + s.LinesTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "scriptless_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptlessCount", err) + } + s.ScriptlessCount = value + case float64: + f := int64(v) + s.ScriptlessCount = f + } + + case "shadowed_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShadowedCount", err) + } + s.ShadowedCount = value + case float64: + f := int64(v) + s.ShadowedCount = f + } + + case "source_max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SourceMax", err) + } + s.SourceMax = value + case float64: + f := int64(v) + s.SourceMax = f + } + + case "source_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SourceTotal", err) + } + s.SourceTotal = value + case float64: + f := int64(v) + s.SourceTotal = f + } + + } + } + return nil +} + +// NewRuntimeFieldsType returns a RuntimeFieldsType. +func NewRuntimeFieldsType() *RuntimeFieldsType { + r := &RuntimeFieldsType{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russiananalyzer.go new file mode 100644 index 000000000..a0f076cf3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russiananalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// RussianAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L275-L280 +type RussianAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *RussianAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RussianAnalyzer) MarshalJSON() ([]byte, error) { + type innerRussianAnalyzer RussianAnalyzer + tmp := innerRussianAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "russian" + + return json.Marshal(tmp) +} + +// NewRussianAnalyzer returns a RussianAnalyzer. +func NewRussianAnalyzer() *RussianAnalyzer { + r := &RussianAnalyzer{} + + return r +} + +type RussianAnalyzerVariant interface { + RussianAnalyzerCaster() *RussianAnalyzer +} + +func (s *RussianAnalyzer) RussianAnalyzerCaster() *RussianAnalyzer { + return s +} + +func (s *RussianAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russianstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russianstemtokenfilter.go new file mode 100644 index 000000000..c3dc05df7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/russianstemtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// RussianStemTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L567-L569 +type RussianStemTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *RussianStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RussianStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerRussianStemTokenFilter RussianStemTokenFilter + tmp := innerRussianStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "russian_stem" + + return json.Marshal(tmp) +} + +// NewRussianStemTokenFilter returns a RussianStemTokenFilter. +func NewRussianStemTokenFilter() *RussianStemTokenFilter { + r := &RussianStemTokenFilter{} + + return r +} + +type RussianStemTokenFilterVariant interface { + RussianStemTokenFilterCaster() *RussianStemTokenFilter +} + +func (s *RussianStemTokenFilter) RussianStemTokenFilterCaster() *RussianStemTokenFilter { + return s +} + +func (s *RussianStemTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repository.go new file mode 100644 index 000000000..cccd36d41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repository.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// S3Repository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L64-L78 +type S3Repository struct { + // Settings The repository settings. + // + // NOTE: In addition to the specified settings, you can also use all non-secure + // client settings in the repository settings. + // In this case, the client settings found in the repository settings will be + // merged with those of the named client used by the repository. + // Conflicts between client and repository settings are resolved by the + // repository settings taking precedence over client settings. + Settings S3RepositorySettings `json:"settings"` + // Type The S3 repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *S3Repository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s S3Repository) MarshalJSON() ([]byte, error) { + type innerS3Repository S3Repository + tmp := innerS3Repository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "s3" + + return json.Marshal(tmp) +} + +// NewS3Repository returns a S3Repository. +func NewS3Repository() *S3Repository { + r := &S3Repository{} + + return r +} + +type S3RepositoryVariant interface { + S3RepositoryCaster() *S3Repository +} + +func (s *S3Repository) S3RepositoryCaster() *S3Repository { + return s +} + +func (s *S3Repository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repositorysettings.go new file mode 100644 index 000000000..287918bd5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/s3repositorysettings.go @@ -0,0 +1,390 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// S3RepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L237-L346 +type S3RepositorySettings struct { + // BasePath The path to the repository data within its bucket. + // It defaults to an empty string, meaning that the repository is at the root of + // the bucket. + // The value of this setting should not start or end with a forward slash (`/`). + // + // NOTE: Don't set base_path when configuring a snapshot repository for Elastic + // Cloud Enterprise. + // Elastic Cloud Enterprise automatically generates the `base_path` for each + // deployment so that multiple deployments may share the same bucket. + BasePath *string `json:"base_path,omitempty"` + // Bucket The name of the S3 bucket to use for snapshots. + // The bucket name must adhere to Amazon's S3 bucket naming rules. + Bucket string `json:"bucket"` + // BufferSize The minimum threshold below which the chunk is uploaded using a single + // request. + // Beyond this threshold, the S3 repository will use the AWS Multipart Upload + // API to split the chunk into several parts, each of `buffer_size` length, and + // to upload each part in its own request. + // Note that setting a buffer size lower than 5mb is not allowed since it will + // prevent the use of the Multipart API and may result in upload errors. + // It is also not possible to set a buffer size greater than 5gb as it is the + // maximum upload size allowed by S3. + // Defaults to `100mb` or 5% of JVM heap, whichever is smaller. + BufferSize ByteSize `json:"buffer_size,omitempty"` + // CannedAcl The S3 repository supports all S3 canned ACLs: `private`, `public-read`, + // `public-read-write`, `authenticated-read`, `log-delivery-write`, + // `bucket-owner-read`, `bucket-owner-full-control`. + // You could specify a canned ACL using the `canned_acl` setting. + // When the S3 repository creates buckets and objects, it adds the canned ACL + // into the buckets and objects. + CannedAcl *string `json:"canned_acl,omitempty"` + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. 
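+	// For example, with `chunk_size: 1GB` (an illustrative value, not a
+	// recommendation) a 5GB file would be stored as five 1GB blobs.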
+	ChunkSize ByteSize `json:"chunk_size,omitempty"`
+	// Client The name of the S3 client to use to connect to S3.
+	Client *string `json:"client,omitempty"`
+	// Compress When set to `true`, metadata files are stored in compressed format.
+	// This setting doesn't affect index files that are already compressed by
+	// default.
+	Compress *bool `json:"compress,omitempty"`
+	// DeleteObjectsMaxSize The maximum batch size, between 1 and 1000, used for `DeleteObjects`
+	// requests.
+	// Defaults to 1000 which is the maximum number supported by the AWS
+	// DeleteObjects API.
+	DeleteObjectsMaxSize *int `json:"delete_objects_max_size,omitempty"`
+	// GetRegisterRetryDelay The time to wait before trying again if an attempt to read a linearizable
+	// register fails.
+	GetRegisterRetryDelay Duration `json:"get_register_retry_delay,omitempty"`
+	// MaxMultipartParts The maximum number of parts that Elasticsearch will write during a multipart
+	// upload of a single object.
+	// Files which are larger than `buffer_size × max_multipart_parts` will be
+	// chunked into several smaller objects.
+	// Elasticsearch may also split a file across multiple objects to satisfy other
+	// constraints such as the `chunk_size` limit.
+	// Defaults to `10000` which is the maximum number of parts in a multipart
+	// upload in AWS S3.
+	MaxMultipartParts *int `json:"max_multipart_parts,omitempty"`
+	// MaxMultipartUploadCleanupSize The maximum number of possibly-dangling multipart uploads to clean up in each
+	// batch of snapshot deletions.
+	// Defaults to 1000 which is the maximum number supported by the AWS
+	// ListMultipartUploads API.
+	// If set to `0`, Elasticsearch will not attempt to clean up dangling multipart
+	// uploads.
+	MaxMultipartUploadCleanupSize *int `json:"max_multipart_upload_cleanup_size,omitempty"`
+	// MaxRestoreBytesPerSec The maximum snapshot restore rate per node.
+	// It defaults to unlimited.
+	// Note that restores are also throttled through recovery settings.
+	MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"`
+	// MaxSnapshotBytesPerSec The maximum snapshot creation rate per node.
+	// It defaults to 40mb per second.
+	// Note that if the recovery settings for managed services are set, then it
+	// defaults to unlimited, and the rate is additionally throttled through
+	// recovery settings.
+	MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"`
+	// Readonly If true, the repository is read-only.
+	// The cluster can retrieve and restore snapshots from the repository but not
+	// write to the repository or create snapshots in it.
+	//
+	// Only a cluster with write access can create snapshots in the repository.
+	// All other clusters connected to the repository should have the `readonly`
+	// parameter set to `true`.
+	//
+	// If `false`, the cluster can write to the repository and create snapshots in
+	// it.
+	//
+	// IMPORTANT: If you register the same snapshot repository with multiple
+	// clusters, only one cluster should have write access to the repository.
+	// Having multiple clusters write to the repository at the same time risks
+	// corrupting the contents of the repository.
+	Readonly *bool `json:"readonly,omitempty"`
+	// ServerSideEncryption When set to `true`, files are encrypted on server side using an AES256
+	// algorithm.
+	ServerSideEncryption *bool `json:"server_side_encryption,omitempty"`
+	// StorageClass The S3 storage class for objects written to the repository.
+	// Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`,
+	// and `intelligent_tiering`.
+	StorageClass *string `json:"storage_class,omitempty"`
+	// ThrottledDeleteRetryDelayIncrement The delay before the first retry and the amount the delay is incremented by
+	// on each subsequent retry.
+	// The default is 50ms and the minimum is 0ms.
+	ThrottledDeleteRetryDelayIncrement Duration `json:"throttled_delete_retry.delay_increment,omitempty"`
+	// ThrottledDeleteRetryMaximumDelay The upper bound on how long the delays between retries will grow to.
+	// The default is 5s and the minimum is 0ms.
+	ThrottledDeleteRetryMaximumDelay Duration `json:"throttled_delete_retry.maximum_delay,omitempty"`
+	// ThrottledDeleteRetryMaximumNumberOfRetries The number of times to retry a throttled snapshot deletion.
+	// The default is 10 and the minimum value is 0 which will disable retries
+	// altogether.
+	// Note that if retries are enabled in the Azure client, each of these retries
+	// comprises that many client-level retries.
+	ThrottledDeleteRetryMaximumNumberOfRetries *int `json:"throttled_delete_retry.maximum_number_of_retries,omitempty"`
+}
+
+func (s *S3RepositorySettings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "base_path":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "BasePath", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.BasePath = &o
+
+		case "bucket":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Bucket", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Bucket = o
+
+		case "buffer_size":
+			if err := dec.Decode(&s.BufferSize); err != nil {
+				return fmt.Errorf("%s | %w", "BufferSize", err)
+			}
+
+		case "canned_acl":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "CannedAcl", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.CannedAcl = &o
+
+		case "chunk_size":
+			if err := dec.Decode(&s.ChunkSize); err != nil {
+				return fmt.Errorf("%s | %w", "ChunkSize", err)
+			}
+
+		case "client":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Client", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Client = &o
+
+		case "compress":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Compress", err)
+				}
+				s.Compress = &value
+			case bool:
+				s.Compress = &v
+			}
+
+		case "delete_objects_max_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DeleteObjectsMaxSize", err)
+				}
+				s.DeleteObjectsMaxSize = &value
+			case float64:
+				f := int(v)
+				s.DeleteObjectsMaxSize = &f
+			}
+
+		case "get_register_retry_delay":
+			if err := dec.Decode(&s.GetRegisterRetryDelay); err != nil {
+				return fmt.Errorf("%s | %w", "GetRegisterRetryDelay", err)
+			}
+
+		case "max_multipart_parts":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value,
err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxMultipartParts", err) + } + s.MaxMultipartParts = &value + case float64: + f := int(v) + s.MaxMultipartParts = &f + } + + case "max_multipart_upload_cleanup_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxMultipartUploadCleanupSize", err) + } + s.MaxMultipartUploadCleanupSize = &value + case float64: + f := int(v) + s.MaxMultipartUploadCleanupSize = &f + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "readonly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Readonly", err) + } + s.Readonly = &value + case bool: + s.Readonly = &v + } + + case "server_side_encryption": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ServerSideEncryption", err) + } + s.ServerSideEncryption = &value + case bool: + s.ServerSideEncryption = &v + } + + case "storage_class": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StorageClass", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StorageClass = &o + + case "throttled_delete_retry.delay_increment": + if err := dec.Decode(&s.ThrottledDeleteRetryDelayIncrement); err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryDelayIncrement", err) + } + + case "throttled_delete_retry.maximum_delay": + if err := dec.Decode(&s.ThrottledDeleteRetryMaximumDelay); err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryMaximumDelay", err) + } + + case "throttled_delete_retry.maximum_number_of_retries": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ThrottledDeleteRetryMaximumNumberOfRetries", err) + } + s.ThrottledDeleteRetryMaximumNumberOfRetries = &value + case float64: + f := int(v) + s.ThrottledDeleteRetryMaximumNumberOfRetries = &f + } + + } + } + return nil +} + +// NewS3RepositorySettings returns a S3RepositorySettings. +func NewS3RepositorySettings() *S3RepositorySettings { + r := &S3RepositorySettings{} + + return r +} + +type S3RepositorySettingsVariant interface { + S3RepositorySettingsCaster() *S3RepositorySettings +} + +func (s *S3RepositorySettings) S3RepositorySettingsCaster() *S3RepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/samplediversity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/samplediversity.go new file mode 100644 index 000000000..7e2a9ab9c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/samplediversity.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SampleDiversity type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/ExploreControls.ts#L51-L54 +type SampleDiversity struct { + Field string `json:"field"` + MaxDocsPerValue int `json:"max_docs_per_value"` +} + +func (s *SampleDiversity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "max_docs_per_value": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocsPerValue", err) + } + s.MaxDocsPerValue = value + case float64: + f := int(v) + s.MaxDocsPerValue = f + } + + } + } + return nil +} + +// NewSampleDiversity returns a SampleDiversity. +func NewSampleDiversity() *SampleDiversity { + r := &SampleDiversity{} + + return r +} + +type SampleDiversityVariant interface { + SampleDiversityCaster() *SampleDiversity +} + +func (s *SampleDiversity) SampleDiversityCaster() *SampleDiversity { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregate.go new file mode 100644 index 000000000..7b704d696 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// SamplerAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L558-L559 +type SamplerAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *SamplerAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o 
:= NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + 
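// Keys of the form "<type>#<name>" were split above; the "<type>"
+					// prefix (here "lrareterms", a long rare terms aggregate)
+					// selects the concrete aggregate decoder.
+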
o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | 
%w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + 
}
+					s.Aggregations[elems[1]] = o
+
+				case "geo_line":
+					o := NewGeoLineAggregate()
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[elems[1]] = o
+
+				default:
+					o := make(map[string]any, 0)
+					if err := dec.Decode(&o); err != nil {
+						return fmt.Errorf("%s | %w", "Aggregations", err)
+					}
+					s.Aggregations[elems[1]] = o
+				}
+			} else {
+				return errors.New("cannot decode JSON for field Aggregations")
+			}
+		} else {
+			o := make(map[string]any, 0)
+			if err := dec.Decode(&o); err != nil {
+				return fmt.Errorf("%s | %w", "Aggregations", err)
+			}
+			s.Aggregations[value] = o
+		}
+	}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s SamplerAggregate) MarshalJSON() ([]byte, error) {
+	type opt SamplerAggregate
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewSamplerAggregate returns a SamplerAggregate.
+func NewSamplerAggregate() *SamplerAggregate {
+	r := &SamplerAggregate{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregation.go
new file mode 100644
index 000000000..bab6683b3
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sampleraggregation.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SamplerAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L771-L780
+type SamplerAggregation struct {
+	// ShardSize Limits how many top-scoring documents are collected in the sample processed
+	// on each shard.
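+	// An illustrative (not generated) example: a request containing
+	// {"sampler": {"shard_size": 200}} evaluates its sub-aggregations on at
+	// most 200 top-scoring documents per shard.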
+ ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *SamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil +} + +// NewSamplerAggregation returns a SamplerAggregation. +func NewSamplerAggregation() *SamplerAggregation { + r := &SamplerAggregation{} + + return r +} + +type SamplerAggregationVariant interface { + SamplerAggregationCaster() *SamplerAggregation +} + +func (s *SamplerAggregation) SamplerAggregationCaster() *SamplerAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scalarvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scalarvalue.go new file mode 100644 index 000000000..43e7f5b56 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scalarvalue.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScalarValue holds the union for the following types: +// +// int64 +// Float64 +// string +// bool +// nil +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L33-L37 +type ScalarValue any + +type ScalarValueVariant interface { + ScalarValueCaster() *ScalarValue +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scaledfloatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scaledfloatnumberproperty.go new file mode 100644 index 000000000..da88653fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scaledfloatnumberproperty.go @@ -0,0 +1,1017 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype"
+)
+
+// ScaledFloatNumberProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L197-L201
+type ScaledFloatNumberProperty struct {
+	Boost *Float64 `json:"boost,omitempty"`
+	Coerce *bool `json:"coerce,omitempty"`
+	CopyTo []string `json:"copy_to,omitempty"`
+	DocValues *bool `json:"doc_values,omitempty"`
+	Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields map[string]Property `json:"fields,omitempty"`
+	IgnoreAbove *int `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
+	Index *bool `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta map[string]string `json:"meta,omitempty"`
+	NullValue *Float64 `json:"null_value,omitempty"`
+	OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"`
+	Properties map[string]Property `json:"properties,omitempty"`
+	ScalingFactor *Float64 `json:"scaling_factor,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	Store *bool `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+	// Defaults to false.
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := 
NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := 
NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + 
case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": 
+ oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "scaling_factor": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScalingFactor", err) + } + f := Float64(value) + s.ScalingFactor = &f + case float64: + f := Float64(v) + s.ScalingFactor = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerScaledFloatNumberProperty ScaledFloatNumberProperty + tmp := innerScaledFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + ScalingFactor: s.ScalingFactor, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "scaled_float" + + return json.Marshal(tmp) +} + +// NewScaledFloatNumberProperty returns a ScaledFloatNumberProperty. 
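+//
+// Example (editor's sketch, not part of the generated file): building a
+// scaled_float mapping with this type. The Float64 conversion mirrors the
+// pattern UnmarshalJSON uses above; the factor of 100 is illustrative.
+//
+//	sf := NewScaledFloatNumberProperty()
+//	factor := Float64(100)
+//	sf.ScalingFactor = &factor  // Elasticsearch stores round(value * 100) as a long
+//	data, _ := json.Marshal(sf) // MarshalJSON pins "type": "scaled_float"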
+func NewScaledFloatNumberProperty() *ScaledFloatNumberProperty { + r := &ScaledFloatNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ScaledFloatNumberPropertyVariant interface { + ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty +} + +func (s *ScaledFloatNumberProperty) ScaledFloatNumberPropertyCaster() *ScaledFloatNumberProperty { + return s +} + +func (s *ScaledFloatNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinavianfoldingtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinavianfoldingtokenfilter.go new file mode 100644 index 000000000..5e2766dc8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinavianfoldingtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ScandinavianFoldingTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L531-L533 +type ScandinavianFoldingTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ScandinavianFoldingTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ScandinavianFoldingTokenFilter) MarshalJSON() ([]byte, error) { + type innerScandinavianFoldingTokenFilter ScandinavianFoldingTokenFilter + tmp := innerScandinavianFoldingTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "scandinavian_folding" + + return json.Marshal(tmp) +} + +// NewScandinavianFoldingTokenFilter returns a ScandinavianFoldingTokenFilter. 
+func NewScandinavianFoldingTokenFilter() *ScandinavianFoldingTokenFilter { + r := &ScandinavianFoldingTokenFilter{} + + return r +} + +type ScandinavianFoldingTokenFilterVariant interface { + ScandinavianFoldingTokenFilterCaster() *ScandinavianFoldingTokenFilter +} + +func (s *ScandinavianFoldingTokenFilter) ScandinavianFoldingTokenFilterCaster() *ScandinavianFoldingTokenFilter { + return s +} + +func (s *ScandinavianFoldingTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinaviannormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinaviannormalizationtokenfilter.go new file mode 100644 index 000000000..99677bf44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scandinaviannormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ScandinavianNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L535-L537 +type ScandinavianNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ScandinavianNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ScandinavianNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerScandinavianNormalizationTokenFilter ScandinavianNormalizationTokenFilter + tmp := innerScandinavianNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "scandinavian_normalization" + + return json.Marshal(tmp) +} + +// NewScandinavianNormalizationTokenFilter returns a ScandinavianNormalizationTokenFilter. 
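+//
+// Editor's note (illustrative): like its folding counterpart above, this
+// filter takes no configuration beyond the type literal that MarshalJSON
+// pins, so the zero value marshals to {"type":"scandinavian_normalization"}.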
+func NewScandinavianNormalizationTokenFilter() *ScandinavianNormalizationTokenFilter { + r := &ScandinavianNormalizationTokenFilter{} + + return r +} + +type ScandinavianNormalizationTokenFilterVariant interface { + ScandinavianNormalizationTokenFilterCaster() *ScandinavianNormalizationTokenFilter +} + +func (s *ScandinavianNormalizationTokenFilter) ScandinavianNormalizationTokenFilterCaster() *ScandinavianNormalizationTokenFilter { + return s +} + +func (s *ScandinavianNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulecontainer.go new file mode 100644 index 000000000..e0b9e5f56 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulecontainer.go @@ -0,0 +1,204 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScheduleContainer type. 
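+//
+// Editor's note (behavior summary, not generated text): keys that do not
+// match a known schedule field are collected into
+// AdditionalScheduleContainerProperty by UnmarshalJSON and inlined back at
+// the top level by MarshalJSON, so custom trigger keys survive a round trip:
+//
+//	var sc ScheduleContainer
+//	_ = json.Unmarshal([]byte(`{"interval":"5m","custom":{"x":1}}`), &sc)
+//	out, _ := json.Marshal(sc) // "custom" reappears alongside "interval"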
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L80-L92
+type ScheduleContainer struct {
+	AdditionalScheduleContainerProperty map[string]json.RawMessage `json:"-"`
+	Cron     *string         `json:"cron,omitempty"`
+	Daily    *DailySchedule  `json:"daily,omitempty"`
+	Hourly   *HourlySchedule `json:"hourly,omitempty"`
+	Interval Duration        `json:"interval,omitempty"`
+	Monthly  []TimeOfMonth   `json:"monthly,omitempty"`
+	Timezone *string         `json:"timezone,omitempty"`
+	Weekly   []TimeOfWeek    `json:"weekly,omitempty"`
+	Yearly   []TimeOfYear    `json:"yearly,omitempty"`
+}
+
+func (s *ScheduleContainer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "cron":
+			if err := dec.Decode(&s.Cron); err != nil {
+				return fmt.Errorf("%s | %w", "Cron", err)
+			}
+
+		case "daily":
+			if err := dec.Decode(&s.Daily); err != nil {
+				return fmt.Errorf("%s | %w", "Daily", err)
+			}
+
+		case "hourly":
+			if err := dec.Decode(&s.Hourly); err != nil {
+				return fmt.Errorf("%s | %w", "Hourly", err)
+			}
+
+		case "interval":
+			if err := dec.Decode(&s.Interval); err != nil {
+				return fmt.Errorf("%s | %w", "Interval", err)
+			}
+
+		case "monthly":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := NewTimeOfMonth()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Monthly", err)
+				}
+
+				s.Monthly = append(s.Monthly, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Monthly); err != nil {
+					return fmt.Errorf("%s | %w", "Monthly", err)
+				}
+			}
+
+		case "timezone":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Timezone", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Timezone = &o
+
+		case "weekly":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := NewTimeOfWeek()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Weekly", err)
+				}
+
+				s.Weekly = append(s.Weekly, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Weekly); err != nil {
+					return fmt.Errorf("%s | %w", "Weekly", err)
+				}
+			}
+
+		case "yearly":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := NewTimeOfYear()
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Yearly", err)
+				}
+
+				s.Yearly = append(s.Yearly, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Yearly); err != nil {
+					return fmt.Errorf("%s | %w", "Yearly", err)
+				}
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalScheduleContainerProperty == nil {
+					s.AdditionalScheduleContainerProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalScheduleContainerProperty", err)
+				}
+				s.AdditionalScheduleContainerProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s ScheduleContainer) MarshalJSON() ([]byte, error) {
+	type opt
ScheduleContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalScheduleContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalScheduleContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewScheduleContainer returns a ScheduleContainer. +func NewScheduleContainer() *ScheduleContainer { + r := &ScheduleContainer{ + AdditionalScheduleContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type ScheduleContainerVariant interface { + ScheduleContainerCaster() *ScheduleContainer +} + +func (s *ScheduleContainer) ScheduleContainerCaster() *ScheduleContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletimeofday.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletimeofday.go new file mode 100644 index 000000000..481414f7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletimeofday.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScheduleTimeOfDay holds the union for the following types: +// +// string +// HourAndMinute +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L99-L104 +type ScheduleTimeOfDay any + +type ScheduleTimeOfDayVariant interface { + ScheduleTimeOfDayCaster() *ScheduleTimeOfDay +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletriggerevent.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletriggerevent.go new file mode 100644 index 000000000..b9aea39f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scheduletriggerevent.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ScheduleTriggerEvent type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L94-L97 +type ScheduleTriggerEvent struct { + ScheduledTime DateTime `json:"scheduled_time"` + TriggeredTime DateTime `json:"triggered_time,omitempty"` +} + +func (s *ScheduleTriggerEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scheduled_time": + if err := dec.Decode(&s.ScheduledTime); err != nil { + return fmt.Errorf("%s | %w", "ScheduledTime", err) + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return fmt.Errorf("%s | %w", "TriggeredTime", err) + } + + } + } + return nil +} + +// NewScheduleTriggerEvent returns a ScheduleTriggerEvent. +func NewScheduleTriggerEvent() *ScheduleTriggerEvent { + r := &ScheduleTriggerEvent{} + + return r +} + +type ScheduleTriggerEventVariant interface { + ScheduleTriggerEventCaster() *ScheduleTriggerEvent +} + +func (s *ScheduleTriggerEvent) ScheduleTriggerEventCaster() *ScheduleTriggerEvent { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulingconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulingconfiguration.go new file mode 100644 index 000000000..f5b0541cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/schedulingconfiguration.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SchedulingConfiguration type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L246-L250 +type SchedulingConfiguration struct { + AccessControl *ConnectorScheduling `json:"access_control,omitempty"` + Full *ConnectorScheduling `json:"full,omitempty"` + Incremental *ConnectorScheduling `json:"incremental,omitempty"` +} + +// NewSchedulingConfiguration returns a SchedulingConfiguration. +func NewSchedulingConfiguration() *SchedulingConfiguration { + r := &SchedulingConfiguration{} + + return r +} + +type SchedulingConfigurationVariant interface { + SchedulingConfigurationCaster() *SchedulingConfiguration +} + +func (s *SchedulingConfiguration) SchedulingConfigurationCaster() *SchedulingConfiguration { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scoresort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scoresort.go new file mode 100644 index 000000000..e3e0634ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scoresort.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +// ScoreSort type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L54-L56 +type ScoreSort struct { + Order *sortorder.SortOrder `json:"order,omitempty"` +} + +// NewScoreSort returns a ScoreSort. +func NewScoreSort() *ScoreSort { + r := &ScoreSort{} + + return r +} + +type ScoreSortVariant interface { + ScoreSortCaster() *ScoreSort +} + +func (s *ScoreSort) ScoreSortCaster() *ScoreSort { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/script.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/script.go new file mode 100644 index 000000000..fc0bdd80b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/script.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// Script type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Scripting.ts#L65-L89 +type Script struct { + // Id The `id` for a stored script. + Id *string `json:"id,omitempty"` + // Lang Specifies the language the script is written in. + Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` + Options map[string]string `json:"options,omitempty"` + // Params Specifies any named parameters that are passed into the script as variables. + // Use parameters instead of hard-coded values to decrease compile time. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Source The script source. + Source ScriptSource `json:"source,omitempty"` +} + +func (s *Script) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Source) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + 
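+				// Editor's annotation: none of the search request body keys matched
+				// above, so fall back to decoding the raw message directly into the
+				// ScriptSource union (typically a plain string of script source).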
localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewScript returns a Script. +func NewScript() *Script { + r := &Script{ + Options: make(map[string]string), + Params: make(map[string]json.RawMessage), + } + + return r +} + +type ScriptVariant interface { + ScriptCaster() *Script +} + +func (s *Script) ScriptCaster() *Script { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcache.go new file mode 100644 index 000000000..3d1b13ec4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcache.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScriptCache type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1109-L1123 +type ScriptCache struct { + // CacheEvictions Total number of times the script cache has evicted old data. + CacheEvictions *int64 `json:"cache_evictions,omitempty"` + // CompilationLimitTriggered Total number of times the script compilation circuit breaker has limited + // inline script compilations. + CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` + // Compilations Total number of inline script compilations performed by the node. 
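+	// (Editor's note: UnmarshalJSON below accepts each of these counters as
+	// either a JSON number or a quoted string and normalizes it to int64.)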
+ Compilations *int64 `json:"compilations,omitempty"` + Context *string `json:"context,omitempty"` +} + +func (s *ScriptCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheEvictions", err) + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CompilationLimitTriggered", err) + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Compilations", err) + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "context": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Context", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Context = &o + + } + } + return nil +} + +// NewScriptCache returns a ScriptCache. +func NewScriptCache() *ScriptCache { + r := &ScriptCache{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcondition.go new file mode 100644 index 000000000..d6e69f6ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptcondition.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// ScriptCondition type. 
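+//
+// Example (editor's sketch; the Painless expression is made up and assumes
+// the ScriptSource union accepts a plain string, as other unions in this
+// package do):
+//
+//	cond := NewScriptCondition()
+//	cond.Source = "ctx.payload.hits.total > 10"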
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L80-L88 +type ScriptCondition struct { + Id *string `json:"id,omitempty"` + Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Source ScriptSource `json:"source,omitempty"` +} + +func (s *ScriptCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewScriptCondition returns a ScriptCondition. +func NewScriptCondition() *ScriptCondition { + r := &ScriptCondition{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type ScriptConditionVariant interface { + ScriptConditionCaster() *ScriptCondition +} + +func (s *ScriptCondition) ScriptConditionCaster() *ScriptCondition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedheuristic.go new file mode 100644 index 000000000..80f9edd05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedheuristic.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScriptedHeuristic type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L813-L815 +type ScriptedHeuristic struct { + Script Script `json:"script"` +} + +// NewScriptedHeuristic returns a ScriptedHeuristic. +func NewScriptedHeuristic() *ScriptedHeuristic { + r := &ScriptedHeuristic{} + + return r +} + +type ScriptedHeuristicVariant interface { + ScriptedHeuristicCaster() *ScriptedHeuristic +} + +func (s *ScriptedHeuristic) ScriptedHeuristicCaster() *ScriptedHeuristic { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregate.go new file mode 100644 index 000000000..a3d0c6aef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregate.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ScriptedMetricAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L741-L747 +type ScriptedMetricAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *ScriptedMetricAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewScriptedMetricAggregate returns a ScriptedMetricAggregate. 
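+//
+// Editor's note: the aggregate's Value stays a json.RawMessage because a
+// scripted metric can return any JSON shape; callers decode it into a type
+// of their own choosing.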
+func NewScriptedMetricAggregate() *ScriptedMetricAggregate { + r := &ScriptedMetricAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregation.go new file mode 100644 index 000000000..a0964795e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptedmetricaggregation.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ScriptedMetricAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L286-L312 +type ScriptedMetricAggregation struct { + // CombineScript Runs once on each shard after document collection is complete. + // Allows the aggregation to consolidate the state returned from each shard. + CombineScript *Script `json:"combine_script,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // InitScript Runs prior to any collection of documents. + // Allows the aggregation to set up any initial state. + InitScript *Script `json:"init_script,omitempty"` + // MapScript Run once per document collected. + // If no `combine_script` is specified, the resulting state needs to be stored + // in the `state` object. + MapScript *Script `json:"map_script,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Params A global object with script parameters for `init`, `map` and `combine` + // scripts. + // It is shared between the scripts. + Params map[string]json.RawMessage `json:"params,omitempty"` + // ReduceScript Runs once on the coordinating node after all shards have returned their + // results. + // The script is provided with access to a variable `states`, which is an array + // of the result of the `combine_script` on each shard. 
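+	//
+	// (Editor's sketch of the whole pipeline, adapted from the classic
+	// scripted_metric sum example; the Painless snippets are illustrative
+	// and assume ScriptSource accepts plain strings:)
+	//
+	//	agg := NewScriptedMetricAggregation()
+	//	agg.InitScript = &Script{Source: "state.txns = []"}
+	//	agg.MapScript = &Script{Source: "state.txns.add(doc.amount.value)"}
+	//	agg.CombineScript = &Script{Source: "double s = 0; for (t in state.txns) { s += t } return s"}
+	//	agg.ReduceScript = &Script{Source: "double t = 0; for (a in states) { t += a } return t"}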
+ ReduceScript *Script `json:"reduce_script,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *ScriptedMetricAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "combine_script": + if err := dec.Decode(&s.CombineScript); err != nil { + return fmt.Errorf("%s | %w", "CombineScript", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "init_script": + if err := dec.Decode(&s.InitScript); err != nil { + return fmt.Errorf("%s | %w", "InitScript", err) + } + + case "map_script": + if err := dec.Decode(&s.MapScript); err != nil { + return fmt.Errorf("%s | %w", "MapScript", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "reduce_script": + if err := dec.Decode(&s.ReduceScript); err != nil { + return fmt.Errorf("%s | %w", "ReduceScript", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewScriptedMetricAggregation returns a ScriptedMetricAggregation. +func NewScriptedMetricAggregation() *ScriptedMetricAggregation { + r := &ScriptedMetricAggregation{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type ScriptedMetricAggregationVariant interface { + ScriptedMetricAggregationCaster() *ScriptedMetricAggregation +} + +func (s *ScriptedMetricAggregation) ScriptedMetricAggregationCaster() *ScriptedMetricAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptfield.go new file mode 100644 index 000000000..b34fb08ac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptfield.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScriptField type. 
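+// For illustration only (hypothetical `price` field and `rate` parameter;
+// assumes Script's inline Source/Params form): a per-hit script field whose
+// failures are ignored:
+//
+//	sf := NewScriptField()
+//	sf.Script = Script{
+//		Source: "doc['price'].value * params['rate']",
+//		Params: map[string]json.RawMessage{"rate": json.RawMessage("1.2")},
+//	}
+//	ignore := true
+//	sf.IgnoreFailure = &ignore
+//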
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Scripting.ts#L91-L94 +type ScriptField struct { + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + Script Script `json:"script"` +} + +func (s *ScriptField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewScriptField returns a ScriptField. +func NewScriptField() *ScriptField { + r := &ScriptField{} + + return r +} + +type ScriptFieldVariant interface { + ScriptFieldCaster() *ScriptField +} + +func (s *ScriptField) ScriptFieldCaster() *ScriptField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripting.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripting.go new file mode 100644 index 000000000..72976804a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripting.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Scripting type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1055-L1073 +type Scripting struct { + // CacheEvictions Total number of times the script cache has evicted old data. + CacheEvictions *int64 `json:"cache_evictions,omitempty"` + // CompilationLimitTriggered Total number of times the script compilation circuit breaker has limited + // inline script compilations. + CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` + // Compilations Total number of inline script compilations performed by the node. + Compilations *int64 `json:"compilations,omitempty"` + // CompilationsHistory Contains this recent history of script compilations. 
+ CompilationsHistory map[string]int64 `json:"compilations_history,omitempty"` + Contexts []NodesContext `json:"contexts,omitempty"` +} + +func (s *Scripting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheEvictions", err) + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CompilationLimitTriggered", err) + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Compilations", err) + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "compilations_history": + if s.CompilationsHistory == nil { + s.CompilationsHistory = make(map[string]int64, 0) + } + if err := dec.Decode(&s.CompilationsHistory); err != nil { + return fmt.Errorf("%s | %w", "CompilationsHistory", err) + } + + case "contexts": + if err := dec.Decode(&s.Contexts); err != nil { + return fmt.Errorf("%s | %w", "Contexts", err) + } + + } + } + return nil +} + +// NewScripting returns a Scripting. +func NewScripting() *Scripting { + r := &Scripting{ + CompilationsHistory: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptprocessor.go new file mode 100644 index 000000000..b1b40e996 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptprocessor.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// ScriptProcessor type. 
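+// A hedged usage sketch (assumes the scriptlanguage enum exposes a Painless
+// value, as in sibling generated enum packages; the script body is a
+// hypothetical example), using the inline `source` form since no stored
+// script `id` is set:
+//
+//	p := NewScriptProcessor()
+//	lang := scriptlanguage.Painless
+//	p.Lang = &lang
+//	p.Source = "ctx.title = ctx.title.toLowerCase()"
+//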
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1423-L1443 +type ScriptProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Id ID of a stored script. + // If no `source` is specified, this parameter is required. + Id *string `json:"id,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // Lang Script language. + Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Params Object containing parameters for the script. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Source Inline script. + // If no `id` is specified, this parameter is required. + Source ScriptSource `json:"source,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *ScriptProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := 
json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewScriptProcessor returns a ScriptProcessor. +func NewScriptProcessor() *ScriptProcessor { + r := &ScriptProcessor{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type ScriptProcessorVariant interface { + ScriptProcessorCaster() *ScriptProcessor +} + +func (s *ScriptProcessor) ScriptProcessorCaster() *ScriptProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptquery.go new file mode 100644 index 000000000..89419dc12 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptquery.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScriptQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L337-L346 +type ScriptQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Script Contains a script to run as a query. + // This script must return a boolean value, `true` or `false`. 
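+// Illustrative sketch of such a boolean script (hypothetical `amount` field;
+// assumes Script's inline Source/Params form), with the threshold passed via
+// params so the script stays cacheable:
+//
+//	q := NewScriptQuery()
+//	q.Script = Script{
+//		Source: "doc['amount'].value > params['limit']",
+//		Params: map[string]json.RawMessage{"limit": json.RawMessage("10")},
+//	}
+//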
+ Script Script `json:"script"` +} + +func (s *ScriptQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewScriptQuery returns a ScriptQuery. +func NewScriptQuery() *ScriptQuery { + r := &ScriptQuery{} + + return r +} + +type ScriptQueryVariant interface { + ScriptQueryCaster() *ScriptQuery +} + +func (s *ScriptQuery) ScriptQueryCaster() *ScriptQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorefunction.go new file mode 100644 index 000000000..889872f36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorefunction.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScriptScoreFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L137-L142 +type ScriptScoreFunction struct { + // Script A script that computes a score. + Script Script `json:"script"` +} + +// NewScriptScoreFunction returns a ScriptScoreFunction. 
+func NewScriptScoreFunction() *ScriptScoreFunction { + r := &ScriptScoreFunction{} + + return r +} + +type ScriptScoreFunctionVariant interface { + ScriptScoreFunctionCaster() *ScriptScoreFunction +} + +func (s *ScriptScoreFunction) ScriptScoreFunctionCaster() *ScriptScoreFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorequery.go new file mode 100644 index 000000000..38b0481bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptscorequery.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScriptScoreQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L348-L365 +type ScriptScoreQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MinScore Documents with a score lower than this floating point number are excluded + // from the search results. + MinScore *float32 `json:"min_score,omitempty"` + // Query Query used to return documents. + Query Query `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Script Script used to compute the score of documents returned by the query. + // Important: final relevance scores from the `script_score` query cannot be + // negative. 
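+// A hedged sketch that respects the non-negative constraint noted above by
+// clamping with Math.max (hypothetical `likes` field; assumes Query exposes a
+// MatchAll variant as elsewhere in this package):
+//
+//	ssq := NewScriptScoreQuery()
+//	ssq.Query = Query{MatchAll: NewMatchAllQuery()}
+//	ssq.Script = Script{Source: "Math.max(0, doc['likes'].value)"}
+//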
+ Script Script `json:"script"` +} + +func (s *ScriptScoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewScriptScoreQuery returns a ScriptScoreQuery. +func NewScriptScoreQuery() *ScriptScoreQuery { + r := &ScriptScoreQuery{} + + return r +} + +type ScriptScoreQueryVariant interface { + ScriptScoreQueryCaster() *ScriptScoreQuery +} + +func (s *ScriptScoreQuery) ScriptScoreQueryCaster() *ScriptScoreQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsort.go new file mode 100644 index 000000000..0bf459b21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsort.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptsorttype" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder" +) + +// ScriptSort type. 
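+// For illustration (assumes the sortorder and scriptsorttype enums expose
+// Desc and Number values, mirroring sibling generated packages):
+//
+//	srt := NewScriptSort()
+//	order := sortorder.Desc
+//	srt.Order = &order
+//	typ := scriptsorttype.Number
+//	srt.Type = &typ
+//	srt.Script = Script{Source: "doc['votes'].value"}
+//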
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L72-L78 +type ScriptSort struct { + Mode *sortmode.SortMode `json:"mode,omitempty"` + Nested *NestedSortValue `json:"nested,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + Script Script `json:"script"` + Type *scriptsorttype.ScriptSortType `json:"type,omitempty"` +} + +// NewScriptSort returns a ScriptSort. +func NewScriptSort() *ScriptSort { + r := &ScriptSort{} + + return r +} + +type ScriptSortVariant interface { + ScriptSortCaster() *ScriptSort +} + +func (s *ScriptSort) ScriptSortCaster() *ScriptSort { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsource.go new file mode 100644 index 000000000..d154777dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scriptsource.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScriptSource holds the union for the following types: +// +// string +// SearchRequestBody +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Scripting.ts#L48-L49 +type ScriptSource any + +type ScriptSourceVariant interface { + ScriptSourceCaster() *ScriptSource +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripttransform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripttransform.go new file mode 100644 index 000000000..a05e70340 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scripttransform.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
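+// Note on the Source union used below: ScriptTransform.Source is a
+// ScriptSource (the string | SearchRequestBody union declared in
+// scriptsource.go above), so a plain inline script string can be assigned
+// directly; a hedged sketch with a hypothetical transform script:
+//
+//	tr := NewScriptTransform()
+//	tr.Source = "return ['time': ctx.trigger.scheduled_time]"
+//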
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ScriptTransform type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Transform.ts#L37-L45 +type ScriptTransform struct { + Id *string `json:"id,omitempty"` + Lang *string `json:"lang,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Source ScriptSource `json:"source,omitempty"` +} + +func (s *ScriptTransform) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "lang": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lang = &o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewScriptTransform returns a ScriptTransform. 
+func NewScriptTransform() *ScriptTransform { + r := &ScriptTransform{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type ScriptTransformVariant interface { + ScriptTransformCaster() *ScriptTransform +} + +func (s *ScriptTransform) ScriptTransformCaster() *ScriptTransform { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scrollids.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scrollids.go new file mode 100644 index 000000000..4b22cdacd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/scrollids.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ScrollIds type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L50-L50 +type ScrollIds []string + +type ScrollIdsVariant interface { + ScrollIdsCaster() *ScrollIds +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshotaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshotaction.go new file mode 100644 index 000000000..59a745666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshotaction.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchableSnapshotAction type. 
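+// A minimal hedged sketch of configuring this ILM action (the repository
+// name is an example value only):
+//
+//	act := NewSearchableSnapshotAction()
+//	act.SnapshotRepository = "found-snapshots"
+//	fm := true
+//	act.ForceMergeIndex = &fm
+//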
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L128-L131 +type SearchableSnapshotAction struct { + ForceMergeIndex *bool `json:"force_merge_index,omitempty"` + SnapshotRepository string `json:"snapshot_repository"` +} + +func (s *SearchableSnapshotAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "force_merge_index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForceMergeIndex", err) + } + s.ForceMergeIndex = &value + case bool: + s.ForceMergeIndex = &v + } + + case "snapshot_repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SnapshotRepository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SnapshotRepository = o + + } + } + return nil +} + +// NewSearchableSnapshotAction returns a SearchableSnapshotAction. +func NewSearchableSnapshotAction() *SearchableSnapshotAction { + r := &SearchableSnapshotAction{} + + return r +} + +type SearchableSnapshotActionVariant interface { + SearchableSnapshotActionCaster() *SearchableSnapshotAction +} + +func (s *SearchableSnapshotAction) SearchableSnapshotActionCaster() *SearchableSnapshotAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshots.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshots.go new file mode 100644 index 000000000..cc72da090 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchablesnapshots.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchableSnapshots type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L434-L438 +type SearchableSnapshots struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + FullCopyIndicesCount *int `json:"full_copy_indices_count,omitempty"` + IndicesCount int `json:"indices_count"` + SharedCacheIndicesCount *int `json:"shared_cache_indices_count,omitempty"` +} + +func (s *SearchableSnapshots) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_copy_indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FullCopyIndicesCount", err) + } + s.FullCopyIndicesCount = &value + case float64: + f := int(v) + s.FullCopyIndicesCount = &f + } + + case "indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndicesCount", err) + } + s.IndicesCount = value + case float64: + f := int(v) + s.IndicesCount = f + } + + case "shared_cache_indices_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SharedCacheIndicesCount", err) + } + s.SharedCacheIndicesCount = &value + case float64: + f := int(v) + s.SharedCacheIndicesCount = &f + } + + } + } + return nil +} + +// NewSearchableSnapshots returns a SearchableSnapshots. +func NewSearchableSnapshots() *SearchableSnapshots { + r := &SearchableSnapshots{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchaccess.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchaccess.go new file mode 100644 index 000000000..c71abbdcb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchaccess.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchAccess type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L456-L476 +type SearchAccess struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity *FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Query A search query that defines the documents the owners of the role have access + // to. A document within the specified indices must match this query for it to + // be accessible by the owners of the role. + Query IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *SearchAccess) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "query": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + query_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", 
"script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = o + break query_field + + } + } + if s.Query == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + } + + } + } + return nil +} + +// NewSearchAccess returns a SearchAccess. +func NewSearchAccess() *SearchAccess { + r := &SearchAccess{} + + return r +} + +type SearchAccessVariant interface { + SearchAccessCaster() *SearchAccess +} + +func (s *SearchAccess) SearchAccessCaster() *SearchAccess { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplication.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplication.go new file mode 100644 index 000000000..da961eb43 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplication.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SearchApplication type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/_types/SearchApplication.ts#L24-L33 +type SearchApplication struct { + // AnalyticsCollectionName Analytics collection associated to the Search Application. + AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application. + Indices []string `json:"indices"` + // Name Search Application name + Name string `json:"name"` + // Template Search template to use on search operations. + Template *SearchApplicationTemplate `json:"template,omitempty"` + // UpdatedAtMillis Last time the Search Application was updated. 
+ UpdatedAtMillis int64 `json:"updated_at_millis"` +} + +func (s *SearchApplication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analytics_collection_name": + if err := dec.Decode(&s.AnalyticsCollectionName); err != nil { + return fmt.Errorf("%s | %w", "AnalyticsCollectionName", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + case "updated_at_millis": + if err := dec.Decode(&s.UpdatedAtMillis); err != nil { + return fmt.Errorf("%s | %w", "UpdatedAtMillis", err) + } + + } + } + return nil +} + +// NewSearchApplication returns a SearchApplication. +func NewSearchApplication() *SearchApplication { + r := &SearchApplication{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationparameters.go new file mode 100644 index 000000000..e87dfbf5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationparameters.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SearchApplicationParameters type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/_types/SearchApplicationParameters.ts#L23-L36 +type SearchApplicationParameters struct { + // AnalyticsCollectionName Analytics collection associated to the Search Application. + AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application. + Indices []string `json:"indices"` + // Template Search template to use on search operations. 
+ Template *SearchApplicationTemplate `json:"template,omitempty"` +} + +func (s *SearchApplicationParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analytics_collection_name": + if err := dec.Decode(&s.AnalyticsCollectionName); err != nil { + return fmt.Errorf("%s | %w", "AnalyticsCollectionName", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + } + } + return nil +} + +// NewSearchApplicationParameters returns a SearchApplicationParameters. +func NewSearchApplicationParameters() *SearchApplicationParameters { + r := &SearchApplicationParameters{} + + return r +} + +type SearchApplicationParametersVariant interface { + SearchApplicationParametersCaster() *SearchApplicationParameters +} + +func (s *SearchApplicationParameters) SearchApplicationParametersCaster() *SearchApplicationParameters { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationtemplate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationtemplate.go new file mode 100644 index 000000000..fc8e7aa08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchapplicationtemplate.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SearchApplicationTemplate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/search_application/_types/SearchApplicationTemplate.ts#L22-L27 +type SearchApplicationTemplate struct { + // Script The associated mustache template. + Script Script `json:"script"` +} + +// NewSearchApplicationTemplate returns a SearchApplicationTemplate. 
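+// Illustrative only (assumes Script's inline Source field accepts a search
+// template string; the mustache body is a hypothetical example):
+//
+//	tpl := NewSearchApplicationTemplate()
+//	tpl.Script = Script{Source: `{"query":{"match":{"title":"{{query_string}}"}}}`}
+//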
+func NewSearchApplicationTemplate() *SearchApplicationTemplate { + r := &SearchApplicationTemplate{} + + return r +} + +type SearchApplicationTemplateVariant interface { + SearchApplicationTemplateCaster() *SearchApplicationTemplate +} + +func (s *SearchApplicationTemplate) SearchApplicationTemplateCaster() *SearchApplicationTemplate { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchasyoutypeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchasyoutypeproperty.go new file mode 100644 index 000000000..8021457ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchasyoutypeproperty.go @@ -0,0 +1,978 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption" +) + +// SearchAsYouTypeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L263-L274 +type SearchAsYouTypeProperty struct { + Analyzer *string `json:"analyzer,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + MaxShingleSize *int `json:"max_shingle_size,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + 
} + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = 
oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "max_shingle_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxShingleSize", err) + } + s.MaxShingleSize = &value + case float64: + f := int(v) + s.MaxShingleSize = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "norms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Norms", err) + } + s.Norms = &value + case bool: + s.Norms = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil 
{ + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := 
NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o + + case "search_quote_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQuoteAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o + + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "term_vector": + if err := dec.Decode(&s.TermVector); err != nil { + return fmt.Errorf("%s | %w", "TermVector", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { + type innerSearchAsYouTypeProperty SearchAsYouTypeProperty + tmp := innerSearchAsYouTypeProperty{ + Analyzer: s.Analyzer, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + MaxShingleSize: s.MaxShingleSize, + Meta: s.Meta, + Norms: s.Norms, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TermVector: s.TermVector, + Type: s.Type, + } + + tmp.Type = "search_as_you_type" + + return json.Marshal(tmp) +} + +// NewSearchAsYouTypeProperty returns a SearchAsYouTypeProperty. 
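+// The custom MarshalJSON above always stamps the literal mapping type, so
+// even a zero value serialises with its discriminator. A minimal sketch of
+// the assumed round trip:
+//
+//	p := NewSearchAsYouTypeProperty()
+//	b, _ := json.Marshal(p)
+//	// string(b) == `{"type":"search_as_you_type"}` (empty maps are dropped by omitempty)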
+func NewSearchAsYouTypeProperty() *SearchAsYouTypeProperty { + r := &SearchAsYouTypeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type SearchAsYouTypePropertyVariant interface { + SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty +} + +func (s *SearchAsYouTypeProperty) SearchAsYouTypePropertyCaster() *SearchAsYouTypeProperty { + return s +} + +func (s *SearchAsYouTypeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchidle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchidle.go new file mode 100644 index 000000000..4bccb5138 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchidle.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SearchIdle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L255-L258 +type SearchIdle struct { + After Duration `json:"after,omitempty"` +} + +func (s *SearchIdle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return fmt.Errorf("%s | %w", "After", err) + } + + } + } + return nil +} + +// NewSearchIdle returns a SearchIdle. +func NewSearchIdle() *SearchIdle { + r := &SearchIdle{} + + return r +} + +type SearchIdleVariant interface { + SearchIdleCaster() *SearchIdle +} + +func (s *SearchIdle) SearchIdleCaster() *SearchIdle { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinput.go new file mode 100644 index 000000000..b915fdc47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinput.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SearchInput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L109-L113 +type SearchInput struct { + Extract []string `json:"extract,omitempty"` + Request SearchInputRequestDefinition `json:"request"` + Timeout Duration `json:"timeout,omitempty"` +} + +func (s *SearchInput) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "extract": + if err := dec.Decode(&s.Extract); err != nil { + return fmt.Errorf("%s | %w", "Extract", err) + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} + +// NewSearchInput returns a SearchInput. +func NewSearchInput() *SearchInput { + r := &SearchInput{} + + return r +} + +type SearchInputVariant interface { + SearchInputCaster() *SearchInput +} + +func (s *SearchInput) SearchInputCaster() *SearchInput { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestbody.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestbody.go new file mode 100644 index 000000000..3ecbaa707 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestbody.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SearchInputRequestBody type. 
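+//
+// A hypothetical watcher input body, for illustration only:
+//
+//	{"query": {"match_all": {}}}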
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L144-L146 +type SearchInputRequestBody struct { + Query Query `json:"query"` +} + +// NewSearchInputRequestBody returns a SearchInputRequestBody. +func NewSearchInputRequestBody() *SearchInputRequestBody { + r := &SearchInputRequestBody{} + + return r +} + +type SearchInputRequestBodyVariant interface { + SearchInputRequestBodyCaster() *SearchInputRequestBody +} + +func (s *SearchInputRequestBody) SearchInputRequestBodyCaster() *SearchInputRequestBody { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestdefinition.go new file mode 100644 index 000000000..50c7f674f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchinputrequestdefinition.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype" +) + +// SearchInputRequestDefinition type. 
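+//
+// A hypothetical definition, for illustration only:
+//
+//	{"indices": ["logs-*"], "body": {"query": {"match_all": {}}}, "rest_total_hits_as_int": true}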
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L115-L122 +type SearchInputRequestDefinition struct { + Body *SearchInputRequestBody `json:"body,omitempty"` + Indices []string `json:"indices,omitempty"` + IndicesOptions *IndicesOptions `json:"indices_options,omitempty"` + RestTotalHitsAsInt *bool `json:"rest_total_hits_as_int,omitempty"` + SearchType *searchtype.SearchType `json:"search_type,omitempty"` + Template *SearchTemplateRequestBody `json:"template,omitempty"` +} + +func (s *SearchInputRequestDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return fmt.Errorf("%s | %w", "IndicesOptions", err) + } + + case "rest_total_hits_as_int": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RestTotalHitsAsInt", err) + } + s.RestTotalHitsAsInt = &value + case bool: + s.RestTotalHitsAsInt = &v + } + + case "search_type": + if err := dec.Decode(&s.SearchType); err != nil { + return fmt.Errorf("%s | %w", "SearchType", err) + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return fmt.Errorf("%s | %w", "Template", err) + } + + } + } + return nil +} + +// NewSearchInputRequestDefinition returns a SearchInputRequestDefinition. +func NewSearchInputRequestDefinition() *SearchInputRequestDefinition { + r := &SearchInputRequestDefinition{} + + return r +} + +type SearchInputRequestDefinitionVariant interface { + SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition +} + +func (s *SearchInputRequestDefinition) SearchInputRequestDefinitionCaster() *SearchInputRequestDefinition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchprofile.go new file mode 100644 index 000000000..73ec77a8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchprofile.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L136-L140 +type SearchProfile struct { + Collector []Collector `json:"collector"` + Query []QueryProfile `json:"query"` + RewriteTime int64 `json:"rewrite_time"` +} + +func (s *SearchProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collector": + if err := dec.Decode(&s.Collector); err != nil { + return fmt.Errorf("%s | %w", "Collector", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rewrite_time": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RewriteTime", err) + } + s.RewriteTime = value + case float64: + f := int64(v) + s.RewriteTime = f + } + + } + } + return nil +} + +// NewSearchProfile returns a SearchProfile. +func NewSearchProfile() *SearchProfile { + r := &SearchProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchrequestbody.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchrequestbody.go new file mode 100644 index 000000000..ae3ca293b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchrequestbody.go @@ -0,0 +1,563 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchRequestBody type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/SearchRequestBody.ts#L42-L242 +type SearchRequestBody struct { + // Aggregations Defines the aggregations that are run as part of the search request. + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // Collapse Collapses search results the values of the specified field. + Collapse *FieldCollapse `json:"collapse,omitempty"` + // DocvalueFields An array of wildcard (`*`) field patterns. 
+ // The request returns doc values for field names matching these patterns in the + // `hits.fields` property of the response. + DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` + // Explain If `true`, the request returns detailed information about score computation + // as part of a hit. + Explain *bool `json:"explain,omitempty"` + // Ext Configuration of search extensions defined by Elasticsearch plugins. + Ext map[string]json.RawMessage `json:"ext,omitempty"` + // Fields An array of wildcard (`*`) field patterns. + // The request returns values for field names matching these patterns in the + // `hits.fields` property of the response. + Fields []FieldAndFormat `json:"fields,omitempty"` + // From The starting document offset, which must be non-negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one + // or more fields in your search results. + Highlight *Highlight `json:"highlight,omitempty"` + // IndicesBoost Boost the `_score` of documents from specified indices. + // The boost value is the factor by which scores are multiplied. + // A boost value greater than `1.0` increases the score. + // A boost value between `0` and `1.0` decreases the score. + IndicesBoost []map[string]Float64 `json:"indices_boost,omitempty"` + // Knn The approximate kNN search to run. + Knn []KnnSearch `json:"knn,omitempty"` + // MinScore The minimum `_score` for matching documents. + // Documents with a lower `_score` are not included in search results or results + // collected by aggregations. + MinScore *Float64 `json:"min_score,omitempty"` + // Pit Limit the search to a point in time (PIT). + // If you provide a PIT, you cannot specify an `` in the request path. + Pit *PointInTimeReference `json:"pit,omitempty"` + // PostFilter Use the `post_filter` parameter to filter search results. + // The search hits are filtered after the aggregations are calculated. + // A post filter has no impact on the aggregation results. + PostFilter *Query `json:"post_filter,omitempty"` + // Profile Set to `true` to return detailed timing information about the execution of + // individual components in a search request. + // NOTE: This is a debugging tool and adds significant overhead to search + // execution. + Profile *bool `json:"profile,omitempty"` + // Query The search definition using the Query DSL. + Query *Query `json:"query,omitempty"` + // Rank The Reciprocal Rank Fusion (RRF) to use. + Rank *RankContainer `json:"rank,omitempty"` + // Rescore Can be used to improve precision by reordering just the top (for example 100 + // - 500) documents returned by the `query` and `post_filter` phases. + Rescore []Rescore `json:"rescore,omitempty"` + // Retriever A retriever is a specification to describe top documents returned from a + // search. + // A retriever replaces other elements of the search API that also return top + // documents such as `query` and `knn`. + Retriever *RetrieverContainer `json:"retriever,omitempty"` + // RuntimeMappings One or more runtime fields in the search request. + // These fields take precedence over mapped fields with the same name. + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
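+	// A hypothetical request snippet for illustration:
+	//
+	//	"script_fields": {"price_with_tax": {"script": {"source": "doc['price'].value * 1.2"}}}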
+ ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + // SearchAfter Used to retrieve the next page of hits using a set of sort values from the + // previous page. + SearchAfter []FieldValue `json:"search_after,omitempty"` + // SeqNoPrimaryTerm If `true`, the request returns sequence number and primary term of the last + // modification of each hit. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The number of hits to return, which must not be negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` property. + Size *int `json:"size,omitempty"` + // Slice Split a scrolled search into multiple slices that can be consumed + // independently. + Slice *SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of : pairs. + Sort []SortCombinations `json:"sort,omitempty"` + // Source_ The source fields that are returned for matching documents. + // These fields are returned in the `hits._source` property of the search + // response. + // If the `stored_fields` property is specified, the `_source` property defaults + // to `false`. + // Otherwise, it defaults to `true`. + Source_ SourceConfig `json:"_source,omitempty"` + // Stats The stats groups to associate with the search. + // Each group maintains a statistics aggregation for its associated searches. + // You can retrieve these stats using the indices stats API. + Stats []string `json:"stats,omitempty"` + // StoredFields A comma-separated list of stored fields to return as part of a hit. + // If no fields are specified, no stored fields are included in the response. + // If this field is specified, the `_source` property defaults to `false`. + // You can pass `_source: true` to return both source fields and stored fields + // in the search response. + StoredFields []string `json:"stored_fields,omitempty"` + // Suggest Defines a suggester that provides similar looking terms based on a provided + // text. + Suggest *Suggester `json:"suggest,omitempty"` + // TerminateAfter The maximum number of documents to collect for each shard. + // If a query reaches this limit, Elasticsearch terminates the query early. + // Elasticsearch collects documents before sorting. + // + // IMPORTANT: Use with caution. + // Elasticsearch applies this property to each shard handling the request. + // When possible, let Elasticsearch perform early termination automatically. + // Avoid specifying this property for requests that target data streams with + // backing indices across multiple data tiers. + // + // If set to `0` (default), the query does not terminate early. + TerminateAfter *int64 `json:"terminate_after,omitempty"` + // Timeout The period of time to wait for a response from each shard. + // If no response is received before the timeout expires, the request fails and + // returns an error. + // Defaults to no timeout. + Timeout *string `json:"timeout,omitempty"` + // TrackScores If `true`, calculate and return document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // TrackTotalHits Number of hits matching the query to count accurately. + // If `true`, the exact number of hits is returned at the cost of some + // performance. + // If `false`, the response does not include the total number of hits matching + // the query. 
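+	// An illustration of the accepted forms (example values):
+	//
+	//	"track_total_hits": true   // count all matching hits exactly
+	//	"track_total_hits": 10000  // count accurately up to 10,000, then report a lower bound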
+ TrackTotalHits TrackHits `json:"track_total_hits,omitempty"` + // Version If `true`, the request returns the document version as part of a hit. + Version *bool `json:"version,omitempty"` +} + +func (s *SearchRequestBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return fmt.Errorf("%s | %w", "DocvalueFields", err) + } + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "ext": + if s.Ext == nil { + s.Ext = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Ext); err != nil { + return fmt.Errorf("%s | %w", "Ext", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "indices_boost": + if err := dec.Decode(&s.IndicesBoost); err != nil { + return fmt.Errorf("%s | %w", "IndicesBoost", err) + } + + case "knn": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewKnnSearch() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Knn", err) + } + + s.Knn = append(s.Knn, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Knn); err != nil { + return fmt.Errorf("%s | %w", "Knn", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := Float64(value) + s.MinScore = &f + case float64: + f := Float64(v) + s.MinScore = &f + } + + case "pit": + if err := dec.Decode(&s.Pit); err != nil { + return fmt.Errorf("%s | %w", "Pit", err) + } + + case "post_filter": + if err := dec.Decode(&s.PostFilter); err != nil { + return fmt.Errorf("%s | %w", "PostFilter", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "rank": + if err := dec.Decode(&s.Rank); err != nil { + return fmt.Errorf("%s | %w", "Rank", err) + } + + case "rescore": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if 
!bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewRescore() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + + s.Rescore = append(s.Rescore, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil { + return fmt.Errorf("%s | %w", "Rescore", err) + } + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "seq_no_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err) + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "slice": + if err := dec.Decode(&s.Slice); err != nil { + return fmt.Errorf("%s | %w", "Slice", err) + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", 
err) + } + } + + case "suggest": + if err := dec.Decode(&s.Suggest); err != nil { + return fmt.Errorf("%s | %w", "Suggest", err) + } + + case "terminate_after": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TerminateAfter", err) + } + s.TerminateAfter = &value + case float64: + f := int64(v) + s.TerminateAfter = &f + } + + case "timeout": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timeout = &o + + case "track_scores": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrackScores", err) + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "track_total_hits": + if err := dec.Decode(&s.TrackTotalHits); err != nil { + return fmt.Errorf("%s | %w", "TrackTotalHits", err) + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} + +// NewSearchRequestBody returns a SearchRequestBody. +func NewSearchRequestBody() *SearchRequestBody { + r := &SearchRequestBody{ + Aggregations: make(map[string]Aggregations), + Ext: make(map[string]json.RawMessage), + ScriptFields: make(map[string]ScriptField), + } + + return r +} + +type SearchRequestBodyVariant interface { + SearchRequestBodyCaster() *SearchRequestBody +} + +func (s *SearchRequestBody) SearchRequestBodyCaster() *SearchRequestBody { + return s +} + +func (s *SearchRequestBody) MsearchRequestItemCaster() *MsearchRequestItem { + o := MsearchRequestItem(s) + return &o +} + +func (s *SearchRequestBody) ScriptSourceCaster() *ScriptSource { + o := ScriptSource(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchshardsnodeattributes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchshardsnodeattributes.go new file mode 100644 index 000000000..612313ad4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchshardsnodeattributes.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// SearchShardsNodeAttributes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_shards/SearchShardsResponse.ts#L42-L60 +type SearchShardsNodeAttributes struct { + // Attributes Lists node attributes. + Attributes map[string]string `json:"attributes"` + // EphemeralId The ephemeral ID of the node. + EphemeralId string `json:"ephemeral_id"` + ExternalId string `json:"external_id"` + MaxIndexVersion int `json:"max_index_version"` + MinIndexVersion int `json:"min_index_version"` + // Name The human-readable identifier of the node. + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles"` + // TransportAddress The host and port where transport HTTP connections are accepted. + TransportAddress string `json:"transport_address"` + Version string `json:"version"` +} + +func (s *SearchShardsNodeAttributes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = o + + case "max_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIndexVersion", err) + } + s.MaxIndexVersion = value + case float64: + f := int(v) + s.MaxIndexVersion = f + } + + case "min_index_version": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinIndexVersion", err) + } + s.MinIndexVersion = value + case float64: + f := int(v) + s.MinIndexVersion = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewSearchShardsNodeAttributes returns a SearchShardsNodeAttributes. 
+func NewSearchShardsNodeAttributes() *SearchShardsNodeAttributes { + r := &SearchShardsNodeAttributes{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchstats.go new file mode 100644 index 000000000..37742f61b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchstats.go @@ -0,0 +1,283 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L279-L299 +type SearchStats struct { + FetchCurrent int64 `json:"fetch_current"` + FetchTime Duration `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis"` + FetchTotal int64 `json:"fetch_total"` + Groups map[string]SearchStats `json:"groups,omitempty"` + OpenContexts *int64 `json:"open_contexts,omitempty"` + QueryCurrent int64 `json:"query_current"` + QueryTime Duration `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis"` + QueryTotal int64 `json:"query_total"` + RecentSearchLoad *Float64 `json:"recent_search_load,omitempty"` + ScrollCurrent int64 `json:"scroll_current"` + ScrollTime Duration `json:"scroll_time,omitempty"` + ScrollTimeInMillis int64 `json:"scroll_time_in_millis"` + ScrollTotal int64 `json:"scroll_total"` + SuggestCurrent int64 `json:"suggest_current"` + SuggestTime Duration `json:"suggest_time,omitempty"` + SuggestTimeInMillis int64 `json:"suggest_time_in_millis"` + SuggestTotal int64 `json:"suggest_total"` +} + +func (s *SearchStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fetch_current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FetchCurrent", err) + } + s.FetchCurrent = value + case float64: + f := int64(v) + s.FetchCurrent = f + } + + case "fetch_time": + if err := dec.Decode(&s.FetchTime); err != nil { + return fmt.Errorf("%s | %w", "FetchTime", err) + } + + case "fetch_time_in_millis": + if err := dec.Decode(&s.FetchTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "FetchTimeInMillis", err) + 
} + + case "fetch_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FetchTotal", err) + } + s.FetchTotal = value + case float64: + f := int64(v) + s.FetchTotal = f + } + + case "groups": + if s.Groups == nil { + s.Groups = make(map[string]SearchStats, 0) + } + if err := dec.Decode(&s.Groups); err != nil { + return fmt.Errorf("%s | %w", "Groups", err) + } + + case "open_contexts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "OpenContexts", err) + } + s.OpenContexts = &value + case float64: + f := int64(v) + s.OpenContexts = &f + } + + case "query_current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "QueryCurrent", err) + } + s.QueryCurrent = value + case float64: + f := int64(v) + s.QueryCurrent = f + } + + case "query_time": + if err := dec.Decode(&s.QueryTime); err != nil { + return fmt.Errorf("%s | %w", "QueryTime", err) + } + + case "query_time_in_millis": + if err := dec.Decode(&s.QueryTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "QueryTimeInMillis", err) + } + + case "query_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "QueryTotal", err) + } + s.QueryTotal = value + case float64: + f := int64(v) + s.QueryTotal = f + } + + case "recent_search_load": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RecentSearchLoad", err) + } + f := Float64(value) + s.RecentSearchLoad = &f + case float64: + f := Float64(v) + s.RecentSearchLoad = &f + } + + case "scroll_current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScrollCurrent", err) + } + s.ScrollCurrent = value + case float64: + f := int64(v) + s.ScrollCurrent = f + } + + case "scroll_time": + if err := dec.Decode(&s.ScrollTime); err != nil { + return fmt.Errorf("%s | %w", "ScrollTime", err) + } + + case "scroll_time_in_millis": + if err := dec.Decode(&s.ScrollTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "ScrollTimeInMillis", err) + } + + case "scroll_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ScrollTotal", err) + } + s.ScrollTotal = value + case float64: + f := int64(v) + s.ScrollTotal = f + } + + case "suggest_current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SuggestCurrent", err) + } + s.SuggestCurrent = value + case float64: + f := int64(v) + s.SuggestCurrent = f + } + + case "suggest_time": + if err := dec.Decode(&s.SuggestTime); err != nil { + return fmt.Errorf("%s | %w", "SuggestTime", err) + } + + case "suggest_time_in_millis": + if err := dec.Decode(&s.SuggestTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "SuggestTimeInMillis", err) + } + + case "suggest_total": + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SuggestTotal", err) + } + s.SuggestTotal = value + case float64: + f := int64(v) + s.SuggestTotal = f + } + + } + } + return nil +} + +// NewSearchStats returns a SearchStats. +func NewSearchStats() *SearchStats { + r := &SearchStats{ + Groups: make(map[string]SearchStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtemplaterequestbody.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtemplaterequestbody.go new file mode 100644 index 000000000..032fb8ea1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtemplaterequestbody.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchTemplateRequestBody type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L125-L142 +type SearchTemplateRequestBody struct { + Explain *bool `json:"explain,omitempty"` + // Id ID of the search template to use. If no source is specified, + // this parameter is required. + Id *string `json:"id,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Profile *bool `json:"profile,omitempty"` + // Source An inline search template. Supports the same parameters as the search API's + // request body. Also supports Mustache variables. If no id is specified, this + // parameter is required. 
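+	// A hypothetical illustration (a sketch, not upstream prose): an
+	// inline template such as
+	//   {"query":{"match":{"{{my_field}}":"{{my_value}}"}}}
+	// is rendered against the values supplied in Params; `my_field` and
+	// `my_value` are placeholder names chosen for this example only.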
+ Source *string `json:"source,omitempty"` +} + +func (s *SearchTemplateRequestBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + +// NewSearchTemplateRequestBody returns a SearchTemplateRequestBody. +func NewSearchTemplateRequestBody() *SearchTemplateRequestBody { + r := &SearchTemplateRequestBody{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type SearchTemplateRequestBodyVariant interface { + SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody +} + +func (s *SearchTemplateRequestBody) SearchTemplateRequestBodyCaster() *SearchTemplateRequestBody { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtransform.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtransform.go new file mode 100644 index 000000000..da3475a9a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchtransform.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SearchTransform type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Transform.ts#L47-L50 +type SearchTransform struct { + Request SearchInputRequestDefinition `json:"request"` + Timeout Duration `json:"timeout"` +} + +func (s *SearchTransform) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return fmt.Errorf("%s | %w", "Request", err) + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return fmt.Errorf("%s | %w", "Timeout", err) + } + + } + } + return nil +} + +// NewSearchTransform returns a SearchTransform. +func NewSearchTransform() *SearchTransform { + r := &SearchTransform{} + + return r +} + +type SearchTransformVariant interface { + SearchTransformCaster() *SearchTransform +} + +func (s *SearchTransform) SearchTransformCaster() *SearchTransform { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchusagestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchusagestats.go new file mode 100644 index 000000000..797d0f045 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/searchusagestats.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SearchUsageStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L149-L155 +type SearchUsageStats struct { + Queries map[string]int64 `json:"queries"` + Rescorers map[string]int64 `json:"rescorers"` + Retrievers map[string]int64 `json:"retrievers"` + Sections map[string]int64 `json:"sections"` + Total int64 `json:"total"` +} + +func (s *SearchUsageStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "queries": + if s.Queries == nil { + s.Queries = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Queries); err != nil { + return fmt.Errorf("%s | %w", "Queries", err) + } + + case "rescorers": + if s.Rescorers == nil { + s.Rescorers = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Rescorers); err != nil { + return fmt.Errorf("%s | %w", "Rescorers", err) + } + + case "retrievers": + if s.Retrievers == nil { + s.Retrievers = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Retrievers); err != nil { + return fmt.Errorf("%s | %w", "Retrievers", err) + } + + case "sections": + if s.Sections == nil { + s.Sections = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Sections); err != nil { + return fmt.Errorf("%s | %w", "Sections", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewSearchUsageStats returns a SearchUsageStats. +func NewSearchUsageStats() *SearchUsageStats { + r := &SearchUsageStats{ + Queries: make(map[string]int64), + Rescorers: make(map[string]int64), + Retrievers: make(map[string]int64), + Sections: make(map[string]int64), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/security.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/security.go new file mode 100644 index 000000000..4157c501d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/security.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Security type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L440-L453 +type Security struct { + Anonymous FeatureToggle `json:"anonymous"` + ApiKeyService FeatureToggle `json:"api_key_service"` + Audit Audit `json:"audit"` + Available bool `json:"available"` + Enabled bool `json:"enabled"` + Fips140 FeatureToggle `json:"fips_140"` + Ipfilter IpFilter `json:"ipfilter"` + OperatorPrivileges Base `json:"operator_privileges"` + Realms map[string]XpackRealm `json:"realms"` + RoleMapping map[string]XpackRoleMapping `json:"role_mapping"` + Roles SecurityRoles `json:"roles"` + Ssl Ssl `json:"ssl"` + SystemKey *FeatureToggle `json:"system_key,omitempty"` + TokenService FeatureToggle `json:"token_service"` +} + +func (s *Security) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anonymous": + if err := dec.Decode(&s.Anonymous); err != nil { + return fmt.Errorf("%s | %w", "Anonymous", err) + } + + case "api_key_service": + if err := dec.Decode(&s.ApiKeyService); err != nil { + return fmt.Errorf("%s | %w", "ApiKeyService", err) + } + + case "audit": + if err := dec.Decode(&s.Audit); err != nil { + return fmt.Errorf("%s | %w", "Audit", err) + } + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "fips_140": + if err := dec.Decode(&s.Fips140); err != nil { + return fmt.Errorf("%s | %w", "Fips140", err) + } + + case "ipfilter": + if err := dec.Decode(&s.Ipfilter); err != nil { + return fmt.Errorf("%s | %w", "Ipfilter", err) + } + + case "operator_privileges": + if err := dec.Decode(&s.OperatorPrivileges); err != nil { + return fmt.Errorf("%s | %w", "OperatorPrivileges", err) + } + + case "realms": + if s.Realms == nil { + s.Realms = make(map[string]XpackRealm, 0) + } + if err := dec.Decode(&s.Realms); err != nil { + return fmt.Errorf("%s | %w", "Realms", err) + } + + case "role_mapping": + if s.RoleMapping == nil { + s.RoleMapping = make(map[string]XpackRoleMapping, 0) + } + if err := dec.Decode(&s.RoleMapping); err != nil { + return fmt.Errorf("%s | %w", "RoleMapping", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "ssl": + if err := dec.Decode(&s.Ssl); err != nil { + return fmt.Errorf("%s | %w", "Ssl", err) + } + + case "system_key": + if err := dec.Decode(&s.SystemKey); err != nil { + return fmt.Errorf("%s | %w", "SystemKey", err) + } + + case "token_service": + if err := dec.Decode(&s.TokenService); err != nil { + return fmt.Errorf("%s | %w", "TokenService", err) + } + + } + } + return nil +} + +// NewSecurity returns a Security. 
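+//
+// A minimal usage sketch, assuming only the declarations above: the
+// constructor pre-allocates the map fields, so entries can be assigned
+// without a nil check:
+//
+//	sec := NewSecurity()
+//	sec.Realms["native"] = XpackRealm{} // "native" is an example key only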
+func NewSecurity() *Security { + r := &Security{ + Realms: make(map[string]XpackRealm), + RoleMapping: make(map[string]XpackRoleMapping), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolemapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolemapping.go new file mode 100644 index 000000000..a3869190a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolemapping.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SecurityRoleMapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/RoleMapping.ts#L25-L33 +type SecurityRoleMapping struct { + Enabled bool `json:"enabled"` + Metadata Metadata `json:"metadata"` + RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` + Roles []string `json:"roles,omitempty"` + Rules RoleMappingRule `json:"rules"` +} + +func (s *SecurityRoleMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "role_templates": + if err := dec.Decode(&s.RoleTemplates); err != nil { + return fmt.Errorf("%s | %w", "RoleTemplates", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + + } + } + return nil +} + +// NewSecurityRoleMapping returns a SecurityRoleMapping. 
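+//
+// A hedged gloss (not upstream documentation): when building a mapping for
+// the role mapping APIs, Elasticsearch expects either Roles or RoleTemplates
+// (not both), together with the Rules that select the users to match.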
+func NewSecurityRoleMapping() *SecurityRoleMapping { + r := &SecurityRoleMapping{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityroles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityroles.go new file mode 100644 index 000000000..16a7073e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityroles.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SecurityRoles type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L306-L310 +type SecurityRoles struct { + Dls SecurityRolesDls `json:"dls"` + File SecurityRolesFile `json:"file"` + Native SecurityRolesNative `json:"native"` +} + +// NewSecurityRoles returns a SecurityRoles. +func NewSecurityRoles() *SecurityRoles { + r := &SecurityRoles{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdls.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdls.go new file mode 100644 index 000000000..eb78519e1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdls.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SecurityRolesDls type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L318-L320 +type SecurityRolesDls struct { + BitSetCache SecurityRolesDlsBitSetCache `json:"bit_set_cache"` +} + +// NewSecurityRolesDls returns a SecurityRolesDls. +func NewSecurityRolesDls() *SecurityRolesDls { + r := &SecurityRolesDls{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdlsbitsetcache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdlsbitsetcache.go new file mode 100644 index 000000000..3e4a473ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesdlsbitsetcache.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SecurityRolesDlsBitSetCache type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L322-L326 +type SecurityRolesDlsBitSetCache struct { + Count int `json:"count"` + Memory ByteSize `json:"memory,omitempty"` + MemoryInBytes uint64 `json:"memory_in_bytes"` +} + +func (s *SecurityRolesDlsBitSetCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "memory": + if err := dec.Decode(&s.Memory); err != nil { + return fmt.Errorf("%s | %w", "Memory", err) + } + + case "memory_in_bytes": + if err := dec.Decode(&s.MemoryInBytes); err != nil { + return fmt.Errorf("%s | %w", "MemoryInBytes", err) + } + + } + } + return nil +} + +// NewSecurityRolesDlsBitSetCache returns a SecurityRolesDlsBitSetCache. 
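+//
+// A hedged gloss (not upstream documentation): as in the other stats types,
+// `memory_in_bytes` is always numeric, while `memory` is the human-readable
+// rendering (for example "1kb") that Elasticsearch includes when a stats API
+// is called with `?human=true`; ByteSize accordingly tolerates both string
+// and numeric JSON values.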
+func NewSecurityRolesDlsBitSetCache() *SecurityRolesDlsBitSetCache { + r := &SecurityRolesDlsBitSetCache{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesfile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesfile.go new file mode 100644 index 000000000..bdd22e836 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesfile.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SecurityRolesFile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L328-L332 +type SecurityRolesFile struct { + Dls bool `json:"dls"` + Fls bool `json:"fls"` + Size int64 `json:"size"` +} + +func (s *SecurityRolesFile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dls": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dls", err) + } + s.Dls = value + case bool: + s.Dls = v + } + + case "fls": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Fls", err) + } + s.Fls = value + case bool: + s.Fls = v + } + + case "size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + +// NewSecurityRolesFile returns a SecurityRolesFile. +func NewSecurityRolesFile() *SecurityRolesFile { + r := &SecurityRolesFile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesnative.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesnative.go new file mode 100644 index 000000000..f5d74c3d3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securityrolesnative.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SecurityRolesNative type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L312-L316 +type SecurityRolesNative struct { + Dls bool `json:"dls"` + Fls bool `json:"fls"` + Size int64 `json:"size"` +} + +func (s *SecurityRolesNative) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dls": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dls", err) + } + s.Dls = value + case bool: + s.Dls = v + } + + case "fls": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Fls", err) + } + s.Fls = value + case bool: + s.Fls = v + } + + case "size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + +// NewSecurityRolesNative returns a SecurityRolesNative. +func NewSecurityRolesNative() *SecurityRolesNative { + r := &SecurityRolesNative{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securitysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securitysettings.go new file mode 100644 index 000000000..5045a8dd0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/securitysettings.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SecuritySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/SecuritySettings.ts#L22-L24 +type SecuritySettings struct { + Index *IndexSettings `json:"index,omitempty"` +} + +// NewSecuritySettings returns a SecuritySettings. +func NewSecuritySettings() *SecuritySettings { + r := &SecuritySettings{} + + return r +} + +type SecuritySettingsVariant interface { + SecuritySettingsCaster() *SecuritySettings +} + +func (s *SecuritySettings) SecuritySettingsCaster() *SecuritySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segment.go new file mode 100644 index 000000000..5c1bf38b9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segment.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Segment type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/segments/types.ts#L28-L38 +type Segment struct { + Attributes map[string]string `json:"attributes"` + Committed bool `json:"committed"` + Compound bool `json:"compound"` + DeletedDocs int64 `json:"deleted_docs"` + Generation int `json:"generation"` + NumDocs int64 `json:"num_docs"` + Search bool `json:"search"` + SizeInBytes Float64 `json:"size_in_bytes"` + Version string `json:"version"` +} + +func (s *Segment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "committed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Committed", err) + } + s.Committed = value + case bool: + s.Committed = v + } + + case "compound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compound", err) + } + s.Compound = value + case bool: + s.Compound = v + } + + case "deleted_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DeletedDocs", err) + } + s.DeletedDocs = value + case float64: + f := int64(v) + s.DeletedDocs = f + } + + case "generation": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Generation", err) + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "num_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumDocs", err) + } + s.NumDocs = value + case float64: + f := int64(v) + s.NumDocs = f + } + + case "search": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Search", err) + } + s.Search = value + case bool: + s.Search = v + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + f := Float64(value) + s.SizeInBytes = f + case float64: + f := Float64(v) + s.SizeInBytes = f + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewSegment returns a Segment. 
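+//
+// An illustrative sketch (not part of the generated code): the UnmarshalJSON
+// above is deliberately lenient about scalar encodings, so these two payloads
+// decode to identical Segment values:
+//
+//	var a, b Segment
+//	_ = json.Unmarshal([]byte(`{"committed":true,"num_docs":5}`), &a)
+//	_ = json.Unmarshal([]byte(`{"committed":"true","num_docs":"5"}`), &b)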
+func NewSegment() *Segment { + r := &Segment{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsrecord.go new file mode 100644 index 000000000..4e761a5f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsrecord.go @@ -0,0 +1,261 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SegmentsRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/segments/types.ts#L22-L107 +type SegmentsRecord struct { + // Committed If `true`, the segment is synced to disk. + // Segments that are synced can survive a hard reboot. + // If `false`, the data from uncommitted segments is also stored in the + // transaction log so that Elasticsearch is able to replay changes on the next + // start. + Committed *string `json:"committed,omitempty"` + // Compound If `true`, the segment is stored in a compound file. + // This means Lucene merged all files from the segment in a single file to save + // file descriptors. + Compound *string `json:"compound,omitempty"` + // DocsCount The number of documents in the segment. + // This excludes deleted documents and counts any nested documents separately + // from their parents. + // It also excludes documents which were indexed recently and do not yet belong + // to a segment. + DocsCount *string `json:"docs.count,omitempty"` + // DocsDeleted The number of deleted documents in the segment, which might be higher or + // lower than the number of delete operations you have performed. + // This number excludes deletes that were performed recently and do not yet + // belong to a segment. + // Deleted documents are cleaned up by the automatic merge process if it makes + // sense to do so. + // Also, Elasticsearch creates extra deleted documents to internally track the + // recent history of operations on a shard. + DocsDeleted *string `json:"docs.deleted,omitempty"` + // Generation The segment generation number. + // Elasticsearch increments this generation number for each segment written then + // uses this number to derive the segment name. + Generation *string `json:"generation,omitempty"` + // Id The unique identifier of the node where it lives. + Id *string `json:"id,omitempty"` + // Index The index name. 
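+	// (Note, added for clarity: like the other fields in this cat record, the
+	// value is emitted by the cat API as a JSON string, hence the generated
+	// *string Go type even for numeric and boolean columns.)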
+ Index *string `json:"index,omitempty"` + // Ip The IP address of the node where it lives. + Ip *string `json:"ip,omitempty"` + // Prirep The shard type: `primary` or `replica`. + Prirep *string `json:"prirep,omitempty"` + // Searchable If `true`, the segment is searchable. + // If `false`, the segment has most likely been written to disk but needs a + // refresh to be searchable. + Searchable *string `json:"searchable,omitempty"` + // Segment The segment name, which is derived from the segment generation and used + // internally to create file names in the directory of the shard. + Segment *string `json:"segment,omitempty"` + // Shard The shard name. + Shard *string `json:"shard,omitempty"` + // Size The segment size in bytes. + Size ByteSize `json:"size,omitempty"` + // SizeMemory The segment memory in bytes. + // A value of `-1` indicates Elasticsearch was unable to compute this number. + SizeMemory ByteSize `json:"size.memory,omitempty"` + // Version The version of Lucene used to write the segment. + Version *string `json:"version,omitempty"` +} + +func (s *SegmentsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "committed", "ic", "isCommitted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Committed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Committed = &o + + case "compound", "ico", "isCompound": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Compound", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Compound = &o + + case "docs.count", "dc", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsCount = &o + + case "docs.deleted", "dd", "docsDeleted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocsDeleted", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsDeleted = &o + + case "generation", "g", "gen": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Generation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Generation = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "prirep", "p", "pr", "primaryOrReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prirep", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prirep = &o + + case "searchable", "is", "isSearchable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != 
nil { + return fmt.Errorf("%s | %w", "Searchable", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Searchable = &o + + case "segment", "seg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Segment", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Segment = &o + + case "shard", "s", "sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "size", "si": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size.memory", "sm", "sizeMemory": + if err := dec.Decode(&s.SizeMemory); err != nil { + return fmt.Errorf("%s | %w", "SizeMemory", err) + } + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewSegmentsRecord returns a SegmentsRecord. +func NewSegmentsRecord() *SegmentsRecord { + r := &SegmentsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go new file mode 100644 index 000000000..0314ec286 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go @@ -0,0 +1,374 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SegmentsStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L301-L396 +type SegmentsStats struct { + // Count Total number of segments across all shards assigned to selected nodes. + Count int `json:"count"` + // DocValuesMemory Total amount of memory used for doc values across all shards assigned to + // selected nodes. + DocValuesMemory ByteSize `json:"doc_values_memory,omitempty"` + // DocValuesMemoryInBytes Total amount, in bytes, of memory used for doc values across all shards + // assigned to selected nodes. + DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` + // FileSizes This object is not populated by the cluster stats API. + // To get information on segment files, use the node stats API. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go
new file mode 100644
index 000000000..0314ec286
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/segmentsstats.go
@@ -0,0 +1,374 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SegmentsStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L301-L396
+type SegmentsStats struct {
+	// Count Total number of segments across all shards assigned to selected nodes.
+	Count int `json:"count"`
+	// DocValuesMemory Total amount of memory used for doc values across all shards assigned to
+	// selected nodes.
+	DocValuesMemory ByteSize `json:"doc_values_memory,omitempty"`
+	// DocValuesMemoryInBytes Total amount, in bytes, of memory used for doc values across all shards
+	// assigned to selected nodes.
+	DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"`
+	// FileSizes This object is not populated by the cluster stats API.
+	// To get information on segment files, use the node stats API.
+	FileSizes map[string]ShardFileSizeInfo `json:"file_sizes"`
+	// FixedBitSet Total amount of memory used by fixed bit sets across all shards assigned to
+	// selected nodes.
+	// Fixed bit sets are used for nested object field types and type filters for
+	// join fields.
+	FixedBitSet ByteSize `json:"fixed_bit_set,omitempty"`
+	// FixedBitSetMemoryInBytes Total amount of memory, in bytes, used by fixed bit sets across all shards
+	// assigned to selected nodes.
+	FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
+	// IndexWriterMemory Total amount of memory used by all index writers across all shards assigned
+	// to selected nodes.
+	IndexWriterMemory ByteSize `json:"index_writer_memory,omitempty"`
+	// IndexWriterMemoryInBytes Total amount, in bytes, of memory used by all index writers across all shards
+	// assigned to selected nodes.
+	IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
+	// MaxUnsafeAutoIdTimestamp Unix timestamp, in milliseconds, of the most recently retried indexing
+	// request.
+	MaxUnsafeAutoIdTimestamp int64 `json:"max_unsafe_auto_id_timestamp"`
+	// Memory Total amount of memory used for segments across all shards assigned to
+	// selected nodes.
+	Memory ByteSize `json:"memory,omitempty"`
+	// MemoryInBytes Total amount, in bytes, of memory used for segments across all shards
+	// assigned to selected nodes.
+	MemoryInBytes int64 `json:"memory_in_bytes"`
+	// NormsMemory Total amount of memory used for normalization factors across all shards
+	// assigned to selected nodes.
+	NormsMemory ByteSize `json:"norms_memory,omitempty"`
+	// NormsMemoryInBytes Total amount, in bytes, of memory used for normalization factors across all
+	// shards assigned to selected nodes.
+	NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"`
+	// PointsMemory Total amount of memory used for points across all shards assigned to selected
+	// nodes.
+	PointsMemory ByteSize `json:"points_memory,omitempty"`
+	// PointsMemoryInBytes Total amount, in bytes, of memory used for points across all shards assigned
+	// to selected nodes.
+	PointsMemoryInBytes int64 `json:"points_memory_in_bytes"`
+	// StoredFieldsMemory Total amount of memory used for stored fields across all shards assigned to
+	// selected nodes.
+	StoredFieldsMemory ByteSize `json:"stored_fields_memory,omitempty"`
+	// StoredFieldsMemoryInBytes Total amount, in bytes, of memory used for stored fields across all shards
+	// assigned to selected nodes.
+	StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"`
+	// TermVectorsMemory Total amount of memory used for term vectors across all shards assigned to
+	// selected nodes.
+	TermVectorsMemory ByteSize `json:"term_vectors_memory,omitempty"`
+	// TermVectorsMemoryInBytes Total amount, in bytes, of memory used for term vectors across all shards
+	// assigned to selected nodes.
+	TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"`
+	// TermsMemory Total amount of memory used for terms across all shards assigned to selected
+	// nodes.
+	TermsMemory ByteSize `json:"terms_memory,omitempty"`
+	// TermsMemoryInBytes Total amount, in bytes, of memory used for terms across all shards assigned
+	// to selected nodes.
+	TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"`
+	// VersionMapMemory Total amount of memory used by all version maps across all shards assigned to
+	// selected nodes.
+	VersionMapMemory ByteSize `json:"version_map_memory,omitempty"`
+	// VersionMapMemoryInBytes Total amount, in bytes, of memory used by all version maps across all shards
+	// assigned to selected nodes.
+	VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
+}
+
+func (s *SegmentsStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int(v)
+				s.Count = f
+			}
+
+		case "doc_values_memory":
+			if err := dec.Decode(&s.DocValuesMemory); err != nil {
+				return fmt.Errorf("%s | %w", "DocValuesMemory", err)
+			}
+
+		case "doc_values_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocValuesMemoryInBytes", err)
+				}
+				s.DocValuesMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.DocValuesMemoryInBytes = f
+			}
+
+		case "file_sizes":
+			if s.FileSizes == nil {
+				s.FileSizes = make(map[string]ShardFileSizeInfo, 0)
+			}
+			if err := dec.Decode(&s.FileSizes); err != nil {
+				return fmt.Errorf("%s | %w", "FileSizes", err)
+			}
+
+		case "fixed_bit_set":
+			if err := dec.Decode(&s.FixedBitSet); err != nil {
+				return fmt.Errorf("%s | %w", "FixedBitSet", err)
+			}
+
+		case "fixed_bit_set_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FixedBitSetMemoryInBytes", err)
+				}
+				s.FixedBitSetMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.FixedBitSetMemoryInBytes = f
+			}
+
+		case "index_writer_memory":
+			if err := dec.Decode(&s.IndexWriterMemory); err != nil {
+				return fmt.Errorf("%s | %w", "IndexWriterMemory", err)
+			}
+
+		case "index_writer_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IndexWriterMemoryInBytes", err)
+				}
+				s.IndexWriterMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.IndexWriterMemoryInBytes = f
+			}
+
+		case "max_unsafe_auto_id_timestamp":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxUnsafeAutoIdTimestamp", err)
+				}
+				s.MaxUnsafeAutoIdTimestamp = value
+			case float64:
+				f := int64(v)
+				s.MaxUnsafeAutoIdTimestamp = f
+			}
+
+		case "memory":
+			if err := dec.Decode(&s.Memory); err != nil {
+				return fmt.Errorf("%s | %w", "Memory", err)
+			}
+
+		case "memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MemoryInBytes", err)
+				}
+				s.MemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.MemoryInBytes = f
+			}
+
+		case "norms_memory":
+			if err := dec.Decode(&s.NormsMemory); err != nil {
+				return fmt.Errorf("%s | %w", "NormsMemory", err)
+			}
+
+		case "norms_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NormsMemoryInBytes", err)
+				}
+				s.NormsMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.NormsMemoryInBytes = f
+			}
+
+		case "points_memory":
+			if err := dec.Decode(&s.PointsMemory); err != nil {
+				return fmt.Errorf("%s | %w", "PointsMemory", err)
+			}
+
+		case "points_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PointsMemoryInBytes", err)
+				}
+				s.PointsMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.PointsMemoryInBytes = f
+			}
+
+		case "stored_fields_memory":
+			if err := dec.Decode(&s.StoredFieldsMemory); err != nil {
+				return fmt.Errorf("%s | %w", "StoredFieldsMemory", err)
+			}
+
+		case "stored_fields_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "StoredFieldsMemoryInBytes", err)
+				}
+				s.StoredFieldsMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.StoredFieldsMemoryInBytes = f
+			}
+
+		case "term_vectors_memory":
+			if err := dec.Decode(&s.TermVectorsMemory); err != nil {
+				return fmt.Errorf("%s | %w", "TermVectorsMemory", err)
+			}
+
+		case "term_vectors_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TermVectorsMemoryInBytes", err)
+				}
+				s.TermVectorsMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.TermVectorsMemoryInBytes = f
+			}
+
+		case "terms_memory":
+			if err := dec.Decode(&s.TermsMemory); err != nil {
+				return fmt.Errorf("%s | %w", "TermsMemory", err)
+			}
+
+		case "terms_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TermsMemoryInBytes", err)
+				}
+				s.TermsMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.TermsMemoryInBytes = f
+			}
+
+		case "version_map_memory":
+			if err := dec.Decode(&s.VersionMapMemory); err != nil {
+				return fmt.Errorf("%s | %w", "VersionMapMemory", err)
+			}
+
+		case "version_map_memory_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "VersionMapMemoryInBytes", err)
+				}
+				s.VersionMapMemoryInBytes = value
+			case float64:
+				f := int64(v)
+				s.VersionMapMemoryInBytes = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSegmentsStats returns a SegmentsStats.
+func NewSegmentsStats() *SegmentsStats {
+	r := &SegmentsStats{
+		FileSizes: make(map[string]ShardFileSizeInfo),
+	}
+
+	return r
+}
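As the tmp.(type) switches above show, the numeric counters tolerate both JSON numbers and quoted numbers. A minimal decode sketch (same import-path assumption as earlier, values invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "count" arrives quoted, "memory_in_bytes" as a bare number; both decode.
	raw := []byte(`{"count": "12", "memory_in_bytes": 4096, "max_unsafe_auto_id_timestamp": -1}`)
	var stats types.SegmentsStats
	if err := json.Unmarshal(raw, &stats); err != nil {
		panic(err)
	}
	fmt.Println(stats.Count, stats.MemoryInBytes, stats.MaxUnsafeAutoIdTimestamp) // 12 4096 -1
}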
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/selectoption.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/selectoption.go
new file mode 100644
index 000000000..2f9089ebf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/selectoption.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SelectOption type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L25-L28
+type SelectOption struct {
+	Label string      `json:"label"`
+	Value ScalarValue `json:"value"`
+}
+
+func (s *SelectOption) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "label":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Label", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Label = o
+
+		case "value":
+			if err := dec.Decode(&s.Value); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSelectOption returns a SelectOption.
+func NewSelectOption() *SelectOption {
+	r := &SelectOption{}
+
+	return r
+}
+
+type SelectOptionVariant interface {
+	SelectOptionCaster() *SelectOption
+}
+
+func (s *SelectOption) SelectOptionCaster() *SelectOption {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/semanticquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/semanticquery.go
new file mode 100644
index 000000000..bc62b6c6a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/semanticquery.go
@@ -0,0 +1,134 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SemanticQuery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/SemanticQuery.ts#L22-L30
+type SemanticQuery struct {
+	// Boost Floating point number used to decrease or increase the relevance scores of
+	// the query.
+	// Boost values are relative to the default value of 1.0.
+	// A boost value between 0 and 1.0 decreases the relevance score.
+	// A value greater than 1.0 increases the relevance score.
+	Boost *float32 `json:"boost,omitempty"`
+	// Field The field to query, which must be a semantic_text field type
+	Field string `json:"field"`
+	// Query The query text
+	Query      string  `json:"query"`
+	QueryName_ *string `json:"_name,omitempty"`
+}
+
+func (s *SemanticQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "field":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Field = o
+
+		case "query":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Query", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Query = o
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		}
+	}
+	return nil
+}
+
+// NewSemanticQuery returns a SemanticQuery.
+func NewSemanticQuery() *SemanticQuery {
+	r := &SemanticQuery{}
+
+	return r
+}
+
+type SemanticQueryVariant interface {
+	SemanticQueryCaster() *SemanticQuery
+}
+
+func (s *SemanticQuery) SemanticQueryCaster() *SemanticQuery {
+	return s
+}
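A hedged construction sketch for the query type (the field name and query text are invented; the boost semantics are exactly those documented above):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	boost := float32(1.5) // > 1.0 raises the relevance score
	q := types.SemanticQuery{
		Field: "content", // assumed to be mapped as semantic_text
		Query: "how do I throttle a transform",
		Boost: &boost,
	}
	body, _ := json.Marshal(q)
	fmt.Println(string(body)) // {"boost":1.5,"field":"content","query":"how do I throttle a transform"}
}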
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// SemanticTextProperty type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L238-L261
+type SemanticTextProperty struct {
+	// ChunkingSettings Settings for chunking text into smaller passages. If specified, these will
+	// override the
+	// chunking settings sent in the inference endpoint associated with
+	// inference_id. If chunking settings are updated,
+	// they will not be applied to existing documents until they are reindexed.
+	ChunkingSettings *ChunkingSettings `json:"chunking_settings,omitempty"`
+	// InferenceId Inference endpoint that will be used to generate embeddings for the field.
+	// This parameter cannot be updated. Use the Create inference API to create the
+	// endpoint.
+	// If `search_inference_id` is specified, the inference endpoint will only be
+	// used at index time.
+	InferenceId *string           `json:"inference_id,omitempty"`
+	Meta        map[string]string `json:"meta,omitempty"`
+	// SearchInferenceId Inference endpoint that will be used to generate embeddings at query time.
+	// You can update this parameter by using the Update mapping API. Use the Create
+	// inference API to create the endpoint.
+	// If not specified, the inference endpoint defined by inference_id will be used
+	// at both index and query time.
+	SearchInferenceId *string `json:"search_inference_id,omitempty"`
+	Type              string  `json:"type,omitempty"`
+}
+
+func (s *SemanticTextProperty) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "chunking_settings":
+			if err := dec.Decode(&s.ChunkingSettings); err != nil {
+				return fmt.Errorf("%s | %w", "ChunkingSettings", err)
+			}
+
+		case "inference_id":
+			if err := dec.Decode(&s.InferenceId); err != nil {
+				return fmt.Errorf("%s | %w", "InferenceId", err)
+			}
+
+		case "meta":
+			if s.Meta == nil {
+				s.Meta = make(map[string]string, 0)
+			}
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		case "search_inference_id":
+			if err := dec.Decode(&s.SearchInferenceId); err != nil {
+				return fmt.Errorf("%s | %w", "SearchInferenceId", err)
+			}
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s SemanticTextProperty) MarshalJSON() ([]byte, error) {
+	type innerSemanticTextProperty SemanticTextProperty
+	tmp := innerSemanticTextProperty{
+		ChunkingSettings:  s.ChunkingSettings,
+		InferenceId:       s.InferenceId,
+		Meta:              s.Meta,
+		SearchInferenceId: s.SearchInferenceId,
+		Type:              s.Type,
+	}
+
+	tmp.Type = "semantic_text"
+
+	return json.Marshal(tmp)
+}
+
+// NewSemanticTextProperty returns a SemanticTextProperty.
+func NewSemanticTextProperty() *SemanticTextProperty {
+	r := &SemanticTextProperty{
+		Meta: make(map[string]string),
+	}
+
+	return r
+}
+
+type SemanticTextPropertyVariant interface {
+	SemanticTextPropertyCaster() *SemanticTextProperty
+}
+
+func (s *SemanticTextProperty) SemanticTextPropertyCaster() *SemanticTextProperty {
+	return s
+}
+
+func (s *SemanticTextProperty) PropertyCaster() *Property {
+	o := Property(s)
+	return &o
+}
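The MarshalJSON override above pins the type discriminator, so a mapping built from this struct always serializes with "type":"semantic_text". A sketch (the endpoint name is hypothetical):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	p := types.NewSemanticTextProperty()
	inferenceID := "my-elser-endpoint" // hypothetical inference endpoint name
	p.InferenceId = &inferenceID
	body, _ := json.Marshal(p)
	fmt.Println(string(body)) // {"inference_id":"my-elser-endpoint","type":"semantic_text"}
}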
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serbiananalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serbiananalyzer.go
new file mode 100644
index 000000000..d9917c892
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serbiananalyzer.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SerbianAnalyzer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L282-L287
+type SerbianAnalyzer struct {
+	StemExclusion []string  `json:"stem_exclusion,omitempty"`
+	Stopwords     StopWords `json:"stopwords,omitempty"`
+	StopwordsPath *string   `json:"stopwords_path,omitempty"`
+	Type          string    `json:"type,omitempty"`
+}
+
+func (s *SerbianAnalyzer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "stem_exclusion":
+			if err := dec.Decode(&s.StemExclusion); err != nil {
+				return fmt.Errorf("%s | %w", "StemExclusion", err)
+			}
+
+		case "stopwords":
+			if err := dec.Decode(&s.Stopwords); err != nil {
+				return fmt.Errorf("%s | %w", "Stopwords", err)
+			}
+
+		case "stopwords_path":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "StopwordsPath", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.StopwordsPath = &o
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s SerbianAnalyzer) MarshalJSON() ([]byte, error) {
+	type innerSerbianAnalyzer SerbianAnalyzer
+	tmp := innerSerbianAnalyzer{
+		StemExclusion: s.StemExclusion,
+		Stopwords:     s.Stopwords,
+		StopwordsPath: s.StopwordsPath,
+		Type:          s.Type,
+	}
+
+	tmp.Type = "serbian"
+
+	return json.Marshal(tmp)
+}
+
+// NewSerbianAnalyzer returns a SerbianAnalyzer.
+func NewSerbianAnalyzer() *SerbianAnalyzer {
+	r := &SerbianAnalyzer{}
+
+	return r
+}
+
+type SerbianAnalyzerVariant interface {
+	SerbianAnalyzerCaster() *SerbianAnalyzer
+}
+
+func (s *SerbianAnalyzer) SerbianAnalyzerCaster() *SerbianAnalyzer {
+	return s
+}
+
+func (s *SerbianAnalyzer) AnalyzerCaster() *Analyzer {
+	o := Analyzer(s)
+	return &o
+}
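The innerSerbianAnalyzer alias is the usual Go trick for overriding MarshalJSON without infinite recursion: the alias type drops the method set, so marshalling tmp cannot re-enter the override while still forcing the "serbian" discriminator. A sketch with an invented exclusion list:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	a := types.NewSerbianAnalyzer()
	a.StemExclusion = []string{"beograd"} // invented stem exclusion
	body, _ := json.Marshal(a)
	fmt.Println(string(body)) // "type" is pinned to "serbian" by the override
}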
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// SerbianNormalizationTokenFilter type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L539-L541
+type SerbianNormalizationTokenFilter struct {
+	Type    string  `json:"type,omitempty"`
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *SerbianNormalizationTokenFilter) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "type":
+			if err := dec.Decode(&s.Type); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+
+		case "version":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON override marshalling to include literal value
+func (s SerbianNormalizationTokenFilter) MarshalJSON() ([]byte, error) {
+	type innerSerbianNormalizationTokenFilter SerbianNormalizationTokenFilter
+	tmp := innerSerbianNormalizationTokenFilter{
+		Type:    s.Type,
+		Version: s.Version,
+	}
+
+	tmp.Type = "serbian_normalization"
+
+	return json.Marshal(tmp)
+}
+
+// NewSerbianNormalizationTokenFilter returns a SerbianNormalizationTokenFilter.
+func NewSerbianNormalizationTokenFilter() *SerbianNormalizationTokenFilter {
+	r := &SerbianNormalizationTokenFilter{}
+
+	return r
+}
+
+type SerbianNormalizationTokenFilterVariant interface {
+	SerbianNormalizationTokenFilterCaster() *SerbianNormalizationTokenFilter
+}
+
+func (s *SerbianNormalizationTokenFilter) SerbianNormalizationTokenFilterCaster() *SerbianNormalizationTokenFilter {
+	return s
+}
+
+func (s *SerbianNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition {
+	o := TokenFilterDefinition(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serialdifferencingaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serialdifferencingaggregation.go
new file mode 100644
index 000000000..236017a6a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serialdifferencingaggregation.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy"
+)
+
+// SerialDifferencingAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L399-L408
+type SerialDifferencingAggregation struct {
+	// BucketsPath Path to the buckets that contain one set of values to correlate.
+	BucketsPath BucketsPath `json:"buckets_path,omitempty"`
+	// Format `DecimalFormat` pattern for the output value.
+	// If specified, the formatted value is returned in the aggregation’s
+	// `value_as_string` property.
+	Format *string `json:"format,omitempty"`
+	// GapPolicy Policy to apply when gaps are found in the data.
+	GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"`
+	// Lag The historical bucket to subtract from the current value.
+	// Must be a positive, non-zero integer.
+	Lag *int `json:"lag,omitempty"`
+}
+
+func (s *SerialDifferencingAggregation) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "buckets_path":
+			if err := dec.Decode(&s.BucketsPath); err != nil {
+				return fmt.Errorf("%s | %w", "BucketsPath", err)
+			}
+
+		case "format":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Format", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Format = &o
+
+		case "gap_policy":
+			if err := dec.Decode(&s.GapPolicy); err != nil {
+				return fmt.Errorf("%s | %w", "GapPolicy", err)
+			}
+
+		case "lag":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Lag", err)
+				}
+				s.Lag = &value
+			case float64:
+				f := int(v)
+				s.Lag = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSerialDifferencingAggregation returns a SerialDifferencingAggregation.
+func NewSerialDifferencingAggregation() *SerialDifferencingAggregation {
+	r := &SerialDifferencingAggregation{}
+
+	return r
+}
+
+type SerialDifferencingAggregationVariant interface {
+	SerialDifferencingAggregationCaster() *SerialDifferencingAggregation
+}
+
+func (s *SerialDifferencingAggregation) SerialDifferencingAggregationCaster() *SerialDifferencingAggregation {
+	return s
+}
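Since buckets_path and lag accept several JSON shapes, decoding from a raw pipeline definition is the simplest way to exercise the type; the path name and values below are invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "lag" is accepted both as a number and as a quoted number, per the
	// tmp.(type) switch in the generated decoder.
	raw := []byte(`{"buckets_path": "the_sum", "lag": "7", "format": "0.0"}`)
	var agg types.SerialDifferencingAggregation
	if err := json.Unmarshal(raw, &agg); err != nil {
		panic(err)
	}
	fmt.Println(*agg.Lag, *agg.Format) // 7 0.0
}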
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstate.go
new file mode 100644
index 000000000..bc944d159
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstate.go
@@ -0,0 +1,37 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// SerializedClusterState type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L234-L240
+type SerializedClusterState struct {
+	Diffs *SerializedClusterStateDetail `json:"diffs,omitempty"`
+	// FullStates Number of published cluster states.
+	FullStates *SerializedClusterStateDetail `json:"full_states,omitempty"`
+}
+
+// NewSerializedClusterState returns a SerializedClusterState.
+func NewSerializedClusterState() *SerializedClusterState {
+	r := &SerializedClusterState{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstatedetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstatedetail.go
new file mode 100644
index 000000000..9fef258e8
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/serializedclusterstatedetail.go
@@ -0,0 +1,137 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SerializedClusterStateDetail type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L242-L248
+type SerializedClusterStateDetail struct {
+	CompressedSize          *string `json:"compressed_size,omitempty"`
+	CompressedSizeInBytes   *int64  `json:"compressed_size_in_bytes,omitempty"`
+	Count                   *int64  `json:"count,omitempty"`
+	UncompressedSize        *string `json:"uncompressed_size,omitempty"`
+	UncompressedSizeInBytes *int64  `json:"uncompressed_size_in_bytes,omitempty"`
+}
+
+func (s *SerializedClusterStateDetail) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "compressed_size":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "CompressedSize", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.CompressedSize = &o
+
+		case "compressed_size_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "CompressedSizeInBytes", err)
+				}
+				s.CompressedSizeInBytes = &value
+			case float64:
+				f := int64(v)
+				s.CompressedSizeInBytes = &f
+			}
+
+		case "count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = &value
+			case float64:
+				f := int64(v)
+				s.Count = &f
+			}
+
+		case "uncompressed_size":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "UncompressedSize", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.UncompressedSize = &o
+
+		case "uncompressed_size_in_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "UncompressedSizeInBytes", err)
+				}
+				s.UncompressedSizeInBytes = &value
+			case float64:
+				f := int64(v)
+				s.UncompressedSizeInBytes = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSerializedClusterStateDetail returns a SerializedClusterStateDetail.
+func NewSerializedClusterStateDetail() *SerializedClusterStateDetail {
+	r := &SerializedClusterStateDetail{}
+
+	return r
+}
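The same quoted-or-numeric tolerance applies here. A decode sketch with invented values (import path assumed as before):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := []byte(`{"count": 42, "compressed_size": "1.6kb", "compressed_size_in_bytes": "1639"}`)
	var d types.SerializedClusterStateDetail
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Println(*d.Count, *d.CompressedSize, *d.CompressedSizeInBytes) // 42 1.6kb 1639
}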
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/servicetoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/servicetoken.go
new file mode 100644
index 000000000..e803a7ede
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/servicetoken.go
@@ -0,0 +1,82 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ServiceToken type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/create_service_token/types.ts#L22-L25
+type ServiceToken struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+func (s *ServiceToken) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		case "value":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Value = o
+
+		}
+	}
+	return nil
+}
+
+// NewServiceToken returns a ServiceToken.
+func NewServiceToken() *ServiceToken {
+	r := &ServiceToken{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setpriorityaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setpriorityaction.go
new file mode 100644
index 000000000..e5fac5e7b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setpriorityaction.go
@@ -0,0 +1,88 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SetPriorityAction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L95-L97
+type SetPriorityAction struct {
+	Priority *int `json:"priority,omitempty"`
+}
+
+func (s *SetPriorityAction) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "priority":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Priority", err)
+				}
+				s.Priority = &value
+			case float64:
+				f := int(v)
+				s.Priority = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSetPriorityAction returns a SetPriorityAction.
+func NewSetPriorityAction() *SetPriorityAction {
+	r := &SetPriorityAction{}
+
+	return r
+}
+
+type SetPriorityActionVariant interface {
+	SetPriorityActionCaster() *SetPriorityAction
+}
+
+func (s *SetPriorityAction) SetPriorityActionCaster() *SetPriorityAction {
+	return s
+}
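A one-line ILM sketch; 100 is an arbitrary example priority:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	priority := 100 // arbitrary; higher-priority indices recover first after a restart
	action := types.SetPriorityAction{Priority: &priority}
	body, _ := json.Marshal(action)
	fmt.Println(string(body)) // {"priority":100}
}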
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setprocessor.go
new file mode 100644
index 000000000..a90964019
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/setprocessor.go
@@ -0,0 +1,210 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SetProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1445-L1479
+type SetProcessor struct {
+	// CopyFrom The origin field which will be copied to `field`, cannot set `value`
+	// simultaneously.
+	// Supported data types are `boolean`, `number`, `array`, `object`, `string`,
+	// `date`, etc.
+	CopyFrom *string `json:"copy_from,omitempty"`
+	// Description Description of the processor.
+	// Useful for describing the purpose of the processor or its configuration.
+	Description *string `json:"description,omitempty"`
+	// Field The field to insert, upsert, or update.
+	// Supports template snippets.
+	Field string `json:"field"`
+	// If Conditionally execute the processor.
+	If *Script `json:"if,omitempty"`
+	// IgnoreEmptyValue If `true` and `value` is a template snippet that evaluates to `null` or the
+	// empty string, the processor quietly exits without modifying the document.
+	IgnoreEmptyValue *bool `json:"ignore_empty_value,omitempty"`
+	// IgnoreFailure Ignore failures for the processor.
+	IgnoreFailure *bool `json:"ignore_failure,omitempty"`
+	// MediaType The media type for encoding `value`.
+	// Applies only when value is a template snippet.
+	// Must be one of `application/json`, `text/plain`, or
+	// `application/x-www-form-urlencoded`.
+	MediaType *string `json:"media_type,omitempty"`
+	// OnFailure Handle failures for the processor.
+	OnFailure []ProcessorContainer `json:"on_failure,omitempty"`
+	// Override If `true` processor will update fields with pre-existing non-null-valued
+	// field.
+	// When set to `false`, such fields will not be touched.
+	Override *bool `json:"override,omitempty"`
+	// Tag Identifier for the processor.
+	// Useful for debugging and metrics.
+	Tag *string `json:"tag,omitempty"`
+	// Value The value to be set for the field.
+	// Supports template snippets.
+	// May specify only one of `value` or `copy_from`.
+	Value json.RawMessage `json:"value,omitempty"`
+}
+
+func (s *SetProcessor) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "copy_from":
+			if err := dec.Decode(&s.CopyFrom); err != nil {
+				return fmt.Errorf("%s | %w", "CopyFrom", err)
+			}
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "if":
+			if err := dec.Decode(&s.If); err != nil {
+				return fmt.Errorf("%s | %w", "If", err)
+			}
+
+		case "ignore_empty_value":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreEmptyValue", err)
+				}
+				s.IgnoreEmptyValue = &value
+			case bool:
+				s.IgnoreEmptyValue = &v
+			}
+
+		case "ignore_failure":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreFailure", err)
+				}
+				s.IgnoreFailure = &value
+			case bool:
+				s.IgnoreFailure = &v
+			}
+
+		case "media_type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "MediaType", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.MediaType = &o
+
+		case "on_failure":
+			if err := dec.Decode(&s.OnFailure); err != nil {
+				return fmt.Errorf("%s | %w", "OnFailure", err)
+			}
+
+		case "override":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Override", err)
+				}
+				s.Override = &value
+			case bool:
+				s.Override = &v
+			}
+
+		case "tag":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Tag", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Tag = &o
+
+		case "value":
+			if err := dec.Decode(&s.Value); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSetProcessor returns a SetProcessor.
+func NewSetProcessor() *SetProcessor {
+	r := &SetProcessor{}
+
+	return r
+}
+
+type SetProcessorVariant interface {
+	SetProcessorCaster() *SetProcessor
+}
+
+func (s *SetProcessor) SetProcessorCaster() *SetProcessor {
+	return s
+}
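A construction sketch for the set processor; it uses copy_from (mutually exclusive with value, per the doc comment above), and the field names are purely illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	copyFrom := "source_timestamp" // illustrative source field
	override := false              // keep pre-existing non-null values untouched
	p := types.SetProcessor{
		Field:    "event.created",
		CopyFrom: &copyFrom,
		Override: &override,
	}
	body, _ := json.Marshal(p)
	fmt.Println(string(body))
}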
+	Tag *string `json:"tag,omitempty"`
+}
+
+func (s *SetSecurityUserProcessor) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "description":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "if":
+			if err := dec.Decode(&s.If); err != nil {
+				return fmt.Errorf("%s | %w", "If", err)
+			}
+
+		case "ignore_failure":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "IgnoreFailure", err)
+				}
+				s.IgnoreFailure = &value
+			case bool:
+				s.IgnoreFailure = &v
+			}
+
+		case "on_failure":
+			if err := dec.Decode(&s.OnFailure); err != nil {
+				return fmt.Errorf("%s | %w", "OnFailure", err)
+			}
+
+		case "properties":
+			if err := dec.Decode(&s.Properties); err != nil {
+				return fmt.Errorf("%s | %w", "Properties", err)
+			}
+
+		case "tag":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Tag", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Tag = &o
+
+		}
+	}
+	return nil
+}
+
+// NewSetSecurityUserProcessor returns a SetSecurityUserProcessor.
+func NewSetSecurityUserProcessor() *SetSecurityUserProcessor {
+	r := &SetSecurityUserProcessor{}
+
+	return r
+}
+
+type SetSecurityUserProcessorVariant interface {
+	SetSecurityUserProcessorCaster() *SetSecurityUserProcessor
+}
+
+func (s *SetSecurityUserProcessor) SetSecurityUserProcessorCaster() *SetSecurityUserProcessor {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settings.go
new file mode 100644
index 000000000..2fc9ffb3a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settings.go
@@ -0,0 +1,192 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Settings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L98-L144
+type Settings struct {
+	// AlignCheckpoints Specifies whether the transform checkpoint ranges should be optimized for
+	// performance. Such optimization can align
+	// checkpoint ranges with the date histogram interval when date histogram is
+	// specified as a group source in the
+	// transform config. As a result, less document updates in the destination index
+	// will be performed thus improving
+	// overall performance.
+	AlignCheckpoints *bool `json:"align_checkpoints,omitempty"`
+	// DatesAsEpochMillis Defines if dates in the output should be written as ISO formatted string or as
+	// millis since epoch. epoch_millis was
+	// the default for transforms created before version 7.11. For compatible output
+	// set this value to `true`.
+	DatesAsEpochMillis *bool `json:"dates_as_epoch_millis,omitempty"`
+	// DeduceMappings Specifies whether the transform should deduce the destination index mappings
+	// from the transform configuration.
+	DeduceMappings *bool `json:"deduce_mappings,omitempty"`
+	// DocsPerSecond Specifies a limit on the number of input documents per second. This setting
+	// throttles the transform by adding a
+	// wait time between search requests. The default value is null, which disables
+	// throttling.
+	DocsPerSecond *float32 `json:"docs_per_second,omitempty"`
+	// MaxPageSearchSize Defines the initial page size to use for the composite aggregation for each
+	// checkpoint. If circuit breaker
+	// exceptions occur, the page size is dynamically adjusted to a lower value. The
+	// minimum value is `10` and the
+	// maximum is `65,536`.
+	MaxPageSearchSize *int `json:"max_page_search_size,omitempty"`
+	// Unattended If `true`, the transform runs in unattended mode. In unattended mode, the
+	// transform retries indefinitely in case
+	// of an error which means the transform never fails. Setting the number of
+	// retries other than infinite fails in
+	// validation.
+	Unattended *bool `json:"unattended,omitempty"`
+}
+
+func (s *Settings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "align_checkpoints":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AlignCheckpoints", err)
+				}
+				s.AlignCheckpoints = &value
+			case bool:
+				s.AlignCheckpoints = &v
+			}
+
+		case "dates_as_epoch_millis":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DatesAsEpochMillis", err)
+				}
+				s.DatesAsEpochMillis = &value
+			case bool:
+				s.DatesAsEpochMillis = &v
+			}
+
+		case "deduce_mappings":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DeduceMappings", err)
+				}
+				s.DeduceMappings = &value
+			case bool:
+				s.DeduceMappings = &v
+			}
+
+		case "docs_per_second":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocsPerSecond", err)
+				}
+				f := float32(value)
+				s.DocsPerSecond = &f
+			case float64:
+				f := float32(v)
+				s.DocsPerSecond = &f
+			}
+
+		case "max_page_search_size":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MaxPageSearchSize", err)
+				}
+				s.MaxPageSearchSize = &value
+			case float64:
+				f := int(v)
+				s.MaxPageSearchSize = &f
+			}
+
+		case "unattended":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Unattended", err)
+				}
+				s.Unattended = &value
+			case bool:
+				s.Unattended = &v
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSettings returns a Settings.
+func NewSettings() *Settings {
+	r := &Settings{}
+
+	return r
+}
+
+type SettingsVariant interface {
+	SettingsCaster() *Settings
+}
+
+func (s *Settings) SettingsCaster() *Settings {
+	return s
+}
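A transform-settings sketch; the numbers are invented, and the field semantics are exactly those documented above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	docsPerSecond := float32(500) // throttle input to roughly 500 docs/s
	unattended := true            // retry indefinitely instead of failing
	s := types.Settings{
		DocsPerSecond: &docsPerSecond,
		Unattended:    &unattended,
	}
	body, _ := json.Marshal(s)
	fmt.Println(string(body)) // {"docs_per_second":500,"unattended":true}
}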
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SettingsAnalyze type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L245-L248 +type SettingsAnalyze struct { + MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` +} + +func (s *SettingsAnalyze) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_count": + if err := dec.Decode(&s.MaxTokenCount); err != nil { + return fmt.Errorf("%s | %w", "MaxTokenCount", err) + } + + } + } + return nil +} + +// NewSettingsAnalyze returns a SettingsAnalyze. +func NewSettingsAnalyze() *SettingsAnalyze { + r := &SettingsAnalyze{} + + return r +} + +type SettingsAnalyzeVariant interface { + SettingsAnalyzeCaster() *SettingsAnalyze +} + +func (s *SettingsAnalyze) SettingsAnalyzeCaster() *SettingsAnalyze { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingshighlight.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingshighlight.go new file mode 100644 index 000000000..b2dcaa0f8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingshighlight.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SettingsHighlight type. 
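In SettingsAnalyze above, MaxTokenCount is typed as Stringifiedinteger, one of the generated stringified unions: the decoded value is kept whether the server sends a number or a quoted string. A hedged sketch (imports as in the previous sketch):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var a types.SettingsAnalyze
	// A quoted "10000" decodes just as a bare 10000 would.
	if err := json.Unmarshal([]byte(`{"max_token_count":"10000"}`), &a); err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", a.MaxTokenCount) // 10000 for either spelling
}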
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L240-L243 +type SettingsHighlight struct { + MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` +} + +func (s *SettingsHighlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_analyzed_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAnalyzedOffset", err) + } + s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f + } + + } + } + return nil +} + +// NewSettingsHighlight returns a SettingsHighlight. +func NewSettingsHighlight() *SettingsHighlight { + r := &SettingsHighlight{} + + return r +} + +type SettingsHighlightVariant interface { + SettingsHighlightCaster() *SettingsHighlight +} + +func (s *SettingsHighlight) SettingsHighlightCaster() *SettingsHighlight { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingsquerystring.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingsquerystring.go new file mode 100644 index 000000000..58a9ab8a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingsquerystring.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SettingsQueryString type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L260-L262 +type SettingsQueryString struct { + Lenient Stringifiedboolean `json:"lenient"` +} + +func (s *SettingsQueryString) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lenient": + if err := dec.Decode(&s.Lenient); err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + + } + } + return nil +} + +// NewSettingsQueryString returns a SettingsQueryString. 
+func NewSettingsQueryString() *SettingsQueryString { + r := &SettingsQueryString{} + + return r +} + +type SettingsQueryStringVariant interface { + SettingsQueryStringCaster() *SettingsQueryString +} + +func (s *SettingsQueryString) SettingsQueryStringCaster() *SettingsQueryString { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssearch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssearch.go new file mode 100644 index 000000000..e21480855 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssearch.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SettingsSearch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L250-L253 +type SettingsSearch struct { + Idle *SearchIdle `json:"idle,omitempty"` + Slowlog *SlowlogSettings `json:"slowlog,omitempty"` +} + +// NewSettingsSearch returns a SettingsSearch. +func NewSettingsSearch() *SettingsSearch { + r := &SettingsSearch{} + + return r +} + +type SettingsSearchVariant interface { + SettingsSearchCaster() *SettingsSearch +} + +func (s *SettingsSearch) SettingsSearchCaster() *SettingsSearch { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarity.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarity.go new file mode 100644 index 000000000..efbc205a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarity.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
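A note on the recurring *Variant interfaces (SettingsQueryStringVariant, SettingsSearchVariant, and so on): the generated builders accept these interfaces so that anything exposing the matching Caster method can be passed where the concrete type is expected, and the Caster simply returns the receiver. A minimal sketch of that contract:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// apply accepts anything satisfying the generated Variant interface and
// recovers the concrete struct through its Caster method.
func apply(v types.SettingsSearchVariant) *types.SettingsSearch {
	return v.SettingsSearchCaster()
}

func main() {
	s := types.NewSettingsSearch()
	fmt.Println(apply(s) == s) // true: the Caster returns the receiver itself
}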
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SettingsSimilarity holds the union for the following types: +// +// SettingsSimilarityBm25 +// SettingsSimilarityBoolean +// SettingsSimilarityDfi +// SettingsSimilarityDfr +// SettingsSimilarityIb +// SettingsSimilarityLmd +// SettingsSimilarityLmj +// SettingsSimilarityScripted +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L180-L192 +type SettingsSimilarity any + +type SettingsSimilarityVariant interface { + SettingsSimilarityCaster() *SettingsSimilarity +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritybm25.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritybm25.go new file mode 100644 index 000000000..174479ee0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritybm25.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SettingsSimilarityBm25 type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L198-L203 +type SettingsSimilarityBm25 struct { + B *Float64 `json:"b,omitempty"` + DiscountOverlaps *bool `json:"discount_overlaps,omitempty"` + K1 *Float64 `json:"k1,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SettingsSimilarityBm25) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "b": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "B", err) + } + f := Float64(value) + s.B = &f + case float64: + f := Float64(v) + s.B = &f + } + + case "discount_overlaps": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DiscountOverlaps", err) + } + s.DiscountOverlaps = &value + case bool: + s.DiscountOverlaps = &v + } + + case "k1": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "K1", err) + } + f := Float64(value) + s.K1 = &f + case float64: + f := Float64(v) + s.K1 = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityBm25) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityBm25 SettingsSimilarityBm25 + tmp := innerSettingsSimilarityBm25{ + B: s.B, + DiscountOverlaps: s.DiscountOverlaps, + K1: s.K1, + Type: s.Type, + } + + tmp.Type = "BM25" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityBm25 returns a SettingsSimilarityBm25. +func NewSettingsSimilarityBm25() *SettingsSimilarityBm25 { + r := &SettingsSimilarityBm25{} + + return r +} + +type SettingsSimilarityBm25Variant interface { + SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 +} + +func (s *SettingsSimilarityBm25) SettingsSimilarityBm25Caster() *SettingsSimilarityBm25 { + return s +} + +func (s *SettingsSimilarityBm25) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityboolean.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityboolean.go new file mode 100644 index 000000000..9beb173c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityboolean.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
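The MarshalJSON override in SettingsSimilarityBm25 above is the discriminator half of the union: whatever Type holds in memory, serialization pins it to the literal "BM25", so a round-tripped document always carries the correct tag. A short sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	b := types.Float64(0.75)
	sim := types.SettingsSimilarityBm25{B: &b}
	out, err := json.Marshal(sim)
	if err != nil {
		panic(err)
	}
	// Nil optional fields are omitted; the type tag is always injected.
	fmt.Println(string(out)) // {"b":0.75,"type":"BM25"}
}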
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// SettingsSimilarityBoolean type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L194-L196 +type SettingsSimilarityBoolean struct { + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityBoolean) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityBoolean SettingsSimilarityBoolean + tmp := innerSettingsSimilarityBoolean{ + Type: s.Type, + } + + tmp.Type = "boolean" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityBoolean returns a SettingsSimilarityBoolean. +func NewSettingsSimilarityBoolean() *SettingsSimilarityBoolean { + r := &SettingsSimilarityBoolean{} + + return r +} + +type SettingsSimilarityBooleanVariant interface { + SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean +} + +func (s *SettingsSimilarityBoolean) SettingsSimilarityBooleanCaster() *SettingsSimilarityBoolean { + return s +} + +func (s *SettingsSimilarityBoolean) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfi.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfi.go new file mode 100644 index 000000000..ff4967f00 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfi.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfiindependencemeasure" +) + +// SettingsSimilarityDfi type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L205-L208 +type SettingsSimilarityDfi struct { + IndependenceMeasure dfiindependencemeasure.DFIIndependenceMeasure `json:"independence_measure"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityDfi) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityDfi SettingsSimilarityDfi + tmp := innerSettingsSimilarityDfi{ + IndependenceMeasure: s.IndependenceMeasure, + Type: s.Type, + } + + tmp.Type = "DFI" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityDfi returns a SettingsSimilarityDfi. +func NewSettingsSimilarityDfi() *SettingsSimilarityDfi { + r := &SettingsSimilarityDfi{} + + return r +} + +type SettingsSimilarityDfiVariant interface { + SettingsSimilarityDfiCaster() *SettingsSimilarityDfi +} + +func (s *SettingsSimilarityDfi) SettingsSimilarityDfiCaster() *SettingsSimilarityDfi { + return s +} + +func (s *SettingsSimilarityDfi) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfr.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfr.go new file mode 100644 index 000000000..b1f581003 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritydfr.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfraftereffect" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfrbasicmodel" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization" +) + +// SettingsSimilarityDfr type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L210-L215 +type SettingsSimilarityDfr struct { + AfterEffect dfraftereffect.DFRAfterEffect `json:"after_effect"` + BasicModel dfrbasicmodel.DFRBasicModel `json:"basic_model"` + Normalization normalization.Normalization `json:"normalization"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityDfr) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityDfr SettingsSimilarityDfr + tmp := innerSettingsSimilarityDfr{ + AfterEffect: s.AfterEffect, + BasicModel: s.BasicModel, + Normalization: s.Normalization, + Type: s.Type, + } + + tmp.Type = "DFR" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityDfr returns a SettingsSimilarityDfr. +func NewSettingsSimilarityDfr() *SettingsSimilarityDfr { + r := &SettingsSimilarityDfr{} + + return r +} + +type SettingsSimilarityDfrVariant interface { + SettingsSimilarityDfrCaster() *SettingsSimilarityDfr +} + +func (s *SettingsSimilarityDfr) SettingsSimilarityDfrCaster() *SettingsSimilarityDfr { + return s +} + +func (s *SettingsSimilarityDfr) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityib.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityib.go new file mode 100644 index 000000000..6c28d67e3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityib.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ibdistribution" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/iblambda" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization" +) + +// SettingsSimilarityIb type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L217-L222 +type SettingsSimilarityIb struct { + Distribution ibdistribution.IBDistribution `json:"distribution"` + Lambda iblambda.IBLambda `json:"lambda"` + Normalization normalization.Normalization `json:"normalization"` + Type string `json:"type,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityIb) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityIb SettingsSimilarityIb + tmp := innerSettingsSimilarityIb{ + Distribution: s.Distribution, + Lambda: s.Lambda, + Normalization: s.Normalization, + Type: s.Type, + } + + tmp.Type = "IB" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityIb returns a SettingsSimilarityIb. +func NewSettingsSimilarityIb() *SettingsSimilarityIb { + r := &SettingsSimilarityIb{} + + return r +} + +type SettingsSimilarityIbVariant interface { + SettingsSimilarityIbCaster() *SettingsSimilarityIb +} + +func (s *SettingsSimilarityIb) SettingsSimilarityIbCaster() *SettingsSimilarityIb { + return s +} + +func (s *SettingsSimilarityIb) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmd.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmd.go new file mode 100644 index 000000000..62fd8102b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmd.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SettingsSimilarityLmd type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L224-L227 +type SettingsSimilarityLmd struct { + Mu *Float64 `json:"mu,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SettingsSimilarityLmd) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mu": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Mu", err) + } + f := Float64(value) + s.Mu = &f + case float64: + f := Float64(v) + s.Mu = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityLmd) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityLmd SettingsSimilarityLmd + tmp := innerSettingsSimilarityLmd{ + Mu: s.Mu, + Type: s.Type, + } + + tmp.Type = "LMDirichlet" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityLmd returns a SettingsSimilarityLmd. +func NewSettingsSimilarityLmd() *SettingsSimilarityLmd { + r := &SettingsSimilarityLmd{} + + return r +} + +type SettingsSimilarityLmdVariant interface { + SettingsSimilarityLmdCaster() *SettingsSimilarityLmd +} + +func (s *SettingsSimilarityLmd) SettingsSimilarityLmdCaster() *SettingsSimilarityLmd { + return s +} + +func (s *SettingsSimilarityLmd) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmj.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmj.go new file mode 100644 index 000000000..bd39bffde --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilaritylmj.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SettingsSimilarityLmj type. 
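Because SettingsSimilarity is an open union (`any`), custom similarities travel as a name-keyed map whose values are any of the variant structs. A hedged sketch using the LMDirichlet variant defined above (the surrounding index-settings wiring is assumed, not shown in this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	lmd := types.NewSettingsSimilarityLmd()
	mu := types.Float64(1500)
	lmd.Mu = &mu

	// Any variant value satisfies the SettingsSimilarity union directly.
	sims := map[string]types.SettingsSimilarity{"my_lmd": lmd}

	out, _ := json.Marshal(sims)
	fmt.Println(string(out)) // {"my_lmd":{"mu":1500,"type":"LMDirichlet"}}
}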
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L229-L232 +type SettingsSimilarityLmj struct { + Lambda *Float64 `json:"lambda,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SettingsSimilarityLmj) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lambda": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Lambda", err) + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityLmj) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityLmj SettingsSimilarityLmj + tmp := innerSettingsSimilarityLmj{ + Lambda: s.Lambda, + Type: s.Type, + } + + tmp.Type = "LMJelinekMercer" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityLmj returns a SettingsSimilarityLmj. +func NewSettingsSimilarityLmj() *SettingsSimilarityLmj { + r := &SettingsSimilarityLmj{} + + return r +} + +type SettingsSimilarityLmjVariant interface { + SettingsSimilarityLmjCaster() *SettingsSimilarityLmj +} + +func (s *SettingsSimilarityLmj) SettingsSimilarityLmjCaster() *SettingsSimilarityLmj { + return s +} + +func (s *SettingsSimilarityLmj) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityscripted.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityscripted.go new file mode 100644 index 000000000..fd0840790 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/settingssimilarityscripted.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" +) + +// SettingsSimilarityScripted type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L234-L238 +type SettingsSimilarityScripted struct { + Script Script `json:"script"` + Type string `json:"type,omitempty"` + WeightScript *Script `json:"weight_script,omitempty"` +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityScripted) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityScripted SettingsSimilarityScripted + tmp := innerSettingsSimilarityScripted{ + Script: s.Script, + Type: s.Type, + WeightScript: s.WeightScript, + } + + tmp.Type = "scripted" + + return json.Marshal(tmp) +} + +// NewSettingsSimilarityScripted returns a SettingsSimilarityScripted. +func NewSettingsSimilarityScripted() *SettingsSimilarityScripted { + r := &SettingsSimilarityScripted{} + + return r +} + +type SettingsSimilarityScriptedVariant interface { + SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted +} + +func (s *SettingsSimilarityScripted) SettingsSimilarityScriptedCaster() *SettingsSimilarityScripted { + return s +} + +func (s *SettingsSimilarityScripted) SettingsSimilarityCaster() *SettingsSimilarity { + o := SettingsSimilarity(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapefieldquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapefieldquery.go new file mode 100644 index 000000000..ba67693d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapefieldquery.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation" +) + +// ShapeFieldQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L383-L396 +type ShapeFieldQuery struct { + // IndexedShape Queries using a pre-indexed shape. + IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` + // Relation Spatial relation between the query shape and the document shape. + Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"` + // Shape Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) + // format. 
+ Shape json.RawMessage `json:"shape,omitempty"` +} + +func (s *ShapeFieldQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indexed_shape": + if err := dec.Decode(&s.IndexedShape); err != nil { + return fmt.Errorf("%s | %w", "IndexedShape", err) + } + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + case "shape": + if err := dec.Decode(&s.Shape); err != nil { + return fmt.Errorf("%s | %w", "Shape", err) + } + + } + } + return nil +} + +// NewShapeFieldQuery returns a ShapeFieldQuery. +func NewShapeFieldQuery() *ShapeFieldQuery { + r := &ShapeFieldQuery{} + + return r +} + +type ShapeFieldQueryVariant interface { + ShapeFieldQueryCaster() *ShapeFieldQuery +} + +func (s *ShapeFieldQuery) ShapeFieldQueryCaster() *ShapeFieldQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapeproperty.go new file mode 100644 index 000000000..cd132c59d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapeproperty.go @@ -0,0 +1,928 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// ShapeProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/geo.ts#L81-L93 +type ShapeProperty struct { + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + IgnoreZValue *bool `json:"ignore_z_value,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Orientation *geoorientation.GeoOrientation `json:"orientation,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ShapeProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := 
NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "ignore_z_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreZValue", err) + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "orientation": + if err := dec.Decode(&s.Orientation); err != nil { + return fmt.Errorf("%s | %w", "Orientation", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := 
NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ShapeProperty) MarshalJSON() ([]byte, error) { + type innerShapeProperty ShapeProperty + tmp := innerShapeProperty{ + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "shape" + + return json.Marshal(tmp) +} + +// NewShapeProperty returns a ShapeProperty. +func NewShapeProperty() *ShapeProperty { + r := &ShapeProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ShapePropertyVariant interface { + ShapePropertyCaster() *ShapeProperty +} + +func (s *ShapeProperty) ShapePropertyCaster() *ShapeProperty { + return s +} + +func (s *ShapeProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapequery.go new file mode 100644 index 000000000..efe2415e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shapequery.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShapeQuery type. 
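Review note: the `fields` and `properties` maps above are decoded polymorphically. Each raw message is first probed for its "type" key (defaulting to "object" when the key is absent), then routed to the matching concrete constructor for a second decode over the same bytes — that is also why the generated loop calls buf.Seek(0, io.SeekStart) between the two decodes. A minimal sketch of the dispatch idea, using two hypothetical stand-in property types rather than the generated ones (the sketch simply unmarshals twice, which is equivalent):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for two of the ~60 concrete property types dispatched above.
type keywordProp struct {
	Type        string `json:"type"`
	IgnoreAbove int    `json:"ignore_above,omitempty"`
}

type objectProp struct {
	Type string `json:"type"`
}

// decodeProperty probes the raw message for "type" and dispatches,
// defaulting to "object" exactly as the generated decoder does.
func decodeProperty(raw json.RawMessage) (any, error) {
	kind := make(map[string]any)
	if err := json.Unmarshal(raw, &kind); err != nil {
		return nil, err
	}
	if _, ok := kind["type"]; !ok {
		kind["type"] = "object"
	}
	switch kind["type"] {
	case "keyword":
		var p keywordProp
		err := json.Unmarshal(raw, &p)
		return p, err
	default:
		var p objectProp
		err := json.Unmarshal(raw, &p)
		return p, err
	}
}

func main() {
	p, err := decodeProperty(json.RawMessage(`{"type":"keyword","ignore_above":256}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", p) // main.keywordProp{Type:"keyword", IgnoreAbove:256}
}

The marshal side is the inverse: ShapeProperty.MarshalJSON above copies into an inner alias type and pins Type to "shape", so the discriminator is always present on the wire even when the caller never set it.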
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L367-L381 +type ShapeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped When set to `true` the query ignores an unmapped field and will not match any + // documents. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + ShapeQuery map[string]ShapeFieldQuery `json:"-"` +} + +func (s *ShapeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreUnmapped", err) + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + if key, ok := t.(string); ok { + if s.ShapeQuery == nil { + s.ShapeQuery = make(map[string]ShapeFieldQuery, 0) + } + raw := NewShapeFieldQuery() + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "ShapeQuery", err) + } + s.ShapeQuery[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s ShapeQuery) MarshalJSON() ([]byte, error) { + type opt ShapeQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.ShapeQuery { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "ShapeQuery") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewShapeQuery returns a ShapeQuery. +func NewShapeQuery() *ShapeQuery { + r := &ShapeQuery{ + ShapeQuery: make(map[string]ShapeFieldQuery), + } + + return r +} + +type ShapeQueryVariant interface { + ShapeQueryCaster() *ShapeQuery +} + +func (s *ShapeQuery) ShapeQueryCaster() *ShapeQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardcommit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardcommit.go new file mode 100644 index 000000000..3bfe94404 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardcommit.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardCommit type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L112-L117 +type ShardCommit struct { + Generation int `json:"generation"` + Id string `json:"id"` + NumDocs int64 `json:"num_docs"` + UserData map[string]string `json:"user_data"` +} + +func (s *ShardCommit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "generation": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Generation", err) + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "num_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumDocs", err) + } + s.NumDocs = value + case float64: + f := int64(v) + s.NumDocs = f + } + + case "user_data": + if s.UserData == nil { + s.UserData = make(map[string]string, 0) + } + if err := dec.Decode(&s.UserData); err != nil { + return fmt.Errorf("%s | %w", "UserData", err) + } + + } + } + return nil +} + +// NewShardCommit returns a ShardCommit. +func NewShardCommit() *ShardCommit { + r := &ShardCommit{ + UserData: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfailure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfailure.go new file mode 100644 index 000000000..04800a50f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfailure.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
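Review note on ShapeQuery.MarshalJSON a little above: the per-field queries live in a map excluded from normal encoding (`json:"-"`), so the method round-trips the struct through a map[string]any and then inlines each field name as a top-level JSON key, matching the query DSL where the document field name itself is the key. A hedged sketch of the same flattening with a hypothetical two-field type:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical query carrying fixed options plus dynamic per-field entries.
type fieldQuery struct {
	Boost  *float32          `json:"boost,omitempty"`
	Fields map[string]string `json:"-"` // keyed by document field name
}

func (q fieldQuery) MarshalJSON() ([]byte, error) {
	type opt fieldQuery // alias type: drops MarshalJSON, prevents recursion
	tmp := make(map[string]any)

	data, err := json.Marshal(opt(q))
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	// Inline the dynamic entries beside the fixed keys.
	for key, value := range q.Fields {
		tmp[key] = value
	}
	return json.Marshal(tmp)
}

func main() {
	b := float32(2)
	out, _ := json.Marshal(fieldQuery{
		Boost:  &b,
		Fields: map[string]string{"geometry": "..."},
	})
	fmt.Println(string(out)) // {"boost":2,"geometry":"..."}
}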
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardFailure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Errors.ts#L52-L58 +type ShardFailure struct { + Index *string `json:"index,omitempty"` + Node *string `json:"node,omitempty"` + Reason ErrorCause `json:"reason"` + Shard int `json:"shard"` + Status *string `json:"status,omitempty"` +} + +func (s *ShardFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + + case "shard": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + } + } + return nil +} + +// NewShardFailure returns a ShardFailure. +func NewShardFailure() *ShardFailure { + r := &ShardFailure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfilesizeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfilesizeinfo.go new file mode 100644 index 000000000..42dd48a21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardfilesizeinfo.go @@ -0,0 +1,156 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
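Review note: every UnmarshalJSON in these files, ShardFailure above included, is the same hand-rolled loop — walk the object with json.Decoder.Token, switch on each key, decode the value in place — rather than tag-driven decoding, which is what makes the per-field leniency possible. A stripped-down sketch with a hypothetical two-field struct:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

type doc struct {
	Id    string
	Count int
}

func (d *doc) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break // all tokens consumed
			}
			return err
		}
		// Delimiters ('{', '}') arrive as tokens too and fall through the
		// switch; the generated decoders enumerate every spec'd key, so each
		// value token is consumed by dec.Decode right after its key.
		switch t {
		case "id":
			if err := dec.Decode(&d.Id); err != nil {
				return fmt.Errorf("%s | %w", "Id", err)
			}
		case "count":
			if err := dec.Decode(&d.Count); err != nil {
				return fmt.Errorf("%s | %w", "Count", err)
			}
		}
	}
	return nil
}

func main() {
	var d doc
	if err := json.Unmarshal([]byte(`{"id":"s1","count":3}`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Id, d.Count) // s1 3
}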
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardFileSizeInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L124-L131 +type ShardFileSizeInfo struct { + AverageSizeInBytes *int64 `json:"average_size_in_bytes,omitempty"` + Count *int64 `json:"count,omitempty"` + Description string `json:"description"` + MaxSizeInBytes *int64 `json:"max_size_in_bytes,omitempty"` + MinSizeInBytes *int64 `json:"min_size_in_bytes,omitempty"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *ShardFileSizeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "AverageSizeInBytes", err) + } + s.AverageSizeInBytes = &value + case float64: + f := int64(v) + s.AverageSizeInBytes = &f + } + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "max_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSizeInBytes", err) + } + s.MaxSizeInBytes = &value + case float64: + f := int64(v) + s.MaxSizeInBytes = &f + } + + case "min_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinSizeInBytes", err) + } + s.MinSizeInBytes = &value + case float64: + f := int64(v) + s.MinSizeInBytes = &f + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewShardFileSizeInfo returns a ShardFileSizeInfo. +func NewShardFileSizeInfo() *ShardFileSizeInfo { + r := &ShardFileSizeInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardhealthstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardhealthstats.go new file mode 100644 index 000000000..e709d0f7c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardhealthstats.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// ShardHealthStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/health/types.ts#L37-L45 +type ShardHealthStats struct { + ActiveShards int `json:"active_shards"` + InitializingShards int `json:"initializing_shards"` + PrimaryActive bool `json:"primary_active"` + RelocatingShards int `json:"relocating_shards"` + Status healthstatus.HealthStatus `json:"status"` + UnassignedPrimaryShards int `json:"unassigned_primary_shards"` + UnassignedShards int `json:"unassigned_shards"` +} + +func (s *ShardHealthStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ActiveShards", err) + } + s.ActiveShards = value + case float64: + f := int(v) + s.ActiveShards = f + } + + case "initializing_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "InitializingShards", err) + } + s.InitializingShards = value + case float64: + f := int(v) + s.InitializingShards = f + } + + case "primary_active": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryActive", err) + } + s.PrimaryActive = value + case bool: + s.PrimaryActive = v + } + + case "relocating_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RelocatingShards", err) + } + s.RelocatingShards = value + case float64: + f := int(v) + s.RelocatingShards = f + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "unassigned_primary_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedPrimaryShards", err) + } + s.UnassignedPrimaryShards = value + case float64: + f := int(v) + 
s.UnassignedPrimaryShards = f + } + + case "unassigned_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedShards", err) + } + s.UnassignedShards = value + case float64: + f := int(v) + s.UnassignedShards = f + } + + } + } + return nil +} + +// NewShardHealthStats returns a ShardHealthStats. +func NewShardHealthStats() *ShardHealthStats { + r := &ShardHealthStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardlease.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardlease.go new file mode 100644 index 000000000..55d093705 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardlease.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardLease type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L133-L138 +type ShardLease struct { + Id string `json:"id"` + RetainingSeqNo int64 `json:"retaining_seq_no"` + Source string `json:"source"` + Timestamp int64 `json:"timestamp"` +} + +func (s *ShardLease) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "retaining_seq_no": + if err := dec.Decode(&s.RetainingSeqNo); err != nil { + return fmt.Errorf("%s | %w", "RetainingSeqNo", err) + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = value + case float64: + f := int64(v) + s.Timestamp = f + } + + } + } + return nil +} + +// NewShardLease returns a ShardLease. 
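Review note: numeric fields such as ShardLease.Timestamp here (and Generation/NumDocs on ShardCommit earlier) go through `any` plus a type switch because the server may emit them either as JSON numbers or as quoted strings. The same tolerance, reduced to a helper for clarity (a sketch, not the generated API):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// looseInt64 accepts 123 as well as "123", like the generated decoders.
func looseInt64(raw json.RawMessage) (int64, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return 0, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.ParseInt(v, 10, 64)
	case float64: // encoding/json decodes every JSON number into float64,
		// so exactness on this path caps at 2^53
		return int64(v), nil
	}
	return 0, fmt.Errorf("unexpected type %T", tmp)
}

func main() {
	a, _ := looseInt64(json.RawMessage(`1715000000000`))
	b, _ := looseInt64(json.RawMessage(`"1715000000000"`))
	fmt.Println(a == b) // true
}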
+func NewShardLease() *ShardLease { + r := &ShardLease{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardmigrationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardmigrationstatus.go new file mode 100644 index 000000000..496ec123d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardmigrationstatus.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus" +) + +// ShardMigrationStatus type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 +type ShardMigrationStatus struct { + Status shutdownstatus.ShutdownStatus `json:"status"` +} + +// NewShardMigrationStatus returns a ShardMigrationStatus. +func NewShardMigrationStatus() *ShardMigrationStatus { + r := &ShardMigrationStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardpath.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardpath.go new file mode 100644 index 000000000..9ca91e0d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardpath.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardPath type. 
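Review note: ShardMigrationStatus just above is the exception that proves the rule — a single enum field needs no string/number leniency, so the generator emits no UnmarshalJSON at all and plain tag-driven decoding applies. A stand-in illustration (the status value here is invented):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the generated type: one enum-ish string field,
// so struct tags alone describe the mapping.
type migrationStatus struct {
	Status string `json:"status"`
}

func main() {
	var m migrationStatus
	if err := json.Unmarshal([]byte(`{"status":"COMPLETE"}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Status) // COMPLETE
}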
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L140-L144 +type ShardPath struct { + DataPath string `json:"data_path"` + IsCustomDataPath bool `json:"is_custom_data_path"` + StatePath string `json:"state_path"` +} + +func (s *ShardPath) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DataPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataPath = o + + case "is_custom_data_path": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IsCustomDataPath", err) + } + s.IsCustomDataPath = value + case bool: + s.IsCustomDataPath = v + } + + case "state_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StatePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StatePath = o + + } + } + return nil +} + +// NewShardPath returns a ShardPath. +func NewShardPath() *ShardPath { + r := &ShardPath{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardprofile.go new file mode 100644 index 000000000..bf08cf0a7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardprofile.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardProfile type. 
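Review note: ShardPath.DataPath and StatePath above show the recurring string pattern: the raw bytes of a JSON string still carry their quotes, so the decoder unquotes them and keeps the raw text when unquoting fails. Reduced to a helper for clarity (a sketch; the generated code inlines this everywhere):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// decodeLooseString mirrors the generated pattern: try to unquote the
// raw message as a JSON string, fall back to the raw text otherwise.
func decodeLooseString(raw json.RawMessage) string {
	o := string(raw)
	if u, err := strconv.Unquote(o); err == nil {
		return u
	}
	return o
}

func main() {
	fmt.Println(decodeLooseString(json.RawMessage(`"node-1"`))) // node-1
	fmt.Println(decodeLooseString(json.RawMessage(`42`)))       // 42 (kept verbatim)
}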
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/profile.ts#L142-L152 +type ShardProfile struct { + Aggregations []AggregationProfile `json:"aggregations"` + Cluster string `json:"cluster"` + Dfs *DfsProfile `json:"dfs,omitempty"` + Fetch *FetchProfile `json:"fetch,omitempty"` + Id string `json:"id"` + Index string `json:"index"` + NodeId string `json:"node_id"` + Searches []SearchProfile `json:"searches"` + ShardId int `json:"shard_id"` +} + +func (s *ShardProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if err := dec.Decode(&s.Aggregations); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + + case "cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Cluster", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cluster = o + + case "dfs": + if err := dec.Decode(&s.Dfs); err != nil { + return fmt.Errorf("%s | %w", "Dfs", err) + } + + case "fetch": + if err := dec.Decode(&s.Fetch); err != nil { + return fmt.Errorf("%s | %w", "Fetch", err) + } + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "searches": + if err := dec.Decode(&s.Searches); err != nil { + return fmt.Errorf("%s | %w", "Searches", err) + } + + case "shard_id": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardId", err) + } + s.ShardId = value + case float64: + f := int(v) + s.ShardId = f + } + + } + } + return nil +} + +// NewShardProfile returns a ShardProfile. +func NewShardProfile() *ShardProfile { + r := &ShardProfile{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardquerycache.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardquerycache.go new file mode 100644 index 000000000..4ebecc79f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardquerycache.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
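Review note: across these types optionality is spelled in the Go type itself — ShardProfile above makes Dfs and Fetch pointers with omitempty while Id and ShardId are plain values, just as ShardFileSizeInfo earlier mixes *int64 and int64 — so "absent" and "zero" stay distinguishable. A stand-in round trip:

package main

import (
	"encoding/json"
	"fmt"
)

type stats struct {
	SizeInBytes int64  `json:"size_in_bytes"`   // required: zero means 0
	Count       *int64 `json:"count,omitempty"` // optional: nil means "absent"
}

func main() {
	var s stats
	_ = json.Unmarshal([]byte(`{"size_in_bytes":0}`), &s)
	fmt.Println(s.SizeInBytes, s.Count == nil) // 0 true

	out, _ := json.Marshal(s)
	fmt.Println(string(out)) // {"size_in_bytes":0} — count omitted, size kept
}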
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardQueryCache type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L146-L154 +type ShardQueryCache struct { + CacheCount int64 `json:"cache_count"` + CacheSize int64 `json:"cache_size"` + Evictions int64 `json:"evictions"` + HitCount int64 `json:"hit_count"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + MissCount int64 `json:"miss_count"` + TotalCount int64 `json:"total_count"` +} + +func (s *ShardQueryCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheCount", err) + } + s.CacheCount = value + case float64: + f := int64(v) + s.CacheCount = f + } + + case "cache_size": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CacheSize", err) + } + s.CacheSize = value + case float64: + f := int64(v) + s.CacheSize = f + } + + case "evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "hit_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "HitCount", err) + } + s.HitCount = value + case float64: + f := int64(v) + s.HitCount = f + } + + case "memory_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MemorySizeInBytes", err) + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MissCount", err) + } + s.MissCount = value + case float64: + f := int64(v) + s.MissCount = f + } + + case "total_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCount", err) + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + } + } + return nil +} + +// NewShardQueryCache returns a ShardQueryCache. 
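Review note: the New* constructors are mostly plain allocations (NewShardQueryCache just below returns an empty struct), but the ones wrapping maps — NewShardCommit and NewShapeQuery earlier — pre-allocate them so callers can assign keys immediately without a nil-map panic. A stand-in:

package main

import "fmt"

type commitInfo struct {
	UserData map[string]string
}

// newCommitInfo mirrors the generated constructors: allocate the
// map up front so the zero value is never used directly.
func newCommitInfo() *commitInfo {
	return &commitInfo{UserData: make(map[string]string)}
}

func main() {
	c := newCommitInfo()
	c.UserData["translog_uuid"] = "abc" // safe: map is non-nil
	fmt.Println(len(c.UserData))

	var zero commitInfo
	_ = zero // zero.UserData["x"] = "y" would panic: assignment to entry in nil map
}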
+func NewShardQueryCache() *ShardQueryCache { + r := &ShardQueryCache{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrecovery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrecovery.go new file mode 100644 index 000000000..92cef7c47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrecovery.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardRecovery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L118-L135 +type ShardRecovery struct { + Id int64 `json:"id"` + Index RecoveryIndexStatus `json:"index"` + Primary bool `json:"primary"` + Source RecoveryOrigin `json:"source"` + Stage string `json:"stage"` + Start *RecoveryStartStatus `json:"start,omitempty"` + StartTime DateTime `json:"start_time,omitempty"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + StopTime DateTime `json:"stop_time,omitempty"` + StopTimeInMillis *int64 `json:"stop_time_in_millis,omitempty"` + Target RecoveryOrigin `json:"target"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` + Translog TranslogStatus `json:"translog"` + Type string `json:"type"` + VerifyIndex VerifyIndex `json:"verify_index"` +} + +func (s *ShardRecovery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + s.Primary = value + case bool: + s.Primary = v + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "stage": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Stage", err) + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Stage = o + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return fmt.Errorf("%s | %w", "Start", err) + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "stop_time": + if err := dec.Decode(&s.StopTime); err != nil { + return fmt.Errorf("%s | %w", "StopTime", err) + } + + case "stop_time_in_millis": + if err := dec.Decode(&s.StopTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StopTimeInMillis", err) + } + + case "target": + if err := dec.Decode(&s.Target); err != nil { + return fmt.Errorf("%s | %w", "Target", err) + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + case "translog": + if err := dec.Decode(&s.Translog); err != nil { + return fmt.Errorf("%s | %w", "Translog", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + case "verify_index": + if err := dec.Decode(&s.VerifyIndex); err != nil { + return fmt.Errorf("%s | %w", "VerifyIndex", err) + } + + } + } + return nil +} + +// NewShardRecovery returns a ShardRecovery. +func NewShardRecovery() *ShardRecovery { + r := &ShardRecovery{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardretentionleases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardretentionleases.go new file mode 100644 index 000000000..d4eb2d5ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardretentionleases.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardRetentionLeases type. 
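Review note: ShardRecovery above pairs each time field with an _in_millis twin (start_time/start_time_in_millis, total_time/total_time_in_millis); the millis fields are the dependable ones to compute with, since the textual twins are typically only populated when human-readable output is requested. Converting them is plain time math (values invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values as they would arrive in the *_in_millis fields.
	startMillis := int64(1700000000000)
	totalMillis := int64(5250)

	start := time.UnixMilli(startMillis).UTC()
	total := time.Duration(totalMillis) * time.Millisecond
	fmt.Println(start.Format(time.RFC3339), total) // 2023-11-14T22:13:20Z 5.25s
}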
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L156-L160 +type ShardRetentionLeases struct { + Leases []ShardLease `json:"leases"` + PrimaryTerm int64 `json:"primary_term"` + Version int64 `json:"version"` +} + +func (s *ShardRetentionLeases) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "leases": + if err := dec.Decode(&s.Leases); err != nil { + return fmt.Errorf("%s | %w", "Leases", err) + } + + case "primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm", err) + } + s.PrimaryTerm = value + case float64: + f := int64(v) + s.PrimaryTerm = f + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewShardRetentionLeases returns a ShardRetentionLeases. +func NewShardRetentionLeases() *ShardRetentionLeases { + r := &ShardRetentionLeases{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrouting.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrouting.go new file mode 100644 index 000000000..c90a378d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardrouting.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate" +) + +// ShardRouting type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L162-L167 +type ShardRouting struct { + Node string `json:"node"` + Primary bool `json:"primary"` + RelocatingNode *string `json:"relocating_node,omitempty"` + State shardroutingstate.ShardRoutingState `json:"state"` +} + +func (s *ShardRouting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + s.Primary = value + case bool: + s.Primary = v + } + + case "relocating_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RelocatingNode", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RelocatingNode = &o + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + } + } + return nil +} + +// NewShardRouting returns a ShardRouting. +func NewShardRouting() *ShardRouting { + r := &ShardRouting{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicator.go new file mode 100644 index 000000000..47e5d638b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// ShardsAvailabilityIndicator type. 
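Review note: the same leniency covers booleans — ShardRouting.Primary above (and PrimaryActive, IsCustomDataPath before it) accepts true as well as "true". A companion sketch to the numeric helper earlier:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// looseBool accepts true as well as "true"/"false", like the
// generated decoders do for Primary and friends.
func looseBool(raw json.RawMessage) (bool, error) {
	var tmp any
	if err := json.Unmarshal(raw, &tmp); err != nil {
		return false, err
	}
	switch v := tmp.(type) {
	case string:
		return strconv.ParseBool(v)
	case bool:
		return v, nil
	}
	return false, fmt.Errorf("unexpected type %T", tmp)
}

func main() {
	a, _ := looseBool(json.RawMessage(`true`))
	b, _ := looseBool(json.RawMessage(`"true"`))
	fmt.Println(a, b) // true true
}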
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L107-L111 +type ShardsAvailabilityIndicator struct { + Details *ShardsAvailabilityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *ShardsAvailabilityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewShardsAvailabilityIndicator returns a ShardsAvailabilityIndicator. +func NewShardsAvailabilityIndicator() *ShardsAvailabilityIndicator { + r := &ShardsAvailabilityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicatordetails.go new file mode 100644 index 000000000..81619d3f6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsavailabilityindicatordetails.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardsAvailabilityIndicatorDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L112-L123 +type ShardsAvailabilityIndicatorDetails struct { + CreatingPrimaries int64 `json:"creating_primaries"` + CreatingReplicas int64 `json:"creating_replicas"` + InitializingPrimaries int64 `json:"initializing_primaries"` + InitializingReplicas int64 `json:"initializing_replicas"` + RestartingPrimaries int64 `json:"restarting_primaries"` + RestartingReplicas int64 `json:"restarting_replicas"` + StartedPrimaries int64 `json:"started_primaries"` + StartedReplicas int64 `json:"started_replicas"` + UnassignedPrimaries int64 `json:"unassigned_primaries"` + UnassignedReplicas int64 `json:"unassigned_replicas"` +} + +func (s *ShardsAvailabilityIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "creating_primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreatingPrimaries", err) + } + s.CreatingPrimaries = value + case float64: + f := int64(v) + s.CreatingPrimaries = f + } + + case "creating_replicas": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "CreatingReplicas", err) + } + s.CreatingReplicas = value + case float64: + f := int64(v) + s.CreatingReplicas = f + } + + case "initializing_primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitializingPrimaries", err) + } + s.InitializingPrimaries = value + case float64: + f := int64(v) + s.InitializingPrimaries = f + } + + case "initializing_replicas": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "InitializingReplicas", err) + } + s.InitializingReplicas = value + case float64: + f := int64(v) + s.InitializingReplicas = f + } + + case "restarting_primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RestartingPrimaries", err) + } + s.RestartingPrimaries = value + case float64: + f := int64(v) + s.RestartingPrimaries = f + } + + case "restarting_replicas": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RestartingReplicas", err) + } + s.RestartingReplicas = value + case float64: + f := int64(v) + s.RestartingReplicas = f + } + + case "started_primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StartedPrimaries", err) + } + s.StartedPrimaries = value + case float64: + f := int64(v) + s.StartedPrimaries = f + } + + case "started_replicas": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "StartedReplicas", err) + } + 
s.StartedReplicas = value + case float64: + f := int64(v) + s.StartedReplicas = f + } + + case "unassigned_primaries": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedPrimaries", err) + } + s.UnassignedPrimaries = value + case float64: + f := int64(v) + s.UnassignedPrimaries = f + } + + case "unassigned_replicas": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UnassignedReplicas", err) + } + s.UnassignedReplicas = value + case float64: + f := int64(v) + s.UnassignedReplicas = f + } + + } + } + return nil +} + +// NewShardsAvailabilityIndicatorDetails returns a ShardsAvailabilityIndicatorDetails. +func NewShardsAvailabilityIndicatorDetails() *ShardsAvailabilityIndicatorDetails { + r := &ShardsAvailabilityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicator.go new file mode 100644 index 000000000..23d4d3812 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicator.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus" +) + +// ShardsCapacityIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L192-L196 +type ShardsCapacityIndicator struct { + Details *ShardsCapacityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *ShardsCapacityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return fmt.Errorf("%s | %w", "Diagnosis", err) + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return fmt.Errorf("%s | %w", "Impacts", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Symptom", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewShardsCapacityIndicator returns a ShardsCapacityIndicator. +func NewShardsCapacityIndicator() *ShardsCapacityIndicator { + r := &ShardsCapacityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatordetails.go new file mode 100644 index 000000000..c1217f8e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatordetails.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ShardsCapacityIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L198-L201 +type ShardsCapacityIndicatorDetails struct { + Data ShardsCapacityIndicatorTierDetail `json:"data"` + Frozen ShardsCapacityIndicatorTierDetail `json:"frozen"` +} + +// NewShardsCapacityIndicatorDetails returns a ShardsCapacityIndicatorDetails. 
+func NewShardsCapacityIndicatorDetails() *ShardsCapacityIndicatorDetails { + r := &ShardsCapacityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatortierdetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatortierdetail.go new file mode 100644 index 000000000..b1fec9a3f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardscapacityindicatortierdetail.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardsCapacityIndicatorTierDetail type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L203-L206 +type ShardsCapacityIndicatorTierDetail struct { + CurrentUsedShards *int `json:"current_used_shards,omitempty"` + MaxShardsInCluster int `json:"max_shards_in_cluster"` +} + +func (s *ShardsCapacityIndicatorTierDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_used_shards": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentUsedShards", err) + } + s.CurrentUsedShards = &value + case float64: + f := int(v) + s.CurrentUsedShards = &f + } + + case "max_shards_in_cluster": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxShardsInCluster", err) + } + s.MaxShardsInCluster = value + case float64: + f := int(v) + s.MaxShardsInCluster = f + } + + } + } + return nil +} + +// NewShardsCapacityIndicatorTierDetail returns a ShardsCapacityIndicatorTierDetail. +func NewShardsCapacityIndicatorTierDetail() *ShardsCapacityIndicatorTierDetail { + r := &ShardsCapacityIndicatorTierDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsegmentrouting.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsegmentrouting.go new file mode 100644 index 000000000..1629b19b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsegmentrouting.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardSegmentRouting type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/segments/types.ts#L40-L44 +type ShardSegmentRouting struct { + Node string `json:"node"` + Primary bool `json:"primary"` + State string `json:"state"` +} + +func (s *ShardSegmentRouting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "primary": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Primary", err) + } + s.Primary = value + case bool: + s.Primary = v + } + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + } + } + return nil +} + +// NewShardSegmentRouting returns a ShardSegmentRouting. +func NewShardSegmentRouting() *ShardSegmentRouting { + r := &ShardSegmentRouting{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsequencenumber.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsequencenumber.go new file mode 100644 index 000000000..2759afabd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsequencenumber.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardSequenceNumber type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L176-L180 +type ShardSequenceNumber struct { + GlobalCheckpoint int64 `json:"global_checkpoint"` + LocalCheckpoint int64 `json:"local_checkpoint"` + MaxSeqNo int64 `json:"max_seq_no"` +} + +func (s *ShardSequenceNumber) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "global_checkpoint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "GlobalCheckpoint", err) + } + s.GlobalCheckpoint = value + case float64: + f := int64(v) + s.GlobalCheckpoint = f + } + + case "local_checkpoint": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LocalCheckpoint", err) + } + s.LocalCheckpoint = value + case float64: + f := int64(v) + s.LocalCheckpoint = f + } + + case "max_seq_no": + if err := dec.Decode(&s.MaxSeqNo); err != nil { + return fmt.Errorf("%s | %w", "MaxSeqNo", err) + } + + } + } + return nil +} + +// NewShardSequenceNumber returns a ShardSequenceNumber. +func NewShardSequenceNumber() *ShardSequenceNumber { + r := &ShardSequenceNumber{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsrecord.go new file mode 100644 index 000000000..def538801 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsrecord.go @@ -0,0 +1,1181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
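+//
+// Note: every field of ShardsRecord below is a *string, because the cat
+// shards API renders each column as pre-formatted text. Numeric columns
+// must be parsed by the caller; a minimal sketch, assuming rec is a
+// *ShardsRecord with a populated Docs column:
+//
+//	if rec.Docs != nil {
+//		docs, err := strconv.ParseInt(*rec.Docs, 10, 64)
+//		if err == nil {
+//			fmt.Printf("shard holds %d docs\n", docs)
+//		}
+//	}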
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e

+package types

+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)

+// ShardsRecord type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/shards/types.ts#L20-L427
+type ShardsRecord struct {
+ // BulkAvgSizeInBytes The average size in bytes of shard bulk operations.
+ BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"`
+ // BulkAvgTime The average time spent in shard bulk operations.
+ BulkAvgTime *string `json:"bulk.avg_time,omitempty"`
+ // BulkTotalOperations The number of bulk shard operations.
+ BulkTotalOperations *string `json:"bulk.total_operations,omitempty"`
+ // BulkTotalSizeInBytes The total size in bytes of shard bulk operations.
+ BulkTotalSizeInBytes *string `json:"bulk.total_size_in_bytes,omitempty"`
+ // BulkTotalTime The time spent in shard bulk operations.
+ BulkTotalTime *string `json:"bulk.total_time,omitempty"`
+ // CompletionSize The size of completion.
+ CompletionSize *string `json:"completion.size,omitempty"`
+ // Dataset The total size of the dataset (including the cache for partially mounted indices).
+ Dataset *string `json:"dataset,omitempty"`
+ // Docs The number of documents in the shard.
+ Docs *string `json:"docs,omitempty"`
+ // FielddataEvictions The fielddata cache evictions.
+ FielddataEvictions *string `json:"fielddata.evictions,omitempty"`
+ // FielddataMemorySize The used fielddata cache memory.
+ FielddataMemorySize *string `json:"fielddata.memory_size,omitempty"`
+ // FlushTotal The number of flushes.
+ FlushTotal *string `json:"flush.total,omitempty"`
+ // FlushTotalTime The time spent in flush.
+ FlushTotalTime *string `json:"flush.total_time,omitempty"`
+ // GetCurrent The number of current get operations.
+ GetCurrent *string `json:"get.current,omitempty"`
+ // GetExistsTime The time spent in successful get operations.
+ GetExistsTime *string `json:"get.exists_time,omitempty"`
+ // GetExistsTotal The number of successful get operations.
+ GetExistsTotal *string `json:"get.exists_total,omitempty"`
+ // GetMissingTime The time spent in failed get operations.
+ GetMissingTime *string `json:"get.missing_time,omitempty"`
+ // GetMissingTotal The number of failed get operations.
+ GetMissingTotal *string `json:"get.missing_total,omitempty"`
+ // GetTime The time spent in get operations.
+ GetTime *string `json:"get.time,omitempty"`
+ // GetTotal The number of get operations.
+ GetTotal *string `json:"get.total,omitempty"`
+ // Id The unique identifier for the node.
+ Id *string `json:"id,omitempty"`
+ // Index The index name.
+ Index *string `json:"index,omitempty"`
+ // IndexingDeleteCurrent The number of current deletion operations.
+ IndexingDeleteCurrent *string `json:"indexing.delete_current,omitempty"`
+ // IndexingDeleteTime The time spent in deletion operations.
+ IndexingDeleteTime *string `json:"indexing.delete_time,omitempty"`
+ // IndexingDeleteTotal The number of delete operations.
+ IndexingDeleteTotal *string `json:"indexing.delete_total,omitempty"`
+ // IndexingIndexCurrent The number of current indexing operations.
+ IndexingIndexCurrent *string `json:"indexing.index_current,omitempty"`
+ // IndexingIndexFailed The number of failed indexing operations.
+ IndexingIndexFailed *string `json:"indexing.index_failed,omitempty"`
+ // IndexingIndexTime The time spent in indexing operations.
+ IndexingIndexTime *string `json:"indexing.index_time,omitempty"`
+ // IndexingIndexTotal The number of indexing operations.
+ IndexingIndexTotal *string `json:"indexing.index_total,omitempty"`
+ // Ip The IP address of the node.
+ Ip *string `json:"ip,omitempty"`
+ // MergesCurrent The number of current merge operations.
+ MergesCurrent *string `json:"merges.current,omitempty"`
+ // MergesCurrentDocs The number of current merging documents.
+ MergesCurrentDocs *string `json:"merges.current_docs,omitempty"`
+ // MergesCurrentSize The size of current merge operations.
+ MergesCurrentSize *string `json:"merges.current_size,omitempty"`
+ // MergesTotal The number of completed merge operations.
+ MergesTotal *string `json:"merges.total,omitempty"`
+ // MergesTotalDocs The number of merged documents.
+ MergesTotalDocs *string `json:"merges.total_docs,omitempty"`
+ // MergesTotalSize The size of current merges.
+ MergesTotalSize *string `json:"merges.total_size,omitempty"`
+ // MergesTotalTime The time spent merging documents.
+ MergesTotalTime *string `json:"merges.total_time,omitempty"`
+ // Node The name of the node.
+ Node *string `json:"node,omitempty"`
+ // PathData The shard data path.
+ PathData *string `json:"path.data,omitempty"`
+ // PathState The shard state path.
+ PathState *string `json:"path.state,omitempty"`
+ // Prirep The shard type: `primary` or `replica`.
+ Prirep *string `json:"prirep,omitempty"`
+ // QueryCacheEvictions The query cache evictions.
+ QueryCacheEvictions *string `json:"query_cache.evictions,omitempty"`
+ // QueryCacheMemorySize The used query cache memory.
+ QueryCacheMemorySize *string `json:"query_cache.memory_size,omitempty"`
+ // RecoverysourceType The type of recovery source.
+ RecoverysourceType *string `json:"recoverysource.type,omitempty"`
+ // RefreshExternalTime The time spent in external refreshes.
+ RefreshExternalTime *string `json:"refresh.external_time,omitempty"`
+ // RefreshExternalTotal The total number of external refreshes.
+ RefreshExternalTotal *string `json:"refresh.external_total,omitempty"`
+ // RefreshListeners The number of pending refresh listeners.
+ RefreshListeners *string `json:"refresh.listeners,omitempty"`
+ // RefreshTime The time spent in refreshes.
+ RefreshTime *string `json:"refresh.time,omitempty"`
+ // RefreshTotal The total number of refreshes.
+ RefreshTotal *string `json:"refresh.total,omitempty"`
+ // SearchFetchCurrent The current fetch phase operations.
+ SearchFetchCurrent *string `json:"search.fetch_current,omitempty"`
+ // SearchFetchTime The time spent in fetch phase.
+ SearchFetchTime *string `json:"search.fetch_time,omitempty"`
+ // SearchFetchTotal The total number of fetch operations.
+ SearchFetchTotal *string `json:"search.fetch_total,omitempty"`
+ // SearchOpenContexts The number of open search contexts.
+ SearchOpenContexts *string `json:"search.open_contexts,omitempty"`
+ // SearchQueryCurrent The current query phase operations.
+ SearchQueryCurrent *string `json:"search.query_current,omitempty"`
+ // SearchQueryTime The time spent in query phase.
+ SearchQueryTime *string `json:"search.query_time,omitempty"`
+ // SearchQueryTotal The total number of query phase operations.
+ SearchQueryTotal *string `json:"search.query_total,omitempty"`
+ // SearchScrollCurrent The open scroll contexts.
+ SearchScrollCurrent *string `json:"search.scroll_current,omitempty"`
+ // SearchScrollTime The time scroll contexts were held open.
+ SearchScrollTime *string `json:"search.scroll_time,omitempty"` + // SearchScrollTotal The number of completed scroll contexts. + SearchScrollTotal *string `json:"search.scroll_total,omitempty"` + // SegmentsCount The number of segments. + SegmentsCount *string `json:"segments.count,omitempty"` + // SegmentsFixedBitsetMemory The memory used by fixed bit sets for nested object field types and export + // type filters for types referred in `_parent` fields. + SegmentsFixedBitsetMemory *string `json:"segments.fixed_bitset_memory,omitempty"` + // SegmentsIndexWriterMemory The memory used by the index writer. + SegmentsIndexWriterMemory *string `json:"segments.index_writer_memory,omitempty"` + // SegmentsMemory The memory used by segments. + SegmentsMemory *string `json:"segments.memory,omitempty"` + // SegmentsVersionMapMemory The memory used by the version map. + SegmentsVersionMapMemory *string `json:"segments.version_map_memory,omitempty"` + // SeqNoGlobalCheckpoint The global checkpoint. + SeqNoGlobalCheckpoint *string `json:"seq_no.global_checkpoint,omitempty"` + // SeqNoLocalCheckpoint The local checkpoint. + SeqNoLocalCheckpoint *string `json:"seq_no.local_checkpoint,omitempty"` + // SeqNoMax The maximum sequence number. + SeqNoMax *string `json:"seq_no.max,omitempty"` + // Shard The shard name. + Shard *string `json:"shard,omitempty"` + // State The shard state. + // Returned values include: + // `INITIALIZING`: The shard is recovering from a peer shard or gateway. + // `RELOCATING`: The shard is relocating. + // `STARTED`: The shard has started. + // `UNASSIGNED`: The shard is not assigned to any node. + State *string `json:"state,omitempty"` + // Store The disk space used by the shard. + Store *string `json:"store,omitempty"` + // SyncId The sync identifier. + SyncId *string `json:"sync_id,omitempty"` + // UnassignedAt The time at which the shard became unassigned in Coordinated Universal Time + // (UTC). + UnassignedAt *string `json:"unassigned.at,omitempty"` + // UnassignedDetails Additional details as to why the shard became unassigned. + // It does not explain why the shard is not assigned; use the cluster allocation + // explain API for that information. + UnassignedDetails *string `json:"unassigned.details,omitempty"` + // UnassignedFor The time at which the shard was requested to be unassigned in Coordinated + // Universal Time (UTC). + UnassignedFor *string `json:"unassigned.for,omitempty"` + // UnassignedReason The reason for the last change to the state of an unassigned shard. + // It does not explain why the shard is currently unassigned; use the cluster + // allocation explain API for that information. + // Returned values include: + // `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the + // shard. + // `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + // `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling + // index. + // `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed + // index. + // `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing + // an empty primary using the cluster reroute API. + // `INDEX_CLOSED`: Unassigned because the index was closed. + // `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + // `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + // `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster + // reroute API. 
+ // `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + // `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the + // cluster. + // `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was + // registered as restarting using the node shutdown API. + // `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary + // shard failed before the initialization completed. + // `REALLOCATED_REPLICA`: A better replica location is identified and causes the + // existing replica allocation to be cancelled. + // `REINITIALIZED`: When a shard moves from started back to initializing. + // `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + // `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute + // command. + UnassignedReason *string `json:"unassigned.reason,omitempty"` + // WarmerCurrent The number of current warmer operations. + WarmerCurrent *string `json:"warmer.current,omitempty"` + // WarmerTotal The total number of warmer operations. + WarmerTotal *string `json:"warmer.total,omitempty"` + // WarmerTotalTime The time spent in warmer operations. + WarmerTotalTime *string `json:"warmer.total_time,omitempty"` +} + +func (s *ShardsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkAvgSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgSizeInBytes = &o + + case "bulk.avg_time", "bati", "bulkAvgTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkAvgTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgTime = &o + + case "bulk.total_operations", "bto", "bulkTotalOperations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalOperations", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalOperations = &o + + case "bulk.total_size_in_bytes", "btsi", "bulkTotalSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalSizeInBytes", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "BulkTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompletionSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "dataset": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Dataset", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } 
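+ // The Unquote call above strips the JSON quotes; when the raw value is
+ // not a quoted string, the raw bytes are kept unchanged.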
+ s.Dataset = &o + + case "docs", "d", "dc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Docs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Docs = &o + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = &o + + case "fielddata.memory_size", "fm", "fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FielddataMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FlushTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetExistsTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetMissingTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTotal = &o + + case "get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "GetTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "id": + var tmp json.RawMessage 
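+ // Decode into a raw message first; the value is unquoted below when it
+ // arrives as a JSON string, and kept verbatim otherwise.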
+ if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "index", "i", "idx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = &o + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", "indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingDeleteTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexFailed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexingIndexTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case 
"merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesCurrentSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalDocs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", "mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MergesTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalTime = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "path.data", "pd", "dataPath": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PathData", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathData = &o + + case "path.state", "ps", "statsPath": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PathState", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathState = &o + + case "prirep", "p", "pr", "primaryOrReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Prirep", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prirep = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheEvictions", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryCacheMemorySize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "recoverysource.type", "rs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RecoverysourceType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RecoverysourceType = &o + + case "refresh.external_time", "rti", 
"refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTime = &o + + case "refresh.external_total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshExternalTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshListeners", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RefreshTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFetchTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchOpenContexts", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", "searchQueryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQueryTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o 
= string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchScrollTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", "sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsFixedBitsetMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsIndexWriterMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SegmentsVersionMapMemory", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "seq_no.global_checkpoint", "sqg", "globalCheckpoint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SeqNoGlobalCheckpoint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoGlobalCheckpoint = &o + + case "seq_no.local_checkpoint", "sql", "localCheckpoint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SeqNoLocalCheckpoint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoLocalCheckpoint = &o + + case "seq_no.max", "sqm", "maxSeqNo": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SeqNoMax", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoMax = &o + + case "shard", "s", 
"sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Shard", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "state", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "store", "sto": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Store = &o + + case "sync_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SyncId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SyncId = &o + + case "unassigned.at", "ua": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignedAt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedAt = &o + + case "unassigned.details", "ud": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignedDetails", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedDetails = &o + + case "unassigned.for", "uf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignedFor", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedFor = &o + + case "unassigned.reason", "ur": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UnassignedReason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedReason = &o + + case "warmer.current", "wc", "warmerCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerCurrent", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerCurrent = &o + + case "warmer.total", "wto", "warmerTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotal = &o + + case "warmer.total_time", "wtt", "warmerTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "WarmerTotalTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotalTime = &o + + } + } + return nil +} + +// NewShardsRecord returns a ShardsRecord. +func NewShardsRecord() *ShardsRecord { + r := &ShardsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardssegment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardssegment.go new file mode 100644 index 000000000..a3aca47ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardssegment.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardsSegment type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/segments/types.ts#L46-L51 +type ShardsSegment struct { + NumCommittedSegments int `json:"num_committed_segments"` + NumSearchSegments int `json:"num_search_segments"` + Routing ShardSegmentRouting `json:"routing"` + Segments map[string]Segment `json:"segments"` +} + +func (s *ShardsSegment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_committed_segments": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumCommittedSegments", err) + } + s.NumCommittedSegments = value + case float64: + f := int(v) + s.NumCommittedSegments = f + } + + case "num_search_segments": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumSearchSegments", err) + } + s.NumSearchSegments = value + case float64: + f := int(v) + s.NumSearchSegments = f + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "segments": + if s.Segments == nil { + s.Segments = make(map[string]Segment, 0) + } + if err := dec.Decode(&s.Segments); err != nil { + return fmt.Errorf("%s | %w", "Segments", err) + } + + } + } + return nil +} + +// NewShardsSegment returns a ShardsSegment. +func NewShardsSegment() *ShardsSegment { + r := &ShardsSegment{ + Segments: make(map[string]Segment), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummary.go new file mode 100644 index 000000000..cf3f40849 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummary.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ShardsStatsSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 +type ShardsStatsSummary struct { + Incremental ShardsStatsSummaryItem `json:"incremental"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + Time Duration `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis"` + Total ShardsStatsSummaryItem `json:"total"` +} + +func (s *ShardsStatsSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental": + if err := dec.Decode(&s.Incremental); err != nil { + return fmt.Errorf("%s | %w", "Incremental", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + } + } + return nil +} + +// NewShardsStatsSummary returns a ShardsStatsSummary. +func NewShardsStatsSummary() *ShardsStatsSummary { + r := &ShardsStatsSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummaryitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummaryitem.go new file mode 100644 index 000000000..e51c049c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardsstatssummaryitem.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardsStatsSummaryItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 +type ShardsStatsSummaryItem struct { + FileCount int64 `json:"file_count"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *ShardsStatsSummaryItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FileCount", err) + } + s.FileCount = value + case float64: + f := int64(v) + s.FileCount = f + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + +// NewShardsStatsSummaryItem returns a ShardsStatsSummaryItem. +func NewShardsStatsSummaryItem() *ShardsStatsSummaryItem { + r := &ShardsStatsSummaryItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstatistics.go new file mode 100644 index 000000000..e5de7bb34 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstatistics.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ShardStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L54-L69 +type ShardStatistics struct { + // Failed The number of shards the operation or search attempted to run on but failed. 
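+	// For example, a search response carries a header such as
+	// `"_shards": {"total": 2, "successful": 2, "skipped": 0, "failed": 0}`,
+	// which decodes into this type.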
+ Failed uint `json:"failed"` + Failures []ShardFailure `json:"failures,omitempty"` + Skipped *uint `json:"skipped,omitempty"` + // Successful The number of shards the operation or search succeeded on. + Successful uint `json:"successful"` + // Total The number of shards the operation or search will run on overall. + Total uint `json:"total"` +} + +// NewShardStatistics returns a ShardStatistics. +func NewShardStatistics() *ShardStatistics { + r := &ShardStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstore.go new file mode 100644 index 000000000..0498f9538 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstore.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstoreallocation" +) + +// ShardStore type. 
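+// Describes where a copy of a shard is stored, as reported by the indices
+// shard stores API; per-node store details are collected into the ShardStore
+// map keyed by node id.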
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L29-L36 +type ShardStore struct { + Allocation shardstoreallocation.ShardStoreAllocation `json:"allocation"` + AllocationId *string `json:"allocation_id,omitempty"` + ShardStore map[string]ShardStoreNode `json:"-"` + StoreException *ShardStoreException `json:"store_exception,omitempty"` +} + +func (s *ShardStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation": + if err := dec.Decode(&s.Allocation); err != nil { + return fmt.Errorf("%s | %w", "Allocation", err) + } + + case "allocation_id": + if err := dec.Decode(&s.AllocationId); err != nil { + return fmt.Errorf("%s | %w", "AllocationId", err) + } + + case "store_exception": + if err := dec.Decode(&s.StoreException); err != nil { + return fmt.Errorf("%s | %w", "StoreException", err) + } + + default: + + if key, ok := t.(string); ok { + if s.ShardStore == nil { + s.ShardStore = make(map[string]ShardStoreNode, 0) + } + raw := NewShardStoreNode() + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "ShardStore", err) + } + s.ShardStore[key] = *raw + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s ShardStore) MarshalJSON() ([]byte, error) { + type opt ShardStore + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.ShardStore { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "ShardStore") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewShardStore returns a ShardStore. +func NewShardStore() *ShardStore { + r := &ShardStore{ + ShardStore: make(map[string]ShardStoreNode), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreexception.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreexception.go new file mode 100644 index 000000000..d9dbc4a79 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreexception.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardStoreException type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L53-L56 +type ShardStoreException struct { + Reason string `json:"reason"` + Type string `json:"type"` +} + +func (s *ShardStoreException) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewShardStoreException returns a ShardStoreException. +func NewShardStoreException() *ShardStoreException { + r := &ShardStoreException{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreindex.go new file mode 100644 index 000000000..e8e7d3d81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstoreindex.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ShardStoreIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search_shards/SearchShardsResponse.ts#L62-L65 +type ShardStoreIndex struct { + Aliases []string `json:"aliases,omitempty"` + Filter *Query `json:"filter,omitempty"` +} + +// NewShardStoreIndex returns a ShardStoreIndex. 
+func NewShardStoreIndex() *ShardStoreIndex { + r := &ShardStoreIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorenode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorenode.go new file mode 100644 index 000000000..83a7cd0b3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorenode.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardStoreNode type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L38-L45 +type ShardStoreNode struct { + Attributes map[string]string `json:"attributes"` + EphemeralId *string `json:"ephemeral_id,omitempty"` + ExternalId *string `json:"external_id,omitempty"` + Name string `json:"name"` + Roles []string `json:"roles"` + TransportAddress string `json:"transport_address"` +} + +func (s *ShardStoreNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "ephemeral_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EphemeralId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EphemeralId = &o + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExternalId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = &o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewShardStoreNode returns a ShardStoreNode. 
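+// The returned value has its Attributes map pre-allocated, so callers can
+// assign attribute entries without a nil check.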
+func NewShardStoreNode() *ShardStoreNode { + r := &ShardStoreNode{ + Attributes: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorewrapper.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorewrapper.go new file mode 100644 index 000000000..e2a7a21ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstorewrapper.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// ShardStoreWrapper type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/shard_stores/types.ts#L58-L60 +type ShardStoreWrapper struct { + Stores []ShardStore `json:"stores"` +} + +// NewShardStoreWrapper returns a ShardStoreWrapper. +func NewShardStoreWrapper() *ShardStoreWrapper { + r := &ShardStoreWrapper{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstotalstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstotalstats.go new file mode 100644 index 000000000..00c89cc6f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shardstotalstats.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShardsTotalStats type. 
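+// Holds the total shard count reported as `total_count` in index stats.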
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/stats/types.ts#L182-L184 +type ShardsTotalStats struct { + TotalCount int64 `json:"total_count"` +} + +func (s *ShardsTotalStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalCount", err) + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + } + } + return nil +} + +// NewShardsTotalStats returns a ShardsTotalStats. +func NewShardsTotalStats() *ShardsTotalStats { + r := &ShardsTotalStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shared.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shared.go new file mode 100644 index 000000000..8b034ae0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shared.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Shared type. 
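+// Holds usage statistics for the shared cache used by partially mounted
+// searchable snapshots, as returned by the cache stats API.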
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 +type Shared struct { + BytesReadInBytes ByteSize `json:"bytes_read_in_bytes"` + BytesWrittenInBytes ByteSize `json:"bytes_written_in_bytes"` + Evictions int64 `json:"evictions"` + NumRegions int `json:"num_regions"` + Reads int64 `json:"reads"` + RegionSizeInBytes ByteSize `json:"region_size_in_bytes"` + SizeInBytes ByteSize `json:"size_in_bytes"` + Writes int64 `json:"writes"` +} + +func (s *Shared) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes_read_in_bytes": + if err := dec.Decode(&s.BytesReadInBytes); err != nil { + return fmt.Errorf("%s | %w", "BytesReadInBytes", err) + } + + case "bytes_written_in_bytes": + if err := dec.Decode(&s.BytesWrittenInBytes); err != nil { + return fmt.Errorf("%s | %w", "BytesWrittenInBytes", err) + } + + case "evictions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Evictions", err) + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "num_regions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumRegions", err) + } + s.NumRegions = value + case float64: + f := int(v) + s.NumRegions = f + } + + case "reads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Reads", err) + } + s.Reads = value + case float64: + f := int64(v) + s.Reads = f + } + + case "region_size_in_bytes": + if err := dec.Decode(&s.RegionSizeInBytes); err != nil { + return fmt.Errorf("%s | %w", "RegionSizeInBytes", err) + } + + case "size_in_bytes": + if err := dec.Decode(&s.SizeInBytes); err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + + case "writes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Writes", err) + } + s.Writes = value + case float64: + f := int64(v) + s.Writes = f + } + + } + } + return nil +} + +// NewShared returns a Shared. +func NewShared() *Shared { + r := &Shared{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepository.go new file mode 100644 index 000000000..cf804ecb0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepository.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SharedFileSystemRepository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L80-L90 +type SharedFileSystemRepository struct { + // Settings The repository settings. + Settings SharedFileSystemRepositorySettings `json:"settings"` + // Type The shared file system repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *SharedFileSystemRepository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SharedFileSystemRepository) MarshalJSON() ([]byte, error) { + type innerSharedFileSystemRepository SharedFileSystemRepository + tmp := innerSharedFileSystemRepository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "fs" + + return json.Marshal(tmp) +} + +// NewSharedFileSystemRepository returns a SharedFileSystemRepository. +func NewSharedFileSystemRepository() *SharedFileSystemRepository { + r := &SharedFileSystemRepository{} + + return r +} + +type SharedFileSystemRepositoryVariant interface { + SharedFileSystemRepositoryCaster() *SharedFileSystemRepository +} + +func (s *SharedFileSystemRepository) SharedFileSystemRepositoryCaster() *SharedFileSystemRepository { + return s +} + +func (s *SharedFileSystemRepository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepositorysettings.go new file mode 100644 index 000000000..4af3b7b7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sharedfilesystemrepositorysettings.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SharedFileSystemRepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L348-L375 +type SharedFileSystemRepositorySettings struct { + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // Location The location of the shared filesystem used to store and retrieve snapshots. + // This location must be registered in the `path.repo` setting on all master and + // data nodes in the cluster. + // Unlike `path.repo`, this setting supports only a single file path. + Location string `json:"location"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. + // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. + MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` + // Readonly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. 
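+	// For example, a secondary cluster could register the repository with a
+	// body such as `{"type": "fs", "settings": {"location": "/mnt/my_backups",
+	// "readonly": true}}` (the location shown is illustrative).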
+ // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. + Readonly *bool `json:"readonly,omitempty"` +} + +func (s *SharedFileSystemRepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunk_size": + if err := dec.Decode(&s.ChunkSize); err != nil { + return fmt.Errorf("%s | %w", "ChunkSize", err) + } + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "location": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Location = o + + case "max_number_of_snapshots": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfSnapshots", err) + } + s.MaxNumberOfSnapshots = &value + case float64: + f := int(v) + s.MaxNumberOfSnapshots = &f + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "readonly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Readonly", err) + } + s.Readonly = &value + case bool: + s.Readonly = &v + } + + } + } + return nil +} + +// NewSharedFileSystemRepositorySettings returns a SharedFileSystemRepositorySettings. +func NewSharedFileSystemRepositorySettings() *SharedFileSystemRepositorySettings { + r := &SharedFileSystemRepositorySettings{} + + return r +} + +type SharedFileSystemRepositorySettingsVariant interface { + SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings +} + +func (s *SharedFileSystemRepositorySettings) SharedFileSystemRepositorySettingsCaster() *SharedFileSystemRepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shingletokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shingletokenfilter.go new file mode 100644 index 000000000..3abc665cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shingletokenfilter.go @@ -0,0 +1,190 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ShingleTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L109-L123 +type ShingleTokenFilter struct { + // FillerToken String used in shingles as a replacement for empty positions that do not + // contain a token. This filler token is only used in shingles, not original + // unigrams. Defaults to an underscore (`_`). + FillerToken *string `json:"filler_token,omitempty"` + // MaxShingleSize Maximum number of tokens to concatenate when creating shingles. Defaults to + // `2`. + MaxShingleSize Stringifiedinteger `json:"max_shingle_size,omitempty"` + // MinShingleSize Minimum number of tokens to concatenate when creating shingles. Defaults to + // `2`. + MinShingleSize Stringifiedinteger `json:"min_shingle_size,omitempty"` + // OutputUnigrams If `true`, the output includes the original input tokens. If `false`, the + // output only includes shingles; the original input tokens are removed. + // Defaults to `true`. + OutputUnigrams *bool `json:"output_unigrams,omitempty"` + // OutputUnigramsIfNoShingles If `true`, the output includes the original input tokens only if no shingles + // are produced; if shingles are produced, the output only includes shingles. + // Defaults to `false`. + OutputUnigramsIfNoShingles *bool `json:"output_unigrams_if_no_shingles,omitempty"` + // TokenSeparator Separator used to concatenate adjacent tokens to form a shingle. Defaults to + // a space (`" "`). 
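+	// For example, with `"token_separator": "+"` the adjacent tokens `quick`
+	// and `brown` produce the shingle `quick+brown`.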
+ TokenSeparator *string `json:"token_separator,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ShingleTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filler_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FillerToken", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FillerToken = &o + + case "max_shingle_size": + if err := dec.Decode(&s.MaxShingleSize); err != nil { + return fmt.Errorf("%s | %w", "MaxShingleSize", err) + } + + case "min_shingle_size": + if err := dec.Decode(&s.MinShingleSize); err != nil { + return fmt.Errorf("%s | %w", "MinShingleSize", err) + } + + case "output_unigrams": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutputUnigrams", err) + } + s.OutputUnigrams = &value + case bool: + s.OutputUnigrams = &v + } + + case "output_unigrams_if_no_shingles": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OutputUnigramsIfNoShingles", err) + } + s.OutputUnigramsIfNoShingles = &value + case bool: + s.OutputUnigramsIfNoShingles = &v + } + + case "token_separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TokenSeparator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TokenSeparator = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ShingleTokenFilter) MarshalJSON() ([]byte, error) { + type innerShingleTokenFilter ShingleTokenFilter + tmp := innerShingleTokenFilter{ + FillerToken: s.FillerToken, + MaxShingleSize: s.MaxShingleSize, + MinShingleSize: s.MinShingleSize, + OutputUnigrams: s.OutputUnigrams, + OutputUnigramsIfNoShingles: s.OutputUnigramsIfNoShingles, + TokenSeparator: s.TokenSeparator, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "shingle" + + return json.Marshal(tmp) +} + +// NewShingleTokenFilter returns a ShingleTokenFilter. +func NewShingleTokenFilter() *ShingleTokenFilter { + r := &ShingleTokenFilter{} + + return r +} + +type ShingleTokenFilterVariant interface { + ShingleTokenFilterCaster() *ShingleTokenFilter +} + +func (s *ShingleTokenFilter) ShingleTokenFilterCaster() *ShingleTokenFilter { + return s +} + +func (s *ShingleTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shortnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shortnumberproperty.go new file mode 100644 index 000000000..9300b08c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shortnumberproperty.go @@ -0,0 +1,988 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// ShortNumberProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L182-L185 +type ShortNumberProperty struct { + Boost *Float64 `json:"boost,omitempty"` + Coerce *bool `json:"coerce,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + IgnoreMalformed *bool `json:"ignore_malformed,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *int `json:"null_value,omitempty"` + OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Script *Script `json:"script,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + // TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. + TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"` + // TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series dimension. + // Defaults to false. 
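+	// Accepts a timeseriesmetrictype value such as `counter` or `gauge`.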
+ TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "coerce": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Coerce", err) + } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + 
if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) 
+ } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := 
NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { + type innerShortNumberProperty ShortNumberProperty + tmp := innerShortNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "short" + + return json.Marshal(tmp) +} + +// NewShortNumberProperty returns a ShortNumberProperty. +func NewShortNumberProperty() *ShortNumberProperty { + r := &ShortNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type ShortNumberPropertyVariant interface { + ShortNumberPropertyCaster() *ShortNumberProperty +} + +func (s *ShortNumberProperty) ShortNumberPropertyCaster() *ShortNumberProperty { + return s +} + +func (s *ShortNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shrinkaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shrinkaction.go new file mode 100644 index 000000000..b668e80db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/shrinkaction.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
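
The generated decoders above are deliberately lenient: scalar mapping options such as ignore_above, index, store, and time_series_dimension are accepted either as native JSON scalars or as quoted strings, and every entry under fields and properties is dispatched on its "type" discriminator, falling back to "object" when the key is missing. A minimal sketch of what that buys a caller; the import path is assumed from the vendored module above:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		// Quoted "true" and native false both decode into *bool fields;
		// "raw" is routed to a KeywordProperty via its "type" key.
		src := []byte(`{
			"type": "short",
			"index": "true",
			"ignore_malformed": false,
			"fields": {"raw": {"type": "keyword", "ignore_above": "256"}}
		}`)

		prop := types.NewShortNumberProperty()
		if err := json.Unmarshal(src, prop); err != nil {
			panic(err)
		}
		fmt.Printf("index=%t raw=%T\n", *prop.Index, prop.Fields["raw"])
		// Output: index=true raw=*types.KeywordProperty
	}
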
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// ShrinkAction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L117-L121
+type ShrinkAction struct {
+	AllowWriteAfterShrink *bool    `json:"allow_write_after_shrink,omitempty"`
+	MaxPrimaryShardSize   ByteSize `json:"max_primary_shard_size,omitempty"`
+	NumberOfShards        *int     `json:"number_of_shards,omitempty"`
+}
+
+func (s *ShrinkAction) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allow_write_after_shrink":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllowWriteAfterShrink", err)
+				}
+				s.AllowWriteAfterShrink = &value
+			case bool:
+				s.AllowWriteAfterShrink = &v
+			}
+
+		case "max_primary_shard_size":
+			if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil {
+				return fmt.Errorf("%s | %w", "MaxPrimaryShardSize", err)
+			}
+
+		case "number_of_shards":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfShards", err)
+				}
+				s.NumberOfShards = &value
+			case float64:
+				f := int(v)
+				s.NumberOfShards = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewShrinkAction returns a ShrinkAction.
+func NewShrinkAction() *ShrinkAction {
+	r := &ShrinkAction{}
+
+	return r
+}
+
+type ShrinkActionVariant interface {
+	ShrinkActionCaster() *ShrinkAction
+}
+
+func (s *ShrinkAction) ShrinkActionCaster() *ShrinkAction {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsaggregate.go
new file mode 100644
index 000000000..b7de1047b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsaggregate.go
@@ -0,0 +1,123 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
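
ShrinkAction gets the same lenient treatment: number_of_shards may arrive as a bare number or a quoted string, and allow_write_after_shrink as a boolean or a string, so ILM policies serialized either way round-trip identically. A short sketch, reusing the imports from the previous example:

	action := types.NewShrinkAction()
	data := []byte(`{"number_of_shards": "1", "allow_write_after_shrink": true}`)
	if err := json.Unmarshal(data, action); err != nil {
		panic(err)
	}
	fmt.Println(*action.NumberOfShards, *action.AllowWriteAfterShrink) // 1 true
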
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SignificantLongTermsAggregate type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L670-L672
+type SignificantLongTermsAggregate struct {
+	BgCount  *int64                            `json:"bg_count,omitempty"`
+	Buckets  BucketsSignificantLongTermsBucket `json:"buckets"`
+	DocCount *int64                            `json:"doc_count,omitempty"`
+	Meta     Metadata                          `json:"meta,omitempty"`
+}
+
+func (s *SignificantLongTermsAggregate) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bg_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "BgCount", err)
+				}
+				s.BgCount = &value
+			case float64:
+				f := int64(v)
+				s.BgCount = &f
+			}
+
+		case "buckets":
+
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			source := bytes.NewReader(rawMsg)
+			localDec := json.NewDecoder(source)
+			switch rawMsg[0] {
+			case '{':
+				o := make(map[string]SignificantLongTermsBucket, 0)
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			case '[':
+				o := []SignificantLongTermsBucket{}
+				if err := localDec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Buckets", err)
+				}
+				s.Buckets = o
+			}
+
+		case "doc_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocCount", err)
+				}
+				s.DocCount = &value
+			case float64:
+				f := int64(v)
+				s.DocCount = &f
+			}
+
+		case "meta":
+			if err := dec.Decode(&s.Meta); err != nil {
+				return fmt.Errorf("%s | %w", "Meta", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSignificantLongTermsAggregate returns a SignificantLongTermsAggregate.
+func NewSignificantLongTermsAggregate() *SignificantLongTermsAggregate {
+	r := &SignificantLongTermsAggregate{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsbucket.go
new file mode 100644
index 000000000..16448df19
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantlongtermsbucket.go
@@ -0,0 +1,694 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
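
Buckets above is a union: Elasticsearch returns either a plain array of buckets or, for keyed responses, an object keyed by bucket name, and the decoder picks the shape by sniffing the first byte of the raw message ('{' versus '['). Consumers therefore type-switch on the decoded value. A sketch, assuming agg is a decoded *types.SignificantLongTermsAggregate:

	switch b := agg.Buckets.(type) {
	case []types.SignificantLongTermsBucket:
		for _, bucket := range b {
			fmt.Println(bucket.Key, bucket.DocCount, bucket.Score)
		}
	case map[string]types.SignificantLongTermsBucket:
		for name, bucket := range b {
			fmt.Println(name, bucket.DocCount)
		}
	}
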
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// SignificantLongTermsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L679-L682 +type SignificantLongTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + BgCount int64 `json:"bg_count"` + DocCount int64 `json:"doc_count"` + Key int64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` + Score Float64 `json:"score"` +} + +func (s *SignificantLongTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bg_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BgCount", err) + } + s.BgCount = value + case float64: + f := int64(v) + s.BgCount = f + } + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + s.Key = value + case float64: + f := int64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s SignificantLongTermsBucket) MarshalJSON() ([]byte, error) { + type opt SignificantLongTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewSignificantLongTermsBucket returns a SignificantLongTermsBucket. 
+func NewSignificantLongTermsBucket() *SignificantLongTermsBucket { + r := &SignificantLongTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsaggregate.go new file mode 100644 index 000000000..daa5162a3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsaggregate.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SignificantStringTermsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L684-L686 +type SignificantStringTermsAggregate struct { + BgCount *int64 `json:"bg_count,omitempty"` + Buckets BucketsSignificantStringTermsBucket `json:"buckets"` + DocCount *int64 `json:"doc_count,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *SignificantStringTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bg_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BgCount", err) + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f + } + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]SignificantStringTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []SignificantStringTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + 
} + return nil +} + +// NewSignificantStringTermsAggregate returns a SignificantStringTermsAggregate. +func NewSignificantStringTermsAggregate() *SignificantStringTermsAggregate { + r := &SignificantStringTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsbucket.go new file mode 100644 index 000000000..e5248c91a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significantstringtermsbucket.go @@ -0,0 +1,678 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// SignificantStringTermsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L688-L690 +type SignificantStringTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + BgCount int64 `json:"bg_count"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` + Score Float64 `json:"score"` +} + +func (s *SignificantStringTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bg_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BgCount", err) + } + s.BgCount = value + case float64: + f := int64(v) + s.BgCount = f + } + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + default: + + if value, ok := t.(string); ok { + if 
strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := 
NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s SignificantStringTermsBucket) MarshalJSON() ([]byte, error) { + type opt SignificantStringTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range 
s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewSignificantStringTermsBucket returns a SignificantStringTermsBucket. +func NewSignificantStringTermsBucket() *SignificantStringTermsBucket { + r := &SignificantStringTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttermsaggregation.go new file mode 100644 index 000000000..a045da1d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttermsaggregation.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint" +) + +// SignificantTermsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L817-L884 +type SignificantTermsAggregation struct { + // BackgroundFilter A background filter that can be used to focus in on significant terms within + // a narrower context, instead of the entire index. + BackgroundFilter *Query `json:"background_filter,omitempty"` + // ChiSquare Use Chi square, as described in "Information Retrieval", Manning et al., + // Chapter 13.5.2, as the significance score. + ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` + // Exclude Terms to exclude. + Exclude []string `json:"exclude,omitempty"` + // ExecutionHint Mechanism by which the aggregation should be executed: using field values + // directly or using global ordinals. + ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field from which to return significant terms. + Field *string `json:"field,omitempty"` + // Gnd Use Google normalized distance as described in "The Google Similarity + // Distance", Cilibrasi and Vitanyi, 2007, as the significance score. + Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` + // Include Terms to include. + Include TermsInclude `json:"include,omitempty"` + // Jlh Use JLH score as the significance score. + Jlh *EmptyObject `json:"jlh,omitempty"` + // MinDocCount Only return terms that are found in more than `min_doc_count` hits. 
+ MinDocCount *int64 `json:"min_doc_count,omitempty"` + // MutualInformation Use mutual information as described in "Information Retrieval", Manning et + // al., Chapter 13.5.1, as the significance score. + MutualInformation *MutualInformationHeuristic `json:"mutual_information,omitempty"` + // Percentage A simple calculation of the number of documents in the foreground sample with + // a term divided by the number of documents in the background with the term. + Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` + // ScriptHeuristic Customized score, implemented via a script. + ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` + // ShardMinDocCount Regulates the certainty a shard has if the term should actually be added to + // the candidate list or not with respect to the `min_doc_count`. + // Terms will only be considered if their local shard frequency within the set + // is higher than the `shard_min_doc_count`. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // ShardSize Can be used to control the volumes of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // Size The number of buckets returned out of the overall terms list. + Size *int `json:"size,omitempty"` +} + +func (s *SignificantTermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_filter": + if err := dec.Decode(&s.BackgroundFilter); err != nil { + return fmt.Errorf("%s | %w", "BackgroundFilter", err) + } + + case "chi_square": + if err := dec.Decode(&s.ChiSquare); err != nil { + return fmt.Errorf("%s | %w", "ChiSquare", err) + } + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "gnd": + if err := dec.Decode(&s.Gnd); err != nil { + return fmt.Errorf("%s | %w", "Gnd", err) + } + + case "include": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + include_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Include", err) + } + + switch t { + + case "num_partitions", "partition": + o := NewTermsPartition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = o + break include_field + + } + } + if s.Include == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s 
| %w", "Include", err) + } + } + + case "jlh": + if err := dec.Decode(&s.Jlh); err != nil { + return fmt.Errorf("%s | %w", "Jlh", err) + } + + case "min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "mutual_information": + if err := dec.Decode(&s.MutualInformation); err != nil { + return fmt.Errorf("%s | %w", "MutualInformation", err) + } + + case "percentage": + if err := dec.Decode(&s.Percentage); err != nil { + return fmt.Errorf("%s | %w", "Percentage", err) + } + + case "script_heuristic": + if err := dec.Decode(&s.ScriptHeuristic); err != nil { + return fmt.Errorf("%s | %w", "ScriptHeuristic", err) + } + + case "shard_min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShardMinDocCount", err) + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewSignificantTermsAggregation returns a SignificantTermsAggregation. +func NewSignificantTermsAggregation() *SignificantTermsAggregation { + r := &SignificantTermsAggregation{} + + return r +} + +type SignificantTermsAggregationVariant interface { + SignificantTermsAggregationCaster() *SignificantTermsAggregation +} + +func (s *SignificantTermsAggregation) SignificantTermsAggregationCaster() *SignificantTermsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttextaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttextaggregation.go new file mode 100644 index 000000000..a849d4f86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/significanttextaggregation.go @@ -0,0 +1,307 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint" +) + +// SignificantTextAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L886-L961 +type SignificantTextAggregation struct { + // BackgroundFilter A background filter that can be used to focus in on significant terms within + // a narrower context, instead of the entire index. + BackgroundFilter *Query `json:"background_filter,omitempty"` + // ChiSquare Use Chi square, as described in "Information Retrieval", Manning et al., + // Chapter 13.5.2, as the significance score. + ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` + // Exclude Values to exclude. + Exclude []string `json:"exclude,omitempty"` + // ExecutionHint Determines whether the aggregation will use field values directly or global + // ordinals. + ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field from which to return significant text. + Field *string `json:"field,omitempty"` + // FilterDuplicateText Whether to filter out duplicate text to deal with noisy data. + FilterDuplicateText *bool `json:"filter_duplicate_text,omitempty"` + // Gnd Use Google normalized distance as described in "The Google Similarity + // Distance", Cilibrasi and Vitanyi, 2007, as the significance score. + Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` + // Include Values to include. + Include TermsInclude `json:"include,omitempty"` + // Jlh Use JLH score as the significance score. + Jlh *EmptyObject `json:"jlh,omitempty"` + // MinDocCount Only return values that are found in more than `min_doc_count` hits. + MinDocCount *int64 `json:"min_doc_count,omitempty"` + // MutualInformation Use mutual information as described in "Information Retrieval", Manning et + // al., Chapter 13.5.1, as the significance score. + MutualInformation *MutualInformationHeuristic `json:"mutual_information,omitempty"` + // Percentage A simple calculation of the number of documents in the foreground sample with + // a term divided by the number of documents in the background with the term. + Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` + // ScriptHeuristic Customized score, implemented via a script. + ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` + // ShardMinDocCount Regulates the certainty a shard has if the values should actually be added to + // the candidate list or not with respect to the min_doc_count. + // Values will only be considered if their local shard frequency within the set + // is higher than the `shard_min_doc_count`. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // ShardSize The number of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // Size The number of buckets returned out of the overall terms list. + Size *int `json:"size,omitempty"` + // SourceFields Overrides the JSON `_source` fields from which text will be analyzed.
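+// A hedged sketch of narrowing analysis to specific source fields (the field
+// names below are illustrative, not taken from this file):
+//
+//	field := "content"
+//	agg := NewSignificantTextAggregation()
+//	agg.Field = &field
+//	agg.SourceFields = []string{"content", "title"}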
+ SourceFields []string `json:"source_fields,omitempty"` +} + +func (s *SignificantTextAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_filter": + if err := dec.Decode(&s.BackgroundFilter); err != nil { + return fmt.Errorf("%s | %w", "BackgroundFilter", err) + } + + case "chi_square": + if err := dec.Decode(&s.ChiSquare); err != nil { + return fmt.Errorf("%s | %w", "ChiSquare", err) + } + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "filter_duplicate_text": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FilterDuplicateText", err) + } + s.FilterDuplicateText = &value + case bool: + s.FilterDuplicateText = &v + } + + case "gnd": + if err := dec.Decode(&s.Gnd); err != nil { + return fmt.Errorf("%s | %w", "Gnd", err) + } + + case "include": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + include_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Include", err) + } + + switch t { + + case "num_partitions", "partition": + o := NewTermsPartition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = o + break include_field + + } + } + if s.Include == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + } + + case "jlh": + if err := dec.Decode(&s.Jlh); err != nil { + return fmt.Errorf("%s | %w", "Jlh", err) + } + + case "min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "mutual_information": + if err := dec.Decode(&s.MutualInformation); err != nil { + return fmt.Errorf("%s | %w", "MutualInformation", err) + } + + case "percentage": + if err := dec.Decode(&s.Percentage); err != nil { + return fmt.Errorf("%s | %w", "Percentage", err) + } + + case "script_heuristic": + if err := dec.Decode(&s.ScriptHeuristic); err != nil { + return fmt.Errorf("%s | %w", "ScriptHeuristic", err) + } + + case "shard_min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + 
return fmt.Errorf("%s | %w", "ShardMinDocCount", err) + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "source_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "SourceFields", err) + } + + s.SourceFields = append(s.SourceFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SourceFields); err != nil { + return fmt.Errorf("%s | %w", "SourceFields", err) + } + } + + } + } + return nil +} + +// NewSignificantTextAggregation returns a SignificantTextAggregation. +func NewSignificantTextAggregation() *SignificantTextAggregation { + r := &SignificantTextAggregation{} + + return r +} + +type SignificantTextAggregationVariant interface { + SignificantTextAggregationCaster() *SignificantTextAggregation +} + +func (s *SignificantTextAggregation) SignificantTextAggregationCaster() *SignificantTextAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simpleanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simpleanalyzer.go new file mode 100644 index 000000000..5d8536542 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simpleanalyzer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SimpleAnalyzer type. 
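+//
+// The MarshalJSON override below always emits `"type": "simple"`, so callers
+// never need to set Type by hand. A minimal sketch (illustrative only):
+//
+//	a := NewSimpleAnalyzer()
+//	data, _ := json.Marshal(a) // {"type":"simple"}
+//	_ = data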
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L367-L371 +type SimpleAnalyzer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SimpleAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimpleAnalyzer) MarshalJSON() ([]byte, error) { + type innerSimpleAnalyzer SimpleAnalyzer + tmp := innerSimpleAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple" + + return json.Marshal(tmp) +} + +// NewSimpleAnalyzer returns a SimpleAnalyzer. +func NewSimpleAnalyzer() *SimpleAnalyzer { + r := &SimpleAnalyzer{} + + return r +} + +type SimpleAnalyzerVariant interface { + SimpleAnalyzerCaster() *SimpleAnalyzer +} + +func (s *SimpleAnalyzer) SimpleAnalyzerCaster() *SimpleAnalyzer { + return s +} + +func (s *SimpleAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplemovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplemovingaverageaggregation.go new file mode 100644 index 000000000..59aaaa2bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplemovingaverageaggregation.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// SimpleMovingAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L273-L276 +type SimpleMovingAverageAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. 
+ // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EmptyObject `json:"settings"` + Window *int `json:"window,omitempty"` +} + +func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + case "minimize": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Minimize", err) + } + s.Minimize = &value + case bool: + s.Minimize = &v + } + + case "model": + if err := dec.Decode(&s.Model); err != nil { + return fmt.Errorf("%s | %w", "Model", err) + } + + case "predict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Predict", err) + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "window": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Window", err) + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimpleMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerSimpleMovingAverageAggregation SimpleMovingAverageAggregation + tmp := innerSimpleMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Minimize: s.Minimize, + Model: s.Model, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "simple" + + return json.Marshal(tmp) +} + +// NewSimpleMovingAverageAggregation returns a SimpleMovingAverageAggregation. 
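+//
+// MarshalJSON above pins Model to "simple", so a freshly constructed value
+// serializes as a valid simple moving-average model. A hedged sketch (the
+// window size is illustrative):
+//
+//	agg := NewSimpleMovingAverageAggregation()
+//	window := 30
+//	agg.Window = &window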
+func NewSimpleMovingAverageAggregation() *SimpleMovingAverageAggregation { + r := &SimpleMovingAverageAggregation{} + + return r +} + +type SimpleMovingAverageAggregationVariant interface { + SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation +} + +func (s *SimpleMovingAverageAggregation) SimpleMovingAverageAggregationCaster() *SimpleMovingAverageAggregation { + return s +} + +func (s *SimpleMovingAverageAggregation) MovingAverageAggregationCaster() *MovingAverageAggregation { + o := MovingAverageAggregation(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatternsplittokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatternsplittokenizer.go new file mode 100644 index 000000000..1b78d6813 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatternsplittokenizer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimplePatternSplitTokenizer type. 
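+//
+// A hedged construction sketch (the split pattern is illustrative):
+//
+//	pat := "_"
+//	tok := NewSimplePatternSplitTokenizer()
+//	tok.Pattern = &pat // MarshalJSON fixes Type to "simple_pattern_split"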
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L118-L121 +type SimplePatternSplitTokenizer struct { + Pattern *string `json:"pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SimplePatternSplitTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimplePatternSplitTokenizer) MarshalJSON() ([]byte, error) { + type innerSimplePatternSplitTokenizer SimplePatternSplitTokenizer + tmp := innerSimplePatternSplitTokenizer{ + Pattern: s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple_pattern_split" + + return json.Marshal(tmp) +} + +// NewSimplePatternSplitTokenizer returns a SimplePatternSplitTokenizer. +func NewSimplePatternSplitTokenizer() *SimplePatternSplitTokenizer { + r := &SimplePatternSplitTokenizer{} + + return r +} + +type SimplePatternSplitTokenizerVariant interface { + SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer +} + +func (s *SimplePatternSplitTokenizer) SimplePatternSplitTokenizerCaster() *SimplePatternSplitTokenizer { + return s +} + +func (s *SimplePatternSplitTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatterntokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatterntokenizer.go new file mode 100644 index 000000000..7b6b83df8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplepatterntokenizer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimplePatternTokenizer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L113-L116 +type SimplePatternTokenizer struct { + Pattern *string `json:"pattern,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SimplePatternTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pattern", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimplePatternTokenizer) MarshalJSON() ([]byte, error) { + type innerSimplePatternTokenizer SimplePatternTokenizer + tmp := innerSimplePatternTokenizer{ + Pattern: s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple_pattern" + + return json.Marshal(tmp) +} + +// NewSimplePatternTokenizer returns a SimplePatternTokenizer. +func NewSimplePatternTokenizer() *SimplePatternTokenizer { + r := &SimplePatternTokenizer{} + + return r +} + +type SimplePatternTokenizerVariant interface { + SimplePatternTokenizerCaster() *SimplePatternTokenizer +} + +func (s *SimplePatternTokenizer) SimplePatternTokenizerCaster() *SimplePatternTokenizer { + return s +} + +func (s *SimplePatternTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringflags.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringflags.go new file mode 100644 index 000000000..795cfcd8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringflags.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SimpleQueryStringFlags type alias. 
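+//
+// Flags combine pipe-separated, e.g. "OR|AND|PREFIX" (example values assumed
+// from the simple query string syntax, not from this file).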
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L774-L778 +type SimpleQueryStringFlags PipeSeparatedFlagsSimpleQueryStringFlag + +type SimpleQueryStringFlagsVariant interface { + SimpleQueryStringFlagsCaster() *SimpleQueryStringFlags +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringquery.go new file mode 100644 index 000000000..3de603156 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplequerystringquery.go @@ -0,0 +1,288 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator" +) + +// SimpleQueryStringQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/fulltext.ts#L837-L905 +type SimpleQueryStringQuery struct { + // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. + AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` + // Analyzer Analyzer used to convert text in the query string into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, the parser creates a match_phrase query for each multi-position + // token. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // DefaultOperator Default boolean logic used to interpret text in the query string if no + // operators are specified. + DefaultOperator *operator.Operator `json:"default_operator,omitempty"` + // Fields Array of fields you wish to search. + // Accepts wildcard expressions. + // You also can boost relevance scores for matches to particular fields using a + // caret (`^`) notation. + // Defaults to the `index.query.default_field index` setting, which has a + // default value of `*`. + Fields []string `json:"fields,omitempty"` + // Flags List of enabled operators for the simple query string syntax. 
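+// A hedged sketch of the enclosing query (query text and field boosts are
+// illustrative):
+//
+//	q := NewSimpleQueryStringQuery()
+//	q.Query = "\"fried eggs\" +(eggplant | potato) -frittata"
+//	q.Fields = []string{"title^5", "body"}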
+ Flags PipeSeparatedFlagsSimpleQueryStringFlag `json:"flags,omitempty"` + // FuzzyMaxExpansions Maximum number of terms to which the query expands for fuzzy matching. + FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` + // FuzzyPrefixLength Number of beginning characters left unchanged for fuzzy matching. + FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text value for a numeric + // field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Query Query string in the simple query string syntax you wish to parse and use for + // search. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // QuoteFieldSuffix Suffix appended to quoted text in the query string. + QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` +} + +func (s *SimpleQueryStringQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyze_wildcard": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AnalyzeWildcard", err) + } + s.AnalyzeWildcard = &value + case bool: + s.AnalyzeWildcard = &v + } + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AutoGenerateSynonymsPhraseQuery", err) + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "default_operator": + if err := dec.Decode(&s.DefaultOperator); err != nil { + return fmt.Errorf("%s | %w", "DefaultOperator", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "flags": + if err := dec.Decode(&s.Flags); err != nil { + return fmt.Errorf("%s | %w", "Flags", err) + } + + case "fuzzy_max_expansions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyMaxExpansions", err) + } + s.FuzzyMaxExpansions = &value + case float64: + f := int(v) + s.FuzzyMaxExpansions = &f + } + + case "fuzzy_prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return 
fmt.Errorf("%s | %w", "FuzzyPrefixLength", err) + } + s.FuzzyPrefixLength = &value + case float64: + f := int(v) + s.FuzzyPrefixLength = &f + } + + case "fuzzy_transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FuzzyTranspositions", err) + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "lenient": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "quote_field_suffix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QuoteFieldSuffix", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteFieldSuffix = &o + + } + } + return nil +} + +// NewSimpleQueryStringQuery returns a SimpleQueryStringQuery. +func NewSimpleQueryStringQuery() *SimpleQueryStringQuery { + r := &SimpleQueryStringQuery{} + + return r +} + +type SimpleQueryStringQueryVariant interface { + SimpleQueryStringQueryCaster() *SimpleQueryStringQuery +} + +func (s *SimpleQueryStringQuery) SimpleQueryStringQueryCaster() *SimpleQueryStringQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplevalueaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplevalueaggregate.go new file mode 100644 index 000000000..b399fd57c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simplevalueaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimpleValueAggregate type. 
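+//
+// Decoding note: Value may be absent when there was no data to aggregate, so
+// read it defensively (sketch):
+//
+//	if agg.Value != nil {
+//		fmt.Println(float64(*agg.Value))
+//	}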
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L238-L239 +type SimpleValueAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *SimpleValueAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewSimpleValueAggregate returns a SimpleValueAggregate. +func NewSimpleValueAggregate() *SimpleValueAggregate { + r := &SimpleValueAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedactions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedactions.go new file mode 100644 index 000000000..cad6d0f9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedactions.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SimulatedActions type. 
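+//
+// A hedged sketch of simulating every action in a watch:
+//
+//	sim := NewSimulatedActions()
+//	sim.UseAll = true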
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L90-L94 +type SimulatedActions struct { + Actions []string `json:"actions"` + All *SimulatedActions `json:"all,omitempty"` + UseAll bool `json:"use_all"` +} + +func (s *SimulatedActions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "all": + if err := dec.Decode(&s.All); err != nil { + return fmt.Errorf("%s | %w", "All", err) + } + + case "use_all": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UseAll", err) + } + s.UseAll = value + case bool: + s.UseAll = v + } + + } + } + return nil +} + +// NewSimulatedActions returns a SimulatedActions. +func NewSimulatedActions() *SimulatedActions { + r := &SimulatedActions{} + + return r +} + +type SimulatedActionsVariant interface { + SimulatedActionsCaster() *SimulatedActions +} + +func (s *SimulatedActions) SimulatedActionsCaster() *SimulatedActions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedocumentresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedocumentresult.go new file mode 100644 index 000000000..7a6de1046 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulatedocumentresult.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SimulateDocumentResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Simulation.ts#L45-L49 +type SimulateDocumentResult struct { + Doc *DocumentSimulation `json:"doc,omitempty"` + Error *ErrorCause `json:"error,omitempty"` + ProcessorResults []PipelineProcessorResult `json:"processor_results,omitempty"` +} + +// NewSimulateDocumentResult returns a SimulateDocumentResult. 
+func NewSimulateDocumentResult() *SimulateDocumentResult { + r := &SimulateDocumentResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulateingestdocumentresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulateingestdocumentresult.go new file mode 100644 index 000000000..8b36fe3ad --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/simulateingestdocumentresult.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SimulateIngestDocumentResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/simulate/ingest/SimulateIngestResponse.ts#L31-L33 +type SimulateIngestDocumentResult struct { + Doc *IngestDocumentSimulation `json:"doc,omitempty"` +} + +// NewSimulateIngestDocumentResult returns a SimulateIngestDocumentResult. +func NewSimulateIngestDocumentResult() *SimulateIngestDocumentResult { + r := &SimulateIngestDocumentResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizefield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizefield.go new file mode 100644 index 000000000..7e4adc2f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizefield.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SizeField type. 
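+//
+// A hedged sketch of enabling the `_size` meta-field in a mapping:
+//
+//	sf := NewSizeField()
+//	sf.Enabled = true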
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L54-L56 +type SizeField struct { + Enabled bool `json:"enabled"` +} + +func (s *SizeField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + +// NewSizeField returns a SizeField. +func NewSizeField() *SizeField { + r := &SizeField{} + + return r +} + +type SizeFieldVariant interface { + SizeFieldCaster() *SizeField +} + +func (s *SizeField) SizeFieldCaster() *SizeField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go new file mode 100644 index 000000000..be0d2f1cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SizeHttpHistogram type. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go
new file mode 100644
index 000000000..be0d2f1cf
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sizehttphistogram.go
@@ -0,0 +1,111 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SizeHttpHistogram type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L716-L720
+type SizeHttpHistogram struct {
+	Count   int64  `json:"count"`
+	GeBytes *int64 `json:"ge_bytes,omitempty"`
+	LtBytes *int64 `json:"lt_bytes,omitempty"`
+}
+
+func (s *SizeHttpHistogram) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int64(v)
+				s.Count = f
+			}
+
+		case "ge_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "GeBytes", err)
+				}
+				s.GeBytes = &value
+			case float64:
+				f := int64(v)
+				s.GeBytes = &f
+			}
+
+		case "lt_bytes":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "LtBytes", err)
+				}
+				s.LtBytes = &value
+			case float64:
+				f := int64(v)
+				s.LtBytes = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSizeHttpHistogram returns a SizeHttpHistogram.
+func NewSizeHttpHistogram() *SizeHttpHistogram {
+	r := &SizeHttpHistogram{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackaction.go
new file mode 100644
index 000000000..99cb52d29
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackaction.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackAction type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L91-L94
+type SlackAction struct {
+	Account *string      `json:"account,omitempty"`
+	Message SlackMessage `json:"message"`
+}
+
+func (s *SlackAction) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "account":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Account", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Account = &o
+
+		case "message":
+			if err := dec.Decode(&s.Message); err != nil {
+				return fmt.Errorf("%s | %w", "Message", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlackAction returns a SlackAction.
+func NewSlackAction() *SlackAction {
+	r := &SlackAction{}
+
+	return r
+}
+
+type SlackActionVariant interface {
+	SlackActionCaster() *SlackAction
+}
+
+func (s *SlackAction) SlackActionCaster() *SlackAction {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachment.go
new file mode 100644
index 000000000..4c32eca71
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachment.go
@@ -0,0 +1,252 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackAttachment type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L101-L117
+type SlackAttachment struct {
+	AuthorIcon *string                `json:"author_icon,omitempty"`
+	AuthorLink *string                `json:"author_link,omitempty"`
+	AuthorName string                 `json:"author_name"`
+	Color      *string                `json:"color,omitempty"`
+	Fallback   *string                `json:"fallback,omitempty"`
+	Fields     []SlackAttachmentField `json:"fields,omitempty"`
+	Footer     *string                `json:"footer,omitempty"`
+	FooterIcon *string                `json:"footer_icon,omitempty"`
+	ImageUrl   *string                `json:"image_url,omitempty"`
+	Pretext    *string                `json:"pretext,omitempty"`
+	Text       *string                `json:"text,omitempty"`
+	ThumbUrl   *string                `json:"thumb_url,omitempty"`
+	Title      string                 `json:"title"`
+	TitleLink  *string                `json:"title_link,omitempty"`
+	Ts         *int64                 `json:"ts,omitempty"`
+}
+
+func (s *SlackAttachment) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "author_icon":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AuthorIcon", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AuthorIcon = &o
+
+		case "author_link":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AuthorLink", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AuthorLink = &o
+
+		case "author_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "AuthorName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.AuthorName = o
+
+		case "color":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Color", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Color = &o
+
+		case "fallback":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Fallback", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Fallback = &o
+
+		case "fields":
+			if err := dec.Decode(&s.Fields); err != nil {
+				return fmt.Errorf("%s | %w", "Fields", err)
+			}
+
+		case "footer":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Footer", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Footer = &o
+
+		case "footer_icon":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "FooterIcon", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.FooterIcon = &o
+
+		case "image_url":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ImageUrl", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ImageUrl = &o
+
+		case "pretext":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Pretext", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Pretext = &o
+
+		case "text":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Text", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Text = &o
+
+		case "thumb_url":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ThumbUrl", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ThumbUrl = &o
+
+		case "title":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Title", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Title = o
+
+		case "title_link":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "TitleLink", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.TitleLink = &o
+
+		case "ts":
+			if err := dec.Decode(&s.Ts); err != nil {
+				return fmt.Errorf("%s | %w", "Ts", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlackAttachment returns a SlackAttachment.
+func NewSlackAttachment() *SlackAttachment {
+	r := &SlackAttachment{}
+
+	return r
+}
+
+type SlackAttachmentVariant interface {
+	SlackAttachmentCaster() *SlackAttachment
+}
+
+func (s *SlackAttachment) SlackAttachmentCaster() *SlackAttachment {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachmentfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachmentfield.go
new file mode 100644
index 000000000..7d3875c39
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackattachmentfield.go
@@ -0,0 +1,112 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackAttachmentField type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L119-L123
+type SlackAttachmentField struct {
+	Int   bool   `json:"short"`
+	Title string `json:"title"`
+	Value string `json:"value"`
+}
+
+func (s *SlackAttachmentField) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "short":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Int", err)
+				}
+				s.Int = value
+			case bool:
+				s.Int = v
+			}
+
+		case "title":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Title", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Title = o
+
+		case "value":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Value", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Value = o
+
+		}
+	}
+	return nil
+}
+
+// NewSlackAttachmentField returns a SlackAttachmentField.
+func NewSlackAttachmentField() *SlackAttachmentField {
+	r := &SlackAttachmentField{}
+
+	return r
+}
+
+type SlackAttachmentFieldVariant interface {
+	SlackAttachmentFieldCaster() *SlackAttachmentField
+}
+
+func (s *SlackAttachmentField) SlackAttachmentFieldCaster() *SlackAttachmentField {
+	return s
+}
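Worth flagging for reviewers: the JSON key `short` surfaces in Go as a field named `Int`, which looks like a code-generator artifact (presumably `short` collides with a keyword in other generator targets); the struct tag is what matters on the wire. A small standalone sketch, not part of the patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewSlackAttachmentField()
	f.Title = "status"
	f.Value = "green"
	f.Int = true // the generated name for Slack's "short" display flag
	b, _ := json.Marshal(f)
	fmt.Println(string(b)) // {"short":true,"title":"status","value":"green"}
}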
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackdynamicattachment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackdynamicattachment.go
new file mode 100644
index 000000000..567a3c997
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackdynamicattachment.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackDynamicAttachment type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L125-L128
+type SlackDynamicAttachment struct {
+	AttachmentTemplate SlackAttachment `json:"attachment_template"`
+	ListPath           string          `json:"list_path"`
+}
+
+func (s *SlackDynamicAttachment) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "attachment_template":
+			if err := dec.Decode(&s.AttachmentTemplate); err != nil {
+				return fmt.Errorf("%s | %w", "AttachmentTemplate", err)
+			}
+
+		case "list_path":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ListPath", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ListPath = o
+
+		}
+	}
+	return nil
+}
+
+// NewSlackDynamicAttachment returns a SlackDynamicAttachment.
+func NewSlackDynamicAttachment() *SlackDynamicAttachment {
+	r := &SlackDynamicAttachment{}
+
+	return r
+}
+
+type SlackDynamicAttachmentVariant interface {
+	SlackDynamicAttachmentCaster() *SlackDynamicAttachment
+}
+
+func (s *SlackDynamicAttachment) SlackDynamicAttachmentCaster() *SlackDynamicAttachment {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackmessage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackmessage.go
new file mode 100644
index 000000000..0ba15772c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackmessage.go
@@ -0,0 +1,128 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackMessage type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L130-L137
+type SlackMessage struct {
+	Attachments        []SlackAttachment       `json:"attachments"`
+	DynamicAttachments *SlackDynamicAttachment `json:"dynamic_attachments,omitempty"`
+	From               string                  `json:"from"`
+	Icon               *string                 `json:"icon,omitempty"`
+	Text               string                  `json:"text"`
+	To                 []string                `json:"to"`
+}
+
+func (s *SlackMessage) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "attachments":
+			if err := dec.Decode(&s.Attachments); err != nil {
+				return fmt.Errorf("%s | %w", "Attachments", err)
+			}
+
+		case "dynamic_attachments":
+			if err := dec.Decode(&s.DynamicAttachments); err != nil {
+				return fmt.Errorf("%s | %w", "DynamicAttachments", err)
+			}
+
+		case "from":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "From", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.From = o
+
+		case "icon":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Icon", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Icon = &o
+
+		case "text":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Text", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Text = o
+
+		case "to":
+			if err := dec.Decode(&s.To); err != nil {
+				return fmt.Errorf("%s | %w", "To", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlackMessage returns a SlackMessage.
+func NewSlackMessage() *SlackMessage {
+	r := &SlackMessage{}
+
+	return r
+}
+
+type SlackMessageVariant interface {
+	SlackMessageCaster() *SlackMessage
+}
+
+func (s *SlackMessage) SlackMessageCaster() *SlackMessage {
+	return s
+}
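Taken together, SlackAction, SlackMessage, and SlackAttachment mirror the Watcher Slack action payload. A minimal composition sketch (standalone Go, not part of the patch; all field values are invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	att := types.NewSlackAttachment()
	att.Title = "cluster health"

	msg := types.NewSlackMessage()
	msg.From = "watcher"
	msg.To = []string{"#ops"}
	msg.Text = "disk watermark exceeded"
	msg.Attachments = []types.SlackAttachment{*att}

	action := types.SlackAction{Message: *msg}
	b, _ := json.Marshal(action)
	fmt.Println(string(b))
}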
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackresult.go
new file mode 100644
index 000000000..43232c0d0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slackresult.go
@@ -0,0 +1,82 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlackResult type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L96-L99
+type SlackResult struct {
+	Account *string      `json:"account,omitempty"`
+	Message SlackMessage `json:"message"`
+}
+
+func (s *SlackResult) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "account":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Account", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Account = &o
+
+		case "message":
+			if err := dec.Decode(&s.Message); err != nil {
+				return fmt.Errorf("%s | %w", "Message", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlackResult returns a SlackResult.
+func NewSlackResult() *SlackResult {
+	r := &SlackResult{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slicedscroll.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slicedscroll.go
new file mode 100644
index 000000000..2a4133641
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slicedscroll.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlicedScroll type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/SlicedScroll.ts#L23-L27
+type SlicedScroll struct {
+	Field *string `json:"field,omitempty"`
+	Id    string  `json:"id"`
+	Max   int     `json:"max"`
+}
+
+func (s *SlicedScroll) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "max":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Max", err)
+				}
+				s.Max = value
+			case float64:
+				f := int(v)
+				s.Max = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlicedScroll returns a SlicedScroll.
+func NewSlicedScroll() *SlicedScroll {
+	r := &SlicedScroll{}
+
+	return r
+}
+
+type SlicedScrollVariant interface {
+	SlicedScrollCaster() *SlicedScroll
+}
+
+func (s *SlicedScroll) SlicedScrollCaster() *SlicedScroll {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slices.go
new file mode 100644
index 000000000..0767a015f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slices.go
@@ -0,0 +1,29 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// Slices holds the union for the following types:
+//
+//	int
+//	slicescalculation.SlicesCalculation
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L363-L368
+type Slices any
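SlicedScroll pairs with the Slices union just above it: Slices is deliberately `any` so callers can pass either a concrete slice count or the "auto" calculation. A sketch of partitioning a scroll across workers (standalone, not part of the patch; the id/max convention follows the struct fields, and the enum package path is inferred from the union's doc comment):

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	const workers = 4
	for i := 0; i < workers; i++ {
		sl := types.NewSlicedScroll()
		sl.Id = fmt.Sprintf("%d", i) // this worker's slice id
		sl.Max = workers             // total number of slices
		_ = sl                       // attach to the worker's search/scroll request
	}
	// For APIs that take Slices directly, an int is one valid member of the
	// union; the other is the "auto" value from
	// typedapi/types/enums/slicescalculation (path assumed from the doc above).
	var n types.Slices = workers
	_ = n
}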
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slm.go
new file mode 100644
index 000000000..69083e04f
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slm.go
@@ -0,0 +1,116 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// Slm type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L455-L458
+type Slm struct {
+	Available   bool        `json:"available"`
+	Enabled     bool        `json:"enabled"`
+	PolicyCount *int        `json:"policy_count,omitempty"`
+	PolicyStats *Statistics `json:"policy_stats,omitempty"`
+}
+
+func (s *Slm) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "available":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Available", err)
+				}
+				s.Available = value
+			case bool:
+				s.Available = v
+			}
+
+		case "enabled":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Enabled", err)
+				}
+				s.Enabled = value
+			case bool:
+				s.Enabled = v
+			}
+
+		case "policy_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PolicyCount", err)
+				}
+				s.PolicyCount = &value
+			case float64:
+				f := int(v)
+				s.PolicyCount = &f
+			}
+
+		case "policy_stats":
+			if err := dec.Decode(&s.PolicyStats); err != nil {
+				return fmt.Errorf("%s | %w", "PolicyStats", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlm returns a Slm.
+func NewSlm() *Slm {
+	r := &Slm{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicator.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicator.go
new file mode 100644
index 000000000..e461d6251
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicator.go
@@ -0,0 +1,102 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus"
+)
+
+// SlmIndicator type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L176-L180
+type SlmIndicator struct {
+	Details   *SlmIndicatorDetails                        `json:"details,omitempty"`
+	Diagnosis []Diagnosis                                 `json:"diagnosis,omitempty"`
+	Impacts   []Impact                                    `json:"impacts,omitempty"`
+	Status    indicatorhealthstatus.IndicatorHealthStatus `json:"status"`
+	Symptom   string                                      `json:"symptom"`
+}
+
+func (s *SlmIndicator) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "details":
+			if err := dec.Decode(&s.Details); err != nil {
+				return fmt.Errorf("%s | %w", "Details", err)
+			}
+
+		case "diagnosis":
+			if err := dec.Decode(&s.Diagnosis); err != nil {
+				return fmt.Errorf("%s | %w", "Diagnosis", err)
+			}
+
+		case "impacts":
+			if err := dec.Decode(&s.Impacts); err != nil {
+				return fmt.Errorf("%s | %w", "Impacts", err)
+			}
+
+		case "status":
+			if err := dec.Decode(&s.Status); err != nil {
+				return fmt.Errorf("%s | %w", "Status", err)
+			}
+
+		case "symptom":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Symptom", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Symptom = o
+
+		}
+	}
+	return nil
+}
+
+// NewSlmIndicator returns a SlmIndicator.
+func NewSlmIndicator() *SlmIndicator {
+	r := &SlmIndicator{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatordetails.go
new file mode 100644
index 000000000..84a7c1f88
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatordetails.go
@@ -0,0 +1,93 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode"
+)
+
+// SlmIndicatorDetails type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L181-L185
+type SlmIndicatorDetails struct {
+	Policies          int64                                         `json:"policies"`
+	SlmStatus         lifecycleoperationmode.LifecycleOperationMode `json:"slm_status"`
+	UnhealthyPolicies *SlmIndicatorUnhealthyPolicies                `json:"unhealthy_policies,omitempty"`
+}
+
+func (s *SlmIndicatorDetails) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "policies":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Policies", err)
+				}
+				s.Policies = value
+			case float64:
+				f := int64(v)
+				s.Policies = f
+			}
+
+		case "slm_status":
+			if err := dec.Decode(&s.SlmStatus); err != nil {
+				return fmt.Errorf("%s | %w", "SlmStatus", err)
+			}
+
+		case "unhealthy_policies":
+			if err := dec.Decode(&s.UnhealthyPolicies); err != nil {
+				return fmt.Errorf("%s | %w", "UnhealthyPolicies", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlmIndicatorDetails returns a SlmIndicatorDetails.
+func NewSlmIndicatorDetails() *SlmIndicatorDetails {
+	r := &SlmIndicatorDetails{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatorunhealthypolicies.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatorunhealthypolicies.go
new file mode 100644
index 000000000..cdee24b23
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmindicatorunhealthypolicies.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlmIndicatorUnhealthyPolicies type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L187-L190
+type SlmIndicatorUnhealthyPolicies struct {
+	Count                       int64            `json:"count"`
+	InvocationsSinceLastSuccess map[string]int64 `json:"invocations_since_last_success,omitempty"`
+}
+
+func (s *SlmIndicatorUnhealthyPolicies) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Count", err)
+				}
+				s.Count = value
+			case float64:
+				f := int64(v)
+				s.Count = f
+			}
+
+		case "invocations_since_last_success":
+			if s.InvocationsSinceLastSuccess == nil {
+				s.InvocationsSinceLastSuccess = make(map[string]int64, 0)
+			}
+			if err := dec.Decode(&s.InvocationsSinceLastSuccess); err != nil {
+				return fmt.Errorf("%s | %w", "InvocationsSinceLastSuccess", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlmIndicatorUnhealthyPolicies returns a SlmIndicatorUnhealthyPolicies.
+func NewSlmIndicatorUnhealthyPolicies() *SlmIndicatorUnhealthyPolicies {
+	r := &SlmIndicatorUnhealthyPolicies{
+		InvocationsSinceLastSuccess: make(map[string]int64),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmpolicy.go
new file mode 100644
index 000000000..fd6d78d3e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slmpolicy.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SLMPolicy type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L86-L92
+type SLMPolicy struct {
+	Config     *Configuration `json:"config,omitempty"`
+	Name       string         `json:"name"`
+	Repository string         `json:"repository"`
+	Retention  *Retention     `json:"retention,omitempty"`
+	Schedule   string         `json:"schedule"`
+}
+
+func (s *SLMPolicy) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "config":
+			if err := dec.Decode(&s.Config); err != nil {
+				return fmt.Errorf("%s | %w", "Config", err)
+			}
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		case "repository":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Repository", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Repository = o
+
+		case "retention":
+			if err := dec.Decode(&s.Retention); err != nil {
+				return fmt.Errorf("%s | %w", "Retention", err)
+			}
+
+		case "schedule":
+			if err := dec.Decode(&s.Schedule); err != nil {
+				return fmt.Errorf("%s | %w", "Schedule", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSLMPolicy returns a SLMPolicy.
+func NewSLMPolicy() *SLMPolicy {
+	r := &SLMPolicy{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogsettings.go
new file mode 100644
index 000000000..74be5db12
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogsettings.go
@@ -0,0 +1,122 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SlowlogSettings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L528-L533
+type SlowlogSettings struct {
+	Level     *string           `json:"level,omitempty"`
+	Reformat  *bool             `json:"reformat,omitempty"`
+	Source    *int              `json:"source,omitempty"`
+	Threshold *SlowlogTresholds `json:"threshold,omitempty"`
+}
+
+func (s *SlowlogSettings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "level":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Level", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Level = &o
+
+		case "reformat":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Reformat", err)
+				}
+				s.Reformat = &value
+			case bool:
+				s.Reformat = &v
+			}
+
+		case "source":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Source", err)
+				}
+				s.Source = &value
+			case float64:
+				f := int(v)
+				s.Source = &f
+			}
+
+		case "threshold":
+			if err := dec.Decode(&s.Threshold); err != nil {
+				return fmt.Errorf("%s | %w", "Threshold", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlowlogSettings returns a SlowlogSettings.
+func NewSlowlogSettings() *SlowlogSettings {
+	r := &SlowlogSettings{}
+
+	return r
+}
+
+type SlowlogSettingsVariant interface {
+	SlowlogSettingsCaster() *SlowlogSettings
+}
+
+func (s *SlowlogSettings) SlowlogSettingsCaster() *SlowlogSettings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholdlevels.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholdlevels.go
new file mode 100644
index 000000000..70179a4b9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholdlevels.go
@@ -0,0 +1,94 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// SlowlogTresholdLevels type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L540-L545
+type SlowlogTresholdLevels struct {
+	Debug Duration `json:"debug,omitempty"`
+	Info  Duration `json:"info,omitempty"`
+	Trace Duration `json:"trace,omitempty"`
+	Warn  Duration `json:"warn,omitempty"`
+}
+
+func (s *SlowlogTresholdLevels) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "debug":
+			if err := dec.Decode(&s.Debug); err != nil {
+				return fmt.Errorf("%s | %w", "Debug", err)
+			}
+
+		case "info":
+			if err := dec.Decode(&s.Info); err != nil {
+				return fmt.Errorf("%s | %w", "Info", err)
+			}
+
+		case "trace":
+			if err := dec.Decode(&s.Trace); err != nil {
+				return fmt.Errorf("%s | %w", "Trace", err)
+			}
+
+		case "warn":
+			if err := dec.Decode(&s.Warn); err != nil {
+				return fmt.Errorf("%s | %w", "Warn", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSlowlogTresholdLevels returns a SlowlogTresholdLevels.
+func NewSlowlogTresholdLevels() *SlowlogTresholdLevels {
+	r := &SlowlogTresholdLevels{}
+
+	return r
+}
+
+type SlowlogTresholdLevelsVariant interface {
+	SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels
+}
+
+func (s *SlowlogTresholdLevels) SlowlogTresholdLevelsCaster() *SlowlogTresholdLevels {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholds.go
new file mode 100644
index 000000000..22d6a5ab2
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/slowlogtresholds.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// SlowlogTresholds type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L535-L538
+type SlowlogTresholds struct {
+	Fetch *SlowlogTresholdLevels `json:"fetch,omitempty"`
+	Query *SlowlogTresholdLevels `json:"query,omitempty"`
+}
+
+// NewSlowlogTresholds returns a SlowlogTresholds.
+func NewSlowlogTresholds() *SlowlogTresholds {
+	r := &SlowlogTresholds{}
+
+	return r
+}
+
+type SlowlogTresholdsVariant interface {
+	SlowlogTresholdsCaster() *SlowlogTresholds
+}
+
+func (s *SlowlogTresholds) SlowlogTresholdsCaster() *SlowlogTresholds {
+	return s
+}
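The three Slowlog* types nest as settings, then thresholds, then per-level durations (note the spec's upstream spelling "Tresholds" is carried through the type names). A configuration sketch, standalone and not part of the patch, assuming Duration accepts its string form as elsewhere in typedapi:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	levels := types.NewSlowlogTresholdLevels()
	levels.Warn = "2s"     // Duration union: string form assumed
	levels.Debug = "500ms"

	th := types.NewSlowlogTresholds()
	th.Query = levels

	s := types.NewSlowlogSettings()
	s.Threshold = th
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"threshold":{"query":{"debug":"500ms","warn":"2s"}}}
}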
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/smoothingmodelcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/smoothingmodelcontainer.go
new file mode 100644
index 000000000..51c62b8b0
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/smoothingmodelcontainer.go
@@ -0,0 +1,89 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// SmoothingModelContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L446-L462
+type SmoothingModelContainer struct {
+	AdditionalSmoothingModelContainerProperty map[string]json.RawMessage `json:"-"`
+	// Laplace A smoothing model that uses an additive smoothing where a constant (typically
+	// `1.0` or smaller) is added to all counts to balance weights.
+	Laplace *LaplaceSmoothingModel `json:"laplace,omitempty"`
+	// LinearInterpolation A smoothing model that takes the weighted mean of the unigrams, bigrams, and
+	// trigrams based on user supplied weights (lambdas).
+	LinearInterpolation *LinearInterpolationSmoothingModel `json:"linear_interpolation,omitempty"`
+	// StupidBackoff A simple backoff model that backs off to lower order n-gram models if the
+	// higher order count is `0` and discounts the lower order n-gram model by a
+	// constant factor.
+	StupidBackoff *StupidBackoffSmoothingModel `json:"stupid_backoff,omitempty"`
+}
+
+// MarhsalJSON overrides marshalling for types with additional properties
+func (s SmoothingModelContainer) MarshalJSON() ([]byte, error) {
+	type opt SmoothingModelContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalSmoothingModelContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalSmoothingModelContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewSmoothingModelContainer returns a SmoothingModelContainer.
+func NewSmoothingModelContainer() *SmoothingModelContainer {
+	r := &SmoothingModelContainer{
+		AdditionalSmoothingModelContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type SmoothingModelContainerVariant interface {
+	SmoothingModelContainerCaster() *SmoothingModelContainer
+}
+
+func (s *SmoothingModelContainer) SmoothingModelContainerCaster() *SmoothingModelContainer {
+	return s
+}
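SmoothingModelContainer's MarshalJSON round-trips the struct through a map so that entries in AdditionalSmoothingModelContainerProperty are inlined as top-level JSON keys next to the known fields. A hedged sketch, not part of the patch (the Laplace field name `Alpha` is taken from the spec, not verified against this drop):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewSmoothingModelContainer()
	c.Laplace = &types.LaplaceSmoothingModel{Alpha: 0.7} // field name assumed from the spec
	c.AdditionalSmoothingModelContainerProperty["my_custom_model"] = json.RawMessage(`{}`)

	b, _ := json.Marshal(c)
	fmt.Println(string(b)) // {"laplace":{"alpha":0.7},"my_custom_model":{}}
}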
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotcurrentcounts.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotcurrentcounts.go
new file mode 100644
index 000000000..01c43fb86
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotcurrentcounts.go
@@ -0,0 +1,154 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// SnapshotCurrentCounts type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L642-L663
+type SnapshotCurrentCounts struct {
+	// Cleanups Cleanups in progress, not counted in concurrent_operations as they are not
+	// concurrent
+	Cleanups int `json:"cleanups"`
+	// ConcurrentOperations Sum of snapshots and snapshot_deletions
+	ConcurrentOperations int `json:"concurrent_operations"`
+	// ShardSnapshots Incomplete shard snapshots
+	ShardSnapshots int `json:"shard_snapshots"`
+	// SnapshotDeletions Snapshots deletions in progress
+	SnapshotDeletions int `json:"snapshot_deletions"`
+	// Snapshots Snapshots currently in progress
+	Snapshots int `json:"snapshots"`
+}
+
+func (s *SnapshotCurrentCounts) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "cleanups":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Cleanups", err)
+				}
+				s.Cleanups = value
+			case float64:
+				f := int(v)
+				s.Cleanups = f
+			}
+
+		case "concurrent_operations":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ConcurrentOperations", err)
+				}
+				s.ConcurrentOperations = value
+			case float64:
+				f := int(v)
+				s.ConcurrentOperations = f
+			}
+
+		case "shard_snapshots":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ShardSnapshots", err)
+				}
+				s.ShardSnapshots = value
+			case float64:
+				f := int(v)
+				s.ShardSnapshots = f
+			}
+
+		case "snapshot_deletions":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "SnapshotDeletions", err)
+				}
+				s.SnapshotDeletions = value
+			case float64:
+				f := int(v)
+				s.SnapshotDeletions = f
+			}
+
+		case "snapshots":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Snapshots", err)
+				}
+				s.Snapshots = value
+			case float64:
+				f := int(v)
+				s.Snapshots = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewSnapshotCurrentCounts returns a SnapshotCurrentCounts.
+func NewSnapshotCurrentCounts() *SnapshotCurrentCounts {
+	r := &SnapshotCurrentCounts{}
+
+	return r
+}
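The field docs above encode an invariant worth stating plainly: concurrent_operations is the sum of snapshots and snapshot_deletions, while cleanups is tracked outside that sum. A decoding sketch with invented numbers (standalone, not part of the patch):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	in := `{"snapshots":2,"snapshot_deletions":1,"concurrent_operations":3,"shard_snapshots":5,"cleanups":1}`
	var c types.SnapshotCurrentCounts
	if err := json.Unmarshal([]byte(in), &c); err != nil {
		panic(err)
	}
	// Invariant from the field docs: cleanups is excluded from the sum.
	fmt.Println(c.ConcurrentOperations == c.Snapshots+c.SnapshotDeletions) // true
}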
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SnapshotIndexStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 +type SnapshotIndexStats struct { + Shards map[string]SnapshotShardsStatus `json:"shards"` + ShardsStats SnapshotShardsStats `json:"shards_stats"` + Stats SnapshotStats `json:"stats"` +} + +// NewSnapshotIndexStats returns a SnapshotIndexStats. +func NewSnapshotIndexStats() *SnapshotIndexStats { + r := &SnapshotIndexStats{ + Shards: make(map[string]SnapshotShardsStatus), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotinfo.go new file mode 100644 index 000000000..a6e769bf6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotinfo.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SnapshotInfo type. 
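+//
+// A short usage sketch (assumes raw holds one snapshot object from a get
+// snapshot response; optional fields are pointers and may be nil):
+//
+//    info := NewSnapshotInfo()
+//    if err := json.Unmarshal(raw, info); err != nil {
+//        // handle malformed payload
+//    }
+//    if info.State != nil && *info.State == "SUCCESS" {
+//        // snapshot completed fully
+//    }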
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotInfo.ts#L41-L71
+type SnapshotInfo struct {
+ DataStreams []string `json:"data_streams"`
+ Duration Duration `json:"duration,omitempty"`
+ DurationInMillis *int64 `json:"duration_in_millis,omitempty"`
+ EndTime DateTime `json:"end_time,omitempty"`
+ EndTimeInMillis *int64 `json:"end_time_in_millis,omitempty"`
+ Failures []SnapshotShardFailure `json:"failures,omitempty"`
+ FeatureStates []InfoFeatureState `json:"feature_states,omitempty"`
+ IncludeGlobalState *bool `json:"include_global_state,omitempty"`
+ IndexDetails map[string]IndexDetails `json:"index_details,omitempty"`
+ Indices []string `json:"indices,omitempty"`
+ Metadata Metadata `json:"metadata,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Repository *string `json:"repository,omitempty"`
+ Shards *ShardStatistics `json:"shards,omitempty"`
+ Snapshot string `json:"snapshot"`
+ StartTime DateTime `json:"start_time,omitempty"`
+ StartTimeInMillis *int64 `json:"start_time_in_millis,omitempty"`
+ State *string `json:"state,omitempty"`
+ Uuid string `json:"uuid"`
+ Version *string `json:"version,omitempty"`
+ VersionId *int64 `json:"version_id,omitempty"`
+}
+
+func (s *SnapshotInfo) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "data_streams":
+ if err := dec.Decode(&s.DataStreams); err != nil {
+ return fmt.Errorf("%s | %w", "DataStreams", err)
+ }
+
+ case "duration":
+ if err := dec.Decode(&s.Duration); err != nil {
+ return fmt.Errorf("%s | %w", "Duration", err)
+ }
+
+ case "duration_in_millis":
+ if err := dec.Decode(&s.DurationInMillis); err != nil {
+ return fmt.Errorf("%s | %w", "DurationInMillis", err)
+ }
+
+ case "end_time":
+ if err := dec.Decode(&s.EndTime); err != nil {
+ return fmt.Errorf("%s | %w", "EndTime", err)
+ }
+
+ case "end_time_in_millis":
+ if err := dec.Decode(&s.EndTimeInMillis); err != nil {
+ return fmt.Errorf("%s | %w", "EndTimeInMillis", err)
+ }
+
+ case "failures":
+ if err := dec.Decode(&s.Failures); err != nil {
+ return fmt.Errorf("%s | %w", "Failures", err)
+ }
+
+ case "feature_states":
+ if err := dec.Decode(&s.FeatureStates); err != nil {
+ return fmt.Errorf("%s | %w", "FeatureStates", err)
+ }
+
+ case "include_global_state":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "IncludeGlobalState", err)
+ }
+ s.IncludeGlobalState = &value
+ case bool:
+ s.IncludeGlobalState = &v
+ }
+
+ case "index_details":
+ if s.IndexDetails == nil {
+ s.IndexDetails = make(map[string]IndexDetails, 0)
+ }
+ if err := dec.Decode(&s.IndexDetails); err != nil {
+ return fmt.Errorf("%s | %w", "IndexDetails", err)
+ }
+
+ case "indices":
+ if err := dec.Decode(&s.Indices); err != nil {
+ return fmt.Errorf("%s | %w", "Indices", err)
+ }
+
+ case "metadata":
+ if err := dec.Decode(&s.Metadata); err != nil {
+ return fmt.Errorf("%s | %w", "Metadata", err)
+ }
+
+ case "reason":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Reason", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Reason = &o
+
+ case "repository":
+ if err := dec.Decode(&s.Repository); err != nil {
+ return fmt.Errorf("%s | %w", "Repository", err)
+ }
+
+ case "shards":
+ if err := dec.Decode(&s.Shards); err != nil {
+ return fmt.Errorf("%s | %w", "Shards", err)
+ }
+
+ case "snapshot":
+ if err := dec.Decode(&s.Snapshot); err != nil {
+ return fmt.Errorf("%s | %w", "Snapshot", err)
+ }
+
+ case "start_time":
+ if err := dec.Decode(&s.StartTime); err != nil {
+ return fmt.Errorf("%s | %w", "StartTime", err)
+ }
+
+ case "start_time_in_millis":
+ if err := dec.Decode(&s.StartTimeInMillis); err != nil {
+ return fmt.Errorf("%s | %w", "StartTimeInMillis", err)
+ }
+
+ case "state":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "State", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.State = &o
+
+ case "uuid":
+ if err := dec.Decode(&s.Uuid); err != nil {
+ return fmt.Errorf("%s | %w", "Uuid", err)
+ }
+
+ case "version":
+ if err := dec.Decode(&s.Version); err != nil {
+ return fmt.Errorf("%s | %w", "Version", err)
+ }
+
+ case "version_id":
+ if err := dec.Decode(&s.VersionId); err != nil {
+ return fmt.Errorf("%s | %w", "VersionId", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewSnapshotInfo returns a SnapshotInfo.
+func NewSnapshotInfo() *SnapshotInfo {
+ r := &SnapshotInfo{
+ IndexDetails: make(map[string]IndexDetails),
+ }
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotlifecycle.go
new file mode 100644
index 000000000..16d293b01
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotlifecycle.go
@@ -0,0 +1,126 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// SnapshotLifecycle type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L38-L59
+type SnapshotLifecycle struct {
+ InProgress *InProgress `json:"in_progress,omitempty"`
+ LastFailure *Invocation `json:"last_failure,omitempty"`
+ LastSuccess *Invocation `json:"last_success,omitempty"`
+ // ModifiedDate The last time the policy was modified.
+ ModifiedDate DateTime `json:"modified_date,omitempty"`
+ ModifiedDateMillis int64 `json:"modified_date_millis"`
+ // NextExecution The next time the policy will run.
+ NextExecution DateTime `json:"next_execution,omitempty"` + NextExecutionMillis int64 `json:"next_execution_millis"` + Policy SLMPolicy `json:"policy"` + Stats Statistics `json:"stats"` + // Version The version of the snapshot policy. + // Only the latest version is stored and incremented when the policy is updated. + Version int64 `json:"version"` +} + +func (s *SnapshotLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "in_progress": + if err := dec.Decode(&s.InProgress); err != nil { + return fmt.Errorf("%s | %w", "InProgress", err) + } + + case "last_failure": + if err := dec.Decode(&s.LastFailure); err != nil { + return fmt.Errorf("%s | %w", "LastFailure", err) + } + + case "last_success": + if err := dec.Decode(&s.LastSuccess); err != nil { + return fmt.Errorf("%s | %w", "LastSuccess", err) + } + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDate", err) + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return fmt.Errorf("%s | %w", "ModifiedDateMillis", err) + } + + case "next_execution": + if err := dec.Decode(&s.NextExecution); err != nil { + return fmt.Errorf("%s | %w", "NextExecution", err) + } + + case "next_execution_millis": + if err := dec.Decode(&s.NextExecutionMillis); err != nil { + return fmt.Errorf("%s | %w", "NextExecutionMillis", err) + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewSnapshotLifecycle returns a SnapshotLifecycle. +func NewSnapshotLifecycle() *SnapshotLifecycle { + r := &SnapshotLifecycle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotnodeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotnodeinfo.go new file mode 100644 index 000000000..095323d11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotnodeinfo.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SnapshotNodeInfo type. 
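+//
+// Decoding sketch (assumes raw is the node object of a repository analyze
+// response):
+//
+//    var node SnapshotNodeInfo
+//    if err := json.Unmarshal(raw, &node); err != nil {
+//        // handle malformed payload
+//    }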
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L110-L113 +type SnapshotNodeInfo struct { + Id string `json:"id"` + Name string `json:"name"` +} + +func (s *SnapshotNodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + } + } + return nil +} + +// NewSnapshotNodeInfo returns a SnapshotNodeInfo. +func NewSnapshotNodeInfo() *SnapshotNodeInfo { + r := &SnapshotNodeInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotpolicystats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotpolicystats.go new file mode 100644 index 000000000..695564f1e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotpolicystats.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SnapshotPolicyStats type. 
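+//
+// The decoder below accepts each counter either as a JSON number or as a
+// quoted string, so both of these sketched payloads (policy name is
+// hypothetical) yield SnapshotsTaken == 42:
+//
+//    {"policy":"nightly","snapshots_taken":42}
+//    {"policy":"nightly","snapshots_taken":"42"}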
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L153-L159
+type SnapshotPolicyStats struct {
+ Policy string `json:"policy"`
+ SnapshotDeletionFailures int64 `json:"snapshot_deletion_failures"`
+ SnapshotsDeleted int64 `json:"snapshots_deleted"`
+ SnapshotsFailed int64 `json:"snapshots_failed"`
+ SnapshotsTaken int64 `json:"snapshots_taken"`
+}
+
+func (s *SnapshotPolicyStats) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "policy":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Policy", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Policy = o
+
+ case "snapshot_deletion_failures":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "SnapshotDeletionFailures", err)
+ }
+ s.SnapshotDeletionFailures = value
+ case float64:
+ f := int64(v)
+ s.SnapshotDeletionFailures = f
+ }
+
+ case "snapshots_deleted":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "SnapshotsDeleted", err)
+ }
+ s.SnapshotsDeleted = value
+ case float64:
+ f := int64(v)
+ s.SnapshotsDeleted = f
+ }
+
+ case "snapshots_failed":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "SnapshotsFailed", err)
+ }
+ s.SnapshotsFailed = value
+ case float64:
+ f := int64(v)
+ s.SnapshotsFailed = f
+ }
+
+ case "snapshots_taken":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "SnapshotsTaken", err)
+ }
+ s.SnapshotsTaken = value
+ case float64:
+ f := int64(v)
+ s.SnapshotsTaken = f
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewSnapshotPolicyStats returns a SnapshotPolicyStats.
+func NewSnapshotPolicyStats() *SnapshotPolicyStats {
+ r := &SnapshotPolicyStats{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotresponseitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotresponseitem.go
new file mode 100644
index 000000000..1f2c63d29
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotresponseitem.go
@@ -0,0 +1,80 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// SnapshotResponseItem type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/get/SnapshotGetResponse.ts#L49-L53
+type SnapshotResponseItem struct {
+ Error *ErrorCause `json:"error,omitempty"`
+ Repository string `json:"repository"`
+ Snapshots []SnapshotInfo `json:"snapshots,omitempty"`
+}
+
+func (s *SnapshotResponseItem) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "error":
+ if err := dec.Decode(&s.Error); err != nil {
+ return fmt.Errorf("%s | %w", "Error", err)
+ }
+
+ case "repository":
+ if err := dec.Decode(&s.Repository); err != nil {
+ return fmt.Errorf("%s | %w", "Repository", err)
+ }
+
+ case "snapshots":
+ if err := dec.Decode(&s.Snapshots); err != nil {
+ return fmt.Errorf("%s | %w", "Snapshots", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewSnapshotResponseItem returns a SnapshotResponseItem.
+func NewSnapshotResponseItem() *SnapshotResponseItem {
+ r := &SnapshotResponseItem{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotrestore.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotrestore.go
new file mode 100644
index 000000000..9d940c287
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotrestore.go
@@ -0,0 +1,88 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// SnapshotRestore type.
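+//
+// Usage sketch (assumes raw is the snapshot object of a restore response
+// issued with wait_for_completion=true, and that ShardStatistics exposes a
+// Failed counter as elsewhere in this package):
+//
+//    var restore SnapshotRestore
+//    if err := json.Unmarshal(raw, &restore); err != nil {
+//        // handle malformed payload
+//    }
+//    _ = restore.Shards.Failed // shards that could not be restored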
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/restore/SnapshotRestoreResponse.ts#L30-L34 +type SnapshotRestore struct { + Indices []string `json:"indices"` + Shards ShardStatistics `json:"shards"` + Snapshot string `json:"snapshot"` +} + +func (s *SnapshotRestore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return fmt.Errorf("%s | %w", "Shards", err) + } + + case "snapshot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Snapshot", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = o + + } + } + return nil +} + +// NewSnapshotRestore returns a SnapshotRestore. +func NewSnapshotRestore() *SnapshotRestore { + r := &SnapshotRestore{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardfailure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardfailure.go new file mode 100644 index 000000000..5941743be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardfailure.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SnapshotShardFailure type. 
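+//
+// Iteration sketch over the failures reported on a SnapshotInfo (see
+// snapshotinfo.go in this package):
+//
+//    for _, f := range info.Failures {
+//        fmt.Printf("shard %d of %s failed: %s\n", f.ShardId, f.Index, f.Reason)
+//    }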
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardFailure.ts#L23-L30 +type SnapshotShardFailure struct { + Index string `json:"index"` + IndexUuid string `json:"index_uuid"` + NodeId *string `json:"node_id,omitempty"` + Reason string `json:"reason"` + ShardId int `json:"shard_id"` + Status string `json:"status"` +} + +func (s *SnapshotShardFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "index_uuid": + if err := dec.Decode(&s.IndexUuid); err != nil { + return fmt.Errorf("%s | %w", "IndexUuid", err) + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "shard_id": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardId", err) + } + s.ShardId = value + case float64: + f := int(v) + s.ShardId = f + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = o + + } + } + return nil +} + +// NewSnapshotShardFailure returns a SnapshotShardFailure. +func NewSnapshotShardFailure() *SnapshotShardFailure { + r := &SnapshotShardFailure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstats.go new file mode 100644 index 000000000..cb5350304 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstats.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SnapshotShardsStats type. 
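+//
+// Progress sketch (assumes stats was decoded from a snapshot status
+// response):
+//
+//    if stats.Total > 0 {
+//        pct := 100 * float64(stats.Done) / float64(stats.Total)
+//        _ = pct // completed shards as a percentage
+//    }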
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L47
+type SnapshotShardsStats struct {
+ // Done The number of shards that initialized, started, and finalized successfully.
+ Done int64 `json:"done"`
+ // Failed The number of shards that failed to be included in the snapshot.
+ Failed int64 `json:"failed"`
+ // Finalizing The number of shards that are finalizing but are not done.
+ Finalizing int64 `json:"finalizing"`
+ // Initializing The number of shards that are still initializing.
+ Initializing int64 `json:"initializing"`
+ // Started The number of shards that have started but are not finalized.
+ Started int64 `json:"started"`
+ // Total The total number of shards included in the snapshot.
+ Total int64 `json:"total"`
+}
+
+func (s *SnapshotShardsStats) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "done":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Done", err)
+ }
+ s.Done = value
+ case float64:
+ f := int64(v)
+ s.Done = f
+ }
+
+ case "failed":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Failed", err)
+ }
+ s.Failed = value
+ case float64:
+ f := int64(v)
+ s.Failed = f
+ }
+
+ case "finalizing":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Finalizing", err)
+ }
+ s.Finalizing = value
+ case float64:
+ f := int64(v)
+ s.Finalizing = f
+ }
+
+ case "initializing":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Initializing", err)
+ }
+ s.Initializing = value
+ case float64:
+ f := int64(v)
+ s.Initializing = f
+ }
+
+ case "started":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Started", err)
+ }
+ s.Started = value
+ case float64:
+ f := int64(v)
+ s.Started = f
+ }
+
+ case "total":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "Total", err)
+ }
+ s.Total = value
+ case float64:
+ f := int64(v)
+ s.Total = f
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewSnapshotShardsStats returns a SnapshotShardsStats.
+func NewSnapshotShardsStats() *SnapshotShardsStats {
+ r := &SnapshotShardsStats{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstatus.go
new file mode 100644
index 000000000..c4ffdafce
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotshardsstatus.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardsstatsstage"
+)
+
+// SnapshotShardsStatus type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27
+type SnapshotShardsStatus struct {
+ Stage shardsstatsstage.ShardsStatsStage `json:"stage"`
+ Stats ShardsStatsSummary `json:"stats"`
+}
+
+// NewSnapshotShardsStatus returns a SnapshotShardsStatus.
+func NewSnapshotShardsStatus() *SnapshotShardsStatus {
+ r := &SnapshotShardsStatus{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotsrecord.go
new file mode 100644
index 000000000..9b0083958
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotsrecord.go
@@ -0,0 +1,250 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// SnapshotsRecord type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/snapshots/types.ts#L24-L96
+type SnapshotsRecord struct {
+ // Duration The time it took the snapshot process to complete, in time units.
+ Duration Duration `json:"duration,omitempty"`
+ // EndEpoch The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot
+ // process ended.
+ EndEpoch StringifiedEpochTimeUnitSeconds `json:"end_epoch,omitempty"`
+ // EndTime The time (HH:MM:SS) at which the snapshot process ended.
+ EndTime *string `json:"end_time,omitempty"`
+ // FailedShards The number of failed shards in the snapshot.
+ FailedShards *string `json:"failed_shards,omitempty"`
+ // Id The unique identifier for the snapshot.
+ Id *string `json:"id,omitempty"`
+ // Indices The number of indices in the snapshot.
+ Indices *string `json:"indices,omitempty"`
+ // Reason The reason for any snapshot failures.
+ Reason *string `json:"reason,omitempty"`
+ // Repository The repository name.
+ Repository *string `json:"repository,omitempty"`
+ // StartEpoch The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot
+ // process started.
+ StartEpoch StringifiedEpochTimeUnitSeconds `json:"start_epoch,omitempty"`
+ // StartTime The time (HH:MM:SS) at which the snapshot process started.
+ StartTime ScheduleTimeOfDay `json:"start_time,omitempty"`
+ // Status The state of the snapshot process.
+ // Returned values include:
+ // `FAILED`: The snapshot process failed.
+ // `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster
+ // version.
+ // `IN_PROGRESS`: The snapshot process started but has not completed.
+ // `PARTIAL`: The snapshot process completed with a partial success.
+ // `SUCCESS`: The snapshot process completed with a full success.
+ Status *string `json:"status,omitempty"`
+ // SuccessfulShards The number of successful shards in the snapshot.
+ SuccessfulShards *string `json:"successful_shards,omitempty"`
+ // TotalShards The total number of shards in the snapshot.
+ TotalShards *string `json:"total_shards,omitempty"`
+}
+
+func (s *SnapshotsRecord) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "duration", "dur":
+ if err := dec.Decode(&s.Duration); err != nil {
+ return fmt.Errorf("%s | %w", "Duration", err)
+ }
+
+ case "end_epoch", "ete", "endEpoch":
+ if err := dec.Decode(&s.EndEpoch); err != nil {
+ return fmt.Errorf("%s | %w", "EndEpoch", err)
+ }
+
+ case "end_time", "eti", "endTime":
+ if err := dec.Decode(&s.EndTime); err != nil {
+ return fmt.Errorf("%s | %w", "EndTime", err)
+ }
+
+ case "failed_shards", "fs":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "FailedShards", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.FailedShards = &o
+
+ case "id", "snapshot":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Id", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Id = &o
+
+ case "indices", "i":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Indices", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Indices = &o
+
+ case "reason", "r":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Reason", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Reason = &o
+
+ case "repository", "re", "repo":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Repository", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Repository = &o
+
+ case "start_epoch", "ste", "startEpoch":
+ if err := dec.Decode(&s.StartEpoch); err != nil {
+ return fmt.Errorf("%s | %w", "StartEpoch", err)
+ }
+
+ case "start_time", "sti", "startTime":
+ message := json.RawMessage{}
+ if err := dec.Decode(&message); err != nil {
+ return fmt.Errorf("%s | %w", "StartTime", err)
+ }
+ keyDec := json.NewDecoder(bytes.NewReader(message))
+ starttime_field:
+ for {
+ t, err := keyDec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return fmt.Errorf("%s | %w", "StartTime", err)
+ }
+
+ switch t {
+
+ case "hour", "minute":
+ o := NewHourAndMinute()
+ localDec := json.NewDecoder(bytes.NewReader(message))
+ if err := localDec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "StartTime", err)
+ }
+ s.StartTime = o
+ break starttime_field
+
+ }
+ }
+ if s.StartTime == nil {
+ localDec := json.NewDecoder(bytes.NewReader(message))
+ if err := localDec.Decode(&s.StartTime); err != nil {
+ return fmt.Errorf("%s | %w", "StartTime", err)
+ }
+ }
+
+ case "status", "s":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Status", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Status = &o
+
+ case "successful_shards", "ss":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "SuccessfulShards", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.SuccessfulShards = &o
+
+ case "total_shards", "ts":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "TotalShards", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.TotalShards = &o
+
+ }
+ }
+ return nil
+}
+
+// NewSnapshotsRecord returns a SnapshotsRecord.
+func NewSnapshotsRecord() *SnapshotsRecord {
+ r := &SnapshotsRecord{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotstats.go
new file mode 100644
index 000000000..dc60c1d42
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snapshotstats.go
@@ -0,0 +1,101 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// SnapshotStats type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotStats.ts#L23-L42 +type SnapshotStats struct { + // Incremental The number and size of files that still need to be copied as part of the + // incremental snapshot. + // For completed snapshots, this property indicates the number and size of files + // that were not already in the repository and were copied as part of the + // incremental snapshot. + Incremental FileCountSnapshotStats `json:"incremental"` + // StartTimeInMillis The time, in milliseconds, when the snapshot creation process started. + StartTimeInMillis int64 `json:"start_time_in_millis"` + Time Duration `json:"time,omitempty"` + // TimeInMillis The total time, in milliseconds, that it took for the snapshot process to + // complete. + TimeInMillis int64 `json:"time_in_millis"` + // Total The total number and size of files that are referenced by the snapshot. + Total FileCountSnapshotStats `json:"total"` +} + +func (s *SnapshotStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental": + if err := dec.Decode(&s.Incremental); err != nil { + return fmt.Errorf("%s | %w", "Incremental", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return fmt.Errorf("%s | %w", "Time", err) + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TimeInMillis", err) + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + } + } + return nil +} + +// NewSnapshotStats returns a SnapshotStats. +func NewSnapshotStats() *SnapshotStats { + r := &SnapshotStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballanalyzer.go new file mode 100644 index 000000000..8844c16f0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballanalyzer.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage" +) + +// SnowballAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L374-L380 +type SnowballAnalyzer struct { + Language snowballlanguage.SnowballLanguage `json:"language"` + Stopwords StopWords `json:"stopwords,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SnowballAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SnowballAnalyzer) MarshalJSON() ([]byte, error) { + type innerSnowballAnalyzer SnowballAnalyzer + tmp := innerSnowballAnalyzer{ + Language: s.Language, + Stopwords: s.Stopwords, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "snowball" + + return json.Marshal(tmp) +} + +// NewSnowballAnalyzer returns a SnowballAnalyzer. +func NewSnowballAnalyzer() *SnowballAnalyzer { + r := &SnowballAnalyzer{} + + return r +} + +type SnowballAnalyzerVariant interface { + SnowballAnalyzerCaster() *SnowballAnalyzer +} + +func (s *SnowballAnalyzer) SnowballAnalyzerCaster() *SnowballAnalyzer { + return s +} + +func (s *SnowballAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballtokenfilter.go new file mode 100644 index 000000000..c958224d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/snowballtokenfilter.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage" +) + +// SnowballTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L411-L415 +type SnowballTokenFilter struct { + // Language Controls the language used by the stemmer. + Language *snowballlanguage.SnowballLanguage `json:"language,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SnowballTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SnowballTokenFilter) MarshalJSON() ([]byte, error) { + type innerSnowballTokenFilter SnowballTokenFilter + tmp := innerSnowballTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "snowball" + + return json.Marshal(tmp) +} + +// NewSnowballTokenFilter returns a SnowballTokenFilter. +func NewSnowballTokenFilter() *SnowballTokenFilter { + r := &SnowballTokenFilter{} + + return r +} + +type SnowballTokenFilterVariant interface { + SnowballTokenFilterCaster() *SnowballTokenFilter +} + +func (s *SnowballTokenFilter) SnowballTokenFilterCaster() *SnowballTokenFilter { + return s +} + +func (s *SnowballTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/softdeletes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/softdeletes.go new file mode 100644 index 000000000..08d68c47a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/softdeletes.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SoftDeletes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L51-L64 +type SoftDeletes struct { + // Enabled Indicates whether soft deletes are enabled on the index. + Enabled *bool `json:"enabled,omitempty"` + // RetentionLease The maximum period to retain a shard history retention lease before it is + // considered expired. + // Shard history retention leases ensure that soft deletes are retained during + // merges on the Lucene + // index. If a soft delete is merged away before it can be replicated to a + // follower the following + // process will fail due to incomplete history on the leader. + RetentionLease *RetentionLease `json:"retention_lease,omitempty"` +} + +func (s *SoftDeletes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "retention_lease": + if err := dec.Decode(&s.RetentionLease); err != nil { + return fmt.Errorf("%s | %w", "RetentionLease", err) + } + + } + } + return nil +} + +// NewSoftDeletes returns a SoftDeletes. +func NewSoftDeletes() *SoftDeletes { + r := &SoftDeletes{} + + return r +} + +type SoftDeletesVariant interface { + SoftDeletesCaster() *SoftDeletes +} + +func (s *SoftDeletes) SoftDeletesCaster() *SoftDeletes { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soranianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soranianalyzer.go new file mode 100644 index 000000000..ce5521d00 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soranianalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SoraniAnalyzer type. 
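+//
+// Serialization sketch: the MarshalJSON override below always injects
+// "type":"sorani", so a value built with the constructor needs no explicit
+// Type:
+//
+//    a := NewSoraniAnalyzer()
+//    b, _ := json.Marshal(a) // roughly {"type":"sorani"}
+//    _ = b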
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L289-L294 +type SoraniAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SoraniAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SoraniAnalyzer) MarshalJSON() ([]byte, error) { + type innerSoraniAnalyzer SoraniAnalyzer + tmp := innerSoraniAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "sorani" + + return json.Marshal(tmp) +} + +// NewSoraniAnalyzer returns a SoraniAnalyzer. +func NewSoraniAnalyzer() *SoraniAnalyzer { + r := &SoraniAnalyzer{} + + return r +} + +type SoraniAnalyzerVariant interface { + SoraniAnalyzerCaster() *SoraniAnalyzer +} + +func (s *SoraniAnalyzer) SoraniAnalyzerCaster() *SoraniAnalyzer { + return s +} + +func (s *SoraniAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soraninormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soraninormalizationtokenfilter.go new file mode 100644 index 000000000..a6160fad9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/soraninormalizationtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SoraniNormalizationTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L543-L545 +type SoraniNormalizationTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SoraniNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SoraniNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerSoraniNormalizationTokenFilter SoraniNormalizationTokenFilter + tmp := innerSoraniNormalizationTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "sorani_normalization" + + return json.Marshal(tmp) +} + +// NewSoraniNormalizationTokenFilter returns a SoraniNormalizationTokenFilter. +func NewSoraniNormalizationTokenFilter() *SoraniNormalizationTokenFilter { + r := &SoraniNormalizationTokenFilter{} + + return r +} + +type SoraniNormalizationTokenFilterVariant interface { + SoraniNormalizationTokenFilterCaster() *SoraniNormalizationTokenFilter +} + +func (s *SoraniNormalizationTokenFilter) SoraniNormalizationTokenFilterCaster() *SoraniNormalizationTokenFilter { + return s +} + +func (s *SoraniNormalizationTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sort.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sort.go new file mode 100644 index 000000000..d62404581 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sort.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Sort type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L104-L104 +type Sort []SortCombinations + +type SortVariant interface { + SortCaster() *Sort +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortcombinations.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortcombinations.go new file mode 100644 index 000000000..9802c4925 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortcombinations.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SortCombinations holds the union for the following types: +// +// string +// SortOptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L98-L102 +type SortCombinations any + +type SortCombinationsVariant interface { + SortCombinationsCaster() *SortCombinations +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortoptions.go new file mode 100644 index 000000000..c3693191c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortoptions.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// SortOptions type. 
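+
+// Editorial sketch (not generated code): Sort is a heterogeneous list, so
+// plain field names and SortOptions values mix freely in one slice. Field
+// sorts live in the SortOptions map declared below and are inlined at the
+// top level by its custom MarshalJSON, producing e.g.
+// {"timestamp":{"order":"desc"}}. Assumes FieldSort's Order field and the
+// sortorder enum declared elsewhere in this package.
+func exampleSort() Sort {
+	desc := sortorder.Desc
+	byTime := NewSortOptions()
+	byTime.SortOptions["timestamp"] = FieldSort{Order: &desc}
+	return Sort{"_score", *byTime}
+}
+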
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/sort.ts#L86-L96
+type SortOptions struct {
+	Doc_         *ScoreSort           `json:"_doc,omitempty"`
+	GeoDistance_ *GeoDistanceSort     `json:"_geo_distance,omitempty"`
+	Score_       *ScoreSort           `json:"_score,omitempty"`
+	Script_      *ScriptSort          `json:"_script,omitempty"`
+	SortOptions  map[string]FieldSort `json:"-"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s SortOptions) MarshalJSON() ([]byte, error) {
+	type opt SortOptions
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.SortOptions {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "SortOptions")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewSortOptions returns a SortOptions.
+func NewSortOptions() *SortOptions {
+	r := &SortOptions{
+		SortOptions: make(map[string]FieldSort),
+	}
+
+	return r
+}
+
+type SortOptionsVariant interface {
+	SortOptionsCaster() *SortOptions
+}
+
+func (s *SortOptions) SortOptionsCaster() *SortOptions {
+	return s
+}
+
+func (s *SortOptions) SortCombinationsCaster() *SortCombinations {
+	o := SortCombinations(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortprocessor.go
new file mode 100644
index 000000000..65764c083
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sortprocessor.go
@@ -0,0 +1,156 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+)
+
+// SortProcessor type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1497-L1513
+type SortProcessor struct {
+	// Description Description of the processor.
+	// Useful for describing the purpose of the processor or its configuration.
+	Description *string `json:"description,omitempty"`
+	// Field The field to be sorted.
+	Field string `json:"field"`
+	// If Conditionally execute the processor.
+ If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Order The sort order to use. + // Accepts `"asc"` or `"desc"`. + Order *sortorder.SortOrder `json:"order,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the sorted value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *SortProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewSortProcessor returns a SortProcessor. +func NewSortProcessor() *SortProcessor { + r := &SortProcessor{} + + return r +} + +type SortProcessorVariant interface { + SortProcessorCaster() *SortProcessor +} + +func (s *SortProcessor) SortProcessorCaster() *SortProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfig.go new file mode 100644 index 000000000..2738e4dda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfig.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
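+
+// Editorial sketch (not generated code): a minimal ingest sort processor for
+// the SortProcessor type defined above. Field names and values are
+// illustrative. TargetField is optional; when it is omitted the sorted value
+// overwrites the source field in place.
+func exampleSortProcessor() *SortProcessor {
+	asc := sortorder.Asc
+	target := "sorted_tags"
+	p := NewSortProcessor()
+	p.Field = "tags"
+	p.Order = &asc
+	p.TargetField = &target
+	return p
+}
+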
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SourceConfig holds the union for the following types: +// +// bool +// SourceFilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/SourceFilter.ts#L50-L54 +type SourceConfig any + +type SourceConfigVariant interface { + SourceConfigCaster() *SourceConfig +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfigparam.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfigparam.go new file mode 100644 index 000000000..a04a6d10d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceconfigparam.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SourceConfigParam holds the union for the following types: +// +// bool +// []string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/SourceFilter.ts#L56-L62 +type SourceConfigParam any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefield.go new file mode 100644 index 000000000..6459c8b41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefield.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
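+
+// Editorial sketch (not generated code): both unions defined above are plain
+// `any` aliases, so callers pass one of the documented variants directly. A
+// bool toggles _source wholesale; a SourceFilter (declared later in this
+// package) or a []string narrows it. The patterns shown are illustrative.
+func exampleSourceConfigs() (SourceConfig, SourceConfigParam) {
+	cfg := SourceConfig(SourceFilter{Excludes: []string{"secrets.*"}})
+	param := SourceConfigParam([]string{"user.*", "message"})
+	return cfg, param
+}
+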
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcefieldmode" +) + +// SourceField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/meta-fields.ts#L58-L65 +type SourceField struct { + Compress *bool `json:"compress,omitempty"` + CompressThreshold *string `json:"compress_threshold,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Excludes []string `json:"excludes,omitempty"` + Includes []string `json:"includes,omitempty"` + Mode *sourcefieldmode.SourceFieldMode `json:"mode,omitempty"` +} + +func (s *SourceField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "compress_threshold": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompressThreshold", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressThreshold = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "excludes": + if err := dec.Decode(&s.Excludes); err != nil { + return fmt.Errorf("%s | %w", "Excludes", err) + } + + case "includes": + if err := dec.Decode(&s.Includes); err != nil { + return fmt.Errorf("%s | %w", "Includes", err) + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return fmt.Errorf("%s | %w", "Mode", err) + } + + } + } + return nil +} + +// NewSourceField returns a SourceField. +func NewSourceField() *SourceField { + r := &SourceField{} + + return r +} + +type SourceFieldVariant interface { + SourceFieldCaster() *SourceField +} + +func (s *SourceField) SourceFieldCaster() *SourceField { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefilter.go new file mode 100644 index 000000000..880c9c724 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourcefilter.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SourceFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/SourceFilter.ts#L23-L48 +type SourceFilter struct { + // ExcludeVectors If `true`, vector fields are excluded from the returned source. + // + // This option takes precedence over `includes`: any vector field will + // remain excluded even if it matches an `includes` rule. + ExcludeVectors *bool `json:"exclude_vectors,omitempty"` + // Excludes A list of fields to exclude from the returned source. + Excludes []string `json:"excludes,omitempty"` + // Includes A list of fields to include in the returned source. + Includes []string `json:"includes,omitempty"` +} + +func (s *SourceFilter) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + var item string + err := json.NewDecoder(bytes.NewReader(data)).Decode(&item) + if err != nil { + return err + } + s.Includes = append(s.Includes, item) + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude_vectors": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExcludeVectors", err) + } + s.ExcludeVectors = &value + case bool: + s.ExcludeVectors = &v + } + + case "excludes", "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Excludes", err) + } + + s.Excludes = append(s.Excludes, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Excludes); err != nil { + return fmt.Errorf("%s | %w", "Excludes", err) + } + } + + case "includes", "include": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Includes", err) + } + + s.Includes = append(s.Includes, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Includes); err != nil { + return fmt.Errorf("%s | %w", "Includes", err) + } + } + + } + } + return nil +} + +// NewSourceFilter returns a SourceFilter. 
+func NewSourceFilter() *SourceFilter { + r := &SourceFilter{} + + return r +} + +type SourceFilterVariant interface { + SourceFilterCaster() *SourceFilter +} + +func (s *SourceFilter) SourceFilterCaster() *SourceFilter { + return s +} + +func (s *SourceFilter) SourceConfigCaster() *SourceConfig { + o := SourceConfig(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceindex.go new file mode 100644 index 000000000..e6d7f80be --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceindex.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SourceIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/migrate_reindex/MigrateReindexRequest.ts#L50-L52 +type SourceIndex struct { + Index string `json:"index"` +} + +func (s *SourceIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewSourceIndex returns a SourceIndex. +func NewSourceIndex() *SourceIndex { + r := &SourceIndex{} + + return r +} + +type SourceIndexVariant interface { + SourceIndexCaster() *SourceIndex +} + +func (s *SourceIndex) SourceIndexCaster() *SourceIndex { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepository.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepository.go new file mode 100644 index 000000000..e59b6eb20 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepository.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
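+
+// Editorial sketch (not generated code): SourceFilter's UnmarshalJSON above
+// accepts a bare JSON string as shorthand for a single include pattern, and
+// both the singular ("include"/"exclude") and plural key spellings.
+func exampleSourceFilterShorthand() (*SourceFilter, error) {
+	var f SourceFilter
+	if err := json.Unmarshal([]byte(`"user.name"`), &f); err != nil {
+		return nil, err
+	}
+	// f.Includes is now []string{"user.name"}.
+	return &f, nil
+}
+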
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SourceOnlyRepository type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L104-L114 +type SourceOnlyRepository struct { + // Settings The repository settings. + Settings SourceOnlyRepositorySettings `json:"settings"` + // Type The source-only repository type. + Type string `json:"type,omitempty"` + Uuid *string `json:"uuid,omitempty"` +} + +func (s *SourceOnlyRepository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SourceOnlyRepository) MarshalJSON() ([]byte, error) { + type innerSourceOnlyRepository SourceOnlyRepository + tmp := innerSourceOnlyRepository{ + Settings: s.Settings, + Type: s.Type, + Uuid: s.Uuid, + } + + tmp.Type = "source" + + return json.Marshal(tmp) +} + +// NewSourceOnlyRepository returns a SourceOnlyRepository. +func NewSourceOnlyRepository() *SourceOnlyRepository { + r := &SourceOnlyRepository{} + + return r +} + +type SourceOnlyRepositoryVariant interface { + SourceOnlyRepositoryCaster() *SourceOnlyRepository +} + +func (s *SourceOnlyRepository) SourceOnlyRepositoryCaster() *SourceOnlyRepository { + return s +} + +func (s *SourceOnlyRepository) RepositoryCaster() *Repository { + o := Repository(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepositorysettings.go new file mode 100644 index 000000000..47ded3bb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sourceonlyrepositorysettings.go @@ -0,0 +1,191 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SourceOnlyRepositorySettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotRepository.ts#L414-L441 +type SourceOnlyRepositorySettings struct { + // ChunkSize Big files can be broken down into multiple smaller blobs in the blob store + // during snapshotting. + // It is not recommended to change this value from its default unless there is + // an explicit reason for limiting the size of blobs in the repository. + // Setting a value lower than the default can result in an increased number of + // API calls to the blob store during snapshot create and restore operations + // compared to using the default value and thus make both operations slower and + // more costly. + // Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + // The default varies by repository type. + ChunkSize ByteSize `json:"chunk_size,omitempty"` + // Compress When set to `true`, metadata files are stored in compressed format. + // This setting doesn't affect index files that are already compressed by + // default. + Compress *bool `json:"compress,omitempty"` + // DelegateType The delegated repository type. For valid values, refer to the `type` + // parameter. + // Source repositories can use `settings` properties for its delegated + // repository type. + DelegateType *string `json:"delegate_type,omitempty"` + // MaxNumberOfSnapshots The maximum number of snapshots the repository can contain. + // The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. + MaxNumberOfSnapshots *int `json:"max_number_of_snapshots,omitempty"` + // MaxRestoreBytesPerSec The maximum snapshot restore rate per node. + // It defaults to unlimited. + // Note that restores are also throttled through recovery settings. + MaxRestoreBytesPerSec ByteSize `json:"max_restore_bytes_per_sec,omitempty"` + // MaxSnapshotBytesPerSec The maximum snapshot creation rate per node. + // It defaults to 40mb per second. + // Note that if the recovery settings for managed services are set, then it + // defaults to unlimited, and the rate is additionally throttled through + // recovery settings. + MaxSnapshotBytesPerSec ByteSize `json:"max_snapshot_bytes_per_sec,omitempty"` + // ReadOnly If `true`, the repository is read-only. + // The cluster can retrieve and restore snapshots from the repository but not + // write to the repository or create snapshots in it. + // + // Only a cluster with write access can create snapshots in the repository. + // All other clusters connected to the repository should have the `readonly` + // parameter set to `true`. + // + // If `false`, the cluster can write to the repository and create snapshots in + // it. 
+ // + // IMPORTANT: If you register the same snapshot repository with multiple + // clusters, only one cluster should have write access to the repository. + // Having multiple clusters write to the repository at the same time risks + // corrupting the contents of the repository. + ReadOnly *bool `json:"read_only,omitempty"` +} + +func (s *SourceOnlyRepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunk_size": + if err := dec.Decode(&s.ChunkSize); err != nil { + return fmt.Errorf("%s | %w", "ChunkSize", err) + } + + case "compress": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compress", err) + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "delegate_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DelegateType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DelegateType = &o + + case "max_number_of_snapshots": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumberOfSnapshots", err) + } + s.MaxNumberOfSnapshots = &value + case float64: + f := int(v) + s.MaxNumberOfSnapshots = &f + } + + case "max_restore_bytes_per_sec": + if err := dec.Decode(&s.MaxRestoreBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxRestoreBytesPerSec", err) + } + + case "max_snapshot_bytes_per_sec": + if err := dec.Decode(&s.MaxSnapshotBytesPerSec); err != nil { + return fmt.Errorf("%s | %w", "MaxSnapshotBytesPerSec", err) + } + + case "read_only", "readonly": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReadOnly", err) + } + s.ReadOnly = &value + case bool: + s.ReadOnly = &v + } + + } + } + return nil +} + +// NewSourceOnlyRepositorySettings returns a SourceOnlyRepositorySettings. +func NewSourceOnlyRepositorySettings() *SourceOnlyRepositorySettings { + r := &SourceOnlyRepositorySettings{} + + return r +} + +type SourceOnlyRepositorySettingsVariant interface { + SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings +} + +func (s *SourceOnlyRepositorySettings) SourceOnlyRepositorySettingsCaster() *SourceOnlyRepositorySettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spancontainingquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spancontainingquery.go new file mode 100644 index 000000000..956e4f937 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spancontainingquery.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
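+
+// Editorial sketch (not generated code): a read-only source-only repository
+// delegating storage to the `fs` repository type, using the settings type
+// defined above. Assumes ByteSize (declared elsewhere in this package)
+// accepts a string such as "10MB"; all values are illustrative.
+func exampleSourceOnlyRepositorySettings() *SourceOnlyRepositorySettings {
+	delegate := "fs"
+	readOnly := true
+	s := NewSourceOnlyRepositorySettings()
+	s.DelegateType = &delegate
+	s.ChunkSize = ByteSize("10MB")
+	s.ReadOnly = &readOnly
+	return s
+}
+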
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanContainingQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L25-L39 +type SpanContainingQuery struct { + // Big Can be any span query. + // Matching spans from `big` that contain matches from `little` are returned. + Big SpanQuery `json:"big"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Little Can be any span query. + // Matching spans from `big` that contain matches from `little` are returned. + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanContainingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "big": + if err := dec.Decode(&s.Big); err != nil { + return fmt.Errorf("%s | %w", "Big", err) + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "little": + if err := dec.Decode(&s.Little); err != nil { + return fmt.Errorf("%s | %w", "Little", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanContainingQuery returns a SpanContainingQuery. +func NewSpanContainingQuery() *SpanContainingQuery { + r := &SpanContainingQuery{} + + return r +} + +type SpanContainingQueryVariant interface { + SpanContainingQueryCaster() *SpanContainingQuery +} + +func (s *SpanContainingQuery) SpanContainingQueryCaster() *SpanContainingQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfieldmaskingquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfieldmaskingquery.go new file mode 100644 index 000000000..f7f8ddf77 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfieldmaskingquery.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanFieldMaskingQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L41-L47 +type SpanFieldMaskingQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + Field string `json:"field"` + Query SpanQuery `json:"query"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanFieldMaskingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanFieldMaskingQuery returns a SpanFieldMaskingQuery. +func NewSpanFieldMaskingQuery() *SpanFieldMaskingQuery { + r := &SpanFieldMaskingQuery{} + + return r +} + +type SpanFieldMaskingQueryVariant interface { + SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery +} + +func (s *SpanFieldMaskingQuery) SpanFieldMaskingQueryCaster() *SpanFieldMaskingQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfirstquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfirstquery.go new file mode 100644 index 000000000..889e685d6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanfirstquery.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanFirstQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L49-L61 +type SpanFirstQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // End Controls the maximum end position permitted in a match. + End int `json:"end"` + // Match Can be any other span type query. + Match SpanQuery `json:"match"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanFirstQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "end": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "End", err) + } + s.End = value + case float64: + f := int(v) + s.End = f + } + + case "match": + if err := dec.Decode(&s.Match); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanFirstQuery returns a SpanFirstQuery. 
+func NewSpanFirstQuery() *SpanFirstQuery { + r := &SpanFirstQuery{} + + return r +} + +type SpanFirstQueryVariant interface { + SpanFirstQueryCaster() *SpanFirstQuery +} + +func (s *SpanFirstQuery) SpanFirstQueryCaster() *SpanFirstQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spangapquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spangapquery.go new file mode 100644 index 000000000..1afeb533a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spangapquery.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SpanGapQuery type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L63-L65 +type SpanGapQuery map[string]int + +type SpanGapQueryVariant interface { + SpanGapQueryCaster() *SpanGapQuery +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanishanalyzer.go new file mode 100644 index 000000000..e0603f072 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanishAnalyzer type. 
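+
+// Editorial sketch (not generated code): a SpanFirstQuery matching "quick"
+// only within the first three token positions of a field. Assumes SpanQuery's
+// single-key SpanTerm map variant and SpanTermQuery's Value field, declared
+// elsewhere in this package; the field and term are illustrative.
+func exampleSpanFirstQuery() *SpanFirstQuery {
+	q := NewSpanFirstQuery()
+	q.End = 3
+	q.Match = SpanQuery{
+		SpanTerm: map[string]SpanTermQuery{"title": {Value: "quick"}},
+	}
+	return q
+}
+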
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L296-L301 +type SpanishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SpanishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SpanishAnalyzer) MarshalJSON() ([]byte, error) { + type innerSpanishAnalyzer SpanishAnalyzer + tmp := innerSpanishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "spanish" + + return json.Marshal(tmp) +} + +// NewSpanishAnalyzer returns a SpanishAnalyzer. +func NewSpanishAnalyzer() *SpanishAnalyzer { + r := &SpanishAnalyzer{} + + return r +} + +type SpanishAnalyzerVariant interface { + SpanishAnalyzerCaster() *SpanishAnalyzer +} + +func (s *SpanishAnalyzer) SpanishAnalyzerCaster() *SpanishAnalyzer { + return s +} + +func (s *SpanishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanmultitermquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanmultitermquery.go new file mode 100644 index 000000000..74e82f87a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanmultitermquery.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanMultiTermQuery type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L67-L75 +type SpanMultiTermQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, + // or `regexp` query). + Match Query `json:"match"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanMultiTermQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "match": + if err := dec.Decode(&s.Match); err != nil { + return fmt.Errorf("%s | %w", "Match", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanMultiTermQuery returns a SpanMultiTermQuery. +func NewSpanMultiTermQuery() *SpanMultiTermQuery { + r := &SpanMultiTermQuery{} + + return r +} + +type SpanMultiTermQueryVariant interface { + SpanMultiTermQueryCaster() *SpanMultiTermQuery +} + +func (s *SpanMultiTermQuery) SpanMultiTermQueryCaster() *SpanMultiTermQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannearquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannearquery.go new file mode 100644 index 000000000..a06178b90 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannearquery.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanNearQuery type. 
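+
+// Editorial sketch (not generated code): wrapping a prefix query in the
+// SpanMultiTermQuery defined above so it can be used where a span query is
+// required. Assumes Query's single-key Prefix map variant and PrefixQuery's
+// Value field, declared elsewhere in this package; names are illustrative.
+func exampleSpanMultiTermQuery() *SpanMultiTermQuery {
+	q := NewSpanMultiTermQuery()
+	q.Match = Query{
+		Prefix: map[string]PrefixQuery{"user.id": {Value: "ki"}},
+	}
+	return q
+}
+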
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L77-L93 +type SpanNearQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Clauses Array of one or more other span type queries. + Clauses []SpanQuery `json:"clauses"` + // InOrder Controls whether matches are required to be in-order. + InOrder *bool `json:"in_order,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Controls the maximum number of intervening unmatched positions permitted. + Slop *int `json:"slop,omitempty"` +} + +func (s *SpanNearQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "clauses": + if err := dec.Decode(&s.Clauses); err != nil { + return fmt.Errorf("%s | %w", "Clauses", err) + } + + case "in_order": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "InOrder", err) + } + s.InOrder = &value + case bool: + s.InOrder = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Slop", err) + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + } + } + return nil +} + +// NewSpanNearQuery returns a SpanNearQuery. +func NewSpanNearQuery() *SpanNearQuery { + r := &SpanNearQuery{} + + return r +} + +type SpanNearQueryVariant interface { + SpanNearQueryCaster() *SpanNearQuery +} + +func (s *SpanNearQuery) SpanNearQueryCaster() *SpanNearQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannotquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannotquery.go new file mode 100644 index 000000000..e6d13444d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spannotquery.go @@ -0,0 +1,178 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanNotQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L95-L122 +type SpanNotQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Dist The number of tokens from within the include span that can’t have overlap + // with the exclude span. + // Equivalent to setting both `pre` and `post`. + Dist *int `json:"dist,omitempty"` + // Exclude Span query whose matches must not overlap those returned. + Exclude SpanQuery `json:"exclude"` + // Include Span query whose matches are filtered. + Include SpanQuery `json:"include"` + // Post The number of tokens after the include span that can’t have overlap with the + // exclude span. + Post *int `json:"post,omitempty"` + // Pre The number of tokens before the include span that can’t have overlap with the + // exclude span. 
+ Pre *int `json:"pre,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanNotQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "dist": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dist", err) + } + s.Dist = &value + case float64: + f := int(v) + s.Dist = &f + } + + case "exclude": + if err := dec.Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + + case "post": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Post", err) + } + s.Post = &value + case float64: + f := int(v) + s.Post = &f + } + + case "pre": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Pre", err) + } + s.Pre = &value + case float64: + f := int(v) + s.Pre = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanNotQuery returns a SpanNotQuery. +func NewSpanNotQuery() *SpanNotQuery { + r := &SpanNotQuery{} + + return r +} + +type SpanNotQueryVariant interface { + SpanNotQueryCaster() *SpanNotQuery +} + +func (s *SpanNotQuery) SpanNotQueryCaster() *SpanNotQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanorquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanorquery.go new file mode 100644 index 000000000..fe4ba62f8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanorquery.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanOrQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L124-L132 +type SpanOrQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Clauses Array of one or more other span type queries. + Clauses []SpanQuery `json:"clauses"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanOrQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "clauses": + if err := dec.Decode(&s.Clauses); err != nil { + return fmt.Errorf("%s | %w", "Clauses", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanOrQuery returns a SpanOrQuery. +func NewSpanOrQuery() *SpanOrQuery { + r := &SpanOrQuery{} + + return r +} + +type SpanOrQueryVariant interface { + SpanOrQueryCaster() *SpanOrQuery +} + +func (s *SpanOrQuery) SpanOrQueryCaster() *SpanOrQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanquery.go new file mode 100644 index 000000000..4ab07d64c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanquery.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SpanQuery type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L159-L201
+type SpanQuery struct {
+ AdditionalSpanQueryProperty map[string]json.RawMessage `json:"-"`
+ // SpanContaining Accepts a list of span queries, but only returns those spans which also match
+ // a second span query.
+ SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"`
+ // SpanFieldMasking Allows queries like `span_near` or `span_or` across different fields.
+ SpanFieldMasking *SpanFieldMaskingQuery `json:"span_field_masking,omitempty"`
+ // SpanFirst Accepts another span query whose matches must appear within the first N
+ // positions of the field.
+ SpanFirst *SpanFirstQuery `json:"span_first,omitempty"`
+ SpanGap SpanGapQuery `json:"span_gap,omitempty"`
+ // SpanMulti Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query.
+ SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"`
+ // SpanNear Accepts multiple span queries whose matches must be within the specified
+ // distance of each other, and possibly in the same order.
+ SpanNear *SpanNearQuery `json:"span_near,omitempty"`
+ // SpanNot Wraps another span query, and excludes any documents which match that query.
+ SpanNot *SpanNotQuery `json:"span_not,omitempty"`
+ // SpanOr Combines multiple span queries and returns documents which match any of the
+ // specified queries.
+ SpanOr *SpanOrQuery `json:"span_or,omitempty"`
+ // SpanTerm The equivalent of the `term` query but for use with other span queries.
+ SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"`
+ // SpanWithin The result from a single span query is returned as long as its span falls
+ // within the spans returned by a list of other span queries. 
+ SpanWithin *SpanWithinQuery `json:"span_within,omitempty"`
+}
+
+func (s *SpanQuery) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "span_containing":
+ if err := dec.Decode(&s.SpanContaining); err != nil {
+ return fmt.Errorf("%s | %w", "SpanContaining", err)
+ }
+
+ case "span_field_masking":
+ if err := dec.Decode(&s.SpanFieldMasking); err != nil {
+ return fmt.Errorf("%s | %w", "SpanFieldMasking", err)
+ }
+
+ case "span_first":
+ if err := dec.Decode(&s.SpanFirst); err != nil {
+ return fmt.Errorf("%s | %w", "SpanFirst", err)
+ }
+
+ case "span_gap":
+ if err := dec.Decode(&s.SpanGap); err != nil {
+ return fmt.Errorf("%s | %w", "SpanGap", err)
+ }
+
+ case "span_multi":
+ if err := dec.Decode(&s.SpanMulti); err != nil {
+ return fmt.Errorf("%s | %w", "SpanMulti", err)
+ }
+
+ case "span_near":
+ if err := dec.Decode(&s.SpanNear); err != nil {
+ return fmt.Errorf("%s | %w", "SpanNear", err)
+ }
+
+ case "span_not":
+ if err := dec.Decode(&s.SpanNot); err != nil {
+ return fmt.Errorf("%s | %w", "SpanNot", err)
+ }
+
+ case "span_or":
+ if err := dec.Decode(&s.SpanOr); err != nil {
+ return fmt.Errorf("%s | %w", "SpanOr", err)
+ }
+
+ case "span_term":
+ if s.SpanTerm == nil {
+ s.SpanTerm = make(map[string]SpanTermQuery, 0)
+ }
+ if err := dec.Decode(&s.SpanTerm); err != nil {
+ return fmt.Errorf("%s | %w", "SpanTerm", err)
+ }
+
+ case "span_within":
+ if err := dec.Decode(&s.SpanWithin); err != nil {
+ return fmt.Errorf("%s | %w", "SpanWithin", err)
+ }
+
+ default:
+
+ if key, ok := t.(string); ok {
+ if s.AdditionalSpanQueryProperty == nil {
+ s.AdditionalSpanQueryProperty = make(map[string]json.RawMessage, 0)
+ }
+ raw := new(json.RawMessage)
+ if err := dec.Decode(&raw); err != nil {
+ return fmt.Errorf("%s | %w", "AdditionalSpanQueryProperty", err)
+ }
+ s.AdditionalSpanQueryProperty[key] = *raw
+ }
+
+ }
+ }
+ return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s SpanQuery) MarshalJSON() ([]byte, error) {
+ type opt SpanQuery
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalSpanQueryProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalSpanQueryProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewSpanQuery returns a SpanQuery.
+func NewSpanQuery() *SpanQuery {
+ r := &SpanQuery{
+ AdditionalSpanQueryProperty: make(map[string]json.RawMessage),
+ SpanTerm: make(map[string]SpanTermQuery),
+ }
+
+ return r
+}
+
+type SpanQueryVariant interface {
+ SpanQueryCaster() *SpanQuery
+}
+
+func (s *SpanQuery) SpanQueryCaster() *SpanQuery {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spantermquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spantermquery.go
new file mode 100644
index 000000000..30d26744e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spantermquery.go
@@ -0,0 +1,117 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanTermQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L134-L141 +type SpanTermQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Value FieldValue `json:"value"` +} + +func (s *SpanTermQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value", "term": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewSpanTermQuery returns a SpanTermQuery. +func NewSpanTermQuery() *SpanTermQuery { + r := &SpanTermQuery{} + + return r +} + +type SpanTermQueryVariant interface { + SpanTermQueryCaster() *SpanTermQuery +} + +func (s *SpanTermQuery) SpanTermQueryCaster() *SpanTermQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanwithinquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanwithinquery.go new file mode 100644 index 000000000..8dfc72987 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/spanwithinquery.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SpanWithinQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/span.ts#L143-L157 +type SpanWithinQuery struct { + // Big Can be any span query. + // Matching spans from `little` that are enclosed within `big` are returned. + Big SpanQuery `json:"big"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Little Can be any span query. + // Matching spans from `little` that are enclosed within `big` are returned. + Little SpanQuery `json:"little"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanWithinQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "big": + if err := dec.Decode(&s.Big); err != nil { + return fmt.Errorf("%s | %w", "Big", err) + } + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "little": + if err := dec.Decode(&s.Little); err != nil { + return fmt.Errorf("%s | %w", "Little", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewSpanWithinQuery returns a SpanWithinQuery. +func NewSpanWithinQuery() *SpanWithinQuery { + r := &SpanWithinQuery{} + + return r +} + +type SpanWithinQueryVariant interface { + SpanWithinQueryCaster() *SpanWithinQuery +} + +func (s *SpanWithinQuery) SpanWithinQueryCaster() *SpanWithinQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparseembeddingresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparseembeddingresult.go new file mode 100644 index 000000000..cf516db0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparseembeddingresult.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SparseEmbeddingResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L36-L38 +type SparseEmbeddingResult struct { + Embedding SparseVector `json:"embedding"` +} + +func (s *SparseEmbeddingResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "embedding": + if err := dec.Decode(&s.Embedding); err != nil { + return fmt.Errorf("%s | %w", "Embedding", err) + } + + } + } + return nil +} + +// NewSparseEmbeddingResult returns a SparseEmbeddingResult. +func NewSparseEmbeddingResult() *SparseEmbeddingResult { + r := &SparseEmbeddingResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevector.go new file mode 100644 index 000000000..5f2d975db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevector.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SparseVector type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L24-L28 +type SparseVector map[string]float32 diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorindexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorindexoptions.go new file mode 100644 index 000000000..5803ffef5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorindexoptions.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SparseVectorIndexOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/SparseVectorIndexOptions.ts#L22-L42 +type SparseVectorIndexOptions struct { + // Prune Whether to perform pruning, omitting the non-significant tokens from the + // query to improve query performance. + // If prune is true but the pruning_config is not specified, pruning will occur + // but default values will be used. + // Default: false + Prune *bool `json:"prune,omitempty"` + // PruningConfig Optional pruning configuration. + // If enabled, this will omit non-significant tokens from the query in order to + // improve query performance. + // This is only used if prune is set to true. + // If prune is set to true but pruning_config is not specified, default values + // will be used. + PruningConfig *TokenPruningConfig `json:"pruning_config,omitempty"` +} + +func (s *SparseVectorIndexOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "prune": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Prune", err) + } + s.Prune = &value + case bool: + s.Prune = &v + } + + case "pruning_config": + if err := dec.Decode(&s.PruningConfig); err != nil { + return fmt.Errorf("%s | %w", "PruningConfig", err) + } + + } + } + return nil +} + +// NewSparseVectorIndexOptions returns a SparseVectorIndexOptions. 
+func NewSparseVectorIndexOptions() *SparseVectorIndexOptions { + r := &SparseVectorIndexOptions{} + + return r +} + +type SparseVectorIndexOptionsVariant interface { + SparseVectorIndexOptionsCaster() *SparseVectorIndexOptions +} + +func (s *SparseVectorIndexOptions) SparseVectorIndexOptionsCaster() *SparseVectorIndexOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorproperty.go new file mode 100644 index 000000000..52715b590 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorproperty.go @@ -0,0 +1,847 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// SparseVectorProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L226-L236 +type SparseVectorProperty struct { + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // IndexOptions Additional index options for the sparse vector field that controls the + // token pruning behavior of the sparse vector field. + IndexOptions *SparseVectorIndexOptions `json:"index_options,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SparseVectorProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + 
buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := 
NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SparseVectorProperty) MarshalJSON() ([]byte, error) { + type innerSparseVectorProperty SparseVectorProperty + tmp := innerSparseVectorProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "sparse_vector" + + return json.Marshal(tmp) +} + +// NewSparseVectorProperty returns a SparseVectorProperty. 
+func NewSparseVectorProperty() *SparseVectorProperty { + r := &SparseVectorProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type SparseVectorPropertyVariant interface { + SparseVectorPropertyCaster() *SparseVectorProperty +} + +func (s *SparseVectorProperty) SparseVectorPropertyCaster() *SparseVectorProperty { + return s +} + +func (s *SparseVectorProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorquery.go new file mode 100644 index 000000000..8570e086f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorquery.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SparseVectorQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/SparseVectorQuery.ts#L26-L80 +type SparseVectorQuery struct { + AdditionalSparseVectorQueryProperty map[string]json.RawMessage `json:"-"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field The name of the field that contains the token-weight pairs to be searched + // against. + // This field must be a mapped sparse_vector field. + Field string `json:"field"` + // InferenceId The inference ID to use to convert the query text into token-weight pairs. + // It must be the same inference ID that was used to create the tokens from the + // input text. + // Only one of inference_id and query_vector is allowed. + // If inference_id is specified, query must also be specified. + // Only one of inference_id or query_vector may be supplied in a request. + InferenceId *string `json:"inference_id,omitempty"` + // Prune Whether to perform pruning, omitting the non-significant tokens from the + // query to improve query performance. + // If prune is true but the pruning_config is not specified, pruning will occur + // but default values will be used. 
+ // Default: false + Prune *bool `json:"prune,omitempty"` + // PruningConfig Optional pruning configuration. + // If enabled, this will omit non-significant tokens from the query in order to + // improve query performance. + // This is only used if prune is set to true. + // If prune is set to true but pruning_config is not specified, default values + // will be used. + PruningConfig *TokenPruningConfig `json:"pruning_config,omitempty"` + // Query The query text you want to use for search. + // If inference_id is specified, query must also be specified. + Query *string `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // QueryVector Dictionary of precomputed sparse vectors and their associated weights. + // Only one of inference_id or query_vector may be supplied in a request. + QueryVector map[string]float32 `json:"query_vector,omitempty"` +} + +func (s *SparseVectorQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "inference_id": + if err := dec.Decode(&s.InferenceId); err != nil { + return fmt.Errorf("%s | %w", "InferenceId", err) + } + + case "prune": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Prune", err) + } + s.Prune = &value + case bool: + s.Prune = &v + } + + case "pruning_config": + if err := dec.Decode(&s.PruningConfig); err != nil { + return fmt.Errorf("%s | %w", "PruningConfig", err) + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "query_vector": + if s.QueryVector == nil { + s.QueryVector = make(map[string]float32, 0) + } + if err := dec.Decode(&s.QueryVector); err != nil { + return fmt.Errorf("%s | %w", "QueryVector", err) + } + + default: + + if key, ok := t.(string); ok { + if s.AdditionalSparseVectorQueryProperty == nil { + s.AdditionalSparseVectorQueryProperty = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "AdditionalSparseVectorQueryProperty", err) + } + s.AdditionalSparseVectorQueryProperty[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s SparseVectorQuery) MarshalJSON() ([]byte, error) { + type opt SparseVectorQuery + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err =
json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSparseVectorQueryProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSparseVectorQueryProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewSparseVectorQuery returns a SparseVectorQuery. +func NewSparseVectorQuery() *SparseVectorQuery { + r := &SparseVectorQuery{ + AdditionalSparseVectorQueryProperty: make(map[string]json.RawMessage), + QueryVector: make(map[string]float32), + } + + return r +} + +type SparseVectorQueryVariant interface { + SparseVectorQueryCaster() *SparseVectorQuery +} + +func (s *SparseVectorQuery) SparseVectorQueryCaster() *SparseVectorQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorstats.go new file mode 100644 index 000000000..e14b6aad0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sparsevectorstats.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SparseVectorStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L162-L164 +type SparseVectorStats struct { + ValueCount int64 `json:"value_count"` +} + +func (s *SparseVectorStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "value_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ValueCount", err) + } + s.ValueCount = value + case float64: + f := int64(v) + s.ValueCount = f + } + + } + } + return nil +} + +// NewSparseVectorStats returns a SparseVectorStats. 
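Per the doc comments on the `SparseVectorQuery` type above, `inference_id` and `query_vector` are mutually exclusive, and `inference_id` requires `query`. A hedged sketch of both request shapes (`ml.tokens` and `my-elser-endpoint` are assumed names, not part of this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Variant 1: precomputed token weights go in query_vector; inference_id stays unset.
	q := types.NewSparseVectorQuery()
	q.Field = "ml.tokens" // assumed sparse_vector field name
	q.QueryVector["lucene"] = 0.5
	q.QueryVector["search"] = 0.3

	body, err := json.Marshal(map[string]any{"sparse_vector": q})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))

	// Variant 2: let an inference endpoint tokenize the text; query_vector stays unset.
	inferenceID, text := "my-elser-endpoint", "how do I tune relevance?" // assumed names
	q2 := types.NewSparseVectorQuery()
	q2.Field = "ml.tokens"
	q2.InferenceId = &inferenceID
	q2.Query = &text

	body2, err := json.Marshal(map[string]any{"sparse_vector": q2})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body2))
}
```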
+func NewSparseVectorStats() *SparseVectorStats { + r := &SparseVectorStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/specifieddocument.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/specifieddocument.go new file mode 100644 index 000000000..77afee68f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/specifieddocument.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SpecifiedDocument type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L97-L100 +type SpecifiedDocument struct { + Id string `json:"id"` + Index *string `json:"index,omitempty"` +} + +func (s *SpecifiedDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewSpecifiedDocument returns a SpecifiedDocument. +func NewSpecifiedDocument() *SpecifiedDocument { + r := &SpecifiedDocument{} + + return r +} + +type SpecifiedDocumentVariant interface { + SpecifiedDocumentCaster() *SpecifiedDocument +} + +func (s *SpecifiedDocument) SpecifiedDocumentCaster() *SpecifiedDocument { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/splitprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/splitprocessor.go new file mode 100644 index 000000000..8a43086e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/splitprocessor.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SplitProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1515-L1540 +type SplitProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to split. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // PreserveTrailing Preserves empty trailing fields, if any. + PreserveTrailing *bool `json:"preserve_trailing,omitempty"` + // Separator A regex which matches the separator, for example, `,` or `\s+`. + Separator string `json:"separator"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the split value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *SplitProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "preserve_trailing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "PreserveTrailing", err) + } + s.PreserveTrailing = &value + case bool: + s.PreserveTrailing = &v + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Separator", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewSplitProcessor returns a SplitProcessor. +func NewSplitProcessor() *SplitProcessor { + r := &SplitProcessor{} + + return r +} + +type SplitProcessorVariant interface { + SplitProcessorCaster() *SplitProcessor +} + +func (s *SplitProcessor) SplitProcessorCaster() *SplitProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sql.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sql.go new file mode 100644 index 000000000..60a901757 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sql.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
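The `SplitProcessor` above takes a regex `separator` and, optionally, a `target_field`; without one the field is updated in place. A minimal sketch of how the struct serializes inside an ingest pipeline body (field names `tags` and `tags_list` are assumptions for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	target := "tags_list" // assumed target field
	preserve := true

	p := types.NewSplitProcessor()
	p.Field = "tags"        // assumed source field
	p.Separator = `\s*,\s*` // regex: commas with optional surrounding whitespace
	p.TargetField = &target
	p.PreserveTrailing = &preserve

	// Rendered the way it would sit inside an ingest pipeline definition.
	pipeline := map[string]any{"processors": []any{map[string]any{"split": p}}}
	out, err := json.MarshalIndent(pipeline, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```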
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Sql type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L392-L395 +type Sql struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + Features map[string]int `json:"features"` + Queries map[string]XpackQuery `json:"queries"` +} + +func (s *Sql) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "features": + if s.Features == nil { + s.Features = make(map[string]int, 0) + } + if err := dec.Decode(&s.Features); err != nil { + return fmt.Errorf("%s | %w", "Features", err) + } + + case "queries": + if s.Queries == nil { + s.Queries = make(map[string]XpackQuery, 0) + } + if err := dec.Decode(&s.Queries); err != nil { + return fmt.Errorf("%s | %w", "Queries", err) + } + + } + } + return nil +} + +// NewSql returns a Sql. +func NewSql() *Sql { + r := &Sql{ + Features: make(map[string]int), + Queries: make(map[string]XpackQuery), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ssl.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ssl.go new file mode 100644 index 000000000..4712400dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ssl.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Ssl type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L397-L400 +type Ssl struct { + Http FeatureToggle `json:"http"` + Transport FeatureToggle `json:"transport"` +} + +// NewSsl returns a Ssl. +func NewSsl() *Ssl { + r := &Ssl{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stagnatingbackingindices.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stagnatingbackingindices.go new file mode 100644 index 000000000..c9b8a84cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stagnatingbackingindices.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StagnatingBackingIndices type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/health_report/types.ts#L159-L163 +type StagnatingBackingIndices struct { + FirstOccurrenceTimestamp int64 `json:"first_occurrence_timestamp"` + IndexName string `json:"index_name"` + RetryCount int `json:"retry_count"` +} + +func (s *StagnatingBackingIndices) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "first_occurrence_timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "FirstOccurrenceTimestamp", err) + } + s.FirstOccurrenceTimestamp = value + case float64: + f := int64(v) + s.FirstOccurrenceTimestamp = f + } + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + + case "retry_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RetryCount", err) + } + s.RetryCount = value + case float64: + f := int(v) + s.RetryCount = f + } + + } + } + return nil +} + +// NewStagnatingBackingIndices returns a StagnatingBackingIndices. 
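The `StagnatingBackingIndices` decoder that follows uses the same lenient pattern as the rest of these generated types: numeric fields are accepted either as JSON numbers or as quoted strings. A small sketch of that tolerance (the index name and timestamp are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both payloads decode identically: the generated UnmarshalJSON accepts
	// numeric fields as JSON numbers or as quoted strings.
	payloads := []string{
		`{"index_name":".ds-logs-000001","first_occurrence_timestamp":1714060800000,"retry_count":3}`,
		`{"index_name":".ds-logs-000001","first_occurrence_timestamp":"1714060800000","retry_count":"3"}`,
	}
	for _, doc := range payloads {
		var s types.StagnatingBackingIndices
		if err := json.Unmarshal([]byte(doc), &s); err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d retries since %d\n", s.IndexName, s.RetryCount, s.FirstOccurrenceTimestamp)
	}
}
```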
+func NewStagnatingBackingIndices() *StagnatingBackingIndices { + r := &StagnatingBackingIndices{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardanalyzer.go new file mode 100644 index 000000000..9aa18c65b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardanalyzer.go @@ -0,0 +1,140 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StandardAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L382-L402 +type StandardAnalyzer struct { + // MaxTokenLength The maximum token length. If a token is seen that exceeds this length then it + // is split at `max_token_length` intervals. + // Defaults to `255`. + MaxTokenLength *int `json:"max_token_length,omitempty"` + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. + Stopwords StopWords `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. 
+ StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *StandardAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StandardAnalyzer) MarshalJSON() ([]byte, error) { + type innerStandardAnalyzer StandardAnalyzer + tmp := innerStandardAnalyzer{ + MaxTokenLength: s.MaxTokenLength, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "standard" + + return json.Marshal(tmp) +} + +// NewStandardAnalyzer returns a StandardAnalyzer. +func NewStandardAnalyzer() *StandardAnalyzer { + r := &StandardAnalyzer{} + + return r +} + +type StandardAnalyzerVariant interface { + StandardAnalyzerCaster() *StandardAnalyzer +} + +func (s *StandardAnalyzer) StandardAnalyzerCaster() *StandardAnalyzer { + return s +} + +func (s *StandardAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationbounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationbounds.go new file mode 100644 index 000000000..9fda175ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationbounds.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// StandardDeviationBounds type. 
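The `StandardAnalyzer` above pins `"type": "standard"` in its `MarshalJSON` override, so a configured instance can be dropped straight into index settings. A hedged sketch (the analyzer name `my_standard` is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	maxLen := 128 // tokens longer than this are split at 128-char intervals
	a := types.NewStandardAnalyzer()
	a.MaxTokenLength = &maxLen

	// MarshalJSON pins "type": "standard", so the value drops straight into
	// index settings under analysis.analyzer.
	settings := map[string]any{
		"analysis": map[string]any{
			"analyzer": map[string]any{"my_standard": a}, // analyzer name is illustrative
		},
	}
	out, err := json.Marshal(settings)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```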
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L281-L288 +type StandardDeviationBounds struct { + Lower *Float64 `json:"lower,omitempty"` + LowerPopulation *Float64 `json:"lower_population,omitempty"` + LowerSampling *Float64 `json:"lower_sampling,omitempty"` + Upper *Float64 `json:"upper,omitempty"` + UpperPopulation *Float64 `json:"upper_population,omitempty"` + UpperSampling *Float64 `json:"upper_sampling,omitempty"` +} + +func (s *StandardDeviationBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + if err := dec.Decode(&s.Lower); err != nil { + return fmt.Errorf("%s | %w", "Lower", err) + } + + case "lower_population": + if err := dec.Decode(&s.LowerPopulation); err != nil { + return fmt.Errorf("%s | %w", "LowerPopulation", err) + } + + case "lower_sampling": + if err := dec.Decode(&s.LowerSampling); err != nil { + return fmt.Errorf("%s | %w", "LowerSampling", err) + } + + case "upper": + if err := dec.Decode(&s.Upper); err != nil { + return fmt.Errorf("%s | %w", "Upper", err) + } + + case "upper_population": + if err := dec.Decode(&s.UpperPopulation); err != nil { + return fmt.Errorf("%s | %w", "UpperPopulation", err) + } + + case "upper_sampling": + if err := dec.Decode(&s.UpperSampling); err != nil { + return fmt.Errorf("%s | %w", "UpperSampling", err) + } + + } + } + return nil +} + +// NewStandardDeviationBounds returns a StandardDeviationBounds. +func NewStandardDeviationBounds() *StandardDeviationBounds { + r := &StandardDeviationBounds{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationboundsasstring.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationboundsasstring.go new file mode 100644 index 000000000..c8cfd4995 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standarddeviationboundsasstring.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StandardDeviationBoundsAsString type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L290-L297 +type StandardDeviationBoundsAsString struct { + Lower string `json:"lower"` + LowerPopulation string `json:"lower_population"` + LowerSampling string `json:"lower_sampling"` + Upper string `json:"upper"` + UpperPopulation string `json:"upper_population"` + UpperSampling string `json:"upper_sampling"` +} + +func (s *StandardDeviationBoundsAsString) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lower", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lower = o + + case "lower_population": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LowerPopulation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerPopulation = o + + case "lower_sampling": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LowerSampling", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerSampling = o + + case "upper": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Upper", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Upper = o + + case "upper_population": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UpperPopulation", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperPopulation = o + + case "upper_sampling": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UpperSampling", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperSampling = o + + } + } + return nil +} + +// NewStandardDeviationBoundsAsString returns a StandardDeviationBoundsAsString. +func NewStandardDeviationBoundsAsString() *StandardDeviationBoundsAsString { + r := &StandardDeviationBoundsAsString{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardretriever.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardretriever.go new file mode 100644 index 000000000..8f0d47103 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardretriever.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StandardRetriever type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L102-L113 +type StandardRetriever struct { + // Collapse Collapses the top documents by a specified key into a single top document per + // key. + Collapse *FieldCollapse `json:"collapse,omitempty"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + // Query Defines a query to retrieve a set of top documents. + Query *Query `json:"query,omitempty"` + // SearchAfter Defines a search after object parameter used for pagination. + SearchAfter []FieldValue `json:"search_after,omitempty"` + // Sort A sort object that specifies the order of matching documents. + Sort []SortCombinations `json:"sort,omitempty"` + // TerminateAfter Maximum number of documents to collect for each shard.
+ TerminateAfter *int `json:"terminate_after,omitempty"` +} + +func (s *StandardRetriever) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return fmt.Errorf("%s | %w", "Collapse", err) + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "terminate_after": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TerminateAfter", err) + } + s.TerminateAfter = &value + case float64: + f := int(v) + s.TerminateAfter = &f + } + + } + } + return nil +} + +// NewStandardRetriever returns a StandardRetriever. +func NewStandardRetriever() *StandardRetriever { + r := &StandardRetriever{} + + return r +} + +type StandardRetrieverVariant interface { + StandardRetrieverCaster() *StandardRetriever +} + +func (s *StandardRetriever) StandardRetrieverCaster() *StandardRetriever { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardtokenizer.go new file mode 100644 index 000000000..648e6e0c0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/standardtokenizer.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
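The `StandardRetriever` decoder above normalizes `filter` (and `sort`) from either a single object or an array into a slice, via the `bytes.HasPrefix(rawMsg, []byte("["))` check. A sketch exercising both shapes (the `tenant`/`env` term filters are assumed example fields):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "filter" may be a single query object or an array; the generated
	// decoder normalizes both into the Filter slice.
	docs := []string{
		`{"query":{"match_all":{}},"filter":{"term":{"tenant":{"value":"acme"}}},"terminate_after":1000}`,
		`{"filter":[{"term":{"tenant":{"value":"acme"}}},{"term":{"env":{"value":"prod"}}}]}`,
	}
	for _, doc := range docs {
		var r types.StandardRetriever
		if err := json.Unmarshal([]byte(doc), &r); err != nil {
			panic(err)
		}
		fmt.Printf("filters decoded: %d\n", len(r.Filter))
	}
}
```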
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StandardTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L123-L126 +type StandardTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *StandardTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StandardTokenizer) MarshalJSON() ([]byte, error) { + type innerStandardTokenizer StandardTokenizer + tmp := innerStandardTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "standard" + + return json.Marshal(tmp) +} + +// NewStandardTokenizer returns a StandardTokenizer. +func NewStandardTokenizer() *StandardTokenizer { + r := &StandardTokenizer{} + + return r +} + +type StandardTokenizerVariant interface { + StandardTokenizerCaster() *StandardTokenizer +} + +func (s *StandardTokenizer) StandardTokenizerCaster() *StandardTokenizer { + return s +} + +func (s *StandardTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statistics.go new file mode 100644 index 000000000..484fa643c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statistics.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
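The `StandardTokenizer` above follows the same variant pattern as the other generated types: the `*Caster` methods lift a concrete value into a union type, and `MarshalJSON` pins the `"type"` discriminator. A brief sketch, assuming `TokenizerDefinition` is the usual `any`-style union in this package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	maxLen := 255
	tok := types.NewStandardTokenizer()
	tok.MaxTokenLength = &maxLen

	// The caster lifts the concrete tokenizer into the TokenizerDefinition
	// union; MarshalJSON still pins "type": "standard" on the way out.
	def := tok.TokenizerDefinitionCaster()
	out, err := json.Marshal(def)
	if err != nil {
		panic(err)
	}
	// Expected shape: {"max_token_length":255,"type":"standard"}
	fmt.Println(string(out))
}
```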
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Statistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/slm/_types/SnapshotLifecycle.ts#L61-L84 +type Statistics struct { + Policy *string `json:"policy,omitempty"` + RetentionDeletionTime Duration `json:"retention_deletion_time,omitempty"` + RetentionDeletionTimeMillis *int64 `json:"retention_deletion_time_millis,omitempty"` + RetentionFailed *int64 `json:"retention_failed,omitempty"` + RetentionRuns *int64 `json:"retention_runs,omitempty"` + RetentionTimedOut *int64 `json:"retention_timed_out,omitempty"` + TotalSnapshotDeletionFailures *int64 `json:"total_snapshot_deletion_failures,omitempty"` + TotalSnapshotsDeleted *int64 `json:"total_snapshots_deleted,omitempty"` + TotalSnapshotsFailed *int64 `json:"total_snapshots_failed,omitempty"` + TotalSnapshotsTaken *int64 `json:"total_snapshots_taken,omitempty"` +} + +func (s *Statistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + + case "retention_deletion_time": + if err := dec.Decode(&s.RetentionDeletionTime); err != nil { + return fmt.Errorf("%s | %w", "RetentionDeletionTime", err) + } + + case "retention_deletion_time_millis": + if err := dec.Decode(&s.RetentionDeletionTimeMillis); err != nil { + return fmt.Errorf("%s | %w", "RetentionDeletionTimeMillis", err) + } + + case "retention_failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RetentionFailed", err) + } + s.RetentionFailed = &value + case float64: + f := int64(v) + s.RetentionFailed = &f + } + + case "retention_runs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RetentionRuns", err) + } + s.RetentionRuns = &value + case float64: + f := int64(v) + s.RetentionRuns = &f + } + + case "retention_timed_out": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RetentionTimedOut", err) + } + s.RetentionTimedOut = &value + case float64: + f := int64(v) + s.RetentionTimedOut = &f + } + + case "total_snapshot_deletion_failures", "snapshot_deletion_failures": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSnapshotDeletionFailures", err) + } + 
s.TotalSnapshotDeletionFailures = &value + case float64: + f := int64(v) + s.TotalSnapshotDeletionFailures = &f + } + + case "total_snapshots_deleted", "snapshots_deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSnapshotsDeleted", err) + } + s.TotalSnapshotsDeleted = &value + case float64: + f := int64(v) + s.TotalSnapshotsDeleted = &f + } + + case "total_snapshots_failed", "snapshots_failed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSnapshotsFailed", err) + } + s.TotalSnapshotsFailed = &value + case float64: + f := int64(v) + s.TotalSnapshotsFailed = &f + } + + case "total_snapshots_taken", "snapshots_taken": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSnapshotsTaken", err) + } + s.TotalSnapshotsTaken = &value + case float64: + f := int64(v) + s.TotalSnapshotsTaken = &f + } + + } + } + return nil +} + +// NewStatistics returns a Statistics. +func NewStatistics() *Statistics { + r := &Statistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stats.go new file mode 100644 index 000000000..33e6ef3fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stats.go @@ -0,0 +1,281 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// Stats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L30-L114 +type Stats struct { + // AdaptiveSelection Statistics about adaptive replica selection. + AdaptiveSelection map[string]AdaptiveSelection `json:"adaptive_selection,omitempty"` + // Attributes Contains a list of attributes for the node. + Attributes map[string]string `json:"attributes,omitempty"` + // Breakers Statistics about the field data circuit breaker. + Breakers map[string]Breaker `json:"breakers,omitempty"` + // Discovery Contains node discovery statistics for the node. 
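Note how the `Statistics` decoder above lists legacy aliases alongside the current SLM field names (`"total_snapshots_taken", "snapshots_taken"` and so on), so older payloads still decode. A small sketch (the `nightly` policy name is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// The decoder maps both the current field names and the older aliases
	// (snapshots_taken, snapshots_failed, ...) onto the same struct fields.
	payloads := []string{
		`{"policy":"nightly","total_snapshots_taken":42,"total_snapshots_failed":1}`,
		`{"policy":"nightly","snapshots_taken":"42","snapshots_failed":"1"}`,
	}
	for _, doc := range payloads {
		var st types.Statistics
		if err := json.Unmarshal([]byte(doc), &st); err != nil {
			panic(err)
		}
		fmt.Printf("policy=%s taken=%d failed=%d\n", *st.Policy, *st.TotalSnapshotsTaken, *st.TotalSnapshotsFailed)
	}
}
```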
+ Discovery *Discovery `json:"discovery,omitempty"` + // Fs File system information, data path, free disk space, read/write stats. + Fs *FileSystem `json:"fs,omitempty"` + // Host Network host for the node, based on the network host setting. + Host *string `json:"host,omitempty"` + // Http HTTP connection information. + Http *Http `json:"http,omitempty"` + // IndexingPressure Contains indexing pressure statistics for the node. + IndexingPressure *NodesIndexingPressure `json:"indexing_pressure,omitempty"` + // Indices Indices stats about size, document count, indexing and deletion times, search + // times, field cache size, merges and flushes. + Indices *IndicesShardStats `json:"indices,omitempty"` + // Ingest Statistics about ingest preprocessing. + Ingest *NodesIngest `json:"ingest,omitempty"` + // Ip IP address and port for the node. + Ip []string `json:"ip,omitempty"` + // Jvm JVM stats, memory pool information, garbage collection, buffer pools, number + // of loaded/unloaded classes. + Jvm *Jvm `json:"jvm,omitempty"` + // Name Human-readable identifier for the node. + // Based on the node name setting. + Name *string `json:"name,omitempty"` + // Os Operating system stats, load average, mem, swap. + Os *OperatingSystem `json:"os,omitempty"` + // Process Process statistics, memory consumption, cpu usage, open file descriptors. + Process *Process `json:"process,omitempty"` + // Roles Roles assigned to the node. + Roles []noderole.NodeRole `json:"roles,omitempty"` + // Script Contains script statistics for the node. + Script *Scripting `json:"script,omitempty"` + ScriptCache map[string][]ScriptCache `json:"script_cache,omitempty"` + // ThreadPool Statistics about each thread pool, including current size, queue and rejected + // tasks. + ThreadPool map[string]ThreadCount `json:"thread_pool,omitempty"` + Timestamp *int64 `json:"timestamp,omitempty"` + // Transport Transport statistics about sent and received bytes in cluster communication. + Transport *Transport `json:"transport,omitempty"` + // TransportAddress Host and port for the transport layer, used for internal communication + // between nodes in a cluster. 
+ TransportAddress *string `json:"transport_address,omitempty"` +} + +func (s *Stats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adaptive_selection": + if s.AdaptiveSelection == nil { + s.AdaptiveSelection = make(map[string]AdaptiveSelection, 0) + } + if err := dec.Decode(&s.AdaptiveSelection); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveSelection", err) + } + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "breakers": + if s.Breakers == nil { + s.Breakers = make(map[string]Breaker, 0) + } + if err := dec.Decode(&s.Breakers); err != nil { + return fmt.Errorf("%s | %w", "Breakers", err) + } + + case "discovery": + if err := dec.Decode(&s.Discovery); err != nil { + return fmt.Errorf("%s | %w", "Discovery", err) + } + + case "fs": + if err := dec.Decode(&s.Fs); err != nil { + return fmt.Errorf("%s | %w", "Fs", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return fmt.Errorf("%s | %w", "Http", err) + } + + case "indexing_pressure": + if err := dec.Decode(&s.IndexingPressure); err != nil { + return fmt.Errorf("%s | %w", "IndexingPressure", err) + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "ingest": + if err := dec.Decode(&s.Ingest); err != nil { + return fmt.Errorf("%s | %w", "Ingest", err) + } + + case "ip": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + s.Ip = append(s.Ip, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return fmt.Errorf("%s | %w", "Jvm", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "os": + if err := dec.Decode(&s.Os); err != nil { + return fmt.Errorf("%s | %w", "Os", err) + } + + case "process": + if err := dec.Decode(&s.Process); err != nil { + return fmt.Errorf("%s | %w", "Process", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "script_cache": + if s.ScriptCache == nil { + s.ScriptCache = make(map[string][]ScriptCache, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := NewScriptCache() + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptCache", err) + } + s.ScriptCache[key] = append(s.ScriptCache[key], *o) + default: + o := []ScriptCache{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return fmt.Errorf("%s | %w", 
"ScriptCache", err) + } + s.ScriptCache[key] = o + } + } + + case "thread_pool": + if s.ThreadPool == nil { + s.ThreadPool = make(map[string]ThreadCount, 0) + } + if err := dec.Decode(&s.ThreadPool); err != nil { + return fmt.Errorf("%s | %w", "ThreadPool", err) + } + + case "timestamp": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return fmt.Errorf("%s | %w", "Transport", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewStats returns a Stats. +func NewStats() *Stats { + r := &Stats{ + AdaptiveSelection: make(map[string]AdaptiveSelection), + Attributes: make(map[string]string), + Breakers: make(map[string]Breaker), + ScriptCache: make(map[string][]ScriptCache), + ThreadPool: make(map[string]ThreadCount), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregate.go new file mode 100644 index 000000000..2c2ce0244 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregate.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatsAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L257-L273 +type StatsAggregate struct { + Avg *Float64 `json:"avg,omitempty"` + AvgAsString *string `json:"avg_as_string,omitempty"` + Count int64 `json:"count"` + Max *Float64 `json:"max,omitempty"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min *Float64 `json:"min,omitempty"` + MinAsString *string `json:"min_as_string,omitempty"` + Sum Float64 `json:"sum"` + SumAsString *string `json:"sum_as_string,omitempty"` +} + +func (s *StatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AvgAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "sum": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Sum", err) + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SumAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumAsString = &o + + } + } + return nil +} + +// NewStatsAggregate returns a StatsAggregate. +func NewStatsAggregate() *StatsAggregate { + r := &StatsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregation.go new file mode 100644 index 000000000..787de16f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L314-L314 +type StatsAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *StatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewStatsAggregation returns a StatsAggregation. +func NewStatsAggregation() *StatsAggregation { + r := &StatsAggregation{} + + return r +} + +type StatsAggregationVariant interface { + StatsAggregationCaster() *StatsAggregation +} + +func (s *StatsAggregation) StatsAggregationCaster() *StatsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregate.go new file mode 100644 index 000000000..b31b4ae57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregate.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatsBucketAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L275-L279 +type StatsBucketAggregate struct { + Avg *Float64 `json:"avg,omitempty"` + AvgAsString *string `json:"avg_as_string,omitempty"` + Count int64 `json:"count"` + Max *Float64 `json:"max,omitempty"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min *Float64 `json:"min,omitempty"` + MinAsString *string `json:"min_as_string,omitempty"` + Sum Float64 `json:"sum"` + SumAsString *string `json:"sum_as_string,omitempty"` +} + +func (s *StatsBucketAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return fmt.Errorf("%s | %w", "Avg", err) + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AvgAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "sum": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Sum", err) + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SumAsString", err) + } + 
o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumAsString = &o + + } + } + return nil +} + +// NewStatsBucketAggregate returns a StatsBucketAggregate. +func NewStatsBucketAggregate() *StatsBucketAggregate { + r := &StatsBucketAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregation.go new file mode 100644 index 000000000..8bbd23e02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statsbucketaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// StatsBucketAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L410-L410 +type StatsBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *StatsBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewStatsBucketAggregation returns a StatsBucketAggregation. 
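Request-side types like the `StatsBucketAggregation` defined above marshal with plain `encoding/json`. A minimal sketch, assuming a plain string is one valid form of the `BucketsPath` union (declared elsewhere in this package); the path and format values are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	agg := types.NewStatsBucketAggregation()
	agg.BucketsPath = "sales_per_month>sales" // hypothetical path into sibling buckets
	format := "#,##0.00"
	agg.Format = &format

	// GapPolicy is a nil pointer here, so omitempty drops it from the output.
	b, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"buckets_path":"sales_per_month>sales","format":"#,##0.00"}
}
```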
+func NewStatsBucketAggregation() *StatsBucketAggregation { + r := &StatsBucketAggregation{} + + return r +} + +type StatsBucketAggregationVariant interface { + StatsBucketAggregationCaster() *StatsBucketAggregation +} + +func (s *StatsBucketAggregation) StatsBucketAggregationCaster() *StatsBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/status.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/status.go new file mode 100644 index 000000000..3375396bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/status.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Status type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/_types/SnapshotStatus.ts#L26-L60 +type Status struct { + // IncludeGlobalState Indicates whether the current cluster state is included in the snapshot. + IncludeGlobalState bool `json:"include_global_state"` + Indices map[string]SnapshotIndexStats `json:"indices"` + // Repository The name of the repository that includes the snapshot. + Repository string `json:"repository"` + // ShardsStats Statistics for the shards in the snapshot. + ShardsStats SnapshotShardsStats `json:"shards_stats"` + // Snapshot The name of the snapshot. + Snapshot string `json:"snapshot"` + // State The current snapshot state: + // + // * `FAILED`: The snapshot finished with an error and failed to store any data. + // * `STARTED`: The snapshot is currently running. + // * `SUCCESS`: The snapshot completed. + State string `json:"state"` + // Stats Details about the number (`file_count`) and size (`size_in_bytes`) of files + // included in the snapshot. + Stats SnapshotStats `json:"stats"` + // Uuid The universally unique identifier (UUID) for the snapshot. 
+ Uuid string `json:"uuid"` +} + +func (s *Status) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "include_global_state": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IncludeGlobalState", err) + } + s.IncludeGlobalState = value + case bool: + s.IncludeGlobalState = v + } + + case "indices": + if s.Indices == nil { + s.Indices = make(map[string]SnapshotIndexStats, 0) + } + if err := dec.Decode(&s.Indices); err != nil { + return fmt.Errorf("%s | %w", "Indices", err) + } + + case "repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Repository", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = o + + case "shards_stats": + if err := dec.Decode(&s.ShardsStats); err != nil { + return fmt.Errorf("%s | %w", "ShardsStats", err) + } + + case "snapshot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Snapshot", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = o + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return fmt.Errorf("%s | %w", "Uuid", err) + } + + } + } + return nil +} + +// NewStatus returns a Status. +func NewStatus() *Status { + r := &Status{ + Indices: make(map[string]SnapshotIndexStats), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statuserror.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statuserror.go new file mode 100644 index 000000000..2c76c8b87 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statuserror.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatusError type. 
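The snapshot `Status` decoder above applies the same leniency to booleans: `include_global_state` may arrive as a JSON bool or as a quoted string, and both are routed through `strconv.ParseBool`. A minimal sketch with hypothetical snapshot values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Note include_global_state is the *string* "true" here, not a bool.
	raw := `{"snapshot":"nightly-1","repository":"backups","state":"SUCCESS","include_global_state":"true","uuid":"abc123"}`

	var st types.Status
	if err := json.Unmarshal([]byte(raw), &st); err != nil {
		panic(err)
	}
	fmt.Println(st.Snapshot, st.State, st.IncludeGlobalState) // nightly-1 SUCCESS true
}
```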
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L44-L47 +type StatusError struct { + Index string `json:"index"` + Message string `json:"message"` +} + +func (s *StatusError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Message", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + } + } + return nil +} + +// NewStatusError returns a StatusError. +func NewStatusError() *StatusError { + r := &StatusError{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statusinprogress.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statusinprogress.go new file mode 100644 index 000000000..d48196a41 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/statusinprogress.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StatusInProgress type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_migrate_reindex_status/MigrateGetReindexStatusResponse.ts#L38-L42 +type StatusInProgress struct { + Index string `json:"index"` + ReindexedDocCount int64 `json:"reindexed_doc_count"` + TotalDocCount int64 `json:"total_doc_count"` +} + +func (s *StatusInProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o + + case "reindexed_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReindexedDocCount", err) + } + s.ReindexedDocCount = value + case float64: + f := int64(v) + s.ReindexedDocCount = f + } + + case "total_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocCount", err) + } + s.TotalDocCount = value + case float64: + f := int64(v) + s.TotalDocCount = f + } + + } + } + return nil +} + +// NewStatusInProgress returns a StatusInProgress. +func NewStatusInProgress() *StatusInProgress { + r := &StatusInProgress{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmeroverridetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmeroverridetokenfilter.go new file mode 100644 index 000000000..ce5735666 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmeroverridetokenfilter.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StemmerOverrideTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L417-L423 +type StemmerOverrideTokenFilter struct { + // Rules A list of mapping rules to use. + Rules []string `json:"rules,omitempty"` + // RulesPath A path (either relative to `config` location, or absolute) to a list of + // mappings. 
+ RulesPath *string `json:"rules_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *StemmerOverrideTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return fmt.Errorf("%s | %w", "Rules", err) + } + + case "rules_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RulesPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RulesPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StemmerOverrideTokenFilter) MarshalJSON() ([]byte, error) { + type innerStemmerOverrideTokenFilter StemmerOverrideTokenFilter + tmp := innerStemmerOverrideTokenFilter{ + Rules: s.Rules, + RulesPath: s.RulesPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stemmer_override" + + return json.Marshal(tmp) +} + +// NewStemmerOverrideTokenFilter returns a StemmerOverrideTokenFilter. +func NewStemmerOverrideTokenFilter() *StemmerOverrideTokenFilter { + r := &StemmerOverrideTokenFilter{} + + return r +} + +type StemmerOverrideTokenFilterVariant interface { + StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter +} + +func (s *StemmerOverrideTokenFilter) StemmerOverrideTokenFilterCaster() *StemmerOverrideTokenFilter { + return s +} + +func (s *StemmerOverrideTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmertokenfilter.go new file mode 100644 index 000000000..c26254f8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stemmertokenfilter.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StemmerTokenFilter type. 
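The analysis types pin their `type` discriminator in `MarshalJSON` and accept legacy key aliases when decoding. A minimal sketch for the `StemmerTokenFilter` defined just below; `"light_english"` is a hypothetical language value:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	f := types.NewStemmerTokenFilter()
	lang := "light_english"
	f.Language = &lang

	// MarshalJSON always injects the literal discriminator "type":"stemmer".
	b, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"language":"light_english","type":"stemmer"}

	// Decoding accepts either "language" or the alias "name" for the same field.
	var g types.StemmerTokenFilter
	if err := json.Unmarshal([]byte(`{"type":"stemmer","name":"light_english"}`), &g); err != nil {
		panic(err)
	}
	fmt.Println(*g.Language) // light_english
}
```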
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L425-L429 +type StemmerTokenFilter struct { + Language *string `json:"language,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *StemmerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language", "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StemmerTokenFilter) MarshalJSON() ([]byte, error) { + type innerStemmerTokenFilter StemmerTokenFilter + tmp := innerStemmerTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stemmer" + + return json.Marshal(tmp) +} + +// NewStemmerTokenFilter returns a StemmerTokenFilter. +func NewStemmerTokenFilter() *StemmerTokenFilter { + r := &StemmerTokenFilter{} + + return r +} + +type StemmerTokenFilterVariant interface { + StemmerTokenFilterCaster() *StemmerTokenFilter +} + +func (s *StemmerTokenFilter) StemmerTokenFilterCaster() *StemmerTokenFilter { + return s +} + +func (s *StemmerTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stepkey.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stepkey.go new file mode 100644 index 000000000..fd1b523bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stepkey.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StepKey type. 
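For the `StepKey` defined just below, only `phase` is required; `action` and `name` are optional pointers that drop out of the JSON when unset. A minimal sketch with hypothetical ILM values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	k := types.NewStepKey()
	k.Phase = "warm"
	action := "forcemerge" // hypothetical ILM action within the warm phase
	k.Action = &action

	// Name stays nil, so omitempty removes it from the payload.
	b, err := json.Marshal(k)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"action":"forcemerge","phase":"warm"}
}
```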
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/move_to_step/types.ts#L20-L31 +type StepKey struct { + // Action The optional action to which the index will be moved. + Action *string `json:"action,omitempty"` + // Name The optional step name to which the index will be moved. + Name *string `json:"name,omitempty"` + Phase string `json:"phase"` +} + +func (s *StepKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = &o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "phase": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Phase", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Phase = o + + } + } + return nil +} + +// NewStepKey returns a StepKey. +func NewStepKey() *StepKey { + r := &StepKey{} + + return r +} + +type StepKeyVariant interface { + StepKeyCaster() *StepKey +} + +func (s *StepKey) StepKeyCaster() *StepKey { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopanalyzer.go new file mode 100644 index 000000000..72c7a9705 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopanalyzer.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StopAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L404-L419 +type StopAnalyzer struct { + // Stopwords A pre-defined stop words list like `_english_` or an array containing a list + // of stop words. + // Defaults to `_none_`. + Stopwords StopWords `json:"stopwords,omitempty"` + // StopwordsPath The path to a file containing stop words. 
+ StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *StopAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StopAnalyzer) MarshalJSON() ([]byte, error) { + type innerStopAnalyzer StopAnalyzer + tmp := innerStopAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stop" + + return json.Marshal(tmp) +} + +// NewStopAnalyzer returns a StopAnalyzer. +func NewStopAnalyzer() *StopAnalyzer { + r := &StopAnalyzer{} + + return r +} + +type StopAnalyzerVariant interface { + StopAnalyzerCaster() *StopAnalyzer +} + +func (s *StopAnalyzer) StopAnalyzerCaster() *StopAnalyzer { + return s +} + +func (s *StopAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stoptokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stoptokenfilter.go new file mode 100644 index 000000000..327fe38e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stoptokenfilter.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StopTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L125-L136 +type StopTokenFilter struct { + // IgnoreCase If `true`, stop word matching is case insensitive. For example, if `true`, a + // stop word of `the` matches and removes `The`, `THE`, or `the`.
Defaults to + // `false`. + IgnoreCase *bool `json:"ignore_case,omitempty"` + // RemoveTrailing If `true`, the last token of a stream is removed if it’s a stop word. + // Defaults to `true`. + RemoveTrailing *bool `json:"remove_trailing,omitempty"` + // Stopwords Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. + Stopwords StopWords `json:"stopwords,omitempty"` + // StopwordsPath Path to a file that contains a list of stop words to remove. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each stop word in the file must be separated by a line + // break. + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *StopTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreCase", err) + } + s.IgnoreCase = &value + case bool: + s.IgnoreCase = &v + } + + case "remove_trailing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveTrailing", err) + } + s.RemoveTrailing = &value + case bool: + s.RemoveTrailing = &v + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StopTokenFilter) MarshalJSON() ([]byte, error) { + type innerStopTokenFilter StopTokenFilter + tmp := innerStopTokenFilter{ + IgnoreCase: s.IgnoreCase, + RemoveTrailing: s.RemoveTrailing, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stop" + + return json.Marshal(tmp) +} + +// NewStopTokenFilter returns a StopTokenFilter. +func NewStopTokenFilter() *StopTokenFilter { + r := &StopTokenFilter{} + + return r +} + +type StopTokenFilterVariant interface { + StopTokenFilterCaster() *StopTokenFilter +} + +func (s *StopTokenFilter) StopTokenFilterCaster() *StopTokenFilter { + return s +} + +func (s *StopTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopwords.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopwords.go new file mode 100644 index 000000000..97b66454e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stopwords.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// StopWords holds the union for the following types: +// +// stopwordlanguage.StopWordLanguage +// []string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/StopWords.ts#L60-L66 +type StopWords any + +type StopWordsVariant interface { + StopWordsCaster() *StopWords +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storage.go new file mode 100644 index 000000000..fee011627 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storage.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/storagetype" +) + +// Storage type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L547-L558 +type Storage struct { + // AllowMmap You can restrict the use of the mmapfs and the related hybridfs store type + // via the setting node.store.allow_mmap. + // This is a boolean setting indicating whether or not memory-mapping is + // allowed. The default is to allow it. This + // setting is useful, for example, if you are in an environment where you can + // not control the ability to create a lot + // of memory maps so you need to disable the ability to use memory-mapping.
+ AllowMmap *bool `json:"allow_mmap,omitempty"` + // StatsRefreshInterval How often store statistics are refreshed + StatsRefreshInterval Duration `json:"stats_refresh_interval,omitempty"` + Type storagetype.StorageType `json:"type"` +} + +func (s *Storage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_mmap": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowMmap", err) + } + s.AllowMmap = &value + case bool: + s.AllowMmap = &v + } + + case "stats_refresh_interval": + if err := dec.Decode(&s.StatsRefreshInterval); err != nil { + return fmt.Errorf("%s | %w", "StatsRefreshInterval", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// NewStorage returns a Storage. +func NewStorage() *Storage { + r := &Storage{} + + return r +} + +type StorageVariant interface { + StorageCaster() *Storage +} + +func (s *Storage) StorageCaster() *Storage { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storedscript.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storedscript.go new file mode 100644 index 000000000..28c12d8ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storedscript.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage" +) + +// StoredScript type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Scripting.ts#L51-L63 +type StoredScript struct { + // Lang The language the script is written in. + // For search templates, use `mustache`. + Lang scriptlanguage.ScriptLanguage `json:"lang"` + Options map[string]string `json:"options,omitempty"` + // Source The script source. + // For search templates, an object containing the search template. 
+ Source ScriptSource `json:"source"` +} + +func (s *StoredScript) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return fmt.Errorf("%s | %w", "Lang", err) + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewStoredScript returns a StoredScript. +func NewStoredScript() *StoredScript { + r := &StoredScript{ + Options: make(map[string]string), + } + + return r +} + +type StoredScriptVariant interface { + StoredScriptCaster() *StoredScript +} + +func (s *StoredScript) StoredScriptCaster() *StoredScript { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storestats.go new file mode 100644 index 000000000..c99e06f5b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/storestats.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StoreStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L398-L425 +type StoreStats struct { + // Reserved A prediction of how much larger the shard stores will eventually grow due to + // ongoing peer recoveries, restoring snapshots, and similar activities. + Reserved ByteSize `json:"reserved,omitempty"` + // ReservedInBytes A prediction, in bytes, of how much larger the shard stores will eventually + // grow due to ongoing peer recoveries, restoring snapshots, and similar + // activities. + ReservedInBytes int64 `json:"reserved_in_bytes"` + // Size Total size of all shards assigned to selected nodes. + Size ByteSize `json:"size,omitempty"` + // SizeInBytes Total size, in bytes, of all shards assigned to selected nodes. + SizeInBytes int64 `json:"size_in_bytes"` + // TotalDataSetSize Total data set size of all shards assigned to selected nodes. + // This includes the size of shards not stored fully on the nodes, such as the + // cache for partially mounted indices. + TotalDataSetSize ByteSize `json:"total_data_set_size,omitempty"` + // TotalDataSetSizeInBytes Total data set size, in bytes, of all shards assigned to selected nodes. + // This includes the size of shards not stored fully on the nodes, such as the + // cache for partially mounted indices. + TotalDataSetSizeInBytes *int64 `json:"total_data_set_size_in_bytes,omitempty"` +} + +func (s *StoreStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reserved": + if err := dec.Decode(&s.Reserved); err != nil { + return fmt.Errorf("%s | %w", "Reserved", err) + } + + case "reserved_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ReservedInBytes", err) + } + s.ReservedInBytes = value + case float64: + f := int64(v) + s.ReservedInBytes = f + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + case "total_data_set_size": + if err := dec.Decode(&s.TotalDataSetSize); err != nil { + return fmt.Errorf("%s | %w", "TotalDataSetSize", err) + } + + case "total_data_set_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDataSetSizeInBytes", err) + } + s.TotalDataSetSizeInBytes = &value + case float64: + f := int64(v) + s.TotalDataSetSizeInBytes = &f + } + + } + } + return nil +} + +// NewStoreStats returns a StoreStats. 
+func NewStoreStats() *StoreStats { + r := &StoreStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/streamresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/streamresult.go new file mode 100644 index 000000000..eb9311b02 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/streamresult.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// StreamResult type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Binary.ts#L24-L24 +type StreamResult []byte diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedboolean.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedboolean.go new file mode 100644 index 000000000..2346cc1e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedboolean.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Stringifiedboolean holds the union for the following types: +// +// bool +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifiedboolean any + +type StringifiedbooleanVariant interface { + StringifiedbooleanCaster() *Stringifiedboolean +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifieddouble.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifieddouble.go new file mode 100644 index 000000000..b8c835b6d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifieddouble.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Stringifieddouble holds the union for the following types: +// +// Float64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifieddouble any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitmillis.go new file mode 100644 index 000000000..25d9074b6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitmillis.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// StringifiedEpochTimeUnitMillis holds the union for the following types: +// +// int64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type StringifiedEpochTimeUnitMillis any + +type StringifiedEpochTimeUnitMillisVariant interface { + StringifiedEpochTimeUnitMillisCaster() *StringifiedEpochTimeUnitMillis +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitseconds.go new file mode 100644 index 000000000..1eabe7128 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedepochtimeunitseconds.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// StringifiedEpochTimeUnitSeconds holds the union for the following types: +// +// int64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type StringifiedEpochTimeUnitSeconds any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedinteger.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedinteger.go new file mode 100644 index 000000000..d56fa08eb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedinteger.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Stringifiedinteger holds the union for the following types: +// +// int +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifiedinteger any + +type StringifiedintegerVariant interface { + StringifiedintegerCaster() *Stringifiedinteger +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedversionnumber.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedversionnumber.go new file mode 100644 index 000000000..39d14b3e7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringifiedversionnumber.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// StringifiedVersionNumber holds the union for the following types: +// +// int64 +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_spec_utils/Stringified.ts#L20-L27 +type StringifiedVersionNumber any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsaggregate.go new file mode 100644 index 000000000..4027199ff --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// StringRareTermsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L483-L487 +type StringRareTermsAggregate struct { + Buckets BucketsStringRareTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *StringRareTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]StringRareTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []StringRareTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewStringRareTermsAggregate returns a StringRareTermsAggregate. +func NewStringRareTermsAggregate() *StringRareTermsAggregate { + r := &StringRareTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsbucket.go new file mode 100644 index 000000000..1564493c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringraretermsbucket.go @@ -0,0 +1,645 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// StringRareTermsBucket type. 
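+// Sub-aggregations arrive under "type#name" keys when the request uses the
+// typed_keys query parameter, and the decoder below switches on the type
+// prefix to pick the concrete Aggregate implementation. A sketch of the
+// bucket shape being parsed, with invented names and values:
+//
+//	{
+//	  "key": "some-rare-term",
+//	  "doc_count": 2,
+//	  "avg#avg_price": { "value": 12.5 }
+//	}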
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L489-L491 +type StringRareTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key string `json:"key"` +} + +func (s *StringRareTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", 
err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := 
NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if 
err := dec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "Aggregations", err)
+ }
+ s.Aggregations[elems[1]] = o
+
+ default:
+ o := make(map[string]any, 0)
+ if err := dec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "Aggregations", err)
+ }
+ s.Aggregations[elems[1]] = o
+ }
+ } else {
+ return errors.New("cannot decode JSON for field Aggregations")
+ }
+ } else {
+ o := make(map[string]any, 0)
+ if err := dec.Decode(&o); err != nil {
+ return fmt.Errorf("%s | %w", "Aggregations", err)
+ }
+ s.Aggregations[value] = o
+ }
+ }
+
+ }
+ }
+ return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s StringRareTermsBucket) MarshalJSON() ([]byte, error) {
+ type opt StringRareTermsBucket
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.Aggregations {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "Aggregations")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewStringRareTermsBucket returns a StringRareTermsBucket.
+func NewStringRareTermsBucket() *StringRareTermsBucket {
+ r := &StringRareTermsBucket{
+ Aggregations: make(map[string]Aggregate),
+ }
+
+ return r
+} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregate.go new file mode 100644 index 000000000..23856cc38 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregate.go @@ -0,0 +1,154 @@ +// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// StringStatsAggregate type.
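+// A representative (invented) payload this decoder accepts is sketched
+// below; "distribution" is only present when the request enabled
+// show_distribution:
+//
+//	{
+//	  "count": 5,
+//	  "min_length": 2,
+//	  "max_length": 9,
+//	  "avg_length": 5.2,
+//	  "entropy": 3.9,
+//	  "distribution": { "a": 0.15, "b": 0.05 }
+//	}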
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L795-L806 +type StringStatsAggregate struct { + AvgLength *Float64 `json:"avg_length,omitempty"` + AvgLengthAsString *string `json:"avg_length_as_string,omitempty"` + Count int64 `json:"count"` + Distribution *map[string]Float64 `json:"distribution,omitempty"` + Entropy *Float64 `json:"entropy,omitempty"` + MaxLength *int `json:"max_length,omitempty"` + MaxLengthAsString *string `json:"max_length_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + MinLength *int `json:"min_length,omitempty"` + MinLengthAsString *string `json:"min_length_as_string,omitempty"` +} + +func (s *StringStatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_length": + if err := dec.Decode(&s.AvgLength); err != nil { + return fmt.Errorf("%s | %w", "AvgLength", err) + } + + case "avg_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AvgLengthAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgLengthAsString = &o + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "distribution": + if err := dec.Decode(&s.Distribution); err != nil { + return fmt.Errorf("%s | %w", "Distribution", err) + } + + case "entropy": + if err := dec.Decode(&s.Entropy); err != nil { + return fmt.Errorf("%s | %w", "Entropy", err) + } + + case "max_length": + if err := dec.Decode(&s.MaxLength); err != nil { + return fmt.Errorf("%s | %w", "MaxLength", err) + } + + case "max_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxLengthAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxLengthAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "min_length": + if err := dec.Decode(&s.MinLength); err != nil { + return fmt.Errorf("%s | %w", "MinLength", err) + } + + case "min_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinLengthAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinLengthAsString = &o + + } + } + return nil +} + +// NewStringStatsAggregate returns a StringStatsAggregate. +func NewStringStatsAggregate() *StringStatsAggregate { + r := &StringStatsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregation.go new file mode 100644 index 000000000..5b74794dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringstatsaggregation.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StringStatsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L316-L322 +type StringStatsAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` + // ShowDistribution Shows the probability distribution for all characters. + ShowDistribution *bool `json:"show_distribution,omitempty"` +} + +func (s *StringStatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "show_distribution": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShowDistribution", err) + } + s.ShowDistribution = &value + case bool: + s.ShowDistribution = &v + } + + } + } + return nil +} + +// NewStringStatsAggregation returns a StringStatsAggregation. +func NewStringStatsAggregation() *StringStatsAggregation { + r := &StringStatsAggregation{} + + return r +} + +type StringStatsAggregationVariant interface { + StringStatsAggregationCaster() *StringStatsAggregation +} + +func (s *StringStatsAggregation) StringStatsAggregationCaster() *StringStatsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsaggregate.go new file mode 100644 index 000000000..a178e8ba0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsaggregate.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StringTermsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L424-L429 +type StringTermsAggregate struct { + Buckets BucketsStringTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` +} + +func (s *StringTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]StringTermsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []StringTermsBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "sum_other_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumOtherDocCount", err) + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f + } + + } + } + return nil +} + +// NewStringTermsAggregate returns a StringTermsAggregate. +func NewStringTermsAggregate() *StringTermsAggregate { + r := &StringTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsbucket.go new file mode 100644 index 000000000..e24047930 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stringtermsbucket.go @@ -0,0 +1,654 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// StringTermsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L435-L437 +type StringTermsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Key FieldValue `json:"key"` +} + +func (s *StringTermsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "key": + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != 
nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + 
case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := 
NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s StringTermsBucket) MarshalJSON() ([]byte, error) { + type opt StringTermsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewStringTermsBucket returns a StringTermsBucket. +func NewStringTermsBucket() *StringTermsBucket { + r := &StringTermsBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stupidbackoffsmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stupidbackoffsmoothingmodel.go new file mode 100644 index 000000000..a62021557 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/stupidbackoffsmoothingmodel.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// StupidBackoffSmoothingModel type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L464-L469 +type StupidBackoffSmoothingModel struct { + // Discount A constant factor that the lower order n-gram model is discounted by. + Discount Float64 `json:"discount"` +} + +func (s *StupidBackoffSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "discount": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Discount", err) + } + f := Float64(value) + s.Discount = f + case float64: + f := Float64(v) + s.Discount = f + } + + } + } + return nil +} + +// NewStupidBackoffSmoothingModel returns a StupidBackoffSmoothingModel. +func NewStupidBackoffSmoothingModel() *StupidBackoffSmoothingModel { + r := &StupidBackoffSmoothingModel{} + + return r +} + +type StupidBackoffSmoothingModelVariant interface { + StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel +} + +func (s *StupidBackoffSmoothingModel) StupidBackoffSmoothingModelCaster() *StupidBackoffSmoothingModel { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggest.go new file mode 100644 index 000000000..482b4a1c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggest.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Suggest holds the union for the following types: +// +// CompletionSuggest +// PhraseSuggest +// TermSuggest +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L35-L41 +type Suggest any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestcontext.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestcontext.go new file mode 100644 index 000000000..2c2944f57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestcontext.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SuggestContext type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L43-L48 +type SuggestContext struct { + Name string `json:"name"` + Path *string `json:"path,omitempty"` + Precision *string `json:"precision,omitempty"` + Type string `json:"type"` +} + +func (s *SuggestContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + case "precision": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Precision", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Precision = &o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewSuggestContext returns a SuggestContext. 
+func NewSuggestContext() *SuggestContext { + r := &SuggestContext{} + + return r +} + +type SuggestContextVariant interface { + SuggestContextCaster() *SuggestContext +} + +func (s *SuggestContext) SuggestContextCaster() *SuggestContext { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggester.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggester.go new file mode 100644 index 000000000..36c7f4016 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggester.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Suggester type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L102-L108 +type Suggester struct { + Suggesters map[string]FieldSuggester `json:"-"` + // Text Global suggest text, to avoid repetition when the same text is used in + // several suggesters + Text *string `json:"text,omitempty"` +} + +func (s *Suggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + default: + + if key, ok := t.(string); ok { + if s.Suggesters == nil { + s.Suggesters = make(map[string]FieldSuggester, 0) + } + raw := NewFieldSuggester() + if err := dec.Decode(&raw); err != nil { + return fmt.Errorf("%s | %w", "Suggesters", err) + } + s.Suggesters[key] = *raw + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s Suggester) MarshalJSON() ([]byte, error) { + type opt Suggester + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Suggesters { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Suggesters") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + 
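Suggester above is the clearest instance of the "additional properties" pattern these generated types use throughout: on decode, every JSON key other than "text" falls through to the default branch and is routed into the Suggesters map; on encode, MarshalJSON inlines the map entries back as top-level keys. A minimal round-trip sketch, not part of the vendored diff — it assumes only the v9 typedapi/types import path this change introduces, and deliberately uses an empty suggester body so nothing about FieldSuggester's internals is assumed:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "text" is decoded into the Text field; the unknown key "my-suggest"
	// falls through to the default branch and lands in the Suggesters map.
	payload := []byte(`{"text":"kitchen sinc","my-suggest":{}}`)

	var s types.Suggester
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	fmt.Println(*s.Text, len(s.Suggesters)) // kitchen sinc 1

	// MarshalJSON flattens the map back out, so the round trip preserves
	// the original top-level shape.
	out, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

The json:"-" tag on Suggesters is what makes this work: the inner marshal of the aliased struct skips the map entirely, and the override re-attaches each named suggester at the top level.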
+// NewSuggester returns a Suggester. +func NewSuggester() *Suggester { + r := &Suggester{ + Suggesters: make(map[string]FieldSuggester), + } + + return r +} + +type SuggesterVariant interface { + SuggesterCaster() *Suggester +} + +func (s *Suggester) SuggesterCaster() *Suggester { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestfuzziness.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestfuzziness.go new file mode 100644 index 000000000..95ff3acc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/suggestfuzziness.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SuggestFuzziness type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L197-L225 +type SuggestFuzziness struct { + // Fuzziness The fuzziness factor. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // MinLength Minimum length of the input before fuzzy suggestions are returned. + MinLength *int `json:"min_length,omitempty"` + // PrefixLength Minimum length of the input, which is not checked for fuzzy alternatives. + PrefixLength *int `json:"prefix_length,omitempty"` + // Transpositions If set to `true`, transpositions are counted as one change instead of two. + Transpositions *bool `json:"transpositions,omitempty"` + // UnicodeAware If `true`, all measurements (like fuzzy edit distance, transpositions, and + // lengths) are measured in Unicode code points instead of in bytes. + // This is slightly slower than raw bytes. 
+ UnicodeAware *bool `json:"unicode_aware,omitempty"` +} + +func (s *SuggestFuzziness) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return fmt.Errorf("%s | %w", "Fuzziness", err) + } + + case "min_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinLength", err) + } + s.MinLength = &value + case float64: + f := int(v) + s.MinLength = &f + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "transpositions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Transpositions", err) + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "unicode_aware": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "UnicodeAware", err) + } + s.UnicodeAware = &value + case bool: + s.UnicodeAware = &v + } + + } + } + return nil +} + +// NewSuggestFuzziness returns a SuggestFuzziness. +func NewSuggestFuzziness() *SuggestFuzziness { + r := &SuggestFuzziness{} + + return r +} + +type SuggestFuzzinessVariant interface { + SuggestFuzzinessCaster() *SuggestFuzziness +} + +func (s *SuggestFuzziness) SuggestFuzzinessCaster() *SuggestFuzziness { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregate.go new file mode 100644 index 000000000..fd7505e9a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SumAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L211-L216 +type SumAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *SumAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewSumAggregate returns a SumAggregate. +func NewSumAggregate() *SumAggregate { + r := &SumAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregation.go new file mode 100644 index 000000000..dbf2e3dd4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SumAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L324-L324 +type SumAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *SumAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewSumAggregation returns a SumAggregation. +func NewSumAggregation() *SumAggregation { + r := &SumAggregation{} + + return r +} + +type SumAggregationVariant interface { + SumAggregationCaster() *SumAggregation +} + +func (s *SumAggregation) SumAggregationCaster() *SumAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumbucketaggregation.go new file mode 100644 index 000000000..2ff05fe8a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/sumbucketaggregation.go @@ -0,0 +1,103 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy" +) + +// SumBucketAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/pipeline.ts#L412-L415 +type SumBucketAggregation struct { + // BucketsPath Path to the buckets that contain one set of values to correlate. + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` +} + +func (s *SumBucketAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets_path": + if err := dec.Decode(&s.BucketsPath); err != nil { + return fmt.Errorf("%s | %w", "BucketsPath", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return fmt.Errorf("%s | %w", "GapPolicy", err) + } + + } + } + return nil +} + +// NewSumBucketAggregation returns a SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + r := &SumBucketAggregation{} + + return r +} + +type SumBucketAggregationVariant interface { + SumBucketAggregationCaster() *SumBucketAggregation +} + +func (s *SumBucketAggregation) SumBucketAggregationCaster() *SumBucketAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summary.go new file mode 100644 index 000000000..df6c94c29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summary.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/policytype" +) + +// Summary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/enrich/_types/Policy.ts#L24-L26 +type Summary struct { + Config map[policytype.PolicyType]EnrichPolicy `json:"config"` +} + +// NewSummary returns a Summary. +func NewSummary() *Summary { + r := &Summary{ + Config: make(map[policytype.PolicyType]EnrichPolicy), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summaryinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summaryinfo.go new file mode 100644 index 000000000..47a103f1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/summaryinfo.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L193-L202 +type SummaryInfo struct { + // Read A collection of statistics that summarise the results of the read operations + // in the test. + Read ReadSummaryInfo `json:"read"` + // Write A collection of statistics that summarise the results of the write operations + // in the test. + Write WriteSummaryInfo `json:"write"` +} + +// NewSummaryInfo returns a SummaryInfo. +func NewSummaryInfo() *SummaryInfo { + r := &SummaryInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/swedishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/swedishanalyzer.go new file mode 100644 index 000000000..59b4de1ac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/swedishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SwedishAnalyzer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L303-L308 +type SwedishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *SwedishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling to include the literal value +func (s SwedishAnalyzer) MarshalJSON() ([]byte, error) { + type innerSwedishAnalyzer SwedishAnalyzer + tmp := innerSwedishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "swedish" + + return json.Marshal(tmp) +} + +// NewSwedishAnalyzer returns a SwedishAnalyzer. +func NewSwedishAnalyzer() *SwedishAnalyzer { + r := &SwedishAnalyzer{} + + return r +} + +type SwedishAnalyzerVariant interface { + SwedishAnalyzerCaster() *SwedishAnalyzer +} + +func (s *SwedishAnalyzer) SwedishAnalyzerCaster() *SwedishAnalyzer { + return s +} + +func (s *SwedishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synccontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synccontainer.go new file mode 100644 index 000000000..ddcffcb5d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synccontainer.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// SyncContainer type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L169-L175 +type SyncContainer struct { + AdditionalSyncContainerProperty map[string]json.RawMessage `json:"-"` + // Time Specifies that the transform uses a time field to synchronize the source and + // destination indices. + Time *TimeSync `json:"time,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s SyncContainer) MarshalJSON() ([]byte, error) { + type opt SyncContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalSyncContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalSyncContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewSyncContainer returns a SyncContainer. +func NewSyncContainer() *SyncContainer { + r := &SyncContainer{ + AdditionalSyncContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type SyncContainerVariant interface { + SyncContainerCaster() *SyncContainer +} + +func (s *SyncContainer) SyncContainerCaster() *SyncContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncjobconnectorreference.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncjobconnectorreference.go new file mode 100644 index 000000000..e23199407 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncjobconnectorreference.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SyncJobConnectorReference type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/SyncJob.ts#L31-L40 +type SyncJobConnectorReference struct { + Configuration ConnectorConfiguration `json:"configuration"` + Filtering FilteringRules `json:"filtering"` + Id string `json:"id"` + IndexName string `json:"index_name"` + Language *string `json:"language,omitempty"` + Pipeline *IngestPipelineParams `json:"pipeline,omitempty"` + ServiceType string `json:"service_type"` + SyncCursor json.RawMessage `json:"sync_cursor,omitempty"` +} + +func (s *SyncJobConnectorReference) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "configuration": + if err := dec.Decode(&s.Configuration); err != nil { + return fmt.Errorf("%s | %w", "Configuration", err) + } + + case "filtering": + if err := dec.Decode(&s.Filtering); err != nil { + return fmt.Errorf("%s | %w", "Filtering", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexName = o + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Language", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "pipeline": + if err := dec.Decode(&s.Pipeline); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + + case "service_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceType = o + + case "sync_cursor": + if err := dec.Decode(&s.SyncCursor); err != nil { + return fmt.Errorf("%s | %w", "SyncCursor", err) + } + + } + } + return nil +} + +// NewSyncJobConnectorReference returns a SyncJobConnectorReference. +func NewSyncJobConnectorReference() *SyncJobConnectorReference { + r := &SyncJobConnectorReference{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncrulesfeature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncrulesfeature.go new file mode 100644 index 000000000..daa309b68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/syncrulesfeature.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// SyncRulesFeature type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L219-L228 +type SyncRulesFeature struct { + // Advanced Indicates whether advanced sync rules are enabled. + Advanced *FeatureEnabled `json:"advanced,omitempty"` + // Basic Indicates whether basic sync rules are enabled. + Basic *FeatureEnabled `json:"basic,omitempty"` +} + +// NewSyncRulesFeature returns a SyncRulesFeature. +func NewSyncRulesFeature() *SyncRulesFeature { + r := &SyncRulesFeature{} + + return r +} + +type SyncRulesFeatureVariant interface { + SyncRulesFeatureCaster() *SyncRulesFeature +} + +func (s *SyncRulesFeature) SyncRulesFeatureCaster() *SyncRulesFeature { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymgraphtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymgraphtokenfilter.go new file mode 100644 index 000000000..232452713 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymgraphtokenfilter.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat" +) + +// SynonymGraphTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L163-L165 +type SynonymGraphTokenFilter struct { + // Expand Expands definitions for equivalent synonym rules. Defaults to `true`. + Expand *bool `json:"expand,omitempty"` + // Format Sets the synonym rules format. + Format *synonymformat.SynonymFormat `json:"format,omitempty"` + // Lenient If `true` ignores errors while parsing the synonym rules. It is important to + // note that only those synonym rules which cannot get parsed are ignored. + // Defaults to the value of the `updateable` setting. + Lenient *bool `json:"lenient,omitempty"` + // Synonyms Used to define inline synonyms. + Synonyms []string `json:"synonyms,omitempty"` + // SynonymsPath Used to provide a synonym file. This path must be absolute or relative to the + // `config` location. 
+ SynonymsPath *string `json:"synonyms_path,omitempty"` + // SynonymsSet Provide a synonym set created via Synonyms Management APIs. + SynonymsSet *string `json:"synonyms_set,omitempty"` + // Tokenizer Controls the tokenizers that will be used to tokenize the synonym; this + // parameter is for backwards compatibility with indices created before 6.0. + Tokenizer *string `json:"tokenizer,omitempty"` + Type string `json:"type,omitempty"` + // Updateable If `true` allows reloading search analyzers to pick up changes to synonym + // files. Only to be used for search analyzers. Defaults to `false`. + Updateable *bool `json:"updateable,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SynonymGraphTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expand": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Expand", err) + } + s.Expand = &value + case bool: + s.Expand = &v + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + + case "lenient": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return fmt.Errorf("%s | %w", "Synonyms", err) + } + + case "synonyms_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SynonymsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsPath = &o + + case "synonyms_set": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsSet = &o + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tokenizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "updateable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Updateable", err) + } + s.Updateable = &value + case bool: + s.Updateable = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON overrides marshalling to include the literal value +func (s SynonymGraphTokenFilter) MarshalJSON() ([]byte, error) { + type innerSynonymGraphTokenFilter SynonymGraphTokenFilter + tmp := innerSynonymGraphTokenFilter{ + Expand: s.Expand, + Format: s.Format, + Lenient: s.Lenient, + Synonyms: s.Synonyms, + SynonymsPath: s.SynonymsPath, + SynonymsSet: s.SynonymsSet, + Tokenizer: s.Tokenizer, + Type: s.Type, + Updateable: s.Updateable, + Version: s.Version, + } + + tmp.Type = "synonym_graph" + + return json.Marshal(tmp) +} + 
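The MarshalJSON override just above illustrates the second recurring pattern in these generated types: the type discriminator is forced to its literal value ("synonym_graph" here) on the way out, so callers never set Type themselves and even a zero-value filter serialises with the correct tag. A minimal sketch, not part of the vendored diff — the ptr helper is ad hoc, and only the v9 import path introduced by this change is assumed:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// ptr is a small ad-hoc helper; the typedapi types use pointers for
// optional fields.
func ptr[T any](v T) *T { return &v }

func main() {
	f := types.NewSynonymGraphTokenFilter()
	f.SynonymsSet = ptr("my-synonyms") // reference a managed synonyms set by name
	f.Updateable = ptr(true)           // allow live reloads in search analyzers

	// Type was never assigned, but MarshalJSON injects "synonym_graph".
	out, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"synonyms_set":"my-synonyms","type":"synonym_graph","updateable":true}
}

Because the override copies every field into an alias type before marshalling, the omitempty tags still apply; only the discriminator is overwritten.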
+// NewSynonymGraphTokenFilter returns a SynonymGraphTokenFilter. +func NewSynonymGraphTokenFilter() *SynonymGraphTokenFilter { + r := &SynonymGraphTokenFilter{} + + return r +} + +type SynonymGraphTokenFilterVariant interface { + SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter +} + +func (s *SynonymGraphTokenFilter) SynonymGraphTokenFilterCaster() *SynonymGraphTokenFilter { + return s +} + +func (s *SynonymGraphTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymrule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymrule.go new file mode 100644 index 000000000..b489d6167 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymrule.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SynonymRule type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/_types/SynonymRule.ts#L26-L37 +type SynonymRule struct { + // Id The identifier for the synonym rule. + // If you do not specify a synonym rule ID when you create a rule, an identifier + // is created automatically by Elasticsearch. + Id *string `json:"id,omitempty"` + // Synonyms The synonyms that make up the synonym rule, in Solr format. + Synonyms string `json:"synonyms"` +} + +func (s *SynonymRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return fmt.Errorf("%s | %w", "Synonyms", err) + } + + } + } + return nil +} + +// NewSynonymRule returns a SynonymRule. 
+func NewSynonymRule() *SynonymRule { + r := &SynonymRule{} + + return r +} + +type SynonymRuleVariant interface { + SynonymRuleCaster() *SynonymRule +} + +func (s *SynonymRule) SynonymRuleCaster() *SynonymRule { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymruleread.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymruleread.go new file mode 100644 index 000000000..241179478 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymruleread.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// SynonymRuleRead type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/_types/SynonymRule.ts#L40-L50 +type SynonymRuleRead struct { + // Id Synonym Rule identifier + Id string `json:"id"` + // Synonyms Synonyms, in Solr format, that make up the synonym rule. + Synonyms string `json:"synonyms"` +} + +func (s *SynonymRuleRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return fmt.Errorf("%s | %w", "Synonyms", err) + } + + } + } + return nil +} + +// NewSynonymRuleRead returns a SynonymRuleRead. +func NewSynonymRuleRead() *SynonymRuleRead { + r := &SynonymRuleRead{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymssetitem.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymssetitem.go new file mode 100644 index 000000000..45ef0f08a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymssetitem.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SynonymsSetItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L36-L45 +type SynonymsSetItem struct { + // Count Number of synonym rules that the synonym set contains + Count int `json:"count"` + // SynonymsSet Synonyms set identifier + SynonymsSet string `json:"synonyms_set"` +} + +func (s *SynonymsSetItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "synonyms_set": + if err := dec.Decode(&s.SynonymsSet); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + + } + } + return nil +} + +// NewSynonymsSetItem returns a SynonymsSetItem. +func NewSynonymsSetItem() *SynonymsSetItem { + r := &SynonymsSetItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymsstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymsstats.go new file mode 100644 index 000000000..d8b1800a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymsstats.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// SynonymsStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/stats/types.ts#L354-L357 +type SynonymsStats struct { + Count int `json:"count"` + IndexCount int `json:"index_count"` +} + +func (s *SynonymsStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "index_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexCount", err) + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + } + } + return nil +} + +// NewSynonymsStats returns a SynonymsStats. +func NewSynonymsStats() *SynonymsStats { + r := &SynonymsStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymtokenfilter.go new file mode 100644 index 000000000..c9dcc3f8e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/synonymtokenfilter.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat" +) + +// SynonymTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L167-L169 +type SynonymTokenFilter struct { + // Expand Expands definitions for equivalent synonym rules. Defaults to `true`. + Expand *bool `json:"expand,omitempty"` + // Format Sets the synonym rules format. + Format *synonymformat.SynonymFormat `json:"format,omitempty"` + // Lenient If `true` ignores errors while parsing the synonym rules. It is important to + // note that only those synonym rules which cannot get parsed are ignored. + // Defaults to the value of the `updateable` setting. + Lenient *bool `json:"lenient,omitempty"` + // Synonyms Used to define inline synonyms. 
+ Synonyms []string `json:"synonyms,omitempty"` + // SynonymsPath Used to provide a synonym file. This path must be absolute or relative to the + // `config` location. + SynonymsPath *string `json:"synonyms_path,omitempty"` + // SynonymsSet Provide a synonym set created via Synonyms Management APIs. + SynonymsSet *string `json:"synonyms_set,omitempty"` + // Tokenizer Controls the tokenizers that will be used to tokenize the synonym, this + // parameter is for backwards compatibility for indices that created before 6.0. + Tokenizer *string `json:"tokenizer,omitempty"` + Type string `json:"type,omitempty"` + // Updateable If `true` allows reloading search analyzers to pick up changes to synonym + // files. Only to be used for search analyzers. Defaults to `false`. + Updateable *bool `json:"updateable,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *SynonymTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expand": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Expand", err) + } + s.Expand = &value + case bool: + s.Expand = &v + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + + case "lenient": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Lenient", err) + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return fmt.Errorf("%s | %w", "Synonyms", err) + } + + case "synonyms_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SynonymsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsPath = &o + + case "synonyms_set": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SynonymsSet", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsSet = &o + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tokenizer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "updateable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Updateable", err) + } + s.Updateable = &value + case bool: + s.Updateable = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SynonymTokenFilter) MarshalJSON() ([]byte, error) { + type innerSynonymTokenFilter SynonymTokenFilter + tmp := innerSynonymTokenFilter{ + Expand: s.Expand, + Format: s.Format, + Lenient: s.Lenient, + Synonyms: s.Synonyms, + SynonymsPath: s.SynonymsPath, + SynonymsSet: s.SynonymsSet, + Tokenizer: 
s.Tokenizer, + Type: s.Type, + Updateable: s.Updateable, + Version: s.Version, + } + + tmp.Type = "synonym" + + return json.Marshal(tmp) +} + +// NewSynonymTokenFilter returns a SynonymTokenFilter. +func NewSynonymTokenFilter() *SynonymTokenFilter { + r := &SynonymTokenFilter{} + + return r +} + +type SynonymTokenFilterVariant interface { + SynonymTokenFilterCaster() *SynonymTokenFilter +} + +func (s *SynonymTokenFilter) SynonymTokenFilterCaster() *SynonymTokenFilter { + return s +} + +func (s *SynonymTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluescontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluescontainer.go new file mode 100644 index 000000000..c01d5432d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluescontainer.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// TableValuesContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/TableValuesContainer.ts#L22-L28 +type TableValuesContainer struct { + AdditionalTableValuesContainerProperty map[string]json.RawMessage `json:"-"` + Float64 [][]Float64 `json:"double,omitempty"` + Int [][]int `json:"integer,omitempty"` + Int64 [][]int64 `json:"long,omitempty"` + Keyword [][]string `json:"keyword,omitempty"` +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TableValuesContainer) MarshalJSON() ([]byte, error) { + type opt TableValuesContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTableValuesContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTableValuesContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewTableValuesContainer returns a TableValuesContainer. 
+func NewTableValuesContainer() *TableValuesContainer { + r := &TableValuesContainer{ + AdditionalTableValuesContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type TableValuesContainerVariant interface { + TableValuesContainerCaster() *TableValuesContainer +} + +func (s *TableValuesContainer) TableValuesContainerCaster() *TableValuesContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluesintegervalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluesintegervalue.go new file mode 100644 index 000000000..4869d1144 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevaluesintegervalue.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TableValuesIntegerValue type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/TableValuesContainer.ts#L30-L30 +type TableValuesIntegerValue []int + +type TableValuesIntegerValueVariant interface { + TableValuesIntegerValueCaster() *TableValuesIntegerValue +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueskeywordvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueskeywordvalue.go new file mode 100644 index 000000000..5aa53dac1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueskeywordvalue.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TableValuesKeywordValue type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/TableValuesContainer.ts#L31-L31 +type TableValuesKeywordValue []string + +type TableValuesKeywordValueVariant interface { + TableValuesKeywordValueCaster() *TableValuesKeywordValue +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongdouble.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongdouble.go new file mode 100644 index 000000000..e1727f984 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongdouble.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TableValuesLongDouble type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/TableValuesContainer.ts#L33-L33 +type TableValuesLongDouble []Float64 + +type TableValuesLongDoubleVariant interface { + TableValuesLongDoubleCaster() *TableValuesLongDouble +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongvalue.go new file mode 100644 index 000000000..f71392cc6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tablevalueslongvalue.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TableValuesLongValue type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/esql/_types/TableValuesContainer.ts#L32-L32 +type TableValuesLongValue []int64 + +type TableValuesLongValueVariant interface { + TableValuesLongValueCaster() *TableValuesLongValue +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/targetmeanencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/targetmeanencodingpreprocessor.go new file mode 100644 index 000000000..6753a1b6e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/targetmeanencodingpreprocessor.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TargetMeanEncodingPreprocessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L49-L54 +type TargetMeanEncodingPreprocessor struct { + DefaultValue Float64 `json:"default_value"` + FeatureName string `json:"feature_name"` + Field string `json:"field"` + TargetMap map[string]Float64 `json:"target_map"` +} + +func (s *TargetMeanEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DefaultValue", err) + } + f := Float64(value) + s.DefaultValue = f + case float64: + f := Float64(v) + s.DefaultValue = f + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "target_map": + if s.TargetMap == nil { + s.TargetMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.TargetMap); err != nil { + return fmt.Errorf("%s | %w", "TargetMap", err) + } + + } + } + return nil +} + +// NewTargetMeanEncodingPreprocessor returns a TargetMeanEncodingPreprocessor. +func NewTargetMeanEncodingPreprocessor() *TargetMeanEncodingPreprocessor { + r := &TargetMeanEncodingPreprocessor{ + TargetMap: make(map[string]Float64), + } + + return r +} + +type TargetMeanEncodingPreprocessorVariant interface { + TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor +} + +func (s *TargetMeanEncodingPreprocessor) TargetMeanEncodingPreprocessorCaster() *TargetMeanEncodingPreprocessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskfailure.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskfailure.go new file mode 100644 index 000000000..a7bd104ba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskfailure.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TaskFailure type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Errors.ts#L67-L72 +type TaskFailure struct { + NodeId string `json:"node_id"` + Reason ErrorCause `json:"reason"` + Status string `json:"status"` + TaskId int64 `json:"task_id"` +} + +func (s *TaskFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = o + + case "task_id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TaskId", err) + } + s.TaskId = value + case float64: + f := int64(v) + s.TaskId = f + } + + } + } + return nil +} + +// NewTaskFailure returns a TaskFailure. +func NewTaskFailure() *TaskFailure { + r := &TaskFailure{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfo.go new file mode 100644 index 000000000..b3c5ba946 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfo.go @@ -0,0 +1,212 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TaskInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/_types/TaskInfo.ts#L32-L58 +type TaskInfo struct { + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Cancelled *bool `json:"cancelled,omitempty"` + // Description Human readable text that identifies the particular request that the task is + // performing. 
+ // For example, it might identify the search request being performed by a search + // task. + // Other kinds of tasks have different descriptions, like `_reindex` which has + // the source and the destination, or `_bulk` which just has the number of + // requests and the destination indices. + // Many requests will have only an empty description because more detailed + // information about the request is not easily available or particularly helpful + // in identifying the request. + Description *string `json:"description,omitempty"` + Headers map[string]string `json:"headers"` + Id int64 `json:"id"` + Node string `json:"node"` + ParentTaskId *string `json:"parent_task_id,omitempty"` + RunningTime Duration `json:"running_time,omitempty"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + // Status The internal status of the task, which varies from task to task. + // The format also varies. + // While the goal is to keep the status for a particular task consistent from + // version to version, this is not always possible because sometimes the + // implementation changes. + // Fields might be removed from the status for a particular request so any + // parsing you do of the status might break in minor releases. + Status json.RawMessage `json:"status,omitempty"` + Type string `json:"type"` +} + +func (s *TaskInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cancellable", err) + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "cancelled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Cancelled", err) + } + s.Cancelled = &value + case bool: + s.Cancelled = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "id": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "parent_task_id": + if err := dec.Decode(&s.ParentTaskId); err != nil { + return fmt.Errorf("%s | %w", "ParentTaskId", err) + } + + case "running_time": + if err := dec.Decode(&s.RunningTime); err != nil { + return fmt.Errorf("%s | %w", "RunningTime", err) + } + + case "running_time_in_nanos": + if err := 
dec.Decode(&s.RunningTimeInNanos); err != nil { + return fmt.Errorf("%s | %w", "RunningTimeInNanos", err) + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "StartTimeInMillis", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewTaskInfo returns a TaskInfo. +func NewTaskInfo() *TaskInfo { + r := &TaskInfo{ + Headers: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfos.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfos.go new file mode 100644 index 000000000..436f60ac1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/taskinfos.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TaskInfos holds the union for the following types: +// +// []TaskInfo +// map[string]ParentTaskInfo +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 +type TaskInfos any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tasksrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tasksrecord.go new file mode 100644 index 000000000..263402a61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tasksrecord.go @@ -0,0 +1,259 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TasksRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/tasks/types.ts#L22-L101 +type TasksRecord struct { + // Action The task action. + Action *string `json:"action,omitempty"` + // Description The task action description. + Description *string `json:"description,omitempty"` + // Id The identifier of the task with the node. + Id *string `json:"id,omitempty"` + // Ip The IP address for the node. + Ip *string `json:"ip,omitempty"` + // Node The node name. + Node *string `json:"node,omitempty"` + // NodeId The unique node identifier. + NodeId *string `json:"node_id,omitempty"` + // ParentTaskId The parent task identifier. + ParentTaskId *string `json:"parent_task_id,omitempty"` + // Port The bound transport port for the node. + Port *string `json:"port,omitempty"` + // RunningTime The running time. + RunningTime *string `json:"running_time,omitempty"` + // RunningTimeNs The running time in nanoseconds. + RunningTimeNs *string `json:"running_time_ns,omitempty"` + // StartTime The start time in milliseconds. + StartTime *string `json:"start_time,omitempty"` + // TaskId The unique task identifier. + TaskId *string `json:"task_id,omitempty"` + // Timestamp The start time in `HH:MM:SS` format. + Timestamp *string `json:"timestamp,omitempty"` + // Type The task type. + Type *string `json:"type,omitempty"` + // Version The Elasticsearch version. + Version *string `json:"version,omitempty"` + // XOpaqueId The X-Opaque-ID header. 
+ XOpaqueId *string `json:"x_opaque_id,omitempty"` +} + +func (s *TasksRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action", "ac": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Action", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = &o + + case "description", "desc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "node_id", "ni": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "parent_task_id", "pti": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ParentTaskId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ParentTaskId = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "running_time", "time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RunningTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RunningTime = &o + + case "running_time_ns": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RunningTimeNs", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RunningTimeNs = &o + + case "start_time", "start": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StartTime = &o + + case "task_id", "ti": + if err := dec.Decode(&s.TaskId); err != nil { + return fmt.Errorf("%s | %w", "TaskId", err) + } + + case "timestamp", "ts", "hms", "hhmmss": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timestamp = &o + + case "type", "ty": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + 
if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "x_opaque_id", "x": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "XOpaqueId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XOpaqueId = &o + + } + } + return nil +} + +// NewTasksRecord returns a TasksRecord. +func NewTasksRecord() *TasksRecord { + r := &TasksRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigest.go new file mode 100644 index 000000000..b6adc772e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigest.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint" +) + +// TDigest type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L244-L255 +type TDigest struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. + Compression *int `json:"compression,omitempty"` + // ExecutionHint The default implementation of TDigest is optimized for performance, scaling + // to millions or even billions of sample values while maintaining acceptable + // accuracy levels (close to 1% relative error for millions of samples in some + // cases). + // To use an implementation optimized for accuracy, set this parameter to + // high_accuracy instead. 
+ ExecutionHint *tdigestexecutionhint.TDigestExecutionHint `json:"execution_hint,omitempty"` +} + +func (s *TDigest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Compression", err) + } + s.Compression = &value + case float64: + f := int(v) + s.Compression = &f + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + } + } + return nil +} + +// NewTDigest returns a TDigest. +func NewTDigest() *TDigest { + r := &TDigest{} + + return r +} + +type TDigestVariant interface { + TDigestCaster() *TDigest +} + +func (s *TDigest) TDigestCaster() *TDigest { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentileranksaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentileranksaggregate.go new file mode 100644 index 000000000..e12a12a94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentileranksaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TDigestPercentileRanksAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L177-L178 +type TDigestPercentileRanksAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *TDigestPercentileRanksAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + } + + } + } + return nil +} + +// NewTDigestPercentileRanksAggregate returns a TDigestPercentileRanksAggregate. +func NewTDigestPercentileRanksAggregate() *TDigestPercentileRanksAggregate { + r := &TDigestPercentileRanksAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentilesaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentilesaggregate.go new file mode 100644 index 000000000..86417e8e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tdigestpercentilesaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TDigestPercentilesAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L174-L175 +type TDigestPercentilesAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *TDigestPercentilesAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Values", err) + } + s.Values = o + } + + } + } + return nil +} + +// NewTDigestPercentilesAggregate returns a TDigestPercentilesAggregate. +func NewTDigestPercentilesAggregate() *TDigestPercentilesAggregate { + r := &TDigestPercentilesAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/template.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/template.go new file mode 100644 index 000000000..227890b09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/template.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Template type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 +type Template struct { + Aliases map[string]Alias `json:"aliases"` + Mappings TypeMapping `json:"mappings"` + Settings IndexSettings `json:"settings"` +} + +// NewTemplate returns a Template. 
+func NewTemplate() *Template { + r := &Template{ + Aliases: make(map[string]Alias), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templateconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templateconfig.go new file mode 100644 index 000000000..582027260 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templateconfig.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TemplateConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/msearch_template/types.ts#L29-L55 +type TemplateConfig struct { + // Explain If `true`, returns detailed information about score calculation as part of + // each hit. + Explain *bool `json:"explain,omitempty"` + // Id The ID of the search template to use. If no `source` is specified, + // this parameter is required. + Id *string `json:"id,omitempty"` + // Params Key-value pairs used to replace Mustache variables in the template. + // The key is the variable name. + // The value is the variable value. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Profile If `true`, the query execution is profiled. + Profile *bool `json:"profile,omitempty"` + // Source An inline search template. Supports the same parameters as the search API's + // request body. It also supports Mustache variables. If no `id` is specified, + // this + // parameter is required. 
+ Source ScriptSource `json:"source,omitempty"` +} + +func (s *TemplateConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "profile": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Profile", err) + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source", err) + } + + switch t { + + case "aggregations", "collapse", "docvalue_fields", "explain", "ext", "fields", "from", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "_source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version": + o := NewSearchRequestBody() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + s.Source = o + break source_field + + } + } + if s.Source == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + } + + } + } + return nil +} + +// NewTemplateConfig returns a TemplateConfig. +func NewTemplateConfig() *TemplateConfig { + r := &TemplateConfig{ + Params: make(map[string]json.RawMessage), + } + + return r +} + +type TemplateConfigVariant interface { + TemplateConfigCaster() *TemplateConfig +} + +func (s *TemplateConfig) TemplateConfigCaster() *TemplateConfig { + return s +} + +func (s *TemplateConfig) RequestItemCaster() *RequestItem { + o := RequestItem(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatemapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatemapping.go new file mode 100644 index 000000000..2b5216762 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatemapping.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TemplateMapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/TemplateMapping.ts#L27-L34 +type TemplateMapping struct { + Aliases map[string]Alias `json:"aliases"` + IndexPatterns []string `json:"index_patterns"` + Mappings TypeMapping `json:"mappings"` + Order int `json:"order"` + Settings map[string]json.RawMessage `json:"settings"` + Version *int64 `json:"version,omitempty"` +} + +func (s *TemplateMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return fmt.Errorf("%s | %w", "Aliases", err) + } + + case "index_patterns": + if err := dec.Decode(&s.IndexPatterns); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return fmt.Errorf("%s | %w", "Mappings", err) + } + + case "order": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = value + case float64: + f := int(v) + s.Order = f + } + + case "settings": + if s.Settings == nil { + s.Settings = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewTemplateMapping returns a TemplateMapping. +func NewTemplateMapping() *TemplateMapping { + r := &TemplateMapping{ + Aliases: make(map[string]Alias), + Settings: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatesrecord.go new file mode 100644 index 000000000..7f782e863 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/templatesrecord.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TemplatesRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/templates/types.ts#L22-L48 +type TemplatesRecord struct { + // ComposedOf The component templates that comprise the index template. + ComposedOf *string `json:"composed_of,omitempty"` + // IndexPatterns The template index patterns. + IndexPatterns *string `json:"index_patterns,omitempty"` + // Name The template name. + Name *string `json:"name,omitempty"` + // Order The template application order or priority number. + Order *string `json:"order,omitempty"` + // Version The template version. + Version *string `json:"version,omitempty"` +} + +func (s *TemplatesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "composed_of", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ComposedOf", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ComposedOf = &o + + case "index_patterns", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexPatterns", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPatterns = &o + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "order", "o", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Order = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewTemplatesRecord returns a TemplatesRecord. +func NewTemplatesRecord() *TemplatesRecord { + r := &TemplatesRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/term.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/term.go new file mode 100644 index 000000000..b3b71ef2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/term.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Term type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/types.ts#L34-L40 +type Term struct { + DocFreq *int `json:"doc_freq,omitempty"` + Score *Float64 `json:"score,omitempty"` + TermFreq int `json:"term_freq"` + Tokens []TermVectorsToken `json:"tokens,omitempty"` + Ttf *int `json:"ttf,omitempty"` +} + +func (s *Term) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocFreq", err) + } + s.DocFreq = &value + case float64: + f := int(v) + s.DocFreq = &f + } + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = &f + case float64: + f := Float64(v) + s.Score = &f + } + + case "term_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TermFreq", err) + } + s.TermFreq = value + case float64: + f := int(v) + s.TermFreq = f + } + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return fmt.Errorf("%s | %w", "Tokens", err) + } + + case "ttf": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Ttf", err) + } + s.Ttf = &value + case float64: + f := int(v) + s.Ttf = &f + } + + } + } + return nil +} + +// NewTerm returns a Term. +func NewTerm() *Term { + r := &Term{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/terminateprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/terminateprocessor.go new file mode 100644 index 000000000..32aa8c23c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/terminateprocessor.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TerminateProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1542-L1542 +type TerminateProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *TerminateProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + +// NewTerminateProcessor returns a TerminateProcessor. +func NewTerminateProcessor() *TerminateProcessor { + r := &TerminateProcessor{} + + return r +} + +type TerminateProcessorVariant interface { + TerminateProcessorCaster() *TerminateProcessor +} + +func (s *TerminateProcessor) TerminateProcessorCaster() *TerminateProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termquery.go new file mode 100644 index 000000000..1f578d4a9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termquery.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L234-L251 +type TermQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows ASCII case insensitive matching of the value with the indexed field + // values when set to `true`. + // When `false`, the case sensitivity of matching depends on the underlying + // field’s mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Value Term you wish to find in the provided field. + Value FieldValue `json:"value"` +} + +func (s *TermQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseInsensitive", err) + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewTermQuery returns a TermQuery. 
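+// A minimal construction sketch (illustrative; the "user.id" field name and
+// the enclosing Query container are not defined in this file):
+//
+//	ci := true
+//	q := Query{Term: map[string]TermQuery{
+//		"user.id": {Value: "kimchy", CaseInsensitive: &ci},
+//	}}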
+func NewTermQuery() *TermQuery { + r := &TermQuery{} + + return r +} + +type TermQueryVariant interface { + TermQueryCaster() *TermQuery +} + +func (s *TermQuery) TermQueryCaster() *TermQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termrangequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termrangequery.go new file mode 100644 index 000000000..1f4599d9e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termrangequery.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation" +) + +// TermRangeQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L170-L170 +type TermRangeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Gt Greater than. + Gt *string `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte *string `json:"gte,omitempty"` + // Lt Less than. + Lt *string `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte *string `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. 
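+	// Accepts `INTERSECTS` (the default), `CONTAINS`, or `WITHIN`, and only
+	// applies when the target field is mapped as a `range` type.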
+ Relation *rangerelation.RangeRelation `json:"relation,omitempty"` +} + +func (s *TermRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "gt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Gt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Gt = &o + + case "gte": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Gte", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Gte = &o + + case "lt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lt = &o + + case "lte": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Lte", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lte = &o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + } + } + return nil +} + +// NewTermRangeQuery returns a TermRangeQuery. +func NewTermRangeQuery() *TermRangeQuery { + r := &TermRangeQuery{} + + return r +} + +type TermRangeQueryVariant interface { + TermRangeQueryCaster() *TermRangeQuery +} + +func (s *TermRangeQuery) TermRangeQueryCaster() *TermRangeQuery { + return s +} + +func (s *TermRangeQuery) RangeQueryCaster() *RangeQuery { + o := RangeQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsaggregation.go new file mode 100644 index 000000000..fcb0ddffb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsaggregation.go @@ -0,0 +1,336 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint"
+)
+
+// TermsAggregation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L963-L1031
+type TermsAggregation struct {
+	// CollectMode Determines how child aggregations should be calculated: breadth-first or
+	// depth-first.
+	CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"`
+	// Exclude Values to exclude.
+	// Accepts regular expressions and partitions.
+	Exclude []string `json:"exclude,omitempty"`
+	// ExecutionHint Determines whether the aggregation will use field values directly or global
+	// ordinals.
+	ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"`
+	// Field The field from which to return terms.
+	Field *string `json:"field,omitempty"`
+	Format *string `json:"format,omitempty"`
+	// Include Values to include.
+	// Accepts regular expressions and partitions.
+	Include TermsInclude `json:"include,omitempty"`
+	// MinDocCount Only return values that are found in more than `min_doc_count` hits.
+	MinDocCount *int `json:"min_doc_count,omitempty"`
+	// Missing The value to apply to documents that do not have a value.
+	// By default, documents without a value are ignored.
+	Missing Missing `json:"missing,omitempty"`
+	MissingBucket *bool `json:"missing_bucket,omitempty"`
+	MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"`
+	// Order Specifies the sort order of the buckets.
+	// Defaults to sorting by descending document count.
+	Order AggregateOrder `json:"order,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	// ShardMinDocCount Regulates how certain a shard must be that a term should actually be added
+	// to the candidate list, with respect to the `min_doc_count`.
+	// Terms will only be considered if their local shard frequency within the set
+	// is higher than the `shard_min_doc_count`.
+	ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"`
+	// ShardSize The number of candidate terms produced by each shard.
+	// By default, `shard_size` will be automatically estimated based on the number
+	// of shards and the `size` parameter.
+	ShardSize *int `json:"shard_size,omitempty"`
+	// ShowTermDocCountError Set to `true` to return the `doc_count_error_upper_bound`, which is an upper
+	// bound to the error on the `doc_count` returned by each shard.
+	ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"`
+	// Size The number of buckets returned out of the overall terms list.
+	Size *int `json:"size,omitempty"`
+	// ValueType Coerce unmapped fields into the specified type.
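+	// For example, "string" or "long" (assumption: the usual coercion targets
+	// accepted by the terms aggregation).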
+ ValueType *string `json:"value_type,omitempty"` +} + +func (s *TermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collect_mode": + if err := dec.Decode(&s.CollectMode); err != nil { + return fmt.Errorf("%s | %w", "CollectMode", err) + } + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return fmt.Errorf("%s | %w", "ExecutionHint", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "include": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + include_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Include", err) + } + + switch t { + + case "num_partitions", "partition": + o := NewTermsPartition() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + s.Include = o + break include_field + + } + } + if s.Include == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + } + + case "min_doc_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "missing_bucket": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MissingBucket", err) + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return fmt.Errorf("%s | %w", "MissingOrder", err) + } + + case "order": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + case '[': + o := make([]map[string]sortorder.SortOrder, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + s.Order = o + 
} + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "shard_min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShardMinDocCount", err) + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "show_term_doc_count_error": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShowTermDocCountError", err) + } + s.ShowTermDocCountError = &value + case bool: + s.ShowTermDocCountError = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "value_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueType = &o + + } + } + return nil +} + +// NewTermsAggregation returns a TermsAggregation. +func NewTermsAggregation() *TermsAggregation { + r := &TermsAggregation{} + + return r +} + +type TermsAggregationVariant interface { + TermsAggregationCaster() *TermsAggregation +} + +func (s *TermsAggregation) TermsAggregationCaster() *TermsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsexclude.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsexclude.go new file mode 100644 index 000000000..5f67be8c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsexclude.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TermsExclude type alias. 
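+// For example, TermsExclude{"water_", "air_"} excludes those exact values.
+// (A regular-expression exclude is sent as a plain JSON string instead;
+// assumption based on the server-side terms aggregation docs.)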
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1077-L1078
+type TermsExclude []string
+
+type TermsExcludeVariant interface {
+	TermsExcludeCaster() *TermsExclude
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsgrouping.go
new file mode 100644
index 000000000..52b0b2c34
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsgrouping.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// TermsGrouping type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/rollup/_types/Groupings.ts#L75-L82
+type TermsGrouping struct {
+	// Fields The set of fields that you wish to collect terms for.
+	// This array can contain both keyword and numeric fields.
+	// Order does not matter.
+	Fields []string `json:"fields"`
+}
+
+func (s *TermsGrouping) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "fields":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Fields", err)
+				}
+
+				s.Fields = append(s.Fields, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil {
+					return fmt.Errorf("%s | %w", "Fields", err)
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTermsGrouping returns a TermsGrouping.
+func NewTermsGrouping() *TermsGrouping {
+	r := &TermsGrouping{}
+
+	return r
+}
+
+type TermsGroupingVariant interface {
+	TermsGroupingCaster() *TermsGrouping
+}
+
+func (s *TermsGrouping) TermsGroupingCaster() *TermsGrouping {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsinclude.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsinclude.go
new file mode 100644
index 000000000..674920f16
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsinclude.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TermsInclude holds the union for the following types: +// +// string +// []string +// TermsPartition +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1074-L1075 +type TermsInclude any + +type TermsIncludeVariant interface { + TermsIncludeCaster() *TermsInclude +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termslookup.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termslookup.go new file mode 100644 index 000000000..5031bffd8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termslookup.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TermsLookup type. 
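+// Fetches the terms from a field of an existing document, for example
+// {"index": "users", "id": "u1", "path": "followers"} (values illustrative).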
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L266-L271 +type TermsLookup struct { + Id string `json:"id"` + Index string `json:"index"` + Path string `json:"path"` + Routing *string `json:"routing,omitempty"` +} + +func (s *TermsLookup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + } + } + return nil +} + +// NewTermsLookup returns a TermsLookup. +func NewTermsLookup() *TermsLookup { + r := &TermsLookup{} + + return r +} + +type TermsLookupVariant interface { + TermsLookupCaster() *TermsLookup +} + +func (s *TermsLookup) TermsLookupCaster() *TermsLookup { + return s +} + +func (s *TermsLookup) TermsQueryFieldCaster() *TermsQueryField { + o := TermsQueryField(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termspartition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termspartition.go new file mode 100644 index 000000000..f0ffad538 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termspartition.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermsPartition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1080-L1089 +type TermsPartition struct { + // NumPartitions The number of partitions. + NumPartitions int64 `json:"num_partitions"` + // Partition The partition number for this request. 
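+	// For example, with `num_partitions: 10` the terms are hashed into ten
+	// disjoint groups, and `partition: 3` selects the fourth group.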
+ Partition int64 `json:"partition"` +} + +func (s *TermsPartition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_partitions": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NumPartitions", err) + } + s.NumPartitions = value + case float64: + f := int64(v) + s.NumPartitions = f + } + + case "partition": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Partition", err) + } + s.Partition = value + case float64: + f := int64(v) + s.Partition = f + } + + } + } + return nil +} + +// NewTermsPartition returns a TermsPartition. +func NewTermsPartition() *TermsPartition { + r := &TermsPartition{} + + return r +} + +type TermsPartitionVariant interface { + TermsPartitionCaster() *TermsPartition +} + +func (s *TermsPartition) TermsPartitionCaster() *TermsPartition { + return s +} + +func (s *TermsPartition) TermsIncludeCaster() *TermsInclude { + o := TermsInclude(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsquery.go new file mode 100644 index 000000000..88080eb7d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsquery.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermsQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L253-L259 +type TermsQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+	Boost *float32 `json:"boost,omitempty"`
+	QueryName_ *string `json:"_name,omitempty"`
+	TermsQuery map[string]TermsQueryField `json:"-"`
+}
+
+func (s *TermsQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.TermsQuery == nil {
+					s.TermsQuery = make(map[string]TermsQueryField, 0)
+				}
+				raw := new(TermsQueryField)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "TermsQuery", err)
+				}
+				s.TermsQuery[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s TermsQuery) MarshalJSON() ([]byte, error) {
+	type opt TermsQuery
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.TermsQuery {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "TermsQuery")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewTermsQuery returns a TermsQuery.
+func NewTermsQuery() *TermsQuery {
+	r := &TermsQuery{
+		TermsQuery: make(map[string]TermsQueryField),
+	}
+
+	return r
+}
+
+type TermsQueryVariant interface {
+	TermsQueryCaster() *TermsQuery
+}
+
+func (s *TermsQuery) TermsQueryCaster() *TermsQuery {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsqueryfield.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsqueryfield.go
new file mode 100644
index 000000000..64e3f655d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsqueryfield.go
@@ -0,0 +1,33 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TermsQueryField holds the union for the following types: +// +// []FieldValue +// TermsLookup +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L261-L264 +type TermsQueryField any + +type TermsQueryFieldVariant interface { + TermsQueryFieldCaster() *TermsQueryField +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termssetquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termssetquery.go new file mode 100644 index 000000000..cbc91eea6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termssetquery.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermsSetQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L273-L295 +type TermsSetQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MinimumShouldMatch Specification describing number of matching terms required to return a + // document. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // MinimumShouldMatchField Numeric field containing the number of matching terms required to return a + // document. + MinimumShouldMatchField *string `json:"minimum_should_match_field,omitempty"` + // MinimumShouldMatchScript Custom script containing the number of matching terms required to return a + // document. + MinimumShouldMatchScript *Script `json:"minimum_should_match_script,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Terms Array of terms you wish to find in the provided field. 
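+	// Each entry is a FieldValue, so strings, numbers, booleans, and null
+	// are all accepted.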
+ Terms []FieldValue `json:"terms"` +} + +func (s *TermsSetQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatch", err) + } + + case "minimum_should_match_field": + if err := dec.Decode(&s.MinimumShouldMatchField); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatchField", err) + } + + case "minimum_should_match_script": + if err := dec.Decode(&s.MinimumShouldMatchScript); err != nil { + return fmt.Errorf("%s | %w", "MinimumShouldMatchScript", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return fmt.Errorf("%s | %w", "Terms", err) + } + + } + } + return nil +} + +// NewTermsSetQuery returns a TermsSetQuery. +func NewTermsSetQuery() *TermsSetQuery { + r := &TermsSetQuery{} + + return r +} + +type TermsSetQueryVariant interface { + TermsSetQueryCaster() *TermsSetQuery +} + +func (s *TermsSetQuery) TermsSetQueryCaster() *TermsSetQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggest.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggest.go new file mode 100644 index 000000000..2ed33e78d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggest.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermSuggest type. 
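+// One entry in a term-suggester response: the analyzed token (`text` with
+// its `offset` and `length`) plus its candidate corrections in `options`.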
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L65-L70 +type TermSuggest struct { + Length int `json:"length"` + Offset int `json:"offset"` + Options []TermSuggestOption `json:"options"` + Text string `json:"text"` +} + +func (s *TermSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Offset", err) + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTermSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return fmt.Errorf("%s | %w", "Options", err) + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewTermSuggest returns a TermSuggest. +func NewTermSuggest() *TermSuggest { + r := &TermSuggest{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggester.go new file mode 100644 index 000000000..564aeafe2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggester.go @@ -0,0 +1,310 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/stringdistance"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestsort"
+)
+
+// TermSuggester type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L507-L569
+type TermSuggester struct {
+	// Analyzer The analyzer to analyze the suggest text with.
+	// Defaults to the search analyzer of the suggest field.
+	Analyzer *string `json:"analyzer,omitempty"`
+	// Field The field to fetch the candidate suggestions from.
+	// Needs to be set globally or per suggestion.
+	Field string `json:"field"`
+	LowercaseTerms *bool `json:"lowercase_terms,omitempty"`
+	// MaxEdits The maximum edit distance candidate suggestions can have in order to be
+	// considered a suggestion.
+	// Can only be `1` or `2`.
+	MaxEdits *int `json:"max_edits,omitempty"`
+	// MaxInspections A factor that is multiplied by the `shard_size` in order to inspect
+	// more candidate spelling corrections on the shard level.
+	// Can improve accuracy at the cost of performance.
+	MaxInspections *int `json:"max_inspections,omitempty"`
+	// MaxTermFreq The maximum threshold in number of documents in which a suggest text token
+	// can exist in order to be included.
+	// Can be a relative percentage number (for example `0.4`) or an absolute number
+	// to represent document frequencies.
+	// If a value higher than 1 is specified, the value cannot be fractional.
+	MaxTermFreq *float32 `json:"max_term_freq,omitempty"`
+	// MinDocFreq The minimal threshold in number of documents a suggestion should appear in.
+	// This can improve quality by only suggesting high frequency terms.
+	// Can be specified as an absolute number or as a relative percentage of number
+	// of documents.
+	// If a value higher than 1 is specified, then the number cannot be fractional.
+	MinDocFreq *float32 `json:"min_doc_freq,omitempty"`
+	// MinWordLength The minimum length a suggest text term must have in order to be included.
+	MinWordLength *int `json:"min_word_length,omitempty"`
+	// PrefixLength The number of minimal prefix characters that must match in order to be a
+	// candidate for suggestions.
+	// Increasing this number improves spellcheck performance.
+	PrefixLength *int `json:"prefix_length,omitempty"`
+	// ShardSize Sets the maximum number of suggestions to be retrieved from each individual
+	// shard.
+	ShardSize *int `json:"shard_size,omitempty"`
+	// Size The maximum number of corrections to be returned per suggest text token.
+	Size *int `json:"size,omitempty"`
+	// Sort Defines how suggestions should be sorted per suggest text term.
+	Sort *suggestsort.SuggestSort `json:"sort,omitempty"`
+	// StringDistance The string distance implementation to use for comparing how similar
+	// suggested terms are.
+	StringDistance *stringdistance.StringDistance `json:"string_distance,omitempty"`
+	// SuggestMode Controls which suggestions are included, or for which suggest text terms
+	// suggestions should be returned.
+	SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"`
+	// Text The suggest text.
+	// Needs to be set globally or per suggestion.
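+	// A value set on the enclosing suggest object applies to every suggester
+	// in the request; a per-suggester value overrides it.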
+ Text *string `json:"text,omitempty"` +} + +func (s *TermSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "lowercase_terms": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "LowercaseTerms", err) + } + s.LowercaseTerms = &value + case bool: + s.LowercaseTerms = &v + } + + case "max_edits": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxEdits", err) + } + s.MaxEdits = &value + case float64: + f := int(v) + s.MaxEdits = &f + } + + case "max_inspections": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxInspections", err) + } + s.MaxInspections = &value + case float64: + f := int(v) + s.MaxInspections = &f + } + + case "max_term_freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTermFreq", err) + } + f := float32(value) + s.MaxTermFreq = &f + case float64: + f := float32(v) + s.MaxTermFreq = &f + } + + case "min_doc_freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocFreq", err) + } + f := float32(value) + s.MinDocFreq = &f + case float64: + f := float32(v) + s.MinDocFreq = &f + } + + case "min_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordLength", err) + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "prefix_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PrefixLength", err) + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + case "string_distance": + if err := dec.Decode(&s.StringDistance); err != nil { + return fmt.Errorf("%s | %w", "StringDistance", err) + } + + case "suggest_mode": + if err := 
dec.Decode(&s.SuggestMode); err != nil { + return fmt.Errorf("%s | %w", "SuggestMode", err) + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil +} + +// NewTermSuggester returns a TermSuggester. +func NewTermSuggester() *TermSuggester { + r := &TermSuggester{} + + return r +} + +type TermSuggesterVariant interface { + TermSuggesterCaster() *TermSuggester +} + +func (s *TermSuggester) TermSuggesterCaster() *TermSuggester { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggestoption.go new file mode 100644 index 000000000..5ee9e24c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termsuggestoption.go @@ -0,0 +1,137 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermSuggestOption type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/suggester.ts#L94-L100 +type TermSuggestOption struct { + CollateMatch *bool `json:"collate_match,omitempty"` + Freq int64 `json:"freq"` + Highlighted *string `json:"highlighted,omitempty"` + Score Float64 `json:"score"` + Text string `json:"text"` +} + +func (s *TermSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CollateMatch", err) + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "freq": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Freq", err) + } + s.Freq = value + case float64: + f := int64(v) + s.Freq = f + } + + case "highlighted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Highlighted", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Highlighted = &o + + case "score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Score", err) + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Text", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + +// NewTermSuggestOption returns a TermSuggestOption. +func NewTermSuggestOption() *TermSuggestOption { + r := &TermSuggestOption{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvector.go new file mode 100644 index 000000000..75263f55e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvector.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TermVector type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/types.ts#L23-L26 +type TermVector struct { + FieldStatistics *FieldStatistics `json:"field_statistics,omitempty"` + Terms map[string]Term `json:"terms"` +} + +// NewTermVector returns a TermVector. +func NewTermVector() *TermVector { + r := &TermVector{ + Terms: make(map[string]Term), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsfilter.go new file mode 100644 index 000000000..0cd15158c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsfilter.go @@ -0,0 +1,200 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermVectorsFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/types.ts#L49-L86 +type TermVectorsFilter struct { + // MaxDocFreq Ignore words which occur in more than this many docs. + // Defaults to unbounded. + MaxDocFreq *int `json:"max_doc_freq,omitempty"` + // MaxNumTerms The maximum number of terms that must be returned per field. + MaxNumTerms *int `json:"max_num_terms,omitempty"` + // MaxTermFreq Ignore words with more than this frequency in the source doc. + // It defaults to unbounded. + MaxTermFreq *int `json:"max_term_freq,omitempty"` + // MaxWordLength The maximum word length above which words will be ignored. + // Defaults to unbounded. + MaxWordLength *int `json:"max_word_length,omitempty"` + // MinDocFreq Ignore terms which do not occur in at least this many docs. + MinDocFreq *int `json:"min_doc_freq,omitempty"` + // MinTermFreq Ignore words with less than this frequency in the source doc. + MinTermFreq *int `json:"min_term_freq,omitempty"` + // MinWordLength The minimum word length below which words will be ignored. 
+ MinWordLength *int `json:"min_word_length,omitempty"` +} + +func (s *TermVectorsFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_doc_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxDocFreq", err) + } + s.MaxDocFreq = &value + case float64: + f := int(v) + s.MaxDocFreq = &f + } + + case "max_num_terms": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxNumTerms", err) + } + s.MaxNumTerms = &value + case float64: + f := int(v) + s.MaxNumTerms = &f + } + + case "max_term_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTermFreq", err) + } + s.MaxTermFreq = &value + case float64: + f := int(v) + s.MaxTermFreq = &f + } + + case "max_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxWordLength", err) + } + s.MaxWordLength = &value + case float64: + f := int(v) + s.MaxWordLength = &f + } + + case "min_doc_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocFreq", err) + } + s.MinDocFreq = &value + case float64: + f := int(v) + s.MinDocFreq = &f + } + + case "min_term_freq": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinTermFreq", err) + } + s.MinTermFreq = &value + case float64: + f := int(v) + s.MinTermFreq = &f + } + + case "min_word_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinWordLength", err) + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + } + } + return nil +} + +// NewTermVectorsFilter returns a TermVectorsFilter. +func NewTermVectorsFilter() *TermVectorsFilter { + r := &TermVectorsFilter{} + + return r +} + +type TermVectorsFilterVariant interface { + TermVectorsFilterCaster() *TermVectorsFilter +} + +func (s *TermVectorsFilter) TermVectorsFilterCaster() *TermVectorsFilter { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsresult.go new file mode 100644 index 000000000..23d0e6e7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorsresult.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermVectorsResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/mtermvectors/types.ts#L96-L104 +type TermVectorsResult struct { + Error *ErrorCause `json:"error,omitempty"` + Found *bool `json:"found,omitempty"` + Id_ *string `json:"_id,omitempty"` + Index_ string `json:"_index"` + TermVectors map[string]TermVector `json:"term_vectors,omitempty"` + Took *int64 `json:"took,omitempty"` + Version_ *int64 `json:"_version,omitempty"` +} + +func (s *TermVectorsResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return fmt.Errorf("%s | %w", "Error", err) + } + + case "found": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Found", err) + } + s.Found = &value + case bool: + s.Found = &v + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "term_vectors": + if s.TermVectors == nil { + s.TermVectors = make(map[string]TermVector, 0) + } + if err := dec.Decode(&s.TermVectors); err != nil { + return fmt.Errorf("%s | %w", "TermVectors", err) + } + + case "took": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Took", err) + } + s.Took = &value + case float64: + f := int64(v) + s.Took = &f + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + } + } + return nil +} + +// NewTermVectorsResult returns a TermVectorsResult. +func NewTermVectorsResult() *TermVectorsResult { + r := &TermVectorsResult{ + TermVectors: make(map[string]TermVector), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorstoken.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorstoken.go new file mode 100644 index 000000000..ae917b333 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/termvectorstoken.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TermVectorsToken type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/termvectors/types.ts#L42-L47 +type TermVectorsToken struct { + EndOffset *int `json:"end_offset,omitempty"` + Payload *string `json:"payload,omitempty"` + Position int `json:"position"` + StartOffset *int `json:"start_offset,omitempty"` +} + +func (s *TermVectorsToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EndOffset", err) + } + s.EndOffset = &value + case float64: + f := int(v) + s.EndOffset = &f + } + + case "payload": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Payload", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Payload = &o + + case "position": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Position", err) + } + s.Position = value + case float64: + f := int(v) + s.Position = f + } + + case "start_offset": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "StartOffset", err) + } + s.StartOffset = &value + case float64: + f := int(v) + s.StartOffset = &f + } + + } + } + return nil +} + +// NewTermVectorsToken returns a TermVectorsToken. +func NewTermVectorsToken() *TermVectorsToken { + r := &TermVectorsToken{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/testpopulation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/testpopulation.go new file mode 100644 index 000000000..82514cde2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/testpopulation.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TestPopulation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L342-L352 +type TestPopulation struct { + // Field The field to aggregate. + Field string `json:"field"` + // Filter A filter used to define a set of records to run unpaired t-test on. + Filter *Query `json:"filter,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *TestPopulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewTestPopulation returns a TestPopulation. +func NewTestPopulation() *TestPopulation { + r := &TestPopulation{} + + return r +} + +type TestPopulationVariant interface { + TestPopulationCaster() *TestPopulation +} + +func (s *TestPopulation) TestPopulationCaster() *TestPopulation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceoptions.go new file mode 100644 index 000000000..fe5594ee1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceoptions.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
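+//
+// A minimal configuration sketch for the options type defined below (hedged:
+// InferenceConfigCreateContainer is assumed to be the enclosing container,
+// as used by the ml trained-model APIs, and some.Int is assumed to be the
+// pointer helper from the typedapi/some package):
+//
+//	opts := types.NewTextClassificationInferenceOptions()
+//	opts.NumTopClasses = some.Int(3) // return the top 3 class predictions
+//	opts.ClassificationLabels = []string{"positive", "negative", "neutral"}
+//	cfg := types.InferenceConfigCreateContainer{TextClassification: opts}
+//	_ = cfg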
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// TextClassificationInferenceOptions type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L202-L214
+type TextClassificationInferenceOptions struct {
+ // ClassificationLabels Classification labels to apply other than the stored labels. Must have the
+ // same dimensions as the default configured labels
+ ClassificationLabels []string `json:"classification_labels,omitempty"`
+ // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0.
+ NumTopClasses *int `json:"num_top_classes,omitempty"`
+ // ResultsField The field that is added to incoming documents to contain the inference
+ // prediction. Defaults to predicted_value.
+ ResultsField *string `json:"results_field,omitempty"`
+ // Tokenization The tokenization options
+ Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"`
+ Vocabulary *Vocabulary `json:"vocabulary,omitempty"`
+}
+
+func (s *TextClassificationInferenceOptions) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "classification_labels":
+ if err := dec.Decode(&s.ClassificationLabels); err != nil {
+ return fmt.Errorf("%s | %w", "ClassificationLabels", err)
+ }
+
+ case "num_top_classes":
+
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "NumTopClasses", err)
+ }
+ s.NumTopClasses = &value
+ case float64:
+ f := int(v)
+ s.NumTopClasses = &f
+ }
+
+ case "results_field":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ResultsField", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ResultsField = &o
+
+ case "tokenization":
+ if err := dec.Decode(&s.Tokenization); err != nil {
+ return fmt.Errorf("%s | %w", "Tokenization", err)
+ }
+
+ case "vocabulary":
+ if err := dec.Decode(&s.Vocabulary); err != nil {
+ return fmt.Errorf("%s | %w", "Vocabulary", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewTextClassificationInferenceOptions returns a TextClassificationInferenceOptions.
+func NewTextClassificationInferenceOptions() *TextClassificationInferenceOptions {
+ r := &TextClassificationInferenceOptions{}
+
+ return r
+}
+
+type TextClassificationInferenceOptionsVariant interface {
+ TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions
+}
+
+func (s *TextClassificationInferenceOptions) TextClassificationInferenceOptionsCaster() *TextClassificationInferenceOptions {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceupdateoptions.go
new file mode 100644
index 000000000..9eb29583b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textclassificationinferenceupdateoptions.go
@@ -0,0 +1,119 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// TextClassificationInferenceUpdateOptions type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L382-L391
+type TextClassificationInferenceUpdateOptions struct {
+ // ClassificationLabels Classification labels to apply other than the stored labels. Must have the
+ // same dimensions as the default configured labels
+ ClassificationLabels []string `json:"classification_labels,omitempty"`
+ // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0.
+ NumTopClasses *int `json:"num_top_classes,omitempty"`
+ // ResultsField The field that is added to incoming documents to contain the inference
+ // prediction. Defaults to predicted_value.
+ ResultsField *string `json:"results_field,omitempty"`
+ // Tokenization The tokenization options to update when inferring
+ Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"`
+}
+
+func (s *TextClassificationInferenceUpdateOptions) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "classification_labels":
+ if err := dec.Decode(&s.ClassificationLabels); err != nil {
+ return fmt.Errorf("%s | %w", "ClassificationLabels", err)
+ }
+
+ case "num_top_classes":
+
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "NumTopClasses", err)
+ }
+ s.NumTopClasses = &value
+ case float64:
+ f := int(v)
+ s.NumTopClasses = &f
+ }
+
+ case "results_field":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ResultsField", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ResultsField = &o
+
+ case "tokenization":
+ if err := dec.Decode(&s.Tokenization); err != nil {
+ return fmt.Errorf("%s | %w", "Tokenization", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewTextClassificationInferenceUpdateOptions returns a TextClassificationInferenceUpdateOptions.
+func NewTextClassificationInferenceUpdateOptions() *TextClassificationInferenceUpdateOptions { + r := &TextClassificationInferenceUpdateOptions{} + + return r +} + +type TextClassificationInferenceUpdateOptionsVariant interface { + TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions +} + +func (s *TextClassificationInferenceUpdateOptions) TextClassificationInferenceUpdateOptionsCaster() *TextClassificationInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembedding.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembedding.go new file mode 100644 index 000000000..ff4993818 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembedding.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextEmbedding type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Knn.ts#L94-L97 +type TextEmbedding struct { + ModelId string `json:"model_id"` + ModelText string `json:"model_text"` +} + +func (s *TextEmbedding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "model_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelText", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelText = o + + } + } + return nil +} + +// NewTextEmbedding returns a TextEmbedding. 
+func NewTextEmbedding() *TextEmbedding { + r := &TextEmbedding{} + + return r +} + +type TextEmbeddingVariant interface { + TextEmbeddingCaster() *TextEmbedding +} + +func (s *TextEmbedding) TextEmbeddingCaster() *TextEmbedding { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingbyteresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingbyteresult.go new file mode 100644 index 000000000..620c23b94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingbyteresult.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TextEmbeddingByteResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L53-L58 +type TextEmbeddingByteResult struct { + Embedding []byte `json:"embedding"` +} + +func (s *TextEmbeddingByteResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "embedding": + if err := dec.Decode(&s.Embedding); err != nil { + return fmt.Errorf("%s | %w", "Embedding", err) + } + + } + } + return nil +} + +// NewTextEmbeddingByteResult returns a TextEmbeddingByteResult. +func NewTextEmbeddingByteResult() *TextEmbeddingByteResult { + r := &TextEmbeddingByteResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceoptions.go new file mode 100644 index 000000000..f155c9b2d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceoptions.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextEmbeddingInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L252-L262 +type TextEmbeddingInferenceOptions struct { + // EmbeddingSize The number of dimensions in the embedding output + EmbeddingSize *int `json:"embedding_size,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` +} + +func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "embedding_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EmbeddingSize", err) + } + s.EmbeddingSize = &value + case float64: + f := int(v) + s.EmbeddingSize = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + + } + } + return nil +} + +// NewTextEmbeddingInferenceOptions returns a TextEmbeddingInferenceOptions. +func NewTextEmbeddingInferenceOptions() *TextEmbeddingInferenceOptions { + r := &TextEmbeddingInferenceOptions{} + + return r +} + +type TextEmbeddingInferenceOptionsVariant interface { + TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions +} + +func (s *TextEmbeddingInferenceOptions) TextEmbeddingInferenceOptionsCaster() *TextEmbeddingInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceupdateoptions.go new file mode 100644 index 000000000..819b5041e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddinginferenceupdateoptions.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextEmbeddingInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L411-L415 +type TextEmbeddingInferenceUpdateOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *TextEmbeddingInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewTextEmbeddingInferenceUpdateOptions returns a TextEmbeddingInferenceUpdateOptions. +func NewTextEmbeddingInferenceUpdateOptions() *TextEmbeddingInferenceUpdateOptions { + r := &TextEmbeddingInferenceUpdateOptions{} + + return r +} + +type TextEmbeddingInferenceUpdateOptionsVariant interface { + TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions +} + +func (s *TextEmbeddingInferenceUpdateOptions) TextEmbeddingInferenceUpdateOptionsCaster() *TextEmbeddingInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingresult.go new file mode 100644 index 000000000..79f3f430e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textembeddingresult.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TextEmbeddingResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/Results.ts#L60-L65 +type TextEmbeddingResult struct { + Embedding []float32 `json:"embedding"` +} + +func (s *TextEmbeddingResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "embedding": + if err := dec.Decode(&s.Embedding); err != nil { + return fmt.Errorf("%s | %w", "Embedding", err) + } + + } + } + return nil +} + +// NewTextEmbeddingResult returns a TextEmbeddingResult. +func NewTextEmbeddingResult() *TextEmbeddingResult { + r := &TextEmbeddingResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceoptions.go new file mode 100644 index 000000000..61636ef87 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceoptions.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextExpansionInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L264-L271 +type TextExpansionInferenceOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` + Vocabulary Vocabulary `json:"vocabulary"` +} + +func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return fmt.Errorf("%s | %w", "Vocabulary", err) + } + + } + } + return nil +} + +// NewTextExpansionInferenceOptions returns a TextExpansionInferenceOptions. +func NewTextExpansionInferenceOptions() *TextExpansionInferenceOptions { + r := &TextExpansionInferenceOptions{} + + return r +} + +type TextExpansionInferenceOptionsVariant interface { + TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions +} + +func (s *TextExpansionInferenceOptions) TextExpansionInferenceOptionsCaster() *TextExpansionInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceupdateoptions.go new file mode 100644 index 000000000..d5aaeb36e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansioninferenceupdateoptions.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextExpansionInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L417-L421 +type TextExpansionInferenceUpdateOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *TextExpansionInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewTextExpansionInferenceUpdateOptions returns a TextExpansionInferenceUpdateOptions. +func NewTextExpansionInferenceUpdateOptions() *TextExpansionInferenceUpdateOptions { + r := &TextExpansionInferenceUpdateOptions{} + + return r +} + +type TextExpansionInferenceUpdateOptionsVariant interface { + TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions +} + +func (s *TextExpansionInferenceUpdateOptions) TextExpansionInferenceUpdateOptionsCaster() *TextExpansionInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansionquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansionquery.go new file mode 100644 index 000000000..69e3e18e8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textexpansionquery.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextExpansionQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/TextExpansionQuery.ts#L23-L36 +type TextExpansionQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + // ModelId The text expansion NLP model to use + ModelId string `json:"model_id"` + // ModelText The query text + ModelText string `json:"model_text"` + // PruningConfig Token pruning configurations + PruningConfig *TokenPruningConfig `json:"pruning_config,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *TextExpansionQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "model_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelText", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelText = o + + case "pruning_config": + if err := dec.Decode(&s.PruningConfig); err != nil { + return fmt.Errorf("%s | %w", "PruningConfig", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewTextExpansionQuery returns a TextExpansionQuery. +func NewTextExpansionQuery() *TextExpansionQuery { + r := &TextExpansionQuery{} + + return r +} + +type TextExpansionQueryVariant interface { + TextExpansionQueryCaster() *TextExpansionQuery +} + +func (s *TextExpansionQuery) TextExpansionQueryCaster() *TextExpansionQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textindexprefixes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textindexprefixes.go new file mode 100644 index 000000000..655c59521 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textindexprefixes.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
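+//
+// A minimal mapping sketch for the type defined below (hedged: it pairs
+// TextIndexPrefixes with the TextProperty mapping type that appears later
+// in this package; the values follow the min_chars/max_chars fields declared
+// in this file):
+//
+//	prefixes := types.NewTextIndexPrefixes()
+//	prefixes.MinChars = 2 // shortest indexed prefix
+//	prefixes.MaxChars = 5 // longest indexed prefix
+//	title := types.NewTextProperty()
+//	title.IndexPrefixes = prefixes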
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextIndexPrefixes type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L316-L319 +type TextIndexPrefixes struct { + MaxChars int `json:"max_chars"` + MinChars int `json:"min_chars"` +} + +func (s *TextIndexPrefixes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_chars": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxChars", err) + } + s.MaxChars = value + case float64: + f := int(v) + s.MaxChars = f + } + + case "min_chars": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MinChars", err) + } + s.MinChars = value + case float64: + f := int(v) + s.MinChars = f + } + + } + } + return nil +} + +// NewTextIndexPrefixes returns a TextIndexPrefixes. +func NewTextIndexPrefixes() *TextIndexPrefixes { + r := &TextIndexPrefixes{} + + return r +} + +type TextIndexPrefixesVariant interface { + TextIndexPrefixesCaster() *TextIndexPrefixes +} + +func (s *TextIndexPrefixes) TextIndexPrefixesCaster() *TextIndexPrefixes { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textproperty.go new file mode 100644 index 000000000..170164ac4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textproperty.go @@ -0,0 +1,1058 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption" +) + +// TextProperty type. 
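
Editor's note (annotation): the TextIndexPrefixes decoder above, like most generated numeric fields in this package, accepts both JSON numbers and numeric strings. A small sketch of that leniency:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		var a, b types.TextIndexPrefixes
		_ = json.Unmarshal([]byte(`{"min_chars":2,"max_chars":5}`), &a)     // numbers
		_ = json.Unmarshal([]byte(`{"min_chars":"2","max_chars":"5"}`), &b) // numeric strings
		fmt.Println(a.MinChars == b.MinChars, a.MaxChars == b.MaxChars)     // true true
	}
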
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L321-L338 +type TextProperty struct { + Analyzer *string `json:"analyzer,omitempty"` + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` + Fielddata *bool `json:"fielddata,omitempty"` + FielddataFrequencyFilter *FielddataFrequencyFilter `json:"fielddata_frequency_filter,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + IndexOptions *indexoptions.IndexOptions `json:"index_options,omitempty"` + IndexPhrases *bool `json:"index_phrases,omitempty"` + IndexPrefixes *TextIndexPrefixes `json:"index_prefixes,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Norms *bool `json:"norms,omitempty"` + PositionIncrementGap *int `json:"position_increment_gap,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + SearchAnalyzer *string `json:"search_analyzer,omitempty"` + SearchQuoteAnalyzer *string `json:"search_quote_analyzer,omitempty"` + Similarity *string `json:"similarity,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + TermVector *termvectoroption.TermVectorOption `json:"term_vector,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *TextProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "eager_global_ordinals": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EagerGlobalOrdinals", err) + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v + } + + case "fielddata": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Fielddata", err) + } + s.Fielddata = &value + case bool: 
+ s.Fielddata = &v + } + + case "fielddata_frequency_filter": + if err := dec.Decode(&s.FielddataFrequencyFilter); err != nil { + return fmt.Errorf("%s | %w", "FielddataFrequencyFilter", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err 
!= nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "index_options": + if err := dec.Decode(&s.IndexOptions); err != nil { + return fmt.Errorf("%s | %w", "IndexOptions", err) + } + + case "index_phrases": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IndexPhrases", err) + } + s.IndexPhrases = &value + case bool: + s.IndexPhrases = &v + } + + case "index_prefixes": + if err := dec.Decode(&s.IndexPrefixes); err != nil { + return fmt.Errorf("%s | %w", "IndexPrefixes", err) + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "norms": + var tmp any + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Norms", err) + } + s.Norms = &value + case bool: + s.Norms = &v + } + + case "position_increment_gap": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "PositionIncrementGap", err) + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + 
oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties 
| %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o + + case "search_quote_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchQuoteAnalyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o 
+ + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Similarity", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "term_vector": + if err := dec.Decode(&s.TermVector); err != nil { + return fmt.Errorf("%s | %w", "TermVector", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TextProperty) MarshalJSON() ([]byte, error) { + type innerTextProperty TextProperty + tmp := innerTextProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fielddata: s.Fielddata, + FielddataFrequencyFilter: s.FielddataFrequencyFilter, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + IndexPhrases: s.IndexPhrases, + IndexPrefixes: s.IndexPrefixes, + Meta: s.Meta, + Norms: s.Norms, + PositionIncrementGap: s.PositionIncrementGap, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TermVector: s.TermVector, + Type: s.Type, + } + + tmp.Type = "text" + + return json.Marshal(tmp) +} + +// NewTextProperty returns a TextProperty. +func NewTextProperty() *TextProperty { + r := &TextProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type TextPropertyVariant interface { + TextPropertyCaster() *TextProperty +} + +func (s *TextProperty) TextPropertyCaster() *TextProperty { + return s +} + +func (s *TextProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textsimilarityreranker.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textsimilarityreranker.go new file mode 100644 index 000000000..b1fb0f28f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/textsimilarityreranker.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
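
Editor's note (annotation): the two generated methods above give TextProperty a lossless round-trip: UnmarshalJSON dispatches each "fields"/"properties" entry on its "type" discriminator (defaulting to "object" when absent), and MarshalJSON re-injects the literal "type":"text". A sketch, assuming Property values can be type-asserted back to their concrete generated structs:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		src := `{"analyzer":"standard","fields":{"raw":{"type":"keyword"}}}`

		var p types.TextProperty
		if err := json.Unmarshal([]byte(src), &p); err != nil {
			panic(err)
		}
		// "raw" was dispatched to the keyword branch of the switch above.
		_, isKeyword := p.Fields["raw"].(*types.KeywordProperty)

		out, _ := json.Marshal(p) // MarshalJSON appends "type":"text"
		fmt.Println(isKeyword, string(out))
	}
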
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TextSimilarityReranker type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Retriever.ts#L146-L157 +type TextSimilarityReranker struct { + // Field The document field to be used for text similarity comparisons. This field + // should contain the text that will be evaluated against the inference_text + Field string `json:"field"` + // Filter Query to filter the documents that can match. + Filter []Query `json:"filter,omitempty"` + // InferenceId Unique identifier of the inference endpoint created using the inference API. + InferenceId *string `json:"inference_id,omitempty"` + // InferenceText The text snippet used as the basis for similarity comparison + InferenceText string `json:"inference_text"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are not + // included in the top documents. + MinScore *float32 `json:"min_score,omitempty"` + // Name_ Retriever name. + Name_ *string `json:"_name,omitempty"` + // RankWindowSize This value determines how many documents we will consider from the nested + // retriever. + RankWindowSize *int `json:"rank_window_size,omitempty"` + // Retriever The nested retriever which will produce the first-level results, that will + // later be used for reranking. + Retriever RetrieverContainer `json:"retriever"` +} + +func (s *TextSimilarityReranker) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return fmt.Errorf("%s | %w", "Filter", err) + } + } + + case "inference_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceId = &o + + case "inference_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InferenceText", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InferenceText = o + + case "min_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "MinScore", err) + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | 
%w", "Name_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name_ = &o + + case "rank_window_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RankWindowSize", err) + } + s.RankWindowSize = &value + case float64: + f := int(v) + s.RankWindowSize = &f + } + + case "retriever": + if err := dec.Decode(&s.Retriever); err != nil { + return fmt.Errorf("%s | %w", "Retriever", err) + } + + } + } + return nil +} + +// NewTextSimilarityReranker returns a TextSimilarityReranker. +func NewTextSimilarityReranker() *TextSimilarityReranker { + r := &TextSimilarityReranker{} + + return r +} + +type TextSimilarityRerankerVariant interface { + TextSimilarityRerankerCaster() *TextSimilarityReranker +} + +func (s *TextSimilarityReranker) TextSimilarityRerankerCaster() *TextSimilarityReranker { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/texttoanalyze.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/texttoanalyze.go new file mode 100644 index 000000000..e188a85c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/texttoanalyze.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TextToAnalyze type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L69-L69 +type TextToAnalyze []string + +type TextToAnalyzeVariant interface { + TextToAnalyzeCaster() *TextToAnalyze +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaianalyzer.go new file mode 100644 index 000000000..f1975da3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaianalyzer.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ThaiAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L317-L321 +type ThaiAnalyzer struct { + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *ThaiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ThaiAnalyzer) MarshalJSON() ([]byte, error) { + type innerThaiAnalyzer ThaiAnalyzer + tmp := innerThaiAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "thai" + + return json.Marshal(tmp) +} + +// NewThaiAnalyzer returns a ThaiAnalyzer. +func NewThaiAnalyzer() *ThaiAnalyzer { + r := &ThaiAnalyzer{} + + return r +} + +type ThaiAnalyzerVariant interface { + ThaiAnalyzerCaster() *ThaiAnalyzer +} + +func (s *ThaiAnalyzer) ThaiAnalyzerCaster() *ThaiAnalyzer { + return s +} + +func (s *ThaiAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaitokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaitokenizer.go new file mode 100644 index 000000000..797a494bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/thaitokenizer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// ThaiTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L128-L130 +type ThaiTokenizer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ThaiTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ThaiTokenizer) MarshalJSON() ([]byte, error) { + type innerThaiTokenizer ThaiTokenizer + tmp := innerThaiTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "thai" + + return json.Marshal(tmp) +} + +// NewThaiTokenizer returns a ThaiTokenizer. +func NewThaiTokenizer() *ThaiTokenizer { + r := &ThaiTokenizer{} + + return r +} + +type ThaiTokenizerVariant interface { + ThaiTokenizerCaster() *ThaiTokenizer +} + +func (s *ThaiTokenizer) ThaiTokenizerCaster() *ThaiTokenizer { + return s +} + +func (s *ThaiTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadcount.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadcount.go new file mode 100644 index 000000000..b99578bb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadcount.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
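
Editor's note (annotation): ThaiAnalyzer and ThaiTokenizer above follow the package's discriminator convention: MarshalJSON overwrites Type with the literal "thai", so the wire form is stable no matter what the struct held. Sketch:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		t := types.NewThaiTokenizer()
		t.Type = "something-else" // overwritten during marshalling

		b, _ := json.Marshal(t)
		fmt.Println(string(b)) // {"type":"thai"}
	}
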
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ThreadCount type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1082-L1107 +type ThreadCount struct { + // Active Number of active threads in the thread pool. + Active *int64 `json:"active,omitempty"` + // Completed Number of tasks completed by the thread pool executor. + Completed *int64 `json:"completed,omitempty"` + // Largest Highest number of active threads in the thread pool. + Largest *int64 `json:"largest,omitempty"` + // Queue Number of tasks in queue for the thread pool. + Queue *int64 `json:"queue,omitempty"` + // Rejected Number of tasks rejected by the thread pool executor. + Rejected *int64 `json:"rejected,omitempty"` + // Threads Number of threads in the thread pool. + Threads *int64 `json:"threads,omitempty"` +} + +func (s *ThreadCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + s.Active = &value + case float64: + f := int64(v) + s.Active = &f + } + + case "completed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Completed", err) + } + s.Completed = &value + case float64: + f := int64(v) + s.Completed = &f + } + + case "largest": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Largest", err) + } + s.Largest = &value + case float64: + f := int64(v) + s.Largest = &f + } + + case "queue": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Queue", err) + } + s.Queue = &value + case float64: + f := int64(v) + s.Queue = &f + } + + case "rejected": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Rejected", err) + } + s.Rejected = &value + case float64: + f := int64(v) + s.Rejected = &f + } + + case "threads": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Threads", err) + } + s.Threads = &value + case float64: + f := int64(v) + s.Threads = &f + } + + } + } + return nil +} + +// NewThreadCount returns a ThreadCount. +func NewThreadCount() *ThreadCount { + r := &ThreadCount{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadpoolrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadpoolrecord.go new file mode 100644 index 000000000..0843529f3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/threadpoolrecord.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ThreadPoolRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/thread_pool/types.ts#L22-L124 +type ThreadPoolRecord struct { + // Active The number of active threads in the current thread pool. + Active *string `json:"active,omitempty"` + // Completed The number of completed tasks. + Completed *string `json:"completed,omitempty"` + // Core The core number of active threads allowed in a scaling thread pool. + Core *string `json:"core,omitempty"` + // EphemeralNodeId The ephemeral node identifier. + EphemeralNodeId *string `json:"ephemeral_node_id,omitempty"` + // Host The host name for the current node. + Host *string `json:"host,omitempty"` + // Ip The IP address for the current node. + Ip *string `json:"ip,omitempty"` + // KeepAlive The thread keep alive time. + KeepAlive *string `json:"keep_alive,omitempty"` + // Largest The highest number of active threads in the current thread pool. + Largest *string `json:"largest,omitempty"` + // Max The maximum number of active threads allowed in a scaling thread pool. + Max *string `json:"max,omitempty"` + // Name The thread pool name. + Name *string `json:"name,omitempty"` + // NodeId The persistent node identifier. + NodeId *string `json:"node_id,omitempty"` + // NodeName The node name. + NodeName *string `json:"node_name,omitempty"` + // Pid The process identifier. + Pid *string `json:"pid,omitempty"` + // PoolSize The number of threads in the current thread pool. + PoolSize *string `json:"pool_size,omitempty"` + // Port The bound transport port for the current node. + Port *string `json:"port,omitempty"` + // Queue The number of tasks currently in queue. + Queue *string `json:"queue,omitempty"` + // QueueSize The maximum number of tasks permitted in the queue. + QueueSize *string `json:"queue_size,omitempty"` + // Rejected The number of rejected tasks. + Rejected *string `json:"rejected,omitempty"` + // Size The number of active threads allowed in a fixed thread pool. + Size *string `json:"size,omitempty"` + // Type The thread pool type. + // Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and + // `scaling`. 
+ Type *string `json:"type,omitempty"` +} + +func (s *ThreadPoolRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active", "a": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Active = &o + + case "completed", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Completed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Completed = &o + + case "core", "cr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Core", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Core = &o + + case "ephemeral_node_id", "eid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "EphemeralNodeId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EphemeralNodeId = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "keep_alive", "ka": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeepAlive", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeepAlive = &o + + case "largest", "l": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Largest", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Largest = &o + + case "max", "mx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Max = &o + + case "name", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "node_id", "id": + if err := dec.Decode(&s.NodeId); err != nil { + return fmt.Errorf("%s | %w", "NodeId", err) + } + + case "node_name", "nn": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NodeName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeName = &o + + case "pid", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pid", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "pool_size", "psz": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PoolSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PoolSize = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "queue", "q": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Queue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Queue = &o + + case "queue_size", "qs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueueSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueSize = &o + + case "rejected", "r": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Rejected", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rejected = &o + + case "size", "sz": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + +// NewThreadPoolRecord returns a ThreadPoolRecord. +func NewThreadPoolRecord() *ThreadPoolRecord { + r := &ThreadPoolRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/throttlestate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/throttlestate.go new file mode 100644 index 000000000..ab878a2c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/throttlestate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ThrottleState type. 
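
Editor's note (annotation): the ThreadPoolRecord decoder above matches the cat API's abbreviated column names as well as the long ones (for example "a" for "active" and "q" for "queue"), so both spellings populate the same fields:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/elastic/go-elasticsearch/v9/typedapi/types"
	)

	func main() {
		var long, short types.ThreadPoolRecord
		_ = json.Unmarshal([]byte(`{"active":"1","queue":"0"}`), &long)
		_ = json.Unmarshal([]byte(`{"a":"1","q":"0"}`), &short)
		fmt.Println(*long.Active == *short.Active, *long.Queue == *short.Queue) // true true
	}
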
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L120-L123 +type ThrottleState struct { + Reason string `json:"reason"` + Timestamp DateTime `json:"timestamp"` +} + +func (s *ThrottleState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return fmt.Errorf("%s | %w", "Timestamp", err) + } + + } + } + return nil +} + +// NewThrottleState returns a ThrottleState. +func NewThrottleState() *ThrottleState { + r := &ThrottleState{} + + return r +} + +type ThrottleStateVariant interface { + ThrottleStateCaster() *ThrottleState +} + +func (s *ThrottleState) ThrottleStateCaster() *ThrottleState { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timehttphistogram.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timehttphistogram.go new file mode 100644 index 000000000..a6ba9a5cd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timehttphistogram.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TimeHttpHistogram type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L710-L714 +type TimeHttpHistogram struct { + Count int64 `json:"count"` + GeMillis *int64 `json:"ge_millis,omitempty"` + LtMillis *int64 `json:"lt_millis,omitempty"` +} + +func (s *TimeHttpHistogram) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "ge_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "GeMillis", err) + } + s.GeMillis = &value + case float64: + f := int64(v) + s.GeMillis = &f + } + + case "lt_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LtMillis", err) + } + s.LtMillis = &value + case float64: + f := int64(v) + s.LtMillis = &f + } + + } + } + return nil +} + +// NewTimeHttpHistogram returns a TimeHttpHistogram. +func NewTimeHttpHistogram() *TimeHttpHistogram { + r := &TimeHttpHistogram{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofmonth.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofmonth.go new file mode 100644 index 000000000..c57fd1c7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofmonth.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TimeOfMonth type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L111-L114 +type TimeOfMonth struct { + At []string `json:"at"` + On []int `json:"on"` +} + +// NewTimeOfMonth returns a TimeOfMonth. 
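For reviewers: the string/float64 branching in the generated unmarshalers above exists because Elasticsearch can render long-valued fields either as JSON numbers or as quoted strings. A minimal editorial sketch (not part of the vendored diff) of that behavior, assuming the vendored v9 types package as added here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both payloads decode identically: the generated UnmarshalJSON parses
	// quoted numerics with strconv.ParseInt and truncates float64 tokens
	// to int64.
	for _, in := range []string{
		`{"count": 42, "lt_millis": 100}`,
		`{"count": "42", "lt_millis": 100}`,
	} {
		var h types.TimeHttpHistogram
		if err := json.Unmarshal([]byte(in), &h); err != nil {
			panic(err)
		}
		fmt.Println(h.Count, *h.LtMillis) // 42 100 in both cases
	}
}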
+func NewTimeOfMonth() *TimeOfMonth { + r := &TimeOfMonth{} + + return r +} + +type TimeOfMonthVariant interface { + TimeOfMonthCaster() *TimeOfMonth +} + +func (s *TimeOfMonth) TimeOfMonthCaster() *TimeOfMonth { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofweek.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofweek.go new file mode 100644 index 000000000..55a2f5288 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofweek.go @@ -0,0 +1,48 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/day" +) + +// TimeOfWeek type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L116-L119 +type TimeOfWeek struct { + At []string `json:"at"` + On []day.Day `json:"on"` +} + +// NewTimeOfWeek returns a TimeOfWeek. +func NewTimeOfWeek() *TimeOfWeek { + r := &TimeOfWeek{} + + return r +} + +type TimeOfWeekVariant interface { + TimeOfWeekCaster() *TimeOfWeek +} + +func (s *TimeOfWeek) TimeOfWeekCaster() *TimeOfWeek { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofyear.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofyear.go new file mode 100644 index 000000000..d9a7b08a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeofyear.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/month" +) + +// TimeOfYear type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Schedule.ts#L121-L125 +type TimeOfYear struct { + At []string `json:"at"` + Int []month.Month `json:"int"` + On []int `json:"on"` +} + +// NewTimeOfYear returns a TimeOfYear. +func NewTimeOfYear() *TimeOfYear { + r := &TimeOfYear{} + + return r +} + +type TimeOfYearVariant interface { + TimeOfYearCaster() *TimeOfYear +} + +func (s *TimeOfYear) TimeOfYearCaster() *TimeOfYear { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregate.go new file mode 100644 index 000000000..54e1554da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TimeSeriesAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L732-L733 +type TimeSeriesAggregate struct { + Buckets BucketsTimeSeriesBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *TimeSeriesAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]TimeSeriesBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []TimeSeriesBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewTimeSeriesAggregate returns a TimeSeriesAggregate. 
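For reviewers: TimeSeriesAggregate.Buckets is a two-shape union, and the unmarshaler above switches on the first byte of the raw message ('{' for keyed responses, '[' for plain ones). An editorial sketch of how a caller distinguishes the two, assuming BucketsTimeSeriesBucket is the usual any-based union that the two decode branches imply:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := `{"buckets": [{"doc_count": 3, "key": {"host": "a"}}]}`

	var agg types.TimeSeriesAggregate
	if err := json.Unmarshal([]byte(raw), &agg); err != nil {
		panic(err)
	}

	// The decode branches store either a slice or a map, so the caller
	// type-switches to recover the concrete shape.
	switch b := agg.Buckets.(type) {
	case []types.TimeSeriesBucket:
		fmt.Println("array form, first doc_count:", b[0].DocCount)
	case map[string]types.TimeSeriesBucket:
		fmt.Println("keyed form with", len(b), "buckets")
	}
}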
+func NewTimeSeriesAggregate() *TimeSeriesAggregate { + r := &TimeSeriesAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregation.go new file mode 100644 index 000000000..7baa19429 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesaggregation.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TimeSeriesAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1033-L1046 +type TimeSeriesAggregation struct { + // Keyed Set to `true` to associate a unique string key with each bucket and returns + // the ranges as a hash rather than an array. + Keyed *bool `json:"keyed,omitempty"` + // Size The maximum number of results to return. + Size *int `json:"size,omitempty"` +} + +func (s *TimeSeriesAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keyed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Keyed", err) + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewTimeSeriesAggregation returns a TimeSeriesAggregation. 
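For reviewers: on the request side, both options of TimeSeriesAggregation are optional pointers, so building one takes the address of locals. A small editorial sketch of constructing and serializing it:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	keyed, size := true, 10
	agg := types.TimeSeriesAggregation{
		Keyed: &keyed, // return buckets as a hash keyed by time series, not an array
		Size:  &size,  // cap the number of buckets returned
	}

	out, err := json.Marshal(agg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"keyed":true,"size":10}
}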
+func NewTimeSeriesAggregation() *TimeSeriesAggregation { + r := &TimeSeriesAggregation{} + + return r +} + +type TimeSeriesAggregationVariant interface { + TimeSeriesAggregationCaster() *TimeSeriesAggregation +} + +func (s *TimeSeriesAggregation) TimeSeriesAggregationCaster() *TimeSeriesAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesbucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesbucket.go new file mode 100644 index 000000000..27e2ce3f2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timeseriesbucket.go @@ -0,0 +1,642 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// TimeSeriesBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L735-L737 +type TimeSeriesBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key map[string]FieldValue `json:"key"` +} + +func (s *TimeSeriesBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if s.Key == nil { + s.Key = make(map[string]FieldValue, 0) + } + if err := dec.Decode(&s.Key); err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": 
+ o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s TimeSeriesBucket) MarshalJSON() ([]byte, error) { + type opt TimeSeriesBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewTimeSeriesBucket returns a TimeSeriesBucket. +func NewTimeSeriesBucket() *TimeSeriesBucket { + r := &TimeSeriesBucket{ + Aggregations: make(map[string]Aggregate), + Key: make(map[string]FieldValue), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timesync.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timesync.go new file mode 100644 index 000000000..28f27b779 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timesync.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TimeSync type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L177-L189 +type TimeSync struct { + // Delay The time delay between the current time and the latest input data time. + Delay Duration `json:"delay,omitempty"` + // Field The date field that is used to identify new documents in the source. In + // general, it’s a good idea to use a field + // that contains the ingest timestamp. If you use a different field, you might + // need to set the delay such that it + // accounts for data transmission delays. 
+ Field string `json:"field"` +} + +func (s *TimeSync) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delay": + if err := dec.Decode(&s.Delay); err != nil { + return fmt.Errorf("%s | %w", "Delay", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + } + } + return nil +} + +// NewTimeSync returns a TimeSync. +func NewTimeSync() *TimeSync { + r := &TimeSync{} + + return r +} + +type TimeSyncVariant interface { + TimeSyncCaster() *TimeSync +} + +func (s *TimeSync) TimeSyncCaster() *TimeSync { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timingstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timingstats.go new file mode 100644 index 000000000..559d3821c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/timingstats.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TimingStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L564-L569 +type TimingStats struct { + // ElapsedTime Runtime of the analysis in milliseconds. + ElapsedTime int64 `json:"elapsed_time"` + // IterationTime Runtime of the latest iteration of the analysis in milliseconds. + IterationTime *int64 `json:"iteration_time,omitempty"` +} + +func (s *TimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "elapsed_time": + if err := dec.Decode(&s.ElapsedTime); err != nil { + return fmt.Errorf("%s | %w", "ElapsedTime", err) + } + + case "iteration_time": + if err := dec.Decode(&s.IterationTime); err != nil { + return fmt.Errorf("%s | %w", "IterationTime", err) + } + + } + } + return nil +} + +// NewTimingStats returns a TimingStats. 
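For reviewers: the Delay/Field doc comments above carry the operational guidance for TimeSync: when Field is not an ingest timestamp, Delay must cover data-transmission latency. An editorial sketch of a sync block, assuming Duration is the generated string-or-number union (which is what the delegating dec.Decode above implies):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	sync := types.TimeSync{
		// Duration accepts the plain string form such as "60s"; the delay
		// gives slow shippers time to land documents before the transform's
		// checkpoint passes them by.
		Delay: "60s",
		Field: "@timestamp",
	}

	b, err := json.Marshal(sync)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"delay":"60s","field":"@timestamp"}
}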
+func NewTimingStats() *TimingStats { + r := &TimingStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokencountproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokencountproperty.go new file mode 100644 index 000000000..aae1a4f44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokencountproperty.go @@ -0,0 +1,954 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// TokenCountProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/specialized.ts#L94-L101 +type TokenCountProperty struct { + Analyzer *string `json:"analyzer,omitempty"` + Boost *Float64 `json:"boost,omitempty"` + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + EnablePositionIncrements *bool `json:"enable_position_increments,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + Index *bool `json:"index,omitempty"` + // Meta Metadata about the field. 
+ Meta map[string]string `json:"meta,omitempty"` + NullValue *Float64 `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Analyzer", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "enable_position_increments": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "EnablePositionIncrements", err) + } + s.EnablePositionIncrements = &value + case bool: + s.EnablePositionIncrements = &v + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties 
| %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); 
err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TokenCountProperty) MarshalJSON() ([]byte, error) { + type innerTokenCountProperty TokenCountProperty + tmp := innerTokenCountProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EnablePositionIncrements: s.EnablePositionIncrements, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "token_count" + + return json.Marshal(tmp) +} + +// NewTokenCountProperty returns a TokenCountProperty. +func NewTokenCountProperty() *TokenCountProperty { + r := &TokenCountProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type TokenCountPropertyVariant interface { + TokenCountPropertyCaster() *TokenCountProperty +} + +func (s *TokenCountProperty) TokenCountPropertyCaster() *TokenCountProperty { + return s +} + +func (s *TokenCountProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokendetail.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokendetail.go new file mode 100644 index 000000000..5e44c7656 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokendetail.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
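The MarshalJSON override above pins the mapping type, so a freshly constructed TokenCountProperty always serializes as token_count. A minimal consumer-side sketch (illustrative only; not part of the vendored code):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// NewTokenCountProperty pre-allocates the Fields/Meta/Properties maps;
	// MarshalJSON then forces "type":"token_count" regardless of input.
	p := types.NewTokenCountProperty()
	data, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}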
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TokenDetail type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/analyze/types.ts#L71-L74 +type TokenDetail struct { + Name string `json:"name"` + Tokens []ExplainAnalyzeToken `json:"tokens"` +} + +func (s *TokenDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return fmt.Errorf("%s | %w", "Tokens", err) + } + + } + } + return nil +} + +// NewTokenDetail returns a TokenDetail. +func NewTokenDetail() *TokenDetail { + r := &TokenDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilter.go new file mode 100644 index 000000000..9e8915dfe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilter.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
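A short sketch of exercising the TokenDetail decoder above; the analyzer name and empty token list are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Hypothetical fragment of an analyze-with-explain response.
	raw := []byte(`{"name":"standard","tokens":[]}`)
	var d types.TokenDetail
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Name) // "standard"
}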
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TokenFilter holds the union for the following types: +// +// string +// TokenFilterDefinition +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L575-L580 +type TokenFilter any + +type TokenFilterVariant interface { + TokenFilterCaster() *TokenFilter +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilterdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilterdefinition.go new file mode 100644 index 000000000..814aff1e6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenfilterdefinition.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TokenFilterDefinition holds the union for the following types: +// +// ApostropheTokenFilter +// ArabicStemTokenFilter +// ArabicNormalizationTokenFilter +// AsciiFoldingTokenFilter +// BengaliNormalizationTokenFilter +// BrazilianStemTokenFilter +// CjkBigramTokenFilter +// CjkWidthTokenFilter +// ClassicTokenFilter +// CommonGramsTokenFilter +// ConditionTokenFilter +// CzechStemTokenFilter +// DecimalDigitTokenFilter +// DelimitedPayloadTokenFilter +// DutchStemTokenFilter +// EdgeNGramTokenFilter +// ElisionTokenFilter +// FingerprintTokenFilter +// FlattenGraphTokenFilter +// FrenchStemTokenFilter +// GermanNormalizationTokenFilter +// GermanStemTokenFilter +// HindiNormalizationTokenFilter +// HunspellTokenFilter +// HyphenationDecompounderTokenFilter +// IndicNormalizationTokenFilter +// KeepTypesTokenFilter +// KeepWordsTokenFilter +// KeywordMarkerTokenFilter +// KeywordRepeatTokenFilter +// KStemTokenFilter +// LengthTokenFilter +// LimitTokenCountTokenFilter +// LowercaseTokenFilter +// MinHashTokenFilter +// MultiplexerTokenFilter +// NGramTokenFilter +// NoriPartOfSpeechTokenFilter +// PatternCaptureTokenFilter +// PatternReplaceTokenFilter +// PersianNormalizationTokenFilter +// PersianStemTokenFilter +// PorterStemTokenFilter +// PredicateTokenFilter +// RemoveDuplicatesTokenFilter +// ReverseTokenFilter +// RussianStemTokenFilter +// ScandinavianFoldingTokenFilter +// ScandinavianNormalizationTokenFilter +// SerbianNormalizationTokenFilter +// ShingleTokenFilter +// SnowballTokenFilter +// SoraniNormalizationTokenFilter +// StemmerOverrideTokenFilter +// StemmerTokenFilter +// StopTokenFilter +// SynonymGraphTokenFilter +// SynonymTokenFilter +// 
TrimTokenFilter +// TruncateTokenFilter +// UniqueTokenFilter +// UppercaseTokenFilter +// WordDelimiterGraphTokenFilter +// WordDelimiterTokenFilter +// JaStopTokenFilter +// KuromojiStemmerTokenFilter +// KuromojiReadingFormTokenFilter +// KuromojiPartOfSpeechTokenFilter +// IcuCollationTokenFilter +// IcuFoldingTokenFilter +// IcuNormalizationTokenFilter +// IcuTransformTokenFilter +// PhoneticTokenFilter +// DictionaryDecompounderTokenFilter +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L582-L660 +type TokenFilterDefinition any + +type TokenFilterDefinitionVariant interface { + TokenFilterDefinitionCaster() *TokenFilterDefinition +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizationconfigcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizationconfigcontainer.go new file mode 100644 index 000000000..297ff8622 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizationconfigcontainer.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" +) + +// TokenizationConfigContainer type. 
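Since TokenFilter and TokenFilterDefinition are open unions (any), a filter can be referenced by name or defined inline. A hedged sketch; the raw-map form below stands in for the concrete variants listed above (e.g. StopTokenFilter), whose exact field shapes are not shown here:

package main

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

func main() {
	// Reference a built-in filter by name...
	var byName types.TokenFilter = "lowercase"

	// ...or supply an inline definition. TokenFilterDefinition is any,
	// so a raw map satisfies the union for illustration purposes.
	var inline types.TokenFilterDefinition = map[string]any{
		"type":      "stop",
		"stopwords": []string{"the", "a"},
	}
	var asFilter types.TokenFilter = inline

	_, _ = byName, asFilter
}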
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L135-L158 +type TokenizationConfigContainer struct { + AdditionalTokenizationConfigContainerProperty map[string]json.RawMessage `json:"-"` + // Bert Indicates BERT tokenization and its options + Bert *NlpBertTokenizationConfig `json:"bert,omitempty"` + // BertJa Indicates BERT Japanese tokenization and its options + BertJa *NlpBertTokenizationConfig `json:"bert_ja,omitempty"` + // Mpnet Indicates MPNET tokenization and its options + Mpnet *NlpBertTokenizationConfig `json:"mpnet,omitempty"` + // Roberta Indicates RoBERTa tokenization and its options + Roberta *NlpRobertaTokenizationConfig `json:"roberta,omitempty"` + XlmRoberta *XlmRobertaTokenizationConfig `json:"xlm_roberta,omitempty"` +} + +// MarshalJSON overrides marshalling for types with additional properties +func (s TokenizationConfigContainer) MarshalJSON() ([]byte, error) { + type opt TokenizationConfigContainer + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.AdditionalTokenizationConfigContainerProperty { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "AdditionalTokenizationConfigContainerProperty") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewTokenizationConfigContainer returns a TokenizationConfigContainer. +func NewTokenizationConfigContainer() *TokenizationConfigContainer { + r := &TokenizationConfigContainer{ + AdditionalTokenizationConfigContainerProperty: make(map[string]json.RawMessage), + } + + return r +} + +type TokenizationConfigContainerVariant interface { + TokenizationConfigContainerCaster() *TokenizationConfigContainer +} + +func (s *TokenizationConfigContainer) TokenizationConfigContainerCaster() *TokenizationConfigContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizer.go new file mode 100644 index 000000000..9cb06aa19 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizer.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
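The additional-properties handling above inlines every entry of the map at the top level of the emitted JSON. A minimal sketch with a hypothetical extra key:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewTokenizationConfigContainer()
	// Hypothetical key: anything placed in the additional-properties map
	// is hoisted to the top level by the MarshalJSON override above.
	c.AdditionalTokenizationConfigContainerProperty["my_tokenizer"] =
		json.RawMessage(`{"max_sequence_length":512}`)
	data, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // roughly {"my_tokenizer":{"max_sequence_length":512}}
}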
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Tokenizer holds the union for the following types: +// +// string +// TokenizerDefinition +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L142-L147 +type Tokenizer any + +type TokenizerVariant interface { + TokenizerCaster() *Tokenizer +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizerdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizerdefinition.go new file mode 100644 index 000000000..9d7f64d60 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenizerdefinition.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TokenizerDefinition holds the union for the following types: +// +// CharGroupTokenizer +// ClassicTokenizer +// EdgeNGramTokenizer +// KeywordTokenizer +// LetterTokenizer +// LowercaseTokenizer +// NGramTokenizer +// PathHierarchyTokenizer +// PatternTokenizer +// SimplePatternTokenizer +// SimplePatternSplitTokenizer +// StandardTokenizer +// ThaiTokenizer +// UaxEmailUrlTokenizer +// WhitespaceTokenizer +// IcuTokenizer +// KuromojiTokenizer +// NoriTokenizer +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L149-L172 +type TokenizerDefinition any + +type TokenizerDefinitionVariant interface { + TokenizerDefinitionCaster() *TokenizerDefinition +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenpruningconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenpruningconfig.go new file mode 100644 index 000000000..370b6d134 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tokenpruningconfig.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
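Tokenizer mirrors the TokenFilter union: a built-in tokenizer by name, or an inline TokenizerDefinition. The raw map below is illustrative only; the typed variants listed above are the canonical route:

package main

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

func main() {
	// Built-in tokenizer referenced by name.
	var tok types.Tokenizer = "standard"

	// Inline definition; TokenizerDefinition is an open union (any).
	var def types.TokenizerDefinition = map[string]any{
		"type": "ngram", "min_gram": 2, "max_gram": 3,
	}
	tok = def
	_ = tok
}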
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TokenPruningConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/TokenPruningConfig.ts#L22-L35 +type TokenPruningConfig struct { + // OnlyScorePrunedTokens Whether to only score pruned tokens, vs only scoring kept tokens. + OnlyScorePrunedTokens *bool `json:"only_score_pruned_tokens,omitempty"` + // TokensFreqRatioThreshold Tokens whose frequency is more than this threshold times the average + // frequency of all tokens in the specified field are considered outliers and + // pruned. + TokensFreqRatioThreshold *int `json:"tokens_freq_ratio_threshold,omitempty"` + // TokensWeightThreshold Tokens whose weight is less than this threshold are considered nonsignificant + // and pruned. + TokensWeightThreshold *float32 `json:"tokens_weight_threshold,omitempty"` +} + +func (s *TokenPruningConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "only_score_pruned_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OnlyScorePrunedTokens", err) + } + s.OnlyScorePrunedTokens = &value + case bool: + s.OnlyScorePrunedTokens = &v + } + + case "tokens_freq_ratio_threshold": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TokensFreqRatioThreshold", err) + } + s.TokensFreqRatioThreshold = &value + case float64: + f := int(v) + s.TokensFreqRatioThreshold = &f + } + + case "tokens_weight_threshold": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "TokensWeightThreshold", err) + } + f := float32(value) + s.TokensWeightThreshold = &f + case float64: + f := float32(v) + s.TokensWeightThreshold = &f + } + + } + } + return nil +} + +// NewTokenPruningConfig returns a TokenPruningConfig. +func NewTokenPruningConfig() *TokenPruningConfig { + r := &TokenPruningConfig{} + + return r +} + +type TokenPruningConfigVariant interface { + TokenPruningConfigCaster() *TokenPruningConfig +} + +func (s *TokenPruningConfig) TokenPruningConfigCaster() *TokenPruningConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcall.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcall.go new file mode 100644 index 000000000..a5a11bffe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcall.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCall type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L151-L167 +type ToolCall struct { + // Function The function that the model called. + Function ToolCallFunction `json:"function"` + // Id The identifier of the tool call. + Id string `json:"id"` + // Type The type of the tool call. + Type string `json:"type"` +} + +func (s *ToolCall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "function": + if err := dec.Decode(&s.Function); err != nil { + return fmt.Errorf("%s | %w", "Function", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewToolCall returns a ToolCall. +func NewToolCall() *ToolCall { + r := &ToolCall{} + + return r +} + +type ToolCallVariant interface { + ToolCallCaster() *ToolCall +} + +func (s *ToolCall) ToolCallCaster() *ToolCall { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcallfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcallfunction.go new file mode 100644 index 000000000..43c30aa68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toolcallfunction.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ToolCallFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L137-L149 +type ToolCallFunction struct { + // Arguments The arguments to call the function with in JSON format. + Arguments string `json:"arguments"` + // Name The name of the function to call. + Name string `json:"name"` +} + +func (s *ToolCallFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "arguments": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Arguments", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arguments = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + +// NewToolCallFunction returns a ToolCallFunction. +func NewToolCallFunction() *ToolCallFunction { + r := &ToolCallFunction{} + + return r +} + +type ToolCallFunctionVariant interface { + ToolCallFunctionCaster() *ToolCallFunction +} + +func (s *ToolCallFunction) ToolCallFunctionCaster() *ToolCallFunction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topclassentry.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topclassentry.go new file mode 100644 index 000000000..32ee507ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topclassentry.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TopClassEntry type. 
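A sketch of feeding a chat-completion tool call through the ToolCall/ToolCallFunction decoders above; the payload is invented for illustration and shows the string-typed arguments field being unquoted:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Hypothetical tool call; "arguments" is itself a JSON string.
	raw := []byte(`{"id":"call_1","type":"function",
		"function":{"name":"get_weather","arguments":"{\"city\":\"Paris\"}"}}`)
	var tc types.ToolCall
	if err := json.Unmarshal(raw, &tc); err != nil {
		panic(err)
	}
	fmt.Println(tc.Function.Name, tc.Function.Arguments)
}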
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L459-L463 +type TopClassEntry struct { + ClassName string `json:"class_name"` + ClassProbability Float64 `json:"class_probability"` + ClassScore Float64 `json:"class_score"` +} + +func (s *TopClassEntry) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ClassName", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "class_probability": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ClassProbability", err) + } + f := Float64(value) + s.ClassProbability = f + case float64: + f := Float64(v) + s.ClassProbability = f + } + + case "class_score": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ClassScore", err) + } + f := Float64(value) + s.ClassScore = f + case float64: + f := Float64(v) + s.ClassScore = f + } + + } + } + return nil +} + +// NewTopClassEntry returns a TopClassEntry. +func NewTopClassEntry() *TopClassEntry { + r := &TopClassEntry{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophit.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophit.go new file mode 100644 index 000000000..cf7962bc4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophit.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TopHit type. 
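The TopClassEntry decoder above falls back to strconv parsing, so class scores arrive intact whether encoded as JSON numbers or as quoted strings. A quick sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Mixed encodings: probability as a string, score as a number.
	raw := []byte(`{"class_name":"spam","class_probability":"0.93","class_score":0.41}`)
	var e types.TopClassEntry
	if err := json.Unmarshal(raw, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.ClassName, e.ClassProbability, e.ClassScore)
}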
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/text_structure/_types/Structure.ts#L35-L38 +type TopHit struct { + Count int64 `json:"count"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *TopHit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + } + } + return nil +} + +// NewTopHit returns a TopHit. +func NewTopHit() *TopHit { + r := &TopHit{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregate.go new file mode 100644 index 000000000..8840bffd6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregate.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TopHitsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L749-L755 +type TopHitsAggregate struct { + Hits HitsMetadata `json:"hits"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *TopHitsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return fmt.Errorf("%s | %w", "Hits", err) + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewTopHitsAggregate returns a TopHitsAggregate. 
+func NewTopHitsAggregate() *TopHitsAggregate { + r := &TopHitsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregation.go new file mode 100644 index 000000000..145371089 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/tophitsaggregation.go @@ -0,0 +1,306 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TopHitsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L369-L429 +type TopHitsAggregation struct { + // DocvalueFields Fields for which to return doc values. + DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` + // Explain If `true`, returns detailed information about score computation as part of a + // hit. + Explain *bool `json:"explain,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Fields Array of wildcard (*) patterns. The request returns values for field names + // matching these patterns in the hits.fields property of the response. + Fields []FieldAndFormat `json:"fields,omitempty"` + // From Starting document offset. + From *int `json:"from,omitempty"` + // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one + // or more fields in the search results. + Highlight *Highlight `json:"highlight,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` + // ScriptFields Returns the result of one or more script evaluations for each hit. + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + // SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification + // of each hit. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The maximum number of top matching hits to return per bucket. + Size *int `json:"size,omitempty"` + // Sort Sort order of the top matching hits. + // By default, the hits are sorted by the score of the main query. + Sort []SortCombinations `json:"sort,omitempty"` + // Source_ Selects the fields of the source that are returned. 
+ Source_ SourceConfig `json:"_source,omitempty"` + // StoredFields Returns values for the specified stored fields (fields that use the `store` + // mapping option). + StoredFields []string `json:"stored_fields,omitempty"` + // TrackScores If `true`, calculates and returns document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // Version If `true`, returns document version as part of a hit. + Version *bool `json:"version,omitempty"` +} + +func (s *TopHitsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return fmt.Errorf("%s | %w", "DocvalueFields", err) + } + + case "explain": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Explain", err) + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return fmt.Errorf("%s | %w", "Fields", err) + } + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return fmt.Errorf("%s | %w", "Highlight", err) + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return fmt.Errorf("%s | %w", "ScriptFields", err) + } + + case "seq_no_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SeqNoPrimaryTerm", err) + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return 
fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return fmt.Errorf("%s | %w", "StoredFields", err) + } + } + + case "track_scores": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TrackScores", err) + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "version": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} + +// NewTopHitsAggregation returns a TopHitsAggregation. +func NewTopHitsAggregation() *TopHitsAggregation { + r := &TopHitsAggregation{ + ScriptFields: make(map[string]ScriptField), + } + + return r +} + +type TopHitsAggregationVariant interface { + TopHitsAggregationCaster() *TopHitsAggregation +} + +func (s *TopHitsAggregation) TopHitsAggregationCaster() *TopHitsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topleftbottomrightgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topleftbottomrightgeobounds.go new file mode 100644 index 000000000..b077b9bda --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topleftbottomrightgeobounds.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TopLeftBottomRightGeoBounds type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L161-L164 +type TopLeftBottomRightGeoBounds struct { + BottomRight GeoLocation `json:"bottom_right"` + TopLeft GeoLocation `json:"top_left"` +} + +func (s *TopLeftBottomRightGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom_right": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "BottomRight", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + bottomright_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "BottomRight", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "BottomRight", err) + } + s.BottomRight = o + break bottomright_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "BottomRight", err) + } + s.BottomRight = o + break bottomright_field + + } + } + if s.BottomRight == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.BottomRight); err != nil { + return fmt.Errorf("%s | %w", "BottomRight", err) + } + } + + case "top_left": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "TopLeft", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + topleft_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "TopLeft", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "TopLeft", err) + } + s.TopLeft = o + break topleft_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "TopLeft", err) + } + s.TopLeft = o + break topleft_field + + } + } + if s.TopLeft == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.TopLeft); err != nil { + return fmt.Errorf("%s | %w", "TopLeft", err) + } + } + + } + } + return nil +} + +// NewTopLeftBottomRightGeoBounds returns a TopLeftBottomRightGeoBounds. 
+func NewTopLeftBottomRightGeoBounds() *TopLeftBottomRightGeoBounds { + r := &TopLeftBottomRightGeoBounds{} + + return r +} + +type TopLeftBottomRightGeoBoundsVariant interface { + TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds +} + +func (s *TopLeftBottomRightGeoBounds) TopLeftBottomRightGeoBoundsCaster() *TopLeftBottomRightGeoBounds { + return s +} + +func (s *TopLeftBottomRightGeoBounds) GeoBoundsCaster() *GeoBounds { + o := GeoBounds(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetrics.go new file mode 100644 index 000000000..e319e0c36 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetrics.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TopMetrics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L834-L838 +type TopMetrics struct { + Metrics map[string]FieldValue `json:"metrics"` + Sort []FieldValue `json:"sort"` +} + +// NewTopMetrics returns a TopMetrics. +func NewTopMetrics() *TopMetrics { + r := &TopMetrics{ + Metrics: make(map[string]FieldValue), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregate.go new file mode 100644 index 000000000..8f0e3348b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregate.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
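The caster methods above widen a concrete bounds value into the GeoBounds union. A sketch, assuming LatLonGeoLocation carries the Lat/Lon fields the decoder matches on; coordinate values are invented:

package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	b := types.NewTopLeftBottomRightGeoBounds()
	// Lat/lon variant of the GeoLocation union, per the decoder above.
	b.TopLeft = types.LatLonGeoLocation{Lat: 48.9, Lon: 2.2}
	b.BottomRight = types.LatLonGeoLocation{Lat: 48.8, Lon: 2.5}

	// Widen the concrete bounds into the GeoBounds union.
	var gb *types.GeoBounds = b.GeoBoundsCaster()
	fmt.Printf("%v\n", *gb)
}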
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TopMetricsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L829-L832 +type TopMetricsAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Top []TopMetrics `json:"top"` +} + +func (s *TopMetricsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "top": + if err := dec.Decode(&s.Top); err != nil { + return fmt.Errorf("%s | %w", "Top", err) + } + + } + } + return nil +} + +// NewTopMetricsAggregate returns a TopMetricsAggregate. +func NewTopMetricsAggregate() *TopMetricsAggregate { + r := &TopMetricsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregation.go new file mode 100644 index 000000000..2d82e122f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsaggregation.go @@ -0,0 +1,146 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TopMetricsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L431-L448 +type TopMetricsAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Metrics The fields of the top document to return. + Metrics []TopMetricsValue `json:"metrics,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` + // Size The number of top documents from which to return metrics. + Size *int `json:"size,omitempty"` + // Sort The sort order of the documents. 
+ Sort []SortCombinations `json:"sort,omitempty"` +} + +func (s *TopMetricsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "metrics": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTopMetricsValue() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + + s.Metrics = append(s.Metrics, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Metrics); err != nil { + return fmt.Errorf("%s | %w", "Metrics", err) + } + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + + } + } + return nil +} + +// NewTopMetricsAggregation returns a TopMetricsAggregation. +func NewTopMetricsAggregation() *TopMetricsAggregation { + r := &TopMetricsAggregation{} + + return r +} + +type TopMetricsAggregationVariant interface { + TopMetricsAggregationCaster() *TopMetricsAggregation +} + +func (s *TopMetricsAggregation) TopMetricsAggregationCaster() *TopMetricsAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsvalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsvalue.go new file mode 100644 index 000000000..9ea22ed11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/topmetricsvalue.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TopMetricsValue type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L450-L455 +type TopMetricsValue struct { + // Field A field to return as a metric. + Field string `json:"field"` +} + +func (s *TopMetricsValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + } + } + return nil +} + +// NewTopMetricsValue returns a TopMetricsValue. +func NewTopMetricsValue() *TopMetricsValue { + r := &TopMetricsValue{} + + return r +} + +type TopMetricsValueVariant interface { + TopMetricsValueCaster() *TopMetricsValue +} + +func (s *TopMetricsValue) TopMetricsValueCaster() *TopMetricsValue { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toprightbottomleftgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toprightbottomleftgeobounds.go new file mode 100644 index 000000000..3e2b08e23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/toprightbottomleftgeobounds.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TopRightBottomLeftGeoBounds type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L166-L169 +type TopRightBottomLeftGeoBounds struct { + BottomLeft GeoLocation `json:"bottom_left"` + TopRight GeoLocation `json:"top_right"` +} + +func (s *TopRightBottomLeftGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom_left": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "BottomLeft", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + bottomleft_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "BottomLeft", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "BottomLeft", err) + } + s.BottomLeft = o + break bottomleft_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "BottomLeft", err) + } + s.BottomLeft = o + break bottomleft_field + + } + } + if s.BottomLeft == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.BottomLeft); err != nil { + return fmt.Errorf("%s | %w", "BottomLeft", err) + } + } + + case "top_right": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "TopRight", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + topright_field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "TopRight", err) + } + + switch t { + + case "lat", "lon": + o := NewLatLonGeoLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "TopRight", err) + } + s.TopRight = o + break topright_field + + case "geohash": + o := NewGeoHashLocation() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "TopRight", err) + } + s.TopRight = o + break topright_field + + } + } + if s.TopRight == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.TopRight); err != nil { + return fmt.Errorf("%s | %w", "TopRight", err) + } + } + + } + } + return nil +} + +// NewTopRightBottomLeftGeoBounds returns a TopRightBottomLeftGeoBounds. 
+func NewTopRightBottomLeftGeoBounds() *TopRightBottomLeftGeoBounds { + r := &TopRightBottomLeftGeoBounds{} + + return r +} + +type TopRightBottomLeftGeoBoundsVariant interface { + TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds +} + +func (s *TopRightBottomLeftGeoBounds) TopRightBottomLeftGeoBoundsCaster() *TopRightBottomLeftGeoBounds { + return s +} + +func (s *TopRightBottomLeftGeoBounds) GeoBoundsCaster() *GeoBounds { + o := GeoBounds(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportance.go new file mode 100644 index 000000000..a9faccab8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportance.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TotalFeatureImportance type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L290-L297 +type TotalFeatureImportance struct { + // Classes If the trained model is a classification model, feature importance statistics + // are gathered per target class value. + Classes []TotalFeatureImportanceClass `json:"classes"` + // FeatureName The feature for which this importance was calculated. + FeatureName string `json:"feature_name"` + // Importance A collection of feature importance statistics related to the training data + // set for this particular feature. + Importance []TotalFeatureImportanceStatistics `json:"importance"` +} + +func (s *TotalFeatureImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return fmt.Errorf("%s | %w", "Classes", err) + } + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return fmt.Errorf("%s | %w", "FeatureName", err) + } + + case "importance": + if err := dec.Decode(&s.Importance); err != nil { + return fmt.Errorf("%s | %w", "Importance", err) + } + + } + } + return nil +} + +// NewTotalFeatureImportance returns a TotalFeatureImportance. 
+func NewTotalFeatureImportance() *TotalFeatureImportance { + r := &TotalFeatureImportance{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportanceclass.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportanceclass.go new file mode 100644 index 000000000..f9230cf14 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportanceclass.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TotalFeatureImportanceClass type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L299-L304 +type TotalFeatureImportanceClass struct { + // ClassName The target class value. Could be a string, boolean, or number. + ClassName string `json:"class_name"` + // Importance A collection of feature importance statistics related to the training data + // set for this particular feature. + Importance []TotalFeatureImportanceStatistics `json:"importance"` +} + +func (s *TotalFeatureImportanceClass) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return fmt.Errorf("%s | %w", "ClassName", err) + } + + case "importance": + if err := dec.Decode(&s.Importance); err != nil { + return fmt.Errorf("%s | %w", "Importance", err) + } + + } + } + return nil +} + +// NewTotalFeatureImportanceClass returns a TotalFeatureImportanceClass. +func NewTotalFeatureImportanceClass() *TotalFeatureImportanceClass { + r := &TotalFeatureImportanceClass{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportancestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportancestatistics.go new file mode 100644 index 000000000..dc684af0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalfeatureimportancestatistics.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TotalFeatureImportanceStatistics type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L306-L313 +type TotalFeatureImportanceStatistics struct { + // Max The maximum importance value across all the training data for this feature. + Max int `json:"max"` + // MeanMagnitude The average magnitude of this feature across all the training data. This + // value is the average of the absolute values of the importance for this + // feature. + MeanMagnitude Float64 `json:"mean_magnitude"` + // Min The minimum importance value across all the training data for this feature. + Min int `json:"min"` +} + +func (s *TotalFeatureImportanceStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + s.Max = value + case float64: + f := int(v) + s.Max = f + } + + case "mean_magnitude": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MeanMagnitude", err) + } + f := Float64(value) + s.MeanMagnitude = f + case float64: + f := Float64(v) + s.MeanMagnitude = f + } + + case "min": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + s.Min = value + case float64: + f := int(v) + s.Min = f + } + + } + } + return nil +} + +// NewTotalFeatureImportanceStatistics returns a TotalFeatureImportanceStatistics. +func NewTotalFeatureImportanceStatistics() *TotalFeatureImportanceStatistics { + r := &TotalFeatureImportanceStatistics{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalhits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalhits.go new file mode 100644 index 000000000..aa66d5bb1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totalhits.go @@ -0,0 +1,63 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import ( + "bytes" + "encoding/json" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/totalhitsrelation" +) + +// TotalHits type. +// +// https://github.com/elastic/elasticsearch-specification/blob/18d160a8583deec1bbef274d2c0e563a0cd20e2f/specification/_global/search/_types/hits.ts#L94-L97 +type TotalHits struct { + Relation totalhitsrelation.TotalHitsRelation `json:"relation"` + Value int64 `json:"value"` +} + +// UnmarshalJSON implements Unmarshaler interface, it handles the shortcut for total hits. +func (t *TotalHits) UnmarshalJSON(data []byte) error { + type stub TotalHits + tmp := stub{} + dec := json.NewDecoder(bytes.NewReader(data)) + if _, err := strconv.Atoi(string(data)); err == nil { + err := dec.Decode(&t.Value) + if err != nil { + return err + } + t.Relation = totalhitsrelation.Eq + } else { + err := dec.Decode(&tmp) + if err != nil { + return err + } + *t = TotalHits(tmp) + } + + return nil +} + +// NewTotalHits returns a TotalHits. +func NewTotalHits() *TotalHits { + r := &TotalHits{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totaluserprofiles.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totaluserprofiles.go new file mode 100644 index 000000000..1bfee5293 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/totaluserprofiles.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TotalUserProfiles type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/suggest_user_profiles/Response.ts#L24-L27 +type TotalUserProfiles struct { + Relation string `json:"relation"` + Value int64 `json:"value"` +} + +func (s *TotalUserProfiles) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + case "value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + } + } + return nil +} + +// NewTotalUserProfiles returns a TotalUserProfiles. +func NewTotalUserProfiles() *TotalUserProfiles { + r := &TotalUserProfiles{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trackhits.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trackhits.go new file mode 100644 index 000000000..f999c28db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trackhits.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TrackHits holds the union for the following types: +// +// bool +// int +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/search/_types/hits.ts#L143-L151 +type TrackHits any + +type TrackHitsVariant interface { + TrackHitsCaster() *TrackHits +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodel.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodel.go new file mode 100644 index 000000000..aa3f89d17 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodel.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TrainedModel type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L60-L72 +type TrainedModel struct { + // Ensemble The definition for an ensemble model + Ensemble *Ensemble `json:"ensemble,omitempty"` + // Tree The definition for a binary decision tree. + Tree *TrainedModelTree `json:"tree,omitempty"` + // TreeNode The definition of a node in a tree. + // There are two major types of nodes: leaf nodes and not-leaf nodes. + // - Leaf nodes only need node_index and leaf_value defined. + // - All other nodes need split_feature, left_child, right_child, threshold, + // decision_type, and default_left defined. + TreeNode *TrainedModelTreeNode `json:"tree_node,omitempty"` +} + +// NewTrainedModel returns a TrainedModel. +func NewTrainedModel() *TrainedModel { + r := &TrainedModel{} + + return r +} + +type TrainedModelVariant interface { + TrainedModelCaster() *TrainedModel +} + +func (s *TrainedModel) TrainedModelCaster() *TrainedModel { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignment.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignment.go new file mode 100644 index 000000000..6e0a48536 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignment.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate" +) + +// TrainedModelAssignment type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L472-L489 +type TrainedModelAssignment struct { + AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"` + // AssignmentState The overall assignment state. + AssignmentState deploymentassignmentstate.DeploymentAssignmentState `json:"assignment_state"` + MaxAssignedAllocations *int `json:"max_assigned_allocations,omitempty"` + Reason *string `json:"reason,omitempty"` + // RoutingTable The allocation state for each node. + RoutingTable map[string]TrainedModelAssignmentRoutingTable `json:"routing_table"` + // StartTime The timestamp when the deployment started. + StartTime DateTime `json:"start_time"` + TaskParameters TrainedModelAssignmentTaskParameters `json:"task_parameters"` +} + +func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adaptive_allocations": + if err := dec.Decode(&s.AdaptiveAllocations); err != nil { + return fmt.Errorf("%s | %w", "AdaptiveAllocations", err) + } + + case "assignment_state": + if err := dec.Decode(&s.AssignmentState); err != nil { + return fmt.Errorf("%s | %w", "AssignmentState", err) + } + + case "max_assigned_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxAssignedAllocations", err) + } + s.MaxAssignedAllocations = &value + case float64: + f := int(v) + s.MaxAssignedAllocations = &f + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "routing_table": + if s.RoutingTable == nil { + s.RoutingTable = make(map[string]TrainedModelAssignmentRoutingTable, 0) + } + if err := dec.Decode(&s.RoutingTable); err != nil { + return fmt.Errorf("%s | %w", "RoutingTable", err) + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return fmt.Errorf("%s | %w", "StartTime", err) + } + + case "task_parameters": + if err := dec.Decode(&s.TaskParameters); err != nil { + return fmt.Errorf("%s | %w", "TaskParameters", err) + } + + } + } + return nil +} + +// NewTrainedModelAssignment returns a TrainedModelAssignment. +func NewTrainedModelAssignment() *TrainedModelAssignment { + r := &TrainedModelAssignment{ + RoutingTable: make(map[string]TrainedModelAssignmentRoutingTable), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingstateandreason.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingstateandreason.go new file mode 100644 index 000000000..765cb3d37 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingstateandreason.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate" +) + +// TrainedModelAssignmentRoutingStateAndReason type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L431-L441 +type TrainedModelAssignmentRoutingStateAndReason struct { + // Reason The reason for the current state. It is usually populated only when the + // `routing_state` is `failed`. + Reason *string `json:"reason,omitempty"` + // RoutingState The current routing state. + RoutingState routingstate.RoutingState `json:"routing_state"` +} + +func (s *TrainedModelAssignmentRoutingStateAndReason) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "routing_state": + if err := dec.Decode(&s.RoutingState); err != nil { + return fmt.Errorf("%s | %w", "RoutingState", err) + } + + } + } + return nil +} + +// NewTrainedModelAssignmentRoutingStateAndReason returns a TrainedModelAssignmentRoutingStateAndReason. +func NewTrainedModelAssignmentRoutingStateAndReason() *TrainedModelAssignmentRoutingStateAndReason { + r := &TrainedModelAssignmentRoutingStateAndReason{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingtable.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingtable.go new file mode 100644 index 000000000..fb5c9e7b1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmentroutingtable.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate" +) + +// TrainedModelAssignmentRoutingTable type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L443-L461 +type TrainedModelAssignmentRoutingTable struct { + // CurrentAllocations Current number of allocations. + CurrentAllocations int `json:"current_allocations"` + // Reason The reason for the current state. It is usually populated only when the + // `routing_state` is `failed`. + Reason *string `json:"reason,omitempty"` + // RoutingState The current routing state. + RoutingState routingstate.RoutingState `json:"routing_state"` + // TargetAllocations Target number of allocations. + TargetAllocations int `json:"target_allocations"` +} + +func (s *TrainedModelAssignmentRoutingTable) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "CurrentAllocations", err) + } + s.CurrentAllocations = value + case float64: + f := int(v) + s.CurrentAllocations = f + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "routing_state": + if err := dec.Decode(&s.RoutingState); err != nil { + return fmt.Errorf("%s | %w", "RoutingState", err) + } + + case "target_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TargetAllocations", err) + } + s.TargetAllocations = value + case float64: + f := int(v) + s.TargetAllocations = f + } + + } + } + return nil +} + +// NewTrainedModelAssignmentRoutingTable returns a TrainedModelAssignmentRoutingTable. +func NewTrainedModelAssignmentRoutingTable() *TrainedModelAssignmentRoutingTable { + r := &TrainedModelAssignmentRoutingTable{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmenttaskparameters.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmenttaskparameters.go new file mode 100644 index 000000000..534bb20cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelassignmenttaskparameters.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority" +) + +// TrainedModelAssignmentTaskParameters type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L370-L406 +type TrainedModelAssignmentTaskParameters struct { + // CacheSize The size of the trained model cache. + CacheSize ByteSize `json:"cache_size,omitempty"` + // DeploymentId The unique identifier for the trained model deployment. + DeploymentId string `json:"deployment_id"` + // ModelBytes The size of the trained model in bytes. + ModelBytes ByteSize `json:"model_bytes"` + // ModelId The unique identifier for the trained model. + ModelId string `json:"model_id"` + // NumberOfAllocations The total number of allocations this model is assigned across ML nodes. + NumberOfAllocations int `json:"number_of_allocations"` + PerAllocationMemoryBytes ByteSize `json:"per_allocation_memory_bytes"` + PerDeploymentMemoryBytes ByteSize `json:"per_deployment_memory_bytes"` + Priority trainingpriority.TrainingPriority `json:"priority"` + // QueueCapacity Number of inference requests are allowed in the queue at a time. + QueueCapacity int `json:"queue_capacity"` + // ThreadsPerAllocation Number of threads per allocation. 
+ ThreadsPerAllocation int `json:"threads_per_allocation"` +} + +func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_size": + if err := dec.Decode(&s.CacheSize); err != nil { + return fmt.Errorf("%s | %w", "CacheSize", err) + } + + case "deployment_id": + if err := dec.Decode(&s.DeploymentId); err != nil { + return fmt.Errorf("%s | %w", "DeploymentId", err) + } + + case "model_bytes": + if err := dec.Decode(&s.ModelBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelBytes", err) + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + + case "number_of_allocations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumberOfAllocations", err) + } + s.NumberOfAllocations = value + case float64: + f := int(v) + s.NumberOfAllocations = f + } + + case "per_allocation_memory_bytes": + if err := dec.Decode(&s.PerAllocationMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerAllocationMemoryBytes", err) + } + + case "per_deployment_memory_bytes": + if err := dec.Decode(&s.PerDeploymentMemoryBytes); err != nil { + return fmt.Errorf("%s | %w", "PerDeploymentMemoryBytes", err) + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return fmt.Errorf("%s | %w", "Priority", err) + } + + case "queue_capacity": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "QueueCapacity", err) + } + s.QueueCapacity = value + case float64: + f := int(v) + s.QueueCapacity = f + } + + case "threads_per_allocation": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err) + } + s.ThreadsPerAllocation = value + case float64: + f := int(v) + s.ThreadsPerAllocation = f + } + + } + } + return nil +} + +// NewTrainedModelAssignmentTaskParameters returns a TrainedModelAssignmentTaskParameters. +func NewTrainedModelAssignmentTaskParameters() *TrainedModelAssignmentTaskParameters { + r := &TrainedModelAssignmentTaskParameters{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfig.go new file mode 100644 index 000000000..5d4c8bb2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfig.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype" +) + +// TrainedModelConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L203-L240 +type TrainedModelConfig struct { + CompressedDefinition *string `json:"compressed_definition,omitempty"` + // CreateTime The time when the trained model was created. + CreateTime DateTime `json:"create_time,omitempty"` + // CreatedBy Information on the creator of the trained model. + CreatedBy *string `json:"created_by,omitempty"` + // DefaultFieldMap Any field map described in the inference configuration takes precedence. + DefaultFieldMap map[string]string `json:"default_field_map,omitempty"` + // Description The free-text description of the trained model. + Description *string `json:"description,omitempty"` + // EstimatedHeapMemoryUsageBytes The estimated heap usage in bytes to keep the trained model in memory. + EstimatedHeapMemoryUsageBytes *int `json:"estimated_heap_memory_usage_bytes,omitempty"` + // EstimatedOperations The estimated number of operations to use the trained model. + EstimatedOperations *int `json:"estimated_operations,omitempty"` + // FullyDefined True if the full model definition is present. + FullyDefined *bool `json:"fully_defined,omitempty"` + // InferenceConfig The default configuration for inference. This can be either a regression, + // classification, or one of the many NLP focused configurations. It must match + // the underlying definition.trained_model's target_type. For pre-packaged + // models such as ELSER the config is not required. + InferenceConfig *InferenceConfigCreateContainer `json:"inference_config,omitempty"` + // Input The input field names for the model definition. + Input TrainedModelConfigInput `json:"input"` + // LicenseLevel The license level of the trained model. + LicenseLevel *string `json:"license_level,omitempty"` + Location *TrainedModelLocation `json:"location,omitempty"` + // Metadata An object containing metadata about the trained model. For example, models + // created by data frame analytics contain analysis_config and input objects. + Metadata *TrainedModelConfigMetadata `json:"metadata,omitempty"` + // ModelId Identifier for the trained model. + ModelId string `json:"model_id"` + ModelPackage *ModelPackageConfig `json:"model_package,omitempty"` + ModelSizeBytes ByteSize `json:"model_size_bytes,omitempty"` + // ModelType The model type + ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` + PlatformArchitecture *string `json:"platform_architecture,omitempty"` + PrefixStrings *TrainedModelPrefixStrings `json:"prefix_strings,omitempty"` + // Tags A comma delimited string of tags. A trained model can have many tags, or + // none. 
+ Tags []string `json:"tags"` + // Version The Elasticsearch version number in which the trained model was created. + Version *string `json:"version,omitempty"` +} + +func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compressed_definition": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CompressedDefinition", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressedDefinition = &o + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "created_by": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreatedBy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedBy = &o + + case "default_field_map": + if s.DefaultFieldMap == nil { + s.DefaultFieldMap = make(map[string]string, 0) + } + if err := dec.Decode(&s.DefaultFieldMap); err != nil { + return fmt.Errorf("%s | %w", "DefaultFieldMap", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "estimated_heap_memory_usage_bytes": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EstimatedHeapMemoryUsageBytes", err) + } + s.EstimatedHeapMemoryUsageBytes = &value + case float64: + f := int(v) + s.EstimatedHeapMemoryUsageBytes = &f + } + + case "estimated_operations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "EstimatedOperations", err) + } + s.EstimatedOperations = &value + case float64: + f := int(v) + s.EstimatedOperations = &f + } + + case "fully_defined": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "FullyDefined", err) + } + s.FullyDefined = &value + case bool: + s.FullyDefined = &v + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return fmt.Errorf("%s | %w", "InferenceConfig", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + case "license_level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LicenseLevel", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LicenseLevel = &o + + case "location": + if err := dec.Decode(&s.Location); err != nil { + return fmt.Errorf("%s | %w", "Location", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + + case "model_package": + if err := dec.Decode(&s.ModelPackage); err != nil { + return fmt.Errorf("%s | %w", 
"ModelPackage", err) + } + + case "model_size_bytes": + if err := dec.Decode(&s.ModelSizeBytes); err != nil { + return fmt.Errorf("%s | %w", "ModelSizeBytes", err) + } + + case "model_type": + if err := dec.Decode(&s.ModelType); err != nil { + return fmt.Errorf("%s | %w", "ModelType", err) + } + + case "platform_architecture": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PlatformArchitecture", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PlatformArchitecture = &o + + case "prefix_strings": + if err := dec.Decode(&s.PrefixStrings); err != nil { + return fmt.Errorf("%s | %w", "PrefixStrings", err) + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return fmt.Errorf("%s | %w", "Tags", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewTrainedModelConfig returns a TrainedModelConfig. +func NewTrainedModelConfig() *TrainedModelConfig { + r := &TrainedModelConfig{ + DefaultFieldMap: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfiginput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfiginput.go new file mode 100644 index 000000000..f87e75aba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfiginput.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TrainedModelConfigInput type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L242-L245 +type TrainedModelConfigInput struct { + // FieldNames An array of input field names for the model. + FieldNames []string `json:"field_names"` +} + +// NewTrainedModelConfigInput returns a TrainedModelConfigInput. +func NewTrainedModelConfigInput() *TrainedModelConfigInput { + r := &TrainedModelConfigInput{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfigmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfigmetadata.go new file mode 100644 index 000000000..534e412a5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelconfigmetadata.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TrainedModelConfigMetadata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L247-L255 +type TrainedModelConfigMetadata struct { + // FeatureImportanceBaseline An object that contains the baseline for feature importance values. For + // regression analysis, it is a single value. For classification analysis, there + // is a value for each class. + FeatureImportanceBaseline map[string]string `json:"feature_importance_baseline,omitempty"` + // Hyperparameters List of the available hyperparameters optimized during the + // fine_parameter_tuning phase as well as specified by the user. + Hyperparameters []Hyperparameter `json:"hyperparameters,omitempty"` + ModelAliases []string `json:"model_aliases,omitempty"` + // TotalFeatureImportance An array of the total feature importance for each feature used from the + // training data set. This array of objects is returned if data frame analytics + // trained the model and the request includes total_feature_importance in the + // include request parameter. + TotalFeatureImportance []TotalFeatureImportance `json:"total_feature_importance,omitempty"` +} + +// NewTrainedModelConfigMetadata returns a TrainedModelConfigMetadata. +func NewTrainedModelConfigMetadata() *TrainedModelConfigMetadata { + r := &TrainedModelConfigMetadata{ + FeatureImportanceBaseline: make(map[string]string), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentallocationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentallocationstatus.go new file mode 100644 index 000000000..838e1ce94 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentallocationstatus.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate"
+)
+
+// TrainedModelDeploymentAllocationStatus type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L463-L470
+type TrainedModelDeploymentAllocationStatus struct {
+	// AllocationCount The current number of nodes where the model is allocated.
+	AllocationCount int `json:"allocation_count"`
+	// State The detailed allocation state related to the nodes.
+	State deploymentallocationstate.DeploymentAllocationState `json:"state"`
+	// TargetAllocationCount The desired number of nodes for model allocation.
+	TargetAllocationCount int `json:"target_allocation_count"`
+}
+
+func (s *TrainedModelDeploymentAllocationStatus) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "allocation_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AllocationCount", err)
+				}
+				s.AllocationCount = value
+			case float64:
+				f := int(v)
+				s.AllocationCount = f
+			}
+
+		case "state":
+			if err := dec.Decode(&s.State); err != nil {
+				return fmt.Errorf("%s | %w", "State", err)
+			}
+
+		case "target_allocation_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TargetAllocationCount", err)
+				}
+				s.TargetAllocationCount = value
+			case float64:
+				f := int(v)
+				s.TargetAllocationCount = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelDeploymentAllocationStatus returns a TrainedModelDeploymentAllocationStatus.
+func NewTrainedModelDeploymentAllocationStatus() *TrainedModelDeploymentAllocationStatus {
+	r := &TrainedModelDeploymentAllocationStatus{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentnodesstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentnodesstats.go
new file mode 100644
index 000000000..432205160
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentnodesstats.go
@@ -0,0 +1,304 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelDeploymentNodesStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L156-L201
+type TrainedModelDeploymentNodesStats struct {
+	// AverageInferenceTimeMs The average time for each inference call to complete on this node.
+	AverageInferenceTimeMs Float64 `json:"average_inference_time_ms,omitempty"`
+	// AverageInferenceTimeMsExcludingCacheHits The average time for each inference call to complete on this node,
+	// excluding cache hits.
+	AverageInferenceTimeMsExcludingCacheHits Float64 `json:"average_inference_time_ms_excluding_cache_hits,omitempty"`
+	AverageInferenceTimeMsLastMinute Float64 `json:"average_inference_time_ms_last_minute,omitempty"`
+	// ErrorCount The number of errors when evaluating the trained model.
+	ErrorCount *int `json:"error_count,omitempty"`
+	InferenceCacheHitCount *int64 `json:"inference_cache_hit_count,omitempty"`
+	InferenceCacheHitCountLastMinute *int64 `json:"inference_cache_hit_count_last_minute,omitempty"`
+	// InferenceCount The total number of inference calls made against this node for this model.
+	InferenceCount *int64 `json:"inference_count,omitempty"`
+	// LastAccess The epoch time stamp of the last inference call for the model on this node.
+	LastAccess *int64 `json:"last_access,omitempty"`
+	// Node Information pertaining to the node.
+	Node DiscoveryNode `json:"node,omitempty"`
+	// NumberOfAllocations The number of allocations assigned to this node.
+	NumberOfAllocations *int `json:"number_of_allocations,omitempty"`
+	// NumberOfPendingRequests The number of inference requests queued to be processed.
+	NumberOfPendingRequests *int `json:"number_of_pending_requests,omitempty"`
+	PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"`
+	// RejectedExecutionCount The number of inference requests that were not processed because the queue
+	// was full.
+	RejectedExecutionCount *int `json:"rejected_execution_count,omitempty"`
+	// RoutingState The current routing state and reason for the current routing state for this
+	// allocation.
+	RoutingState TrainedModelAssignmentRoutingStateAndReason `json:"routing_state"`
+	// StartTime The epoch timestamp when the allocation started.
+	StartTime *int64 `json:"start_time,omitempty"`
+	// ThreadsPerAllocation The number of threads used by each allocation during inference.
+	ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"`
+	ThroughputLastMinute int `json:"throughput_last_minute"`
+	// TimeoutCount The number of inference requests that timed out before being processed.
+	TimeoutCount *int `json:"timeout_count,omitempty"`
+}
+
+func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "average_inference_time_ms":
+			if err := dec.Decode(&s.AverageInferenceTimeMs); err != nil {
+				return fmt.Errorf("%s | %w", "AverageInferenceTimeMs", err)
+			}
+
+		case "average_inference_time_ms_excluding_cache_hits":
+			if err := dec.Decode(&s.AverageInferenceTimeMsExcludingCacheHits); err != nil {
+				return fmt.Errorf("%s | %w", "AverageInferenceTimeMsExcludingCacheHits", err)
+			}
+
+		case "average_inference_time_ms_last_minute":
+			if err := dec.Decode(&s.AverageInferenceTimeMsLastMinute); err != nil {
+				return fmt.Errorf("%s | %w", "AverageInferenceTimeMsLastMinute", err)
+			}
+
+		case "error_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ErrorCount", err)
+				}
+				s.ErrorCount = &value
+			case float64:
+				f := int(v)
+				s.ErrorCount = &f
+			}
+
+		case "inference_cache_hit_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InferenceCacheHitCount", err)
+				}
+				s.InferenceCacheHitCount = &value
+			case float64:
+				f := int64(v)
+				s.InferenceCacheHitCount = &f
+			}
+
+		case "inference_cache_hit_count_last_minute":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InferenceCacheHitCountLastMinute", err)
+				}
+				s.InferenceCacheHitCountLastMinute = &value
+			case float64:
+				f := int64(v)
+				s.InferenceCacheHitCountLastMinute = &f
+			}
+
+		case "inference_count":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InferenceCount", err)
+				}
+				s.InferenceCount = &value
+			case float64:
+				f := int64(v)
+				s.InferenceCount = &f
+			}
+
+		case "last_access":
+			if err := dec.Decode(&s.LastAccess); err != nil {
+				return fmt.Errorf("%s | %w", "LastAccess", err)
+			}
+
+		case "node":
+			if err := dec.Decode(&s.Node); err != nil {
+				return fmt.Errorf("%s | %w", "Node", err)
+			}
+
+		case "number_of_allocations":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfAllocations", err)
+				}
+				s.NumberOfAllocations = &value
+			case float64:
+				f := int(v)
+				s.NumberOfAllocations = &f
+			}
+
+		case "number_of_pending_requests":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfPendingRequests", err)
+				}
+				s.NumberOfPendingRequests = &value
+			case float64:
+				f := int(v)
+				s.NumberOfPendingRequests = &f
+			}
+
+		case "peak_throughput_per_minute":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err)
+				}
+				s.PeakThroughputPerMinute = value
+			case float64:
+				f := int64(v)
+				s.PeakThroughputPerMinute = f
+			}
+
+		case "rejected_execution_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RejectedExecutionCount", err)
+				}
+				s.RejectedExecutionCount = &value
+			case float64:
+				f := int(v)
+				s.RejectedExecutionCount = &f
+			}
+
+		case "routing_state":
+			if err := dec.Decode(&s.RoutingState); err != nil {
+				return fmt.Errorf("%s | %w", "RoutingState", err)
+			}
+
+		case "start_time":
+			if err := dec.Decode(&s.StartTime); err != nil {
+				return fmt.Errorf("%s | %w", "StartTime", err)
+			}
+
+		case "threads_per_allocation":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err)
+				}
+				s.ThreadsPerAllocation = &value
+			case float64:
+				f := int(v)
+				s.ThreadsPerAllocation = &f
+			}
+
+		case "throughput_last_minute":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ThroughputLastMinute", err)
+				}
+				s.ThroughputLastMinute = value
+			case float64:
+				f := int(v)
+				s.ThroughputLastMinute = f
+			}
+
+		case "timeout_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TimeoutCount", err)
+				}
+				s.TimeoutCount = &value
+			case float64:
+				f := int(v)
+				s.TimeoutCount = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelDeploymentNodesStats returns a TrainedModelDeploymentNodesStats.
+func NewTrainedModelDeploymentNodesStats() *TrainedModelDeploymentNodesStats {
+	r := &TrainedModelDeploymentNodesStats{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentstats.go
new file mode 100644
index 000000000..4d9495c03
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeldeploymentstats.go
@@ -0,0 +1,288 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority"
+)
+
+// TrainedModelDeploymentStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L62-L107
+type TrainedModelDeploymentStats struct {
+	AdaptiveAllocations *AdaptiveAllocationsSettings `json:"adaptive_allocations,omitempty"`
+	// AllocationStatus The detailed allocation status for the deployment.
+	AllocationStatus *TrainedModelDeploymentAllocationStatus `json:"allocation_status,omitempty"`
+	CacheSize ByteSize `json:"cache_size,omitempty"`
+	// DeploymentId The unique identifier for the trained model deployment.
+	DeploymentId string `json:"deployment_id"`
+	// ErrorCount The sum of `error_count` for all nodes in the deployment.
+	ErrorCount *int `json:"error_count,omitempty"`
+	// InferenceCount The sum of `inference_count` for all nodes in the deployment.
+	InferenceCount *int `json:"inference_count,omitempty"`
+	// ModelId The unique identifier for the trained model.
+	ModelId string `json:"model_id"`
+	// Nodes The deployment stats for each node that currently has the model allocated.
+	// In serverless, stats are reported for a single unnamed virtual node.
+	Nodes []TrainedModelDeploymentNodesStats `json:"nodes"`
+	// NumberOfAllocations The number of allocations requested.
+	NumberOfAllocations *int `json:"number_of_allocations,omitempty"`
+	PeakThroughputPerMinute int64 `json:"peak_throughput_per_minute"`
+	Priority trainingpriority.TrainingPriority `json:"priority"`
+	// QueueCapacity The number of inference requests that can be queued before new requests are
+	// rejected.
+	QueueCapacity *int `json:"queue_capacity,omitempty"`
+	// Reason The reason for the current deployment state. Usually only populated when
+	// the model is not deployed to a node.
+	Reason *string `json:"reason,omitempty"`
+	// RejectedExecutionCount The sum of `rejected_execution_count` for all nodes in the deployment.
+	// Individual nodes reject an inference request if the inference queue is full.
+	// The queue size is controlled by the `queue_capacity` setting in the start
+	// trained model deployment API.
+	RejectedExecutionCount *int `json:"rejected_execution_count,omitempty"`
+	// StartTime The epoch timestamp when the deployment started.
+	StartTime int64 `json:"start_time"`
+	// State The overall state of the deployment.
+	State *deploymentassignmentstate.DeploymentAssignmentState `json:"state,omitempty"`
+	// ThreadsPerAllocation The number of threads used by each allocation during inference.
+	ThreadsPerAllocation *int `json:"threads_per_allocation,omitempty"`
+	// TimeoutCount The sum of `timeout_count` for all nodes in the deployment.
+	TimeoutCount *int `json:"timeout_count,omitempty"`
+}
+
+func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "adaptive_allocations":
+			if err := dec.Decode(&s.AdaptiveAllocations); err != nil {
+				return fmt.Errorf("%s | %w", "AdaptiveAllocations", err)
+			}
+
+		case "allocation_status":
+			if err := dec.Decode(&s.AllocationStatus); err != nil {
+				return fmt.Errorf("%s | %w", "AllocationStatus", err)
+			}
+
+		case "cache_size":
+			if err := dec.Decode(&s.CacheSize); err != nil {
+				return fmt.Errorf("%s | %w", "CacheSize", err)
+			}
+
+		case "deployment_id":
+			if err := dec.Decode(&s.DeploymentId); err != nil {
+				return fmt.Errorf("%s | %w", "DeploymentId", err)
+			}
+
+		case "error_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ErrorCount", err)
+				}
+				s.ErrorCount = &value
+			case float64:
+				f := int(v)
+				s.ErrorCount = &f
+			}
+
+		case "inference_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InferenceCount", err)
+				}
+				s.InferenceCount = &value
+			case float64:
+				f := int(v)
+				s.InferenceCount = &f
+			}
+
+		case "model_id":
+			if err := dec.Decode(&s.ModelId); err != nil {
+				return fmt.Errorf("%s | %w", "ModelId", err)
+			}
+
+		case "nodes":
+			if err := dec.Decode(&s.Nodes); err != nil {
+				return fmt.Errorf("%s | %w", "Nodes", err)
+			}
+
+		case "number_of_allocations":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "NumberOfAllocations", err)
+				}
+				s.NumberOfAllocations = &value
+			case float64:
+				f := int(v)
+				s.NumberOfAllocations = &f
+			}
+
+		case "peak_throughput_per_minute":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseInt(v, 10, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PeakThroughputPerMinute", err)
+				}
+				s.PeakThroughputPerMinute = value
+			case float64:
+				f := int64(v)
+				s.PeakThroughputPerMinute = f
+			}
+
+		case "priority":
+			if err := dec.Decode(&s.Priority); err != nil {
+				return fmt.Errorf("%s | %w", "Priority", err)
+			}
+
+		case "queue_capacity":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "QueueCapacity", err)
+				}
+				s.QueueCapacity = &value
+			case float64:
+				f := int(v)
+				s.QueueCapacity = &f
+			}
+
+		case "reason":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Reason", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Reason = &o
+
+		case "rejected_execution_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "RejectedExecutionCount", err)
+				}
+				s.RejectedExecutionCount = &value
+			case float64:
+				f := int(v)
+				s.RejectedExecutionCount = &f
+			}
+
+		case "start_time":
+			if err := dec.Decode(&s.StartTime); err != nil {
+				return fmt.Errorf("%s | %w", "StartTime", err)
+			}
+
+		case "state":
+			if err := dec.Decode(&s.State); err != nil {
+				return fmt.Errorf("%s | %w", "State", err)
+			}
+
+		case "threads_per_allocation":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ThreadsPerAllocation", err)
+				}
+				s.ThreadsPerAllocation = &value
+			case float64:
+				f := int(v)
+				s.ThreadsPerAllocation = &f
+			}
+
+		case "timeout_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "TimeoutCount", err)
+				}
+				s.TimeoutCount = &value
+			case float64:
+				f := int(v)
+				s.TimeoutCount = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelDeploymentStats returns a TrainedModelDeploymentStats.
+func NewTrainedModelDeploymentStats() *TrainedModelDeploymentStats {
+	r := &TrainedModelDeploymentStats{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelentities.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelentities.go
new file mode 100644
index 000000000..82a3e508c
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelentities.go
@@ -0,0 +1,140 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelEntities type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L452-L458
+type TrainedModelEntities struct {
+	ClassName string `json:"class_name"`
+	ClassProbability Float64 `json:"class_probability"`
+	EndPos int `json:"end_pos"`
+	Entity string `json:"entity"`
+	StartPos int `json:"start_pos"`
+}
+
+func (s *TrainedModelEntities) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "class_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ClassName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ClassName = o
+
+		case "class_probability":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "ClassProbability", err)
+				}
+				f := Float64(value)
+				s.ClassProbability = f
+			case float64:
+				f := Float64(v)
+				s.ClassProbability = f
+			}
+
+		case "end_pos":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "EndPos", err)
+				}
+				s.EndPos = value
+			case float64:
+				f := int(v)
+				s.EndPos = f
+			}
+
+		case "entity":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Entity", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Entity = o
+
+		case "start_pos":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "StartPos", err)
+				}
+				s.StartPos = value
+			case float64:
+				f := int(v)
+				s.StartPos = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelEntities returns a TrainedModelEntities.
+func NewTrainedModelEntities() *TrainedModelEntities {
+	r := &TrainedModelEntities{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferenceclassimportance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferenceclassimportance.go
new file mode 100644
index 000000000..1637f0afe
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferenceclassimportance.go
@@ -0,0 +1,93 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelInferenceClassImportance type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L465-L468
+type TrainedModelInferenceClassImportance struct {
+	ClassName string `json:"class_name"`
+	Importance Float64 `json:"importance"`
+}
+
+func (s *TrainedModelInferenceClassImportance) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "class_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "ClassName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.ClassName = o
+
+		case "importance":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Importance", err)
+				}
+				f := Float64(value)
+				s.Importance = f
+			case float64:
+				f := Float64(v)
+				s.Importance = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelInferenceClassImportance returns a TrainedModelInferenceClassImportance.
+func NewTrainedModelInferenceClassImportance() *TrainedModelInferenceClassImportance {
+	r := &TrainedModelInferenceClassImportance{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencefeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencefeatureimportance.go
new file mode 100644
index 000000000..a2fd6d071
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencefeatureimportance.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelInferenceFeatureImportance type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L470-L474
+type TrainedModelInferenceFeatureImportance struct {
+	Classes []TrainedModelInferenceClassImportance `json:"classes,omitempty"`
+	FeatureName string `json:"feature_name"`
+	Importance *Float64 `json:"importance,omitempty"`
+}
+
+func (s *TrainedModelInferenceFeatureImportance) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "classes":
+			if err := dec.Decode(&s.Classes); err != nil {
+				return fmt.Errorf("%s | %w", "Classes", err)
+			}
+
+		case "feature_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "FeatureName", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.FeatureName = o
+
+		case "importance":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Importance", err)
+				}
+				f := Float64(value)
+				s.Importance = &f
+			case float64:
+				f := Float64(v)
+				s.Importance = &f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelInferenceFeatureImportance returns a TrainedModelInferenceFeatureImportance.
+func NewTrainedModelInferenceFeatureImportance() *TrainedModelInferenceFeatureImportance {
+	r := &TrainedModelInferenceFeatureImportance{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencestats.go
new file mode 100644
index 000000000..0208e9a23
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelinferencestats.go
@@ -0,0 +1,149 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelInferenceStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L127-L147
+type TrainedModelInferenceStats struct {
+	// CacheMissCount The number of times the model was loaded for inference and was not retrieved
+	// from the cache.
+	// If this number is close to the `inference_count`, the cache is not being
+	// appropriately used.
+	// This can be solved by increasing the cache size or its time-to-live (TTL).
+	// Refer to general machine learning settings for the appropriate settings.
+	CacheMissCount int `json:"cache_miss_count"`
+	// FailureCount The number of failures when using the model for inference.
+	FailureCount int `json:"failure_count"`
+	// InferenceCount The total number of times the model has been called for inference.
+	// This is across all inference contexts, including all pipelines.
+	InferenceCount int `json:"inference_count"`
+	// MissingAllFieldsCount The number of inference calls where all the training features for the model
+	// were missing.
+	MissingAllFieldsCount int `json:"missing_all_fields_count"`
+	// Timestamp The time when the statistics were last updated.
+	Timestamp int64 `json:"timestamp"`
+}
+
+func (s *TrainedModelInferenceStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "cache_miss_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "CacheMissCount", err)
+				}
+				s.CacheMissCount = value
+			case float64:
+				f := int(v)
+				s.CacheMissCount = f
+			}
+
+		case "failure_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "FailureCount", err)
+				}
+				s.FailureCount = value
+			case float64:
+				f := int(v)
+				s.FailureCount = f
+			}
+
+		case "inference_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "InferenceCount", err)
+				}
+				s.InferenceCount = value
+			case float64:
+				f := int(v)
+				s.InferenceCount = f
+			}
+
+		case "missing_all_fields_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "MissingAllFieldsCount", err)
+				}
+				s.MissingAllFieldsCount = value
+			case float64:
+				f := int(v)
+				s.MissingAllFieldsCount = f
+			}
+
+		case "timestamp":
+			if err := dec.Decode(&s.Timestamp); err != nil {
+				return fmt.Errorf("%s | %w", "Timestamp", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelInferenceStats returns a TrainedModelInferenceStats.
+func NewTrainedModelInferenceStats() *TrainedModelInferenceStats {
+	r := &TrainedModelInferenceStats{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocation.go
new file mode 100644
index 000000000..6594740f4
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocation.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+// TrainedModelLocation type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L491-L493
+type TrainedModelLocation struct {
+	Index TrainedModelLocationIndex `json:"index"`
+}
+
+// NewTrainedModelLocation returns a TrainedModelLocation.
+func NewTrainedModelLocation() *TrainedModelLocation {
+	r := &TrainedModelLocation{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocationindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocationindex.go
new file mode 100644
index 000000000..3a24f363d
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodellocationindex.go
@@ -0,0 +1,68 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// TrainedModelLocationIndex type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L495-L497
+type TrainedModelLocationIndex struct {
+	Name string `json:"name"`
+}
+
+func (s *TrainedModelLocationIndex) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelLocationIndex returns a TrainedModelLocationIndex.
+func NewTrainedModelLocationIndex() *TrainedModelLocationIndex {
+	r := &TrainedModelLocationIndex{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelprefixstrings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelprefixstrings.go
new file mode 100644
index 000000000..462602085
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelprefixstrings.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelPrefixStrings type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L499-L508
+type TrainedModelPrefixStrings struct {
+	// Ingest String prepended to input at ingest
+	Ingest *string `json:"ingest,omitempty"`
+	// Search String prepended to input at search
+	Search *string `json:"search,omitempty"`
+}
+
+func (s *TrainedModelPrefixStrings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "ingest":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Ingest", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Ingest = &o
+
+		case "search":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Search", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Search = &o
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelPrefixStrings returns a TrainedModelPrefixStrings.
+func NewTrainedModelPrefixStrings() *TrainedModelPrefixStrings {
+	r := &TrainedModelPrefixStrings{}
+
+	return r
+}
+
+type TrainedModelPrefixStringsVariant interface {
+	TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings
+}
+
+func (s *TrainedModelPrefixStrings) TrainedModelPrefixStringsCaster() *TrainedModelPrefixStrings {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsizestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsizestats.go
new file mode 100644
index 000000000..94ba3bf55
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsizestats.go
@@ -0,0 +1,76 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// TrainedModelSizeStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L149-L154
+type TrainedModelSizeStats struct {
+	// ModelSizeBytes The size of the model in bytes.
+	ModelSizeBytes ByteSize `json:"model_size_bytes"`
+	// RequiredNativeMemoryBytes The amount of memory required to load the model in bytes.
+	RequiredNativeMemoryBytes ByteSize `json:"required_native_memory_bytes"`
+}
+
+func (s *TrainedModelSizeStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "model_size_bytes":
+			if err := dec.Decode(&s.ModelSizeBytes); err != nil {
+				return fmt.Errorf("%s | %w", "ModelSizeBytes", err)
+			}
+
+		case "required_native_memory_bytes":
+			if err := dec.Decode(&s.RequiredNativeMemoryBytes); err != nil {
+				return fmt.Errorf("%s | %w", "RequiredNativeMemoryBytes", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelSizeStats returns a TrainedModelSizeStats.
+func NewTrainedModelSizeStats() *TrainedModelSizeStats {
+	r := &TrainedModelSizeStats{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsrecord.go
new file mode 100644
index 000000000..8aae50703
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelsrecord.go
@@ -0,0 +1,288 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelsRecord type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/ml_trained_models/types.ts#L23-L115
+type TrainedModelsRecord struct {
+	// CreateTime The time the model was created.
+	CreateTime DateTime `json:"create_time,omitempty"`
+	// CreatedBy Information about the creator of the model.
+	CreatedBy *string `json:"created_by,omitempty"`
+	// DataFrameAnalysis The analysis used by the data frame to build the model.
+	DataFrameAnalysis *string `json:"data_frame.analysis,omitempty"`
+	// DataFrameCreateTime The time the data frame analytics job was created.
+	DataFrameCreateTime *string `json:"data_frame.create_time,omitempty"`
+	// DataFrameId The identifier for the data frame analytics job that created the model.
+	// Only displayed if the job is still available.
+	DataFrameId *string `json:"data_frame.id,omitempty"`
+	// DataFrameSourceIndex The source index used to train in the data frame analysis.
+	DataFrameSourceIndex *string `json:"data_frame.source_index,omitempty"`
+	// Description A description of the model.
+	Description *string `json:"description,omitempty"`
+	// HeapSize The estimated heap size to keep the model in memory.
+	HeapSize ByteSize `json:"heap_size,omitempty"`
+	// Id The model identifier.
+	Id *string `json:"id,omitempty"`
+	// IngestCount The total number of documents that are processed by the model.
+	IngestCount *string `json:"ingest.count,omitempty"`
+	// IngestCurrent The total number of documents that are currently being handled by the model.
+	IngestCurrent *string `json:"ingest.current,omitempty"`
+	// IngestFailed The total number of failed ingest attempts with the model.
+	IngestFailed *string `json:"ingest.failed,omitempty"`
+	// IngestPipelines The number of pipelines that are referencing the model.
+	IngestPipelines *string `json:"ingest.pipelines,omitempty"`
+	// IngestTime The total time spent processing documents with the model.
+	IngestTime *string `json:"ingest.time,omitempty"`
+	// License The license level of the model.
+	License *string `json:"license,omitempty"`
+	// Operations The estimated number of operations to use the model.
+	// This number helps to measure the computational complexity of the model.
+	Operations *string `json:"operations,omitempty"`
+	Type *string `json:"type,omitempty"`
+	// Version The version of Elasticsearch when the model was created.
+	Version *string `json:"version,omitempty"`
+}
+
+func (s *TrainedModelsRecord) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "create_time", "ct":
+			if err := dec.Decode(&s.CreateTime); err != nil {
+				return fmt.Errorf("%s | %w", "CreateTime", err)
+			}
+
+		case "created_by", "c", "createdBy":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "CreatedBy", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.CreatedBy = &o
+
+		case "data_frame.analysis", "dfa", "dataFrameAnalyticsAnalysis":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "DataFrameAnalysis", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.DataFrameAnalysis = &o
+
+		case "data_frame.create_time", "dft", "dataFrameAnalyticsTime":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "DataFrameCreateTime", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.DataFrameCreateTime = &o
+
+		case "data_frame.id", "dfid", "dataFrameAnalytics":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "DataFrameId", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.DataFrameId = &o
+
+		case "data_frame.source_index", "dfsi", "dataFrameAnalyticsSrcIndex":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "DataFrameSourceIndex", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.DataFrameSourceIndex = &o
+
+		case "description", "d":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Description", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Description = &o
+
+		case "heap_size", "hs", "modelHeapSize":
+			if err := dec.Decode(&s.HeapSize); err != nil {
+				return fmt.Errorf("%s | %w", "HeapSize", err)
+			}
+
+		case "id":
+			if err := dec.Decode(&s.Id); err != nil {
+				return fmt.Errorf("%s | %w", "Id", err)
+			}
+
+		case "ingest.count", "ic", "ingestCount":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "IngestCount", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.IngestCount = &o
+
+		case "ingest.current", "icurr", "ingestCurrent":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "IngestCurrent", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.IngestCurrent = &o
+
+		case "ingest.failed", "if", "ingestFailed":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "IngestFailed", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.IngestFailed = &o
+
+		case "ingest.pipelines", "ip", "ingestPipelines":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "IngestPipelines", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.IngestPipelines = &o
+
+		case "ingest.time", "it", "ingestTime":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "IngestTime", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.IngestTime = &o
+
+		case "license", "l":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "License", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.License = &o
+
+		case "operations", "o", "modelOperations":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Operations", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Operations = &o
+
+		case "type":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Type", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Type = &o
+
+		case "version", "v":
+			if err := dec.Decode(&s.Version); err != nil {
+				return fmt.Errorf("%s | %w", "Version", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelsRecord returns a TrainedModelsRecord.
+func NewTrainedModelsRecord() *TrainedModelsRecord {
+	r := &TrainedModelsRecord{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelstats.go
new file mode 100644
index 000000000..557a4a98a
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodelstats.go
@@ -0,0 +1,124 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/TrainedModel.ts#L42-L60
+type TrainedModelStats struct {
+	// DeploymentStats A collection of deployment stats, which is present when the models are
+	// deployed.
+	DeploymentStats *TrainedModelDeploymentStats `json:"deployment_stats,omitempty"`
+	// InferenceStats A collection of inference stats fields.
+	InferenceStats *TrainedModelInferenceStats `json:"inference_stats,omitempty"`
+	// Ingest A collection of ingest stats for the model across all nodes.
+	// The values are summations of the individual node statistics.
+	// The format matches the ingest section in the nodes stats API.
+	Ingest map[string]json.RawMessage `json:"ingest,omitempty"`
+	// ModelId The unique identifier of the trained model.
+	ModelId string `json:"model_id"`
+	// ModelSizeStats A collection of model size stats.
+	ModelSizeStats TrainedModelSizeStats `json:"model_size_stats"`
+	// PipelineCount The number of ingest pipelines that currently refer to the model.
+	PipelineCount int `json:"pipeline_count"`
+}
+
+func (s *TrainedModelStats) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "deployment_stats":
+			if err := dec.Decode(&s.DeploymentStats); err != nil {
+				return fmt.Errorf("%s | %w", "DeploymentStats", err)
+			}
+
+		case "inference_stats":
+			if err := dec.Decode(&s.InferenceStats); err != nil {
+				return fmt.Errorf("%s | %w", "InferenceStats", err)
+			}
+
+		case "ingest":
+			if s.Ingest == nil {
+				s.Ingest = make(map[string]json.RawMessage, 0)
+			}
+			if err := dec.Decode(&s.Ingest); err != nil {
+				return fmt.Errorf("%s | %w", "Ingest", err)
+			}
+
+		case "model_id":
+			if err := dec.Decode(&s.ModelId); err != nil {
+				return fmt.Errorf("%s | %w", "ModelId", err)
+			}
+
+		case "model_size_stats":
+			if err := dec.Decode(&s.ModelSizeStats); err != nil {
+				return fmt.Errorf("%s | %w", "ModelSizeStats", err)
+			}
+
+		case "pipeline_count":
+
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.Atoi(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "PipelineCount", err)
+				}
+				s.PipelineCount = value
+			case float64:
+				f := int(v)
+				s.PipelineCount = f
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewTrainedModelStats returns a TrainedModelStats.
+func NewTrainedModelStats() *TrainedModelStats {
+	r := &TrainedModelStats{
+		Ingest: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltree.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltree.go
new file mode 100644
index 000000000..f2a2113ed
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltree.go
@@ -0,0 +1,102 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TrainedModelTree type.
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L74-L79 +type TrainedModelTree struct { + ClassificationLabels []string `json:"classification_labels,omitempty"` + FeatureNames []string `json:"feature_names"` + TargetType *string `json:"target_type,omitempty"` + TreeStructure []TrainedModelTreeNode `json:"tree_structure"` +} + +func (s *TrainedModelTree) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return fmt.Errorf("%s | %w", "ClassificationLabels", err) + } + + case "feature_names": + if err := dec.Decode(&s.FeatureNames); err != nil { + return fmt.Errorf("%s | %w", "FeatureNames", err) + } + + case "target_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TargetType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetType = &o + + case "tree_structure": + if err := dec.Decode(&s.TreeStructure); err != nil { + return fmt.Errorf("%s | %w", "TreeStructure", err) + } + + } + } + return nil +} + +// NewTrainedModelTree returns a TrainedModelTree. +func NewTrainedModelTree() *TrainedModelTree { + r := &TrainedModelTree{} + + return r +} + +type TrainedModelTreeVariant interface { + TrainedModelTreeCaster() *TrainedModelTree +} + +func (s *TrainedModelTree) TrainedModelTreeCaster() *TrainedModelTree { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltreenode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltreenode.go new file mode 100644 index 000000000..d9bd301b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trainedmodeltreenode.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TrainedModelTreeNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L81-L91 +type TrainedModelTreeNode struct { + DecisionType *string `json:"decision_type,omitempty"` + DefaultLeft *bool `json:"default_left,omitempty"` + LeafValue *Float64 `json:"leaf_value,omitempty"` + LeftChild *int `json:"left_child,omitempty"` + NodeIndex int `json:"node_index"` + RightChild *int `json:"right_child,omitempty"` + SplitFeature *int `json:"split_feature,omitempty"` + SplitGain *int `json:"split_gain,omitempty"` + Threshold *Float64 `json:"threshold,omitempty"` +} + +func (s *TrainedModelTreeNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decision_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DecisionType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DecisionType = &o + + case "default_left": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DefaultLeft", err) + } + s.DefaultLeft = &value + case bool: + s.DefaultLeft = &v + } + + case "leaf_value": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LeafValue", err) + } + f := Float64(value) + s.LeafValue = &f + case float64: + f := Float64(v) + s.LeafValue = &f + } + + case "left_child": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "LeftChild", err) + } + s.LeftChild = &value + case float64: + f := int(v) + s.LeftChild = &f + } + + case "node_index": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "NodeIndex", err) + } + s.NodeIndex = value + case float64: + f := int(v) + s.NodeIndex = f + } + + case "right_child": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RightChild", err) + } + s.RightChild = &value + case float64: + f := int(v) + s.RightChild = &f + } + + case "split_feature": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitFeature", err) + } + s.SplitFeature = &value + case float64: + f := int(v) + s.SplitFeature = &f + } + + case "split_gain": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitGain", err) + } + s.SplitGain = &value + case float64: + f := int(v) + s.SplitGain = &f + } + + case "threshold": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Threshold", err) + } + f := Float64(value) + s.Threshold = &f + case float64: + f := Float64(v) + s.Threshold = &f + } + + } + } + return nil +} + +// NewTrainedModelTreeNode returns a TrainedModelTreeNode. 
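
The tree types above are plain data holders, so a model can be assembled directly from the constructors and marshalled with encoding/json. A sketch of a single-split tree (the feature name and values are invented; iptr and fptr are local helpers, not part of the package):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

// Helpers for the pointer-typed optional fields of the generated structs.
func iptr(v int) *int { return &v }

func fptr(v types.Float64) *types.Float64 { return &v }

func main() {
	// A single split on feature "f0" with two leaf nodes.
	tree := types.NewTrainedModelTree()
	tree.FeatureNames = []string{"f0"}
	tree.TreeStructure = []types.TrainedModelTreeNode{
		{NodeIndex: 0, SplitFeature: iptr(0), Threshold: fptr(0.5), LeftChild: iptr(1), RightChild: iptr(2)},
		{NodeIndex: 1, LeafValue: fptr(-1)},
		{NodeIndex: 2, LeafValue: fptr(1)},
	}

	out, err := json.Marshal(tree)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
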
+func NewTrainedModelTreeNode() *TrainedModelTreeNode { + r := &TrainedModelTreeNode{} + + return r +} + +type TrainedModelTreeNodeVariant interface { + TrainedModelTreeNodeCaster() *TrainedModelTreeNode +} + +func (s *TrainedModelTreeNode) TrainedModelTreeNodeCaster() *TrainedModelTreeNode { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformauthorization.go new file mode 100644 index 000000000..ba6baf11b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformauthorization.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformAuthorization type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/Authorization.ts#L59-L71 +type TransformAuthorization struct { + // ApiKey If an API key was used for the most recent update to the transform, its name + // and identifier are listed in the response. + ApiKey *ApiKeyAuthorization `json:"api_key,omitempty"` + // Roles If a user ID was used for the most recent update to the transform, its roles + // at the time of the update are listed in the response. + Roles []string `json:"roles,omitempty"` + // ServiceAccount If a service account was used for the most recent update to the transform, + // the account name is listed in the response. + ServiceAccount *string `json:"service_account,omitempty"` +} + +func (s *TransformAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return fmt.Errorf("%s | %w", "ApiKey", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ServiceAccount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + +// NewTransformAuthorization returns a TransformAuthorization. 
+func NewTransformAuthorization() *TransformAuthorization {
+	r := &TransformAuthorization{}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformcontainer.go
new file mode 100644
index 000000000..c3e422e4b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformcontainer.go
@@ -0,0 +1,82 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// TransformContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Transform.ts#L28-L35
+type TransformContainer struct {
+	AdditionalTransformContainerProperty map[string]json.RawMessage `json:"-"`
+	Chain []TransformContainer `json:"chain,omitempty"`
+	Script *ScriptTransform `json:"script,omitempty"`
+	Search *SearchTransform `json:"search,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s TransformContainer) MarshalJSON() ([]byte, error) {
+	type opt TransformContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalTransformContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalTransformContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewTransformContainer returns a TransformContainer.
+func NewTransformContainer() *TransformContainer {
+	r := &TransformContainer{
+		AdditionalTransformContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type TransformContainerVariant interface {
+	TransformContainerCaster() *TransformContainer
+}
+
+func (s *TransformContainer) TransformContainerCaster() *TransformContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformdestination.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformdestination.go
new file mode 100644
index 000000000..584ead9a7
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformdestination.go
@@ -0,0 +1,96 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformDestination type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L34-L45 +type TransformDestination struct { + // Index The destination index for the transform. The mappings of the destination + // index are deduced based on the source + // fields when possible. If alternate mappings are required, use the create + // index API prior to starting the + // transform. + Index *string `json:"index,omitempty"` + // Pipeline The unique identifier for an ingest pipeline. + Pipeline *string `json:"pipeline,omitempty"` +} + +func (s *TransformDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + } + } + return nil +} + +// NewTransformDestination returns a TransformDestination. +func NewTransformDestination() *TransformDestination { + r := &TransformDestination{} + + return r +} + +type TransformDestinationVariant interface { + TransformDestinationCaster() *TransformDestination +} + +func (s *TransformDestination) TransformDestinationCaster() *TransformDestination { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformhealthissue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformhealthissue.go new file mode 100644 index 000000000..9529afd97 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformhealthissue.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
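
Worth noting about the TransformContainer.MarshalJSON override shown earlier: keys placed in AdditionalTransformContainerProperty are emitted as top-level siblings of the declared fields rather than under a wrapper key. A small sketch (the key and value are invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	c := types.NewTransformContainer()
	// Any entry in the additional-properties map is inlined next to the
	// declared chain/script/search fields when marshalling.
	c.AdditionalTransformContainerProperty["my_custom_transform"] = json.RawMessage(`{"lang":"painless"}`)

	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"my_custom_transform":{"lang":"painless"}}
}
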
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformHealthIssue type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L51-L63 +type TransformHealthIssue struct { + // Count Number of times this issue has occurred since it started + Count int `json:"count"` + // Details Details about the issue + Details *string `json:"details,omitempty"` + FirstOccurenceString DateTime `json:"first_occurence_string,omitempty"` + // FirstOccurrence The timestamp this issue occurred for for the first time + FirstOccurrence *int64 `json:"first_occurrence,omitempty"` + // Issue A description of the issue + Issue string `json:"issue"` + // Type The type of the issue + Type string `json:"type"` +} + +func (s *TransformHealthIssue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "details": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Details = &o + + case "first_occurence_string": + if err := dec.Decode(&s.FirstOccurenceString); err != nil { + return fmt.Errorf("%s | %w", "FirstOccurenceString", err) + } + + case "first_occurrence": + if err := dec.Decode(&s.FirstOccurrence); err != nil { + return fmt.Errorf("%s | %w", "FirstOccurrence", err) + } + + case "issue": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Issue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issue = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewTransformHealthIssue returns a TransformHealthIssue. 
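
The string cases in these decoders all follow the same raw-message pattern: decode to json.RawMessage, try strconv.Unquote, and keep the raw text when unquoting fails (for example, when the token was not a JSON string). A distilled, standalone sketch of that pattern (decodeString is a hypothetical helper, not part of the package):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// decodeString mirrors the pattern used throughout these generated
// decoders: take the raw token, try to unquote it, and fall back to
// the raw text when the value was not a JSON string.
func decodeString(raw json.RawMessage) string {
	o := string(raw)
	if unquoted, err := strconv.Unquote(o); err == nil {
		return unquoted
	}
	return o
}

func main() {
	fmt.Println(decodeString(json.RawMessage(`"red"`))) // red
	fmt.Println(decodeString(json.RawMessage(`42`)))    // 42 (kept verbatim)
}
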
+func NewTransformHealthIssue() *TransformHealthIssue { + r := &TransformHealthIssue{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformindexerstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformindexerstats.go new file mode 100644 index 000000000..1b711ebdd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformindexerstats.go @@ -0,0 +1,287 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformIndexerStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L73-L91 +type TransformIndexerStats struct { + DeleteTimeInMs *int64 `json:"delete_time_in_ms,omitempty"` + DocumentsDeleted *int64 `json:"documents_deleted,omitempty"` + DocumentsIndexed int64 `json:"documents_indexed"` + DocumentsProcessed int64 `json:"documents_processed"` + ExponentialAvgCheckpointDurationMs Float64 `json:"exponential_avg_checkpoint_duration_ms"` + ExponentialAvgDocumentsIndexed Float64 `json:"exponential_avg_documents_indexed"` + ExponentialAvgDocumentsProcessed Float64 `json:"exponential_avg_documents_processed"` + IndexFailures int64 `json:"index_failures"` + IndexTimeInMs int64 `json:"index_time_in_ms"` + IndexTotal int64 `json:"index_total"` + PagesProcessed int64 `json:"pages_processed"` + ProcessingTimeInMs int64 `json:"processing_time_in_ms"` + ProcessingTotal int64 `json:"processing_total"` + SearchFailures int64 `json:"search_failures"` + SearchTimeInMs int64 `json:"search_time_in_ms"` + SearchTotal int64 `json:"search_total"` + TriggerCount int64 `json:"trigger_count"` +} + +func (s *TransformIndexerStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delete_time_in_ms": + if err := dec.Decode(&s.DeleteTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "DeleteTimeInMs", err) + } + + case "documents_deleted": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocumentsDeleted", err) + } + s.DocumentsDeleted = &value + case float64: + f := int64(v) + s.DocumentsDeleted = &f + } + + case "documents_indexed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { 
+ case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocumentsIndexed", err) + } + s.DocumentsIndexed = value + case float64: + f := int64(v) + s.DocumentsIndexed = f + } + + case "documents_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocumentsProcessed", err) + } + s.DocumentsProcessed = value + case float64: + f := int64(v) + s.DocumentsProcessed = f + } + + case "exponential_avg_checkpoint_duration_ms": + if err := dec.Decode(&s.ExponentialAvgCheckpointDurationMs); err != nil { + return fmt.Errorf("%s | %w", "ExponentialAvgCheckpointDurationMs", err) + } + + case "exponential_avg_documents_indexed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ExponentialAvgDocumentsIndexed", err) + } + f := Float64(value) + s.ExponentialAvgDocumentsIndexed = f + case float64: + f := Float64(v) + s.ExponentialAvgDocumentsIndexed = f + } + + case "exponential_avg_documents_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ExponentialAvgDocumentsProcessed", err) + } + f := Float64(value) + s.ExponentialAvgDocumentsProcessed = f + case float64: + f := Float64(v) + s.ExponentialAvgDocumentsProcessed = f + } + + case "index_failures": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexFailures", err) + } + s.IndexFailures = value + case float64: + f := int64(v) + s.IndexFailures = f + } + + case "index_time_in_ms": + if err := dec.Decode(&s.IndexTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "IndexTimeInMs", err) + } + + case "index_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IndexTotal", err) + } + s.IndexTotal = value + case float64: + f := int64(v) + s.IndexTotal = f + } + + case "pages_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PagesProcessed", err) + } + s.PagesProcessed = value + case float64: + f := int64(v) + s.PagesProcessed = f + } + + case "processing_time_in_ms": + if err := dec.Decode(&s.ProcessingTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "ProcessingTimeInMs", err) + } + + case "processing_total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ProcessingTotal", err) + } + s.ProcessingTotal = value + case float64: + f := int64(v) + s.ProcessingTotal = f + } + + case "search_failures": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SearchFailures", err) + } + s.SearchFailures = value + case float64: + f := int64(v) + s.SearchFailures = f + } + + case "search_time_in_ms": + if err := dec.Decode(&s.SearchTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "SearchTimeInMs", err) + } + + case "search_total": + var 
tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SearchTotal", err) + } + s.SearchTotal = value + case float64: + f := int64(v) + s.SearchTotal = f + } + + case "trigger_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TriggerCount", err) + } + s.TriggerCount = value + case float64: + f := int64(v) + s.TriggerCount = f + } + + } + } + return nil +} + +// NewTransformIndexerStats returns a TransformIndexerStats. +func NewTransformIndexerStats() *TransformIndexerStats { + r := &TransformIndexerStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformprogress.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformprogress.go new file mode 100644 index 000000000..2b150a4e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformprogress.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformProgress type. 
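
The TransformIndexerStats decoder above normalises counters that may arrive as JSON numbers or numeric strings. A sketch of decoding a trimmed stats document and deriving an average from it (all sample values are invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "documents_processed" arrives as a string here and is parsed via
	// strconv.ParseInt; the other counters arrive as plain numbers.
	payload := `{
		"documents_processed": "12000",
		"search_total": 40,
		"search_time_in_ms": 2200,
		"trigger_count": 4
	}`

	var stats types.TransformIndexerStats
	if err := json.Unmarshal([]byte(payload), &stats); err != nil {
		panic(err)
	}
	if stats.SearchTotal > 0 {
		fmt.Printf("avg search time: %dms\n", stats.SearchTimeInMs/stats.SearchTotal) // avg search time: 55ms
	}
}
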
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L65-L71 +type TransformProgress struct { + DocsIndexed int64 `json:"docs_indexed"` + DocsProcessed int64 `json:"docs_processed"` + DocsRemaining *int64 `json:"docs_remaining,omitempty"` + PercentComplete *Float64 `json:"percent_complete,omitempty"` + TotalDocs *int64 `json:"total_docs,omitempty"` +} + +func (s *TransformProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs_indexed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocsIndexed", err) + } + s.DocsIndexed = value + case float64: + f := int64(v) + s.DocsIndexed = f + } + + case "docs_processed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocsProcessed", err) + } + s.DocsProcessed = value + case float64: + f := int64(v) + s.DocsProcessed = f + } + + case "docs_remaining": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocsRemaining", err) + } + s.DocsRemaining = &value + case float64: + f := int64(v) + s.DocsRemaining = &f + } + + case "percent_complete": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PercentComplete", err) + } + f := Float64(value) + s.PercentComplete = &f + case float64: + f := Float64(v) + s.PercentComplete = &f + } + + case "total_docs": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalDocs", err) + } + s.TotalDocs = &value + case float64: + f := int64(v) + s.TotalDocs = &f + } + + } + } + return nil +} + +// NewTransformProgress returns a TransformProgress. +func NewTransformProgress() *TransformProgress { + r := &TransformProgress{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsource.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsource.go new file mode 100644 index 000000000..de13cdbf4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsource.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TransformSource type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/_types/Transform.ts#L146-L165 +type TransformSource struct { + // Index The source indices for the transform. It can be a single index, an index + // pattern (for example, `"my-index-*""`), an + // array of indices (for example, `["my-index-000001", "my-index-000002"]`), or + // an array of index patterns (for + // example, `["my-index-*", "my-other-index-*"]`. For remote indices use the + // syntax `"remote_name:index_name"`. If + // any indices are in remote clusters then the master node and at least one + // transform node must have the `remote_cluster_client` node role. + Index []string `json:"index"` + // Query A query clause that retrieves a subset of data from the source index. + Query *Query `json:"query,omitempty"` + // RuntimeMappings Definitions of search-time runtime fields that can be used by the transform. + // For search runtime fields all data + // nodes, including remote nodes, must be 7.12 or later. + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` +} + +func (s *TransformSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return fmt.Errorf("%s | %w", "RuntimeMappings", err) + } + + } + } + return nil +} + +// NewTransformSource returns a TransformSource. +func NewTransformSource() *TransformSource { + r := &TransformSource{} + + return r +} + +type TransformSourceVariant interface { + TransformSourceCaster() *TransformSource +} + +func (s *TransformSource) TransformSourceCaster() *TransformSource { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsrecord.go new file mode 100644 index 000000000..9e52e8dd8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsrecord.go @@ -0,0 +1,535 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
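
TransformSource.Index shows the single-or-array convention used throughout the typed API: a bare string and an array of strings both decode into the same []string field. A sketch (the index names are invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// The decoder checks whether the raw value starts with '[' and
	// either appends the single name or decodes the whole array.
	for _, payload := range []string{
		`{"index": "orders"}`,
		`{"index": ["orders", "returns-*"]}`,
	} {
		var src types.TransformSource
		if err := json.Unmarshal([]byte(payload), &src); err != nil {
			panic(err)
		}
		fmt.Println(src.Index)
	}
}
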
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformsRecord type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cat/transforms/types.ts#L22-L197 +type TransformsRecord struct { + // ChangesLastDetectionTime The timestamp when changes were last detected in the source indices. + ChangesLastDetectionTime *string `json:"changes_last_detection_time,omitempty"` + // Checkpoint The sequence number for the checkpoint. + Checkpoint *string `json:"checkpoint,omitempty"` + // CheckpointDurationTimeExpAvg The exponential moving average of the duration of the checkpoint, in + // milliseconds. + CheckpointDurationTimeExpAvg *string `json:"checkpoint_duration_time_exp_avg,omitempty"` + // CheckpointProgress The progress of the next checkpoint that is currently in progress. + CheckpointProgress *string `json:"checkpoint_progress,omitempty"` + // CreateTime The time the transform was created. + CreateTime *string `json:"create_time,omitempty"` + // DeleteTime The total time spent deleting documents, in milliseconds. + DeleteTime *string `json:"delete_time,omitempty"` + // Description The description of the transform. + Description *string `json:"description,omitempty"` + // DestIndex The destination index for the transform. + DestIndex *string `json:"dest_index,omitempty"` + // DocsPerSecond The number of input documents per second. + DocsPerSecond *string `json:"docs_per_second,omitempty"` + // DocumentsDeleted The number of documents deleted from the destination index due to the + // retention policy for the transform. + DocumentsDeleted *string `json:"documents_deleted,omitempty"` + // DocumentsIndexed The number of documents that have been indexed into the destination index for + // the transform. + DocumentsIndexed *string `json:"documents_indexed,omitempty"` + // DocumentsProcessed The number of documents that have been processed from the source index of the + // transform. + DocumentsProcessed *string `json:"documents_processed,omitempty"` + // Frequency The interval between checks for changes in the source indices when the + // transform is running continuously. + Frequency *string `json:"frequency,omitempty"` + // Id The transform identifier. + Id *string `json:"id,omitempty"` + // IndexFailure The total number of indexing failures. + IndexFailure *string `json:"index_failure,omitempty"` + // IndexTime The total time spent indexing documents, in milliseconds. + IndexTime *string `json:"index_time,omitempty"` + // IndexTotal The total number of index operations done by the transform. + IndexTotal *string `json:"index_total,omitempty"` + // IndexedDocumentsExpAvg The exponential moving average of the number of new documents that have been + // indexed. 
+	IndexedDocumentsExpAvg *string `json:"indexed_documents_exp_avg,omitempty"`
+	// LastSearchTime The timestamp of the last search in the source indices.
+	// This field is shown only if the transform is running.
+	LastSearchTime *string `json:"last_search_time,omitempty"`
+	// MaxPageSearchSize The initial page size that is used for the composite aggregation for each
+	// checkpoint.
+	MaxPageSearchSize *string `json:"max_page_search_size,omitempty"`
+	// PagesProcessed The number of search or bulk index operations processed.
+	// Documents are processed in batches instead of individually.
+	PagesProcessed *string `json:"pages_processed,omitempty"`
+	// Pipeline The unique identifier for the ingest pipeline.
+	Pipeline *string `json:"pipeline,omitempty"`
+	// ProcessedDocumentsExpAvg The exponential moving average of the number of documents that have been
+	// processed.
+	ProcessedDocumentsExpAvg *string `json:"processed_documents_exp_avg,omitempty"`
+	// ProcessingTime The total time spent processing results, in milliseconds.
+	ProcessingTime *string `json:"processing_time,omitempty"`
+	// Reason If a transform has a `failed` state, these details describe the reason for
+	// failure.
+	Reason *string `json:"reason,omitempty"`
+	// SearchFailure The total number of search failures.
+	SearchFailure *string `json:"search_failure,omitempty"`
+	// SearchTime The total amount of search time, in milliseconds.
+	SearchTime *string `json:"search_time,omitempty"`
+	// SearchTotal The total number of search operations on the source index for the transform.
+	SearchTotal *string `json:"search_total,omitempty"`
+	// SourceIndex The source indices for the transform.
+	SourceIndex *string `json:"source_index,omitempty"`
+	// State The status of the transform.
+	// Returned values include:
+	// `aborting`: The transform is aborting.
+	// `failed`: The transform failed. For more information about the failure, check
+	// the `reason` field.
+	// `indexing`: The transform is actively processing data and creating new
+	// documents.
+	// `started`: The transform is running but not actively indexing data.
+	// `stopped`: The transform is stopped.
+	// `stopping`: The transform is stopping.
+	State *string `json:"state,omitempty"`
+	// TransformType The type of transform: `batch` or `continuous`.
+	TransformType *string `json:"transform_type,omitempty"`
+	// TriggerCount The number of times the transform has been triggered by the scheduler.
+	// For example, the scheduler triggers the transform indexer to check for
+	// updates or ingest new data at an interval specified in the `frequency`
+	// property.
+	TriggerCount *string `json:"trigger_count,omitempty"`
+	// Version The version of Elasticsearch that existed on the node when the transform was
+	// created.
+ Version *string `json:"version,omitempty"` +} + +func (s *TransformsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "changes_last_detection_time", "cldt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ChangesLastDetectionTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ChangesLastDetectionTime = &o + + case "checkpoint", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Checkpoint", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Checkpoint = &o + + case "checkpoint_duration_time_exp_avg", "cdtea", "checkpointTimeExpAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CheckpointDurationTimeExpAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CheckpointDurationTimeExpAvg = &o + + case "checkpoint_progress", "cp", "checkpointProgress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CheckpointProgress", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CheckpointProgress = &o + + case "create_time", "ct", "createTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreateTime = &o + + case "delete_time", "dtime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DeleteTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeleteTime = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest_index", "di", "destIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DestIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DestIndex = &o + + case "docs_per_second", "dps": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocsPerSecond", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsPerSecond = &o + + case "documents_deleted", "docd": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocumentsDeleted", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsDeleted = &o + + case "documents_indexed", "doci": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocumentsIndexed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsIndexed = &o + + case "documents_processed", "docp", "documentsProcessed": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "DocumentsProcessed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsProcessed = &o + + case "frequency", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Frequency = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "index_failure", "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexFailure", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexFailure = &o + + case "index_time", "itime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexTime = &o + + case "index_total", "it": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexTotal = &o + + case "indexed_documents_exp_avg", "idea": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "IndexedDocumentsExpAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexedDocumentsExpAvg = &o + + case "last_search_time", "lst", "lastSearchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastSearchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastSearchTime = &o + + case "max_page_search_size", "mpsz": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxPageSearchSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxPageSearchSize = &o + + case "pages_processed", "pp": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "PagesProcessed", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PagesProcessed = &o + + case "pipeline", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Pipeline", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "processed_documents_exp_avg", "pdea": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProcessedDocumentsExpAvg", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProcessedDocumentsExpAvg = &o + + case "processing_time", "pt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProcessingTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProcessingTime = &o + + case "reason", "r": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != 
nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "search_failure", "sf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchFailure", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFailure = &o + + case "search_time", "stime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchTime", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchTime = &o + + case "search_total", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SearchTotal", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchTotal = &o + + case "source_index", "si", "sourceIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "SourceIndex", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceIndex = &o + + case "state", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "transform_type", "tt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TransformType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TransformType = &o + + case "trigger_count", "tc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TriggerCount", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TriggerCount = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewTransformsRecord returns a TransformsRecord. +func NewTransformsRecord() *TransformsRecord { + r := &TransformsRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstats.go new file mode 100644 index 000000000..2ad58c4ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstats.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
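
The TransformsRecord decoder above accepts both the long cat column names and their abbreviations ("s" for state, "dps" for docs_per_second, and so on), mapping every alias onto the same field. A sketch of decoding an abbreviated row (the row itself is invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Short cat column names decode into the same fields as the long ones.
	row := `{"id": "ecommerce-transform", "s": "started", "dps": "1000.0"}`

	var rec types.TransformsRecord
	if err := json.Unmarshal([]byte(row), &rec); err != nil {
		panic(err)
	}
	fmt.Println(*rec.Id, *rec.State, *rec.DocsPerSecond) // ecommerce-transform started 1000.0
}
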
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L31-L42 +type TransformStats struct { + Checkpointing Checkpointing `json:"checkpointing"` + Health *TransformStatsHealth `json:"health,omitempty"` + Id string `json:"id"` + Node *NodeAttributes `json:"node,omitempty"` + Reason *string `json:"reason,omitempty"` + State string `json:"state"` + Stats TransformIndexerStats `json:"stats"` +} + +func (s *TransformStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "checkpointing": + if err := dec.Decode(&s.Checkpointing); err != nil { + return fmt.Errorf("%s | %w", "Checkpointing", err) + } + + case "health": + if err := dec.Decode(&s.Health); err != nil { + return fmt.Errorf("%s | %w", "Health", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + } + } + return nil +} + +// NewTransformStats returns a TransformStats. +func NewTransformStats() *TransformStats { + r := &TransformStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstatshealth.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstatshealth.go new file mode 100644 index 000000000..b8b93e71e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformstatshealth.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
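
For TransformStats just above, a failed transform carries its failure details in the optional reason field alongside the state string. A sketch of reading it (the sample document is invented and heavily trimmed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "state" is unquoted by the generated decoder; "reason" is optional
	// and only present when the transform has failed.
	payload := `{"id": "ecommerce-transform", "state": "failed", "reason": "no such index"}`

	var stats types.TransformStats
	if err := json.Unmarshal([]byte(payload), &stats); err != nil {
		panic(err)
	}
	if stats.State == "failed" && stats.Reason != nil {
		fmt.Println(stats.Id, "failed:", *stats.Reason)
	}
}
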
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus" +) + +// TransformStatsHealth type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform_stats/types.ts#L44-L49 +type TransformStatsHealth struct { + // Issues If a non-healthy status is returned, contains a list of issues of the + // transform. + Issues []TransformHealthIssue `json:"issues,omitempty"` + Status healthstatus.HealthStatus `json:"status"` +} + +// NewTransformStatsHealth returns a TransformStatsHealth. +func NewTransformStatsHealth() *TransformStatsHealth { + r := &TransformStatsHealth{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsummary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsummary.go new file mode 100644 index 000000000..091ed2a67 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transformsummary.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransformSummary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/transform/get_transform/types.ts#L33-L62 +type TransformSummary struct { + // Authorization The security privileges that the transform uses to run its queries. If + // Elastic Stack security features were disabled at the time of the most recent + // update to the transform, this property is omitted. + Authorization *TransformAuthorization `json:"authorization,omitempty"` + // CreateTime The time the transform was created. + CreateTime *int64 `json:"create_time,omitempty"` + CreateTimeString DateTime `json:"create_time_string,omitempty"` + // Description Free text description of the transform. + Description *string `json:"description,omitempty"` + // Dest The destination for the transform. + Dest ReindexDestination `json:"dest"` + Frequency Duration `json:"frequency,omitempty"` + Id string `json:"id"` + Latest *Latest `json:"latest,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + // Pivot The pivot method transforms the data by aggregating and grouping it. + Pivot *Pivot `json:"pivot,omitempty"` + RetentionPolicy *RetentionPolicyContainer `json:"retention_policy,omitempty"` + // Settings Defines optional transform settings. 
+ Settings *Settings `json:"settings,omitempty"` + // Source The source of the data for the transform. + Source TransformSource `json:"source"` + // Sync Defines the properties transforms require to run continuously. + Sync *SyncContainer `json:"sync,omitempty"` + // Version The version of Elasticsearch that existed on the node when the transform was + // created. + Version *string `json:"version,omitempty"` +} + +func (s *TransformSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "authorization": + if err := dec.Decode(&s.Authorization); err != nil { + return fmt.Errorf("%s | %w", "Authorization", err) + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return fmt.Errorf("%s | %w", "CreateTime", err) + } + + case "create_time_string": + if err := dec.Decode(&s.CreateTimeString); err != nil { + return fmt.Errorf("%s | %w", "CreateTimeString", err) + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return fmt.Errorf("%s | %w", "Dest", err) + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return fmt.Errorf("%s | %w", "Frequency", err) + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return fmt.Errorf("%s | %w", "Id", err) + } + + case "latest": + if err := dec.Decode(&s.Latest); err != nil { + return fmt.Errorf("%s | %w", "Latest", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return fmt.Errorf("%s | %w", "Pivot", err) + } + + case "retention_policy": + if err := dec.Decode(&s.RetentionPolicy); err != nil { + return fmt.Errorf("%s | %w", "RetentionPolicy", err) + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return fmt.Errorf("%s | %w", "Settings", err) + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return fmt.Errorf("%s | %w", "Source", err) + } + + case "sync": + if err := dec.Decode(&s.Sync); err != nil { + return fmt.Errorf("%s | %w", "Sync", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewTransformSummary returns a TransformSummary. +func NewTransformSummary() *TransformSummary { + r := &TransformSummary{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translog.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translog.go new file mode 100644 index 000000000..a630c6f9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translog.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/translogdurability" +) + +// Translog type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L368-L390 +type Translog struct { + // Durability Whether or not to `fsync` and commit the translog after every index, delete, + // update, or bulk request. + Durability *translogdurability.TranslogDurability `json:"durability,omitempty"` + // FlushThresholdSize The translog stores all operations that are not yet safely persisted in + // Lucene (i.e., are not + // part of a Lucene commit point). Although these operations are available for + // reads, they will need + // to be replayed if the shard was stopped and had to be recovered. This setting + // controls the + // maximum total size of these operations, to prevent recoveries from taking too + // long. Once the + // maximum size has been reached a flush will happen, generating a new Lucene + // commit point. + FlushThresholdSize ByteSize `json:"flush_threshold_size,omitempty"` + Retention *TranslogRetention `json:"retention,omitempty"` + // SyncInterval How often the translog is fsynced to disk and committed, regardless of write + // operations. + // Values less than 100ms are not allowed. + SyncInterval Duration `json:"sync_interval,omitempty"` +} + +func (s *Translog) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "durability": + if err := dec.Decode(&s.Durability); err != nil { + return fmt.Errorf("%s | %w", "Durability", err) + } + + case "flush_threshold_size": + if err := dec.Decode(&s.FlushThresholdSize); err != nil { + return fmt.Errorf("%s | %w", "FlushThresholdSize", err) + } + + case "retention": + if err := dec.Decode(&s.Retention); err != nil { + return fmt.Errorf("%s | %w", "Retention", err) + } + + case "sync_interval": + if err := dec.Decode(&s.SyncInterval); err != nil { + return fmt.Errorf("%s | %w", "SyncInterval", err) + } + + } + } + return nil +} + +// NewTranslog returns a Translog. +func NewTranslog() *Translog { + r := &Translog{} + + return r +} + +type TranslogVariant interface { + TranslogCaster() *Translog +} + +func (s *Translog) TranslogCaster() *Translog { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogretention.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogretention.go new file mode 100644 index 000000000..0f662c9b4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogretention.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TranslogRetention type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/_types/IndexSettings.ts#L409-L428 +type TranslogRetention struct { + // Age This controls the maximum duration for which translog files are kept by each + // shard. Keeping more + // translog files increases the chance of performing an operation based sync + // when recovering replicas. If + // the translog files are not sufficient, replica recovery will fall back to a + // file based sync. This setting + // is ignored, and should not be set, if soft deletes are enabled. Soft deletes + // are enabled by default in + // indices created in Elasticsearch versions 7.0.0 and later. + Age Duration `json:"age,omitempty"` + // Size This controls the total size of translog files to keep for each shard. + // Keeping more translog files increases + // the chance of performing an operation based sync when recovering a replica. + // If the translog files are not + // sufficient, replica recovery will fall back to a file based sync. This + // setting is ignored, and should not be + // set, if soft deletes are enabled. Soft deletes are enabled by default in + // indices created in Elasticsearch + // versions 7.0.0 and later. + Size ByteSize `json:"size,omitempty"` +} + +func (s *TranslogRetention) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "age": + if err := dec.Decode(&s.Age); err != nil { + return fmt.Errorf("%s | %w", "Age", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + } + } + return nil +} + +// NewTranslogRetention returns a TranslogRetention. +func NewTranslogRetention() *TranslogRetention { + r := &TranslogRetention{} + + return r +} + +type TranslogRetentionVariant interface { + TranslogRetentionCaster() *TranslogRetention +} + +func (s *TranslogRetention) TranslogRetentionCaster() *TranslogRetention { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstats.go new file mode 100644 index 000000000..bcf3894cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstats.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TranslogStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L427-L435 +type TranslogStats struct { + EarliestLastModifiedAge int64 `json:"earliest_last_modified_age"` + Operations int64 `json:"operations"` + Size *string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes"` + UncommittedOperations int `json:"uncommitted_operations"` + UncommittedSize *string `json:"uncommitted_size,omitempty"` + UncommittedSizeInBytes int64 `json:"uncommitted_size_in_bytes"` +} + +func (s *TranslogStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "earliest_last_modified_age": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "EarliestLastModifiedAge", err) + } + s.EarliestLastModifiedAge = value + case float64: + f := int64(v) + s.EarliestLastModifiedAge = f + } + + case "operations": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Operations", err) + } + s.Operations = value + case float64: + f := int64(v) + s.Operations = f + } + + case "size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = &o + + case "size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SizeInBytes", err) + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + case "uncommitted_operations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "UncommittedOperations", err) + } + s.UncommittedOperations = value + case float64: + f := int(v) + s.UncommittedOperations = f + } + + case "uncommitted_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "UncommittedSize", err) + } + o := string(tmp[:]) + o, 
err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UncommittedSize = &o + + case "uncommitted_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "UncommittedSizeInBytes", err) + } + s.UncommittedSizeInBytes = value + case float64: + f := int64(v) + s.UncommittedSizeInBytes = f + } + + } + } + return nil +} + +// NewTranslogStats returns a TranslogStats. +func NewTranslogStats() *TranslogStats { + r := &TranslogStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstatus.go new file mode 100644 index 000000000..c8d57a4f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/translogstatus.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TranslogStatus type. 
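// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: TranslogStats (previous
// file) decodes its counters from either JSON numbers or numeric strings, so
// both payloads below produce identical structs. Values are assumptions.
//
//	var a, b types.TranslogStats
//	_ = json.Unmarshal([]byte(`{"operations":42,"size_in_bytes":1024}`), &a)
//	_ = json.Unmarshal([]byte(`{"operations":"42","size_in_bytes":"1024"}`), &b)
//	// a.Operations == b.Operations == 42
//	// a.SizeInBytes == b.SizeInBytes == 1024
// ---------------------------------------------------------------------------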
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L102-L109 +type TranslogStatus struct { + Percent Percentage `json:"percent"` + Recovered int64 `json:"recovered"` + Total int64 `json:"total"` + TotalOnStart int64 `json:"total_on_start"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *TranslogStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return fmt.Errorf("%s | %w", "Percent", err) + } + + case "recovered": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Recovered", err) + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_on_start": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalOnStart", err) + } + s.TotalOnStart = value + case float64: + f := int64(v) + s.TotalOnStart = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewTranslogStatus returns a TranslogStatus. +func NewTranslogStatus() *TranslogStatus { + r := &TranslogStatus{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transport.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transport.go new file mode 100644 index 000000000..d7f969082 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transport.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Transport type. 
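// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: reading recovery
// progress from the TranslogStatus type in the previous file. The payload is
// an assumption shaped like an indices recovery response fragment.
//
//	var ts types.TranslogStatus
//	raw := []byte(`{"percent":"90.0%","recovered":450,"total":500,"total_on_start":500,"total_time_in_millis":1200}`)
//	if err := json.Unmarshal(raw, &ts); err == nil {
//		fmt.Printf("replayed %d of %d operations\n", ts.Recovered, ts.Total)
//	}
// ---------------------------------------------------------------------------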
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1125-L1168 +type Transport struct { + // InboundHandlingTimeHistogram The distribution of the time spent handling each inbound message on a + // transport thread, represented as a histogram. + InboundHandlingTimeHistogram []TransportHistogram `json:"inbound_handling_time_histogram,omitempty"` + // OutboundHandlingTimeHistogram The distribution of the time spent sending each outbound transport message on + // a transport thread, represented as a histogram. + OutboundHandlingTimeHistogram []TransportHistogram `json:"outbound_handling_time_histogram,omitempty"` + // RxCount Total number of RX (receive) packets received by the node during internal + // cluster communication. + RxCount *int64 `json:"rx_count,omitempty"` + // RxSize Size of RX packets received by the node during internal cluster + // communication. + RxSize *string `json:"rx_size,omitempty"` + // RxSizeInBytes Size, in bytes, of RX packets received by the node during internal cluster + // communication. + RxSizeInBytes *int64 `json:"rx_size_in_bytes,omitempty"` + // ServerOpen Current number of inbound TCP connections used for internal communication + // between nodes. + ServerOpen *int `json:"server_open,omitempty"` + // TotalOutboundConnections The cumulative number of outbound transport connections that this node has + // opened since it started. + // Each transport connection may comprise multiple TCP connections but is only + // counted once in this statistic. + // Transport connections are typically long-lived so this statistic should + // remain constant in a stable cluster. + TotalOutboundConnections *int64 `json:"total_outbound_connections,omitempty"` + // TxCount Total number of TX (transmit) packets sent by the node during internal + // cluster communication. + TxCount *int64 `json:"tx_count,omitempty"` + // TxSize Size of TX packets sent by the node during internal cluster communication. + TxSize *string `json:"tx_size,omitempty"` + // TxSizeInBytes Size, in bytes, of TX packets sent by the node during internal cluster + // communication. 
+ TxSizeInBytes *int64 `json:"tx_size_in_bytes,omitempty"` +} + +func (s *Transport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "inbound_handling_time_histogram": + if err := dec.Decode(&s.InboundHandlingTimeHistogram); err != nil { + return fmt.Errorf("%s | %w", "InboundHandlingTimeHistogram", err) + } + + case "outbound_handling_time_histogram": + if err := dec.Decode(&s.OutboundHandlingTimeHistogram); err != nil { + return fmt.Errorf("%s | %w", "OutboundHandlingTimeHistogram", err) + } + + case "rx_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RxCount", err) + } + s.RxCount = &value + case float64: + f := int64(v) + s.RxCount = &f + } + + case "rx_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RxSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RxSize = &o + + case "rx_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "RxSizeInBytes", err) + } + s.RxSizeInBytes = &value + case float64: + f := int64(v) + s.RxSizeInBytes = &f + } + + case "server_open": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ServerOpen", err) + } + s.ServerOpen = &value + case float64: + f := int(v) + s.ServerOpen = &f + } + + case "total_outbound_connections": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalOutboundConnections", err) + } + s.TotalOutboundConnections = &value + case float64: + f := int64(v) + s.TotalOutboundConnections = &f + } + + case "tx_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TxCount", err) + } + s.TxCount = &value + case float64: + f := int64(v) + s.TxCount = &f + } + + case "tx_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TxSize", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TxSize = &o + + case "tx_size_in_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TxSizeInBytes", err) + } + s.TxSizeInBytes = &value + case float64: + f := int64(v) + s.TxSizeInBytes = &f + } + + } + } + return nil +} + +// NewTransport returns a Transport. +func NewTransport() *Transport { + r := &Transport{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transporthistogram.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transporthistogram.go new file mode 100644 index 000000000..5aa5e540e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/transporthistogram.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TransportHistogram type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/nodes/_types/Stats.ts#L1170-L1184 +type TransportHistogram struct { + // Count The number of times a transport thread took a period of time within the + // bounds of this bucket to handle an inbound message. + Count *int64 `json:"count,omitempty"` + // GeMillis The inclusive lower bound of the bucket in milliseconds. May be omitted on + // the first bucket if this bucket has no lower bound. + GeMillis *int64 `json:"ge_millis,omitempty"` + // LtMillis The exclusive upper bound of the bucket in milliseconds. + // May be omitted on the last bucket if this bucket has no upper bound. + LtMillis *int64 `json:"lt_millis,omitempty"` +} + +func (s *TransportHistogram) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "ge_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "GeMillis", err) + } + s.GeMillis = &value + case float64: + f := int64(v) + s.GeMillis = &f + } + + case "lt_millis": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LtMillis", err) + } + s.LtMillis = &value + case float64: + f := int64(v) + s.LtMillis = &f + } + + } + } + return nil +} + +// NewTransportHistogram returns a TransportHistogram. +func NewTransportHistogram() *TransportHistogram { + r := &TransportHistogram{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggercontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggercontainer.go new file mode 100644 index 000000000..dcb7241f7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggercontainer.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// TriggerContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Trigger.ts#L23-L28
+type TriggerContainer struct {
+	AdditionalTriggerContainerProperty map[string]json.RawMessage `json:"-"`
+	Schedule                           *ScheduleContainer         `json:"schedule,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s TriggerContainer) MarshalJSON() ([]byte, error) {
+	type opt TriggerContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalTriggerContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalTriggerContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewTriggerContainer returns a TriggerContainer.
+func NewTriggerContainer() *TriggerContainer {
+	r := &TriggerContainer{
+		AdditionalTriggerContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
+
+type TriggerContainerVariant interface {
+	TriggerContainerCaster() *TriggerContainer
+}
+
+func (s *TriggerContainer) TriggerContainerCaster() *TriggerContainer {
+	return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventcontainer.go
new file mode 100644
index 000000000..ac8a2f509
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventcontainer.go
@@ -0,0 +1,72 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// TriggerEventContainer type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Trigger.ts#L32-L37
+type TriggerEventContainer struct {
+	AdditionalTriggerEventContainerProperty map[string]json.RawMessage `json:"-"`
+	Schedule                                *ScheduleTriggerEvent      `json:"schedule,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s TriggerEventContainer) MarshalJSON() ([]byte, error) {
+	type opt TriggerEventContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalTriggerEventContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalTriggerEventContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewTriggerEventContainer returns a TriggerEventContainer.
+func NewTriggerEventContainer() *TriggerEventContainer {
+	r := &TriggerEventContainer{
+		AdditionalTriggerEventContainerProperty: make(map[string]json.RawMessage),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventresult.go
new file mode 100644
index 000000000..f9ee7dc69
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/triggereventresult.go
@@ -0,0 +1,88 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// TriggerEventResult type.
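// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: the MarshalJSON
// overrides on TriggerContainer and TriggerEventContainer above inline the
// Additional...Property map beside the typed fields, so trigger kinds the
// client does not model still round-trip. Key and payload are assumptions.
//
//	tc := types.NewTriggerContainer()
//	tc.AdditionalTriggerContainerProperty["custom_trigger"] = json.RawMessage(`{"interval":"5m"}`)
//	data, _ := json.Marshal(tc)
//	// string(data) == `{"custom_trigger":{"interval":"5m"}}`
// ---------------------------------------------------------------------------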
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Trigger.ts#L39-L43 +type TriggerEventResult struct { + Manual TriggerEventContainer `json:"manual"` + TriggeredTime DateTime `json:"triggered_time"` + Type string `json:"type"` +} + +func (s *TriggerEventResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "manual": + if err := dec.Decode(&s.Manual); err != nil { + return fmt.Errorf("%s | %w", "Manual", err) + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return fmt.Errorf("%s | %w", "TriggeredTime", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewTriggerEventResult returns a TriggerEventResult. +func NewTriggerEventResult() *TriggerEventResult { + r := &TriggerEventResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimprocessor.go new file mode 100644 index 000000000..8fcd14dcb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TrimProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1544-L1560 +type TrimProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The string-valued field to trim whitespace from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the trimmed value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *TrimProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewTrimProcessor returns a TrimProcessor. +func NewTrimProcessor() *TrimProcessor { + r := &TrimProcessor{} + + return r +} + +type TrimProcessorVariant interface { + TrimProcessorCaster() *TrimProcessor +} + +func (s *TrimProcessor) TrimProcessorCaster() *TrimProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimtokenfilter.go new file mode 100644 index 000000000..fabae2b59 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/trimtokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// TrimTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L431-L433 +type TrimTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *TrimTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TrimTokenFilter) MarshalJSON() ([]byte, error) { + type innerTrimTokenFilter TrimTokenFilter + tmp := innerTrimTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "trim" + + return json.Marshal(tmp) +} + +// NewTrimTokenFilter returns a TrimTokenFilter. +func NewTrimTokenFilter() *TrimTokenFilter { + r := &TrimTokenFilter{} + + return r +} + +type TrimTokenFilterVariant interface { + TrimTokenFilterCaster() *TrimTokenFilter +} + +func (s *TrimTokenFilter) TrimTokenFilterCaster() *TrimTokenFilter { + return s +} + +func (s *TrimTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/truncatetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/truncatetokenfilter.go new file mode 100644 index 000000000..a529f1046 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/truncatetokenfilter.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
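// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: TrimTokenFilter above
// pins its "type" discriminator during marshalling, so even a zero value
// serializes unambiguously. Variable names are assumptions.
//
//	f := types.NewTrimTokenFilter()
//	data, _ := json.Marshal(f)
//	// string(data) == `{"type":"trim"}`
// ---------------------------------------------------------------------------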
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TruncateTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L435-L439 +type TruncateTokenFilter struct { + // Length Character limit for each token. Tokens exceeding this limit are truncated. + // Defaults to `10`. + Length *int `json:"length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *TruncateTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Length", err) + } + s.Length = &value + case float64: + f := int(v) + s.Length = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TruncateTokenFilter) MarshalJSON() ([]byte, error) { + type innerTruncateTokenFilter TruncateTokenFilter + tmp := innerTruncateTokenFilter{ + Length: s.Length, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "truncate" + + return json.Marshal(tmp) +} + +// NewTruncateTokenFilter returns a TruncateTokenFilter. +func NewTruncateTokenFilter() *TruncateTokenFilter { + r := &TruncateTokenFilter{} + + return r +} + +type TruncateTokenFilterVariant interface { + TruncateTokenFilterCaster() *TruncateTokenFilter +} + +func (s *TruncateTokenFilter) TruncateTokenFilterCaster() *TruncateTokenFilter { + return s +} + +func (s *TruncateTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregate.go new file mode 100644 index 000000000..63b49e86e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregate.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
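// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: configuring the
// TruncateTokenFilter above. Length caps each token (the spec default is 10)
// and decodes from a number or a numeric string; "type" is pinned to
// "truncate" on output. The value 5 is an assumption.
//
//	length := 5
//	f := types.TruncateTokenFilter{Length: &length}
//	data, _ := json.Marshal(f)
//	// string(data) == `{"length":5,"type":"truncate"}`
// ---------------------------------------------------------------------------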
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TTestAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L840-L847 +type TTestAggregate struct { + Meta Metadata `json:"meta,omitempty"` + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *TTestAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewTTestAggregate returns a TTestAggregate. +func NewTTestAggregate() *TTestAggregate { + r := &TTestAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregation.go new file mode 100644 index 000000000..055336644 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/ttestaggregation.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ttesttype" +) + +// TTestAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L326-L340 +type TTestAggregation struct { + // A Test population A. + A *TestPopulation `json:"a,omitempty"` + // B Test population B. + B *TestPopulation `json:"b,omitempty"` + // Type The type of test. + Type *ttesttype.TTestType `json:"type,omitempty"` +} + +// NewTTestAggregation returns a TTestAggregation. 
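// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: TTestAggregate above
// carries the t-test result; value_as_string, when present, is unquoted by
// the custom decoder. The payload is an assumption.
//
//	var agg types.TTestAggregate
//	_ = json.Unmarshal([]byte(`{"value":0.027,"value_as_string":"0.027"}`), &agg)
//	// *agg.Value == 0.027 && *agg.ValueAsString == "0.027"
// ---------------------------------------------------------------------------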
+func NewTTestAggregation() *TTestAggregation { + r := &TTestAggregation{} + + return r +} + +type TTestAggregationVariant interface { + TTestAggregationCaster() *TTestAggregation +} + +func (s *TTestAggregation) TTestAggregationCaster() *TTestAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/turkishanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/turkishanalyzer.go new file mode 100644 index 000000000..e7dedf0e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/turkishanalyzer.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TurkishAnalyzer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L310-L315 +type TurkishAnalyzer struct { + StemExclusion []string `json:"stem_exclusion,omitempty"` + Stopwords StopWords `json:"stopwords,omitempty"` + StopwordsPath *string `json:"stopwords_path,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *TurkishAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return fmt.Errorf("%s | %w", "StemExclusion", err) + } + + case "stopwords": + if err := dec.Decode(&s.Stopwords); err != nil { + return fmt.Errorf("%s | %w", "Stopwords", err) + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "StopwordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TurkishAnalyzer) MarshalJSON() ([]byte, error) { + type innerTurkishAnalyzer TurkishAnalyzer + tmp := innerTurkishAnalyzer{ + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + } + + tmp.Type = "turkish" + + return json.Marshal(tmp) +} + +// NewTurkishAnalyzer returns a TurkishAnalyzer. 
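// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: TurkishAnalyzer above
// marshals with "type" pinned to "turkish"; stem_exclusion and the stopword
// fields are optional tuning knobs. The exclusion list is an assumption.
//
//	a := types.TurkishAnalyzer{StemExclusion: []string{"örnek"}}
//	data, _ := json.Marshal(a)
//	// string(data) == `{"stem_exclusion":["örnek"],"type":"turkish"}`
// ---------------------------------------------------------------------------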
+func NewTurkishAnalyzer() *TurkishAnalyzer { + r := &TurkishAnalyzer{} + + return r +} + +type TurkishAnalyzerVariant interface { + TurkishAnalyzerCaster() *TurkishAnalyzer +} + +func (s *TurkishAnalyzer) TurkishAnalyzerCaster() *TurkishAnalyzer { + return s +} + +func (s *TurkishAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typefieldmappings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typefieldmappings.go new file mode 100644 index 000000000..688c03c51 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typefieldmappings.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// TypeFieldMappings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/get_field_mapping/types.ts#L24-L26 +type TypeFieldMappings struct { + Mappings map[string]FieldMapping `json:"mappings"` +} + +// NewTypeFieldMappings returns a TypeFieldMappings. +func NewTypeFieldMappings() *TypeFieldMappings { + r := &TypeFieldMappings{ + Mappings: make(map[string]FieldMapping), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typemapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typemapping.go new file mode 100644 index 000000000..125cd2235 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typemapping.go @@ -0,0 +1,541 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
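// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: TypeFieldMappings in
// the previous file is keyed by field name, as returned per index by the get
// field mapping API. The raw variable is a hypothetical response fragment.
//
//	tfm := types.NewTypeFieldMappings()
//	_ = json.Unmarshal(raw, tfm)
//	for field := range tfm.Mappings {
//		fmt.Println(field)
//	}
// ---------------------------------------------------------------------------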
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects" +) + +// TypeMapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/TypeMapping.ts#L34-L57 +type TypeMapping struct { + AllField *AllField `json:"all_field,omitempty"` + DataStreamTimestamp_ *DataStreamTimestamp `json:"_data_stream_timestamp,omitempty"` + DateDetection *bool `json:"date_detection,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + DynamicDateFormats []string `json:"dynamic_date_formats,omitempty"` + DynamicTemplates []map[string]DynamicTemplate `json:"dynamic_templates,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + FieldNames_ *FieldNamesField `json:"_field_names,omitempty"` + IndexField *IndexField `json:"index_field,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + NumericDetection *bool `json:"numeric_detection,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Routing_ *RoutingField `json:"_routing,omitempty"` + Runtime map[string]RuntimeField `json:"runtime,omitempty"` + Size_ *SizeField `json:"_size,omitempty"` + Source_ *SourceField `json:"_source,omitempty"` + Subobjects *subobjects.Subobjects `json:"subobjects,omitempty"` +} + +func (s *TypeMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all_field": + if err := dec.Decode(&s.AllField); err != nil { + return fmt.Errorf("%s | %w", "AllField", err) + } + + case "_data_stream_timestamp": + if err := dec.Decode(&s.DataStreamTimestamp_); err != nil { + return fmt.Errorf("%s | %w", "DataStreamTimestamp_", err) + } + + case "date_detection": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DateDetection", err) + } + s.DateDetection = &value + case bool: + s.DateDetection = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "dynamic_date_formats": + if err := dec.Decode(&s.DynamicDateFormats); err != nil { + return fmt.Errorf("%s | %w", "DynamicDateFormats", err) + } + + case "dynamic_templates": + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return fmt.Errorf("%s | %w", "DynamicTemplates", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "_field_names": + if err := dec.Decode(&s.FieldNames_); err != nil { + return fmt.Errorf("%s | %w", "FieldNames_", err) + } + + case "index_field": + if err := dec.Decode(&s.IndexField); err != nil { + return fmt.Errorf("%s | %w", "IndexField", err) + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return fmt.Errorf("%s | %w", "Meta_", err) + } + + case "numeric_detection": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: 
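+				// The server may encode this flag as the string "true"/"false"
+				// rather than a JSON bool; parse the string form here.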
+ value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "NumericDetection", err) + } + s.NumericDetection = &value + case bool: + s.NumericDetection = &v + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + 
return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return fmt.Errorf("%s | %w", "Routing_", err) + } + + case "runtime": + if s.Runtime == nil { + s.Runtime = make(map[string]RuntimeField, 0) + } + if err := dec.Decode(&s.Runtime); err != nil { + return fmt.Errorf("%s | %w", "Runtime", err) + } + + case "_size": + if err := dec.Decode(&s.Size_); err != nil { + return fmt.Errorf("%s | %w", "Size_", err) + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + + case "subobjects": + if err := dec.Decode(&s.Subobjects); err != nil { + return fmt.Errorf("%s | %w", "Subobjects", err) + } + + } + } + return nil +} + +// NewTypeMapping returns a TypeMapping. 
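+//
+// A hedged sketch of assembling a mapping by hand (field names are
+// illustrative; both property constructors live elsewhere in this package):
+//
+//	m := NewTypeMapping()
+//	m.Properties["title"] = NewTextProperty()
+//	m.Properties["views"] = NewLongNumberProperty()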
+func NewTypeMapping() *TypeMapping { + r := &TypeMapping{ + Properties: make(map[string]Property), + Runtime: make(map[string]RuntimeField), + } + + return r +} + +type TypeMappingVariant interface { + TypeMappingCaster() *TypeMapping +} + +func (s *TypeMapping) TypeMappingCaster() *TypeMapping { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typequery.go new file mode 100644 index 000000000..c1dd9a85b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/typequery.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// TypeQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L297-L299 +type TypeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Value string `json:"value"` +} + +func (s *TypeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + +// NewTypeQuery returns a TypeQuery. 
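+//
+// Value is the only required field; a minimal sketch (the value shown is
+// illustrative):
+//
+//	q := NewTypeQuery()
+//	q.Value = "_doc"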
+func NewTypeQuery() *TypeQuery { + r := &TypeQuery{} + + return r +} + +type TypeQueryVariant interface { + TypeQueryCaster() *TypeQuery +} + +func (s *TypeQuery) TypeQueryCaster() *TypeQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uaxemailurltokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uaxemailurltokenizer.go new file mode 100644 index 000000000..d6fa6ca6d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uaxemailurltokenizer.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UaxEmailUrlTokenizer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L132-L135 +type UaxEmailUrlTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *UaxEmailUrlTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UaxEmailUrlTokenizer) MarshalJSON() ([]byte, error) { + type innerUaxEmailUrlTokenizer UaxEmailUrlTokenizer + tmp := innerUaxEmailUrlTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "uax_url_email" + + return json.Marshal(tmp) +} + +// NewUaxEmailUrlTokenizer returns a UaxEmailUrlTokenizer. 
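+//
+// Sketch of overriding the token-length cap (255 mirrors the server-side
+// default):
+//
+//	t := NewUaxEmailUrlTokenizer()
+//	n := 255
+//	t.MaxTokenLength = &n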
+func NewUaxEmailUrlTokenizer() *UaxEmailUrlTokenizer { + r := &UaxEmailUrlTokenizer{} + + return r +} + +type UaxEmailUrlTokenizerVariant interface { + UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer +} + +func (s *UaxEmailUrlTokenizer) UaxEmailUrlTokenizerCaster() *UaxEmailUrlTokenizer { + return s +} + +func (s *UaxEmailUrlTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unassignedinformation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unassignedinformation.go new file mode 100644 index 000000000..e87c91dcc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unassignedinformation.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason" +) + +// UnassignedInformation type. 
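+//
+// Describes why a shard copy is currently unassigned, as returned by the
+// cluster allocation explain API.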
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/allocation_explain/types.ts#L128-L136 +type UnassignedInformation struct { + AllocationStatus *string `json:"allocation_status,omitempty"` + At DateTime `json:"at"` + Delayed *bool `json:"delayed,omitempty"` + Details *string `json:"details,omitempty"` + FailedAllocationAttempts *int `json:"failed_allocation_attempts,omitempty"` + LastAllocationStatus *string `json:"last_allocation_status,omitempty"` + Reason unassignedinformationreason.UnassignedInformationReason `json:"reason"` +} + +func (s *UnassignedInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "AllocationStatus", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllocationStatus = &o + + case "at": + if err := dec.Decode(&s.At); err != nil { + return fmt.Errorf("%s | %w", "At", err) + } + + case "delayed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Delayed", err) + } + s.Delayed = &value + case bool: + s.Delayed = &v + } + + case "details": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Details", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Details = &o + + case "failed_allocation_attempts": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "FailedAllocationAttempts", err) + } + s.FailedAllocationAttempts = &value + case float64: + f := int(v) + s.FailedAllocationAttempts = &f + } + + case "last_allocation_status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LastAllocationStatus", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastAllocationStatus = &o + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return fmt.Errorf("%s | %w", "Reason", err) + } + + } + } + return nil +} + +// NewUnassignedInformation returns a UnassignedInformation. +func NewUnassignedInformation() *UnassignedInformation { + r := &UnassignedInformation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uniquetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uniquetokenfilter.go new file mode 100644 index 000000000..0d162b7cf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uniquetokenfilter.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UniqueTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L441-L445 +type UniqueTokenFilter struct { + // OnlyOnSamePosition If `true`, only remove duplicate tokens in the same position. Defaults to + // `false`. + OnlyOnSamePosition *bool `json:"only_on_same_position,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *UniqueTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "only_on_same_position": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "OnlyOnSamePosition", err) + } + s.OnlyOnSamePosition = &value + case bool: + s.OnlyOnSamePosition = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UniqueTokenFilter) MarshalJSON() ([]byte, error) { + type innerUniqueTokenFilter UniqueTokenFilter + tmp := innerUniqueTokenFilter{ + OnlyOnSamePosition: s.OnlyOnSamePosition, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "unique" + + return json.Marshal(tmp) +} + +// NewUniqueTokenFilter returns a UniqueTokenFilter. +func NewUniqueTokenFilter() *UniqueTokenFilter { + r := &UniqueTokenFilter{} + + return r +} + +type UniqueTokenFilterVariant interface { + UniqueTokenFilterCaster() *UniqueTokenFilter +} + +func (s *UniqueTokenFilter) UniqueTokenFilterCaster() *UniqueTokenFilter { + return s +} + +func (s *UniqueTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedraretermsaggregate.go new file mode 100644 index 000000000..6ceb324cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedraretermsaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UnmappedRareTermsAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L493-L499 +type UnmappedRareTermsAggregate struct { + Buckets BucketsVoid `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *UnmappedRareTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []any{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewUnmappedRareTermsAggregate returns a UnmappedRareTermsAggregate. +func NewUnmappedRareTermsAggregate() *UnmappedRareTermsAggregate { + r := &UnmappedRareTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsampleraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsampleraggregate.go new file mode 100644 index 000000000..60dee067f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsampleraggregate.go @@ -0,0 +1,638 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// UnmappedSamplerAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L561-L562 +type UnmappedSamplerAggregate struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *UnmappedSamplerAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + 
case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o 
+ + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { 
+ return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | 
%w", "Aggregations", err)
+						}
+						s.Aggregations[elems[1]] = o
+
+					case "geo_line":
+						o := NewGeoLineAggregate()
+						if err := dec.Decode(&o); err != nil {
+							return fmt.Errorf("%s | %w", "Aggregations", err)
+						}
+						s.Aggregations[elems[1]] = o
+
+					default:
+						o := make(map[string]any, 0)
+						if err := dec.Decode(&o); err != nil {
+							return fmt.Errorf("%s | %w", "Aggregations", err)
+						}
+						s.Aggregations[elems[1]] = o
+					}
+				} else {
+					return errors.New("cannot decode JSON for field Aggregations")
+				}
+			} else {
+				o := make(map[string]any, 0)
+				if err := dec.Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "Aggregations", err)
+				}
+				s.Aggregations[value] = o
+			}
+		}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s UnmappedSamplerAggregate) MarshalJSON() ([]byte, error) {
+	type opt UnmappedSamplerAggregate
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.Aggregations {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "Aggregations")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewUnmappedSamplerAggregate returns a UnmappedSamplerAggregate.
+func NewUnmappedSamplerAggregate() *UnmappedSamplerAggregate {
+	r := &UnmappedSamplerAggregate{
+		Aggregations: make(map[string]Aggregate),
+	}
+
+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsignificanttermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsignificanttermsaggregate.go
new file mode 100644
index 000000000..ca9c92805
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedsignificanttermsaggregate.go
@@ -0,0 +1,123 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// UnmappedSignificantTermsAggregate type.
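+//
+// Result of a significant_terms aggregation that targeted an unmapped field;
+// Buckets is typed Void because such a response never carries buckets.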
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L692-L698 +type UnmappedSignificantTermsAggregate struct { + BgCount *int64 `json:"bg_count,omitempty"` + Buckets BucketsVoid `json:"buckets"` + DocCount *int64 `json:"doc_count,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *UnmappedSignificantTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bg_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "BgCount", err) + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f + } + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []any{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewUnmappedSignificantTermsAggregate returns a UnmappedSignificantTermsAggregate. +func NewUnmappedSignificantTermsAggregate() *UnmappedSignificantTermsAggregate { + r := &UnmappedSignificantTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedtermsaggregate.go new file mode 100644 index 000000000..7e1178ee5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unmappedtermsaggregate.go @@ -0,0 +1,123 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UnmappedTermsAggregate type. 
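+//
+// Counterpart of UnmappedSignificantTermsAggregate for the plain terms
+// aggregation: Buckets stays empty and only the optional counters may be set.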
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L463-L469 +type UnmappedTermsAggregate struct { + Buckets BucketsVoid `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` +} + +func (s *UnmappedTermsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]any, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []any{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "doc_count_error_upper_bound": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCountErrorUpperBound", err) + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "sum_other_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "SumOtherDocCount", err) + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f + } + + } + } + return nil +} + +// NewUnmappedTermsAggregate returns a UnmappedTermsAggregate. +func NewUnmappedTermsAggregate() *UnmappedTermsAggregate { + r := &UnmappedTermsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unrateddocument.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unrateddocument.go new file mode 100644 index 000000000..9e8903c62 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unrateddocument.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UnratedDocument type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/rank_eval/types.ts#L150-L153 +type UnratedDocument struct { + Id_ string `json:"_id"` + Index_ string `json:"_index"` +} + +func (s *UnratedDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + } + } + return nil +} + +// NewUnratedDocument returns a UnratedDocument. +func NewUnratedDocument() *UnratedDocument { + r := &UnratedDocument{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unsignedlongnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unsignedlongnumberproperty.go new file mode 100644 index 000000000..2ad058ebe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/unsignedlongnumberproperty.go @@ -0,0 +1,988 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype" +) + +// UnsignedLongNumberProperty type. 
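+//
+// Editorial note (not part of the generated file): the custom MarshalJSON
+// below always pins the "type" discriminator, so a freshly constructed
+// property serializes correctly even when Type is left empty. A minimal
+// sketch:
+//
+//	p := NewUnsignedLongNumberProperty()
+//	data, _ := json.Marshal(p) // data contains "type":"unsigned_long"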
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L192-L195
+type UnsignedLongNumberProperty struct {
+	Boost *Float64 `json:"boost,omitempty"`
+	Coerce *bool `json:"coerce,omitempty"`
+	CopyTo []string `json:"copy_to,omitempty"`
+	DocValues *bool `json:"doc_values,omitempty"`
+	Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"`
+	Fields map[string]Property `json:"fields,omitempty"`
+	IgnoreAbove *int `json:"ignore_above,omitempty"`
+	IgnoreMalformed *bool `json:"ignore_malformed,omitempty"`
+	Index *bool `json:"index,omitempty"`
+	// Meta Metadata about the field.
+	Meta map[string]string `json:"meta,omitempty"`
+	NullValue *uint64 `json:"null_value,omitempty"`
+	OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"`
+	Properties map[string]Property `json:"properties,omitempty"`
+	Script *Script `json:"script,omitempty"`
+	Store *bool `json:"store,omitempty"`
+	SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"`
+	// TimeSeriesDimension For internal use by Elastic only. Marks the field as a time series dimension.
+	// Defaults to false.
+	TimeSeriesDimension *bool `json:"time_series_dimension,omitempty"`
+	// TimeSeriesMetric For internal use by Elastic only. Marks the field as a time series metric.
+	// Defaults to false.
+	TimeSeriesMetric *timeseriesmetrictype.TimeSeriesMetricType `json:"time_series_metric,omitempty"`
+	Type string `json:"type,omitempty"`
+}
+
+func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := Float64(value)
+				s.Boost = &f
+			case float64:
+				f := Float64(v)
+				s.Boost = &f
+			}
+
+		case "coerce":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Coerce", err)
+				}
+				s.Coerce = &value
+			case bool:
+				s.Coerce = &v
+			}
+
+		case "copy_to":
+			rawMsg := json.RawMessage{}
+			dec.Decode(&rawMsg)
+			if !bytes.HasPrefix(rawMsg, []byte("[")) {
+				o := new(string)
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
+					return fmt.Errorf("%s | %w", "CopyTo", err)
+				}
+
+				s.CopyTo = append(s.CopyTo, *o)
+			} else {
+				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil {
+					return fmt.Errorf("%s | %w", "CopyTo", err)
+				}
+			}
+
+		case "doc_values":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "DocValues", err)
+				}
+				s.DocValues = &value
+			case bool:
+				s.DocValues = &v
+			}
+
+		case "dynamic":
+			if err := dec.Decode(&s.Dynamic); err != nil {
+				return fmt.Errorf("%s | %w", "Dynamic", err)
+			}
+
+		case "fields":
+			if s.Fields == nil {
+				s.Fields = make(map[string]Property, 0)
+			}
+			refs := make(map[string]json.RawMessage, 0)
+			dec.Decode(&refs)
+			for key, message := range refs {
+				kind := make(map[string]any)
+				buf := bytes.NewReader(message)
+				localDec := json.NewDecoder(buf)
+				localDec.Decode(&kind)
+				buf.Seek(0, io.SeekStart)
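+				// Editorial note (not part of the generated file): each sub-property is
+				// decoded twice: first into a generic map to peek at its "type"
+				// discriminator (defaulting to "object" when the key is absent), then
+				// into the concrete Property implementation chosen by the switch below.
+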
if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": 
+ oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", 
err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "ignore_malformed": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMalformed", err) + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "index": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + s.Index = &value + case bool: + s.Index = &v + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + if err := dec.Decode(&s.NullValue); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + + case "on_script_error": + if err := dec.Decode(&s.OnScriptError); err != nil { + return fmt.Errorf("%s | %w", "OnScriptError", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + 
s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "time_series_dimension": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesDimension", err) + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return fmt.Errorf("%s | %w", "TimeSeriesMetric", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) { + type innerUnsignedLongNumberProperty UnsignedLongNumberProperty + tmp := innerUnsignedLongNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: 
s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "unsigned_long" + + return json.Marshal(tmp) +} + +// NewUnsignedLongNumberProperty returns a UnsignedLongNumberProperty. +func NewUnsignedLongNumberProperty() *UnsignedLongNumberProperty { + r := &UnsignedLongNumberProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type UnsignedLongNumberPropertyVariant interface { + UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty +} + +func (s *UnsignedLongNumberProperty) UnsignedLongNumberPropertyCaster() *UnsignedLongNumberProperty { + return s +} + +func (s *UnsignedLongNumberProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddecayfunction.go new file mode 100644 index 000000000..e73dc384a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddecayfunction.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode" +) + +// UntypedDecayFunction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/compound.ts#L204-L207 +type UntypedDecayFunction struct { + DecayFunctionBase map[string]DecayPlacement `json:"-"` + // MultiValueMode Determines how the distance is calculated when a field used for computing the + // decay contains multiple values. 
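+	// Editorial note (not part of the generated file): the per-field decay
+	// placements live in DecayFunctionBase and are inlined by the MarshalJSON
+	// below, so a populated value serializes to the usual query DSL shape,
+	// e.g. (with a hypothetical "location" field):
+	//
+	//	{"multi_value_mode":"avg","location":{"origin":"11,12","scale":"2km"}}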
+	MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s UntypedDecayFunction) MarshalJSON() ([]byte, error) {
+	type opt UntypedDecayFunction
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.DecayFunctionBase {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "DecayFunctionBase")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewUntypedDecayFunction returns a UntypedDecayFunction.
+func NewUntypedDecayFunction() *UntypedDecayFunction {
+	r := &UntypedDecayFunction{
+		DecayFunctionBase: make(map[string]DecayPlacement),
+	}
+
+	return r
+}
+
+type UntypedDecayFunctionVariant interface {
+	UntypedDecayFunctionCaster() *UntypedDecayFunction
+}
+
+func (s *UntypedDecayFunction) UntypedDecayFunctionCaster() *UntypedDecayFunction {
+	return s
+}
+
+func (s *UntypedDecayFunction) DecayFunctionCaster() *DecayFunction {
+	o := DecayFunction(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddistancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddistancefeaturequery.go
new file mode 100644
index 000000000..0ab1a293b
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypeddistancefeaturequery.go
@@ -0,0 +1,146 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// UntypedDistanceFeatureQuery type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/specialized.ts#L61-L64
+type UntypedDistanceFeatureQuery struct {
+	// Boost Floating point number used to decrease or increase the relevance scores of
+	// the query.
+	// Boost values are relative to the default value of 1.0.
+	// A boost value between 0 and 1.0 decreases the relevance score.
+	// A value greater than 1.0 increases the relevance score.
+	Boost *float32 `json:"boost,omitempty"`
+	// Field Name of the field used to calculate distances. This field must meet the
+	// following criteria:
+	// be a `date`, `date_nanos` or `geo_point` field;
+	// have an `index` mapping parameter value of `true`, which is the default;
+	// have a `doc_values` mapping parameter value of `true`, which is the default.
+	Field string `json:"field"`
+	// Origin Date or point of origin used to calculate distances.
+	// If the `field` value is a `date` or `date_nanos` field, the `origin` value
+	// must be a date.
+	// Date Math, such as `now-1h`, is supported.
+	// If the field value is a `geo_point` field, the `origin` value must be a
+	// geopoint.
+	Origin json.RawMessage `json:"origin,omitempty"`
+	// Pivot Distance from the `origin` at which relevance scores receive half of the
+	// `boost` value.
+	// If the `field` value is a `date` or `date_nanos` field, the `pivot` value
+	// must be a time unit, such as `1h` or `10d`. If the `field` value is a
+	// `geo_point` field, the `pivot` value must be a distance unit, such as `1km`
+	// or `12m`.
+	Pivot json.RawMessage `json:"pivot,omitempty"`
+	QueryName_ *string `json:"_name,omitempty"`
+}
+
+func (s *UntypedDistanceFeatureQuery) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "boost":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseFloat(v, 32)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Boost", err)
+				}
+				f := float32(value)
+				s.Boost = &f
+			case float64:
+				f := float32(v)
+				s.Boost = &f
+			}
+
+		case "field":
+			if err := dec.Decode(&s.Field); err != nil {
+				return fmt.Errorf("%s | %w", "Field", err)
+			}
+
+		case "origin":
+			if err := dec.Decode(&s.Origin); err != nil {
+				return fmt.Errorf("%s | %w", "Origin", err)
+			}
+
+		case "pivot":
+			if err := dec.Decode(&s.Pivot); err != nil {
+				return fmt.Errorf("%s | %w", "Pivot", err)
+			}
+
+		case "_name":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "QueryName_", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.QueryName_ = &o
+
+		}
+	}
+	return nil
+}
+
+// NewUntypedDistanceFeatureQuery returns a UntypedDistanceFeatureQuery.
+func NewUntypedDistanceFeatureQuery() *UntypedDistanceFeatureQuery {
+	r := &UntypedDistanceFeatureQuery{}
+
+	return r
+}
+
+type UntypedDistanceFeatureQueryVariant interface {
+	UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery
+}
+
+func (s *UntypedDistanceFeatureQuery) UntypedDistanceFeatureQueryCaster() *UntypedDistanceFeatureQuery {
+	return s
+}
+
+func (s *UntypedDistanceFeatureQuery) DistanceFeatureQueryCaster() *DistanceFeatureQuery {
+	o := DistanceFeatureQuery(s)
+	return &o
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypedrangequery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypedrangequery.go
new file mode 100644
index 000000000..76ed13ed5
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/untypedrangequery.go
@@ -0,0 +1,163 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation" +) + +// UntypedRangeQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L146-L155 +type UntypedRangeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Format Date format used to convert `date` values in the query. + Format *string `json:"format,omitempty"` + // Gt Greater than. + Gt json.RawMessage `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte json.RawMessage `json:"gte,omitempty"` + // Lt Less than. + Lt json.RawMessage `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte json.RawMessage `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. + Relation *rangerelation.RangeRelation `json:"relation,omitempty"` + // TimeZone Coordinated Universal Time (UTC) offset or IANA time zone used to convert + // `date` values in the query to UTC. 
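+	// Editorial note (not part of the generated file): a typical payload the
+	// UnmarshalJSON below accepts; the range bounds stay raw JSON so callers
+	// can interpret them per field type:
+	//
+	//	{"gte":"2024-01-01","lt":"2025-01-01","format":"yyyy-MM-dd","boost":2.0}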
+ TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *UntypedRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + + case "gt": + if err := dec.Decode(&s.Gt); err != nil { + return fmt.Errorf("%s | %w", "Gt", err) + } + + case "gte": + if err := dec.Decode(&s.Gte); err != nil { + return fmt.Errorf("%s | %w", "Gte", err) + } + + case "lt": + if err := dec.Decode(&s.Lt); err != nil { + return fmt.Errorf("%s | %w", "Lt", err) + } + + case "lte": + if err := dec.Decode(&s.Lte); err != nil { + return fmt.Errorf("%s | %w", "Lte", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return fmt.Errorf("%s | %w", "Relation", err) + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return fmt.Errorf("%s | %w", "TimeZone", err) + } + + } + } + return nil +} + +// NewUntypedRangeQuery returns a UntypedRangeQuery. +func NewUntypedRangeQuery() *UntypedRangeQuery { + r := &UntypedRangeQuery{} + + return r +} + +type UntypedRangeQueryVariant interface { + UntypedRangeQueryCaster() *UntypedRangeQuery +} + +func (s *UntypedRangeQuery) UntypedRangeQueryCaster() *UntypedRangeQuery { + return s +} + +func (s *UntypedRangeQuery) RangeQueryCaster() *RangeQuery { + o := RangeQuery(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateaction.go new file mode 100644 index 000000000..6eab7a6de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateaction.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UpdateAction type. 
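+//
+// Editorial note (not part of the generated file): a minimal partial-update
+// body this type models; `doc` is kept as raw JSON, and `doc_as_upsert` lets
+// the same partial document double as the upsert (field values illustrative):
+//
+//	{"doc":{"status":"active"},"doc_as_upsert":true,"detect_noop":true}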
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L182-L217 +type UpdateAction struct { + // DetectNoop If true, the `result` in the response is set to 'noop' when no changes to the + // document occur. + DetectNoop *bool `json:"detect_noop,omitempty"` + // Doc A partial update to an existing document. + Doc json.RawMessage `json:"doc,omitempty"` + // DocAsUpsert Set to `true` to use the contents of `doc` as the value of `upsert`. + DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` + // Script The script to run to update the document. + Script *Script `json:"script,omitempty"` + // ScriptedUpsert Set to `true` to run the script whether or not the document exists. + ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` + // Source_ If `false`, source retrieval is turned off. + // You can also specify a comma-separated list of the fields you want to + // retrieve. + Source_ SourceConfig `json:"_source,omitempty"` + // Upsert If the document does not already exist, the contents of `upsert` are inserted + // as a new document. + // If the document exists, the `script` is run. + Upsert json.RawMessage `json:"upsert,omitempty"` +} + +func (s *UpdateAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "detect_noop": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DetectNoop", err) + } + s.DetectNoop = &value + case bool: + s.DetectNoop = &v + } + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return fmt.Errorf("%s | %w", "Doc", err) + } + + case "doc_as_upsert": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocAsUpsert", err) + } + s.DocAsUpsert = &value + case bool: + s.DocAsUpsert = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "scripted_upsert": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ScriptedUpsert", err) + } + s.ScriptedUpsert = &value + case bool: + s.ScriptedUpsert = &v + } + + case "_source": + message := json.RawMessage{} + if err := dec.Decode(&message); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + keyDec := json.NewDecoder(bytes.NewReader(message)) + source__field: + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Source_", err) + } + + switch t { + + case "exclude_vectors", "excludes", "includes": + o := NewSourceFilter() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + s.Source_ = o + break source__field + + } + } + if s.Source_ == nil { + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&s.Source_); err != nil { + return fmt.Errorf("%s | %w", "Source_", err) + } + } + + case "upsert": + if err := dec.Decode(&s.Upsert); err != nil { + return fmt.Errorf("%s | %w", "Upsert", err) + } + + } + } + return nil +} + +// NewUpdateAction returns a 
UpdateAction. +func NewUpdateAction() *UpdateAction { + r := &UpdateAction{} + + return r +} + +type UpdateActionVariant interface { + UpdateActionCaster() *UpdateAction +} + +func (s *UpdateAction) UpdateActionCaster() *UpdateAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updatebyqueryrethrottlenode.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updatebyqueryrethrottlenode.go new file mode 100644 index 000000000..682fd7daf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updatebyqueryrethrottlenode.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole" +) + +// UpdateByQueryRethrottleNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 +type UpdateByQueryRethrottleNode struct { + Attributes map[string]string `json:"attributes"` + Host string `json:"host"` + Ip string `json:"ip"` + Name string `json:"name"` + Roles []noderole.NodeRole `json:"roles,omitempty"` + Tasks map[string]TaskInfo `json:"tasks"` + TransportAddress string `json:"transport_address"` +} + +func (s *UpdateByQueryRethrottleNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return fmt.Errorf("%s | %w", "Attributes", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return fmt.Errorf("%s | %w", "Ip", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = make(map[string]TaskInfo, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return fmt.Errorf("%s | %w", "Tasks", err) + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return fmt.Errorf("%s | %w", "TransportAddress", err) + } + + } + } + return nil +} + +// NewUpdateByQueryRethrottleNode returns a UpdateByQueryRethrottleNode. +func NewUpdateByQueryRethrottleNode() *UpdateByQueryRethrottleNode { + r := &UpdateByQueryRethrottleNode{ + Attributes: make(map[string]string), + Tasks: make(map[string]TaskInfo), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateddatastreamsettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateddatastreamsettings.go new file mode 100644 index 000000000..ad6e3f5ef --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateddatastreamsettings.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UpdatedDataStreamSettings type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/put_data_stream_settings/IndicesPutDataStreamSettingsResponse.ts#L30-L55
+type UpdatedDataStreamSettings struct {
+	// AppliedToDataStream If the settings were successfully applied to the data stream (or would have
+	// been, if running in `dry_run`
+	// mode), it is `true`. If an error occurred, it is `false`.
+	AppliedToDataStream bool `json:"applied_to_data_stream"`
+	// EffectiveSettings The settings that are effective on this data stream, taking into account the
+	// settings from the matching index
+	// template and the settings specific to this data stream.
+	EffectiveSettings IndexSettings `json:"effective_settings"`
+	// Error A message explaining why the settings could not be applied to the data
+	// stream.
+	Error *string `json:"error,omitempty"`
+	// IndexSettingsResults Information about whether and where each setting was applied.
+	IndexSettingsResults IndexSettingResults `json:"index_settings_results"`
+	// Name The data stream name.
+	Name string `json:"name"`
+	// Settings The settings that are specific to this data stream that will override any
+	// settings from the matching index template.
+	Settings IndexSettings `json:"settings"`
+}
+
+func (s *UpdatedDataStreamSettings) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "applied_to_data_stream":
+			var tmp any
+			dec.Decode(&tmp)
+			switch v := tmp.(type) {
+			case string:
+				value, err := strconv.ParseBool(v)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "AppliedToDataStream", err)
+				}
+				s.AppliedToDataStream = value
+			case bool:
+				s.AppliedToDataStream = v
+			}
+
+		case "effective_settings":
+			if err := dec.Decode(&s.EffectiveSettings); err != nil {
+				return fmt.Errorf("%s | %w", "EffectiveSettings", err)
+			}
+
+		case "error":
+			var tmp json.RawMessage
+			if err := dec.Decode(&tmp); err != nil {
+				return fmt.Errorf("%s | %w", "Error", err)
+			}
+			o := string(tmp[:])
+			o, err = strconv.Unquote(o)
+			if err != nil {
+				o = string(tmp[:])
+			}
+			s.Error = &o
+
+		case "index_settings_results":
+			if err := dec.Decode(&s.IndexSettingsResults); err != nil {
+				return fmt.Errorf("%s | %w", "IndexSettingsResults", err)
+			}
+
+		case "name":
+			if err := dec.Decode(&s.Name); err != nil {
+				return fmt.Errorf("%s | %w", "Name", err)
+			}
+
+		case "settings":
+			if err := dec.Decode(&s.Settings); err != nil {
+				return fmt.Errorf("%s | %w", "Settings", err)
+			}
+
+		}
+	}
+	return nil
+}
+
+// NewUpdatedDataStreamSettings returns a UpdatedDataStreamSettings.
+func NewUpdatedDataStreamSettings() *UpdatedDataStreamSettings {
+	r := &UpdatedDataStreamSettings{}

+	return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateoperation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateoperation.go
new file mode 100644
index 000000000..de6661a1e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/updateoperation.go
@@ -0,0 +1,163 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype" +) + +// UpdateOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_global/bulk/types.ts#L146-L156 +type UpdateOperation struct { + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ The name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // RequireAlias If `true`, the request's actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // RetryOnConflict The number of times an update should be retried in the case of a version + // conflict. + RetryOnConflict *int `json:"retry_on_conflict,omitempty"` + // Routing A custom value used to route operations to a specific shard. 
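+	// Editorial note (not part of the generated file): together with IfSeqNo
+	// and IfPrimaryTerm above, a bulk action line might look like
+	//
+	//	{"update":{"_id":"1","routing":"user-42","if_seq_no":3,"if_primary_term":1}}
+	//
+	// where the values are illustrative; the operation fails with a version
+	// conflict if the stored document has since changed.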
+ Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *UpdateOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "if_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "IfPrimaryTerm", err) + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return fmt.Errorf("%s | %w", "IfSeqNo", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "require_alias": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RequireAlias", err) + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "retry_on_conflict": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "RetryOnConflict", err) + } + s.RetryOnConflict = &value + case float64: + f := int(v) + s.RetryOnConflict = &f + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return fmt.Errorf("%s | %w", "VersionType", err) + } + + } + } + return nil +} + +// NewUpdateOperation returns a UpdateOperation. +func NewUpdateOperation() *UpdateOperation { + r := &UpdateOperation{} + + return r +} + +type UpdateOperationVariant interface { + UpdateOperationCaster() *UpdateOperation +} + +func (s *UpdateOperation) UpdateOperationCaster() *UpdateOperation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercaseprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercaseprocessor.go new file mode 100644 index 000000000..f613b69fb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercaseprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
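// Illustrative sketch (not part of the vendored diff): UpdateOperation above is
// the typed action line of a bulk `update` request. The snippet below is rough
// consumer-side usage; `ptr` is a made-up local helper and "doc-1" a made-up ID,
// and it assumes encoding/json plus this types package are imported.
//
//	func ptr[T any](v T) *T { return &v }
//
//	op := types.NewUpdateOperation()
//	op.Id_ = ptr("doc-1")
//	op.RetryOnConflict = ptr(3)
//	line, _ := json.Marshal(map[string]*types.UpdateOperation{"update": op})
//	// line: {"update":{"_id":"doc-1","retry_on_conflict":3}}
//
// The `omitempty` tags keep every unset optional field out of the action line.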
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UppercaseProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1562-L1578 +type UppercaseProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to make uppercase. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *UppercaseProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewUppercaseProcessor returns a UppercaseProcessor. 
+func NewUppercaseProcessor() *UppercaseProcessor { + r := &UppercaseProcessor{} + + return r +} + +type UppercaseProcessorVariant interface { + UppercaseProcessorCaster() *UppercaseProcessor +} + +func (s *UppercaseProcessor) UppercaseProcessorCaster() *UppercaseProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercasetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercasetokenfilter.go new file mode 100644 index 000000000..155c763bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uppercasetokenfilter.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UppercaseTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L447-L449 +type UppercaseTokenFilter struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *UppercaseTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UppercaseTokenFilter) MarshalJSON() ([]byte, error) { + type innerUppercaseTokenFilter UppercaseTokenFilter + tmp := innerUppercaseTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "uppercase" + + return json.Marshal(tmp) +} + +// NewUppercaseTokenFilter returns a UppercaseTokenFilter. 
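// Illustrative sketch (not part of the vendored diff): the MarshalJSON override
// above pins the `type` discriminator, so even a zero value serializes with its
// literal name. Assuming encoding/json and this types package are imported:
//
//	f := types.NewUppercaseTokenFilter()
//	data, _ := json.Marshal(f)
//	// data: {"type":"uppercase"}  (Version is omitted while nil)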
+func NewUppercaseTokenFilter() *UppercaseTokenFilter { + r := &UppercaseTokenFilter{} + + return r +} + +type UppercaseTokenFilterVariant interface { + UppercaseTokenFilterCaster() *UppercaseTokenFilter +} + +func (s *UppercaseTokenFilter) UppercaseTokenFilterCaster() *UppercaseTokenFilter { + return s +} + +func (s *UppercaseTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uripartsprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uripartsprocessor.go new file mode 100644 index 000000000..87e7c5456 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/uripartsprocessor.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UriPartsProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1598-L1624 +type UriPartsProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing the URI string. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // KeepOriginal If `true`, the processor copies the unparsed URI to + // `.original`. + KeepOriginal *bool `json:"keep_original,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // RemoveIfSuccessful If `true`, the processor removes the `field` after parsing the URI string. + // If parsing fails, the processor does not remove the `field`. + RemoveIfSuccessful *bool `json:"remove_if_successful,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Output field for the URI object. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *UriPartsProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "keep_original": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "KeepOriginal", err) + } + s.KeepOriginal = &value + case bool: + s.KeepOriginal = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "remove_if_successful": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "RemoveIfSuccessful", err) + } + s.RemoveIfSuccessful = &value + case bool: + s.RemoveIfSuccessful = &v + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewUriPartsProcessor returns a UriPartsProcessor. +func NewUriPartsProcessor() *UriPartsProcessor { + r := &UriPartsProcessor{} + + return r +} + +type UriPartsProcessorVariant interface { + UriPartsProcessorCaster() *UriPartsProcessor +} + +func (s *UriPartsProcessor) UriPartsProcessorCaster() *UriPartsProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/urldecodeprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/urldecodeprocessor.go new file mode 100644 index 000000000..04fb4294c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/urldecodeprocessor.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UrlDecodeProcessor type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L1580-L1596 +type UrlDecodeProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to decode. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *UrlDecodeProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewUrlDecodeProcessor returns a UrlDecodeProcessor. +func NewUrlDecodeProcessor() *UrlDecodeProcessor { + r := &UrlDecodeProcessor{} + + return r +} + +type UrlDecodeProcessorVariant interface { + UrlDecodeProcessorCaster() *UrlDecodeProcessor +} + +func (s *UrlDecodeProcessor) UrlDecodeProcessorCaster() *UrlDecodeProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephase.go new file mode 100644 index 000000000..325ba8732 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephase.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
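// Illustrative sketch (not part of the vendored diff): the hand-written
// UnmarshalJSON methods in these processor types decode scalars leniently,
// accepting a JSON bool or its quoted string form interchangeably. Assuming
// encoding/json and this types package are imported:
//
//	var p types.UrlDecodeProcessor
//	_ = json.Unmarshal([]byte(`{"field":"url","ignore_missing":"true"}`), &p)
//	// p.IgnoreMissing points at true; the quoted form went through strconv.ParseBool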
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UsagePhase type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L154-L157 +type UsagePhase struct { + Actions []string `json:"actions"` + MinAge int64 `json:"min_age"` +} + +func (s *UsagePhase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return fmt.Errorf("%s | %w", "MinAge", err) + } + + } + } + return nil +} + +// NewUsagePhase returns a UsagePhase. +func NewUsagePhase() *UsagePhase { + r := &UsagePhase{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephases.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephases.go new file mode 100644 index 000000000..2e8a9cc21 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagephases.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// UsagePhases type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L159-L165 +type UsagePhases struct { + Cold *UsagePhase `json:"cold,omitempty"` + Delete *UsagePhase `json:"delete,omitempty"` + Frozen *UsagePhase `json:"frozen,omitempty"` + Hot *UsagePhase `json:"hot,omitempty"` + Warm *UsagePhase `json:"warm,omitempty"` +} + +// NewUsagePhases returns a UsagePhases. +func NewUsagePhases() *UsagePhases { + r := &UsagePhases{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsindex.go new file mode 100644 index 000000000..820ab35cd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsindex.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// UsageStatsIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L42-L44 +type UsageStatsIndex struct { + Shards []UsageStatsShards `json:"shards"` +} + +// NewUsageStatsIndex returns a UsageStatsIndex. +func NewUsageStatsIndex() *UsageStatsIndex { + r := &UsageStatsIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsshards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsshards.go new file mode 100644 index 000000000..442817644 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/usagestatsshards.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UsageStatsShards type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L46-L51 +type UsageStatsShards struct { + Routing ShardRouting `json:"routing"` + Stats IndicesShardsStats `json:"stats"` + TrackingId string `json:"tracking_id"` + TrackingStartedAtMillis int64 `json:"tracking_started_at_millis"` +} + +func (s *UsageStatsShards) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return fmt.Errorf("%s | %w", "Routing", err) + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return fmt.Errorf("%s | %w", "Stats", err) + } + + case "tracking_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TrackingId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrackingId = o + + case "tracking_started_at_millis": + if err := dec.Decode(&s.TrackingStartedAtMillis); err != nil { + return fmt.Errorf("%s | %w", "TrackingStartedAtMillis", err) + } + + } + } + return nil +} + +// NewUsageStatsShards returns a UsageStatsShards. +func NewUsageStatsShards() *UsageStatsShards { + r := &UsageStatsShards{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/user.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/user.go new file mode 100644 index 000000000..ef6a8dec2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/user.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// User type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/User.ts#L23-L31 +type User struct { + Email *string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName *string `json:"full_name,omitempty"` + Metadata Metadata `json:"metadata"` + ProfileUid *string `json:"profile_uid,omitempty"` + Roles []string `json:"roles"` + Username string `json:"username"` +} + +func (s *User) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "profile_uid": + if err := dec.Decode(&s.ProfileUid); err != nil { + return fmt.Errorf("%s | %w", "ProfileUid", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewUser returns a User. +func NewUser() *User { + r := &User{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/useragentprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/useragentprocessor.go new file mode 100644 index 000000000..d3f6a9844 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/useragentprocessor.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/useragentproperty" +) + +// UserAgentProcessor type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Processors.ts#L515-L546 +type UserAgentProcessor struct { + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // ExtractDeviceType Extracts device type from the user agent string on a best-effort basis. + ExtractDeviceType *bool `json:"extract_device_type,omitempty"` + // Field The field containing the user agent string. + Field string `json:"field"` + // If Conditionally execute the processor. + If *Script `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to `target_field`. + Properties []useragentproperty.UserAgentProperty `json:"properties,omitempty"` + // RegexFile The name of the file in the `config/ingest-user-agent` directory containing + // the regular expressions for parsing the user agent string. Both the directory + // and the file have to be created before starting Elasticsearch. If not + // specified, ingest-user-agent will use the `regexes.yaml` from uap-core it + // ships with. + RegexFile *string `json:"regex_file,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will be filled with the user agent details. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *UserAgentProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "extract_device_type": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ExtractDeviceType", err) + } + s.ExtractDeviceType = &value + case bool: + s.ExtractDeviceType = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "if": + if err := dec.Decode(&s.If); err != nil { + return fmt.Errorf("%s | %w", "If", err) + } + + case "ignore_failure": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreFailure", err) + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreMissing", err) + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return fmt.Errorf("%s | %w", "OnFailure", err) + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return fmt.Errorf("%s | %w", "Properties", err) + } + + case "regex_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "RegexFile", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RegexFile = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Tag", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return fmt.Errorf("%s | %w", "TargetField", err) + } + + } + } + return nil +} + +// NewUserAgentProcessor returns a UserAgentProcessor. +func NewUserAgentProcessor() *UserAgentProcessor { + r := &UserAgentProcessor{} + + return r +} + +type UserAgentProcessorVariant interface { + UserAgentProcessorCaster() *UserAgentProcessor +} + +func (s *UserAgentProcessor) UserAgentProcessorCaster() *UserAgentProcessor { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userindicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userindicesprivileges.go new file mode 100644 index 000000000..9ac51970b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userindicesprivileges.go @@ -0,0 +1,172 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege" +) + +// UserIndicesPrivileges type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/Privileges.ts#L294-L316 +type UserIndicesPrivileges struct { + // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that + // cover restricted indices. Implicitly, restricted indices have limited + // privileges that can cause pattern tests to fail. If restricted indices are + // explicitly included in the `names` list, Elasticsearch checks privileges + // against these indices regardless of the value set for + // `allow_restricted_indices`. + AllowRestrictedIndices bool `json:"allow_restricted_indices"` + // FieldSecurity The document fields that the owners of the role have read access to. + FieldSecurity []FieldSecurity `json:"field_security,omitempty"` + // Names A list of indices (or index name patterns) to which the permissions in this + // entry apply. + Names []string `json:"names"` + // Privileges The index level privileges that owners of the role have on the specified + // indices. + Privileges []indexprivilege.IndexPrivilege `json:"privileges"` + // Query Search queries that define the documents the user has access to. A document + // within the specified indices must match these queries for it to be accessible + // by the owners of the role. 
+ Query []IndicesPrivilegesQuery `json:"query,omitempty"` +} + +func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AllowRestrictedIndices", err) + } + s.AllowRestrictedIndices = value + case bool: + s.AllowRestrictedIndices = v + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return fmt.Errorf("%s | %w", "FieldSecurity", err) + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return fmt.Errorf("%s | %w", "Names", err) + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return fmt.Errorf("%s | %w", "Privileges", err) + } + + case "query": + messageArray := []json.RawMessage{} + if err := dec.Decode(&messageArray); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + query_field: + for _, message := range messageArray { + keyDec := json.NewDecoder(bytes.NewReader(message)) + for { + t, err := keyDec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("%s | %w", "Query", err) + } + + switch t { + + case "AdditionalQueryProperty", "bool", "boosting", "combined_fields", "common", "constant_score", "dis_max", "distance_feature", "exists", "function_score", "fuzzy", "geo_bounding_box", "geo_distance", "geo_grid", "geo_polygon", "geo_shape", "has_child", "has_parent", "ids", "intervals", "knn", "match", "match_all", "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", "more_like_this", "multi_match", "nested", "parent_id", "percolate", "pinned", "prefix", "query_string", "range", "rank_feature", "regexp", "rule", "script", "script_score", "semantic", "shape", "simple_query_string", "span_containing", "span_field_masking", "span_first", "span_multi", "span_near", "span_not", "span_or", "span_term", "span_within", "sparse_vector", "term", "terms", "terms_set", "text_expansion", "type", "weighted_tokens", "wildcard", "wrapper": + o := NewQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + case "template": + o := NewRoleTemplateQuery() + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + continue query_field + + } + } + + var o any + localDec := json.NewDecoder(bytes.NewReader(message)) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + s.Query = append(s.Query, o) + } + + } + } + return nil +} + +// NewUserIndicesPrivileges returns a UserIndicesPrivileges. 
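// Illustrative sketch (not part of the vendored diff): the `names` case in the
// decoder above normalizes a bare string into a one-element slice, so both
// encodings produce the same value. "logs-*" is a made-up pattern; assumes
// encoding/json and this types package are imported.
//
//	var p types.UserIndicesPrivileges
//	_ = json.Unmarshal([]byte(`{"allow_restricted_indices":"true","names":"logs-*"}`), &p)
//	// p.Names == []string{"logs-*"}; AllowRestrictedIndices is true (parsed from its string form)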
+func NewUserIndicesPrivileges() *UserIndicesPrivileges { + r := &UserIndicesPrivileges{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofile.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofile.go new file mode 100644 index 000000000..f30b91503 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofile.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UserProfile type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/UserProfile.ts#L41-L47 +type UserProfile struct { + Data map[string]json.RawMessage `json:"data"` + Enabled *bool `json:"enabled,omitempty"` + Labels map[string]json.RawMessage `json:"labels"` + Uid string `json:"uid"` + User UserProfileUser `json:"user"` +} + +func (s *UserProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "labels": + if s.Labels == nil { + s.Labels = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Labels); err != nil { + return fmt.Errorf("%s | %w", "Labels", err) + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return fmt.Errorf("%s | %w", "Uid", err) + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + + } + } + return nil +} + +// NewUserProfile returns a UserProfile. 
+func NewUserProfile() *UserProfile { + r := &UserProfile{ + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilehitmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilehitmetadata.go new file mode 100644 index 000000000..7e875a578 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilehitmetadata.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UserProfileHitMetadata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/UserProfile.ts#L27-L30 +type UserProfileHitMetadata struct { + PrimaryTerm_ int64 `json:"_primary_term"` + SeqNo_ int64 `json:"_seq_no"` +} + +func (s *UserProfileHitMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = value + case float64: + f := int64(v) + s.PrimaryTerm_ = f + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + } + } + return nil +} + +// NewUserProfileHitMetadata returns a UserProfileHitMetadata. +func NewUserProfileHitMetadata() *UserProfileHitMetadata { + r := &UserProfileHitMetadata{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofileuser.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofileuser.go new file mode 100644 index 000000000..c367e9878 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofileuser.go @@ -0,0 +1,106 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UserProfileUser type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/UserProfile.ts#L32-L39 +type UserProfileUser struct { + Email *string `json:"email,omitempty"` + FullName *string `json:"full_name,omitempty"` + RealmDomain *string `json:"realm_domain,omitempty"` + RealmName string `json:"realm_name"` + Roles []string `json:"roles"` + Username string `json:"username"` +} + +func (s *UserProfileUser) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = &o + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return fmt.Errorf("%s | %w", "FullName", err) + } + + case "realm_domain": + if err := dec.Decode(&s.RealmDomain); err != nil { + return fmt.Errorf("%s | %w", "RealmDomain", err) + } + + case "realm_name": + if err := dec.Decode(&s.RealmName); err != nil { + return fmt.Errorf("%s | %w", "RealmName", err) + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return fmt.Errorf("%s | %w", "Roles", err) + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return fmt.Errorf("%s | %w", "Username", err) + } + + } + } + return nil +} + +// NewUserProfileUser returns a UserProfileUser. +func NewUserProfileUser() *UserProfileUser { + r := &UserProfileUser{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilewithmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilewithmetadata.go new file mode 100644 index 000000000..12d49b08e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userprofilewithmetadata.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UserProfileWithMetadata type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/_types/UserProfile.ts#L49-L52 +type UserProfileWithMetadata struct { + Data map[string]json.RawMessage `json:"data"` + Doc_ UserProfileHitMetadata `json:"_doc"` + Enabled *bool `json:"enabled,omitempty"` + Labels map[string]json.RawMessage `json:"labels"` + LastSynchronized int64 `json:"last_synchronized"` + Uid string `json:"uid"` + User UserProfileUser `json:"user"` +} + +func (s *UserProfileWithMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Data); err != nil { + return fmt.Errorf("%s | %w", "Data", err) + } + + case "_doc": + if err := dec.Decode(&s.Doc_); err != nil { + return fmt.Errorf("%s | %w", "Doc_", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "labels": + if s.Labels == nil { + s.Labels = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Labels); err != nil { + return fmt.Errorf("%s | %w", "Labels", err) + } + + case "last_synchronized": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "LastSynchronized", err) + } + s.LastSynchronized = value + case float64: + f := int64(v) + s.LastSynchronized = f + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return fmt.Errorf("%s | %w", "Uid", err) + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + + } + } + return nil +} + +// NewUserProfileWithMetadata returns a UserProfileWithMetadata. +func NewUserProfileWithMetadata() *UserProfileWithMetadata { + r := &UserProfileWithMetadata{ + Data: make(map[string]json.RawMessage), + Labels: make(map[string]json.RawMessage), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userquerycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userquerycontainer.go new file mode 100644 index 000000000..072b76391 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userquerycontainer.go @@ -0,0 +1,230 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// UserQueryContainer type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/query_user/types.ts#L37-L101 +type UserQueryContainer struct { + AdditionalUserQueryContainerProperty map[string]json.RawMessage `json:"-"` + // Bool matches users matching boolean combinations of other queries. + Bool *BoolQuery `json:"bool,omitempty"` + // Exists Returns users that contain an indexed value for a field. + Exists *ExistsQuery `json:"exists,omitempty"` + // Ids Returns users based on their IDs. + // This query uses the user document IDs stored in the `_id` field. + Ids *IdsQuery `json:"ids,omitempty"` + // Match Returns users that match a provided text, number, date or boolean value. + // The provided text is analyzed before matching. + Match map[string]MatchQuery `json:"match,omitempty"` + // MatchAll Matches all users, giving them all a `_score` of 1.0. + MatchAll *MatchAllQuery `json:"match_all,omitempty"` + // Prefix Returns users that contain a specific prefix in a provided field. + Prefix map[string]PrefixQuery `json:"prefix,omitempty"` + // Range Returns users that contain terms within a provided range. + Range map[string]RangeQuery `json:"range,omitempty"` + // SimpleQueryString Returns users based on a provided query string, using a parser with a limited + // but fault-tolerant syntax. + SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` + // Term Returns users that contain an exact term in a provided field. + // To return a document, the query term must exactly match the queried field's + // value, including whitespace and capitalization. + Term map[string]TermQuery `json:"term,omitempty"` + // Terms Returns users that contain one or more exact terms in a provided field. + // To return a document, one or more terms must exactly match a field value, + // including whitespace and capitalization. + Terms *TermsQuery `json:"terms,omitempty"` + // Wildcard Returns users that contain terms matching a wildcard pattern. 
+	Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"`
+}
+
+func (s *UserQueryContainer) UnmarshalJSON(data []byte) error {
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return err
+		}
+
+		switch t {
+
+		case "bool":
+			if err := dec.Decode(&s.Bool); err != nil {
+				return fmt.Errorf("%s | %w", "Bool", err)
+			}
+
+		case "exists":
+			if err := dec.Decode(&s.Exists); err != nil {
+				return fmt.Errorf("%s | %w", "Exists", err)
+			}
+
+		case "ids":
+			if err := dec.Decode(&s.Ids); err != nil {
+				return fmt.Errorf("%s | %w", "Ids", err)
+			}
+
+		case "match":
+			if s.Match == nil {
+				s.Match = make(map[string]MatchQuery, 0)
+			}
+			if err := dec.Decode(&s.Match); err != nil {
+				return fmt.Errorf("%s | %w", "Match", err)
+			}
+
+		case "match_all":
+			if err := dec.Decode(&s.MatchAll); err != nil {
+				return fmt.Errorf("%s | %w", "MatchAll", err)
+			}
+
+		case "prefix":
+			if s.Prefix == nil {
+				s.Prefix = make(map[string]PrefixQuery, 0)
+			}
+			if err := dec.Decode(&s.Prefix); err != nil {
+				return fmt.Errorf("%s | %w", "Prefix", err)
+			}
+
+		case "range":
+			if s.Range == nil {
+				s.Range = make(map[string]RangeQuery, 0)
+			}
+			messages := make(map[string]json.RawMessage)
+			err := dec.Decode(&messages)
+			if err != nil {
+				return fmt.Errorf("%s | %w", "Range", err)
+			}
+
+			untyped := NewUntypedRangeQuery()
+			for key, message := range messages {
+				err := json.Unmarshal(message, &untyped)
+				if err != nil {
+					return fmt.Errorf("%s | %w", "Range", err)
+				}
+				s.Range[key] = untyped
+			}
+
+		case "simple_query_string":
+			if err := dec.Decode(&s.SimpleQueryString); err != nil {
+				return fmt.Errorf("%s | %w", "SimpleQueryString", err)
+			}
+
+		case "term":
+			if s.Term == nil {
+				s.Term = make(map[string]TermQuery, 0)
+			}
+			if err := dec.Decode(&s.Term); err != nil {
+				return fmt.Errorf("%s | %w", "Term", err)
+			}
+
+		case "terms":
+			if err := dec.Decode(&s.Terms); err != nil {
+				return fmt.Errorf("%s | %w", "Terms", err)
+			}
+
+		case "wildcard":
+			if s.Wildcard == nil {
+				s.Wildcard = make(map[string]WildcardQuery, 0)
+			}
+			if err := dec.Decode(&s.Wildcard); err != nil {
+				return fmt.Errorf("%s | %w", "Wildcard", err)
+			}
+
+		default:
+
+			if key, ok := t.(string); ok {
+				if s.AdditionalUserQueryContainerProperty == nil {
+					s.AdditionalUserQueryContainerProperty = make(map[string]json.RawMessage, 0)
+				}
+				raw := new(json.RawMessage)
+				if err := dec.Decode(&raw); err != nil {
+					return fmt.Errorf("%s | %w", "AdditionalUserQueryContainerProperty", err)
+				}
+				s.AdditionalUserQueryContainerProperty[key] = *raw
+			}
+
+		}
+	}
+	return nil
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s UserQueryContainer) MarshalJSON() ([]byte, error) {
+	type opt UserQueryContainer
+	// We transform the struct to a map without the embedded additional properties map
+	tmp := make(map[string]any, 0)
+
+	data, err := json.Marshal(opt(s))
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	// We inline the additional fields from the underlying map
+	for key, value := range s.AdditionalUserQueryContainerProperty {
+		tmp[fmt.Sprintf("%s", key)] = value
+	}
+	delete(tmp, "AdditionalUserQueryContainerProperty")
+
+	data, err = json.Marshal(tmp)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// NewUserQueryContainer returns a UserQueryContainer.
+func NewUserQueryContainer() *UserQueryContainer { + r := &UserQueryContainer{ + AdditionalUserQueryContainerProperty: make(map[string]json.RawMessage), + Match: make(map[string]MatchQuery), + Prefix: make(map[string]PrefixQuery), + Range: make(map[string]RangeQuery), + Term: make(map[string]TermQuery), + Wildcard: make(map[string]WildcardQuery), + } + + return r +} + +type UserQueryContainerVariant interface { + UserQueryContainerCaster() *UserQueryContainer +} + +func (s *UserQueryContainer) UserQueryContainerCaster() *UserQueryContainer { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userrealm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userrealm.go new file mode 100644 index 000000000..745626b2c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/userrealm.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// UserRealm type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/security/get_token/types.ts#L50-L53 +type UserRealm struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (s *UserRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + +// NewUserRealm returns a UserRealm. +func NewUserRealm() *UserRealm { + r := &UserRealm{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validation.go new file mode 100644 index 000000000..44c7c5480 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validation.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Validation holds the union for the following types: +// +// LessThanValidation +// GreaterThanValidation +// ListTypeValidation +// IncludedInValidation +// RegexValidation +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/connector/_types/Connector.ts#L50-L56 +type Validation any + +type ValidationVariant interface { + ValidationCaster() *Validation +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validationloss.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validationloss.go new file mode 100644 index 000000000..d9038eb7d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/validationloss.go @@ -0,0 +1,85 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ValidationLoss type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/DataframeAnalytics.ts#L571-L576 +type ValidationLoss struct { + // FoldValues Validation loss values for every added decision tree during the forest + // growing procedure. + FoldValues []string `json:"fold_values"` + // LossType The type of the loss metric. For example, binomial_logistic. 
+ LossType string `json:"loss_type"` +} + +func (s *ValidationLoss) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fold_values": + if err := dec.Decode(&s.FoldValues); err != nil { + return fmt.Errorf("%s | %w", "FoldValues", err) + } + + case "loss_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "LossType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LossType = o + + } + } + return nil +} + +// NewValidationLoss returns a ValidationLoss. +func NewValidationLoss() *ValidationLoss { + r := &ValidationLoss{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregate.go new file mode 100644 index 000000000..f1e3c5e92 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ValueCountAggregate type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L231-L236 +type ValueCountAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. 
+ Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *ValueCountAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewValueCountAggregate returns a ValueCountAggregate. +func NewValueCountAggregate() *ValueCountAggregate { + r := &ValueCountAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregation.go new file mode 100644 index 000000000..2fa40ce73 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/valuecountaggregation.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ValueCountAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L457-L457 +type ValueCountAggregation struct { + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing Missing `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *ValueCountAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewValueCountAggregation returns a ValueCountAggregation. +func NewValueCountAggregation() *ValueCountAggregation { + r := &ValueCountAggregation{} + + return r +} + +type ValueCountAggregationVariant interface { + ValueCountAggregationCaster() *ValueCountAggregation +} + +func (s *ValueCountAggregation) ValueCountAggregationCaster() *ValueCountAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregate.go new file mode 100644 index 000000000..f91d679e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregate.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// VariableWidthHistogramAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L402-L404 +type VariableWidthHistogramAggregate struct { + Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *VariableWidthHistogramAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]VariableWidthHistogramBucket, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + case '[': + o := []VariableWidthHistogramBucket{} + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + } + } + return nil +} + +// NewVariableWidthHistogramAggregate returns a VariableWidthHistogramAggregate. +func NewVariableWidthHistogramAggregate() *VariableWidthHistogramAggregate { + r := &VariableWidthHistogramAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregation.go new file mode 100644 index 000000000..178bf05a1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogramaggregation.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// VariableWidthHistogramAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/bucket.ts#L1091-L1115 +type VariableWidthHistogramAggregation struct { + // Buckets The target number of buckets. + Buckets *int `json:"buckets,omitempty"` + // Field The name of the field. + Field *string `json:"field,omitempty"` + // InitialBuffer Specifies the number of individual documents that will be stored in memory on + // a shard before the initial bucketing algorithm is run. + // Defaults to `min(10 * shard_size, 50000)`. 
+ InitialBuffer *int `json:"initial_buffer,omitempty"` + Script *Script `json:"script,omitempty"` + // ShardSize The number of buckets that the coordinating node will request from each + // shard. + // Defaults to `buckets * 50`. + ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *VariableWidthHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Buckets", err) + } + s.Buckets = &value + case float64: + f := int(v) + s.Buckets = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "initial_buffer": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "InitialBuffer", err) + } + s.InitialBuffer = &value + case float64: + f := int(v) + s.InitialBuffer = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + case "shard_size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "ShardSize", err) + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil +} + +// NewVariableWidthHistogramAggregation returns a VariableWidthHistogramAggregation. +func NewVariableWidthHistogramAggregation() *VariableWidthHistogramAggregation { + r := &VariableWidthHistogramAggregation{} + + return r +} + +type VariableWidthHistogramAggregationVariant interface { + VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation +} + +func (s *VariableWidthHistogramAggregation) VariableWidthHistogramAggregationCaster() *VariableWidthHistogramAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogrambucket.go new file mode 100644 index 000000000..22ba62f69 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/variablewidthhistogrambucket.go @@ -0,0 +1,722 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// VariableWidthHistogramBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L406-L413 +type VariableWidthHistogramBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key Float64 `json:"key"` + KeyAsString *string `json:"key_as_string,omitempty"` + Max Float64 `json:"max"` + MaxAsString *string `json:"max_as_string,omitempty"` + Min Float64 `json:"min"` + MinAsString *string `json:"min_as_string,omitempty"` +} + +func (s *VariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "DocCount", err) + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Key", err) + } + f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f + } + + case "key_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "KeyAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + case "max": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Max", err) + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MaxAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "min": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Min", err) + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "MinAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", 
"Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return 
fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + 
s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "time_series": + o := NewTimeSeriesAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case 
"scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "boxplot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]any, 0) + if err := dec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Aggregations", err) + } + s.Aggregations[value] = o + } + } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s VariableWidthHistogramBucket) MarshalJSON() ([]byte, error) { + type opt VariableWidthHistogramBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]any, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewVariableWidthHistogramBucket returns a VariableWidthHistogramBucket. 
+func NewVariableWidthHistogramBucket() *VariableWidthHistogramBucket { + r := &VariableWidthHistogramBucket{ + Aggregations: make(map[string]Aggregate), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vector.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vector.go new file mode 100644 index 000000000..d48902100 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vector.go @@ -0,0 +1,144 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Vector type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L460-L464 +type Vector struct { + Available bool `json:"available"` + DenseVectorDimsAvgCount int `json:"dense_vector_dims_avg_count"` + DenseVectorFieldsCount int `json:"dense_vector_fields_count"` + Enabled bool `json:"enabled"` + SparseVectorFieldsCount *int `json:"sparse_vector_fields_count,omitempty"` +} + +func (s *Vector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "dense_vector_dims_avg_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DenseVectorDimsAvgCount", err) + } + s.DenseVectorDimsAvgCount = value + case float64: + f := int(v) + s.DenseVectorDimsAvgCount = f + } + + case "dense_vector_fields_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "DenseVectorFieldsCount", err) + } + s.DenseVectorFieldsCount = value + case float64: + f := int(v) + s.DenseVectorFieldsCount = f + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "sparse_vector_fields_count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { 
+ case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "SparseVectorFieldsCount", err) + } + s.SparseVectorFieldsCount = &value + case float64: + f := int(v) + s.SparseVectorFieldsCount = &f + } + + } + } + return nil +} + +// NewVector returns a Vector. +func NewVector() *Vector { + r := &Vector{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/verifyindex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/verifyindex.go new file mode 100644 index 000000000..e099b5e29 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/verifyindex.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// VerifyIndex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/indices/recovery/types.ts#L111-L116 +type VerifyIndex struct { + CheckIndexTime Duration `json:"check_index_time,omitempty"` + CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *VerifyIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_index_time": + if err := dec.Decode(&s.CheckIndexTime); err != nil { + return fmt.Errorf("%s | %w", "CheckIndexTime", err) + } + + case "check_index_time_in_millis": + if err := dec.Decode(&s.CheckIndexTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "CheckIndexTimeInMillis", err) + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewVerifyIndex returns a VerifyIndex. +func NewVerifyIndex() *VerifyIndex { + r := &VerifyIndex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/versionproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/versionproperty.go new file mode 100644 index 000000000..aa5011b89 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/versionproperty.go @@ -0,0 +1,872 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// VersionProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L340-L342 +type VersionProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *VersionProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case 
"binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return 
fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err 
:= localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo 
:= NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := 
localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := 
NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s VersionProperty) MarshalJSON() ([]byte, error) { + type innerVersionProperty VersionProperty + tmp := innerVersionProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "version" + + return json.Marshal(tmp) +} + +// NewVersionProperty returns a VersionProperty. +func NewVersionProperty() *VersionProperty { + r := &VersionProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type VersionPropertyVariant interface { + VersionPropertyCaster() *VersionProperty +} + +func (s *VersionProperty) VersionPropertyCaster() *VersionProperty { + return s +} + +func (s *VersionProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertex.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertex.go new file mode 100644 index 000000000..9896ce690 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertex.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
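Reviewer note on the hunk above: the generated `UnmarshalJSON` for `VersionProperty` peeks at each sub-property's `"type"` key (defaulting to `"object"` when the key is absent) and decodes into the matching concrete property type, while `MarshalJSON` re-injects the `"version"` literal on every round trip. A minimal sketch of that behavior — the sample mapping JSON is invented; the types and the `v9` import path come from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Hypothetical mapping fragment for a "version" field with a keyword
	// multi-field, roughly as the get-mapping API might return it.
	raw := []byte(`{"type":"version","fields":{"raw":{"type":"keyword"}}}`)

	vp := types.NewVersionProperty()
	if err := json.Unmarshal(raw, vp); err != nil {
		panic(err)
	}

	// The decoder dispatched on the inner "type" key, so the sub-field comes
	// back as a concrete *types.KeywordProperty, not a raw map.
	_, isKeyword := vp.Fields["raw"].(*types.KeywordProperty)
	fmt.Println(isKeyword) // true

	// MarshalJSON always writes the discriminator, even on a zero value.
	out, _ := json.Marshal(types.NewVersionProperty())
	fmt.Println(string(out)) // {"type":"version"}
}
```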
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Vertex type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/Vertex.ts#L23-L28 +type Vertex struct { + Depth int64 `json:"depth"` + Field string `json:"field"` + Term string `json:"term"` + Weight Float64 `json:"weight"` +} + +func (s *Vertex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "depth": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Depth", err) + } + s.Depth = value + case float64: + f := int64(v) + s.Depth = f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Term", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + case "weight": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Weight", err) + } + f := Float64(value) + s.Weight = f + case float64: + f := Float64(v) + s.Weight = f + } + + } + } + return nil +} + +// NewVertex returns a Vertex. +func NewVertex() *Vertex { + r := &Vertex{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexdefinition.go new file mode 100644 index 000000000..126333394 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexdefinition.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
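The `Vertex` decoder just above shows the tolerant scalar handling that recurs throughout these generated files: numeric fields that arrive as JSON strings are coerced with `strconv.ParseInt`/`ParseFloat` instead of failing. A small sketch against that decoder, with made-up payloads:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Both payloads decode identically: the generated decoder falls back to
	// strconv when a numeric field is delivered as a string.
	payloads := []string{
		`{"field":"user","term":"alice","depth":2,"weight":1.5}`,
		`{"field":"user","term":"alice","depth":"2","weight":"1.5"}`,
	}
	for _, p := range payloads {
		var v types.Vertex
		if err := json.Unmarshal([]byte(p), &v); err != nil {
			panic(err)
		}
		fmt.Println(v.Field, v.Term, v.Depth, v.Weight) // user alice 2 1.5
	}
}
```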
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// VertexDefinition type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/Vertex.ts#L30-L59 +type VertexDefinition struct { + // Exclude Prevents the specified terms from being included in the results. + Exclude []string `json:"exclude,omitempty"` + // Field Identifies a field in the documents of interest. + Field string `json:"field"` + // Include Identifies the terms of interest that form the starting points from which you + // want to spider out. + Include []VertexInclude `json:"include,omitempty"` + // MinDocCount Specifies how many documents must contain a pair of terms before it is + // considered to be a useful connection. + // This setting acts as a certainty threshold. + MinDocCount *int64 `json:"min_doc_count,omitempty"` + // ShardMinDocCount Controls how many documents on a particular shard have to contain a pair of + // terms before the connection is returned for global consideration. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // Size Specifies the maximum number of vertex terms returned for each field. + Size *int `json:"size,omitempty"` +} + +func (s *VertexDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + if err := dec.Decode(&s.Exclude); err != nil { + return fmt.Errorf("%s | %w", "Exclude", err) + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return fmt.Errorf("%s | %w", "Include", err) + } + + case "min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "MinDocCount", err) + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "shard_min_doc_count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "ShardMinDocCount", err) + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewVertexDefinition returns a VertexDefinition. 
+func NewVertexDefinition() *VertexDefinition { + r := &VertexDefinition{} + + return r +} + +type VertexDefinitionVariant interface { + VertexDefinitionCaster() *VertexDefinition +} + +func (s *VertexDefinition) VertexDefinitionCaster() *VertexDefinition { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexinclude.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexinclude.go new file mode 100644 index 000000000..4a0e85404 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vertexinclude.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// VertexInclude type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/graph/_types/Vertex.ts#L61-L65 +type VertexInclude struct { + Boost *Float64 `json:"boost,omitempty"` + Term string `json:"term"` +} + +func (s *VertexInclude) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Term) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Term", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + } + } + return nil +} + +// NewVertexInclude returns a VertexInclude. 
+func NewVertexInclude() *VertexInclude { + r := &VertexInclude{} + + return r +} + +type VertexIncludeVariant interface { + VertexIncludeCaster() *VertexInclude +} + +func (s *VertexInclude) VertexIncludeCaster() *VertexInclude { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vocabulary.go new file mode 100644 index 000000000..73a5416cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/vocabulary.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Vocabulary type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L248-L250 +type Vocabulary struct { + Index string `json:"index"` +} + +func (s *Vocabulary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + } + } + return nil +} + +// NewVocabulary returns a Vocabulary. +func NewVocabulary() *Vocabulary { + r := &Vocabulary{} + + return r +} + +type VocabularyVariant interface { + VocabularyCaster() *Vocabulary +} + +func (s *Vocabulary) VocabularyCaster() *Vocabulary { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/void.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/void.go new file mode 100644 index 000000000..8fcf1dd08 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/void.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
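`VertexInclude`, completed above, is one of the spec's string-or-object unions: its decoder checks whether the payload opens with `{` and otherwise treats the whole value as the bare `term` shorthand. A hedged sketch (sample inputs invented, types from this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	var short, full types.VertexInclude

	// Shorthand: a bare term.
	if err := json.Unmarshal([]byte(`"elasticsearch"`), &short); err != nil {
		panic(err)
	}
	// Full form: an object with an optional boost.
	if err := json.Unmarshal([]byte(`{"term":"elasticsearch","boost":2}`), &full); err != nil {
		panic(err)
	}

	fmt.Println(short.Term, full.Term, *full.Boost) // elasticsearch elasticsearch 2
}
```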
See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +type Void any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaiservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaiservicesettings.go new file mode 100644 index 000000000..03ae27ed4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaiservicesettings.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// VoyageAIServiceSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1616-L1647 +type VoyageAIServiceSettings struct { + // Dimensions The number of dimensions for resulting output embeddings. + // This setting maps to `output_dimension` in the VoyageAI documentation. + // Only for the `text_embedding` task type. + Dimensions *int `json:"dimensions,omitempty"` + // EmbeddingType The data type for the embeddings to be returned. + // This setting maps to `output_dtype` in the VoyageAI documentation. + // Permitted values: float, int8, bit. + // `int8` is a synonym of `byte` in the VoyageAI documentation. + // `bit` is a synonym of `binary` in the VoyageAI documentation. + // Only for the `text_embedding` task type. + EmbeddingType *float32 `json:"embedding_type,omitempty"` + // ModelId The name of the model to use for the inference task. + // Refer to the VoyageAI documentation for the list of available text embedding + // and rerank models. + ModelId string `json:"model_id"` + // RateLimit This setting helps to minimize the number of rate limit errors returned from + // VoyageAI. + // The `voyageai` service sets a default number of requests allowed per minute + // depending on the task type. + // For both `text_embedding` and `rerank`, it is set to `2000`. 
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"` +} + +func (s *VoyageAIServiceSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dimensions": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Dimensions", err) + } + s.Dimensions = &value + case float64: + f := int(v) + s.Dimensions = &f + } + + case "embedding_type": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "EmbeddingType", err) + } + f := float32(value) + s.EmbeddingType = &f + case float64: + f := float32(v) + s.EmbeddingType = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ModelId", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "rate_limit": + if err := dec.Decode(&s.RateLimit); err != nil { + return fmt.Errorf("%s | %w", "RateLimit", err) + } + + } + } + return nil +} + +// NewVoyageAIServiceSettings returns a VoyageAIServiceSettings. +func NewVoyageAIServiceSettings() *VoyageAIServiceSettings { + r := &VoyageAIServiceSettings{} + + return r +} + +type VoyageAIServiceSettingsVariant interface { + VoyageAIServiceSettingsCaster() *VoyageAIServiceSettings +} + +func (s *VoyageAIServiceSettings) VoyageAIServiceSettingsCaster() *VoyageAIServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaitasksettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaitasksettings.go new file mode 100644 index 000000000..0dad0b6ae --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/voyageaitasksettings.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// VoyageAITaskSettings type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1649-L1673 +type VoyageAITaskSettings struct { + // InputType Type of the input text. 
+ // Permitted values: `ingest` (maps to `document` in the VoyageAI + // documentation), `search` (maps to `query` in the VoyageAI documentation). + // Only for the `text_embedding` task type. + InputType *string `json:"input_type,omitempty"` + // ReturnDocuments Whether to return the source documents in the response. + // Only for the `rerank` task type. + ReturnDocuments *bool `json:"return_documents,omitempty"` + // TopK The number of most relevant documents to return. + // If not specified, the reranking results of all documents will be returned. + // Only for the `rerank` task type. + TopK *int `json:"top_k,omitempty"` + // Truncation Whether to truncate the input texts to fit within the context length. + Truncation *bool `json:"truncation,omitempty"` +} + +func (s *VoyageAITaskSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "input_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "InputType", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InputType = &o + + case "return_documents": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ReturnDocuments", err) + } + s.ReturnDocuments = &value + case bool: + s.ReturnDocuments = &v + } + + case "top_k": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "TopK", err) + } + s.TopK = &value + case float64: + f := int(v) + s.TopK = &f + } + + case "truncation": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Truncation", err) + } + s.Truncation = &value + case bool: + s.Truncation = &v + } + + } + } + return nil +} + +// NewVoyageAITaskSettings returns a VoyageAITaskSettings. +func NewVoyageAITaskSettings() *VoyageAITaskSettings { + r := &VoyageAITaskSettings{} + + return r +} + +type VoyageAITaskSettingsVariant interface { + VoyageAITaskSettingsCaster() *VoyageAITaskSettings +} + +func (s *VoyageAITaskSettings) VoyageAITaskSettingsCaster() *VoyageAITaskSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforactiveshards.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforactiveshards.go new file mode 100644 index 000000000..108b43f53 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforactiveshards.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WaitForActiveShards holds the union for the following types: +// +// int +// waitforactiveshardoptions.WaitForActiveShardOptions +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/common.ts#L139-L140 +type WaitForActiveShards any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitfornodes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitfornodes.go new file mode 100644 index 000000000..7313af97f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitfornodes.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WaitForNodes holds the union for the following types: +// +// string +// int +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/cluster/health/types.ts#L47-L47 +type WaitForNodes any diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforsnapshotaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforsnapshotaction.go new file mode 100644 index 000000000..2551643c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/waitforsnapshotaction.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
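Unions with no shared Go representation, such as `WaitForActiveShards` (int or `waitforactiveshardoptions.WaitForActiveShardOptions`) and `WaitForNodes` (string or int), are emitted as `type X any`; the doc comment is the only record of the permitted variants, and nothing is validated client-side. For illustration only — the values below are examples, not defaults:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// Either variant can be assigned directly; the caller is responsible for
	// supplying a value the API understands.
	var shards types.WaitForActiveShards = 2 // int variant
	var nodes types.WaitForNodes = ">=3"     // string variant (cluster health syntax)

	fmt.Println(shards, nodes)
}
```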
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WaitForSnapshotAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ilm/_types/Phase.ts#L145-L147 +type WaitForSnapshotAction struct { + Policy string `json:"policy"` +} + +func (s *WaitForSnapshotAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Policy", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Policy = o + + } + } + return nil +} + +// NewWaitForSnapshotAction returns a WaitForSnapshotAction. +func NewWaitForSnapshotAction() *WaitForSnapshotAction { + r := &WaitForSnapshotAction{} + + return r +} + +type WaitForSnapshotActionVariant interface { + WaitForSnapshotActionCaster() *WaitForSnapshotAction +} + +func (s *WaitForSnapshotAction) WaitForSnapshotActionCaster() *WaitForSnapshotAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/warmerstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/warmerstats.go new file mode 100644 index 000000000..ad934bc44 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/warmerstats.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WarmerStats type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Stats.ts#L437-L442 +type WarmerStats struct { + Current int64 `json:"current"` + Total int64 `json:"total"` + TotalTime Duration `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +func (s *WarmerStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Current", err) + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return fmt.Errorf("%s | %w", "TotalTime", err) + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMillis", err) + } + + } + } + return nil +} + +// NewWarmerStats returns a WarmerStats. +func NewWarmerStats() *WarmerStats { + r := &WarmerStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watch.go new file mode 100644 index 000000000..462bf9d22 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watch.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Watch type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Watch.ts#L37-L47 +type Watch struct { + Actions map[string]WatcherAction `json:"actions"` + Condition WatcherCondition `json:"condition"` + Input WatcherInput `json:"input"` + Metadata Metadata `json:"metadata,omitempty"` + Status *WatchStatus `json:"status,omitempty"` + ThrottlePeriod Duration `json:"throttle_period,omitempty"` + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + Transform *TransformContainer `json:"transform,omitempty"` + Trigger TriggerContainer `json:"trigger"` +} + +func (s *Watch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if s.Actions == nil { + s.Actions = make(map[string]WatcherAction, 0) + } + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return fmt.Errorf("%s | %w", "Condition", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "throttle_period": + if err := dec.Decode(&s.ThrottlePeriod); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriod", err) + } + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriodInMillis", err) + } + + case "transform": + if err := dec.Decode(&s.Transform); err != nil { + return fmt.Errorf("%s | %w", "Transform", err) + } + + case "trigger": + if err := dec.Decode(&s.Trigger); err != nil { + return fmt.Errorf("%s | %w", "Trigger", err) + } + + } + } + return nil +} + +// NewWatch returns a Watch. +func NewWatch() *Watch { + r := &Watch{ + Actions: make(map[string]WatcherAction), + } + + return r +} + +type WatchVariant interface { + WatchCaster() *Watch +} + +func (s *Watch) WatchCaster() *Watch { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcher.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcher.go new file mode 100644 index 000000000..6473a3746 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcher.go @@ -0,0 +1,111 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Watcher type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L466-L470 +type Watcher struct { + Available bool `json:"available"` + Count Counter `json:"count"` + Enabled bool `json:"enabled"` + Execution WatcherActions `json:"execution"` + Watch WatcherWatch `json:"watch"` +} + +func (s *Watcher) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "count": + if err := dec.Decode(&s.Count); err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "execution": + if err := dec.Decode(&s.Execution); err != nil { + return fmt.Errorf("%s | %w", "Execution", err) + } + + case "watch": + if err := dec.Decode(&s.Watch); err != nil { + return fmt.Errorf("%s | %w", "Watch", err) + } + + } + } + return nil +} + +// NewWatcher returns a Watcher. +func NewWatcher() *Watcher { + r := &Watcher{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheraction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheraction.go new file mode 100644 index 000000000..d9dd0416a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheraction.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype" +) + +// WatcherAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L35-L54 +type WatcherAction struct { + ActionType *actiontype.ActionType `json:"action_type,omitempty"` + Condition *WatcherCondition `json:"condition,omitempty"` + Email *EmailAction `json:"email,omitempty"` + Foreach *string `json:"foreach,omitempty"` + Index *IndexAction `json:"index,omitempty"` + Logging *LoggingAction `json:"logging,omitempty"` + MaxIterations *int `json:"max_iterations,omitempty"` + Name *string `json:"name,omitempty"` + Pagerduty *PagerDutyAction `json:"pagerduty,omitempty"` + Slack *SlackAction `json:"slack,omitempty"` + ThrottlePeriod Duration `json:"throttle_period,omitempty"` + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + Transform *TransformContainer `json:"transform,omitempty"` + Webhook *WebhookAction `json:"webhook,omitempty"` +} + +func (s *WatcherAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action_type": + if err := dec.Decode(&s.ActionType); err != nil { + return fmt.Errorf("%s | %w", "ActionType", err) + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return fmt.Errorf("%s | %w", "Condition", err) + } + + case "email": + if err := dec.Decode(&s.Email); err != nil { + return fmt.Errorf("%s | %w", "Email", err) + } + + case "foreach": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Foreach", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Foreach = &o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return fmt.Errorf("%s | %w", "Index", err) + } + + case "logging": + if err := dec.Decode(&s.Logging); err != nil { + return fmt.Errorf("%s | %w", "Logging", err) + } + + case "max_iterations": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxIterations", err) + } + s.MaxIterations = &value + case float64: + f := int(v) + s.MaxIterations = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "pagerduty": + if err := dec.Decode(&s.Pagerduty); err != nil { + return fmt.Errorf("%s | %w", "Pagerduty", err) + } + + case "slack": + if err := dec.Decode(&s.Slack); err != nil { + return fmt.Errorf("%s | %w", "Slack", err) + } + + case "throttle_period": + if err := dec.Decode(&s.ThrottlePeriod); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriod", err) + } + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriodInMillis", err) + } + + case "transform": + if err := dec.Decode(&s.Transform); err != nil { + return fmt.Errorf("%s | %w", "Transform", err) + } + + case "webhook": + if err := dec.Decode(&s.Webhook); err != nil { + return fmt.Errorf("%s | %w", "Webhook", err) + } + + } + } + return nil +} + +// NewWatcherAction returns a WatcherAction. 
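One last pattern worth flagging from the `WatcherAction` decoder above: optional fields are pointers, left nil when a key is absent and allocated (with the same string-to-number coercion) when present. A speculative sketch using only fields defined in this file; the payload is invented:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	// "max_iterations" arrives as a string and is coerced via strconv.Atoi.
	raw := []byte(`{"foreach":"ctx.payload.hits.hits","max_iterations":"100"}`)

	var a types.WatcherAction
	if err := json.Unmarshal(raw, &a); err != nil {
		panic(err)
	}

	fmt.Println(*a.Foreach, *a.MaxIterations) // ctx.payload.hits.hits 100
	fmt.Println(a.Name == nil)                // true: absent keys stay nil
}
```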
+func NewWatcherAction() *WatcherAction { + r := &WatcherAction{} + + return r +} + +type WatcherActionVariant interface { + WatcherActionCaster() *WatcherAction +} + +func (s *WatcherAction) WatcherActionCaster() *WatcherAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractions.go new file mode 100644 index 000000000..2476f9c86 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractions.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WatcherActions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L402-L404 +type WatcherActions struct { + Actions map[string]WatcherActionTotals `json:"actions"` +} + +// NewWatcherActions returns a WatcherActions. +func NewWatcherActions() *WatcherActions { + r := &WatcherActions{ + Actions: make(map[string]WatcherActionTotals), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractiontotals.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractiontotals.go new file mode 100644 index 000000000..1bcc05270 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcheractiontotals.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// WatcherActionTotals type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L418-L421 +type WatcherActionTotals struct { + Total Duration `json:"total"` + TotalTimeInMs int64 `json:"total_time_in_ms"` +} + +func (s *WatcherActionTotals) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + + case "total_time_in_ms": + if err := dec.Decode(&s.TotalTimeInMs); err != nil { + return fmt.Errorf("%s | %w", "TotalTimeInMs", err) + } + + } + } + return nil +} + +// NewWatcherActionTotals returns a WatcherActionTotals. +func NewWatcherActionTotals() *WatcherActionTotals { + r := &WatcherActionTotals{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchercondition.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchercondition.go new file mode 100644 index 000000000..a75c1301c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchercondition.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop" +) + +// WatcherCondition type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Conditions.ts#L51-L63
+type WatcherCondition struct {
+ AdditionalWatcherConditionProperty map[string]json.RawMessage `json:"-"`
+ Always *AlwaysCondition `json:"always,omitempty"`
+ ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"`
+ Compare map[string]map[conditionop.ConditionOp]FieldValue `json:"compare,omitempty"`
+ Never *NeverCondition `json:"never,omitempty"`
+ Script *ScriptCondition `json:"script,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s WatcherCondition) MarshalJSON() ([]byte, error) {
+ type opt WatcherCondition
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalWatcherConditionProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalWatcherConditionProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewWatcherCondition returns a WatcherCondition.
+func NewWatcherCondition() *WatcherCondition {
+ r := &WatcherCondition{
+ AdditionalWatcherConditionProperty: make(map[string]json.RawMessage),
+ ArrayCompare: make(map[string]ArrayCompareCondition),
+ Compare: make(map[string]map[conditionop.ConditionOp]FieldValue),
+ }
+
+ return r
+}
+
+type WatcherConditionVariant interface {
+ WatcherConditionCaster() *WatcherCondition
+}
+
+func (s *WatcherCondition) WatcherConditionCaster() *WatcherCondition {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherinput.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherinput.go
new file mode 100644
index 000000000..e07ef45b9
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherinput.go
@@ -0,0 +1,84 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// WatcherInput type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Input.ts#L87-L95
+type WatcherInput struct {
+ AdditionalWatcherInputProperty map[string]json.RawMessage `json:"-"`
+ Chain *ChainInput `json:"chain,omitempty"`
+ Http *HttpInput `json:"http,omitempty"`
+ Search *SearchInput `json:"search,omitempty"`
+ Simple map[string]json.RawMessage `json:"simple,omitempty"`
+}
+
+// MarshalJSON overrides marshalling for types with additional properties
+func (s WatcherInput) MarshalJSON() ([]byte, error) {
+ type opt WatcherInput
+ // We transform the struct to a map without the embedded additional properties map
+ tmp := make(map[string]any, 0)
+
+ data, err := json.Marshal(opt(s))
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(data, &tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ // We inline the additional fields from the underlying map
+ for key, value := range s.AdditionalWatcherInputProperty {
+ tmp[fmt.Sprintf("%s", key)] = value
+ }
+ delete(tmp, "AdditionalWatcherInputProperty")
+
+ data, err = json.Marshal(tmp)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewWatcherInput returns a WatcherInput.
+func NewWatcherInput() *WatcherInput {
+ r := &WatcherInput{
+ AdditionalWatcherInputProperty: make(map[string]json.RawMessage),
+ Simple: make(map[string]json.RawMessage),
+ }
+
+ return r
+}
+
+type WatcherInputVariant interface {
+ WatcherInputCaster() *WatcherInput
+}
+
+func (s *WatcherInput) WatcherInputCaster() *WatcherInput {
+ return s
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchernodestats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchernodestats.go
new file mode 100644
index 000000000..2d455421e
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchernodestats.go
@@ -0,0 +1,132 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated from the elasticsearch-specification DO NOT EDIT.
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watcherstate"
+)
+
+// WatcherNodeStats type.
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stats/types.ts#L33-L61
+type WatcherNodeStats struct {
+ // CurrentWatches The current executing watches metric gives insight into the watches that are
+ // currently being executed by Watcher.
+ // Additional information is shared per watch that is currently executing.
+ // This information includes the `watch_id`, the time its execution started and
+ // its current execution phase.
+ // To include this metric, the `metric` option should be set to
+ // `current_watches` or `_all`.
+ // In addition you can also specify the `emit_stacktraces=true` parameter, which
+ // adds stack traces for each watch that is being run.
+ // These stack traces can give you more insight into an execution of a watch.
+ CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"`
+ ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"`
+ NodeId string `json:"node_id"`
+ // QueuedWatches Watcher moderates the execution of watches such that their execution won't
+ // put too much pressure on the node and its resources.
+ // If too many watches trigger concurrently and there isn't enough capacity to
+ // run them all, some of the watches are queued, waiting for the current running
+ // watches to finish.
+ // The queued watches metric gives insight on these queued watches.
+ //
+ // To include this metric, the `metric` option should include `queued_watches`
+ // or `_all`.
+ QueuedWatches []WatchRecordQueuedStats `json:"queued_watches,omitempty"`
+ // WatchCount The number of watches currently registered.
+ WatchCount int64 `json:"watch_count"`
+ // WatcherState The current state of Watcher.
+ WatcherState watcherstate.WatcherState `json:"watcher_state"`
+}
+
+func (s *WatcherNodeStats) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "current_watches":
+ if err := dec.Decode(&s.CurrentWatches); err != nil {
+ return fmt.Errorf("%s | %w", "CurrentWatches", err)
+ }
+
+ case "execution_thread_pool":
+ if err := dec.Decode(&s.ExecutionThreadPool); err != nil {
+ return fmt.Errorf("%s | %w", "ExecutionThreadPool", err)
+ }
+
+ case "node_id":
+ if err := dec.Decode(&s.NodeId); err != nil {
+ return fmt.Errorf("%s | %w", "NodeId", err)
+ }
+
+ case "queued_watches":
+ if err := dec.Decode(&s.QueuedWatches); err != nil {
+ return fmt.Errorf("%s | %w", "QueuedWatches", err)
+ }
+
+ case "watch_count":
+ var tmp any
+ dec.Decode(&tmp)
+ switch v := tmp.(type) {
+ case string:
+ value, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("%s | %w", "WatchCount", err)
+ }
+ s.WatchCount = value
+ case float64:
+ f := int64(v)
+ s.WatchCount = f
+ }
+
+ case "watcher_state":
+ if err := dec.Decode(&s.WatcherState); err != nil {
+ return fmt.Errorf("%s | %w", "WatcherState", err)
+ }
+
+ }
+ }
+ return nil
+}
+
+// NewWatcherNodeStats returns a WatcherNodeStats.
+func NewWatcherNodeStats() *WatcherNodeStats {
+ r := &WatcherNodeStats{}
+
+ return r
+}
diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherstatusactions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherstatusactions.go
new file mode 100644
index 000000000..0329a73a6
--- /dev/null
+++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherstatusactions.go
@@ -0,0 +1,30 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WatcherStatusActions type alias. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Action.ts#L56-L56 +type WatcherStatusActions map[string]ActionStatus + +type WatcherStatusActionsVariant interface { + WatcherStatusActionsCaster() *WatcherStatusActions +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatch.go new file mode 100644 index 000000000..e363a228e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatch.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WatcherWatch type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L406-L411 +type WatcherWatch struct { + Action map[string]Counter `json:"action,omitempty"` + Condition map[string]Counter `json:"condition,omitempty"` + Input map[string]Counter `json:"input"` + Trigger WatcherWatchTrigger `json:"trigger"` +} + +// NewWatcherWatch returns a WatcherWatch. +func NewWatcherWatch() *WatcherWatch { + r := &WatcherWatch{ + Action: make(map[string]Counter), + Condition: make(map[string]Counter), + Input: make(map[string]Counter), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtrigger.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtrigger.go new file mode 100644 index 000000000..ade850968 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtrigger.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WatcherWatchTrigger type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L413-L416 +type WatcherWatchTrigger struct { + All_ Counter `json:"_all"` + Schedule *WatcherWatchTriggerSchedule `json:"schedule,omitempty"` +} + +// NewWatcherWatchTrigger returns a WatcherWatchTrigger. +func NewWatcherWatchTrigger() *WatcherWatchTrigger { + r := &WatcherWatchTrigger{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtriggerschedule.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtriggerschedule.go new file mode 100644 index 000000000..de60cd055 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watcherwatchtriggerschedule.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WatcherWatchTriggerSchedule type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L472-L475 +type WatcherWatchTriggerSchedule struct { + Active int64 `json:"active"` + All_ Counter `json:"_all"` + Cron Counter `json:"cron"` + Total int64 `json:"total"` +} + +func (s *WatcherWatchTriggerSchedule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Active", err) + } + s.Active = value + case float64: + f := int64(v) + s.Active = f + } + + case "_all": + if err := dec.Decode(&s.All_); err != nil { + return fmt.Errorf("%s | %w", "All_", err) + } + + case "cron": + if err := dec.Decode(&s.Cron); err != nil { + return fmt.Errorf("%s | %w", "Cron", err) + } + + case "total": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + +// NewWatcherWatchTriggerSchedule returns a WatcherWatchTriggerSchedule. +func NewWatcherWatchTriggerSchedule() *WatcherWatchTriggerSchedule { + r := &WatcherWatchTriggerSchedule{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecord.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecord.go new file mode 100644 index 000000000..c0009eac9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecord.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionstatus" +) + +// WatchRecord type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/execute_watch/types.ts#L27-L39 +type WatchRecord struct { + Condition WatcherCondition `json:"condition"` + Input WatcherInput `json:"input"` + Messages []string `json:"messages"` + Metadata Metadata `json:"metadata,omitempty"` + Node string `json:"node"` + Result ExecutionResult `json:"result"` + State executionstatus.ExecutionStatus `json:"state"` + Status *WatchStatus `json:"status,omitempty"` + TriggerEvent TriggerEventResult `json:"trigger_event"` + User string `json:"user"` + WatchId string `json:"watch_id"` +} + +func (s *WatchRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return fmt.Errorf("%s | %w", "Condition", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + case "messages": + if err := dec.Decode(&s.Messages); err != nil { + return fmt.Errorf("%s | %w", "Messages", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Node", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "result": + if err := dec.Decode(&s.Result); err != nil { + return fmt.Errorf("%s | %w", "Result", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return fmt.Errorf("%s | %w", "Status", err) + } + + case "trigger_event": + if err := dec.Decode(&s.TriggerEvent); err != nil { + return fmt.Errorf("%s | %w", "TriggerEvent", err) + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return fmt.Errorf("%s | %w", "User", err) + } + + case "watch_id": + if err := dec.Decode(&s.WatchId); err != nil { + return fmt.Errorf("%s | %w", "WatchId", err) + } + + } + } + return nil +} + +// NewWatchRecord returns a WatchRecord. +func NewWatchRecord() *WatchRecord { + r := &WatchRecord{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordqueuedstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordqueuedstats.go new file mode 100644 index 000000000..c84beabcb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordqueuedstats.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// WatchRecordQueuedStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stats/types.ts#L71-L77 +type WatchRecordQueuedStats struct { + // ExecutionTime The time the watch was run. + // This is just before the input is being run. + ExecutionTime DateTime `json:"execution_time"` +} + +func (s *WatchRecordQueuedStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return fmt.Errorf("%s | %w", "ExecutionTime", err) + } + + } + } + return nil +} + +// NewWatchRecordQueuedStats returns a WatchRecordQueuedStats. +func NewWatchRecordQueuedStats() *WatchRecordQueuedStats { + r := &WatchRecordQueuedStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordstats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordstats.go new file mode 100644 index 000000000..d7559bf81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchrecordstats.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionphase" +) + +// WatchRecordStats type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stats/types.ts#L79-L94 +type WatchRecordStats struct { + ExecutedActions []string `json:"executed_actions,omitempty"` + // ExecutionPhase The current watch execution phase. + ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` + // ExecutionTime The time the watch was run. + // This is just before the input is being run. + ExecutionTime DateTime `json:"execution_time"` + // TriggeredTime The time the watch was triggered by the trigger engine. 
+ TriggeredTime DateTime `json:"triggered_time"` + WatchId string `json:"watch_id"` + // WatchRecordId The watch record identifier. + WatchRecordId string `json:"watch_record_id"` +} + +func (s *WatchRecordStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executed_actions": + if err := dec.Decode(&s.ExecutedActions); err != nil { + return fmt.Errorf("%s | %w", "ExecutedActions", err) + } + + case "execution_phase": + if err := dec.Decode(&s.ExecutionPhase); err != nil { + return fmt.Errorf("%s | %w", "ExecutionPhase", err) + } + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return fmt.Errorf("%s | %w", "ExecutionTime", err) + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return fmt.Errorf("%s | %w", "TriggeredTime", err) + } + + case "watch_id": + if err := dec.Decode(&s.WatchId); err != nil { + return fmt.Errorf("%s | %w", "WatchId", err) + } + + case "watch_record_id": + if err := dec.Decode(&s.WatchRecordId); err != nil { + return fmt.Errorf("%s | %w", "WatchRecordId", err) + } + + } + } + return nil +} + +// NewWatchRecordStats returns a WatchRecordStats. +func NewWatchRecordStats() *WatchRecordStats { + r := &WatchRecordStats{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchstatus.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchstatus.go new file mode 100644 index 000000000..49a5b5649 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watchstatus.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WatchStatus type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Watch.ts#L49-L56 +type WatchStatus struct { + Actions WatcherStatusActions `json:"actions"` + ExecutionState *string `json:"execution_state,omitempty"` + LastChecked DateTime `json:"last_checked,omitempty"` + LastMetCondition DateTime `json:"last_met_condition,omitempty"` + State ActivationState `json:"state"` + Version int64 `json:"version"` +} + +func (s *WatchStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "execution_state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ExecutionState", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExecutionState = &o + + case "last_checked": + if err := dec.Decode(&s.LastChecked); err != nil { + return fmt.Errorf("%s | %w", "LastChecked", err) + } + + case "last_met_condition": + if err := dec.Decode(&s.LastMetCondition); err != nil { + return fmt.Errorf("%s | %w", "LastMetCondition", err) + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return fmt.Errorf("%s | %w", "State", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// NewWatchStatus returns a WatchStatus. +func NewWatchStatus() *WatchStatus { + r := &WatchStatus{} + + return r +} + +type WatchStatusVariant interface { + WatchStatusCaster() *WatchStatus +} + +func (s *WatchStatus) WatchStatusCaster() *WatchStatus { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watsonxservicesettings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watsonxservicesettings.go new file mode 100644 index 000000000..677329231 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/watsonxservicesettings.go @@ -0,0 +1,164 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WatsonxServiceSettings type. 
+//
+// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/inference/_types/CommonTypes.ts#L1684-L1722
+type WatsonxServiceSettings struct {
+ // ApiKey A valid API key of your Watsonx account.
+ // You can find your Watsonx API keys or you can create a new one on the API
+ // keys page.
+ //
+ // IMPORTANT: You need to provide the API key only once, during the inference
+ // model creation.
+ // The get inference endpoint API does not retrieve your API key.
+ // After creating the inference model, you cannot change the associated API key.
+ // If you want to use a different API key, delete the inference model and
+ // recreate it with the same name and the updated API key.
+ ApiKey string `json:"api_key"`
+ // ApiVersion A version parameter that takes a version date in the format of `YYYY-MM-DD`.
+ // For the active version date parameters, refer to the Watsonx documentation.
+ ApiVersion string `json:"api_version"`
+ // ModelId The name of the model to use for the inference task.
+ // Refer to the IBM Embedding Models section in the Watsonx documentation for
+ // the list of available text embedding models.
+ // Refer to the IBM library - Foundation models in Watsonx.ai.
+ ModelId string `json:"model_id"`
+ // ProjectId The identifier of the IBM Cloud project to use for the inference task.
+ ProjectId string `json:"project_id"`
+ // RateLimit This setting helps to minimize the number of rate limit errors returned from
+ // Watsonx.
+ // By default, the `watsonxai` service sets the number of requests allowed per
+ // minute to 120.
+ RateLimit *RateLimitSetting `json:"rate_limit,omitempty"`
+ // Url The URL of the inference endpoint that you created on Watsonx.
+ Url string `json:"url"`
+}
+
+func (s *WatsonxServiceSettings) UnmarshalJSON(data []byte) error {
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return err
+ }
+
+ switch t {
+
+ case "api_key":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ApiKey", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ApiKey = o
+
+ case "api_version":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ApiVersion", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ApiVersion = o
+
+ case "model_id":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ModelId", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ModelId = o
+
+ case "project_id":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "ProjectId", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.ProjectId = o
+
+ case "rate_limit":
+ if err := dec.Decode(&s.RateLimit); err != nil {
+ return fmt.Errorf("%s | %w", "RateLimit", err)
+ }
+
+ case "url":
+ var tmp json.RawMessage
+ if err := dec.Decode(&tmp); err != nil {
+ return fmt.Errorf("%s | %w", "Url", err)
+ }
+ o := string(tmp[:])
+ o, err = strconv.Unquote(o)
+ if err != nil {
+ o = string(tmp[:])
+ }
+ s.Url = o
+
+ }
+ }
+ return nil
+}
+
+// NewWatsonxServiceSettings returns a WatsonxServiceSettings.
+func NewWatsonxServiceSettings() *WatsonxServiceSettings { + r := &WatsonxServiceSettings{} + + return r +} + +type WatsonxServiceSettingsVariant interface { + WatsonxServiceSettingsCaster() *WatsonxServiceSettings +} + +func (s *WatsonxServiceSettings) WatsonxServiceSettingsCaster() *WatsonxServiceSettings { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/web.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/web.go new file mode 100644 index 000000000..f42d8a9ed --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/web.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// Web type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ingest/_types/Database.ts#L61-L61 +type Web struct { +} + +// NewWeb returns a Web. +func NewWeb() *Web { + r := &Web{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookaction.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookaction.go new file mode 100644 index 000000000..ddab437e0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookaction.go @@ -0,0 +1,182 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod" +) + +// WebhookAction type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L293-L293 +type WebhookAction struct { + Auth *HttpInputAuthentication `json:"auth,omitempty"` + Body *string `json:"body,omitempty"` + ConnectionTimeout Duration `json:"connection_timeout,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Host *string `json:"host,omitempty"` + Method *httpinputmethod.HttpInputMethod `json:"method,omitempty"` + Params map[string]string `json:"params,omitempty"` + Path *string `json:"path,omitempty"` + Port *uint `json:"port,omitempty"` + Proxy *HttpInputProxy `json:"proxy,omitempty"` + ReadTimeout Duration `json:"read_timeout,omitempty"` + Scheme *connectionscheme.ConnectionScheme `json:"scheme,omitempty"` + Url *string `json:"url,omitempty"` +} + +func (s *WebhookAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return fmt.Errorf("%s | %w", "Auth", err) + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Body", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return fmt.Errorf("%s | %w", "ConnectionTimeout", err) + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return fmt.Errorf("%s | %w", "Headers", err) + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return fmt.Errorf("%s | %w", "Host", err) + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return fmt.Errorf("%s | %w", "Method", err) + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return fmt.Errorf("%s | %w", "Params", err) + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Path", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return fmt.Errorf("%s | %w", "Port", err) + } + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return fmt.Errorf("%s | %w", "Proxy", err) + } + + case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return fmt.Errorf("%s | %w", "ReadTimeout", err) + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return fmt.Errorf("%s | %w", "Scheme", err) + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Url", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + +// NewWebhookAction returns a WebhookAction. 
+func NewWebhookAction() *WebhookAction { + r := &WebhookAction{ + Headers: make(map[string]string), + Params: make(map[string]string), + } + + return r +} + +type WebhookActionVariant interface { + WebhookActionCaster() *WebhookAction +} + +func (s *WebhookAction) WebhookActionCaster() *WebhookAction { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookresult.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookresult.go new file mode 100644 index 000000000..5c8e030e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/webhookresult.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// WebhookResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/_types/Actions.ts#L295-L298 +type WebhookResult struct { + Request HttpInputRequestResult `json:"request"` + Response *HttpInputResponseResult `json:"response,omitempty"` +} + +// NewWebhookResult returns a WebhookResult. +func NewWebhookResult() *WebhookResult { + r := &WebhookResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaverageaggregation.go new file mode 100644 index 000000000..f1c22e0c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaverageaggregation.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype" +) + +// WeightedAverageAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L472-L486 +type WeightedAverageAggregation struct { + // Format A numeric response formatter. + Format *string `json:"format,omitempty"` + // Value Configuration for the field that provides the values. + Value *WeightedAverageValue `json:"value,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` + // Weight Configuration for the field or script that provides the weights. + Weight *WeightedAverageValue `json:"weight,omitempty"` +} + +func (s *WeightedAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Format", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return fmt.Errorf("%s | %w", "ValueType", err) + } + + case "weight": + if err := dec.Decode(&s.Weight); err != nil { + return fmt.Errorf("%s | %w", "Weight", err) + } + + } + } + return nil +} + +// NewWeightedAverageAggregation returns a WeightedAverageAggregation. +func NewWeightedAverageAggregation() *WeightedAverageAggregation { + r := &WeightedAverageAggregation{} + + return r +} + +type WeightedAverageAggregationVariant interface { + WeightedAverageAggregationCaster() *WeightedAverageAggregation +} + +func (s *WeightedAverageAggregation) WeightedAverageAggregationCaster() *WeightedAverageAggregation { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaveragevalue.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaveragevalue.go new file mode 100644 index 000000000..0cf199410 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedaveragevalue.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WeightedAverageValue type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/metric.ts#L488-L498 +type WeightedAverageValue struct { + // Field The field from which to extract the values or weights. + Field *string `json:"field,omitempty"` + // Missing A value or weight to use if the field is missing. + Missing *Float64 `json:"missing,omitempty"` + Script *Script `json:"script,omitempty"` +} + +func (s *WeightedAverageValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return fmt.Errorf("%s | %w", "Field", err) + } + + case "missing": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Missing", err) + } + f := Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return fmt.Errorf("%s | %w", "Script", err) + } + + } + } + return nil +} + +// NewWeightedAverageValue returns a WeightedAverageValue. +func NewWeightedAverageValue() *WeightedAverageValue { + r := &WeightedAverageValue{} + + return r +} + +type WeightedAverageValueVariant interface { + WeightedAverageValueCaster() *WeightedAverageValue +} + +func (s *WeightedAverageValue) WeightedAverageValueCaster() *WeightedAverageValue { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedavgaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedavgaggregate.go new file mode 100644 index 000000000..aa43b2b09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedavgaggregate.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WeightedAvgAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/aggregations/Aggregate.ts#L224-L229 +type WeightedAvgAggregate struct { + Meta Metadata `json:"meta,omitempty"` + // Value The metric value. A missing value generally means that there was no data to + // aggregate, + // unless specified otherwise. + Value *Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *WeightedAvgAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ValueAsString", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + +// NewWeightedAvgAggregate returns a WeightedAvgAggregate. +func NewWeightedAvgAggregate() *WeightedAvgAggregate { + r := &WeightedAvgAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedtokensquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedtokensquery.go new file mode 100644 index 000000000..d5b42d591 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weightedtokensquery.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WeightedTokensQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/WeightedTokensQuery.ts#L25-L33 +type WeightedTokensQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + // PruningConfig Token pruning configurations + PruningConfig *TokenPruningConfig `json:"pruning_config,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Tokens The tokens representing this query + Tokens []map[string]float32 `json:"tokens"` +} + +func (s *WeightedTokensQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "pruning_config": + if err := dec.Decode(&s.PruningConfig); err != nil { + return fmt.Errorf("%s | %w", "PruningConfig", err) + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "tokens": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]float32, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tokens", err) + } + s.Tokens = append(s.Tokens, o) + case '[': + o := make([]map[string]float32, 0) + if err := localDec.Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Tokens", err) + } + s.Tokens = o + } + + } + } + return nil +} + +// NewWeightedTokensQuery returns a WeightedTokensQuery. +func NewWeightedTokensQuery() *WeightedTokensQuery { + r := &WeightedTokensQuery{} + + return r +} + +type WeightedTokensQueryVariant interface { + WeightedTokensQueryCaster() *WeightedTokensQuery +} + +func (s *WeightedTokensQuery) WeightedTokensQueryCaster() *WeightedTokensQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weights.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weights.go new file mode 100644 index 000000000..9a7c3d330 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/weights.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// Weights type. 
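+//
+// Example (editor's note: an illustrative sketch, not part of the generated
+// file). Constructing trained-model weights using the exported Float64 type
+// from this package:
+//
+//	w := types.NewWeights()
+//	w.Weights = types.Float64(0.42)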
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/put_trained_model/types.ts#L108-L110 +type Weights struct { + Weights Float64 `json:"weights"` +} + +func (s *Weights) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "weights": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Weights", err) + } + f := Float64(value) + s.Weights = f + case float64: + f := Float64(v) + s.Weights = f + } + + } + } + return nil +} + +// NewWeights returns a Weights. +func NewWeights() *Weights { + r := &Weights{} + + return r +} + +type WeightsVariant interface { + WeightsCaster() *Weights +} + +func (s *Weights) WeightsCaster() *Weights { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespaceanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespaceanalyzer.go new file mode 100644 index 000000000..5ceb7c8c5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespaceanalyzer.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// WhitespaceAnalyzer type. 
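+//
+// Example (editor's note: an illustrative sketch, not part of the generated
+// file). The custom MarshalJSON below pins the type tag, so a zero-value
+// analyzer already serializes correctly:
+//
+//	a := types.NewWhitespaceAnalyzer()
+//	data, _ := json.Marshal(a) // {"type":"whitespace"}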
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/analyzers.ts#L421-L425 +type WhitespaceAnalyzer struct { + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WhitespaceAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WhitespaceAnalyzer) MarshalJSON() ([]byte, error) { + type innerWhitespaceAnalyzer WhitespaceAnalyzer + tmp := innerWhitespaceAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "whitespace" + + return json.Marshal(tmp) +} + +// NewWhitespaceAnalyzer returns a WhitespaceAnalyzer. +func NewWhitespaceAnalyzer() *WhitespaceAnalyzer { + r := &WhitespaceAnalyzer{} + + return r +} + +type WhitespaceAnalyzerVariant interface { + WhitespaceAnalyzerCaster() *WhitespaceAnalyzer +} + +func (s *WhitespaceAnalyzer) WhitespaceAnalyzerCaster() *WhitespaceAnalyzer { + return s +} + +func (s *WhitespaceAnalyzer) AnalyzerCaster() *Analyzer { + o := Analyzer(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespacetokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespacetokenizer.go new file mode 100644 index 000000000..9ee4e021a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/whitespacetokenizer.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WhitespaceTokenizer type. 
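+//
+// Example (editor's note: an illustrative sketch, not part of the generated
+// file). Overriding the default maximum token length; as with the analyzer
+// above, MarshalJSON pins the "whitespace" type tag:
+//
+//	tok := types.NewWhitespaceTokenizer()
+//	maxLen := 512
+//	tok.MaxTokenLength = &maxLen
+//	data, _ := json.Marshal(tok) // {"max_token_length":512,"type":"whitespace"}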
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/tokenizers.ts#L137-L140 +type WhitespaceTokenizer struct { + MaxTokenLength *int `json:"max_token_length,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WhitespaceTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxTokenLength", err) + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WhitespaceTokenizer) MarshalJSON() ([]byte, error) { + type innerWhitespaceTokenizer WhitespaceTokenizer + tmp := innerWhitespaceTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "whitespace" + + return json.Marshal(tmp) +} + +// NewWhitespaceTokenizer returns a WhitespaceTokenizer. +func NewWhitespaceTokenizer() *WhitespaceTokenizer { + r := &WhitespaceTokenizer{} + + return r +} + +type WhitespaceTokenizerVariant interface { + WhitespaceTokenizerCaster() *WhitespaceTokenizer +} + +func (s *WhitespaceTokenizer) WhitespaceTokenizerCaster() *WhitespaceTokenizer { + return s +} + +func (s *WhitespaceTokenizer) TokenizerDefinitionCaster() *TokenizerDefinition { + o := TokenizerDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardproperty.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardproperty.go new file mode 100644 index 000000000..aceb1e5bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardproperty.go @@ -0,0 +1,886 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum" +) + +// WildcardProperty type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/mapping/core.ts#L344-L351 +type WildcardProperty struct { + CopyTo []string `json:"copy_to,omitempty"` + DocValues *bool `json:"doc_values,omitempty"` + Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` + Fields map[string]Property `json:"fields,omitempty"` + IgnoreAbove *int `json:"ignore_above,omitempty"` + // Meta Metadata about the field. + Meta map[string]string `json:"meta,omitempty"` + NullValue *string `json:"null_value,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Store *bool `json:"store,omitempty"` + SyntheticSourceKeep *syntheticsourcekeepenum.SyntheticSourceKeepEnum `json:"synthetic_source_keep,omitempty"` + Type string `json:"type,omitempty"` +} + +func (s *WildcardProperty) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_to": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return fmt.Errorf("%s | %w", "CopyTo", err) + } + } + + case "doc_values": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DocValues", err) + } + s.DocValues = &value + case bool: + s.DocValues = &v + } + + case "dynamic": + if err := dec.Decode(&s.Dynamic); err != nil { + return fmt.Errorf("%s | %w", "Dynamic", err) + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() 
+ if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case 
"integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "long_range": + oo := NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Fields | %w", err) + } + s.Fields[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Fields[key] = oo + } + } + + case "ignore_above": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreAbove", err) + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f + } + + case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } + if err := dec.Decode(&s.Meta); err != nil { + return fmt.Errorf("%s | %w", "Meta", err) + } + + case "null_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "NullValue", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o + + case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } + refs := make(map[string]json.RawMessage, 0) + dec.Decode(&refs) + for key, message := range refs { + kind := make(map[string]any) + buf := bytes.NewReader(message) + localDec := json.NewDecoder(buf) + localDec.Decode(&kind) + buf.Seek(0, io.SeekStart) + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } + switch kind["type"] { + case "binary": + oo := NewBinaryProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "boolean": + oo := NewBooleanProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "{dynamic_type}": + oo := NewDynamicProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "join": + oo := NewJoinProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "keyword": + oo := NewKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "match_only_text": + oo := NewMatchOnlyTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "percolator": + oo := NewPercolatorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_feature": + oo := NewRankFeatureProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_features": + oo := NewRankFeaturesProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case 
"search_as_you_type": + oo := NewSearchAsYouTypeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "text": + oo := NewTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "version": + oo := NewVersionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "wildcard": + oo := NewWildcardProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_nanos": + oo := NewDateNanosProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date": + oo := NewDateProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "aggregate_metric_double": + oo := NewAggregateMetricDoubleProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "dense_vector": + oo := NewDenseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "flattened": + oo := NewFlattenedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "nested": + oo := NewNestedProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "object": + oo := NewObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "passthrough": + oo := NewPassthroughObjectProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "rank_vectors": + oo := NewRankVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "semantic_text": + oo := NewSemanticTextProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "sparse_vector": + oo := NewSparseVectorProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "completion": + oo := NewCompletionProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "constant_keyword": + oo := NewConstantKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "counted_keyword": + oo := NewCountedKeywordProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "alias": + oo := NewFieldAliasProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "histogram": + oo := NewHistogramProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip": + oo := NewIpProperty() + if err := localDec.Decode(&oo); err != nil 
{ + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "murmur3": + oo := NewMurmur3HashProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "token_count": + oo := NewTokenCountProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_point": + oo := NewGeoPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "geo_shape": + oo := NewGeoShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "point": + oo := NewPointProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "shape": + oo := NewShapeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "byte": + oo := NewByteNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double": + oo := NewDoubleNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float": + oo := NewFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "half_float": + oo := NewHalfFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer": + oo := NewIntegerNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long": + oo := NewLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "scaled_float": + oo := NewScaledFloatNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "short": + oo := NewShortNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "unsigned_long": + oo := NewUnsignedLongNumberProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "date_range": + oo := NewDateRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "double_range": + oo := NewDoubleRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "float_range": + oo := NewFloatRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "integer_range": + oo := NewIntegerRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "ip_range": + oo := NewIpRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "long_range": + oo := 
NewLongRangeProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + case "icu_collation_keyword": + oo := NewIcuCollationProperty() + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("Properties | %w", err) + } + s.Properties[key] = oo + default: + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { + return fmt.Errorf("new(Property) | %w", err) + } + s.Properties[key] = oo + } + } + + case "store": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Store", err) + } + s.Store = &value + case bool: + s.Store = &v + } + + case "synthetic_source_keep": + if err := dec.Decode(&s.SyntheticSourceKeep); err != nil { + return fmt.Errorf("%s | %w", "SyntheticSourceKeep", err) + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WildcardProperty) MarshalJSON() ([]byte, error) { + type innerWildcardProperty WildcardProperty + tmp := innerWildcardProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Store: s.Store, + SyntheticSourceKeep: s.SyntheticSourceKeep, + Type: s.Type, + } + + tmp.Type = "wildcard" + + return json.Marshal(tmp) +} + +// NewWildcardProperty returns a WildcardProperty. +func NewWildcardProperty() *WildcardProperty { + r := &WildcardProperty{ + Fields: make(map[string]Property), + Meta: make(map[string]string), + Properties: make(map[string]Property), + } + + return r +} + +type WildcardPropertyVariant interface { + WildcardPropertyCaster() *WildcardProperty +} + +func (s *WildcardProperty) WildcardPropertyCaster() *WildcardProperty { + return s +} + +func (s *WildcardProperty) PropertyCaster() *Property { + o := Property(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardquery.go new file mode 100644 index 000000000..cbedbac5f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wildcardquery.go @@ -0,0 +1,173 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WildcardQuery type. 
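+//
+// Example (editor's note: an illustrative sketch, not part of the generated
+// file). Building a case-insensitive pattern query; note that Value and
+// Wildcard, described below, are alternative ways to supply the pattern:
+//
+//	q := types.NewWildcardQuery()
+//	pattern := "ki*y"
+//	q.Value = &pattern
+//	insensitive := true
+//	q.CaseInsensitive = &insensitive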
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/term.ts#L301-L321 +type WildcardQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows case insensitive matching of the pattern with the indexed field values + // when set to true. Default is false which means the case sensitivity of + // matching depends on the underlying field’s mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Value Wildcard pattern for terms you wish to find in the provided field. Required, + // when wildcard is not set. + Value *string `json:"value,omitempty"` + // Wildcard Wildcard pattern for terms you wish to find in the provided field. Required, + // when value is not set. + Wildcard *string `json:"wildcard,omitempty"` +} + +func (s *WildcardQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + if !bytes.HasPrefix(data, []byte(`"`)) { + data = append([]byte{'"'}, data...) + data = append(data, []byte{'"'}...) + } + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + if err != nil { + return err + } + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CaseInsensitive", err) + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return fmt.Errorf("%s | %w", "Rewrite", err) + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Value", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = &o + + case "wildcard": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Wildcard", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Wildcard = &o + + } + } + return nil +} + +// NewWildcardQuery returns a WildcardQuery. 
+func NewWildcardQuery() *WildcardQuery { + r := &WildcardQuery{} + + return r +} + +type WildcardQueryVariant interface { + WildcardQueryCaster() *WildcardQuery +} + +func (s *WildcardQuery) WildcardQueryCaster() *WildcardQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wktgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wktgeobounds.go new file mode 100644 index 000000000..c93067ecf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wktgeobounds.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WktGeoBounds type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Geo.ts#L150-L152 +type WktGeoBounds struct { + Wkt string `json:"wkt"` +} + +func (s *WktGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "wkt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Wkt", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Wkt = o + + } + } + return nil +} + +// NewWktGeoBounds returns a WktGeoBounds. +func NewWktGeoBounds() *WktGeoBounds { + r := &WktGeoBounds{} + + return r +} + +type WktGeoBoundsVariant interface { + WktGeoBoundsCaster() *WktGeoBounds +} + +func (s *WktGeoBounds) WktGeoBoundsCaster() *WktGeoBounds { + return s +} + +func (s *WktGeoBounds) GeoBoundsCaster() *GeoBounds { + o := GeoBounds(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitergraphtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitergraphtokenfilter.go new file mode 100644 index 000000000..1ea473dd7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitergraphtokenfilter.go @@ -0,0 +1,346 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WordDelimiterGraphTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L205-L211 +type WordDelimiterGraphTokenFilter struct { + // AdjustOffsets If `true`, the filter adjusts the offsets of split or catenated tokens to + // better reflect their actual position in the token stream. Defaults to `true`. + AdjustOffsets *bool `json:"adjust_offsets,omitempty"` + // CatenateAll If `true`, the filter produces catenated tokens for chains of alphanumeric + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateAll *bool `json:"catenate_all,omitempty"` + // CatenateNumbers If `true`, the filter produces catenated tokens for chains of numeric + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateNumbers *bool `json:"catenate_numbers,omitempty"` + // CatenateWords If `true`, the filter produces catenated tokens for chains of alphabetical + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateWords *bool `json:"catenate_words,omitempty"` + // GenerateNumberParts If `true`, the filter includes tokens consisting of only numeric characters + // in the output. If `false`, the filter excludes these tokens from the output. + // Defaults to `true`. + GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` + // GenerateWordParts If `true`, the filter includes tokens consisting of only alphabetical + // characters in the output. If `false`, the filter excludes these tokens from + // the output. Defaults to `true`. + GenerateWordParts *bool `json:"generate_word_parts,omitempty"` + // IgnoreKeywords If `true`, the filter skips tokens with a keyword attribute of true. Defaults + // to `false`. + IgnoreKeywords *bool `json:"ignore_keywords,omitempty"` + // PreserveOriginal If `true`, the filter includes the original version of any split tokens in + // the output. This original version includes non-alphanumeric delimiters. + // Defaults to `false`. + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + // ProtectedWords Array of tokens the filter won’t split. + ProtectedWords []string `json:"protected_words,omitempty"` + // ProtectedWordsPath Path to a file that contains a list of tokens the filter won’t split. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each token in the file must be separated by a line + // break. + ProtectedWordsPath *string `json:"protected_words_path,omitempty"` + // SplitOnCaseChange If `true`, the filter splits tokens at letter case transitions. For example: + // camelCase -> [ camel, Case ]. Defaults to `true`. 
+ SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` + // SplitOnNumerics If `true`, the filter splits tokens at letter-number transitions. For + // example: j2se -> [ j, 2, se ]. Defaults to `true`. + SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` + // StemEnglishPossessive If `true`, the filter removes the English possessive (`'s`) from the end of + // each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. + StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` + Type string `json:"type,omitempty"` + // TypeTable Array of custom type mappings for characters. This allows you to map + // non-alphanumeric characters as numeric or alphanumeric to avoid splitting on + // those characters. + TypeTable []string `json:"type_table,omitempty"` + // TypeTablePath Path to a file that contains custom type mappings for characters. This allows + // you to map non-alphanumeric characters as numeric or alphanumeric to avoid + // splitting on those characters. + TypeTablePath *string `json:"type_table_path,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WordDelimiterGraphTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjust_offsets": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "AdjustOffsets", err) + } + s.AdjustOffsets = &value + case bool: + s.AdjustOffsets = &v + } + + case "catenate_all": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateAll", err) + } + s.CatenateAll = &value + case bool: + s.CatenateAll = &v + } + + case "catenate_numbers": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateNumbers", err) + } + s.CatenateNumbers = &value + case bool: + s.CatenateNumbers = &v + } + + case "catenate_words": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateWords", err) + } + s.CatenateWords = &value + case bool: + s.CatenateWords = &v + } + + case "generate_number_parts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "GenerateNumberParts", err) + } + s.GenerateNumberParts = &value + case bool: + s.GenerateNumberParts = &v + } + + case "generate_word_parts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "GenerateWordParts", err) + } + s.GenerateWordParts = &value + case bool: + s.GenerateWordParts = &v + } + + case "ignore_keywords": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "IgnoreKeywords", err) + } + s.IgnoreKeywords = &value + case bool: + s.IgnoreKeywords = &v + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case 
"protected_words": + if err := dec.Decode(&s.ProtectedWords); err != nil { + return fmt.Errorf("%s | %w", "ProtectedWords", err) + } + + case "protected_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProtectedWordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProtectedWordsPath = &o + + case "split_on_case_change": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitOnCaseChange", err) + } + s.SplitOnCaseChange = &value + case bool: + s.SplitOnCaseChange = &v + } + + case "split_on_numerics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitOnNumerics", err) + } + s.SplitOnNumerics = &value + case bool: + s.SplitOnNumerics = &v + } + + case "stem_english_possessive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StemEnglishPossessive", err) + } + s.StemEnglishPossessive = &value + case bool: + s.StemEnglishPossessive = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "type_table": + if err := dec.Decode(&s.TypeTable); err != nil { + return fmt.Errorf("%s | %w", "TypeTable", err) + } + + case "type_table_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TypeTablePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeTablePath = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WordDelimiterGraphTokenFilter) MarshalJSON() ([]byte, error) { + type innerWordDelimiterGraphTokenFilter WordDelimiterGraphTokenFilter + tmp := innerWordDelimiterGraphTokenFilter{ + AdjustOffsets: s.AdjustOffsets, + CatenateAll: s.CatenateAll, + CatenateNumbers: s.CatenateNumbers, + CatenateWords: s.CatenateWords, + GenerateNumberParts: s.GenerateNumberParts, + GenerateWordParts: s.GenerateWordParts, + IgnoreKeywords: s.IgnoreKeywords, + PreserveOriginal: s.PreserveOriginal, + ProtectedWords: s.ProtectedWords, + ProtectedWordsPath: s.ProtectedWordsPath, + SplitOnCaseChange: s.SplitOnCaseChange, + SplitOnNumerics: s.SplitOnNumerics, + StemEnglishPossessive: s.StemEnglishPossessive, + Type: s.Type, + TypeTable: s.TypeTable, + TypeTablePath: s.TypeTablePath, + Version: s.Version, + } + + tmp.Type = "word_delimiter_graph" + + return json.Marshal(tmp) +} + +// NewWordDelimiterGraphTokenFilter returns a WordDelimiterGraphTokenFilter. 
+func NewWordDelimiterGraphTokenFilter() *WordDelimiterGraphTokenFilter { + r := &WordDelimiterGraphTokenFilter{} + + return r +} + +type WordDelimiterGraphTokenFilterVariant interface { + WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter +} + +func (s *WordDelimiterGraphTokenFilter) WordDelimiterGraphTokenFilterCaster() *WordDelimiterGraphTokenFilter { + return s +} + +func (s *WordDelimiterGraphTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitertokenfilter.go new file mode 100644 index 000000000..0e91a0d95 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/worddelimitertokenfilter.go @@ -0,0 +1,310 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WordDelimiterTokenFilter type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/analysis/token_filters.ts#L201-L203 +type WordDelimiterTokenFilter struct { + // CatenateAll If `true`, the filter produces catenated tokens for chains of alphanumeric + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateAll *bool `json:"catenate_all,omitempty"` + // CatenateNumbers If `true`, the filter produces catenated tokens for chains of numeric + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateNumbers *bool `json:"catenate_numbers,omitempty"` + // CatenateWords If `true`, the filter produces catenated tokens for chains of alphabetical + // characters separated by non-alphabetic delimiters. Defaults to `false`. + CatenateWords *bool `json:"catenate_words,omitempty"` + // GenerateNumberParts If `true`, the filter includes tokens consisting of only numeric characters + // in the output. If `false`, the filter excludes these tokens from the output. + // Defaults to `true`. + GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` + // GenerateWordParts If `true`, the filter includes tokens consisting of only alphabetical + // characters in the output. If `false`, the filter excludes these tokens from + // the output. Defaults to `true`. 
+ GenerateWordParts *bool `json:"generate_word_parts,omitempty"` + // PreserveOriginal If `true`, the filter includes the original version of any split tokens in + // the output. This original version includes non-alphanumeric delimiters. + // Defaults to `false`. + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + // ProtectedWords Array of tokens the filter won’t split. + ProtectedWords []string `json:"protected_words,omitempty"` + // ProtectedWordsPath Path to a file that contains a list of tokens the filter won’t split. + // This path must be absolute or relative to the `config` location, and the file + // must be UTF-8 encoded. Each token in the file must be separated by a line + // break. + ProtectedWordsPath *string `json:"protected_words_path,omitempty"` + // SplitOnCaseChange If `true`, the filter splits tokens at letter case transitions. For example: + // camelCase -> [ camel, Case ]. Defaults to `true`. + SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` + // SplitOnNumerics If `true`, the filter splits tokens at letter-number transitions. For + // example: j2se -> [ j, 2, se ]. Defaults to `true`. + SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` + // StemEnglishPossessive If `true`, the filter removes the English possessive (`'s`) from the end of + // each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. + StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` + Type string `json:"type,omitempty"` + // TypeTable Array of custom type mappings for characters. This allows you to map + // non-alphanumeric characters as numeric or alphanumeric to avoid splitting on + // those characters. + TypeTable []string `json:"type_table,omitempty"` + // TypeTablePath Path to a file that contains custom type mappings for characters. This allows + // you to map non-alphanumeric characters as numeric or alphanumeric to avoid + // splitting on those characters. 
+ TypeTablePath *string `json:"type_table_path,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WordDelimiterTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "catenate_all": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateAll", err) + } + s.CatenateAll = &value + case bool: + s.CatenateAll = &v + } + + case "catenate_numbers": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateNumbers", err) + } + s.CatenateNumbers = &value + case bool: + s.CatenateNumbers = &v + } + + case "catenate_words": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "CatenateWords", err) + } + s.CatenateWords = &value + case bool: + s.CatenateWords = &v + } + + case "generate_number_parts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "GenerateNumberParts", err) + } + s.GenerateNumberParts = &value + case bool: + s.GenerateNumberParts = &v + } + + case "generate_word_parts": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "GenerateWordParts", err) + } + s.GenerateWordParts = &value + case bool: + s.GenerateWordParts = &v + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return fmt.Errorf("%s | %w", "PreserveOriginal", err) + } + + case "protected_words": + if err := dec.Decode(&s.ProtectedWords); err != nil { + return fmt.Errorf("%s | %w", "ProtectedWords", err) + } + + case "protected_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ProtectedWordsPath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProtectedWordsPath = &o + + case "split_on_case_change": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitOnCaseChange", err) + } + s.SplitOnCaseChange = &value + case bool: + s.SplitOnCaseChange = &v + } + + case "split_on_numerics": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "SplitOnNumerics", err) + } + s.SplitOnNumerics = &value + case bool: + s.SplitOnNumerics = &v + } + + case "stem_english_possessive": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "StemEnglishPossessive", err) + } + s.StemEnglishPossessive = &value + case bool: + s.StemEnglishPossessive = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return fmt.Errorf("%s | %w", "Type", err) + } + + case "type_table": + if err := dec.Decode(&s.TypeTable); err != nil { + return fmt.Errorf("%s | %w", "TypeTable", err) + } + + case "type_table_path": + var 
tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "TypeTablePath", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeTablePath = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return fmt.Errorf("%s | %w", "Version", err) + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WordDelimiterTokenFilter) MarshalJSON() ([]byte, error) { + type innerWordDelimiterTokenFilter WordDelimiterTokenFilter + tmp := innerWordDelimiterTokenFilter{ + CatenateAll: s.CatenateAll, + CatenateNumbers: s.CatenateNumbers, + CatenateWords: s.CatenateWords, + GenerateNumberParts: s.GenerateNumberParts, + GenerateWordParts: s.GenerateWordParts, + PreserveOriginal: s.PreserveOriginal, + ProtectedWords: s.ProtectedWords, + ProtectedWordsPath: s.ProtectedWordsPath, + SplitOnCaseChange: s.SplitOnCaseChange, + SplitOnNumerics: s.SplitOnNumerics, + StemEnglishPossessive: s.StemEnglishPossessive, + Type: s.Type, + TypeTable: s.TypeTable, + TypeTablePath: s.TypeTablePath, + Version: s.Version, + } + + tmp.Type = "word_delimiter" + + return json.Marshal(tmp) +} + +// NewWordDelimiterTokenFilter returns a WordDelimiterTokenFilter. +func NewWordDelimiterTokenFilter() *WordDelimiterTokenFilter { + r := &WordDelimiterTokenFilter{} + + return r +} + +type WordDelimiterTokenFilterVariant interface { + WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter +} + +func (s *WordDelimiterTokenFilter) WordDelimiterTokenFilterCaster() *WordDelimiterTokenFilter { + return s +} + +func (s *WordDelimiterTokenFilter) TokenFilterDefinitionCaster() *TokenFilterDefinition { + o := TokenFilterDefinition(s) + return &o +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wrapperquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wrapperquery.go new file mode 100644 index 000000000..09f340640 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/wrapperquery.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WrapperQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/query_dsl/abstractions.ts#L508-L517 +type WrapperQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. 
+ // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Query A base64 encoded query. + // The binary data format can be any of JSON, YAML, CBOR or SMILE encodings + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *WrapperQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return fmt.Errorf("%s | %w", "Boost", err) + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "QueryName_", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewWrapperQuery returns a WrapperQuery. +func NewWrapperQuery() *WrapperQuery { + r := &WrapperQuery{} + + return r +} + +type WrapperQueryVariant interface { + WrapperQueryCaster() *WrapperQuery +} + +func (s *WrapperQuery) WrapperQueryCaster() *WrapperQuery { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writeresponsebase.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writeresponsebase.go new file mode 100644 index 000000000..8fe06acb6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writeresponsebase.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result" +) + +// WriteResponseBase type. 
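Because `WrapperQuery.Query` (defined just above in this hunk) carries a base64-encoded query body, callers encode the raw JSON themselves. A sketch using only the standard library; the term query is an arbitrary example:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
)

func main() {
	raw := `{"term":{"user.id":{"value":"kimchy"}}}` // any JSON/YAML/CBOR/SMILE query body
	wq := types.NewWrapperQuery()
	wq.Query = base64.StdEncoding.EncodeToString([]byte(raw))

	body, err := json.Marshal(wq)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"query":"<base64 of raw>"}
}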
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/_types/Base.ts#L36-L67 +type WriteResponseBase struct { + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + // Id_ The unique identifier for the added document. + Id_ string `json:"_id"` + // Index_ The name of the index the document was added to. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the indexing operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result The result of the indexing operation: `created` or `updated`. + Result result.Result `json:"result"` + // SeqNo_ The sequence number assigned to the document for the indexing operation. + // Sequence numbers are used to ensure an older version of a document doesn't + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Information about the replication process of the operation. + Shards_ ShardStatistics `json:"_shards"` + // Version_ The document version, which is incremented each time the document is updated. + Version_ int64 `json:"_version"` +} + +func (s *WriteResponseBase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "forced_refresh": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "ForcedRefresh", err) + } + s.ForcedRefresh = &value + case bool: + s.ForcedRefresh = &v + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return fmt.Errorf("%s | %w", "Id_", err) + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return fmt.Errorf("%s | %w", "Index_", err) + } + + case "_primary_term": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "PrimaryTerm_", err) + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "result": + if err := dec.Decode(&s.Result); err != nil { + return fmt.Errorf("%s | %w", "Result", err) + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return fmt.Errorf("%s | %w", "SeqNo_", err) + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return fmt.Errorf("%s | %w", "Shards_", err) + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return fmt.Errorf("%s | %w", "Version_", err) + } + + } + } + return nil +} + +// NewWriteResponseBase returns a WriteResponseBase. +func NewWriteResponseBase() *WriteResponseBase { + r := &WriteResponseBase{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writesummaryinfo.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writesummaryinfo.go new file mode 100644 index 000000000..e674f70c4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/writesummaryinfo.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// WriteSummaryInfo type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/snapshot/repository_analyze/SnapshotAnalyzeRepositoryResponse.ts#L162-L191 +type WriteSummaryInfo struct { + // Count The number of write operations performed in the test. + Count int `json:"count"` + // TotalElapsed The total elapsed time spent on writing blobs in the test. + TotalElapsed Duration `json:"total_elapsed"` + // TotalElapsedNanos The total elapsed time spent on writing blobs in the test, in nanoseconds. + TotalElapsedNanos int64 `json:"total_elapsed_nanos"` + // TotalSize The total size of all the blobs written in the test. + TotalSize ByteSize `json:"total_size"` + // TotalSizeBytes The total size of all the blobs written in the test, in bytes. + TotalSizeBytes int64 `json:"total_size_bytes"` + // TotalThrottled The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle. + TotalThrottled Duration `json:"total_throttled"` + // TotalThrottledNanos The total time spent waiting due to the `max_snapshot_bytes_per_sec` + // throttle, in nanoseconds. 
+ TotalThrottledNanos int64 `json:"total_throttled_nanos"` +} + +func (s *WriteSummaryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "total_elapsed": + if err := dec.Decode(&s.TotalElapsed); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsed", err) + } + + case "total_elapsed_nanos": + if err := dec.Decode(&s.TotalElapsedNanos); err != nil { + return fmt.Errorf("%s | %w", "TotalElapsedNanos", err) + } + + case "total_size": + if err := dec.Decode(&s.TotalSize); err != nil { + return fmt.Errorf("%s | %w", "TotalSize", err) + } + + case "total_size_bytes": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalSizeBytes", err) + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + case "total_throttled": + if err := dec.Decode(&s.TotalThrottled); err != nil { + return fmt.Errorf("%s | %w", "TotalThrottled", err) + } + + case "total_throttled_nanos": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "TotalThrottledNanos", err) + } + s.TotalThrottledNanos = value + case float64: + f := int64(v) + s.TotalThrottledNanos = f + } + + } + } + return nil +} + +// NewWriteSummaryInfo returns a WriteSummaryInfo. +func NewWriteSummaryInfo() *WriteSummaryInfo { + r := &WriteSummaryInfo{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xlmrobertatokenizationconfig.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xlmrobertatokenizationconfig.go new file mode 100644 index 000000000..2c4092a2b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xlmrobertatokenizationconfig.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate" +) + +// XlmRobertaTokenizationConfig type. 
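`WriteSummaryInfo` (decoded above) reports each total twice, once human-readable and once numeric; the `_bytes`/`_nanos` fields are the ones to compute with. A hypothetical helper, where the function name and the MiB/s unit are the editor's choice:

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// throughputMiBps derives the average write rate of a repository-analyze run
// from the numeric byte and nanosecond totals.
func throughputMiBps(w types.WriteSummaryInfo) float64 {
	if w.TotalElapsedNanos == 0 {
		return 0 // avoid dividing by zero on an empty or instantaneous run
	}
	seconds := float64(w.TotalElapsedNanos) / 1e9
	return float64(w.TotalSizeBytes) / (1024 * 1024) / seconds
}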
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L200-L200 +type XlmRobertaTokenizationConfig struct { + // DoLowerCase Should the tokenizer lower case the text + DoLowerCase *bool `json:"do_lower_case,omitempty"` + // MaxSequenceLength Maximum input sequence length for the model + MaxSequenceLength *int `json:"max_sequence_length,omitempty"` + // Span Tokenization spanning options. Special value of -1 indicates no spanning + // takes place + Span *int `json:"span,omitempty"` + // Truncate Should tokenization input be automatically truncated before sending to the + // model for inference + Truncate *tokenizationtruncate.TokenizationTruncate `json:"truncate,omitempty"` + // WithSpecialTokens Is tokenization completed with special tokens + WithSpecialTokens *bool `json:"with_special_tokens,omitempty"` +} + +func (s *XlmRobertaTokenizationConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "do_lower_case": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "DoLowerCase", err) + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + + case "max_sequence_length": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "MaxSequenceLength", err) + } + s.MaxSequenceLength = &value + case float64: + f := int(v) + s.MaxSequenceLength = &f + } + + case "span": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Span", err) + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return fmt.Errorf("%s | %w", "Truncate", err) + } + + case "with_special_tokens": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "WithSpecialTokens", err) + } + s.WithSpecialTokens = &value + case bool: + s.WithSpecialTokens = &v + } + + } + } + return nil +} + +// NewXlmRobertaTokenizationConfig returns a XlmRobertaTokenizationConfig. +func NewXlmRobertaTokenizationConfig() *XlmRobertaTokenizationConfig { + r := &XlmRobertaTokenizationConfig{} + + return r +} + +type XlmRobertaTokenizationConfigVariant interface { + XlmRobertaTokenizationConfigCaster() *XlmRobertaTokenizationConfig +} + +func (s *XlmRobertaTokenizationConfig) XlmRobertaTokenizationConfigCaster() *XlmRobertaTokenizationConfig { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackdatafeed.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackdatafeed.go new file mode 100644 index 000000000..7343c9b1a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackdatafeed.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackDatafeed type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L74-L76 +type XpackDatafeed struct { + Count int64 `json:"count"` +} + +func (s *XpackDatafeed) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + +// NewXpackDatafeed returns a XpackDatafeed. +func NewXpackDatafeed() *XpackDatafeed { + r := &XpackDatafeed{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeature.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeature.go new file mode 100644 index 000000000..bf959f965 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeature.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackFeature type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/types.ts#L84-L89 +type XpackFeature struct { + Available bool `json:"available"` + Description *string `json:"description,omitempty"` + Enabled bool `json:"enabled"` + NativeCodeInfo *NativeCodeInformation `json:"native_code_info,omitempty"` +} + +func (s *XpackFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "Description", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "native_code_info": + if err := dec.Decode(&s.NativeCodeInfo); err != nil { + return fmt.Errorf("%s | %w", "NativeCodeInfo", err) + } + + } + } + return nil +} + +// NewXpackFeature returns a XpackFeature. +func NewXpackFeature() *XpackFeature { + r := &XpackFeature{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeatures.go new file mode 100644 index 000000000..63addcee2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackfeatures.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +// XpackFeatures type. 
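`XpackFeature` above exposes `available` (covered by the license) and `enabled` (switched on) as separate flags, so both need checking before relying on a feature. A small illustrative helper, assuming nothing beyond that struct:

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

// usable reports whether a feature is both licensed and turned on.
func usable(f types.XpackFeature) bool {
	return f.Available && f.Enabled
}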
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/types.ts#L42-L82 +type XpackFeatures struct { + AggregateMetric XpackFeature `json:"aggregate_metric"` + Analytics XpackFeature `json:"analytics"` + Archive XpackFeature `json:"archive"` + Ccr XpackFeature `json:"ccr"` + DataStreams XpackFeature `json:"data_streams"` + DataTiers XpackFeature `json:"data_tiers"` + Enrich XpackFeature `json:"enrich"` + EnterpriseSearch XpackFeature `json:"enterprise_search"` + Eql XpackFeature `json:"eql"` + Esql XpackFeature `json:"esql"` + Graph XpackFeature `json:"graph"` + Ilm XpackFeature `json:"ilm"` + Logsdb XpackFeature `json:"logsdb"` + Logstash XpackFeature `json:"logstash"` + Ml XpackFeature `json:"ml"` + Monitoring XpackFeature `json:"monitoring"` + Rollup XpackFeature `json:"rollup"` + RuntimeFields *XpackFeature `json:"runtime_fields,omitempty"` + SearchableSnapshots XpackFeature `json:"searchable_snapshots"` + Security XpackFeature `json:"security"` + Slm XpackFeature `json:"slm"` + Spatial XpackFeature `json:"spatial"` + Sql XpackFeature `json:"sql"` + Transform XpackFeature `json:"transform"` + UniversalProfiling XpackFeature `json:"universal_profiling"` + VotingOnly XpackFeature `json:"voting_only"` + Watcher XpackFeature `json:"watcher"` +} + +// NewXpackFeatures returns a XpackFeatures. +func NewXpackFeatures() *XpackFeatures { + r := &XpackFeatures{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackquery.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackquery.go new file mode 100644 index 000000000..6ec01335d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackquery.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackQuery type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L269-L274 +type XpackQuery struct { + Count *int `json:"count,omitempty"` + Failed *int `json:"failed,omitempty"` + Paging *int `json:"paging,omitempty"` + Total *int `json:"total,omitempty"` +} + +func (s *XpackQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Count", err) + } + s.Count = &value + case float64: + f := int(v) + s.Count = &f + } + + case "failed": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Failed", err) + } + s.Failed = &value + case float64: + f := int(v) + s.Failed = &f + } + + case "paging": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Paging", err) + } + s.Paging = &value + case float64: + f := int(v) + s.Paging = &f + } + + case "total": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Total", err) + } + s.Total = &value + case float64: + f := int(v) + s.Total = &f + } + + } + } + return nil +} + +// NewXpackQuery returns a XpackQuery. +func NewXpackQuery() *XpackQuery { + r := &XpackQuery{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrealm.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrealm.go new file mode 100644 index 000000000..fa878e7f5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrealm.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackRealm type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L423-L432 +type XpackRealm struct { + Available bool `json:"available"` + Cache []RealmCache `json:"cache,omitempty"` + Enabled bool `json:"enabled"` + HasAuthorizationRealms []bool `json:"has_authorization_realms,omitempty"` + HasDefaultUsernamePattern []bool `json:"has_default_username_pattern,omitempty"` + HasTruststore []bool `json:"has_truststore,omitempty"` + IsAuthenticationDelegated []bool `json:"is_authentication_delegated,omitempty"` + Name []string `json:"name,omitempty"` + Order []int64 `json:"order,omitempty"` + Size []int64 `json:"size,omitempty"` +} + +func (s *XpackRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "cache": + if err := dec.Decode(&s.Cache); err != nil { + return fmt.Errorf("%s | %w", "Cache", err) + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "has_authorization_realms": + if err := dec.Decode(&s.HasAuthorizationRealms); err != nil { + return fmt.Errorf("%s | %w", "HasAuthorizationRealms", err) + } + + case "has_default_username_pattern": + if err := dec.Decode(&s.HasDefaultUsernamePattern); err != nil { + return fmt.Errorf("%s | %w", "HasDefaultUsernamePattern", err) + } + + case "has_truststore": + if err := dec.Decode(&s.HasTruststore); err != nil { + return fmt.Errorf("%s | %w", "HasTruststore", err) + } + + case "is_authentication_delegated": + if err := dec.Decode(&s.IsAuthenticationDelegated); err != nil { + return fmt.Errorf("%s | %w", "IsAuthenticationDelegated", err) + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return fmt.Errorf("%s | %w", "Name", err) + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return fmt.Errorf("%s | %w", "Order", err) + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + + } + } + return nil +} + +// NewXpackRealm returns a XpackRealm. +func NewXpackRealm() *XpackRealm { + r := &XpackRealm{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrolemapping.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrolemapping.go new file mode 100644 index 000000000..f377172e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackrolemapping.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackRoleMapping type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L280-L283 +type XpackRoleMapping struct { + Enabled int `json:"enabled"` + Size int `json:"size"` +} + +func (s *XpackRoleMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case float64: + f := int(v) + s.Enabled = f + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = value + case float64: + f := int(v) + s.Size = f + } + + } + } + return nil +} + +// NewXpackRoleMapping returns a XpackRoleMapping. +func NewXpackRoleMapping() *XpackRoleMapping { + r := &XpackRoleMapping{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackruntimefieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackruntimefieldtypes.go new file mode 100644 index 000000000..abeeed80c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/xpackruntimefieldtypes.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// XpackRuntimeFieldTypes type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/types.ts#L285-L287 +type XpackRuntimeFieldTypes struct { + Available bool `json:"available"` + Enabled bool `json:"enabled"` + FieldTypes []RuntimeFieldsType `json:"field_types"` +} + +func (s *XpackRuntimeFieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Available", err) + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "Enabled", err) + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "field_types": + if err := dec.Decode(&s.FieldTypes); err != nil { + return fmt.Errorf("%s | %w", "FieldTypes", err) + } + + } + } + return nil +} + +// NewXpackRuntimeFieldTypes returns a XpackRuntimeFieldTypes. +func NewXpackRuntimeFieldTypes() *XpackRuntimeFieldTypes { + r := &XpackRuntimeFieldTypes{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceoptions.go new file mode 100644 index 000000000..6ca3e3d81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceoptions.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ZeroShotClassificationInferenceOptions type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L216-L237 +type ZeroShotClassificationInferenceOptions struct { + // ClassificationLabels The zero shot classification labels indicating entailment, neutral, and + // contradiction + // Must contain exactly and only entailment, neutral, and contradiction + ClassificationLabels []string `json:"classification_labels"` + // HypothesisTemplate Hypothesis template used when tokenizing labels for prediction + HypothesisTemplate *string `json:"hypothesis_template,omitempty"` + // Labels The labels to predict. + Labels []string `json:"labels,omitempty"` + // MultiLabel Indicates if more than one true label exists. + MultiLabel *bool `json:"multi_label,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` +} + +func (s *ZeroShotClassificationInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return fmt.Errorf("%s | %w", "ClassificationLabels", err) + } + + case "hypothesis_template": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "HypothesisTemplate", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HypothesisTemplate = &o + + case "labels": + if err := dec.Decode(&s.Labels); err != nil { + return fmt.Errorf("%s | %w", "Labels", err) + } + + case "multi_label": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MultiLabel", err) + } + s.MultiLabel = &value + case bool: + s.MultiLabel = &v + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewZeroShotClassificationInferenceOptions returns a ZeroShotClassificationInferenceOptions. 
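Per the field docs above, `classification_labels` must contain exactly `entailment`, `neutral`, and `contradiction`, while `labels` holds the caller's own classes. A configuration sketch using the constructor below; the topic labels are hypothetical:

package example

import "github.com/elastic/go-elasticsearch/v9/typedapi/types"

func zeroShotOptions() *types.ZeroShotClassificationInferenceOptions {
	opts := types.NewZeroShotClassificationInferenceOptions()
	// Per the doc comment, exactly and only these three NLI labels are allowed.
	opts.ClassificationLabels = []string{"entailment", "neutral", "contradiction"}
	multi := false
	opts.MultiLabel = &multi
	opts.Labels = []string{"sports", "politics"} // hypothetical classes to predict
	return opts
}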
+func NewZeroShotClassificationInferenceOptions() *ZeroShotClassificationInferenceOptions { + r := &ZeroShotClassificationInferenceOptions{} + + return r +} + +type ZeroShotClassificationInferenceOptionsVariant interface { + ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions +} + +func (s *ZeroShotClassificationInferenceOptions) ZeroShotClassificationInferenceOptionsCaster() *ZeroShotClassificationInferenceOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceupdateoptions.go new file mode 100644 index 000000000..d9d934dc9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/types/zeroshotclassificationinferenceupdateoptions.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// ZeroShotClassificationInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/ml/_types/inference.ts#L393-L402 +type ZeroShotClassificationInferenceUpdateOptions struct { + // Labels The labels to predict. + Labels []string `json:"labels"` + // MultiLabel Update the configured multi label option. Indicates if more than one true + // label exists. Defaults to the configured value. + MultiLabel *bool `json:"multi_label,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options to update when inferring + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *ZeroShotClassificationInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "labels": + if err := dec.Decode(&s.Labels); err != nil { + return fmt.Errorf("%s | %w", "Labels", err) + } + + case "multi_label": + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("%s | %w", "MultiLabel", err) + } + s.MultiLabel = &value + case bool: + s.MultiLabel = &v + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return fmt.Errorf("%s | %w", "ResultsField", err) + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return fmt.Errorf("%s | %w", "Tokenization", err) + } + + } + } + return nil +} + +// NewZeroShotClassificationInferenceUpdateOptions returns a ZeroShotClassificationInferenceUpdateOptions. +func NewZeroShotClassificationInferenceUpdateOptions() *ZeroShotClassificationInferenceUpdateOptions { + r := &ZeroShotClassificationInferenceUpdateOptions{} + + return r +} + +type ZeroShotClassificationInferenceUpdateOptionsVariant interface { + ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions +} + +func (s *ZeroShotClassificationInferenceUpdateOptions) ZeroShotClassificationInferenceUpdateOptionsCaster() *ZeroShotClassificationInferenceUpdateOptions { + return s +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/ack_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/ack_watch.go new file mode 100644 index 000000000..ba88dab78 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/ack_watch.go @@ -0,0 +1,414 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Acknowledge a watch. +// Acknowledging a watch enables you to manually throttle the execution of the +// watch's actions. +// +// The acknowledgement state of an action is stored in the +// `status.actions..ack.state` structure. 
+//
+// IMPORTANT: If the specified watch is currently being executed, this API will
+// return an error.
+// The reason for this behavior is to prevent overwriting the watch status from
+// a watch execution.
+//
+// Acknowledging an action throttles further executions of that action until its
+// `ack.state` is reset to `awaits_successful_execution`.
+// This happens when the condition of the watch is not met (the condition
+// evaluates to false).
+// To demonstrate how throttling works in practice and how it can be configured
+// for individual actions within a watch, refer to External documentation.
+package ackwatch
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	watchidMask = iota + 1
+
+	actionidMask
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type AckWatch struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	watchid  string
+	actionid string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewAckWatch type alias for index.
+type NewAckWatch func(watchid string) *AckWatch
+
+// NewAckWatchFunc returns a new instance of AckWatch with the provided transport.
+// Used in the index of the library this allows to retrieve every apis in once place.
+func NewAckWatchFunc(tp elastictransport.Interface) NewAckWatch {
+	return func(watchid string) *AckWatch {
+		n := New(tp)
+
+		n._watchid(watchid)
+
+		return n
+	}
+}
+
+// Acknowledge a watch.
+// Acknowledging a watch enables you to manually throttle the execution of the
+// watch's actions.
+//
+// The acknowledgement state of an action is stored in the
+// `status.actions.<action-id>.ack.state` structure.
+//
+// IMPORTANT: If the specified watch is currently being executed, this API will
+// return an error.
+// The reason for this behavior is to prevent overwriting the watch status from
+// a watch execution.
+//
+// Acknowledging an action throttles further executions of that action until its
+// `ack.state` is reset to `awaits_successful_execution`.
+// This happens when the condition of the watch is not met (the condition
+// evaluates to false).
+// To demonstrate how throttling works in practice and how it can be configured
+// for individual actions within a watch, refer to External documentation.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch
+func New(tp elastictransport.Interface) *AckWatch {
+	r := &AckWatch{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
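+//
+// A minimal usage sketch, not part of the generated documentation: it assumes
+// tp is an already-configured elastictransport.Interface and ctx is a
+// context.Context, and it only builds the request without executing it.
+//
+//	req, err := ackwatch.NewAckWatchFunc(tp)("my-watch").HttpRequest(ctx)
+//	if err != nil {
+//		// handle the build error, for example a missing path parameter
+//	}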
+func (r *AckWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == watchidMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watchid", r.watchid) + } + path.WriteString(r.watchid) + path.WriteString("/") + path.WriteString("_ack") + + method = http.MethodPut + case r.paramSet == watchidMask|actionidMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watchid", r.watchid) + } + path.WriteString(r.watchid) + path.WriteString("/") + path.WriteString("_ack") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "actionid", r.actionid) + } + path.WriteString(r.actionid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
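+//
+// The response body is returned unread; the caller is responsible for closing
+// it. A sketch, with tp and ctx assumed as above:
+//
+//	res, err := ackwatch.NewAckWatchFunc(tp)("my-watch").Perform(ctx)
+//	if err == nil {
+//		defer res.Body.Close()
+//	}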
+func (r AckWatch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.ack_watch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.ack_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.ack_watch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.ack_watch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the AckWatch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a ackwatch.Response +func (r AckWatch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.ack_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
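+//
+// A 2xx status reports (true, nil), a 404 reports (false, nil), and any other
+// status reports (false, err). A sketch, with tp and ctx assumed as above:
+//
+//	acked, err := ackwatch.NewAckWatchFunc(tp)("my-watch").IsSuccess(ctx)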
+func (r AckWatch) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.ack_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the AckWatch query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the AckWatch headers map. +func (r *AckWatch) Header(key, value string) *AckWatch { + r.headers.Set(key, value) + + return r +} + +// WatchId The watch identifier. +// API Name: watchid +func (r *AckWatch) _watchid(watchid string) *AckWatch { + r.paramSet |= watchidMask + r.watchid = watchid + + return r +} + +// ActionId A comma-separated list of the action identifiers to acknowledge. +// If you omit this parameter, all of the actions of the watch are acknowledged. +// API Name: actionid +func (r *AckWatch) ActionId(actionid string) *AckWatch { + r.paramSet |= actionidMask + r.actionid = actionid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *AckWatch) ErrorTrace(errortrace bool) *AckWatch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *AckWatch) FilterPath(filterpaths ...string) *AckWatch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *AckWatch) Human(human bool) *AckWatch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *AckWatch) Pretty(pretty bool) *AckWatch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/response.go new file mode 100644 index 000000000..a15e981d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package ackwatch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package ackwatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 +type Response struct { + Status types.WatchStatus `json:"status"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/activate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/activate_watch.go new file mode 100644 index 000000000..29e0a4bfd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/activate_watch.go @@ -0,0 +1,348 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Activate a watch. +// A watch can be either active or inactive. +package activatewatch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + watchidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ActivateWatch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + watchid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewActivateWatch type alias for index. 
+type NewActivateWatch func(watchid string) *ActivateWatch + +// NewActivateWatchFunc returns a new instance of ActivateWatch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewActivateWatchFunc(tp elastictransport.Interface) NewActivateWatch { + return func(watchid string) *ActivateWatch { + n := New(tp) + + n._watchid(watchid) + + return n + } +} + +// Activate a watch. +// A watch can be either active or inactive. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch +func New(tp elastictransport.Interface) *ActivateWatch { + r := &ActivateWatch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ActivateWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == watchidMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watchid", r.watchid) + } + path.WriteString(r.watchid) + path.WriteString("/") + path.WriteString("_activate") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
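+//
+// Most callers use Do, which decodes the body into a Response; Perform hands
+// back the raw *http.Response instead. A sketch, assuming tp is a configured
+// elastictransport.Interface and ctx is a context.Context:
+//
+//	res, err := activatewatch.NewActivateWatchFunc(tp)("my-watch").Do(ctx)
+//	if err == nil {
+//		fmt.Printf("%+v\n", res.Status)
+//	}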
+func (r ActivateWatch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.activate_watch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.activate_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.activate_watch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.activate_watch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the ActivateWatch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a activatewatch.Response +func (r ActivateWatch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.activate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
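+//
+// For a quick boolean check, with tp and ctx assumed as above:
+//
+//	ok, err := activatewatch.NewActivateWatchFunc(tp)("my-watch").IsSuccess(ctx)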
+func (r ActivateWatch) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.activate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the ActivateWatch query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the ActivateWatch headers map. +func (r *ActivateWatch) Header(key, value string) *ActivateWatch { + r.headers.Set(key, value) + + return r +} + +// WatchId The watch identifier. +// API Name: watchid +func (r *ActivateWatch) _watchid(watchid string) *ActivateWatch { + r.paramSet |= watchidMask + r.watchid = watchid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *ActivateWatch) ErrorTrace(errortrace bool) *ActivateWatch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *ActivateWatch) FilterPath(filterpaths ...string) *ActivateWatch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *ActivateWatch) Human(human bool) *ActivateWatch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *ActivateWatch) Pretty(pretty bool) *ActivateWatch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/response.go new file mode 100644 index 000000000..0807f1041 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package activatewatch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package activatewatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 +type Response struct { + Status types.ActivationStatus `json:"status"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/deactivate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/deactivate_watch.go new file mode 100644 index 000000000..dcb9030da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/deactivate_watch.go @@ -0,0 +1,348 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Deactivate a watch. +// A watch can be either active or inactive. +package deactivatewatch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + watchidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeactivateWatch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + watchid string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeactivateWatch type alias for index. +type NewDeactivateWatch func(watchid string) *DeactivateWatch + +// NewDeactivateWatchFunc returns a new instance of DeactivateWatch with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeactivateWatchFunc(tp elastictransport.Interface) NewDeactivateWatch { + return func(watchid string) *DeactivateWatch { + n := New(tp) + + n._watchid(watchid) + + return n + } +} + +// Deactivate a watch. +// A watch can be either active or inactive. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch +func New(tp elastictransport.Interface) *DeactivateWatch { + r := &DeactivateWatch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeactivateWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == watchidMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "watchid", r.watchid) + } + path.WriteString(r.watchid) + path.WriteString("/") + path.WriteString("_deactivate") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
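+//
+// Headers set via Header are applied to the outgoing request. A sketch,
+// assuming tp is a configured elastictransport.Interface and ctx is a
+// context.Context (the header value is illustrative):
+//
+//	dw := deactivatewatch.NewDeactivateWatchFunc(tp)("my-watch")
+//	res, err := dw.Header("X-Opaque-Id", "ops-123").Perform(ctx)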
+func (r DeactivateWatch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.deactivate_watch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.deactivate_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.deactivate_watch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.deactivate_watch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeactivateWatch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deactivatewatch.Response +func (r DeactivateWatch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.deactivate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
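+//
+// The response body is drained and closed internally, so no cleanup is needed
+// after the call. A sketch, with tp and ctx assumed as above:
+//
+//	ok, err := deactivatewatch.NewDeactivateWatchFunc(tp)("my-watch").IsSuccess(ctx)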
+func (r DeactivateWatch) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.deactivate_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeactivateWatch query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeactivateWatch headers map. +func (r *DeactivateWatch) Header(key, value string) *DeactivateWatch { + r.headers.Set(key, value) + + return r +} + +// WatchId The watch identifier. +// API Name: watchid +func (r *DeactivateWatch) _watchid(watchid string) *DeactivateWatch { + r.paramSet |= watchidMask + r.watchid = watchid + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeactivateWatch) ErrorTrace(errortrace bool) *DeactivateWatch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeactivateWatch) FilterPath(filterpaths ...string) *DeactivateWatch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeactivateWatch) Human(human bool) *DeactivateWatch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeactivateWatch) Pretty(pretty bool) *DeactivateWatch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/response.go new file mode 100644 index 000000000..16b2efd3c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deactivatewatch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package deactivatewatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 +type Response struct { + Status types.ActivationStatus `json:"status"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/delete_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/delete_watch.go new file mode 100644 index 000000000..e7577d353 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/delete_watch.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. +package deletewatch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
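+// It can be matched with errors.Is; a sketch, assuming tp is a configured
+// elastictransport.Interface and ctx is a context.Context:
+//
+//	_, err := deletewatch.New(tp).HttpRequest(ctx)
+//	if errors.Is(err, deletewatch.ErrBuildPath) {
+//		// the watch id was never set
+//	}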
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteWatch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewDeleteWatch type alias for index. +type NewDeleteWatch func(id string) *DeleteWatch + +// NewDeleteWatchFunc returns a new instance of DeleteWatch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteWatchFunc(tp elastictransport.Interface) NewDeleteWatch { + return func(id string) *DeleteWatch { + n := New(tp) + + n._id(id) + + return n + } +} + +// Delete a watch. +// When the watch is removed, the document representing the watch in the +// `.watches` index is gone and it will never be run again. +// +// Deleting a watch does not delete any watch execution records related to this +// watch from the watch history. +// +// IMPORTANT: Deleting a watch must be done by using only this API. +// Do not delete the watch directly from the `.watches` index using the +// Elasticsearch delete document API +// When Elasticsearch security features are enabled, make sure no write +// privileges are granted to anyone for the `.watches` index. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch +func New(tp elastictransport.Interface) *DeleteWatch { + r := &DeleteWatch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
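+//
+// Transport-level failures come back wrapped, so the underlying error remains
+// available via errors.Unwrap. A sketch, with tp and ctx assumed as above:
+//
+//	res, err := deletewatch.NewDeleteWatchFunc(tp)("my-watch").Perform(ctx)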
+func (r DeleteWatch) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.delete_watch") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.delete_watch") + if reader := instrument.RecordRequestBody(ctx, "watcher.delete_watch", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.delete_watch") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the DeleteWatch query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletewatch.Response +func (r DeleteWatch) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.delete_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
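+//
+// Because deleting a missing watch answers with 404, a (false, nil) result
+// distinguishes "not found" from a real failure. A sketch, with tp and ctx
+// assumed as above:
+//
+//	found, err := deletewatch.NewDeleteWatchFunc(tp)("my-watch").IsSuccess(ctx)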
+func (r DeleteWatch) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.delete_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the DeleteWatch query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the DeleteWatch headers map. +func (r *DeleteWatch) Header(key, value string) *DeleteWatch { + r.headers.Set(key, value) + + return r +} + +// Id The watch identifier. +// API Name: id +func (r *DeleteWatch) _id(id string) *DeleteWatch { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *DeleteWatch) ErrorTrace(errortrace bool) *DeleteWatch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *DeleteWatch) FilterPath(filterpaths ...string) *DeleteWatch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *DeleteWatch) Human(human bool) *DeleteWatch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *DeleteWatch) Pretty(pretty bool) *DeleteWatch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/response.go new file mode 100644 index 000000000..dd46c8e3a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch/response.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package deletewatch + +// Response holds the response body struct for the package deletewatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 +type Response struct { + Found bool `json:"found"` + Id_ string `json:"_id"` + Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/execute_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/execute_watch.go new file mode 100644 index 000000000..cca08d28e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/execute_watch.go @@ -0,0 +1,559 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Run a watch. +// This API can be used to force execution of the watch outside of its +// triggering logic or to simulate the watch execution for debugging purposes. +// +// For testing and debugging purposes, you also have fine-grained control on how +// the watch runs. +// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. +// +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. 
+// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. +// Refer to the external documentation for examples of watch execution requests, +// including existing, customized, and inline watches. +package executewatch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExecuteWatch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewExecuteWatch type alias for index. +type NewExecuteWatch func() *ExecuteWatch + +// NewExecuteWatchFunc returns a new instance of ExecuteWatch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewExecuteWatchFunc(tp elastictransport.Interface) NewExecuteWatch { + return func() *ExecuteWatch { + n := New(tp) + + return n + } +} + +// Run a watch. +// This API can be used to force execution of the watch outside of its +// triggering logic or to simulate the watch execution for debugging purposes. +// +// For testing and debugging purposes, you also have fine-grained control on how +// the watch runs. +// You can run the watch without running all of its actions or alternatively by +// simulating them. +// You can also force execution by ignoring the watch condition and control +// whether a watch record would be written to the watch history after it runs. +// +// You can use the run watch API to run watches that are not yet registered by +// specifying the watch definition inline. +// This serves as great tool for testing and debugging your watches prior to +// adding them to Watcher. +// +// When Elasticsearch security features are enabled on your cluster, watches are +// run with the privileges of the user that stored the watches. +// If your user is allowed to read index `a`, but not index `b`, then the exact +// same set of rules will apply during execution of a watch. +// +// When using the run watch API, the authorization data of the user that called +// the API will be used as a base, instead of the information who stored the +// watch. +// Refer to the external documentation for examples of watch execution requests, +// including existing, customized, and inline watches. 
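+//
+// A minimal sketch that runs a stored watch in debug mode, assuming tp is a
+// configured elastictransport.Interface and ctx is a context.Context:
+//
+//	ew := executewatch.NewExecuteWatchFunc(tp)()
+//	res, err := ew.Id("my-watch").Debug(true).Do(ctx)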
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch +func New(tp elastictransport.Interface) *ExecuteWatch { + r := &ExecuteWatch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *ExecuteWatch) Raw(raw io.Reader) *ExecuteWatch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *ExecuteWatch) Request(req *Request) *ExecuteWatch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExecuteWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for ExecuteWatch: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + path.WriteString("/") + path.WriteString("_execute") + + method = http.MethodPut + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + path.WriteString("_execute") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
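+//
+// A request body set via Request or Raw is serialized just before the call; a
+// sketch that simulates one action instead of running it (the action name and
+// the Simulate enum value are illustrative):
+//
+//	ew := executewatch.NewExecuteWatchFunc(tp)().Id("my-watch")
+//	ew.AddActionMode("email_admin", actionexecutionmode.Simulate)
+//	res, err := ew.Perform(ctx)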
+func (r ExecuteWatch) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			// Assign to the outer ctx so the instrumented context propagates to
+			// the request below instead of being discarded.
+			ctx = instrument.Start(providedCtx, "watcher.execute_watch")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "watcher.execute_watch")
+		if reader := instrument.RecordRequestBody(ctx, "watcher.execute_watch", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "watcher.execute_watch")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the ExecuteWatch query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns an executewatch.Response
+func (r ExecuteWatch) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "watcher.execute_watch")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the ExecuteWatch headers map.
+func (r *ExecuteWatch) Header(key, value string) *ExecuteWatch {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id The watch identifier.
+// API Name: id
+func (r *ExecuteWatch) Id(id string) *ExecuteWatch {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// Debug Defines whether the watch runs in debug mode.
+// API name: debug
+func (r *ExecuteWatch) Debug(debug bool) *ExecuteWatch {
+	r.values.Set("debug", strconv.FormatBool(debug))
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *ExecuteWatch) ErrorTrace(errortrace bool) *ExecuteWatch {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *ExecuteWatch) FilterPath(filterpaths ...string) *ExecuteWatch {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the
+// human-readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *ExecuteWatch) Human(human bool) *ExecuteWatch {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *ExecuteWatch) Pretty(pretty bool) *ExecuteWatch {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// Determines how to handle the watch actions as part of the watch execution.
+// API name: action_modes
+func (r *ExecuteWatch) ActionModes(actionmodes map[string]actionexecutionmode.ActionExecutionMode) *ExecuteWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.ActionModes = actionmodes
+	return r
+}
+
+func (r *ExecuteWatch) AddActionMode(key string, value actionexecutionmode.ActionExecutionMode) *ExecuteWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Initialize the map before writing to it; assigning into a nil map panics.
+	if r.req.ActionModes == nil {
+		r.req.ActionModes = make(map[string]actionexecutionmode.ActionExecutionMode)
+	}
+
+	r.req.ActionModes[key] = value
+
+	return r
+}
+
+// When present, the watch uses this object as a payload instead of executing
+// its own input.
+// API name: alternative_input
+func (r *ExecuteWatch) AlternativeInput(alternativeinput map[string]json.RawMessage) *ExecuteWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.AlternativeInput = alternativeinput
+	return r
+}
+
+func (r *ExecuteWatch) AddAlternativeInput(key string, value json.RawMessage) *ExecuteWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Initialize the map before writing to it; assigning into a nil map panics.
+	if r.req.AlternativeInput == nil {
+		r.req.AlternativeInput = make(map[string]json.RawMessage)
+	}
+
+	r.req.AlternativeInput[key] = value
+
+	return r
+}
+
+// When set to `true`, the watch execution uses the always condition. This can
+// also be specified as an HTTP parameter.
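+//
+// As a sketch (again assuming a configured `es` *elasticsearch.TypedClient),
+// forcing a run regardless of the condition while keeping the run out of the
+// watch history could look like:
+//
+//	res, err := es.Watcher.ExecuteWatch().
+//		Id("my_watch").
+//		IgnoreCondition(true).
+//		RecordExecution(false).
+//		Do(context.Background())
+//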
+// API name: ignore_condition +func (r *ExecuteWatch) IgnoreCondition(ignorecondition bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IgnoreCondition = &ignorecondition + + return r +} + +// When set to `true`, the watch record representing the watch execution result +// is persisted to the `.watcher-history` index for the current time. +// In addition, the status of the watch is updated, possibly throttling +// subsequent runs. +// This can also be specified as an HTTP parameter. +// API name: record_execution +func (r *ExecuteWatch) RecordExecution(recordexecution bool) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.RecordExecution = &recordexecution + + return r +} + +// API name: simulated_actions +func (r *ExecuteWatch) SimulatedActions(simulatedactions types.SimulatedActionsVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.SimulatedActions = simulatedactions.SimulatedActionsCaster() + + return r +} + +// This structure is parsed as the data of the trigger event that will be used +// during the watch execution. +// API name: trigger_data +func (r *ExecuteWatch) TriggerData(triggerdata types.ScheduleTriggerEventVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.TriggerData = triggerdata.ScheduleTriggerEventCaster() + + return r +} + +// When present, this watch is used instead of the one specified in the request. +// This watch is not persisted to the index and `record_execution` cannot be +// set. +// API name: watch +func (r *ExecuteWatch) Watch(watch types.WatchVariant) *ExecuteWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Watch = watch.WatchCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/request.go new file mode 100644 index 000000000..0461baf0c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/request.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package executewatch + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode" +) + +// Request holds the request body struct for the package executewatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L107 +type Request struct { + + // ActionModes Determines how to handle the watch actions as part of the watch execution. + ActionModes map[string]actionexecutionmode.ActionExecutionMode `json:"action_modes,omitempty"` + // AlternativeInput When present, the watch uses this object as a payload instead of executing + // its own input. + AlternativeInput map[string]json.RawMessage `json:"alternative_input,omitempty"` + // IgnoreCondition When set to `true`, the watch execution uses the always condition. This can + // also be specified as an HTTP parameter. + IgnoreCondition *bool `json:"ignore_condition,omitempty"` + // RecordExecution When set to `true`, the watch record representing the watch execution result + // is persisted to the `.watcher-history` index for the current time. + // In addition, the status of the watch is updated, possibly throttling + // subsequent runs. + // This can also be specified as an HTTP parameter. + RecordExecution *bool `json:"record_execution,omitempty"` + SimulatedActions *types.SimulatedActions `json:"simulated_actions,omitempty"` + // TriggerData This structure is parsed as the data of the trigger event that will be used + // during the watch execution. + TriggerData *types.ScheduleTriggerEvent `json:"trigger_data,omitempty"` + // Watch When present, this watch is used instead of the one specified in the request. + // This watch is not persisted to the index and `record_execution` cannot be + // set. + Watch *types.Watch `json:"watch,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + ActionModes: make(map[string]actionexecutionmode.ActionExecutionMode, 0), + AlternativeInput: make(map[string]json.RawMessage, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Executewatch request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/response.go new file mode 100644 index 000000000..c27f0a5d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package executewatch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package executewatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L34 +type Response struct { + + // Id_ The watch record identifier as it would be stored in the `.watcher-history` + // index. + Id_ string `json:"_id"` + // WatchRecord The watch record document as it would be stored in the `.watcher-history` + // index. + WatchRecord types.WatchRecord `json:"watch_record"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/get_settings.go new file mode 100644 index 000000000..459849f35 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/get_settings.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get Watcher index settings. +// Get settings for the Watcher internal index (`.watches`). +// Only a subset of settings are shown, for example `index.auto_expand_replicas` +// and `index.number_of_replicas`. +package getsettings + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type GetSettings struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	paramSet int
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewGetSettings type alias for index.
+type NewGetSettings func() *GetSettings
+
+// NewGetSettingsFunc returns a new instance of GetSettings with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings {
+	return func() *GetSettings {
+		n := New(tp)
+
+		return n
+	}
+}
+
+// Get Watcher index settings.
+// Get settings for the Watcher internal index (`.watches`).
+// Only a subset of settings are shown, for example `index.auto_expand_replicas`
+// and `index.number_of_replicas`.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings
+func New(tp elastictransport.Interface) *GetSettings {
+	r := &GetSettings{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetSettings) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == 0:
+		path.WriteString("/")
+		path.WriteString("_watcher")
+		path.WriteString("/")
+		path.WriteString("settings")
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Content-Type") == "" {
+		if r.raw != nil {
+			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9")
+		}
+	}
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
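+//
+// Most callers will prefer Do, which decodes the body into a typed Response,
+// while Perform hands back the raw *http.Response. A minimal sketch, assuming
+// `es` is a configured *elasticsearch.TypedClient:
+//
+//	res, err := es.Watcher.GetSettings().Do(context.Background())
+//	if err != nil {
+//		log.Fatalf("fetching watcher settings: %v", err)
+//	}
+//	_ = res.Index // settings of the `.watches` index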
+func (r GetSettings) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			// Assign to the outer ctx so the instrumented context propagates to
+			// the request below instead of being discarded.
+			ctx = instrument.Start(providedCtx, "watcher.get_settings")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "watcher.get_settings")
+		if reader := instrument.RecordRequestBody(ctx, "watcher.get_settings", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "watcher.get_settings")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetSettings query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a getsettings.Response
+func (r GetSettings) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "watcher.get_settings")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess runs the request with the provided context and reports the outcome as a boolean.
+// It only exists for endpoints without a request payload and allows for quick control flow.
+func (r GetSettings) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetSettings query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetSettings headers map. +func (r *GetSettings) Header(key, value string) *GetSettings { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetSettings) ErrorTrace(errortrace bool) *GetSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetSettings) FilterPath(filterpaths ...string) *GetSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetSettings) Human(human bool) *GetSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetSettings) Pretty(pretty bool) *GetSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/response.go new file mode 100644 index 000000000..2e787cf6c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getsettings + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/get_settings/WatcherGetSettingsResponse.ts#L22-L26 +type Response struct { + Index types.IndexSettings `json:"index"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/get_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/get_watch.go new file mode 100644 index 000000000..22da956da --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/get_watch.go @@ -0,0 +1,344 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get a watch. +package getwatch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetWatch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + id string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewGetWatch type alias for index. +type NewGetWatch func(id string) *GetWatch + +// NewGetWatchFunc returns a new instance of GetWatch with the provided transport. 
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewGetWatchFunc(tp elastictransport.Interface) NewGetWatch {
+	return func(id string) *GetWatch {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Get a watch.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch
+func New(tp elastictransport.Interface) *GetWatch {
+	r := &GetWatch{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *GetWatch) HttpRequest(ctx context.Context) (*http.Request, error) {
+	var path strings.Builder
+	var method string
+	var req *http.Request
+
+	var err error
+
+	r.path.Scheme = "http"
+
+	switch {
+	case r.paramSet == idMask:
+		path.WriteString("/")
+		path.WriteString("_watcher")
+		path.WriteString("/")
+		path.WriteString("watch")
+		path.WriteString("/")
+
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordPathPart(ctx, "id", r.id)
+		}
+		path.WriteString(r.id)
+
+		method = http.MethodGet
+	}
+
+	r.path.Path = path.String()
+	r.path.RawQuery = r.values.Encode()
+
+	if r.path.Path == "" {
+		return nil, ErrBuildPath
+	}
+
+	if ctx != nil {
+		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw)
+	} else {
+		req, err = http.NewRequest(method, r.path.String(), r.raw)
+	}
+
+	req.Header = r.headers.Clone()
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9")
+	}
+
+	if err != nil {
+		return req, fmt.Errorf("could not build http.Request: %w", err)
+	}
+
+	return req, nil
+}
+
+// Perform runs the http.Request through the provided transport and returns an http.Response.
+func (r GetWatch) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			// Assign to the outer ctx so the instrumented context propagates to
+			// the request below instead of being discarded.
+			ctx = instrument.Start(providedCtx, "watcher.get_watch")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "watcher.get_watch")
+		if reader := instrument.RecordRequestBody(ctx, "watcher.get_watch", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "watcher.get_watch")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the GetWatch query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a getwatch.Response
+func (r GetWatch) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "watcher.get_watch")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// IsSuccess runs the request with the provided context and reports the outcome as a boolean.
+// It only exists for endpoints without a request payload and allows for quick control flow.
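+//
+// A minimal existence-check sketch, assuming `es` is a configured
+// *elasticsearch.TypedClient:
+//
+//	exists, err := es.Watcher.GetWatch("my_watch").IsSuccess(context.Background())
+//	if err != nil {
+//		log.Fatalf("checking watch: %v", err)
+//	}
+//	fmt.Println("watch exists:", exists)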
+func (r GetWatch) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.get_watch") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the GetWatch query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the GetWatch headers map. +func (r *GetWatch) Header(key, value string) *GetWatch { + r.headers.Set(key, value) + + return r +} + +// Id The watch identifier. +// API Name: id +func (r *GetWatch) _id(id string) *GetWatch { + r.paramSet |= idMask + r.id = id + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *GetWatch) ErrorTrace(errortrace bool) *GetWatch { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *GetWatch) FilterPath(filterpaths ...string) *GetWatch { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *GetWatch) Human(human bool) *GetWatch { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *GetWatch) Pretty(pretty bool) *GetWatch { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/response.go new file mode 100644 index 000000000..813ec4c2d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package getwatch + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package getwatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 +type Response struct { + Found bool `json:"found"` + Id_ string `json:"_id"` + PrimaryTerm_ *int `json:"_primary_term,omitempty"` + SeqNo_ *int64 `json:"_seq_no,omitempty"` + Status *types.WatchStatus `json:"status,omitempty"` + Version_ *int64 `json:"_version,omitempty"` + Watch *types.Watch `json:"watch,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/put_watch.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/put_watch.go new file mode 100644 index 000000000..309f97cf4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/put_watch.go @@ -0,0 +1,560 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Create or update a watch. +// When a watch is registered, a new document that represents the watch is added +// to the `.watches` index and its trigger is immediately registered with the +// relevant trigger engine. +// Typically for the `schedule` trigger, the scheduler is the trigger engine. +// +// IMPORTANT: You must use Kibana or this API to create a watch. +// Do not add a watch directly to the `.watches` index by using the +// Elasticsearch index API. +// If Elasticsearch security features are enabled, do not give users write +// privileges on the `.watches` index. +// +// When you add a watch you can also define its initial active state by setting +// the *active* parameter. 
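+//
+// As a minimal registration sketch (the index name and schedule are
+// placeholders, and `es` is assumed to be a configured
+// *elasticsearch.TypedClient):
+//
+//	body := `{
+//	  "trigger": { "schedule": { "interval": "10m" } },
+//	  "input": { "search": { "request": { "indices": ["logs"] } } },
+//	  "condition": { "always": {} }
+//	}`
+//	res, err := es.Watcher.PutWatch("my_watch").
+//		Raw(strings.NewReader(body)).
+//		Do(context.Background())
+//	if err != nil {
+//		log.Fatalf("storing watch: %v", err)
+//	}
+//	fmt.Println("created:", res.Created)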
+//
+// When Elasticsearch security features are enabled, your watch can index or
+// search only on indices for which the user that stored the watch has
+// privileges.
+// If the user is able to read index `a`, but not index `b`, the same will apply
+// when the watch runs.
+package putwatch
+
+import (
+	gobytes "bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/elastic-transport-go/v8/elastictransport"
+	"github.com/elastic/go-elasticsearch/v9/typedapi/types"
+)
+
+const (
+	idMask = iota + 1
+)
+
+// ErrBuildPath is returned in case of missing parameters within the build of the request.
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
+
+type PutWatch struct {
+	transport elastictransport.Interface
+
+	headers http.Header
+	values  url.Values
+	path    url.URL
+
+	raw io.Reader
+
+	req      *Request
+	deferred []func(request *Request) error
+	buf      *gobytes.Buffer
+
+	paramSet int
+
+	id string
+
+	spanStarted bool
+
+	instrument elastictransport.Instrumentation
+}
+
+// NewPutWatch type alias for index.
+type NewPutWatch func(id string) *PutWatch
+
+// NewPutWatchFunc returns a new instance of PutWatch with the provided transport.
+// Used in the index of the library, this allows retrieving every API in one place.
+func NewPutWatchFunc(tp elastictransport.Interface) NewPutWatch {
+	return func(id string) *PutWatch {
+		n := New(tp)
+
+		n._id(id)
+
+		return n
+	}
+}
+
+// Create or update a watch.
+// When a watch is registered, a new document that represents the watch is added
+// to the `.watches` index and its trigger is immediately registered with the
+// relevant trigger engine.
+// Typically for the `schedule` trigger, the scheduler is the trigger engine.
+//
+// IMPORTANT: You must use Kibana or this API to create a watch.
+// Do not add a watch directly to the `.watches` index by using the
+// Elasticsearch index API.
+// If Elasticsearch security features are enabled, do not give users write
+// privileges on the `.watches` index.
+//
+// When you add a watch you can also define its initial active state by setting
+// the *active* parameter.
+//
+// When Elasticsearch security features are enabled, your watch can index or
+// search only on indices for which the user that stored the watch has
+// privileges.
+// If the user is able to read index `a`, but not index `b`, the same will apply
+// when the watch runs.
+//
+// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch
+func New(tp elastictransport.Interface) *PutWatch {
+	r := &PutWatch{
+		transport: tp,
+		values:    make(url.Values),
+		headers:   make(http.Header),
+
+		buf: gobytes.NewBuffer(nil),
+	}
+
+	if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
+		if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
+			r.instrument = instrument
+		}
+	}
+
+	return r
+}
+
+// Raw takes a JSON payload as input, which is then passed to the http.Request.
+// If specified, Raw takes precedence over the Request method.
+func (r *PutWatch) Raw(raw io.Reader) *PutWatch {
+	r.raw = raw
+
+	return r
+}
+
+// Request sets the request property with the appropriate payload.
+func (r *PutWatch) Request(req *Request) *PutWatch {
+	r.req = req
+
+	return r
+}
+
+// HttpRequest returns the http.Request object built from the
+// given parameters.
+func (r *PutWatch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutWatch: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("watch") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "id", r.id) + } + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutWatch) Perform(providedCtx context.Context) (*http.Response, error) {
+	var ctx context.Context
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		if !r.spanStarted {
+			// Assign to the outer ctx so the instrumented context propagates to
+			// the request below instead of being discarded.
+			ctx = instrument.Start(providedCtx, "watcher.put_watch")
+			defer instrument.Close(ctx)
+		}
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	req, err := r.HttpRequest(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.BeforeRequest(req, "watcher.put_watch")
+		if reader := instrument.RecordRequestBody(ctx, "watcher.put_watch", r.raw); reader != nil {
+			req.Body = reader
+		}
+	}
+	res, err := r.transport.Perform(req)
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.AfterRequest(req, "elasticsearch", "watcher.put_watch")
+	}
+	if err != nil {
+		localErr := fmt.Errorf("an error happened during the PutWatch query execution: %w", err)
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, localErr)
+		}
+		return nil, localErr
+	}
+
+	return res, nil
+}
+
+// Do runs the request through the transport, handles the response and returns a putwatch.Response
+func (r PutWatch) Do(providedCtx context.Context) (*Response, error) {
+	var ctx context.Context
+	r.spanStarted = true
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		ctx = instrument.Start(providedCtx, "watcher.put_watch")
+		defer instrument.Close(ctx)
+	}
+	if ctx == nil {
+		ctx = providedCtx
+	}
+
+	response := NewResponse()
+
+	res, err := r.Perform(ctx)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode < 299 {
+		err = json.NewDecoder(res.Body).Decode(response)
+		if err != nil {
+			if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+				instrument.RecordError(ctx, err)
+			}
+			return nil, err
+		}
+
+		return response, nil
+	}
+
+	errorResponse := types.NewElasticsearchError()
+	err = json.NewDecoder(res.Body).Decode(errorResponse)
+	if err != nil {
+		if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+			instrument.RecordError(ctx, err)
+		}
+		return nil, err
+	}
+
+	if errorResponse.Status == 0 {
+		errorResponse.Status = res.StatusCode
+	}
+
+	if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok {
+		instrument.RecordError(ctx, errorResponse)
+	}
+	return nil, errorResponse
+}
+
+// Header sets a key, value pair in the PutWatch headers map.
+func (r *PutWatch) Header(key, value string) *PutWatch {
+	r.headers.Set(key, value)
+
+	return r
+}
+
+// Id The identifier for the watch.
+// API Name: id
+func (r *PutWatch) _id(id string) *PutWatch {
+	r.paramSet |= idMask
+	r.id = id
+
+	return r
+}
+
+// Active The initial state of the watch.
+// The default value is `true`, which means the watch is active by default.
+// API name: active
+func (r *PutWatch) Active(active bool) *PutWatch {
+	r.values.Set("active", strconv.FormatBool(active))
+
+	return r
+}
+
+// IfPrimaryTerm only updates the watch if the last operation that changed the
+// watch has the specified primary term.
+// API name: if_primary_term
+func (r *PutWatch) IfPrimaryTerm(ifprimaryterm string) *PutWatch {
+	r.values.Set("if_primary_term", ifprimaryterm)
+
+	return r
+}
+
+// IfSeqNo only updates the watch if the last operation that changed the watch
+// has the specified sequence number.
+// API name: if_seq_no
+func (r *PutWatch) IfSeqNo(sequencenumber string) *PutWatch {
+	r.values.Set("if_seq_no", sequencenumber)
+
+	return r
+}
+
+// Version Explicit version number for concurrency control.
+// API name: version
+func (r *PutWatch) Version(versionnumber string) *PutWatch {
+	r.values.Set("version", versionnumber)
+
+	return r
+}
+
+// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors
+// when they occur.
+// API name: error_trace
+func (r *PutWatch) ErrorTrace(errortrace bool) *PutWatch {
+	r.values.Set("error_trace", strconv.FormatBool(errortrace))
+
+	return r
+}
+
+// FilterPath Comma-separated list of filters in dot notation which reduce the response
+// returned by Elasticsearch.
+// API name: filter_path
+func (r *PutWatch) FilterPath(filterpaths ...string) *PutWatch {
+	tmp := []string{}
+	for _, item := range filterpaths {
+		tmp = append(tmp, fmt.Sprintf("%v", item))
+	}
+	r.values.Set("filter_path", strings.Join(tmp, ","))
+
+	return r
+}
+
+// Human When set to `true` will return statistics in a format suitable for humans.
+// For example `"exists_time": "1h"` for humans and
+// `"exists_time_in_millis": 3600000` for computers. When disabled the
+// human-readable values will be omitted. This makes sense for responses being
+// consumed only by machines.
+// API name: human
+func (r *PutWatch) Human(human bool) *PutWatch {
+	r.values.Set("human", strconv.FormatBool(human))
+
+	return r
+}
+
+// Pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+// this option for debugging only.
+// API name: pretty
+func (r *PutWatch) Pretty(pretty bool) *PutWatch {
+	r.values.Set("pretty", strconv.FormatBool(pretty))
+
+	return r
+}
+
+// The list of actions that will be run if the condition matches.
+// API name: actions
+func (r *PutWatch) Actions(actions map[string]types.WatcherAction) *PutWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+	r.req.Actions = actions
+	return r
+}
+
+func (r *PutWatch) AddAction(key string, value types.WatcherActionVariant) *PutWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	// Initialize the map before writing to it; assigning into a nil map panics.
+	if r.req.Actions == nil {
+		r.req.Actions = make(map[string]types.WatcherAction)
+	}
+
+	r.req.Actions[key] = *value.WatcherActionCaster()
+
+	return r
+}
+
+// The condition that defines if the actions should be run.
+// API name: condition
+func (r *PutWatch) Condition(condition types.WatcherConditionVariant) *PutWatch {
+	// Initialize the request if it is not already initialized
+	if r.req == nil {
+		r.req = NewRequest()
+	}
+
+	r.req.Condition = condition.WatcherConditionCaster()
+
+	return r
+}
+
+// The input that loads the data for the watch.
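+//
+// As a sketch of the call shape only (the empty action value is a placeholder;
+// a real action would configure e.g. its logging or webhook fields):
+//
+//	action := types.WatcherAction{}
+//	res, err := es.Watcher.PutWatch("my_watch").
+//		AddAction("log_hits", &action).
+//		Do(context.Background())
+//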
+// API name: input +func (r *PutWatch) Input(input types.WatcherInputVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Input = input.WatcherInputCaster() + + return r +} + +// Metadata JSON that will be copied into the history entries. +// API name: metadata +func (r *PutWatch) Metadata(metadata types.MetadataVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Metadata = *metadata.MetadataCaster() + + return r +} + +// The minimum time between actions being run. +// The default is 5 seconds. +// This default can be changed in the config file with the setting +// `xpack.watcher.throttle.period.default_period`. +// If both this value and the `throttle_period_in_millis` parameter are +// specified, Watcher uses the last parameter included in the request. +// API name: throttle_period +func (r *PutWatch) ThrottlePeriod(duration types.DurationVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ThrottlePeriod = *duration.DurationCaster() + + return r +} + +// Minimum time in milliseconds between actions being run. Defaults to 5000. If +// both this value and the throttle_period parameter are specified, Watcher uses +// the last parameter included in the request. +// API name: throttle_period_in_millis +func (r *PutWatch) ThrottlePeriodInMillis(durationvalueunitmillis int64) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.ThrottlePeriodInMillis = &durationvalueunitmillis + + return r +} + +// The transform that processes the watch payload to prepare it for the watch +// actions. +// API name: transform +func (r *PutWatch) Transform(transform types.TransformContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Transform = transform.TransformContainerCaster() + + return r +} + +// The trigger that defines when the watch should run. +// API name: trigger +func (r *PutWatch) Trigger(trigger types.TriggerContainerVariant) *PutWatch { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Trigger = trigger.TriggerContainerCaster() + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/request.go new file mode 100644 index 000000000..f20526e54 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/request.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putwatch + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package putwatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L31-L110 +type Request struct { + + // Actions The list of actions that will be run if the condition matches. + Actions map[string]types.WatcherAction `json:"actions,omitempty"` + // Condition The condition that defines if the actions should be run. + Condition *types.WatcherCondition `json:"condition,omitempty"` + // Input The input that defines the input that loads the data for the watch. + Input *types.WatcherInput `json:"input,omitempty"` + // Metadata Metadata JSON that will be copied into the history entries. + Metadata types.Metadata `json:"metadata,omitempty"` + // ThrottlePeriod The minimum time between actions being run. + // The default is 5 seconds. + // This default can be changed in the config file with the setting + // `xpack.watcher.throttle.period.default_period`. + // If both this value and the `throttle_period_in_millis` parameter are + // specified, Watcher uses the last parameter included in the request. + ThrottlePeriod types.Duration `json:"throttle_period,omitempty"` + // ThrottlePeriodInMillis Minimum time in milliseconds between actions being run. Defaults to 5000. If + // both this value and the throttle_period parameter are specified, Watcher uses + // the last parameter included in the request. + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + // Transform The transform that processes the watch payload to prepare it for the watch + // actions. + Transform *types.TransformContainer `json:"transform,omitempty"` + // Trigger The trigger that defines when the watch should run. 
+ Trigger *types.TriggerContainer `json:"trigger,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Actions: make(map[string]types.WatcherAction, 0), + } + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putwatch request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if s.Actions == nil { + s.Actions = make(map[string]types.WatcherAction, 0) + } + if err := dec.Decode(&s.Actions); err != nil { + return fmt.Errorf("%s | %w", "Actions", err) + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return fmt.Errorf("%s | %w", "Condition", err) + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return fmt.Errorf("%s | %w", "Input", err) + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return fmt.Errorf("%s | %w", "Metadata", err) + } + + case "throttle_period": + if err := dec.Decode(&s.ThrottlePeriod); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriod", err) + } + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return fmt.Errorf("%s | %w", "ThrottlePeriodInMillis", err) + } + + case "transform": + if err := dec.Decode(&s.Transform); err != nil { + return fmt.Errorf("%s | %w", "Transform", err) + } + + case "trigger": + if err := dec.Decode(&s.Trigger); err != nil { + return fmt.Errorf("%s | %w", "Trigger", err) + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/response.go new file mode 100644 index 000000000..2943ac5bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package putwatch + +// Response holds the response body struct for the package putwatch +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 +type Response struct { + Created bool `json:"created"` + Id_ string `json:"_id"` + PrimaryTerm_ int64 `json:"_primary_term"` + SeqNo_ int64 `json:"_seq_no"` + Version_ int64 `json:"_version"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/query_watches.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/query_watches.go new file mode 100644 index 000000000..4db81cc90 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/query_watches.go @@ -0,0 +1,417 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. +// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. +package querywatches + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type QueryWatches struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewQueryWatches type alias for index. +type NewQueryWatches func() *QueryWatches + +// NewQueryWatchesFunc returns a new instance of QueryWatches with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewQueryWatchesFunc(tp elastictransport.Interface) NewQueryWatches { + return func() *QueryWatches { + n := New(tp) + + return n + } +} + +// Query watches. +// Get all registered watches in a paginated manner and optionally filter +// watches by a query. 
+// +// Note that only the `_id` and `metadata.*` fields are queryable or sortable. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches +func New(tp elastictransport.Interface) *QueryWatches { + r := &QueryWatches{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *QueryWatches) Raw(raw io.Reader) *QueryWatches { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *QueryWatches) Request(req *Request) *QueryWatches { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *QueryWatches) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for QueryWatches: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("_query") + path.WriteString("/") + path.WriteString("watches") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
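+// Most callers will use Do, which also decodes the response body; Perform
+// returns the raw *http.Response. A hedged usage sketch, where "es" stands
+// for an already configured *elasticsearch.TypedClient (not defined in this
+// file):
+//
+//	res, err := es.Watcher.QueryWatches().
+//		From(0).
+//		Size(10).
+//		Do(context.Background())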
+func (r QueryWatches) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.query_watches") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.query_watches") + if reader := instrument.RecordRequestBody(ctx, "watcher.query_watches", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.query_watches") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the QueryWatches query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a querywatches.Response +func (r QueryWatches) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.query_watches") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the QueryWatches headers map. +func (r *QueryWatches) Header(key, value string) *QueryWatches { + r.headers.Set(key, value) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *QueryWatches) ErrorTrace(errortrace bool) *QueryWatches { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. 
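+// For example, FilterPath("watches._id") would keep only the watch IDs in
+// the query watches response (an illustrative path, not taken from this file).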
+// API name: filter_path +func (r *QueryWatches) FilterPath(filterpaths ...string) *QueryWatches { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *QueryWatches) Human(human bool) *QueryWatches { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *QueryWatches) Pretty(pretty bool) *QueryWatches { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// The offset from the first result to fetch. +// It must be non-negative. +// API name: from +func (r *QueryWatches) From(from int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.From = &from + + return r +} + +// A query that filters the watches to be returned. +// API name: query +func (r *QueryWatches) Query(query types.QueryVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Query = query.QueryCaster() + + return r +} + +// Retrieve the next page of hits using a set of sort values from the previous +// page. +// API name: search_after +func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValueVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sortresults { + r.req.SearchAfter = append(r.req.SearchAfter, *v.FieldValueCaster()) + } + + return r +} + +// The number of hits to return. +// It must be non-negative. +// API name: size +func (r *QueryWatches) Size(size int) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.Size = &size + + return r +} + +// One or more fields used to sort the search results. +// API name: sort +func (r *QueryWatches) Sort(sorts ...types.SortCombinationsVariant) *QueryWatches { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + for _, v := range sorts { + r.req.Sort = append(r.req.Sort, *v.SortCombinationsCaster()) + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/request.go new file mode 100644 index 000000000..dd063ce33 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/request.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package querywatches + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Request holds the request body struct for the package querywatches +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L70 +type Request struct { + + // From The offset from the first result to fetch. + // It must be non-negative. + From *int `json:"from,omitempty"` + // Query A query that filters the watches to be returned. + Query *types.Query `json:"query,omitempty"` + // SearchAfter Retrieve the next page of hits using a set of sort values from the previous + // page. + SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // Size The number of hits to return. + // It must be non-negative. + Size *int `json:"size,omitempty"` + // Sort One or more fields used to sort the search results. + Sort []types.SortCombinations `json:"sort,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Querywatches request: %w", err) + } + + return &req, nil +} + +func (s *Request) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "From", err) + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return fmt.Errorf("%s | %w", "Query", err) + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return fmt.Errorf("%s | %w", "SearchAfter", err) + } + + case "size": + + var tmp any + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("%s | %w", "Size", err) + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return fmt.Errorf("%s | %w", "Sort", err) + } + } + 
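+			// Note: "sort" accepts either a single sort combination or an
+			// array of them; the bare (non-array) form is wrapped into the
+			// slice above.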
+ } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/response.go new file mode 100644 index 000000000..699cd30d4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package querywatches + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package querywatches +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L34 +type Response struct { + + // Count The total number of watches found. + Count int `json:"count"` + // Watches A list of watches based on the `from`, `size`, or `search_after` request body + // parameters. + Watches []types.QueryWatch `json:"watches"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/response.go new file mode 100644 index 000000000..10c67de58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package start + +// Response holds the response body struct for the package start +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/start/WatcherStartResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/start.go new file mode 100644 index 000000000..9cd669380 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start/start.go @@ -0,0 +1,331 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Start the watch service. +// Start the Watcher service if it is not already running. +package start + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Start struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStart type alias for index. +type NewStart func() *Start + +// NewStartFunc returns a new instance of Start with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStartFunc(tp elastictransport.Interface) NewStart { + return func() *Start { + n := New(tp) + + return n + } +} + +// Start the watch service. +// Start the Watcher service if it is not already running. 
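+//
+// A hedged sketch ("es" is an assumed, preconfigured
+// *elasticsearch.TypedClient, not defined in this file):
+//
+//	res, err := es.Watcher.Start().Do(context.Background())
+//	// res.Acknowledged reports whether the request was acknowledged.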
+// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start +func New(tp elastictransport.Interface) *Start { + r := &Start{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Start) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("_start") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Start) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.start") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.start") + if reader := instrument.RecordRequestBody(ctx, "watcher.start", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.start") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Start query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a start.Response +func (r Start) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = 
json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Start) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.start") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Start query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Start headers map. +func (r *Start) Header(key, value string) *Start { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. +// API name: master_timeout +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Start) ErrorTrace(errortrace bool) *Start { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Start) FilterPath(filterpaths ...string) *Start { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Start) Human(human bool) *Start { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. 
+// API name: pretty +func (r *Start) Pretty(pretty bool) *Start { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/response.go new file mode 100644 index 000000000..bf0b594dd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stats + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package stats +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 +type Response struct { + ClusterName string `json:"cluster_name"` + ManuallyStopped bool `json:"manually_stopped"` + NodeStats types.NodeStatistics `json:"_nodes"` + Stats []types.WatcherNodeStats `json:"stats"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/stats.go new file mode 100644 index 000000000..f3f705a07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats/stats.go @@ -0,0 +1,361 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. 
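+//
+// A hedged sketch asking for an extra metric (the metric name and the "es"
+// client are illustrative assumptions, not taken from this file):
+//
+//	res, err := es.Watcher.Stats().Metric("queued_watches").Do(ctx)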
+package stats + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +const ( + metricMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stats struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + metric string + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStats type alias for index. +type NewStats func() *Stats + +// NewStatsFunc returns a new instance of Stats with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewStatsFunc(tp elastictransport.Interface) NewStats { + return func() *Stats { + n := New(tp) + + return n + } +} + +// Get Watcher statistics. +// This API always returns basic metrics. +// You retrieve more metrics by using the metric parameter. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats +func New(tp elastictransport.Interface) *Stats { + r := &Stats{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stats) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("stats") + + method = http.MethodGet + case r.paramSet == metricMask: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("stats") + path.WriteString("/") + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordPathPart(ctx, "metric", r.metric) + } + path.WriteString(r.metric) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Stats) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.stats") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.stats") + if reader := instrument.RecordRequestBody(ctx, "watcher.stats", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.stats") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stats query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stats.Response +func (r Stats) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
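+//
+// For example (hedged; "es" is an assumed typed client):
+//
+//	ok, err := es.Watcher.Stats().IsSuccess(context.Background())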
+func (r Stats) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stats") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stats query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stats headers map. +func (r *Stats) Header(key, value string) *Stats { + r.headers.Set(key, value) + + return r +} + +// Metric Defines which additional metrics are included in the response. +// API Name: metric +func (r *Stats) Metric(metrics ...string) *Stats { + r.paramSet |= metricMask + r.metric = strings.Join(metrics, ",") + + return r +} + +// EmitStacktraces Defines whether stack traces are generated for each watch that is running. +// API name: emit_stacktraces +func (r *Stats) EmitStacktraces(emitstacktraces bool) *Stats { + r.values.Set("emit_stacktraces", strconv.FormatBool(emitstacktraces)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stats) ErrorTrace(errortrace bool) *Stats { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stats) FilterPath(filterpaths ...string) *Stats { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stats) Human(human bool) *Stats { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stats) Pretty(pretty bool) *Stats { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/response.go new file mode 100644 index 000000000..0851367c1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package stop + +// Response holds the response body struct for the package stop +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/stop/WatcherStopResponse.ts#L22-L25 +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/stop.go new file mode 100644 index 000000000..f8886617d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop/stop.go @@ -0,0 +1,334 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Stop the watch service. +// Stop the Watcher service if it is running. +package stop + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Stop struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewStop type alias for index. +type NewStop func() *Stop + +// NewStopFunc returns a new instance of Stop with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewStopFunc(tp elastictransport.Interface) NewStop { + return func() *Stop { + n := New(tp) + + return n + } +} + +// Stop the watch service. +// Stop the Watcher service if it is running. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop +func New(tp elastictransport.Interface) *Stop { + r := &Stop{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Stop) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("_stop") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Stop) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.stop") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.stop") + if reader := instrument.RecordRequestBody(ctx, "watcher.stop", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.stop") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Stop query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a stop.Response +func (r Stop) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
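+//
+// A hedged sketch combining it with the master timeout option ("es" and the
+// duration value are illustrative):
+//
+//	ok, err := es.Watcher.Stop().MasterTimeout("30s").IsSuccess(ctx)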
+func (r Stop) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.stop") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Stop query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Stop headers map. +func (r *Stop) Header(key, value string) *Stop { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for the master node. +// If the master node is not available before the timeout expires, the request +// fails and returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Stop) ErrorTrace(errortrace bool) *Stop { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Stop) FilterPath(filterpaths ...string) *Stop { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Stop) Human(human bool) *Stop { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Stop) Pretty(pretty bool) *Stop { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/request.go new file mode 100644 index 000000000..3aae9f298 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatesettings + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/update_settings/WatcherUpdateSettingsRequest.ts#L24-L61 +type Request struct { + IndexAutoExpandReplicas *string `json:"index.auto_expand_replicas,omitempty"` + IndexNumberOfReplicas *int `json:"index.number_of_replicas,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Updatesettings request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/response.go new file mode 100644 index 000000000..6944453ca --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/response.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package updatesettings + +// Response holds the response body struct for the package updatesettings +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/watcher/update_settings/WatcherUpdateSettingsResponse.ts#L20-L24 +type Response struct { + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/update_settings.go new file mode 100644 index 000000000..7aeac2c4c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings/update_settings.go @@ -0,0 +1,397 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas`, `index.number_of_replicas`, +// `index.routing.allocation.exclude.*`, +// `index.routing.allocation.include.*` and +// `index.routing.allocation.require.*`. +// Modification of `index.routing.allocation.include._tier_preference` is an +// exception and is not allowed as the +// Watcher shards must always be in the `data_content` tier. +package updatesettings + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + req *Request + deferred []func(request *Request) error + buf *gobytes.Buffer + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUpdateSettings type alias for index. +type NewUpdateSettings func() *UpdateSettings + +// NewUpdateSettingsFunc returns a new instance of UpdateSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
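As a usage sketch for the builder this function vends, reusing the assumed es client from the earlier example (es.Watcher.UpdateSettings() is an assumed accessor in the generated API index), the request body defined in request.go can be populated either through the generated setters later in this file or via FromJSON:

// Fluent form: raise the replica count of the Watcher internal index.
res, err := es.Watcher.UpdateSettings().
	IndexNumberOfReplicas(2).
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("acknowledged:", res.Acknowledged)

// Equivalent form, loading the body through the FromJSON helper from
// request.go and attaching it via the Request method defined below.
req, err := updatesettings.NewRequest().FromJSON(`{"index.number_of_replicas": 2}`)
if err != nil {
	log.Fatal(err)
}
res, err = es.Watcher.UpdateSettings().Request(req).Do(context.Background())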
+func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { + return func() *UpdateSettings { + n := New(tp) + + return n + } +} + +// Update Watcher index settings. +// Update settings for the Watcher internal index (`.watches`). +// Only a subset of settings can be modified. +// This includes `index.auto_expand_replicas`, `index.number_of_replicas`, +// `index.routing.allocation.exclude.*`, +// `index.routing.allocation.include.*` and +// `index.routing.allocation.require.*`. +// Modification of `index.routing.allocation.include._tier_preference` is an +// exception and is not allowed as the +// Watcher shards must always be in the `data_content` tier. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings +func New(tp elastictransport.Interface) *UpdateSettings { + r := &UpdateSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + + buf: gobytes.NewBuffer(nil), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *UpdateSettings) Raw(raw io.Reader) *UpdateSettings { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *UpdateSettings) Request(req *Request) *UpdateSettings { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw == nil && r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for UpdateSettings: %w", err) + } + + r.buf.Write(data) + + } + + if r.buf.Len() > 0 { + r.raw = r.buf + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.raw != nil { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=9") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r UpdateSettings) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "watcher.update_settings") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "watcher.update_settings") + if reader := instrument.RecordRequestBody(ctx, "watcher.update_settings", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "watcher.update_settings") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the UpdateSettings query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatesettings.Response +func (r UpdateSettings) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "watcher.update_settings") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// Header set a key, value pair in the UpdateSettings headers map. +func (r *UpdateSettings) Header(key, value string) *UpdateSettings { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: master_timeout +func (r *UpdateSettings) MasterTimeout(duration string) *UpdateSettings { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout The period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
+// API name: timeout +func (r *UpdateSettings) Timeout(duration string) *UpdateSettings { + r.values.Set("timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *UpdateSettings) ErrorTrace(errortrace bool) *UpdateSettings { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *UpdateSettings) FilterPath(filterpaths ...string) *UpdateSettings { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *UpdateSettings) Human(human bool) *UpdateSettings { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *UpdateSettings) Pretty(pretty bool) *UpdateSettings { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} + +// API name: index.auto_expand_replicas +func (r *UpdateSettings) IndexAutoExpandReplicas(indexautoexpandreplicas string) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexAutoExpandReplicas = &indexautoexpandreplicas + + return r +} + +// API name: index.number_of_replicas +func (r *UpdateSettings) IndexNumberOfReplicas(indexnumberofreplicas int) *UpdateSettings { + // Initialize the request if it is not already initialized + if r.req == nil { + r.req = NewRequest() + } + + r.req.IndexNumberOfReplicas = &indexnumberofreplicas + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/info.go new file mode 100644 index 000000000..3950b96ee --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/info.go @@ -0,0 +1,351 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get information. +// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. +package info + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" + "github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/xpackcategory" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewInfo type alias for index. +type NewInfo func() *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func() *Info { + n := New(tp) + + return n + } +} + +// Get information. +// The information provided by the API includes: +// +// * Build information including the build number and timestamp. +// * License information about the currently installed license. +// * Feature information for the features that are currently enabled and +// available under the current license. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_xpack") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
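Before the transport plumbing, a short call-site sketch with the same assumed es client; the field names on info.License come from the types package, which is not part of this hunk:

info, err := es.Xpack.Info().Human(true).Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println(info.Tagline)
fmt.Println("license:", info.License.Type, info.License.Status)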
+func (r Info) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "xpack.info") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "xpack.info") + if reader := instrument.RecordRequestBody(ctx, "xpack.info", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "xpack.info") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Info query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
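The IsSuccess variant collapses the same round trip into a boolean, which suits reachability probes; a sketch with the same assumed client:

ok, err := es.Xpack.Info().IsSuccess(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Println("xpack endpoint reachable:", ok)

Note that IsSuccess drains and closes the response body itself, so no further cleanup is needed at the call site.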
+func (r Info) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.info") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Info query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// Categories A comma-separated list of the information categories to include in the +// response. +// For example, `build,license,features`. +// API name: categories +func (r *Info) Categories(categories ...xpackcategory.XPackCategory) *Info { + tmp := []string{} + for _, item := range categories { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("categories", strings.Join(tmp, ",")) + + return r +} + +// AcceptEnterprise If this param is used it must be set to true +// API name: accept_enterprise +func (r *Info) AcceptEnterprise(acceptenterprise bool) *Info { + r.values.Set("accept_enterprise", strconv.FormatBool(acceptenterprise)) + + return r +} + +// Human Defines whether additional human-readable information is included in the +// response. +// In particular, it adds descriptions and a tag line. +// API name: human +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Info) ErrorTrace(errortrace bool) *Info { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Info) FilterPath(filterpaths ...string) *Info { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Info) Pretty(pretty bool) *Info { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/response.go new file mode 100644 index 000000000..ea19475c7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package info + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/info/XPackInfoResponse.ts#L22-L29 +type Response struct { + Build types.BuildInformation `json:"build"` + Features types.XpackFeatures `json:"features"` + License types.MinimalLicenseInformation `json:"license"` + Tagline string `json:"tagline"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/response.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/response.go new file mode 100644 index 000000000..b8f570981 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/response.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +package usage + +import ( + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// Response holds the response body struct for the package usage +// +// https://github.com/elastic/elasticsearch-specification/blob/907d11a72a6bfd37b777d526880c56202889609e/specification/xpack/usage/XPackUsageResponse.ts#L42-L77 +type Response struct { + AggregateMetric types.Base `json:"aggregate_metric"` + Analytics types.Analytics `json:"analytics"` + Archive types.Archive `json:"archive"` + Ccr types.Ccr `json:"ccr"` + DataFrame *types.Base `json:"data_frame,omitempty"` + DataScience *types.Base `json:"data_science,omitempty"` + DataStreams *types.DataStreams `json:"data_streams,omitempty"` + DataTiers types.DataTiers `json:"data_tiers"` + Enrich *types.Base `json:"enrich,omitempty"` + Eql types.Eql `json:"eql"` + Flattened *types.Flattened `json:"flattened,omitempty"` + Graph types.Base `json:"graph"` + HealthApi *types.HealthStatistics `json:"health_api,omitempty"` + Ilm types.Ilm `json:"ilm"` + Logstash types.Base `json:"logstash"` + Ml types.MachineLearning `json:"ml"` + Monitoring types.Monitoring `json:"monitoring"` + Rollup types.Base `json:"rollup"` + RuntimeFields *types.XpackRuntimeFieldTypes `json:"runtime_fields,omitempty"` + SearchableSnapshots types.SearchableSnapshots `json:"searchable_snapshots"` + Security types.Security `json:"security"` + Slm types.Slm `json:"slm"` + Spatial types.Base `json:"spatial"` + Sql types.Sql `json:"sql"` + Transform types.Base `json:"transform"` + Vectors *types.Vector `json:"vectors,omitempty"` + VotingOnly types.Base `json:"voting_only"` + Watcher types.Watcher `json:"watcher"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/usage.go b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/usage.go new file mode 100644 index 000000000..2f69e5231 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage/usage.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/907d11a72a6bfd37b777d526880c56202889609e + +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. +// The API also provides some usage statistics. 
+package usage + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v9/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Usage struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + raw io.Reader + + paramSet int + + spanStarted bool + + instrument elastictransport.Instrumentation +} + +// NewUsage type alias for index. +type NewUsage func() *Usage + +// NewUsageFunc returns a new instance of Usage with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUsageFunc(tp elastictransport.Interface) NewUsage { + return func() *Usage { + n := New(tp) + + return n + } +} + +// Get usage information. +// Get information about the features that are currently enabled and available +// under the current license. +// The API also provides some usage statistics. +// +// https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack +func New(tp elastictransport.Interface) *Usage { + r := &Usage{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + } + + if instrumented, ok := r.transport.(elastictransport.Instrumented); ok { + if instrument := instrumented.InstrumentationEnabled(); instrument != nil { + r.instrument = instrument + } + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Usage) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_xpack") + path.WriteString("/") + path.WriteString("usage") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.raw) + } else { + req, err = http.NewRequest(method, r.path.String(), r.raw) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=9") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
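A sketch of reading feature flags from this endpoint with the same assumed es client; the Available and Enabled fields on types.Watcher are an assumption about the types package, which this hunk does not include:

usage, err := es.Xpack.Usage().MasterTimeout("30s").Do(context.Background())
if err != nil {
	log.Fatal(err)
}
fmt.Printf("watcher: available=%v enabled=%v\n",
	usage.Watcher.Available, usage.Watcher.Enabled)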
+func (r Usage) Perform(providedCtx context.Context) (*http.Response, error) { + var ctx context.Context + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + if r.spanStarted == false { + ctx := instrument.Start(providedCtx, "xpack.usage") + defer instrument.Close(ctx) + } + } + if ctx == nil { + ctx = providedCtx + } + + req, err := r.HttpRequest(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.BeforeRequest(req, "xpack.usage") + if reader := instrument.RecordRequestBody(ctx, "xpack.usage", r.raw); reader != nil { + req.Body = reader + } + } + res, err := r.transport.Perform(req) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.AfterRequest(req, "elasticsearch", "xpack.usage") + } + if err != nil { + localErr := fmt.Errorf("an error happened during the Usage query execution: %w", err) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, localErr) + } + return nil, localErr + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a usage.Response +func (r Usage) Do(providedCtx context.Context) (*Response, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, errorResponse) + } + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r Usage) IsSuccess(providedCtx context.Context) (bool, error) { + var ctx context.Context + r.spanStarted = true + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + ctx = instrument.Start(providedCtx, "xpack.usage") + defer instrument.Close(ctx) + } + if ctx == nil { + ctx = providedCtx + } + + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(io.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + if res.StatusCode != 404 { + err := fmt.Errorf("an error happened during the Usage query execution, status code: %d", res.StatusCode) + if instrument, ok := r.instrument.(elastictransport.Instrumentation); ok { + instrument.RecordError(ctx, err) + } + return false, err + } + + return false, nil +} + +// Header set a key, value pair in the Usage headers map. +func (r *Usage) Header(key, value string) *Usage { + r.headers.Set(key, value) + + return r +} + +// MasterTimeout The period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// To indicate that the request should never timeout, set it to `-1`. +// API name: master_timeout +func (r *Usage) MasterTimeout(duration string) *Usage { + r.values.Set("master_timeout", duration) + + return r +} + +// ErrorTrace When set to `true` Elasticsearch will include the full stack trace of errors +// when they occur. +// API name: error_trace +func (r *Usage) ErrorTrace(errortrace bool) *Usage { + r.values.Set("error_trace", strconv.FormatBool(errortrace)) + + return r +} + +// FilterPath Comma-separated list of filters in dot notation which reduce the response +// returned by Elasticsearch. +// API name: filter_path +func (r *Usage) FilterPath(filterpaths ...string) *Usage { + tmp := []string{} + for _, item := range filterpaths { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("filter_path", strings.Join(tmp, ",")) + + return r +} + +// Human When set to `true` will return statistics in a format suitable for humans. +// For example `"exists_time": "1h"` for humans and +// `"exists_time_in_millis": 3600000` for computers. When disabled the human +// readable values will be omitted. This makes sense for responses being +// consumed +// only by machines. +// API name: human +func (r *Usage) Human(human bool) *Usage { + r.values.Set("human", strconv.FormatBool(human)) + + return r +} + +// Pretty If set to `true` the returned JSON will be "pretty-formatted". Only use +// this option for debugging only. +// API name: pretty +func (r *Usage) Pretty(pretty bool) *Usage { + r.values.Set("pretty", strconv.FormatBool(pretty)) + + return r +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 000000000..30568e768 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,914 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. 
The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. 
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []any + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + if f.outputFormat == outputJSON { + buf.WriteByte('{') // for the whole record + } + + // Render builtins + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, true) // escape user-provided keys + + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue + } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) + } + + if bodyStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(bodyStr) + } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record + } + + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
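The helpers that follow are internal rendering machinery; for contrast, a small self-contained sketch of the public surface defined earlier in this file (New, Options, and the logr.Logger they produce):

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Emit key=value lines through an arbitrary write function.
	logger := funcr.New(
		func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{LogCaller: funcr.All, LogTimestamp: true, Verbosity: 1},
	)
	logger = logger.WithName("demo").WithValues("component", "funcr")
	logger.V(1).Info("started", "workers", 4) // written: 1 <= Options.Verbosity
	logger.V(2).Info("ignored")               // discarded: 2 > Options.Verbosity
	logger.Error(fmt.Errorf("boom"), "request failed", "attempt", 3)
}

NewJSON is analogous but emits strict JSON and, as defined above, takes a one-argument write function (func(obj string)) instead.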
+func (f Formatter) renderGroup(name string, values string, args string) string {
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+	needClosingBrace := false
+	if name != "" && (values != "" || args != "") {
+		buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+		buf.WriteByte(f.colon())
+		buf.WriteByte('{')
+		needClosingBrace = true
+	}
+
+	continuing := false
+	if values != "" {
+		buf.WriteString(values)
+		continuing = true
+	}
+
+	if args != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(args)
+	}
+
+	if needClosingBrace {
+		buf.WriteByte('}')
+	}
+
+	return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	copied := false
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			if !copied {
+				newList := make([]any, len(kvList))
+				copy(newList, kvList)
+				kvList = newList
+				copied = true
+			}
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(f.comma())
+			} else {
+				// In theory the format could be something we don't understand. In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		buf.WriteString(f.quoted(k, escapeKeys))
+		buf.WriteByte(f.colon())
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+	if escape {
+		return prettyString(str)
+	}
+	// this is faster
+	return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+	if f.outputFormat == outputJSON {
+		return ','
+	}
+	return ' '
+}
+
+func (f Formatter) colon() byte {
+	if f.outputFormat == outputJSON {
+		return ':'
+	}
+	return '='
+}
+
+func (f Formatter) pretty(value any) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(f.comma()) + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(f.colon()) + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + printComma := false // testing i>0 is not enough because of JSON omitted fields + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(f.comma())
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputting as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
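+//
+// For example, needsEscape("plain") is false, while a string containing a
+// quote, a backslash, or a non-printable rune such as '\t' reports true.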
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+	// Unnamed groups are just inlined.
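+	// Named groups, by contrast, push the current group name and its
+	// pre-rendered values onto f.groups and start collecting in a fresh scope.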
+ if name == "" { + return + } + + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) + + // Start collecting new values. + f.groupName = name + f.valuesStr = "" + f.values = nil +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr any + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []any) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 000000000..7bd84761e
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, kvList)
+		return true
+	})
+
+	if record.Level >= slog.LevelError {
+		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	kvList := make([]any, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = attrToKVs(attr, kvList)
+	}
+	l.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+	l.startGroup(name)
+	return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, grpKVs)
+		}
+		if attr.Key == "" {
+			// slog says we have to inline these
+			kvList = append(kvList, grpKVs...)
+		} else {
+			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+		}
+	} else if attr.Key != "" {
+		kvList = append(kvList, attr.Key, attrVal.Any())
+	}
+
+	return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4)  =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~ logrV2.V(0)  =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+	result := -level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 000000000..515866789
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 000000000..93a8aab51
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API). Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to
+// test type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
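+//
+// For example (a sketch): given lgr := stdr.New(nil), a caller could do
+//
+//	if u, ok := lgr.GetSink().(Underlier); ok {
+//		std := u.GetUnderlying() // the *log.Logger (or other StdLogger) in use
+//		_ = std
++//	}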
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 000000000..773c9b643
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON-encoded OTLP.
+This then ensures that the serialized form is compatible with other OpenTelemetry systems, and that the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must avoid adding unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly to Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 000000000..088d19a6c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. 
+
+* GitHub releases will be made for all releases.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go
new file mode 100644
index 000000000..ad73d8cb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package sdk provides an auto-instrumentable OpenTelemetry SDK.
+
+An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
+process running this SDK. In that case, all telemetry the SDK produces will be
+processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
+
+By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
+auto-instrument the SDK, the SDK will not generate any telemetry.
+*/
+package sdk
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 000000000..af6ef171f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
+type Attr struct {
+	Key   string `json:"key,omitempty"`
+	Value Value  `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 000000000..949e2165c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides a lightweight representation of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
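+
+For example, a trace ID is modeled as a 16-byte array that marshals to a
+32-character hex string in JSON, matching the OTLP/JSON encoding of IDs.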
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 000000000..e854d7e84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns false if id contains at least one non-zero byte.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns false if the span ID contains at least one non-zero byte.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates an ID from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 000000000..29e629d66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
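+// For example, the OTLP/JSON values "42" and 42 both decode to the same
+// int64.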
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 000000000..cecad8bae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Resource type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Resource field: %#v", keyIface)
+		}
+
+		switch key {
+		case "attributes":
+			err = decoder.Decode(&r.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&r.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
new file mode 100644
index 000000000..b6f2e28d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+	Name         string `json:"name,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Attrs        []Attr `json:"attributes,omitempty"`
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Scope type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Scope field: %#v", keyIface)
+		}
+
+		switch key {
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "version":
+			err = decoder.Decode(&s.Version)
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
new file mode 100644
index 000000000..a13a6b733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -0,0 +1,456 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+	// A unique identifier for a trace. All spans from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	TraceID TraceID `json:"traceId,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	SpanID SpanID `json:"spanId,omitempty"`
+	// trace_state conveys information about request position in multiple distributed tracing graphs.
+	// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+	// See also https://github.com/w3c/distributed-tracing for more details about this field.
+	TraceState string `json:"traceState,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+	// Flags, a bit field.
+	//
+	// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+	// Context specification. To read the 8-bit W3C trace flag, use
+	// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	//
+	// Bits 8 and 9 represent the 3 states of whether a span's parent
+	// is remote. The states are (unknown, is not remote, is remote).
+	// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+	// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+	//
+	// When creating span messages, if the message is logically forwarded from another source
+	// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+	// be copied as-is. If creating from a source that does not have an equivalent flags field
+	// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+	// be set to zero.
+	// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+	//
+	// [Optional].
+	Flags uint32 `json:"flags,omitempty"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to non-empty string.
+	// Empty value is equivalent to an unknown span name.
+	//
+	// This field is required.
+	Name string `json:"name"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind SpanKind `json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+	// end_time_unix_nano is the end time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution ends. On the server side, this
+	// is the time when the server application handler stops running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+	// attributes is a collection of key/value pairs. Note, global attributes
+	// like server name can be set using the resource API. Examples of attributes:
+	//
+	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+	//     "/http/server_latency": 300
+	//     "example.com/myattribute": true
+	//     "example.com/score": 10.239
+	//
+	// The OpenTelemetry API specification further restricts the allowed value types:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of attributes that were discarded. Attributes
+	// can be discarded because their keys are too long or because there are too many
+	// attributes. If this value is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+	// events is a collection of Event items.
+	Events []*SpanEvent `json:"events,omitempty"`
+	// dropped_events_count is the number of dropped events. If the value is 0, then no
+	// events were dropped.
+	DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+	// links is a collection of Links, which are references from this span to a span
+	// in the same or different trace.
+	Links []*SpanLink `json:"links,omitempty"`
+	// dropped_links_count is the number of dropped links after the maximum size was
+	// enforced. If this value is 0, then no links were dropped.
+	DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+	// An optional final status for this span. Semantically when Status isn't set, it means
+	// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+	Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+	startT := s.StartTime.UnixNano()
+	if s.StartTime.IsZero() || startT < 0 {
+		startT = 0
+	}
+
+	endT := s.EndTime.UnixNano()
+	if s.EndTime.IsZero() || endT < 0 {
+		endT = 0
+	}
+
+	// Override non-empty default SpanID marshal and omitempty.
+	var parentSpanId string
+	if !s.ParentSpanID.IsEmpty() {
+		b := make([]byte, hex.EncodedLen(spanIDSize))
+		hex.Encode(b, s.ParentSpanID[:])
+		parentSpanId = string(b)
+	}
+
+	type Alias Span
+	return json.Marshal(struct {
+		Alias
+		ParentSpanID string `json:"parentSpanId,omitempty"`
+		StartTime    uint64 `json:"startTimeUnixNano,omitempty"`
+		EndTime      uint64 `json:"endTimeUnixNano,omitempty"`
+	}{
+		Alias:        Alias(s),
+		ParentSpanID: parentSpanId,
+		StartTime:    uint64(startT),
+		EndTime:      uint64(endT),
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Span type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Span field: %#v", keyIface)
+		}
+
+		switch key {
+		case "traceId", "trace_id":
+			err = decoder.Decode(&s.TraceID)
+		case "spanId", "span_id":
+			err = decoder.Decode(&s.SpanID)
+		case "traceState", "trace_state":
+			err = decoder.Decode(&s.TraceState)
+		case "parentSpanId", "parent_span_id":
+			err = decoder.Decode(&s.ParentSpanID)
+		case "flags":
+			err = decoder.Decode(&s.Flags)
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "kind":
+			err = decoder.Decode(&s.Kind)
+		case "startTimeUnixNano", "start_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.StartTime = time.Unix(0, int64(val.Uint64()))
+		case "endTimeUnixNano", "end_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.EndTime = time.Unix(0, int64(val.Uint64()))
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		case "events":
+			err = decoder.Decode(&s.Events)
+		case "droppedEventsCount", "dropped_events_count":
+			err = decoder.Decode(&s.DroppedEvents)
+		case "links":
+			err = decoder.Decode(&s.Links)
+		case "droppedLinksCount", "dropped_links_count":
+			err = decoder.Decode(&s.DroppedLinks)
+		case "status":
+			err = decoder.Decode(&s.Status)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+//	(span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 000000000..1217776ea --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 000000000..69a348f0f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "resource":
+			err = decoder.Decode(&rs.Resource)
+		case "scopeSpans", "scope_spans":
+			err = decoder.Decode(&rs.ScopeSpans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&rs.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+	// The instrumentation scope information for the spans in this message.
+	// Semantically when InstrumentationScope isn't set, it is equivalent with
+	// an empty instrumentation scope name (unknown).
+	Scope *Scope `json:"scope"`
+	// A list of Spans that originate from an instrumentation scope.
+	Spans []*Span `json:"spans,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the span data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all spans and span events in the "spans" field.
+	SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ScopeSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 000000000..0dd01b063
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a ValueKind is not handled. It is
+		// preferable to have users open an issue asking why their attributes
+		// have an "unhandled: " prefix than say that their code is panicking.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)})
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid. Ignore the rest.
+		return err
+	}
+
+	// Only unknown fields. Return nil without unmarshaling any value.
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 000000000..86babf1a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"log/slog"
+	"os"
+	"strconv"
+)
+
+// maxSpan are the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+	// Attrs is the number of allowed attributes for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
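Aside, not part of the patch: the resolution order documented above means a span-specific variable always wins over the generic one. A standalone sketch of that behavior (it mirrors the firstEnv helper whose body follows below, minus the slog warning; the key names are from the OTel spec, the values are made up):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstEnv mirrors the vendored helper: the first set, parseable key wins.
func firstEnv(defaultVal int, keys ...string) int {
	for _, key := range keys {
		if s := os.Getenv(key); s != "" {
			if v, err := strconv.Atoi(s); err == nil {
				return v
			}
		}
	}
	return defaultVal
}

func main() {
	_ = os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64") // only the generic key is set
	limit := firstEnv(128,
		"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", // span-specific key would win if set
		"OTEL_ATTRIBUTE_COUNT_LIMIT",
	)
	fmt.Println(limit) // 64: falls through to the generic key
}
```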
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 000000000..6ebea12a9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+		return nil
+	}
+
+	out := make([]telemetry.Attr, 0, len(attrs))
+	for _, attr := range attrs {
+		key := string(attr.Key)
+		val := convAttrValue(attr.Value)
+		if val.Empty() {
+			continue
+		}
+		out = append(out, telemetry.Attr{Key: key, Value: val})
+	}
+	return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+	switch value.Type() {
+	case attribute.BOOL:
+		return telemetry.BoolValue(value.AsBool())
+	case attribute.INT64:
+		return telemetry.Int64Value(value.AsInt64())
+	case attribute.FLOAT64:
+		return telemetry.Float64Value(value.AsFloat64())
+	case attribute.STRING:
+		v := truncate(maxSpan.AttrValueLen, value.AsString())
+		return telemetry.StringValue(v)
+	case attribute.BOOLSLICE:
+		slice := value.AsBoolSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.BoolValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.INT64SLICE:
+		slice := value.AsInt64Slice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.Int64Value(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.FLOAT64SLICE:
+		slice := value.AsFloat64Slice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.Float64Value(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.STRINGSLICE:
+		slice := value.AsStringSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			v = truncate(maxSpan.AttrValueLen, v)
+			out = append(out, telemetry.StringValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	}
+	return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains less than
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains less than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	//  - Short values less than the default limit (128).
+	//  - Strings with valid encodings that exceed the limit.
+	//  - No limit.
+	//  - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 000000000..cbcfabde3 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
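Aside, not part of the patch: the package-level function variable declared just below (start, like ended in span.go above) is the seam that eBPF-based auto-instrumentation rewrites at runtime; in-process both default to no-ops. A hypothetical test sketch showing how the ended seam could capture the OTLP/JSON payload emitted on span End. It touches unexported identifiers, so it would only compile inside package sdk:

```go
package sdk

import (
	"context"
	"testing"
)

func TestEndedSeamCapturesPayload(t *testing.T) {
	var got []byte
	orig := ended
	ended = func(b []byte) { got = b } // swap the test seam
	t.Cleanup(func() { ended = orig })

	// Without an eBPF probe attached, tracer.start leaves sampled == true,
	// so End marshals the telemetry.Traces and hands it to the seam.
	_, span := TracerProvider().Tracer("seam-test").Start(context.Background(), "op")
	span.End()

	if len(got) == 0 {
		t.Fatal("expected a marshaled telemetry.Traces payload")
	}
}
```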
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 000000000..dbc477a59 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore new file mode 100644 index 000000000..6bf3abc41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -0,0 +1,9 @@ +ot +fo +te +collison +consequentially +ans +nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc new file mode 100644 index 000000000..e2cb3ea94 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -0,0 +1,10 @@ +# https://github.com/codespell-project/codespell +[codespell] +builtin = clear,rare,informal +check-filenames = +check-hidden = +ignore-words = .codespellignore +interactive = 1 +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools +uri-ignore-words-list = * +write = diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 000000000..314766e91 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 000000000..749e8e881 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,15 @@ +.DS_Store +Thumbs.db + +.cache/ +.tools/ +venv/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* +go.work +go.work.sum + +gen/ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 000000000..c58e48ab0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,252 @@ +# See https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades to not include new "default + # enabled" linters. + disable-all: true + # Specifically enable linters we want to use. + enable: + - asasalint + - bodyclose + - depguard + - errcheck + - errorlint + - godot + - gofumpt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - perfsprint + - revive + - staticcheck + - testifylint + - typecheck + - unconvert + - unused + - unparam + - usestdlibvars + - usetesting + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. + - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. 
+ - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + # It's okay to not run gosec and perfsprint in a test. + - path: _test\.go + linters: + - gosec + - perfsprint + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + rules: + non-tests: + files: + - "!$test" + - "!**/*test/*.go" + - "!**/internal/matchers/*.go" + deny: + - pkg: "testing" + - pkg: "github.com/stretchr/testify" + - pkg: "crypto/md5" + - pkg: "crypto/sha1" + - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. + otlp-internal: + files: + - "!**/exporters/otlp/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. + otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. 
+ # Default: 0.8 + confidence: 0.01 + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + disabled: true + arguments: + - allowTypesBefore: "*testing.T" + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - ["call-chain", "loop"] + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - "preserveScope" + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - "sayRepetitiveInsteadOfStutters" + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - "preserveScope" + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - "preserveScope" + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: unhandled-error + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + - name: unnecessary-stmt + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..c076db282 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,3344 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + + + +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. 
+
+### Added
+
+- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
+- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6210)
+- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
+  The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`. (#6236)
+- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
+  The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`. (#6240)
+- Document the pitfalls of using `Resource` as a comparable type.
+  `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
+- Support [Go 1.24]. (#6304)
+- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
+  It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
+  Compared to the previous version, it additionally allows filtering by resource and instrumentation scope. (#6317)
+
+### Changed
+
+- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
+  This allows metric names to keep their original delimiters (e.g. `.`) rather than replacing them with underscores.
+  This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198)
+
+### Fixed
+
+- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called, the passed `ctx` is canceled, and `SpanExporter.Shutdown` has not returned. (#6368)
+- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called, the passed `ctx` is canceled, and `SpanExporter.Export` has not returned. (#6369)
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`.
(#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. 
(#5927)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in the `otel_scope_info` metric as labels. (#5932)
+
+### Changed
+
+- Support scope attributes and make them identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
+- Support scope attributes and make them identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
+- Support scope attributes and make them identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
+- Make the schema URL and scope attributes identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
+- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
+
+### Fixed
+
+- Global MeterProvider registration unwraps global instrument Observers; the undocumented Unwrap() methods are now private. (#5881)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
+- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
+
+### Removed
+
+- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
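+
+The exemplar options introduced above compose with the rest of the meter provider configuration. A minimal, illustrative sketch (not part of the upstream changelog; the Prometheus exporter is only an example choice of `Reader`) of disabling exemplar recording:
+
+```go
+package telemetry
+
+import (
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+func newMeterProvider() (*sdkmetric.MeterProvider, error) {
+	reader, err := prometheus.New()
+	if err != nil {
+		return nil, err
+	}
+	// AlwaysOffFilter rejects every measurement as an exemplar candidate,
+	// disabling exemplar recording entirely.
+	return sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(reader),
+		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
+	), nil
+}
+```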
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/sdk/metric/exemplar` package, which includes the `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types.
+  These are used for configuring the exemplar reservoir for the metrics SDK. (#5747, #5862)
+- Add `WithExportBufferSize` option to the log batch processor. (#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log record duplication in the case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and a panic when calling otel.SetMeterProvider. (#5827)
+- Change the `reflect.TypeOf` call to use a nil pointer so it does not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support the `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instrument creation when setting the meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+ This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. 
(#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. 
(#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent a random number generation data race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting the number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains a `0x80` character in its key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate that the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with the replacement character in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+  - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+  - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter no longer prints the `AttributeValueLengthLimit` and `AttributeCountLimit` fields; instead, it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either a non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix the wrong package name in the error message when parsing the endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+  This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+  This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+  The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses Docker Compose to bring up services instead of Kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
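+
+A minimal, illustrative sketch (not part of the upstream changelog) of wiring the alpha Logs SDK modules above together, assuming the stdout exporter is an acceptable first export path:
+
+```go
+package telemetry
+
+import (
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+)
+
+func newLoggerProvider() (*sdklog.LoggerProvider, error) {
+	exporter, err := stdoutlog.New()
+	if err != nil {
+		return nil, err
+	}
+	// A batching processor decouples log emission from export.
+	return sdklog.NewLoggerProvider(
+		sdklog.WithProcessor(sdklog.NewBatchProcessor(exporter)),
+	), nil
+}
+```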
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+  This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+  This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes, at which point users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+  Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+  Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
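+
+A short, illustrative sketch (not part of the upstream changelog) of the new `Span.AddLink` method; the `messaging.system` attribute and its value are assumptions for the example only:
+
+```go
+package telemetry
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// linkToProducer attaches a link to the current span after creation,
+// e.g. to a span context recovered from an incoming message.
+func linkToProducer(ctx context.Context, remote trace.SpanContext) {
+	span := trace.SpanFromContext(ctx)
+	span.AddLink(trace.Link{
+		SpanContext: remote,
+		Attributes:  []attribute.KeyValue{attribute.String("messaging.system", "kafka")},
+	})
+}
+```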
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+  This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+  This module is in an alpha state; it is subject to breaking changes.
+  See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in the output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+  This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+  It is up to the user to decide if they want to use the returned `Resource` or not.
+  It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when the cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
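+
+A sketch (not part of the upstream changelog) of handling the partial-merge behavior described above; `mergeResources` is a hypothetical helper name:
+
+```go
+package telemetry
+
+import (
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func mergeResources(base, extra *resource.Resource) *resource.Resource {
+	merged, err := resource.Merge(base, extra)
+	if errors.Is(err, resource.ErrSchemaURLConflict) {
+		// A partial result is still returned; decide whether a
+		// mixed-schema Resource is acceptable before using it.
+		log.Printf("schema URL conflict: %v", err)
+	}
+	return merged
+}
+```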
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+  The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+  The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+  The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+  The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow the `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate the member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix the "observable not registered" error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix the baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
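+
+A sketch (not part of the upstream changelog) of the raw, non-percent-encoded constructors added above; the `user.name` key and its value are illustrative:
+
+```go
+package telemetry
+
+import "go.opentelemetry.io/otel/baggage"
+
+func newBaggage() (baggage.Baggage, error) {
+	// NewMemberRaw accepts the raw value; no percent-encoding is
+	// required from the caller.
+	member, err := baggage.NewMemberRaw("user.name", "Alice Smith")
+	if err != nil {
+		return baggage.Baggage{}, err
+	}
+	return baggage.New(member)
+}
+```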
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
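+
+A sketch (not part of the upstream changelog) of migrating off the deprecated constructor to the `noop` package:
+
+```go
+package telemetry
+
+import (
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// Before: tp := trace.NewNoopTracerProvider() (deprecated).
+// After:
+var tp trace.TracerProvider = noop.NewTracerProvider()
+```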
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+` and `/` by `Parse` in `go.opentelemetry.io/otel/baggage`, as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
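+
+A sketch (not part of the upstream changelog) of what the embedding requirement described above looks like for a custom implementation; this skeleton delegates to a no-op tracer where a real implementation would do its own work:
+
+```go
+package custom
+
+import (
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// TracerProvider embeds embedded.TracerProvider, opting into the default
+// behavior for any methods added to the trace API in the future.
+type TracerProvider struct {
+	embedded.TracerProvider
+}
+
+// Tracer satisfies trace.TracerProvider.
+func (TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	return noop.NewTracerProvider().Tracer(name, opts...)
+}
+```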
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` no longer prettifies its output by default. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of the `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable the addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP statuses as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
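+
+A sketch (not part of the upstream changelog) of the new allow-keys filter applied through a view; the instrument name and attribute key are illustrative:
+
+```go
+package telemetry
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func newMeterProvider(reader sdkmetric.Reader) *sdkmetric.MeterProvider {
+	// Keep only the http.route attribute on this instrument's data
+	// points; every other attribute key is dropped.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "http.server.request.duration"},
+		sdkmetric.Stream{AttributeFilter: attribute.NewAllowKeysFilter("http.route")},
+	)
+	return sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(reader),
+		sdkmetric.WithView(view),
+	)
+}
+```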
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflicts at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase the instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation`, the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock, and race condition in the batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in the batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix in the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead (see the sketch after this list). (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
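
As the deprecation notes suggest, trace export moves to OTLP. A minimal sketch, assuming a local collector listening on the default OTLP/HTTP port; the endpoint is an assumption for the example:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Replaces the removed Jaeger exporter; Jaeger accepts OTLP natively.
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("localhost:4318"), // assumed collector address
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
}
```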
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+A short usage sketch follows this release's notes.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- The OTLP metrics exporter now supports the Exponential Histogram data type. (#4222)
+- Fix serialization of `time.Time` zero values in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
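
With the metric API stable, basic instrumentation looks like the following sketch; the meter name, instrument name, and unit are invented for the example:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// The global helpers live in the go.opentelemetry.io/otel package.
	meter := otel.Meter("example.com/app")

	counter, err := meter.Int64Counter("jobs.processed",
		metric.WithDescription("Number of processed jobs."),
		metric.WithUnit("{job}"),
	)
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1)
}
```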
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+    - `NewAddConfig` used to create a new `AddConfig`
+    - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+    - `NewRecordConfig` used to create a new `RecordConfig`
+    - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+    - `NewObserveConfig` used to create a new `ObserveConfig`
+    - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move the No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue` (see the sketch after this list). (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move the global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
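
A sketch of the option-based measurement methods; the option constructors shown are the ones that later landed in `go.opentelemetry.io/otel/metric` after the `metric/instrument` package was folded in, and the attribute values are invented:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.com/app")
	counter, _ := meter.Int64Counter("http.requests")

	// Measurement attributes are passed as options rather than a
	// variadic list of KeyValues.
	counter.Add(context.Background(), 1,
		metric.WithAttributes(
			attribute.String("http.method", "GET"),
			attribute.Int("http.status_code", 200),
		),
	)
}
```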
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource` (see the sketch after this release's notes). (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in the OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)` and the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`; use `NewMeterProvider().Meter("")` instead. (#3893)
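
A minimal sketch of the `WithHostID` detector added in this release candidate; `WithHost` is included only to round out the example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithHostID enables the host.id detector; a fix noted above (#4317)
	// later kept it from setting an empty value.
	res, err := resource.New(context.Background(),
		resource.WithHostID(),
		resource.WithHost(),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Attributes())
}
```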
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric` (see the sketch after this list). (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout for an export attempt.
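
The programmatic equivalent of those environment variables, as a sketch; the stdout exporter and the interval and timeout values are arbitrary choices for the example:

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	exp, err := stdoutmetric.New()
	if err != nil {
		panic(err)
	}

	// Programmatic counterparts of OTEL_METRIC_EXPORT_INTERVAL and
	// OTEL_METRIC_EXPORT_TIMEOUT for the periodic reader.
	reader := sdkmetric.NewPeriodicReader(exp,
		sdkmetric.WithInterval(30*time.Second),
		sdkmetric.WithTimeout(10*time.Second),
	)

	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```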
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead (see the sketch after this list). (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
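
A sketch of the unit strings replacing the `unit` constants; the instrument name is invented, and the option is shown in its current `go.opentelemetry.io/otel/metric` home:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.com/app")

	// Units are plain UCUM strings now; "ms" replaces the removed
	// unit.Milliseconds constant.
	hist, _ := meter.Float64Histogram("request.duration",
		metric.WithUnit("ms"),
	)
	hist.Record(context.Background(), 12.7)
}
```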
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification (see the `httpconv` sketch after this list). (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
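
A minimal sketch of the consolidated `httpconv` helpers; note an entry earlier in this document records that these `httpconv`/`netconv` packages stopped being published from semconv `v1.21.0` on:

```go
package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/semconv/v1.13.0/httpconv"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/items", nil)

	// ClientRequest subsumes the older HTTPClientAttributesFromHTTPRequest
	// and related helpers from semconv v1.12.0.
	attrs := httpconv.ClientRequest(req)
	fmt.Println(attrs)
}
```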
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed (see the sketch after this list).
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
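
A sketch of the reworked `RegisterCallback` API described above, in its current `go.opentelemetry.io/otel/metric` form; the gauge name and observed value are invented:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.com/app")

	queueLen, err := meter.Int64ObservableGauge("queue.length")
	if err != nil {
		log.Fatal(err)
	}

	// The callback receives an Observer and must return an error;
	// instruments are passed variadically and observed via the Observer.
	reg, err := meter.RegisterCallback(
		func(ctx context.Context, o metric.Observer) error {
			o.ObserveInt64(queueLen, 42) // 42 is a stand-in measurement
			return nil
		},
		queueLen,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = reg.Unregister() }()
}
```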
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead (see the sketch after this list). (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
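
A sketch of instrument creation directly on the `Meter`, replacing the removed `InstrumentProvider` intermediaries; the names are invented:

```go
package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	provider := sdkmetric.NewMeterProvider()
	meter := provider.Meter("example.com/app")

	// Instruments are created directly on the Meter; no per-kind
	// provider lookup is needed anymore.
	counter, _ := meter.Int64Counter("jobs.done")
	hist, _ := meter.Float64Histogram("job.duration")

	ctx := context.Background()
	counter.Add(ctx, 1)
	hist.Record(ctx, 0.25)
	_ = provider.Shutdown(ctx)
}
```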
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric` (see the sketch after this list).
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` allows instrument authors to test that returned datapoints have appropriate attributes. (#3487)
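
A minimal sketch of the view additions; the instrument and stream names are invented:

```go
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Rename a matching instrument's output stream; this replaces the
	// removed sdk/metric/view package's View and New.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "latency"},
		sdkmetric.Stream{Name: "request.latency"},
	)

	// Views are registered on the MeterProvider and apply to all readers.
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
	_ = provider
}
```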
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent a panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus (see the sketch below). (#3239)
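
A sketch combining the `WithRegisterer` option and the fallible `New` described above; the custom registry is an arbitrary choice for the example:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	registry := prometheus.NewRegistry()

	// New returns an error if registration with Prometheus fails;
	// WithRegisterer overrides the default registerer.
	exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
	if err != nil {
		log.Fatal(err)
	}

	// The exporter acts as a metric reader for the MeterProvider.
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
	_ = provider
}
```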
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounter`s are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops (see the sketch below). (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
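
A sketch of the `SetStatus` behavior change, assuming the SDK's status precedence where `Ok` outranks `Error`:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "op")
	defer span.End()

	span.SetStatus(codes.Ok, "")
	// Since #3214, attempts to lower the status are no-ops: the span
	// stays codes.Ok despite this later Error call.
	span.SetStatus(codes.Error, "too late")
}
```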
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use the default view if an instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the exporters' configuration in the `TracerProvider` message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add a span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace` (see the sketch after this list).
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
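
A sketch of `WithRawSpanLimits`; the specific limit values are invented:

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Start from the defaults, then override selectively; unlike
	// WithSpanLimits, raw limits keep zero and negative values.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeCountLimit = -1         // negative means unlimited
	limits.AttributeValueLengthLimit = 1024 // truncate long values

	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	_ = tp
}
```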
+
+### Changed
+
+- Drop the oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` clients. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- The Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only has the `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values (see the sketch after this release's notes). (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding them to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
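
A sketch of the baggage behavior referenced in the fixes above, using the current `baggage` package; the member key and value are invented:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Members are validated once, in NewMember, not again when added
	// to the baggage (#2522); percent-encoded values are decoded.
	m, err := baggage.NewMember("userId", "alice%20smith")
	if err != nil {
		panic(err)
	}
	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(b.String())
}
```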
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback on the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and to handle errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and to handle errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporters/otlp/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporters/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp".WithBackoff` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with the current specification and protocol, along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported; the new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection (see the sketch after this release's Changed section). (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
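+
+A sketch of the `WithGRPCConn` option referenced above. The collector address and the use of `credentials/insecure` (which requires a recent `google.golang.org/grpc`) are assumptions for illustration only:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+
+    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/credentials/insecure"
+)
+
+func newExporter(ctx context.Context, conn *grpc.ClientConn) (*otlptrace.Exporter, error) {
+    // WithGRPCConn tells the exporter to reuse conn rather than dialing itself.
+    return otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
+}
+
+func main() {
+    conn, err := grpc.Dial("localhost:4317", // assumed local collector endpoint
+        grpc.WithTransportCredentials(insecure.NewCredentials()))
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer conn.Close()
+
+    if _, err := newExporter(context.Background(), conn); err != nil {
+        log.Fatal(err)
+    }
+}
+```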
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter's error status if shutdown finishes at the same time the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer was created with the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when a panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions (see the sketch after this list). (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
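+
+A short sketch tying together the typed slice attributes and `WithStackTrace` additions above; the tracer name, attribute keys, and error are illustrative assumptions:
+
+```go
+package main
+
+import (
+    "context"
+    "errors"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/trace"
+)
+
+func annotate(ctx context.Context) {
+    _, span := otel.Tracer("example").Start(ctx, "annotate")
+    defer span.End()
+
+    // Typed slice attributes replace the deprecated Array function.
+    span.SetAttributes(
+        attribute.StringSlice("rpc.args", []string{"a", "b"}),
+        attribute.Int64Slice("retry.delays_ms", []int64{10, 20, 40}),
+    )
+
+    // WithStackTrace records the call stack alongside the error event.
+    span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
+}
+```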
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - `ValueRecorder` becomes `Histogram`
+  - `ValueObserver` becomes `Gauge`
+  - `SumObserver` becomes `CounterObserver`
+  - `UpDownSumObserver` becomes `UpDownCounterObserver`
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, while pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier, the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer returns an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in `resources.go`. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added the `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added the `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the `LinkFromContext` API, which returns a `Link` encapsulating the `SpanContext` from the provided context along with any given attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added the `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added the `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added the `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates the package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates the package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type (a sketch appears at the end of this release's notes). (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates the package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates the package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates the package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- `BatchSpanProcessor` now reports export failures when the `ForceFlush()` method is called. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` so that it now only accepts Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated the `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated the `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated the `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated the `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to enable them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` interface in the `go.opentelemetry.io/otel/sdk/trace` package.
+  The `Tracer` method is not required to be included in this interface and, given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns whether the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package, as sketched below. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- `BatchSpanProcessor` now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if a channel is closed, fixing a Jaeger exporter test panic caused by closing an already closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
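+
+A sketch of the reworked baggage flow referenced above; the key and value are illustrative assumptions:
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "go.opentelemetry.io/otel/baggage"
+)
+
+func main() {
+    member, err := baggage.NewMember("user.id", "42") // illustrative key/value
+    if err != nil {
+        panic(err)
+    }
+    bag, err := baggage.New(member)
+    if err != nil {
+        panic(err)
+    }
+
+    // Baggage now travels via the context helpers, replacing the removed
+    // Set/Value/ContextWithValue family of functions.
+    ctx := baggage.ContextWithBaggage(context.Background(), bag)
+    fmt.Println(baggage.FromContext(ctx).Member("user.id").Value()) // 42
+}
+```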
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, which set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- The `ExportTimeout` option was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and the `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that `BatchSpanProcessor` ignores `OnEnd` and `ForceFlush` after `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added the `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds the `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds the `otlpgrpc.WithTimeout` option for configuring the timeout on the otlp/gRPC exporter. (#1821)
+- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed a typo for the default service name in the Jaeger Exporter. (#1797)
+- Fix flaky OTLP test for the reconnection of the client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- The Jaeger exporter was updated to use Thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/sdk/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It is no longer a conglomerate of span, event, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's service name instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead, it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Removed setting the error status while recording an error with a `Span` from the `oteltest` package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant with the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove the `Process` configuration, the `WithProcessFromEnv` and `ProcessFromEnv` options, and the associated type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, which focuses the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added a `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The `SimpleSpanProcessor` will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added a non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- The Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- The Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549)
+- Removed `WithConfig` from the tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+  These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed the `jaeger.WithProcess` configuration option. (#1673)
+- Removed the `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set the span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Fixed synchronization issues in the global trace delegate implementation. (#1686)
+- Reduced excess memory usage by the global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added a `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface (see the sketch after this section). (#1544)
+- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+  | OS | Go Version | Architecture |
+  | ------- | ---------- | ------------ |
+  | Ubuntu | 1.15 | amd64 |
+  | Ubuntu | 1.14 | amd64 |
+  | Ubuntu | 1.15 | 386 |
+  | Ubuntu | 1.14 | 386 |
+  | MacOS | 1.15 | amd64 |
+  | MacOS | 1.14 | amd64 |
+  | Windows | 1.15 | amd64 |
+  | Windows | 1.14 | amd64 |
+  | Windows | 1.15 | 386 |
+  | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced the interface `oteltest.SpanRecorder` with its existing implementation `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces. (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed the attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` target is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- The Windows build of the Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
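+
+A sketch of the `HeaderCarrier` adapter and its new `Keys()` method; the choice of the W3C `TraceContext` propagator here is an assumption for illustration:
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+    // HeaderCarrier adapts http.Header to the TextMapCarrier interface.
+    carrier := propagation.HeaderCarrier(http.Header{})
+    prop := propagation.TraceContext{} // W3C trace-context propagator
+
+    prop.Inject(context.Background(), carrier)
+
+    // Keys() lists the header keys present in the carrier (empty here,
+    // since the background context carries no span to inject).
+    fmt.Println(carrier.Keys())
+}
+```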
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename the project's default branch from `master` to `main`. (#1505)
+- Reverse the order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric. (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- The `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for the OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added a CodeQL workflow to GitHub Actions. (#1428)
+- Added a Gosec workflow to GitHub Actions. (#1429)
+- Add a new HTTP driver for the OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions. (#1382)
+- Remove duplicate checkout from the GitHub Actions workflow. (#1407)
+- The Metric `array` aggregator is renamed `exact` to match its `aggregation.Kind`. (#1412)
+- The Metric `exact` aggregator includes per-point timestamps. (#1412)
+- The Metric stdout exporter uses the MinMaxSumCount aggregator for ValueRecorder instruments. (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize the metric histogram aggregator to reuse its slice of buckets. (#1435)
+- Metric aggregator `Count()` and histogram `Bucket.Counts` are consistently `uint64`. (#1430)
+- The histogram aggregator accepts functional options and uses default boundaries if none are given. (#1434)
+- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext`. (#1432)
+- Moved the gRPC driver for the OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412)
+- Remove the DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released. (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shutdown the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the error tag. (#1328)
+- The `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into the `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4. (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools`. (#1375)
+
+### Fixed
+
+- Correctness fixes for the Metric SDK `SumObserver` and `UpDownSumObserver` instruments. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to the `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+  This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move the test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename the correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- The `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix the global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for the `ExactKind` aggregator in the OTLP metrics transformer. (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- The OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling, and implements the `Stringer` interface. (#1214)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer. (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add a `Shutdown` method to the `sdk/trace` provider that shuts down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+  They now are `Unset`, `Error`, and `Ok`.
+  They no longer track the gRPC codes. (#1214) (See the sketch after these release notes.)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this project instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- The `Shutdown` method of `SpanProcessor` and all its implementations now receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
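+
+A short sketch of the new status codes in use (present-day import paths assumed; in v0.13.0 the span type still lived under `api/trace`):
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// finish records the outcome on a span using the three specification
+// status codes: Unset (the default), Error, and Ok.
+func finish(span trace.Span, err error) {
+	if err != nil {
+		span.SetStatus(codes.Error, err.Error())
+		return
+	}
+	span.SetStatus(codes.Ok, "")
+}
+
+func main() {
+	// A context with no span yields a no-op span, good enough for a demo.
+	finish(trace.SpanFromContext(context.Background()), errors.New("upstream timeout"))
+}
+```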
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+  This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add reconnecting UDP connection type to Jaeger exporter.
+  This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+  It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+  This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) (See the sampler sketch after these release notes.)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+  recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+  Style Guide to reflect this styling rule. (#1172)
+- Don't consider unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
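+
+A minimal sketch of the renamed samplers in use (the `NewTracerProvider` and `WithSampler` names follow later releases; in v0.12.0 the provider constructor was still `NewProvider`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Sample roughly 25% of new root traces; child spans follow their
+	// parent's decision, which is what the ParentBased wrapper adds.
+	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))
+
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
+	fmt.Println(tp != nil)
+}
+```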
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+  This removes this bridge's package dependencies from the rest of the OpenTelemetry project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+  Rename `*Callback` with `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
+  This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+  If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+  This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+  Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+  This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+  The functionality this method provided was limited compared to what a user can provide themselves.
+  It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+  These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+  This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+  This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) (See the sketch after these release notes.)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+  References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
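+
+A minimal sketch of a custom detector (the `resource.New`, `WithDetectors`, and `NewSchemaless` names follow present-day releases and are assumptions for v0.9.0):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// regionDetector is a hypothetical Detector reporting the cloud region.
+type regionDetector struct{}
+
+func (regionDetector) Detect(ctx context.Context) (*resource.Resource, error) {
+	return resource.NewSchemaless(attribute.String("cloud.region", "eu-west-1")), nil
+}
+
+func main() {
+	res, err := resource.New(context.Background(), resource.WithDetectors(regionDetector{}))
+	fmt.Println(res, err)
+}
+```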
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) (See the sketch after these release notes.)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+  This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+  If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+  Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+  This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+  If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+  This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
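+
+The B3 propagator now lives on in the `go.opentelemetry.io/contrib/propagators/b3` module; the following sketch shows the same encoding idea in its present-day form (the exact v0.8.0 field and constant names may differ):
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/contrib/propagators/b3"
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	// Inject both supported encodings; extraction prefers Single Header
+	// and falls back to Multiple Header.
+	prop := b3.New(b3.WithInjectEncoding(b3.B3MultipleHeader | b3.B3SingleHeader))
+	otel.SetTextMapPropagator(prop)
+}
+```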
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) (See the sketch after this list.)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
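+
+A minimal sketch of global error handling (shown with the present-day `otel.SetErrorHandler`/`otel.Handle` names; in v0.7.0 the equivalents lived in the `api/global` and `api/oterror` packages):
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel"
+)
+
+// logHandler is a user-defined handler for otherwise unhandled errors.
+type logHandler struct{}
+
+func (logHandler) Handle(err error) { log.Printf("otel: %v", err) }
+
+func main() {
+	otel.SetErrorHandler(logHandler{})
+	// Library code can now route errors through the global handler.
+	otel.Handle(errors.New("exporter unreachable"))
+}
+```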
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify a custom HTTP client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequently, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorder the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Rename `circle-ci` target to more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for tracing and metrics.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD
+[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
+[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
+[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
+[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + + +[Go 1.24]: https://go.dev/doc/go1.24 +[Go 1.23]: https://go.dev/doc/go1.23 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 000000000..945a07d2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @XSAM @dashpole @pellared @dmathieu + +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 000000000..7b8af585a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,676 @@ +# 
Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean`, then everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of a redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company count as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    need to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting, the objections to the
+    PR may be dismissed or resolved or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* Urgent fixes can be an exception as long as they have been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conform to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
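+
+As a purely illustrative sketch of this principle (the tracer name, event
+name, and attribute below are invented for the example), the specification
+describes adding a span event as an operation taking a name and a collection
+of attributes, while the Go API keeps the required name positional and
+accepts the attributes through idiomatic variadic options:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	tracer := otel.Tracer("example")
+	_, span := tracer.Start(context.Background(), "operation")
+	defer span.End()
+
+	// The spec models this as a name plus a collection of attributes;
+	// the Go API moves the attributes into variadic functional options.
+	span.AddEvent("cache.miss",
+		trace.WithAttributes(attribute.String("cache.key", "user:42")))
+}
+```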
+
+## Tests
+
+Each piece of functionality should be covered by tests.
+
+Performance-critical functionality should also be covered by benchmarks.
+
+- Pull requests adding a performance-critical functionality
+should have `go test -bench` output in their description.
+- Pull requests changing a performance-critical functionality
+should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
+output in their description.
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+  ```sh
+  go install golang.org/x/pkgsite/cmd/pkgsite@latest
+  pkgsite
+  ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind, the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices,
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you
+plan to submit. This check will need to pass for your changes to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation explaining the reasons
+why needs to be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with the
+specific type name this configuration applies to if there are multiple
+`config` types in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general, the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config` are not shared across package boundaries,
+meaning a `config` from one package should not be directly used by another.
+The one exception is the API packages. The configs from the base API, e.g.
+`go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK; therefore, it is expected that these are exported.
+
+When a config is exported, we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps setting any defaults and looping over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+return an error as well, which is expected to be handled by the instantiation
+function or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated on the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user-configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or, in the
+special case of a boolean option, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
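+// Compared to the declared option type above, this closure-based form is
+// more concise, though it typically allocates a closure for each captured
+// value.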
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct` types that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is to make code self-documenting by naming method parameters.
+Therefore, where appropriate, methods of every exported interface type should
+have their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API, the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems arose with libraries that upgraded to v2 when an application did not;
+those libraries would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed, it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter struct {
+	Exporter
+}
+
+// Close releases any resources held by the embedded Exporter.
+func (e ClosingExporter) Close() { /* ... */ }
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface and will duplicate the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
+passed when recording a value (e.g.
starting a span, recording a measurement, emitting a log). +Recording methods should not return an error describing the cancellation state of the context +when they complete, nor should they abort any work. + +This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for +the method. In that case the context cancellation can be used for the timeout with the +restriction that this behavior is documented for the method. Otherwise, timeouts +are expected to be handled by the user calling the API, not the implementation. + +Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method +of a provider. It is assumed the context passed from a user is not used for this purpose. + +Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, +force flushing telemetry, shutting down a signal provider) the context cancellation +should be honored. This means all work done on behalf of the user provided context +should be canceled. + +## Approvers and Maintainers + +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +### Approvers + +### Maintainers + +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..226410d74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,328 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . 
-name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +# User to run as in docker images. +DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" + +# Install python packages into the virtual environment. 
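+# The rule below targets the tool's file under $(PYTOOLS), so the install
+# only re-runs when that file is missing or older than the virtual
+# environment itself.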
+$(PYTOOLS)/%: $(PYTOOLS) + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... + +.PHONY: vanity-import-fix +vanity-import-fix: $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: benchmark +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) +benchmark/%: + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ + && cd $* \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. 
+ +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.21 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + +.PHONY: codespell +codespell: $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +# The weaver docker image to use for semconv-generate. +WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. 
+ mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + +.PHONY: prerelease +prerelease: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 000000000..8421cd7e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,120 @@ +# OpenTelemetry-Go + +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. + +## Project Status + +| Signal | Status | +|---------|--------------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Beta[^1] | + +Progress and status specific to this repository is tracked in our +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](VERSIONING.md). 
+ +[^1]: https://github.com/orgs/open-telemetry/projects/43 + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.24 | amd64 | +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.24 | 386 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| Ubuntu | 1.22 | arm64 | +| macOS 13 | 1.24 | amd64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.24 | arm64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.24 | amd64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.24 | 386 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). + +If you need to extend the telemetry an instrumentation library provides or want +to build your own instrumentation for your application directly you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. + +### Export + +Now that your application is instrumented to collect telemetry, it needs an +export pipeline to send that telemetry to an observability platform. + +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
+
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..1e13ae54f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,132 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
+2. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.30.0" # Change to the release version you are generating.
+make semconv-generate # Uses the exported TAG.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease), to ensure that there are no unwanted changes made to the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Make sure the new section is under the comment for the released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough-to-work-around problems](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <sub-module tag>
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..b8cb605c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver
+    2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+ * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. 
Note there
+    is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+    it is that the module name now includes the `/v2`, so include `/v2`
+    whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API and telemetry will
+      be versioned with a major version greater than `v0`.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * Stable contrib modules cannot depend on experimental modules from this
+      project.
+    * All stable contrib modules of the same major version with this project
+      will use the same entire version as this project.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module's code has not been changed. Instead
+        the only change that will have been included is to have updated that
+        module's dependency on this project's stable APIs.
+      * When an experimental module in contrib becomes stable a new stable
+        module version will be released and will include this now stable
+        module. The new stable module version will be an increment of the minor
+        version number and will be applied to all existing stable contrib
+        modules, this project's modules, and the newly stable module being
+        released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided.
+This project is simplified to include only the
+following modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to the point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a
+stable release. The `otel/metric` and `otel/sdk/metric` modules are still
+under active development and the `otel` module depends on both `otel/trace`
+and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+ +* `otel`: `v1.1.0` +* `otel/trace`: `v1.1.0` +* `otel/metric`: `v1.1.0` +* `otel/baggage`: `v1.1.0` +* `otel/sdk/trace`: `v1.1.0` +* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 000000000..5b3da8f14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go new file mode 100644 index 000000000..eef51ebc2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -0,0 +1,5 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package attribute provides key and value attributes. +package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go new file mode 100644 index 000000000..318e42fca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "bytes" + "sync" + "sync/atomic" +) + +type ( + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. + Encoder interface { + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. + Encode(iterator Iterator) string + + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. + ID() EncoderID + } + + // EncoderID is used to identify distinct Encoder + // implementations, for caching encoded results. + EncoderID struct { + value uint64 + } + + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. + pool sync.Pool // *bytes.Buffer + } +) + +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. +const escapeChar = '\\' + +var ( + _ Encoder = &defaultAttrEncoder{} + + // encoderIDCounter is for generating IDs for other attribute encoders. + encoderIDCounter uint64 + + defaultEncoderOnce sync.Once + defaultEncoderID = NewEncoderID() + defaultEncoderInstance *defaultAttrEncoder +) + +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. 
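+//
+// A minimal sketch of that convention (the variable name is illustrative):
+//
+//	var myEncoderID = attribute.NewEncoderID()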
+func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 000000000..be9cd922d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. 
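+//
+// A usage sketch (the keys here are illustrative, not part of this API):
+//
+//	f := attribute.NewDenyKeysFilter(attribute.Key("password"))
+//	f(attribute.String("password", "hunter2")) // false: excluded
+//	f(attribute.String("user", "gopher"))      // true: kept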
+func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..f2ba89ce4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. 
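+//
+// An illustrative sketch of a full merge (keys and values are hypothetical):
+//
+//	a := attribute.NewSet(attribute.String("k1", "a"), attribute.String("k2", "a"))
+//	b := attribute.NewSet(attribute.String("k2", "b"), attribute.String("k3", "b"))
+//	mi := attribute.NewMergeIterator(&a, &b)
+//	for mi.Next() {
+//		_ = mi.Attribute() // k1=a, then k2=a (first set wins), then k3=b
+//	}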
+func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..d9a22c650 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). 
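+//
+// For example (the key name is illustrative):
+//
+//	attribute.Key("temperature").Float64(36.6)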
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..3028f9a40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
+
+// Valid reports whether kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+	return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+	return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+	return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+	return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+	return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+	return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+	return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+	return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+	return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+	return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+	return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
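+//
+// For example, time.Duration implements fmt.Stringer (the key name is
+// illustrative):
+//
+//	attribute.Stringer("elapsed", 150*time.Millisecond) // elapsed="150ms"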
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 000000000..6cbefcead
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,411 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"cmp"
+	"encoding/json"
+	"reflect"
+	"slices"
+	"sort"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
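+//
+// A lookup sketch (set contents are hypothetical):
+//
+//	s := attribute.NewSet(attribute.Int("answer", 42))
+//	if v, ok := s.Value(attribute.Key("answer")); ok {
+//		_ = v.AsInt64() // 42
+//	}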
+func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. 
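+	// For example (hypothetical input), the sorted slice
+	//   [k1=a k1=b k2=c]
+	// is partitioned into
+	//   [k1=a | k1=b k2=c]
+	// where the dropped duplicate k1=a stays before the partition point and
+	// the kept, last-value-wins pairs are contiguous at the end.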
+ for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. 
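+//
+// For example, three KeyValues are stored as a comparable [3]KeyValue array
+// value, so two Sets with identical contents produce Distinct values that
+// compare equal and can be used interchangeably as map keys.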
+func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + return [1]KeyValue(kvs) + case 2: + return [2]KeyValue(kvs) + case 3: + return [3]KeyValue(kvs) + case 4: + return [4]KeyValue(kvs) + case 5: + return [5]KeyValue(kvs) + case 6: + return [6]KeyValue(kvs) + case 7: + return [7]KeyValue(kvs) + case 8: + return [8]KeyValue(kvs) + case 9: + return [9]KeyValue(kvs) + case 10: + return [10]KeyValue(kvs) + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..9ea0ecbbd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. 
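+//
+// For example:
+//
+//	attribute.StringValue("gopher") // Type() == STRING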
+func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
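+//
+// An illustrative sketch of the formats produced:
+//
+//	attribute.BoolValue(true).Emit()            // "true"
+//	attribute.IntSliceValue([]int{1, 2}).Emit() // "[1,2]"
+//	attribute.StringValue("x").Emit()           // "x"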
+func (v Value) Emit() string {
+	switch v.Type() {
+	case BOOLSLICE:
+		return fmt.Sprint(v.asBoolSlice())
+	case BOOL:
+		return strconv.FormatBool(v.AsBool())
+	case INT64SLICE:
+		j, err := json.Marshal(v.asInt64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+		}
+		return string(j)
+	case INT64:
+		return strconv.FormatInt(v.AsInt64(), 10)
+	case FLOAT64SLICE:
+		j, err := json.Marshal(v.asFloat64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+		}
+		return string(j)
+	case FLOAT64:
+		return fmt.Sprint(v.AsFloat64())
+	case STRINGSLICE:
+		j, err := json.Marshal(v.asStringSlice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asStringSlice())
+		}
+		return string(j)
+	case STRING:
+		return v.stringly
+	default:
+		return "unknown"
+	}
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+	var jsonVal struct {
+		Type  string
+		Value interface{}
+	}
+	jsonVal.Type = v.Type().String()
+	jsonVal.Value = v.AsInterface()
+	return json.Marshal(jsonVal)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 000000000..7d798435e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 000000000..0e1fe2422
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,1018 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue indicates if a zero-value value means the property does not
+	// have a value or if it was the zero-value.
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
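+//
+// A usage sketch (the key is illustrative):
+//
+//	p, err := baggage.NewKeyProperty("isRetried")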
+func NewKeyProperty(key string) (Property, error) {
+	if !validateBaggageName(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateBaggageName(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateBaggageName(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the Property value is set. The returned string is
+// empty both when the value is empty and when it is unset, so use the
+// boolean to distinguish the two cases.
+func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a header string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. +// +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewMemberRaw] instead +// that does not require percent-encoding of the value. +func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewMemberRaw(key, decodedValue, props...) +} + +// NewMemberRaw returns a new Member from the passed arguments. +// +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. 
+// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. +func NewMemberRaw(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var props properties + keyValue, properties, found := strings.Cut(member, propertyDelimiter) + if found { + // Parse the member properties. + for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + } + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + k, v, found := strings.Cut(keyValue, keyValueDelimiter) + if !found { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key := strings.TrimSpace(k) + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) + } + + // Decode a percent-encoded value. + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) + } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(c) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + +// validate ensures m conforms to the W3C Baggage specification. +// A key must be an ASCII string, returning an error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !validateBaggageName(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. 
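// Parsing sketch for the list-member handling above, using the package-level
// Parse defined further below in this file ("DF%2028" percent-decodes to
// "DF 28"; member iteration order is not significant):
//
//	b, err := baggage.Parse("userID=42,serverNode=DF%2028;prop=1")
//	if err != nil {
//		panic(err)
//	}
//	for _, m := range b.Members() {
//		fmt.Println(m.Key(), m.Value()) // userID 42 / serverNode DF 28
//	}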
+func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (m Member) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) + if len(m.properties) > 0 { + s += propertyDelimiter + m.properties.String() + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. 
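// Duplicate handling in New and Parse above is last-one-wins. A minimal
// sketch (Member and Len are defined just below):
//
//	b, _ := baggage.Parse("k=first,k=second")
//	fmt.Println(b.Len())               // 1
//	fmt.Println(b.Member("k").Value()) // second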
+func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members is not significant. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. +func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + s := Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } + } + return strings.Join(members, listDelimiter) +} + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " = value ". 
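// SetMember and DeleteMember above return modified copies and never mutate
// the receiver. A small sketch of that copy-on-write behavior:
//
//	m1, _ := baggage.NewMemberRaw("color", "red")
//	b1, _ := baggage.New(m1)
//	m2, _ := baggage.NewMemberRaw("color", "blue")
//	b2, _ := b1.SetMember(m2)
//	fmt.Println(b1.Member("color").Value())     // red (b1 unchanged)
//	fmt.Println(b2.Member("color").Value())     // blue
//	fmt.Println(b2.DeleteMember("color").Len()) // 0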
+ keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return + } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. 
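// parsePropertyInternal above trims optional whitespace around the key and
// the value and percent-decodes the value. That behavior is observable
// through the package-level Parse (a sketch; printed values are
// illustrative):
//
//	b, _ := baggage.Parse("k=v; key1 ; key2 = a%20b ")
//	for _, p := range b.Member("k").Properties() {
//		v, ok := p.Value()
//		fmt.Println(p.Key(), v, ok) // key1 "" false, then key2 "a b" true
//	}
//	_, err := baggage.Parse("k=v; key = v v2") // trailing content after the value
//	fmt.Println(err != nil)                    // true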
+func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] +} + +// validateValue checks if the string is a valid W3C Baggage value. +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + +func validateValueChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. 
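// Round-trip sketch for valueEscape above: String percent-encodes values on
// the way out and Parse decodes them on the way in (assumes the public
// baggage package):
//
//	m, _ := baggage.NewMemberRaw("key", "a b%c")
//	b, _ := baggage.New(m)
//	fmt.Println(b.String()) // key=a%20b%25c
//	b2, _ := baggage.Parse(b.String())
//	fmt.Println(b2.Member("key").Value()) // a b%c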
+func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 000000000..a572461a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 000000000..b51d87cab --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 000000000..24c52b387 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go new file mode 100644 index 000000000..49a35b122 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -0,0 +1,106 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package codes // import "go.opentelemetry.io/otel/codes" + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" +) + +const ( + // Unset is the default status code. + Unset Code = 0 + + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Error Code = 1 + + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +) + +// Code is an 32-bit representation of a status state. 
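// JSON round-trip sketch for the status codes above (Code's UnmarshalJSON
// and MarshalJSON follow below; assumes the public
// go.opentelemetry.io/otel/codes package; imports: encoding/json, fmt):
//
//	var c codes.Code
//	_ = json.Unmarshal([]byte(`"Error"`), &c) // string form
//	fmt.Println(c)                            // Error
//	_ = json.Unmarshal([]byte(`1`), &c)       // numeric form, must be below maxCode
//	out, _ := json.Marshal(&c)                // MarshalJSON has a pointer receiver, so marshal &c
//	fmt.Println(string(out))                  // "Error"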
+type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return errors.New("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..ee8db448b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 000000000..e4c4a753c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,3 @@ +# This is a renovate-friendly source of Docker images. +FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python +FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..921f85961 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. 
The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about logs, see go.opentelemetry.io/otel/log. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..67414c71e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 000000000..93e80ea30 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 000000000..07623b679 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) + +// GetErrorHandler returns the global ErrorHandler instance. 
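// Usage sketch for the ErrorHandlerFunc adapter above, wired through the
// package-level SetErrorHandler and Handle from handler.go shown below
// (imports: errors, log; the error text is illustrative):
//
//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
//		log.Printf("otel: %v", err)
//	}))
//	otel.Handle(errors.New("exporter unreachable")) // routed to the handler above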
+// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 000000000..691d96c75 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
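// The slice/array conversions in this file copy slices into fixed-size
// arrays so attribute values stay comparable (arrays are comparable in Go,
// slices are not). The trick in isolation (imports: fmt, reflect):
//
//	v := []int64{1, 2, 3}
//	arr := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0)))).Elem()
//	reflect.Copy(arr, reflect.ValueOf(v))
//	key := arr.Interface() // a [3]int64 value, usable as a map key
//	seen := map[interface{}]bool{key: true}
//	fmt.Println(seen[key]) // true; storing a []int64 key here would panic at runtime (unhashable)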
+func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 000000000..b4f85f44a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..3aea9c491 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. 
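// Flow sketch for the hook plumbing in this file (ContextWithList and
// ListFromContext follow below). This is an internal package, so the snippet
// is illustrative rather than importable; the hook body is an assumption
// standing in for the OpenTracing bridge:
//
//	ctx := ContextWithSetHook(context.Background(),
//		func(ctx context.Context, l List) context.Context {
//			fmt.Println("baggage updated, members:", len(l)) // e.g. sync an OpenTracing span
//			return ctx
//		})
//	ctx = ContextWithList(ctx, List{"k": Item{Value: "v"}}) // the set hook fires here
//	fmt.Println(ListFromContext(ctx)["k"].Value)            // v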
+// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..4259f0320 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 000000000..c657ff8e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. 
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 000000000..ae92a4251 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
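// The instruments in this file follow the same delegate pattern as
// ErrDelegator above: an atomic slot that falls back to a safe default until
// an SDK installs the real implementation. The pattern in isolation (a
// sketch, not the library's exact code; imports: log, sync/atomic):
//
//	type delegator struct{ impl atomic.Pointer[func(error)] }
//
//	func (d *delegator) handle(err error) {
//		if f := d.impl.Load(); f != nil {
//			(*f)(err) // delegate installed: forward
//			return
//		}
//		log.Print(err) // fallback until setDelegate-style wiring runs
//	}
//
//	func (d *delegator) set(f func(error)) { d.impl.Store(&f) }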
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) 
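// Seen from the public API, the wrappers in this file make instruments safe
// to use before any SDK is installed: calls are dropped until a delegate
// exists. A sketch (ctx is a context.Context and sdkProvider an SDK
// MeterProvider, e.g. from go.opentelemetry.io/otel/sdk/metric; both are
// assumptions here):
//
//	ctr, _ := otel.Meter("app").Int64Counter("requests")
//	ctr.Add(ctx, 1)                    // no delegate yet: dropped
//	otel.SetMeterProvider(sdkProvider) // swaps real instruments in
//	ctr.Add(ctx, 1)                    // now recorded by the SDK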
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) + } +} + +type siHistogram struct { + embedded.Int64Histogram + + name string + opts []metric.Int64HistogramOption + + delegate atomic.Value // metric.Int64Histogram +} + +var _ metric.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
+ } +} + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 000000000..adbca7d34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) + + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() + +// SetLogger sets the global Logger to l. +// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +// GetLogger returns the global logger. +func GetLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + GetLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + GetLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + GetLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + GetLogger().V(1).Info(msg, keysAndValues...) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..a6acd8dca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,598 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "context" + "reflect" + "sync" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. 
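// Verbosity sketch for the log levels documented in internal_logging.go
// above, using the public otel.SetLogger with the go-logr/stdr backend
// (stdr.SetVerbosity controls which l.V(n) levels are enabled; imports:
// log, os):
//
//	stdr.SetVerbosity(4) // Error, Warn (1) and Info (4); use 8 to include Debug
//	otel.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))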
+// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments map[instID]delegatedInstrument + + registry list.List + + delegate metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + m.mtx.Lock() + defer m.mtx.Unlock() + + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { + r := e.Value.(*registration) + r.setDelegate(meter) + n = e.Next() + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) 
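// A consequence of the instID bookkeeping in this file: while no SDK is
// installed, requesting the same instrument twice returns the same cached
// wrapper. A sketch via the public API (names are illustrative):
//
//	m := otel.Meter("app")
//	a, _ := m.Int64Counter("hits", metric.WithUnit("1"))
//	b, _ := m.Int64Counter("hits", metric.WithUnit("1"))
//	fmt.Println(a == b) // true: same name/kind/description/unit hits the cache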
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } + i := &siCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } + i := &siUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } + i := &siHistogram{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } + i := &aiCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } + i := &aiUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } + i := &aiGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } + i := &sfCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } + i := &sfUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } + i := &sfHistogram{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } + i := &afCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } + i := &afUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } + i := &afGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) + } + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(unwrapper); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. 
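The registry list above is what makes callbacks survive the swap to a real SDK: setDelegate replays every registration onto the delegate meter. A sketch of registering one against an observable placeholder, assuming a caller-supplied depth function and a hypothetical gauge name:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func watchQueueDepth(depth func() int64) (metric.Registration, error) {
        meter := otel.Meter("example.com/mylib")
        g, err := meter.Int64ObservableGauge("queue.depth")
        if err != nil {
            return nil, err
        }
        // Held in the registry until an SDK is installed, then
        // re-registered with the real meter by setDelegate.
        return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
            o.ObserveInt64(g, depth())
            return nil
        }, g)
    }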
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
+	if unwrapped, ok := inst.(unwrapper); ok {
+		if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
+			// Note: if the unwrapped object does not
+			// unwrap as an observable for either of the
+			// predicates here, it means an internal bug in
+			// this package. We avoid logging an error in
+			// this case, because the SDK has to try its
+			// own type conversion on the object. The SDK
+			// will see this and be forced to respond with
+			// its own error.
+			//
+			// This code uses a double-nested if statement
+			// to avoid creating a branch that is
+			// impossible to cover.
+			inst = floatObs
+		}
+	}
+	return inst
+}
+
+// unwrapInt64Observable returns an expected metric.Int64Observable after
+// unwrapping the global object.
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
+	if unwrapped, ok := inst.(unwrapper); ok {
+		if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
+			// See the comment in unwrapFloat64Observable().
+			inst = unint
+		}
+	}
+	return inst
+}
+
+func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
+	uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
+}
+
+func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
+	uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
+}
+
+func unwrapCallback(f metric.Callback) metric.Callback {
+	return func(ctx context.Context, obs metric.Observer) error {
+		return f(ctx, &unwrapObs{obs: obs})
+	}
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..38560ff99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it defaults to delegating the
+// calls to the default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
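Until SetDelegate runs, every method below falls through to the no-op composite, which is why instrumented code may call Inject and Extract unconditionally. A sketch of typical wiring, assuming the propagation package vendored in this change:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/propagation"
    )

    func roundTripContext(ctx context.Context) context.Context {
        otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
            propagation.TraceContext{}, propagation.Baggage{},
        ))

        carrier := propagation.MapCarrier{} // e.g. HTTP headers in real code
        otel.GetTextMapPropagator().Inject(ctx, carrier)
        return otel.GetTextMapPropagator().Extract(context.Background(), carrier)
    }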
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) + +func newTextMapPropagator() *textMapPropagator { + return &textMapPropagator{ + noop: propagation.NewCompositeTextMapPropagator(), + } +} + +// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are +// forwarded to. Delegation can only be performed once, all subsequent calls +// perform no delegation. +func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { + if delegate == nil { + return + } + + p.mtx.Lock() + p.once.Do(func() { p.delegate = delegate }) + p.mtx.Unlock() +} + +// effectiveDelegate returns the current delegate of p if one is set, +// otherwise the default noop TextMapPropagator is returned. This method +// can be called concurrently. +func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.delegate != nil { + return p.delegate + } + return p.noop +} + +// Inject set cross-cutting concerns from the Context into the carrier. +func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. +func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. +func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 000000000..204ea142a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + errorHandlerHolder struct { + eh ErrorHandler + } + + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } + + meterProviderHolder struct { + mp metric.MeterProvider + } +) + +var ( + globalErrorHandler = defaultErrorHandler() + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + globalMeterProvider = defaultMeterProvider() + + delegateErrorHandlerOnce sync.Once + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once + delegateMeterOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. 
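From the public otel package this machinery is reached through SetErrorHandler; a sketch, assuming otel.ErrorHandlerFunc (the adapter that lets a plain function satisfy ErrorHandler):

    package example

    import (
        "log"

        "go.opentelemetry.io/otel"
    )

    func init() {
        // Exporter failures, invalid instrument registrations, and other
        // otel-internal errors are funneled through this handler.
        otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
            log.Printf("opentelemetry: %v", err)
        }))
    }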
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	current := GetErrorHandler()
+
+	if _, cOk := current.(*ErrDelegator); cOk {
+		if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+			// Do not assign the default ErrDelegator to delegate to itself.
+			Error(
+				errors.New("no ErrorHandler delegate configured"),
+				"ErrorHandler remains its current value.",
+			)
+			return
+		}
+	}
+
+	delegateErrorHandlerOnce.Do(func() {
+		if def, ok := current.(*ErrDelegator); ok {
+			def.setDelegate(h)
+		}
+	})
+	globalErrorHandler.Store(errorHandlerHolder{eh: h})
+}
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultErrorHandler() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
+	return v
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..8982aa0dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,220 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior. Meaning, the Span
+remains a no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/auto/sdk"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	embedded.TracerProvider
+
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
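The guarantees in the file comment above are observable end to end. A sketch, assuming the SDK package go.opentelemetry.io/otel/sdk/trace:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func spans(ctx context.Context) {
        tr := otel.Tracer("example.com/mylib")

        _, early := tr.Start(ctx, "early") // placeholder tracer: no-op span
        early.End()                        // stays no-op even after the SDK arrives

        otel.SetTracerProvider(sdktrace.NewTracerProvider())

        _, late := tr.Start(ctx, "late") // tr now delegates; this span records
        late.End()
    }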
+ } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string + schema string + attrs attribute.Set +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + embedded.Tracer + + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + embedded.Span + + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. 
+func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 000000000..b2fe3e41d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func RawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 000000000..6de7f2e4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. +func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go new file mode 100644 index 000000000..1e6473b32 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" +) + +// Meter returns a Meter from the global MeterProvider. The name must be the +// name of the library providing instrumentation. This name may be the same as +// the instrumented code only if that code provides built-in instrumentation. +// If the name is empty, then a implementation defined default name will be +// used instead. 
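These helpers let the SDK keep every measurement in a single uint64 word; the float conversions are lossless because they reinterpret bits rather than truncate. The same round trip with the standard library directly (this internal package is not importable from outside the module):

    package example

    import "math"

    func roundTrip(f float64) float64 {
        raw := math.Float64bits(f)       // what Float64ToRaw stores
        return math.Float64frombits(raw) // RawToFloat64: exact, bit for bit
    }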
+// +// If this is called before a global MeterProvider is registered the returned +// Meter will be a No-op implementation of a Meter. When a global MeterProvider +// is registered for the first time, the returned Meter, and all the +// instruments it has created or will create, are recreated automatically from +// the new MeterProvider. +// +// This is short for GetMeterProvider().Meter(name). +func Meter(name string, opts ...metric.MeterOption) metric.Meter { + return GetMeterProvider().Meter(name, opts...) +} + +// GetMeterProvider returns the registered global meter provider. +// +// If no global GetMeterProvider has been registered, a No-op GetMeterProvider +// implementation is returned. When a global GetMeterProvider is registered for +// the first time, the returned GetMeterProvider, and all the Meters it has +// created or will create, are recreated automatically from the new +// GetMeterProvider. +func GetMeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers mp as the global MeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 000000000..0cf902e01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 000000000..f8435d8f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,260 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableUpDownCounterConfig returns a new +// [Float64ObservableUpDownCounterConfig] with all opts applied. +func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { + var config Float64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableUpDownCounterOption applies options to a +// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableUpDownCounterOption. +type Float64ObservableUpDownCounterOption interface { + applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig +} + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableGauge + + Float64Observable +} + +// Float64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record float64 values. +type Float64ObservableGaugeConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] +// with all opts applied. +func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { + var config Float64ObservableGaugeConfig + for _, o := range opts { + config = o.applyFloat64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableGaugeOption applies options to a +// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableGaugeOption. +type Float64ObservableGaugeOption interface { + applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig +} + +// Float64Observer is a recorder of float64 measurements. 
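Callbacks can also be attached at construction time via these option types; the WithFloat64Callback helper defined just below packages a Float64Callback as such an option. A sketch, assuming a caller-supplied readRatio function and a hypothetical gauge name:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func observeMemory(readRatio func() float64) error {
        _, err := otel.Meter("example.com/mylib").Float64ObservableGauge(
            "mem.used.ratio",
            metric.WithUnit("1"),
            metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
                o.Observe(readRatio()) // runs once per collection cycle
                return nil
            }),
        )
        return err
    }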
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callbacks also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..e079aaef1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds a callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+	return int64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 000000000..d9e3b13e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+	instrumentationVersion string
+	schemaURL              string
+	attrs                  attribute.Set
+
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+	return cfg.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+	return cfg.attrs
+}
+
+// SchemaURL is the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+	return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+	// applyMeter is used to set a MeterOption value of a MeterConfig.
+	applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
+func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..f153745b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. 
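To make the two broad categories concrete before the per-kind descriptions below, here is a hedged sketch that creates one synchronous and one asynchronous instrument; the meter name, instrument names, and the observed value are invented for illustration:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/metric"
	)

	func main() {
		ctx := context.Background()

		// Conventionally named after the instrumented package's import path.
		meter := otel.Meter(
			"example.com/hypothetical/pkg",
			metric.WithInstrumentationVersion("0.1.0"),
		)

		// Synchronous: measured inline while the instrumented code runs.
		requests, err := meter.Int64Counter("requests.handled")
		if err != nil {
			panic(err)
		}
		requests.Add(ctx, 1)

		// Asynchronous: observed by a callback once per collection cycle.
		_, err = meter.Int64ObservableGauge(
			"queue.depth",
			metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
				o.Observe(42) // Placeholder value for the sketch.
				return nil
			}),
		)
		if err != nil {
			panic(err)
		}
	}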
+All Counters ([Int64Counter],
+[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
+designed to measure values that never decrease in value, but instead only
+incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
+[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
+[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
+values that can increase and decrease. When more information needs to be
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+  - Not be empty.
+  - Have an alphabetic character as their first letter.
+  - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+    ‘-’, or ‘/’.
+  - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will
+validate these names; it is the caller's responsibility to ensure compliance.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter]
+that created the instrument to register a [Callback], as in the sketch below.
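A sketch of the RegisterCallback path for a callback observing more than one instrument; the meter name, instrument names, and the runtime-based values are illustrative assumptions:

	package main

	import (
		"context"
		"runtime"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/metric"
	)

	func main() {
		meter := otel.Meter("example.com/hypothetical/pkg")

		heap, err := meter.Int64ObservableGauge("memory.heap.in_use", metric.WithUnit("By"))
		if err != nil {
			panic(err)
		}
		goroutines, err := meter.Int64ObservableGauge("goroutine.count")
		if err != nil {
			panic(err)
		}

		// One callback observes both instruments; it runs once per collection
		// cycle and only observes the instruments it is registered with.
		reg, err := meter.RegisterCallback(
			func(_ context.Context, o metric.Observer) error {
				var ms runtime.MemStats
				runtime.ReadMemStats(&ms)
				o.ObserveInt64(heap, int64(ms.HeapInuse))
				o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
				return nil
			},
			heap, goroutines,
		)
		if err != nil {
			panic(err)
		}
		defer func() { _ = reg.Unregister() }() // Idempotent and concurrent safe.
	}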
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a
+panic, they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000..1f6e0efa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..1a9dc6809
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,243 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly.
Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). 
+type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). 
+type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). 
+type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..a535782e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64GaugeOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64GaugeOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. 
+type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
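The AddOption/AddConfig machinery above is what carries measurement attributes. A short sketch of the two ways to attach them (the counter and attribute names are hypothetical); building the Set once and passing it with WithAttributeSet avoids the copy WithAttributes makes, which matters on hot paths:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/attribute"
		"go.opentelemetry.io/otel/metric"
	)

	func main() {
		ctx := context.Background()
		meter := otel.Meter("example.com/hypothetical/pkg")

		hits, err := meter.Int64Counter("cache.hits")
		if err != nil {
			panic(err)
		}

		// Convenient: WithAttributes copies and de-duplicates on each call.
		hits.Add(ctx, 1, metric.WithAttributes(attribute.String("tier", "l1")))

		// Hot path: build the Set once and reuse it.
		l2 := attribute.NewSet(attribute.String("tier", "l2"))
		hits.Add(ctx, 1, metric.WithAttributeSet(l2))
	}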
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement.
This is shorthand for: +// +// cp := make([]attribute.KeyValue, len(attributes)) +// copy(cp, attributes) +// WithAttributeSet(attribute.NewSet(cp...)) +// +// [attribute.NewSet] may modify the passed attributes so this will make a copy +// of attributes before creating a set in order to ensure this function is +// concurrent safe. This makes this option function less optimized in +// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be +// preferred for performance sensitive code. +// +// See [WithAttributeSet] for information about how multiple WithAttributes are +// merged. +func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { + cp := make([]attribute.KeyValue, len(attributes)) + copy(cp, attributes) + return attrOpt{set: attribute.NewSet(cp...)} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 000000000..14e08c24a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,278 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or package. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type MeterProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.MeterProvider + + // Meter returns a new Meter with the provided name and configuration. + // + // A Meter should be scoped at most to a single package. The name needs to + // be unique so it does not collide with other names used by + // an application, nor other applications. To achieve this, the import path + // of the instrumentation package is recommended to be used as name. + // + // If the name is empty, then an implementation defined default name will + // be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Meter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Meter + + // Int64Counter returns a new Int64Counter instrument identified by name + // and configured with options. The instrument is used to synchronously + // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + + // Int64UpDownCounter returns a new Int64UpDownCounter instrument + // identified by name and configured with options. 
The instrument is used + // to synchronously record int64 measurements during a computational + // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + + // Int64Histogram returns a new Int64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of int64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + + // Int64ObservableCounter returns a new Int64ObservableCounter identified + // by name and configured with options. The instrument is used to + // asynchronously record increasing int64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record int64 measurements once per + // a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + + // Int64ObservableGauge returns a new Int64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous int64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. 
Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + + // Float64Counter returns a new Float64Counter instrument identified by + // name and configured with options. The instrument is used to + // synchronously record increasing float64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + + // Float64UpDownCounter returns a new Float64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record float64 measurements during a computational + // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + + // Float64Histogram returns a new Float64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of float64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + + // Float64ObservableCounter returns a new Float64ObservableCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record increasing float64 + // measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + + // Float64ObservableUpDownCounter returns a new + // Float64ObservableUpDownCounter instrument identified by name and + // configured with options. 
The instrument is used to asynchronously record + // float64 measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + + // Float64ObservableGauge returns a new Float64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous float64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + // + // The function f needs to be concurrent safe. + RegisterCallback(f Callback, instruments ...Observable) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurement observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Callbacks. Meaning, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Observer + + // ObserveFloat64 records the float64 value for obsrv. 
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 000000000..8403a4bad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases.
See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64UpDownCounterConfig contains options for synchronous counter +// instruments that record float64 values. +type Float64UpDownCounterConfig struct { + description string + unit string +} + +// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] +// with all opts applied. +func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { + var config Float64UpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Float64UpDownCounterOption applies options to a +// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that +// can be used as a Float64UpDownCounterOption. +type Float64UpDownCounterOption interface { + applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr float64, options ...RecordOption) +} + +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. +type Float64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all +// opts applied. +func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { + var config Float64HistogramConfig + for _, o := range opts { + config = o.applyFloat64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Float64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Float64HistogramOption applies options to a [Float64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64HistogramOption. +type Float64HistogramOption interface { + applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig +} + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go new file mode 100644 index 000000000..783fdfba7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64CounterConfig contains options for synchronous counter instruments that +// record int64 values. 
+type Int64CounterConfig struct { + description string + unit string +} + +// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts +// applied. +func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { + var config Int64CounterConfig + for _, o := range opts { + config = o.applyInt64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64CounterConfig) Unit() string { + return c.unit +} + +// Int64CounterOption applies options to a [Int64CounterConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64CounterOption. +type Int64CounterOption interface { + applyInt64Counter(Int64CounterConfig) Int64CounterConfig +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64UpDownCounterConfig contains options for synchronous counter +// instruments that record int64 values. +type Int64UpDownCounterConfig struct { + description string + unit string +} + +// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with +// all opts applied. +func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { + var config Int64UpDownCounterConfig + for _, o := range opts { + config = o.applyInt64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. +// See [InstrumentOption] for other options that can be used as an +// Int64UpDownCounterOption. +type Int64UpDownCounterOption interface { + applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. 
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Gauge
+
+	// Record records the instantaneous value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+	var config Int64GaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64Gauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+	return c.unit
+}
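+
+// Editor's note: a minimal usage sketch, not part of the upstream file. It
+// assumes a Meter has already been obtained from a provider elsewhere; the
+// instrument name and attribute below are hypothetical.
+//
+//	func countRequest(ctx context.Context, meter metric.Meter) error {
+//		c, err := meter.Int64Counter(
+//			"app.request.count",
+//			metric.WithDescription("Number of requests handled."),
+//			metric.WithUnit("{request}"),
+//		)
+//		if err != nil {
+//			return err
+//		}
+//		c.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/")))
+//		return nil
+//	}
+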
+// Int64GaugeOption applies options to a [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+	applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 000000000..2fd949733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 000000000..e2959ac74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..552263ba7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
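+
+// Editor's note: an end-to-end sketch, not part of the upstream file, tying
+// the pieces above together. The member key and value are hypothetical and
+// error handling is elided.
+//
+//	otel.SetTextMapPropagator(propagation.Baggage{})
+//
+//	m, _ := baggage.NewMember("user.id", "42")
+//	bag, _ := baggage.New(m)
+//	ctx := baggage.ContextWithBaggage(context.Background(), bag)
+//
+//	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", nil)
+//	otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
+//	// req now carries a "baggage: user.id=42" header.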
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..33a3baf15
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..8c8286aab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,142 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
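+
+// Editor's note: an illustrative sketch, not part of the upstream file. Both
+// carrier types adapt existing storage to TextMapCarrier, so a propagator can
+// write to HTTP headers and to an in-memory map in the same way:
+//
+//	hc := HeaderCarrier(http.Header{})
+//	hc.Set("traceparent", "00-...")  // stored via http.Header.Set
+//
+//	mc := MapCarrier{}
+//	mc.Set("traceparent", "00-...")  // stored directly in the map
+//	keys := mc.Keys()                // []string{"traceparent"}
+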
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..6870e316d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/).
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject injects the trace context from ctx into carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
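+	// Editor's note (illustration, not upstream code): once the sampled bit
+	// is isolated below, the builder renders the W3C traceparent header as
+	// four lower-hex fields joined by "-", e.g.
+	//
+	//	00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
+	//	version-<trace-id>-<parent-id>-<trace-flags>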
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow extra data after the flags field and does
+		// not allow unsupported flag bits.
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode would accept upper-case characters, which traceparent does
+	// not allow, so exclude them explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
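+
+// Editor's note: a minimal extraction sketch, not part of the upstream file.
+// The header value is hypothetical; a well-formed traceparent yields a
+// remote, sampled SpanContext on the returned context:
+//
+//	carrier := MapCarrier{
+//		"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
+//	}
+//	ctx := TraceContext{}.Extract(context.Background(), carrier)
+//	sc := trace.SpanContextFromContext(ctx)
+//	// sc.IsRemote() == true, sc.IsSampled() == true
+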
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 000000000..a6fa353f9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,30 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:best-practices"
+  ],
+  "ignorePaths": [],
+  "labels": ["Skip Changelog", "dependencies"],
+  "postUpdateOptions" : [
+    "gomodTidy"
+  ],
+  "packageRules": [
+    {
+      "matchManagers": ["gomod"],
+      "matchDepTypes": ["indirect"],
+      "enabled": true
+    },
+    {
+      "matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
+      "groupName": "build-tools"
+    },
+    {
+      "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+      "groupName": "googleapis"
+    },
+    {
+      "matchPackageNames": ["golang.org/x/**"],
+      "groupName": "golang.org/x"
+    }
+  ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 000000000..1bb55fb1c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.4.1
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000..2de1fc3c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000..d8dc822b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It uniquely identifies the
+	// framework API revision offered by a version (`os.version`) of the
+	// Android operating system. More information can be found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
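+
+// Editor's note: an illustrative sketch, not part of the generated file. The
+// exported helpers in this package build typed attribute.KeyValue pairs that
+// can be attached to telemetry; the span variable is hypothetical and assumed
+// to come from a tracer:
+//
+//	span.SetAttributes(
+//		semconv.AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"),
+//		semconv.AWSDynamoDBTableNames("Users"),
+//	)
+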
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies the
+// framework API revision offered by a version (`os.version`) of the Android
+// operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result, showing whether the lease was acquired or
+	// the reason it was rejected
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: stable
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+	// to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+	// It represents the ASP.NET Core exception middleware handling result
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'handled', 'unhandled'
+	AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating if request was handled by the application pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+	// AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+	// "aspnetcore.routing.match_status" semantic conventions.
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
+func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. 
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Attributes for AWS Lambda.
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function (the
+	// `Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` request, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
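+	//
+	// For example, copying `someFile.yml` out of `some-bucket-name` would be
+	// recorded in the `bucket`/`key` form (sketch only, reusing the
+	// documented example values; `span` is an assumed active trace.Span):
+	//
+	//	span.SetAttributes(AWSS3CopySource("some-bucket-name/someFile.yml"))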
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the zone
+	// where the resource is running. Cloud regions often have multiple,
+	// isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
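+	// For example, a function running on AWS Lambda would pair
+	// `cloud.provider` = `aws` with `cloud.platform` = `aws_lambda`; both
+	// values are exposed as the `CloudProviderAWS` and
+	// `CloudPlatformAWSLambda` variables further down in this file.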
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
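+	//
+	// A resource-detection sketch (illustrative only; assumes the OTel SDK's
+	// `resource` package, and the ARN is the documented placeholder example):
+	//
+	//	res := resource.NewSchemaless(
+	//		CloudProviderAWS,
+	//		CloudPlatformAWSLambda,
+	//		CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
+	//	)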
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the zone where
+// the resource is running. Cloud regions often have multiple, isolated
+// locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Attributes for CloudEvents.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
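+//
+// Taken together, the code.* constructors can describe a call site. A
+// sketch (illustrative only; `span` is an assumed active trace.Span, and
+// the values reuse the documented examples):
+//
+//	span.SetAttributes(
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFunction("serveRequest"),
+//		CodeLineNumber(42),
+//	)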
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
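+	// The documented example below ('a3bf90e006b2') is such an abbreviated
+	// 12-character form, as printed by `docker ps`.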
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the
+	// runtime-specific image identifier. Usually a hash algorithm followed by
+	// a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the
+// runtime-specific image identifier. Usually a hash algorithm followed by a
+// UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+	return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+	return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+	return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+	return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+	return DBQueryTextKey.String(val)
+}
+
+// This group defines attributes for Cassandra.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
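+//
+// An illustrative sketch, not part of the generated conventions: a Cassandra
+// read span might combine the helpers in this group, with hypothetical values:
+//
+//	attrs := []attribute.KeyValue{
+//		DBSystemCassandra,
+//		DBCassandraConsistencyLevelLocalQuorum,
+//		DBCassandraCoordinatorDC("us-west-2"), // hypothetical data center
+//		DBCassandraPageSize(5000),
+//		DBCassandraSpeculativeExecutionCount(2),
+//	}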
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// cosmosDB Operation Type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
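+	//
+	// An illustrative sketch, not part of the generated conventions: a Cosmos
+	// DB call might be annotated with this group, using hypothetical values:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		DBSystemCosmosDB,
+	//		DBCosmosDBConnectionModeGateway,
+	//		DBCosmosDBStatusCode(200),
+	//		DBCosmosDBRequestCharge(46.18), // hypothetical RU charge
+	//	}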
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// This group defines attributes for Elasticsearch.
+const (
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions.
It represents
+	// the identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+	// request was routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It is deprecated; use the
+	// `device.app.lifecycle` event definition including `android.state` as a
+	// payload field instead.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// and from which the `OS identifiers` are derived.
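+	//
+	// An illustrative sketch, not part of the generated conventions: a
+	// lifecycle event might carry one of the enum values defined below, e.g.
+	//
+	//	attrs := []attribute.KeyValue{AndroidStateForeground}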
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
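+//
+// As an illustrative sketch (not part of the generated conventions), a disk
+// I/O measurement could be labelled with the direction enum defined below:
+//
+//	attrs := []attribute.KeyValue{DiskIoDirectionRead}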
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
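+//
+// An illustrative sketch, not part of the generated conventions: an
+// authenticated request might combine the enduser helpers, with hypothetical
+// values:
+//
+//	attrs := []attribute.KeyValue{
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message write:files"),
+//	}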
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the
+	// operation ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be
+	// used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of
+	// event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
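+	//
+	// Illustrative only (hypothetical event name, not part of the generated
+	// conventions):
+	//
+	//	attrs := []attribute.KeyValue{EventName("device.app.lifecycle")}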
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of
+// event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
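+	//
+	// An illustrative sketch, not part of the generated conventions: an
+	// exception event might combine this group, with hypothetical values:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		ExceptionType("java.net.ConnectException"),
+	//		ExceptionMessage("Connection refused"),
+	//		ExceptionEscaped(true),
+	//	}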
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// FaaS attributes
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes
+	// the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, that will be potentially reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
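+	//
+	// For instance (an illustrative sketch, hypothetical value): 128 MiB
+	// would be reported as 128 * 1,048,576 bytes, i.e.
+	//
+	//	FaaSMaxMemory(134217728)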
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	// (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	// (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	// [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
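+	//
+	// An illustrative sketch, not part of the generated conventions: a flag
+	// evaluation might combine this group, with hypothetical values:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		FeatureFlagKey("logo-color"),
+	//		FeatureFlagProviderName("Flag Manager"),
+	//		FeatureFlagVariant("red"),
+	//	}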
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// Describes file attributes.
+const (
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is
+	// located. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/user', 'C:\\Program Files\\MyApp'
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the
+	// leading dot.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'png', 'gz'
+	// Note: When the file name has multiple extensions (example.tar.gz), only
+	// the last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileNameKey is the attribute Key conforming to the "file.name" semantic
+	// conventions. It represents the name of the file including the extension,
+	// without the directory.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.png'
+	FileNameKey = attribute.Key("file.name")
+
+	// FilePathKey is the attribute Key conforming to the "file.path" semantic
+	// conventions. It represents the full path to the file, including the file
+	// name. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/alice/example.png', 'C:\\Program
+	// Files\\MyApp\\myapp.exe'
+	FilePathKey = attribute.Key("file.path")
+
+	// FileSizeKey is the attribute Key conforming to the "file.size" semantic
+	// conventions. It represents the file size in bytes.
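+	//
+	// An illustrative sketch, not part of the generated conventions: a file
+	// observation might combine this group, with hypothetical values:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		FileDirectory("/home/alice"),
+	//		FileName("example.png"),
+	//		FileExtension("png"),
+	//		FileSize(4096),
+	//	}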
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
+ // For example, when using OpenAI client libraries to communicate
+ // with Mistral, the `gen_ai.system` is set to `openai` based on the
+ // instrumentation's best knowledge.
+ GenAiSystemKey = attribute.Key("gen_ai.system")
+
+ // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 180
+ GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+ // GenAiUsagePromptTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM prompt.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+ // OpenAI
+ GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+ return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+ return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+ return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+ return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+ return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
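+
+// llmSpanAttributes is an editor's usage sketch, not part of the generated
+// conventions: it bundles the GenAI request/response constructors from this
+// section into the attribute set that would typically be attached to a span
+// wrapping an LLM call. The values are the documented examples.
+func llmSpanAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GenAiSystemOpenai, // enum member defined above
+  GenAiRequestModel("gpt-4"),
+  GenAiRequestMaxTokens(100),
+  GenAiRequestTemperature(0.0),
+  GenAiResponseFinishReasons("stop"),
+  GenAiUsagePromptTokens(100),
+  GenAiUsageCompletionTokens(180),
+ }
+}
+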
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
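+
+// graphqlSpanAttributes is an editor's usage sketch, not part of the
+// generated conventions: it shows how the GraphQL constructors above are
+// typically combined before being passed to a span's SetAttributes method.
+func graphqlSpanAttributes(document, operationName string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GraphqlOperationTypeQuery,           // enum member defined above
+  GraphqlOperationName(operationName), // e.g. 'findBookByID'
+  GraphqlDocument(document),           // may be sanitized by the caller
+ }
+}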
+
+// Attributes for the Heroku platform.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
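+
+// herokuResourceAttributes is an editor's usage sketch, not part of the
+// generated conventions: Heroku exposes dyno metadata through environment
+// variables, so a resource could plausibly be annotated like this. The
+// parameter names are illustrative assumptions.
+func herokuResourceAttributes(appID, commit, createdAt string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HerokuAppID(appID),                        // e.g. '2daa2797-e42b-4624-9322-ec3f968df4da'
+  HerokuReleaseCommit(commit),               // commit hash of the current release
+  HerokuReleaseCreationTimestamp(createdAt), // e.g. '2022-10-23T18:00:42Z'
+ }
+}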
+
+// A host is defined as a computing instance, for example a physical server,
+// a virtual machine, a switch, or a disk array.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: the [CPUID](https://wiki.osdev.org/CPUID) command returns the
+ // vendor ID string in the EBX, EDX and ECX registers. Writing these to
+ // memory in this order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
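+
+// hostResourceAttributes is an editor's usage sketch, not part of the
+// generated conventions: it combines several of the host constructors above
+// into a resource attribute set. The values are the documented examples.
+func hostResourceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HostArchAMD64, // enum member defined above
+  HostName("opentelemetry-test"),
+  HostID("fdbf79e8af94cb7f9e8df36789187052"), // machine-id or cloud instance_id
+  HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+ }
+}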
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method`
+ // attribute value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Service Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
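+
+// httpRequestMethodAttributes is an editor's usage sketch, not part of the
+// generated conventions: it illustrates the normalization rule documented on
+// HTTPRequestMethodKey, where a method unknown to the instrumentation is
+// reported as `_OTHER` and the original value is preserved in
+// `http.request.method_original`.
+func httpRequestMethodAttributes(method string) []attribute.KeyValue {
+ switch method {
+ case "CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE":
+  // Known methods are matched case-sensitively and reported as-is.
+  return []attribute.KeyValue{HTTPRequestMethodKey.String(method)}
+ default:
+  return []attribute.KeyValue{
+   HTTPRequestMethodOther,            // `_OTHER` for unknown methods
+   HTTPRequestMethodOriginal(method), // keep what the client actually sent
+  }
+ }
+}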
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine (JVM) related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents whether the
+ // thread is a daemon thread or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the thread
+// is a daemon thread or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
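+
+// jvmMemoryPoolAttributes is an editor's usage sketch, not part of the
+// generated conventions: it pairs the memory-type enum with a pool name, as
+// a JVM memory metric would typically be attributed.
+func jvmMemoryPoolAttributes(pool string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  JvmMemoryTypeHeap,       // enum member defined above
+  JvmMemoryPoolName(pool), // e.g. 'G1 Eden space'
+ }
+}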
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever added, we will recommend collecting the `k8s.cluster.uid` through
+ // the official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), which
+ // states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, which must be unique within a Pod. The
+ // container runtime usually uses a different globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
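+//
+// An illustrative sketch only (building an attribute slice this way is an
+// assumption about the caller, not an API of this package; the file name
+// reuses the example value above):
+//
+//	attrs := []attribute.KeyValue{LogFileName("audit.log")}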
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
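+//
+// A minimal usage sketch (span is assumed to be an active trace.Span):
+//
+//	span.SetAttributes(MessagingDestinationAnonymous(true))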
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
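+//
+// For example, reusing the example value from the conventions above (span is
+// an assumed trace.Span):
+//
+//	span.SetAttributes(MessagingMessageEnvelopeSize(2738))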
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// Kafka message key: message keys in Kafka are used for grouping alike
+	// messages to ensure they're processed on the same partition. They differ
+	// from `messaging.message.id` in that they're not unique. If the key is
+	// `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// Kafka message key: message keys in Kafka are used for grouping alike
+// messages to ensure they're processed on the same partition.
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds at
+	// which the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark the message besides its
+	// message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds at which the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides its
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions.
It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+	// MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+	// It represents the ack deadline in seconds set for the modify ack
+	// deadline request.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+	// represents the ack ID for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack_id'
+	MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+	// conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+	// semantic conventions. It represents the delivery attempt for a given
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack ID for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
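+//
+// A minimal sketch (span is assumed to be an active trace.Span; the key value
+// mirrors the Examples above):
+//
+//	span.SetAttributes(MessagingGCPPubsubMessageOrderingKey("ordering_key"))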
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic that messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It describes the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic that messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
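+//
+// An illustrative sketch (span is assumed to be an active trace.Span; the
+// count reuses the example value above):
+//
+//	span.SetAttributes(MessagingServicebusMessageDeliveryCount(2))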
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details about
+	// a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
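+//
+// A minimal sketch using the example address from the conventions above (span
+// is assumed to be an active trace.Span):
+//
+//	span.SetAttributes(NetworkLocalAddress("10.1.2.80"))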
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
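+	//
+	// A minimal sketch (span is assumed to be an active trace.Span; the enum
+	// values defined below could be used instead of the raw string):
+	//
+	//	span.SetAttributes(OpentracingRefTypeKey.String("follows_from"))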
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human-readable (not intended to
+	// be parsed) OS version information, as reported by, for example, the
+	// `ver` or `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human-readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human-readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
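+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of describing the host OS on a resource with the constructors above,
+// where `resource` is go.opentelemetry.io/otel/sdk/resource and SchemaURL is
+// this package's schema URL constant:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSDescription("Ubuntu 18.04.1 LTS"),
+//	)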
+
+// Attributes reserved for OpenTelemetry
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
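+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of tagging an outgoing call on an existing span `span` with the
+// logical name of the remote service:
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))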
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string. On Windows, can
+	// be set to the result of `GetCommandLineW`. Do not set this if you have
+	// to assemble it just for monitoring; use `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether
+	// the context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether the
+	// process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions. It represents the type
+	// of page fault for this data point. Type `major` is for major/hard page
+	// faults, and `minor` is for minor/soft page faults.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PPID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessRealUserIDKey is the attribute Key conforming to the
+	// "process.real_user.id" semantic conventions. It represents the real user
+	// ID (RUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000
+	ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+	// ProcessRealUserNameKey is the attribute Key conforming to the
+	// "process.real_user.name" semantic conventions. It represents the
+	// username of the real user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessSavedUserIDKey is the attribute Key conforming to the
+	// "process.saved_user.id" semantic conventions. It represents the saved
+	// user ID (SUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1002
+	ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+	// ProcessSavedUserNameKey is the attribute Key conforming to the
+	// "process.saved_user.name" semantic conventions. It represents the
+	// username of the saved user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string. On Windows, can be set to
+// the result of `GetCommandLineW`. Do not set this if you have to assemble it
+// just for monitoring; use `process.command_args` instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+	return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+	return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+	return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+	return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+	return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+	return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+	return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+	return ProcessVpidKey.Int(val)
+}
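+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of a process describing itself with the constructors above, using
+// only the standard library `os` package:
+//
+//	exe, _ := os.Executable()
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessExecutablePath(exe),
+//		ProcessCommandArgs(os.Args...),
+//	}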
+
+// Attributes for process CPU
+const (
+	// ProcessCPUStateKey is the attribute Key conforming to the
+	// "process.cpu.state" semantic conventions. It represents the CPU state of
+	// the process.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+	// system
+	ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+	// user
+	ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+	// wait
+	ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Attributes for remote procedure calls.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md)
+	// of the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since the protocol allows id to be int,
+	// string, `null` or missing (for notifications), the value is expected to
+	// be cast to string for simplicity. Use empty string in case of `null`
+	// value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMessageCompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It represents the message ID. MUST be calculated
+	// as two different counters starting from `1`, one for sent messages and
+	// one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the
+	// "rpc.message.type" semantic conventions. It represents whether this is a
+	// received or sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+	// sent
+	RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+	// received
+	RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
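+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of the attributes a gRPC client instrumentation might set on an
+// existing span `span` for a successful unary call:
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//		RPCGRPCStatusCodeOk,
+//	)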
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows id to be int,
+// string, `null` or missing (for notifications), the value is expected to be
+// cast to string for simplicity. Use empty string in case of `null` value.
+// Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+	return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the message ID. MUST
+// be calculated as two different counters starting from `1`, one for sent
+// messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+	return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+	return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the server domain name if available
+	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.address` SHOULD represent the server address
+	// behind any intermediaries, for example proxies, if it's available.
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the server port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.port` SHOULD represent the server port behind
+	// any intermediaries, for example proxies, if it's available.
+	ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+	return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+	return ServerPortKey.Int(val)
+}
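+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of describing the target of an outgoing HTTPS connection on an
+// existing client span `span`:
+//
+//	span.SetAttributes(
+//		ServerAddress("example.com"),
+//		ServerPort(443),
+//	)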
+
+// A service instance.
+const (
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service).
+	//
+	// Implementations, such as SDKs, are recommended to generate a random
+	// Version 1 or Version 4 [RFC 4122](https://www.ietf.org/rfc/rfc4122.txt)
+	// UUID, but are free to use an inherent unique ID as the source of this
+	// value if stability is desirable. In that case, the ID SHOULD be used as
+	// the source of a UUID Version 5 and SHOULD use the following UUID as the
+	// namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+	//
+	// UUIDs are typically recommended, as only an opaque value for the
+	// purposes of identifying a service instance is needed. Similar to what
+	// can be seen in the man page for the
+	// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+	// file, the underlying data, such as pod name and namespace, should be
+	// treated as confidential; it is the user's choice to expose it or not via
+	// another resource attribute.
+	//
+	// For applications running behind an application server (like unicorn), we
+	// do not recommend using one identifier for all processes participating in
+	// the application. Instead, it's recommended that each division (e.g. a
+	// worker thread in unicorn) have its own instance.id.
+	//
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the service instance that is generating
+	// that telemetry. For instance, creating a UUID based on `pod.name` will
+	// likely be wrong, as the Collector might not know from which container
+	// within that pod the telemetry originated. However, Collectors can set
+	// the `service.instance.id` if they can unambiguously determine the
+	// service instance for that telemetry. This is typically the case for
+	// scraping receivers, as they know the target address and port.
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fallback to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+	// `process.executable.name` is not available, the value MUST be set to
+	// `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
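+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of a service resource built from the constructors above, where
+// `resource` is go.opentelemetry.io/otel/sdk/resource and SchemaURL is this
+// package's schema URL constant:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		ServiceName("shoppingcart"),
+//		ServiceNamespace("Shop"),
+//		ServiceVersion("2.0.0"),
+//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	)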
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions. It represents the [SignalR
+	// transport
+	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'web_sockets', 'long_polling'
+	SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+	// The connection was closed normally
+	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+	// The connection was closed due to a timeout
+	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+	// The connection was closed because the app is shutting down
+	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+	// ServerSentEvents protocol
+	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+	// LongPolling protocol
+	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+	// WebSockets protocol
+	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// Describes System attributes
+const (
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '(identifier)'
+	SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It represents the
+	// logical CPU number [0..n-1].
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemCPUStateKey is the attribute Key conforming to the
+	// "system.cpu.state" semantic conventions. It represents the state of the
+	// CPU.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle', 'interrupt'
+	SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+	// user
+	SystemCPUStateUser = SystemCPUStateKey.String("user")
+	// system
+	SystemCPUStateSystem = SystemCPUStateKey.String("system")
+	// nice
+	SystemCPUStateNice = SystemCPUStateKey.String("nice")
+	// idle
+	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+	// iowait
+	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+	// interrupt
+	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+	// steal
+	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1].
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
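+
+// Illustrative usage (not part of the generated conventions): a hypothetical
+// sketch of recording a per-CPU measurement with the attributes above, where
+// `m` is a metric.Meter from go.opentelemetry.io/otel/metric and `ctx` is an
+// existing context:
+//
+//	cpuTime, _ := m.Float64Counter("system.cpu.time")
+//	cpuTime.Add(ctx, 0.5, metric.WithAttributes(
+//		SystemCPUStateUser,
+//		SystemCPULogicalNumber(0),
+//	))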
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the state of
+ // a network connection. A stateless protocol MUST NOT set this attribute.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
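For context, an illustrative sketch (not from the diff): the telemetry.sdk.* attributes above are normally populated on a Resource by the SDK itself, so manual construction like the following is only for demonstration.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version assumed
	"go.opentelemetry.io/otel/sdk/resource"
)

// newExampleResource shows the required telemetry.sdk.* attributes together;
// real code should rely on resource.Default(), which already includes them.
func newExampleResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.TelemetrySDKLanguageGo,
		semconv.TelemetrySDKName("opentelemetry"),
		semconv.TelemetrySDKVersion("1.2.3"), // example value from the docs above
	)
}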
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client.
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the
+ // date/Time indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name indication (SNI), which tells the server the hostname to which the
+ // client is attempting to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions.
It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/Time indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions.
It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name indication (SNI), which tells the server the hostname to which the
+// client is attempting to connect.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
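A sketch of how several tls.* helpers above can be derived from Go's crypto/tls connection state (the function wiring is an assumption; the semconv import path should match the vendored version):

package example

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version assumed
)

// tlsAttributes maps a completed handshake to the tls.* semantic conventions.
func tlsAttributes(state tls.ConnectionState) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TLSEstablished(state.HandshakeComplete),
		semconv.TLSResumed(state.DidResume),
		semconv.TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
		semconv.TLSNextProtocol(state.NegotiatedProtocol), // ALPN value, e.g. "h2"
		semconv.TLSClientServerName(state.ServerName),     // SNI, populated server-side
	}
}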
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, is the last + // part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
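An illustrative sketch (the URL value and redaction approach are assumptions) of populating several url.* attributes above from net/url; note the credential-redaction requirement documented for `url.full`:

package example

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version assumed
)

func urlAttributes(raw string) ([]attribute.KeyValue, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	if u.User != nil {
		// Per the url.full note above, credentials MUST NOT appear in url.full.
		u.User = url.UserPassword("REDACTED", "REDACTED")
	}
	return []attribute.KeyValue{
		semconv.URLScheme(u.Scheme),
		semconv.URLPath(u.Path),
		semconv.URLQuery(u.RawQuery),
		semconv.URLFull(u.String()),
	}, nil
}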
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
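As a usage sketch (the HTTP handler wiring is an assumption): user_agent.original is typically captured straight from the User-Agent header on the current span.

package example

import (
	"net/http"

	"go.opentelemetry.io/otel/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version assumed
)

// annotateUserAgent records the raw User-Agent header on the active span.
func annotateUserAgent(r *http.Request) {
	span := trace.SpanFromContext(r.Context())
	if ua := r.UserAgent(); ua != "" {
		span.SetAttributes(semconv.UserAgentOriginal(ua))
	}
}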
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 000000000..d031bbea7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 000000000..bfaee0d56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 000000000..fcdb9f485 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the deprecated, use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // deprecated, use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Stable + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. 
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the cPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. 
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. 
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. 
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 000000000..4c87c7adc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 000000000..6836c6547 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
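Reviewer note: the global tracer entry points vendored in vendor/go.opentelemetry.io/otel/trace.go above (Tracer, GetTracerProvider, SetTracerProvider) are the surface downstream code calls. A minimal consumption sketch, assuming no SDK or auto-instrumentation is registered (the calls then resolve to the global no-op-backed provider and are safe); the package path, span name, and attribute are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// otel.Tracer is shorthand for otel.GetTracerProvider().Tracer(name, opts...).
	tracer := otel.Tracer("example.com/hypothetical/pkg", // hypothetical instrumentation name
		trace.WithInstrumentationVersion("0.0.1"))

	// Start/End delimit the traced operation; the options come from the
	// vendored trace/config.go.
	ctx, span := tracer.Start(context.Background(), "operation",
		trace.WithSpanKind(trace.SpanKindInternal),
		trace.WithAttributes(attribute.String("component", "demo")), // illustrative attribute
	)
	defer span.End()

	_ = ctx // pass ctx onward so child spans nest under this one
}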
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 000000000..58ccaba69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go new file mode 100644 index 000000000..7e2910025 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -0,0 +1,661 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + "encoding/json" + "fmt" + "math" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/internal/telemetry" +) + +// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(autoTracerProvider) + +type autoTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = autoTracerProvider{} + +func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { + cfg := NewTracerConfig(opts...) + return autoTracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} + +type autoTracer struct { + embedded.Tracer + + name, schemaURL, version string +} + +var _ Tracer = autoTracer{} + +func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { + var psc SpanContext + sampled := true + span := new(autoSpan) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *autoTracer) start( + ctx context.Context, + spanPtr *autoSpan, + psc *SpanContext, + sampled *bool, + sc *SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} + +func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + n := int64(len(links)) + if n > 0 { + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + } else { + if limit > 0 { + n := int64(max(len(links)-limit, 0)) + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind SpanKind) telemetry.SpanKind { + switch kind { + case SpanKindInternal: + return telemetry.SpanKindInternal + case SpanKindServer: + return telemetry.SpanKindServer + case SpanKindClient: + return telemetry.SpanKindClient + case SpanKindProducer: + return telemetry.SpanKindProducer + case SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} + +type autoSpan struct { + embedded.Span + + spanContext SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *autoSpan) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *autoSpan) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *autoSpan) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. 
+ } + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) + if limit == 0 { + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return nil, out + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. 
+func truncate(limit int, s string) string { + // This prioritizes performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *autoSpan) End(opts ...SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *autoSpan) end(opts []SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. + +//go:noinline +func (*autoSpan) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *autoSpan) RecordError(err error, opts ...EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *autoSpan) AddEvent(name string, opts ...EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *autoSpan) AddLink(link Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *autoSpan) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // This is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. +func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + // Ignore invalid environment variable. + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..9c0b720a4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. 
+func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) 
+ return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start event. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. 
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 000000000..8c45a7107 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpanInstance + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpanInstance +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..cdbf41d6d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. 
Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.GetTracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API.
+*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 000000000..7754a239e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 000000000..3e359a00b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go new file mode 100644 index 000000000..f663547b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// Attr is a key-value pair. 
+type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal reports whether a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go new file mode 100644 index 000000000..5debe90bb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides lightweight representations of OpenTelemetry +telemetry that are compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go new file mode 100644 index 000000000..7b1ae3c4e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns true if the trace ID contains only zero bytes. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains only zero bytes. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go new file mode 100644 index 000000000..f5e3a8cec --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Uint64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go new file mode 100644 index 000000000..1798a702d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go new file mode 100644 index 000000000..c2b4c635b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go new file mode 100644 index 000000000..3c5e1cdb1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -0,0 +1,460 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. 
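+	// In this Go encoding the masks above correspond to the SpanFlags
+	// constants declared later in this file, e.g. (editorial sketch):
+	//
+	//	sampled := span.Flags&uint32(SpanFlagsTraceFlagsMask)&0x1 != 0
+	//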
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. 
If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. 
+// To extract the bit-field, for example, use an
+// expression like:
+//
+//	(span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. It can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message was accepted
+	// by the broker while the logical processing of the message might span a much longer time.
+	SpanKindProducer SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type SpanEvent struct {
+	// time_unix_nano is the time the event occurred.
+	Time time.Time `json:"timeUnixNano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to non-empty string.
+	Name string `json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+	t := e.Time.UnixNano()
+	if e.Time.IsZero() || t < 0 {
+		t = 0
+	}
+
+	type Alias SpanEvent
+	return json.Marshal(struct {
+		Alias
+		Time uint64 `json:"timeUnixNano,omitempty"`
+	}{
+		Alias: Alias(e),
+		Time:  uint64(t), // nolint: gosec // >0 checked above
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
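+// The payload mirrors the OTLP/JSON encoding; an editorial example of an
+// accepted document:
+//
+//	{"timeUnixNano":"1700000000000000000","name":"retry",
+//	 "attributes":[{"key":"attempt","value":{"intValue":"2"}}]}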
+func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
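+				// (Editorial note: as elsewhere in this file, io.EOF
+				// here means an empty object, not an error.)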
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+		}
+
+		switch key {
+		case "traceId", "trace_id":
+			err = decoder.Decode(&sl.TraceID)
+		case "spanId", "span_id":
+			err = decoder.Decode(&sl.SpanID)
+		case "traceState", "trace_state":
+			err = decoder.Decode(&sl.TraceState)
+		case "attributes":
+			err = decoder.Decode(&sl.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&sl.DroppedAttrs)
+		case "flags":
+			err = decoder.Decode(&sl.Flags)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
new file mode 100644
index 000000000..1d013a8fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+	// The default status.
+	StatusCodeUnset StatusCode = 0
+	// The Span has been validated by an Application developer or Operator to
+	// have completed successfully.
+	StatusCodeOK StatusCode = 1
+	// The Span contains an error.
+	StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+	"Unset",
+	"OK",
+	"Error",
+}
+
+func (s StatusCode) String() string {
+	if s >= 0 && int(s) < len(statusCodeStrings) {
+		return statusCodeStrings[s]
+	}
+	return "<unknown telemetry.StatusCode>"
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+	// A developer-facing human readable error message.
+	Message string `json:"message,omitempty"`
+	// The status code.
+	Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
new file mode 100644
index 000000000..b03940708
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// or can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding further and in that case this
+	// array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. 
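+// Both lowerCamelCase and snake_case keys are accepted; an editorial
+// example of an accepted document:
+//
+//	{"scope":{"name":"app"},"spans":[{"name":"op"}],
+//	 "schema_url":"https://opentelemetry.io/schemas/1.24.0"}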
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ScopeSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 000000000..7251492da
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for KindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for KindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for KindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{
+		num: uint64(v), // nolint: gosec // Store raw bytes.
+		any: ValueKindInt64,
+	}
+}
+
+// Float64Value returns a [Value] for a float64.
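+// The float is stored as its IEEE 754 bit pattern (math.Float64bits) and
+// recovered by AsFloat64 via math.Float64frombits.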
+func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. 
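+// It returns nil when v does not hold a map; error reporting here is still
+// a TODO (see below).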
+func (v Value) AsMap() []Attr {
+	if sp, ok := v.any.(mapptr); ok {
+		return unsafe.Slice((*Attr)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+	return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a Kind is not handled. It is preferable to have users
+		// open an issue asking why their attributes have an "unhandled: "
+		// prefix than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
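+	// (Editorial note: OTLP/JSON encodes 64-bit integers as strings, as
+	// above, to avoid precision loss in JSON readers; doubles stay numeric.)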
+ case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..c00221e7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. 
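+// Retaining the SpanContext is what keeps trace propagation working across
+// service boundaries even when no SDK is installed to record spans.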
+func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..c8b1ae5d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpanInstance + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } + +var noopSpanInstance Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (s noopSpan) TracerProvider() TracerProvider { + return s.tracerProvider(autoInstEnabled) +} + +// autoInstEnabled defines if the auto-instrumentation SDK is enabled. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches to the process. +var autoInstEnabled = new(bool) + +// tracerProvider return a noopTracerProvider if autoEnabled is false, +// otherwise it will return a TracerProvider from the sdk package used in +// auto-instrumentation. 
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+	if *autoEnabled {
+		return newAutoTracerProvider()
+	}
+	return noopTracerProvider{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 000000000..cd382c82a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 000000000..64a4f1b36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ trace.TracerProvider = TracerProvider{}
+	_ trace.Tracer         = Tracer{}
+	_ trace.Span           = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording return it directly.
+			return ctx, span
+		}
+		// Otherwise, wrap the span context in a non-recording span and
+		// return that.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+ span = noopSpanInstance + } + return trace.ContextWithSpan(ctx, span), span +} + +var noopSpanInstance trace.Span = Span{} + +// Span is an OpenTelemetry No-Op Span. +type Span struct { + embedded.Span + + sc trace.SpanContext +} + +// SpanContext returns an empty span context. +func (s Span) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (Span) IsRecording() bool { return false } + +// SetStatus does nothing. +func (Span) SetStatus(codes.Code, string) {} + +// SetAttributes does nothing. +func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider. +func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 000000000..ef85cb70c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. 
Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. 
+	// The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind.
A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 000000000..d49adf671 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "encoding/hex" + "encoding/json" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. 
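+// An editorial example: SpanID{0, 0, 0, 0, 0, 0, 0, 1} marshals to the
+// JSON string "0000000000000001".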
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var (
+	nilSpanID SpanID
+	_         json.Marshaler = nilSpanID
+)
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+	return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.String())
+}
+
+// String returns the hex string representation of a SpanID.
+func (s SpanID) String() string {
+	return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+	t := TraceID{}
+	if len(h) != 32 {
+		return t, errInvalidTraceIDLength
+	}
+
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+
+	if !t.IsValid() {
+		return t, errNilTraceID
+	}
+	return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the W3C trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+	s := SpanID{}
+	if len(h) != 16 {
+		return s, errInvalidSpanIDLength
+	}
+
+	if err := decodeHex(h, s[:]); err != nil {
+		return s, err
+	}
+
+	if !s.IsValid() {
+		return s, errNilSpanID
+	}
+	return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+	for _, r := range h {
+		switch {
+		case 'a' <= r && r <= 'f':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		default:
+			return errInvalidHexID
+		}
+	}
+
+	decoded, err := hex.DecodeString(h)
+	if err != nil {
+		return err
+	}
+
+	copy(b, decoded)
+	return nil
+}
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns if the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+	return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag.
+	if sampled {
+		return tf | FlagsSampled
+	}
+
+	return tf &^ FlagsSampled
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+	return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation of the TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)}[:])
+}
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+	TraceID    TraceID
+	SpanID     SpanID
+	TraceFlags TraceFlags
+	TraceState TraceState
+	Remote     bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+	return SpanContext{
+		traceID:    config.TraceID,
+		spanID:     config.SpanID,
+		traceFlags: config.TraceFlags,
+		traceState: config.TraceState,
+		remote:     config.Remote,
+	}
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+	traceID    TraceID
+	spanID     SpanID
+	traceFlags TraceFlags
+	traceState TraceState
+	remote     bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns if the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+	return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. 
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+	return json.Marshal(SpanContextConfig{
+		TraceID:    sc.traceID,
+		SpanID:     sc.spanID,
+		TraceFlags: sc.traceFlags,
+		TraceState: sc.traceState,
+		Remote:     sc.remote,
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 000000000..77952d2a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span, then the newly-created
+	// Span will be a child of that span; otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
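The Start contract above (parenting from ctx, WithNewRoot, mandatory End) is easiest to see in use. A minimal sketch (illustrative, not part of the vendored diff); it uses the global TracerProvider, which is a no-op until an SDK is installed, so it runs as-is:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
	)

	func main() {
		// Tracer from the global TracerProvider; a no-op until an SDK is set.
		tracer := otel.Tracer("example")

		// The returned ctx carries the parent span.
		ctx, parent := tracer.Start(context.Background(), "parent")
		defer parent.End() // every started Span must be ended

		// Because ctx contains the parent Span, this Span becomes its child.
		_, child := tracer.Start(ctx, "child")
		child.End()
	}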
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 000000000..dc5e34cad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,330 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+const (
+	maxListMembers = 32
+
+	listDelimiters  = ","
+	memberDelimiter = "="
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) )
+// means (chr = %x20-2B / %x2D-3C / %x3E-7E) .
+func checkValueChar(v byte) bool {
+	return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+	return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+//	value    = (0*255(chr)) nblk-chr
+//	nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+//	chr      = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+	n := len(val)
+	if n == 0 || n > 256 {
+		return false
+	}
+	for i := 0; i < n-1; i++ {
+		if !checkValueChar(val[i]) {
+			return false
+		}
+	}
+	return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+	// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+	for _, v := range key {
+		if isAlphaNum(byte(v)) {
+			continue
+		}
+		switch v {
+		case '_', '-', '*', '/':
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// according to
+//
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id  = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the length of the remaining part: 255 for simple-key, 13 for system-id.
+func checkKeyPart(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	first := key[0] // key's first char
+	ret := len(key[1:]) <= n
+	ret = ret && first >= 'a' && first <= 'z'
+	return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+	if c >= 'a' && c <= 'z' {
+		return true
+	}
+	return c >= '0' && c <= '9'
+}
+
+// according to
+//
+//	tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the length of the remaining part, which should be exactly 240.
+func checkKeyTenant(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+//	key              = simple-key / multi-tenant-key
+//	simple-key       = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	multi-tenant-key = tenant-id "@" system-id
+//	tenant-id        = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id        = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	lcalpha          = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+	tenant, system, ok := strings.Cut(key, "@")
+	if !ok {
+		return checkKeyPart(key, 255)
+	}
+	return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+	if !checkKey(key) {
+		return member{}, errInvalidKey
+	}
+	if !checkValue(value) {
+		return member{}, errInvalidValue
+	}
+	return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+	key, val, ok := strings.Cut(m, memberDelimiter)
+	if !ok {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	key = strings.TrimLeft(key, " \t")
+	val = strings.TrimRight(val, " \t")
+	result, e := newMember(key, val)
+	if e != nil {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	return result, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+	return m.Key + "=" + m.Value
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs; each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+	// list is the members in order.
+	list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(ts string) (TraceState, error) {
+	if ts == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for ts != "" {
+		var memberStr string
+		memberStr, ts, _ = strings.Cut(ts, listDelimiters)
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Walk walks all key value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
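Insert, defined next, is copy-on-write and moves the touched list-member to the front, as the W3C spec requires. A small sketch of the parse/get/insert flow (illustrative only, not part of the vendored file; the member values come from the W3C tracestate examples):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		ts, err := trace.ParseTraceState("congo=t61rcWkgMzE,rojo=00f067aa0ba902b7")
		if err != nil {
			panic(err)
		}
		fmt.Println(ts.Get("rojo")) // 00f067aa0ba902b7

		// The updated member moves to the front; ts itself is unchanged.
		ts2, err := ts.Insert("rojo", "updated")
		if err != nil {
			panic(err)
		}
		fmt.Println(ts2.String()) // rojo=updated,congo=t61rcWkgMzE
		fmt.Println(ts.String())  // congo=t61rcWkgMzE,rojo=00f067aa0ba902b7
	}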
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
+	}
+	cTS.list[0] = m
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], ts.list[0:found])
+	if found < n {
+		copy(cTS.list[1+found:], ts.list[found+1:])
+	}
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 000000000..1e87855ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+    if [ ! -f "$dir/README.md" ]; then
+        echo "couldn't find README.md for $dir"
+        missingReadme=true
+    fi
+done
+
+if [ "$missingReadme" = true ] ; then
+    echo "Error: some READMEs couldn't be found."
+    exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Check out the previous version of the changelog from the base branch into the temp folder.
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 000000000..d5fa71f67
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string {
+	return "1.35.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
new file mode 100644
index 000000000..2b4cb4b41
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -0,0 +1,43 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+module-sets:
+  stable-v1:
+    version: v1.35.0
+    modules:
+      - go.opentelemetry.io/otel
+      - go.opentelemetry.io/otel/bridge/opencensus
+      - go.opentelemetry.io/otel/bridge/opencensus/test
+      - go.opentelemetry.io/otel/bridge/opentracing
+      - go.opentelemetry.io/otel/bridge/opentracing/test
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+      - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+      - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+      - go.opentelemetry.io/otel/exporters/zipkin
+      - go.opentelemetry.io/otel/metric
+      - go.opentelemetry.io/otel/sdk
+      - go.opentelemetry.io/otel/sdk/metric
+      - go.opentelemetry.io/otel/trace
+  experimental-metrics:
+    version: v0.57.0
+    modules:
+      - go.opentelemetry.io/otel/exporters/prometheus
+  experimental-logs:
+    version: v0.11.0
+    modules:
+      - go.opentelemetry.io/otel/log
+      - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+      - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+  experimental-schema:
+    version: v0.0.12
+    modules:
+      - go.opentelemetry.io/otel/schema
+excluded-modules:
+  - go.opentelemetry.io/otel/internal/tools
+  - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go
index 61a4c764c..92a5d6fb6 100644
--- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go
+++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go
@@ -68,7 +68,7 @@ type AppBindingSpec struct {
 
 	// Secret is the name of the secret to create in the AppBinding's
 	// namespace that will hold the credentials associated with the AppBinding.
-	Secret *core.LocalObjectReference `json:"secret,omitempty"`
+	Secret *TypedLocalObjectReference `json:"secret,omitempty"`
 
 	// List of transformations that should be applied to the credentials
 	// associated with the ServiceBinding before they are inserted into the Secret.
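This hunk (and the matching tlsSecret hunk below) widens the secret reference from core's LocalObjectReference to the TypedLocalObjectReference type added later in this diff, so an AppBinding can also name secret-like kinds outside the core API group. A hedged sketch of populating the widened field; all names here are illustrative:

	package main

	import (
		"fmt"

		appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
	)

	func main() {
		spec := appcat.AppBindingSpec{
			Secret: &appcat.TypedLocalObjectReference{
				// APIGroup "" (core) and Kind "Secret" match the kubebuilder
				// defaults on the new type, so a plain Secret reference keeps
				// its old one-field shape when defaulted by the API server.
				APIGroup: "",
				Kind:     "Secret",
				Name:     "example-db-auth", // illustrative name
			},
		}
		fmt.Println(spec.Secret.Kind + "/" + spec.Secret.Name)
	}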
@@ -90,7 +90,7 @@ type AppBindingSpec struct { // TLSSecret is the name of the secret that will hold // the client certificate and private key associated with the AppBinding. - TLSSecret *core.LocalObjectReference `json:"tlsSecret,omitempty"` + TLSSecret *TypedLocalObjectReference `json:"tlsSecret,omitempty"` } type AppType string @@ -175,6 +175,21 @@ type ServiceReference struct { Query string `json:"query,omitempty"` } +// +structType=atomic +type TypedLocalObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + // +kubebuilder:default="" + APIGroup string `json:"apiGroup"` + // Kind is the type of resource being referenced + // +kubebuilder:default="Secret" + Kind string `json:"kind"` + // Name is the name of resource being referenced + Name string `json:"name"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AppBindingList is a list of Apps diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go index f486bbe29..535c3de3f 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go @@ -36,361 +36,362 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "k8s.io/api/apps/v1.ControllerRevision": schema_k8sio_api_apps_v1_ControllerRevision(ref), - "k8s.io/api/apps/v1.ControllerRevisionList": schema_k8sio_api_apps_v1_ControllerRevisionList(ref), - "k8s.io/api/apps/v1.DaemonSet": schema_k8sio_api_apps_v1_DaemonSet(ref), - "k8s.io/api/apps/v1.DaemonSetCondition": schema_k8sio_api_apps_v1_DaemonSetCondition(ref), - "k8s.io/api/apps/v1.DaemonSetList": schema_k8sio_api_apps_v1_DaemonSetList(ref), - "k8s.io/api/apps/v1.DaemonSetSpec": schema_k8sio_api_apps_v1_DaemonSetSpec(ref), - "k8s.io/api/apps/v1.DaemonSetStatus": schema_k8sio_api_apps_v1_DaemonSetStatus(ref), - "k8s.io/api/apps/v1.DaemonSetUpdateStrategy": schema_k8sio_api_apps_v1_DaemonSetUpdateStrategy(ref), - "k8s.io/api/apps/v1.Deployment": schema_k8sio_api_apps_v1_Deployment(ref), - "k8s.io/api/apps/v1.DeploymentCondition": schema_k8sio_api_apps_v1_DeploymentCondition(ref), - "k8s.io/api/apps/v1.DeploymentList": schema_k8sio_api_apps_v1_DeploymentList(ref), - "k8s.io/api/apps/v1.DeploymentSpec": schema_k8sio_api_apps_v1_DeploymentSpec(ref), - "k8s.io/api/apps/v1.DeploymentStatus": schema_k8sio_api_apps_v1_DeploymentStatus(ref), - "k8s.io/api/apps/v1.DeploymentStrategy": schema_k8sio_api_apps_v1_DeploymentStrategy(ref), - "k8s.io/api/apps/v1.ReplicaSet": schema_k8sio_api_apps_v1_ReplicaSet(ref), - "k8s.io/api/apps/v1.ReplicaSetCondition": schema_k8sio_api_apps_v1_ReplicaSetCondition(ref), - "k8s.io/api/apps/v1.ReplicaSetList": schema_k8sio_api_apps_v1_ReplicaSetList(ref), - "k8s.io/api/apps/v1.ReplicaSetSpec": schema_k8sio_api_apps_v1_ReplicaSetSpec(ref), - "k8s.io/api/apps/v1.ReplicaSetStatus": schema_k8sio_api_apps_v1_ReplicaSetStatus(ref), - "k8s.io/api/apps/v1.RollingUpdateDaemonSet": schema_k8sio_api_apps_v1_RollingUpdateDaemonSet(ref), - "k8s.io/api/apps/v1.RollingUpdateDeployment": schema_k8sio_api_apps_v1_RollingUpdateDeployment(ref), - 
"k8s.io/api/apps/v1.RollingUpdateStatefulSetStrategy": schema_k8sio_api_apps_v1_RollingUpdateStatefulSetStrategy(ref), - "k8s.io/api/apps/v1.StatefulSet": schema_k8sio_api_apps_v1_StatefulSet(ref), - "k8s.io/api/apps/v1.StatefulSetCondition": schema_k8sio_api_apps_v1_StatefulSetCondition(ref), - "k8s.io/api/apps/v1.StatefulSetList": schema_k8sio_api_apps_v1_StatefulSetList(ref), - "k8s.io/api/apps/v1.StatefulSetOrdinals": schema_k8sio_api_apps_v1_StatefulSetOrdinals(ref), - "k8s.io/api/apps/v1.StatefulSetPersistentVolumeClaimRetentionPolicy": schema_k8sio_api_apps_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(ref), - "k8s.io/api/apps/v1.StatefulSetSpec": schema_k8sio_api_apps_v1_StatefulSetSpec(ref), - "k8s.io/api/apps/v1.StatefulSetStatus": schema_k8sio_api_apps_v1_StatefulSetStatus(ref), - "k8s.io/api/apps/v1.StatefulSetUpdateStrategy": schema_k8sio_api_apps_v1_StatefulSetUpdateStrategy(ref), - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref), - "k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref), - "k8s.io/api/core/v1.AppArmorProfile": schema_k8sio_api_core_v1_AppArmorProfile(ref), - "k8s.io/api/core/v1.AttachedVolume": schema_k8sio_api_core_v1_AttachedVolume(ref), - "k8s.io/api/core/v1.AvoidPods": schema_k8sio_api_core_v1_AvoidPods(ref), - "k8s.io/api/core/v1.AzureDiskVolumeSource": schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref), - "k8s.io/api/core/v1.AzureFilePersistentVolumeSource": schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref), - "k8s.io/api/core/v1.AzureFileVolumeSource": schema_k8sio_api_core_v1_AzureFileVolumeSource(ref), - "k8s.io/api/core/v1.Binding": schema_k8sio_api_core_v1_Binding(ref), - "k8s.io/api/core/v1.CSIPersistentVolumeSource": schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref), - "k8s.io/api/core/v1.CSIVolumeSource": schema_k8sio_api_core_v1_CSIVolumeSource(ref), - "k8s.io/api/core/v1.Capabilities": schema_k8sio_api_core_v1_Capabilities(ref), - "k8s.io/api/core/v1.CephFSPersistentVolumeSource": schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref), - "k8s.io/api/core/v1.CephFSVolumeSource": schema_k8sio_api_core_v1_CephFSVolumeSource(ref), - "k8s.io/api/core/v1.CinderPersistentVolumeSource": schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref), - "k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref), - "k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref), - "k8s.io/api/core/v1.ClusterTrustBundleProjection": schema_k8sio_api_core_v1_ClusterTrustBundleProjection(ref), - "k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref), - "k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref), - "k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref), - "k8s.io/api/core/v1.ConfigMap": schema_k8sio_api_core_v1_ConfigMap(ref), - "k8s.io/api/core/v1.ConfigMapEnvSource": schema_k8sio_api_core_v1_ConfigMapEnvSource(ref), - "k8s.io/api/core/v1.ConfigMapKeySelector": schema_k8sio_api_core_v1_ConfigMapKeySelector(ref), - "k8s.io/api/core/v1.ConfigMapList": schema_k8sio_api_core_v1_ConfigMapList(ref), - "k8s.io/api/core/v1.ConfigMapNodeConfigSource": schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref), - "k8s.io/api/core/v1.ConfigMapProjection": schema_k8sio_api_core_v1_ConfigMapProjection(ref), - "k8s.io/api/core/v1.ConfigMapVolumeSource": schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref), - 
"k8s.io/api/core/v1.Container": schema_k8sio_api_core_v1_Container(ref), - "k8s.io/api/core/v1.ContainerImage": schema_k8sio_api_core_v1_ContainerImage(ref), - "k8s.io/api/core/v1.ContainerPort": schema_k8sio_api_core_v1_ContainerPort(ref), - "k8s.io/api/core/v1.ContainerResizePolicy": schema_k8sio_api_core_v1_ContainerResizePolicy(ref), - "k8s.io/api/core/v1.ContainerState": schema_k8sio_api_core_v1_ContainerState(ref), - "k8s.io/api/core/v1.ContainerStateRunning": schema_k8sio_api_core_v1_ContainerStateRunning(ref), - "k8s.io/api/core/v1.ContainerStateTerminated": schema_k8sio_api_core_v1_ContainerStateTerminated(ref), - "k8s.io/api/core/v1.ContainerStateWaiting": schema_k8sio_api_core_v1_ContainerStateWaiting(ref), - "k8s.io/api/core/v1.ContainerStatus": schema_k8sio_api_core_v1_ContainerStatus(ref), - "k8s.io/api/core/v1.ContainerUser": schema_k8sio_api_core_v1_ContainerUser(ref), - "k8s.io/api/core/v1.DaemonEndpoint": schema_k8sio_api_core_v1_DaemonEndpoint(ref), - "k8s.io/api/core/v1.DownwardAPIProjection": schema_k8sio_api_core_v1_DownwardAPIProjection(ref), - "k8s.io/api/core/v1.DownwardAPIVolumeFile": schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref), - "k8s.io/api/core/v1.DownwardAPIVolumeSource": schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref), - "k8s.io/api/core/v1.EmptyDirVolumeSource": schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref), - "k8s.io/api/core/v1.EndpointAddress": schema_k8sio_api_core_v1_EndpointAddress(ref), - "k8s.io/api/core/v1.EndpointPort": schema_k8sio_api_core_v1_EndpointPort(ref), - "k8s.io/api/core/v1.EndpointSubset": schema_k8sio_api_core_v1_EndpointSubset(ref), - "k8s.io/api/core/v1.Endpoints": schema_k8sio_api_core_v1_Endpoints(ref), - "k8s.io/api/core/v1.EndpointsList": schema_k8sio_api_core_v1_EndpointsList(ref), - "k8s.io/api/core/v1.EnvFromSource": schema_k8sio_api_core_v1_EnvFromSource(ref), - "k8s.io/api/core/v1.EnvVar": schema_k8sio_api_core_v1_EnvVar(ref), - "k8s.io/api/core/v1.EnvVarSource": schema_k8sio_api_core_v1_EnvVarSource(ref), - "k8s.io/api/core/v1.EphemeralContainer": schema_k8sio_api_core_v1_EphemeralContainer(ref), - "k8s.io/api/core/v1.EphemeralContainerCommon": schema_k8sio_api_core_v1_EphemeralContainerCommon(ref), - "k8s.io/api/core/v1.EphemeralVolumeSource": schema_k8sio_api_core_v1_EphemeralVolumeSource(ref), - "k8s.io/api/core/v1.Event": schema_k8sio_api_core_v1_Event(ref), - "k8s.io/api/core/v1.EventList": schema_k8sio_api_core_v1_EventList(ref), - "k8s.io/api/core/v1.EventSeries": schema_k8sio_api_core_v1_EventSeries(ref), - "k8s.io/api/core/v1.EventSource": schema_k8sio_api_core_v1_EventSource(ref), - "k8s.io/api/core/v1.ExecAction": schema_k8sio_api_core_v1_ExecAction(ref), - "k8s.io/api/core/v1.FCVolumeSource": schema_k8sio_api_core_v1_FCVolumeSource(ref), - "k8s.io/api/core/v1.FlexPersistentVolumeSource": schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref), - "k8s.io/api/core/v1.FlexVolumeSource": schema_k8sio_api_core_v1_FlexVolumeSource(ref), - "k8s.io/api/core/v1.FlockerVolumeSource": schema_k8sio_api_core_v1_FlockerVolumeSource(ref), - "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource": schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref), - "k8s.io/api/core/v1.GRPCAction": schema_k8sio_api_core_v1_GRPCAction(ref), - "k8s.io/api/core/v1.GitRepoVolumeSource": schema_k8sio_api_core_v1_GitRepoVolumeSource(ref), - "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource": schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref), - "k8s.io/api/core/v1.GlusterfsVolumeSource": 
schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref), - "k8s.io/api/core/v1.HTTPGetAction": schema_k8sio_api_core_v1_HTTPGetAction(ref), - "k8s.io/api/core/v1.HTTPHeader": schema_k8sio_api_core_v1_HTTPHeader(ref), - "k8s.io/api/core/v1.HostAlias": schema_k8sio_api_core_v1_HostAlias(ref), - "k8s.io/api/core/v1.HostIP": schema_k8sio_api_core_v1_HostIP(ref), - "k8s.io/api/core/v1.HostPathVolumeSource": schema_k8sio_api_core_v1_HostPathVolumeSource(ref), - "k8s.io/api/core/v1.ISCSIPersistentVolumeSource": schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref), - "k8s.io/api/core/v1.ISCSIVolumeSource": schema_k8sio_api_core_v1_ISCSIVolumeSource(ref), - "k8s.io/api/core/v1.ImageVolumeSource": schema_k8sio_api_core_v1_ImageVolumeSource(ref), - "k8s.io/api/core/v1.KeyToPath": schema_k8sio_api_core_v1_KeyToPath(ref), - "k8s.io/api/core/v1.Lifecycle": schema_k8sio_api_core_v1_Lifecycle(ref), - "k8s.io/api/core/v1.LifecycleHandler": schema_k8sio_api_core_v1_LifecycleHandler(ref), - "k8s.io/api/core/v1.LimitRange": schema_k8sio_api_core_v1_LimitRange(ref), - "k8s.io/api/core/v1.LimitRangeItem": schema_k8sio_api_core_v1_LimitRangeItem(ref), - "k8s.io/api/core/v1.LimitRangeList": schema_k8sio_api_core_v1_LimitRangeList(ref), - "k8s.io/api/core/v1.LimitRangeSpec": schema_k8sio_api_core_v1_LimitRangeSpec(ref), - "k8s.io/api/core/v1.LinuxContainerUser": schema_k8sio_api_core_v1_LinuxContainerUser(ref), - "k8s.io/api/core/v1.List": schema_k8sio_api_core_v1_List(ref), - "k8s.io/api/core/v1.LoadBalancerIngress": schema_k8sio_api_core_v1_LoadBalancerIngress(ref), - "k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref), - "k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref), - "k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref), - "k8s.io/api/core/v1.ModifyVolumeStatus": schema_k8sio_api_core_v1_ModifyVolumeStatus(ref), - "k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref), - "k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref), - "k8s.io/api/core/v1.NamespaceCondition": schema_k8sio_api_core_v1_NamespaceCondition(ref), - "k8s.io/api/core/v1.NamespaceList": schema_k8sio_api_core_v1_NamespaceList(ref), - "k8s.io/api/core/v1.NamespaceSpec": schema_k8sio_api_core_v1_NamespaceSpec(ref), - "k8s.io/api/core/v1.NamespaceStatus": schema_k8sio_api_core_v1_NamespaceStatus(ref), - "k8s.io/api/core/v1.Node": schema_k8sio_api_core_v1_Node(ref), - "k8s.io/api/core/v1.NodeAddress": schema_k8sio_api_core_v1_NodeAddress(ref), - "k8s.io/api/core/v1.NodeAffinity": schema_k8sio_api_core_v1_NodeAffinity(ref), - "k8s.io/api/core/v1.NodeCondition": schema_k8sio_api_core_v1_NodeCondition(ref), - "k8s.io/api/core/v1.NodeConfigSource": schema_k8sio_api_core_v1_NodeConfigSource(ref), - "k8s.io/api/core/v1.NodeConfigStatus": schema_k8sio_api_core_v1_NodeConfigStatus(ref), - "k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref), - "k8s.io/api/core/v1.NodeFeatures": schema_k8sio_api_core_v1_NodeFeatures(ref), - "k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref), - "k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref), - "k8s.io/api/core/v1.NodeRuntimeHandler": schema_k8sio_api_core_v1_NodeRuntimeHandler(ref), - "k8s.io/api/core/v1.NodeRuntimeHandlerFeatures": schema_k8sio_api_core_v1_NodeRuntimeHandlerFeatures(ref), - "k8s.io/api/core/v1.NodeSelector": 
schema_k8sio_api_core_v1_NodeSelector(ref), - "k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref), - "k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref), - "k8s.io/api/core/v1.NodeSpec": schema_k8sio_api_core_v1_NodeSpec(ref), - "k8s.io/api/core/v1.NodeStatus": schema_k8sio_api_core_v1_NodeStatus(ref), - "k8s.io/api/core/v1.NodeSystemInfo": schema_k8sio_api_core_v1_NodeSystemInfo(ref), - "k8s.io/api/core/v1.ObjectFieldSelector": schema_k8sio_api_core_v1_ObjectFieldSelector(ref), - "k8s.io/api/core/v1.ObjectReference": schema_k8sio_api_core_v1_ObjectReference(ref), - "k8s.io/api/core/v1.PersistentVolume": schema_k8sio_api_core_v1_PersistentVolume(ref), - "k8s.io/api/core/v1.PersistentVolumeClaim": schema_k8sio_api_core_v1_PersistentVolumeClaim(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimCondition": schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimList": schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimSpec": schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimStatus": schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimTemplate": schema_k8sio_api_core_v1_PersistentVolumeClaimTemplate(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref), - "k8s.io/api/core/v1.PersistentVolumeList": schema_k8sio_api_core_v1_PersistentVolumeList(ref), - "k8s.io/api/core/v1.PersistentVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeSource(ref), - "k8s.io/api/core/v1.PersistentVolumeSpec": schema_k8sio_api_core_v1_PersistentVolumeSpec(ref), - "k8s.io/api/core/v1.PersistentVolumeStatus": schema_k8sio_api_core_v1_PersistentVolumeStatus(ref), - "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource": schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref), - "k8s.io/api/core/v1.Pod": schema_k8sio_api_core_v1_Pod(ref), - "k8s.io/api/core/v1.PodAffinity": schema_k8sio_api_core_v1_PodAffinity(ref), - "k8s.io/api/core/v1.PodAffinityTerm": schema_k8sio_api_core_v1_PodAffinityTerm(ref), - "k8s.io/api/core/v1.PodAntiAffinity": schema_k8sio_api_core_v1_PodAntiAffinity(ref), - "k8s.io/api/core/v1.PodAttachOptions": schema_k8sio_api_core_v1_PodAttachOptions(ref), - "k8s.io/api/core/v1.PodCondition": schema_k8sio_api_core_v1_PodCondition(ref), - "k8s.io/api/core/v1.PodDNSConfig": schema_k8sio_api_core_v1_PodDNSConfig(ref), - "k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref), - "k8s.io/api/core/v1.PodExecOptions": schema_k8sio_api_core_v1_PodExecOptions(ref), - "k8s.io/api/core/v1.PodIP": schema_k8sio_api_core_v1_PodIP(ref), - "k8s.io/api/core/v1.PodList": schema_k8sio_api_core_v1_PodList(ref), - "k8s.io/api/core/v1.PodLogOptions": schema_k8sio_api_core_v1_PodLogOptions(ref), - "k8s.io/api/core/v1.PodOS": schema_k8sio_api_core_v1_PodOS(ref), - "k8s.io/api/core/v1.PodPortForwardOptions": schema_k8sio_api_core_v1_PodPortForwardOptions(ref), - "k8s.io/api/core/v1.PodProxyOptions": schema_k8sio_api_core_v1_PodProxyOptions(ref), - "k8s.io/api/core/v1.PodReadinessGate": schema_k8sio_api_core_v1_PodReadinessGate(ref), - "k8s.io/api/core/v1.PodResourceClaim": schema_k8sio_api_core_v1_PodResourceClaim(ref), - "k8s.io/api/core/v1.PodResourceClaimStatus": schema_k8sio_api_core_v1_PodResourceClaimStatus(ref), - 
"k8s.io/api/core/v1.PodSchedulingGate": schema_k8sio_api_core_v1_PodSchedulingGate(ref), - "k8s.io/api/core/v1.PodSecurityContext": schema_k8sio_api_core_v1_PodSecurityContext(ref), - "k8s.io/api/core/v1.PodSignature": schema_k8sio_api_core_v1_PodSignature(ref), - "k8s.io/api/core/v1.PodSpec": schema_k8sio_api_core_v1_PodSpec(ref), - "k8s.io/api/core/v1.PodStatus": schema_k8sio_api_core_v1_PodStatus(ref), - "k8s.io/api/core/v1.PodStatusResult": schema_k8sio_api_core_v1_PodStatusResult(ref), - "k8s.io/api/core/v1.PodTemplate": schema_k8sio_api_core_v1_PodTemplate(ref), - "k8s.io/api/core/v1.PodTemplateList": schema_k8sio_api_core_v1_PodTemplateList(ref), - "k8s.io/api/core/v1.PodTemplateSpec": schema_k8sio_api_core_v1_PodTemplateSpec(ref), - "k8s.io/api/core/v1.PortStatus": schema_k8sio_api_core_v1_PortStatus(ref), - "k8s.io/api/core/v1.PortworxVolumeSource": schema_k8sio_api_core_v1_PortworxVolumeSource(ref), - "k8s.io/api/core/v1.PreferAvoidPodsEntry": schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref), - "k8s.io/api/core/v1.PreferredSchedulingTerm": schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref), - "k8s.io/api/core/v1.Probe": schema_k8sio_api_core_v1_Probe(ref), - "k8s.io/api/core/v1.ProbeHandler": schema_k8sio_api_core_v1_ProbeHandler(ref), - "k8s.io/api/core/v1.ProjectedVolumeSource": schema_k8sio_api_core_v1_ProjectedVolumeSource(ref), - "k8s.io/api/core/v1.QuobyteVolumeSource": schema_k8sio_api_core_v1_QuobyteVolumeSource(ref), - "k8s.io/api/core/v1.RBDPersistentVolumeSource": schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref), - "k8s.io/api/core/v1.RBDVolumeSource": schema_k8sio_api_core_v1_RBDVolumeSource(ref), - "k8s.io/api/core/v1.RangeAllocation": schema_k8sio_api_core_v1_RangeAllocation(ref), - "k8s.io/api/core/v1.ReplicationController": schema_k8sio_api_core_v1_ReplicationController(ref), - "k8s.io/api/core/v1.ReplicationControllerCondition": schema_k8sio_api_core_v1_ReplicationControllerCondition(ref), - "k8s.io/api/core/v1.ReplicationControllerList": schema_k8sio_api_core_v1_ReplicationControllerList(ref), - "k8s.io/api/core/v1.ReplicationControllerSpec": schema_k8sio_api_core_v1_ReplicationControllerSpec(ref), - "k8s.io/api/core/v1.ReplicationControllerStatus": schema_k8sio_api_core_v1_ReplicationControllerStatus(ref), - "k8s.io/api/core/v1.ResourceClaim": schema_k8sio_api_core_v1_ResourceClaim(ref), - "k8s.io/api/core/v1.ResourceFieldSelector": schema_k8sio_api_core_v1_ResourceFieldSelector(ref), - "k8s.io/api/core/v1.ResourceHealth": schema_k8sio_api_core_v1_ResourceHealth(ref), - "k8s.io/api/core/v1.ResourceQuota": schema_k8sio_api_core_v1_ResourceQuota(ref), - "k8s.io/api/core/v1.ResourceQuotaList": schema_k8sio_api_core_v1_ResourceQuotaList(ref), - "k8s.io/api/core/v1.ResourceQuotaSpec": schema_k8sio_api_core_v1_ResourceQuotaSpec(ref), - "k8s.io/api/core/v1.ResourceQuotaStatus": schema_k8sio_api_core_v1_ResourceQuotaStatus(ref), - "k8s.io/api/core/v1.ResourceRequirements": schema_k8sio_api_core_v1_ResourceRequirements(ref), - "k8s.io/api/core/v1.ResourceStatus": schema_k8sio_api_core_v1_ResourceStatus(ref), - "k8s.io/api/core/v1.SELinuxOptions": schema_k8sio_api_core_v1_SELinuxOptions(ref), - "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref), - "k8s.io/api/core/v1.ScaleIOVolumeSource": schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref), - "k8s.io/api/core/v1.ScopeSelector": schema_k8sio_api_core_v1_ScopeSelector(ref), - "k8s.io/api/core/v1.ScopedResourceSelectorRequirement": 
schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref), - "k8s.io/api/core/v1.SeccompProfile": schema_k8sio_api_core_v1_SeccompProfile(ref), - "k8s.io/api/core/v1.Secret": schema_k8sio_api_core_v1_Secret(ref), - "k8s.io/api/core/v1.SecretEnvSource": schema_k8sio_api_core_v1_SecretEnvSource(ref), - "k8s.io/api/core/v1.SecretKeySelector": schema_k8sio_api_core_v1_SecretKeySelector(ref), - "k8s.io/api/core/v1.SecretList": schema_k8sio_api_core_v1_SecretList(ref), - "k8s.io/api/core/v1.SecretProjection": schema_k8sio_api_core_v1_SecretProjection(ref), - "k8s.io/api/core/v1.SecretReference": schema_k8sio_api_core_v1_SecretReference(ref), - "k8s.io/api/core/v1.SecretVolumeSource": schema_k8sio_api_core_v1_SecretVolumeSource(ref), - "k8s.io/api/core/v1.SecurityContext": schema_k8sio_api_core_v1_SecurityContext(ref), - "k8s.io/api/core/v1.SerializedReference": schema_k8sio_api_core_v1_SerializedReference(ref), - "k8s.io/api/core/v1.Service": schema_k8sio_api_core_v1_Service(ref), - "k8s.io/api/core/v1.ServiceAccount": schema_k8sio_api_core_v1_ServiceAccount(ref), - "k8s.io/api/core/v1.ServiceAccountList": schema_k8sio_api_core_v1_ServiceAccountList(ref), - "k8s.io/api/core/v1.ServiceAccountTokenProjection": schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref), - "k8s.io/api/core/v1.ServiceList": schema_k8sio_api_core_v1_ServiceList(ref), - "k8s.io/api/core/v1.ServicePort": schema_k8sio_api_core_v1_ServicePort(ref), - "k8s.io/api/core/v1.ServiceProxyOptions": schema_k8sio_api_core_v1_ServiceProxyOptions(ref), - "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), - "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), - "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), - "k8s.io/api/core/v1.SleepAction": schema_k8sio_api_core_v1_SleepAction(ref), - "k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), - "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), - "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), - "k8s.io/api/core/v1.TCPSocketAction": schema_k8sio_api_core_v1_TCPSocketAction(ref), - "k8s.io/api/core/v1.Taint": schema_k8sio_api_core_v1_Taint(ref), - "k8s.io/api/core/v1.Toleration": schema_k8sio_api_core_v1_Toleration(ref), - "k8s.io/api/core/v1.TopologySelectorLabelRequirement": schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref), - "k8s.io/api/core/v1.TopologySelectorTerm": schema_k8sio_api_core_v1_TopologySelectorTerm(ref), - "k8s.io/api/core/v1.TopologySpreadConstraint": schema_k8sio_api_core_v1_TopologySpreadConstraint(ref), - "k8s.io/api/core/v1.TypedLocalObjectReference": schema_k8sio_api_core_v1_TypedLocalObjectReference(ref), - "k8s.io/api/core/v1.TypedObjectReference": schema_k8sio_api_core_v1_TypedObjectReference(ref), - "k8s.io/api/core/v1.Volume": schema_k8sio_api_core_v1_Volume(ref), - "k8s.io/api/core/v1.VolumeDevice": schema_k8sio_api_core_v1_VolumeDevice(ref), - "k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref), - "k8s.io/api/core/v1.VolumeMountStatus": schema_k8sio_api_core_v1_VolumeMountStatus(ref), - "k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref), - "k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref), - "k8s.io/api/core/v1.VolumeResourceRequirements": schema_k8sio_api_core_v1_VolumeResourceRequirements(ref), - 
"k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref), - "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref), - "k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref), - "k8s.io/api/core/v1.WindowsSecurityContextOptions": schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref), - "k8s.io/apimachinery/pkg/api/resource.Quantity": schema_apimachinery_pkg_api_resource_Quantity(ref), - "k8s.io/apimachinery/pkg/api/resource.int64Amount": schema_apimachinery_pkg_api_resource_int64Amount(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ApplyOptions": schema_pkg_apis_meta_v1_ApplyOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Condition": schema_pkg_apis_meta_v1_Condition(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.FieldSelectorRequirement": schema_pkg_apis_meta_v1_FieldSelectorRequirement(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), - 
"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": schema_pkg_apis_meta_v1_TableRowCondition(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), - "k8s.io/apimachinery/pkg/runtime.RawExtension": schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref), - "k8s.io/apimachinery/pkg/runtime.TypeMeta": schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref), - "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), - "k8s.io/apimachinery/pkg/util/intstr.IntOrString": schema_apimachinery_pkg_util_intstr_IntOrString(ref), - "k8s.io/apimachinery/pkg/version.Info": schema_k8sio_apimachinery_pkg_version_Info(ref), - "kmodules.xyz/client-go/api/v1.CAPIClusterInfo": schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref), - "kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref), - "kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref), - "kmodules.xyz/client-go/api/v1.ClusterClaimFeatures": schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref), - "kmodules.xyz/client-go/api/v1.ClusterClaimInfo": schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref), - "kmodules.xyz/client-go/api/v1.ClusterInfo": schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref), - "kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref), - "kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref), - "kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref), - "kmodules.xyz/client-go/api/v1.ImageInfo": schema_kmodulesxyz_client_go_api_v1_ImageInfo(ref), 
- "kmodules.xyz/client-go/api/v1.Lineage": schema_kmodulesxyz_client_go_api_v1_Lineage(ref), - "kmodules.xyz/client-go/api/v1.ObjectID": schema_kmodulesxyz_client_go_api_v1_ObjectID(ref), - "kmodules.xyz/client-go/api/v1.ObjectInfo": schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref), - "kmodules.xyz/client-go/api/v1.ObjectReference": schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref), - "kmodules.xyz/client-go/api/v1.PullCredentials": schema_kmodulesxyz_client_go_api_v1_PullCredentials(ref), - "kmodules.xyz/client-go/api/v1.ReadonlyHealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref), - "kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref), - "kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref), - "kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref), - "kmodules.xyz/client-go/api/v1.TypeReference": schema_kmodulesxyz_client_go_api_v1_TypeReference(ref), - "kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref), - "kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref), - "kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeyTransform(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeysFromTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeysFromTransform(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding": schema_custom_resources_apis_appcatalog_v1alpha1_AppBinding(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingList": schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingList(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec": schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppReference": schema_custom_resources_apis_appcatalog_v1alpha1_AppReference(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig": schema_custom_resources_apis_appcatalog_v1alpha1_ClientConfig(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_ObjectReference(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.Param": schema_custom_resources_apis_appcatalog_v1alpha1_Param(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.RemoveKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_RemoveKeyTransform(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.RenameKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_RenameKeyTransform(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform": schema_custom_resources_apis_appcatalog_v1alpha1_SecretTransform(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ServiceReference": schema_custom_resources_apis_appcatalog_v1alpha1_ServiceReference(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddon": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddon(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), - 
"kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), - "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "k8s.io/api/apps/v1.ControllerRevision": schema_k8sio_api_apps_v1_ControllerRevision(ref), + "k8s.io/api/apps/v1.ControllerRevisionList": schema_k8sio_api_apps_v1_ControllerRevisionList(ref), + "k8s.io/api/apps/v1.DaemonSet": schema_k8sio_api_apps_v1_DaemonSet(ref), + "k8s.io/api/apps/v1.DaemonSetCondition": schema_k8sio_api_apps_v1_DaemonSetCondition(ref), + "k8s.io/api/apps/v1.DaemonSetList": schema_k8sio_api_apps_v1_DaemonSetList(ref), + "k8s.io/api/apps/v1.DaemonSetSpec": schema_k8sio_api_apps_v1_DaemonSetSpec(ref), + "k8s.io/api/apps/v1.DaemonSetStatus": schema_k8sio_api_apps_v1_DaemonSetStatus(ref), + "k8s.io/api/apps/v1.DaemonSetUpdateStrategy": schema_k8sio_api_apps_v1_DaemonSetUpdateStrategy(ref), + "k8s.io/api/apps/v1.Deployment": schema_k8sio_api_apps_v1_Deployment(ref), + "k8s.io/api/apps/v1.DeploymentCondition": schema_k8sio_api_apps_v1_DeploymentCondition(ref), + "k8s.io/api/apps/v1.DeploymentList": schema_k8sio_api_apps_v1_DeploymentList(ref), + "k8s.io/api/apps/v1.DeploymentSpec": schema_k8sio_api_apps_v1_DeploymentSpec(ref), + "k8s.io/api/apps/v1.DeploymentStatus": schema_k8sio_api_apps_v1_DeploymentStatus(ref), + "k8s.io/api/apps/v1.DeploymentStrategy": schema_k8sio_api_apps_v1_DeploymentStrategy(ref), + "k8s.io/api/apps/v1.ReplicaSet": schema_k8sio_api_apps_v1_ReplicaSet(ref), + "k8s.io/api/apps/v1.ReplicaSetCondition": schema_k8sio_api_apps_v1_ReplicaSetCondition(ref), + "k8s.io/api/apps/v1.ReplicaSetList": schema_k8sio_api_apps_v1_ReplicaSetList(ref), + "k8s.io/api/apps/v1.ReplicaSetSpec": schema_k8sio_api_apps_v1_ReplicaSetSpec(ref), + "k8s.io/api/apps/v1.ReplicaSetStatus": schema_k8sio_api_apps_v1_ReplicaSetStatus(ref), + "k8s.io/api/apps/v1.RollingUpdateDaemonSet": schema_k8sio_api_apps_v1_RollingUpdateDaemonSet(ref), + "k8s.io/api/apps/v1.RollingUpdateDeployment": schema_k8sio_api_apps_v1_RollingUpdateDeployment(ref), + "k8s.io/api/apps/v1.RollingUpdateStatefulSetStrategy": schema_k8sio_api_apps_v1_RollingUpdateStatefulSetStrategy(ref), + "k8s.io/api/apps/v1.StatefulSet": schema_k8sio_api_apps_v1_StatefulSet(ref), + "k8s.io/api/apps/v1.StatefulSetCondition": schema_k8sio_api_apps_v1_StatefulSetCondition(ref), + "k8s.io/api/apps/v1.StatefulSetList": schema_k8sio_api_apps_v1_StatefulSetList(ref), + "k8s.io/api/apps/v1.StatefulSetOrdinals": schema_k8sio_api_apps_v1_StatefulSetOrdinals(ref), + "k8s.io/api/apps/v1.StatefulSetPersistentVolumeClaimRetentionPolicy": schema_k8sio_api_apps_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(ref), + "k8s.io/api/apps/v1.StatefulSetSpec": schema_k8sio_api_apps_v1_StatefulSetSpec(ref), + "k8s.io/api/apps/v1.StatefulSetStatus": schema_k8sio_api_apps_v1_StatefulSetStatus(ref), + "k8s.io/api/apps/v1.StatefulSetUpdateStrategy": schema_k8sio_api_apps_v1_StatefulSetUpdateStrategy(ref), + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref), + "k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref), + "k8s.io/api/core/v1.AppArmorProfile": schema_k8sio_api_core_v1_AppArmorProfile(ref), + "k8s.io/api/core/v1.AttachedVolume": schema_k8sio_api_core_v1_AttachedVolume(ref), + "k8s.io/api/core/v1.AvoidPods": schema_k8sio_api_core_v1_AvoidPods(ref), + "k8s.io/api/core/v1.AzureDiskVolumeSource": 
schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref), + "k8s.io/api/core/v1.AzureFilePersistentVolumeSource": schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref), + "k8s.io/api/core/v1.AzureFileVolumeSource": schema_k8sio_api_core_v1_AzureFileVolumeSource(ref), + "k8s.io/api/core/v1.Binding": schema_k8sio_api_core_v1_Binding(ref), + "k8s.io/api/core/v1.CSIPersistentVolumeSource": schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CSIVolumeSource": schema_k8sio_api_core_v1_CSIVolumeSource(ref), + "k8s.io/api/core/v1.Capabilities": schema_k8sio_api_core_v1_Capabilities(ref), + "k8s.io/api/core/v1.CephFSPersistentVolumeSource": schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CephFSVolumeSource": schema_k8sio_api_core_v1_CephFSVolumeSource(ref), + "k8s.io/api/core/v1.CinderPersistentVolumeSource": schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref), + "k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref), + "k8s.io/api/core/v1.ClusterTrustBundleProjection": schema_k8sio_api_core_v1_ClusterTrustBundleProjection(ref), + "k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref), + "k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref), + "k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref), + "k8s.io/api/core/v1.ConfigMap": schema_k8sio_api_core_v1_ConfigMap(ref), + "k8s.io/api/core/v1.ConfigMapEnvSource": schema_k8sio_api_core_v1_ConfigMapEnvSource(ref), + "k8s.io/api/core/v1.ConfigMapKeySelector": schema_k8sio_api_core_v1_ConfigMapKeySelector(ref), + "k8s.io/api/core/v1.ConfigMapList": schema_k8sio_api_core_v1_ConfigMapList(ref), + "k8s.io/api/core/v1.ConfigMapNodeConfigSource": schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref), + "k8s.io/api/core/v1.ConfigMapProjection": schema_k8sio_api_core_v1_ConfigMapProjection(ref), + "k8s.io/api/core/v1.ConfigMapVolumeSource": schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref), + "k8s.io/api/core/v1.Container": schema_k8sio_api_core_v1_Container(ref), + "k8s.io/api/core/v1.ContainerImage": schema_k8sio_api_core_v1_ContainerImage(ref), + "k8s.io/api/core/v1.ContainerPort": schema_k8sio_api_core_v1_ContainerPort(ref), + "k8s.io/api/core/v1.ContainerResizePolicy": schema_k8sio_api_core_v1_ContainerResizePolicy(ref), + "k8s.io/api/core/v1.ContainerState": schema_k8sio_api_core_v1_ContainerState(ref), + "k8s.io/api/core/v1.ContainerStateRunning": schema_k8sio_api_core_v1_ContainerStateRunning(ref), + "k8s.io/api/core/v1.ContainerStateTerminated": schema_k8sio_api_core_v1_ContainerStateTerminated(ref), + "k8s.io/api/core/v1.ContainerStateWaiting": schema_k8sio_api_core_v1_ContainerStateWaiting(ref), + "k8s.io/api/core/v1.ContainerStatus": schema_k8sio_api_core_v1_ContainerStatus(ref), + "k8s.io/api/core/v1.ContainerUser": schema_k8sio_api_core_v1_ContainerUser(ref), + "k8s.io/api/core/v1.DaemonEndpoint": schema_k8sio_api_core_v1_DaemonEndpoint(ref), + "k8s.io/api/core/v1.DownwardAPIProjection": schema_k8sio_api_core_v1_DownwardAPIProjection(ref), + "k8s.io/api/core/v1.DownwardAPIVolumeFile": schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref), + "k8s.io/api/core/v1.DownwardAPIVolumeSource": schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref), + "k8s.io/api/core/v1.EmptyDirVolumeSource": schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref), + 
"k8s.io/api/core/v1.EndpointAddress": schema_k8sio_api_core_v1_EndpointAddress(ref), + "k8s.io/api/core/v1.EndpointPort": schema_k8sio_api_core_v1_EndpointPort(ref), + "k8s.io/api/core/v1.EndpointSubset": schema_k8sio_api_core_v1_EndpointSubset(ref), + "k8s.io/api/core/v1.Endpoints": schema_k8sio_api_core_v1_Endpoints(ref), + "k8s.io/api/core/v1.EndpointsList": schema_k8sio_api_core_v1_EndpointsList(ref), + "k8s.io/api/core/v1.EnvFromSource": schema_k8sio_api_core_v1_EnvFromSource(ref), + "k8s.io/api/core/v1.EnvVar": schema_k8sio_api_core_v1_EnvVar(ref), + "k8s.io/api/core/v1.EnvVarSource": schema_k8sio_api_core_v1_EnvVarSource(ref), + "k8s.io/api/core/v1.EphemeralContainer": schema_k8sio_api_core_v1_EphemeralContainer(ref), + "k8s.io/api/core/v1.EphemeralContainerCommon": schema_k8sio_api_core_v1_EphemeralContainerCommon(ref), + "k8s.io/api/core/v1.EphemeralVolumeSource": schema_k8sio_api_core_v1_EphemeralVolumeSource(ref), + "k8s.io/api/core/v1.Event": schema_k8sio_api_core_v1_Event(ref), + "k8s.io/api/core/v1.EventList": schema_k8sio_api_core_v1_EventList(ref), + "k8s.io/api/core/v1.EventSeries": schema_k8sio_api_core_v1_EventSeries(ref), + "k8s.io/api/core/v1.EventSource": schema_k8sio_api_core_v1_EventSource(ref), + "k8s.io/api/core/v1.ExecAction": schema_k8sio_api_core_v1_ExecAction(ref), + "k8s.io/api/core/v1.FCVolumeSource": schema_k8sio_api_core_v1_FCVolumeSource(ref), + "k8s.io/api/core/v1.FlexPersistentVolumeSource": schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref), + "k8s.io/api/core/v1.FlexVolumeSource": schema_k8sio_api_core_v1_FlexVolumeSource(ref), + "k8s.io/api/core/v1.FlockerVolumeSource": schema_k8sio_api_core_v1_FlockerVolumeSource(ref), + "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource": schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref), + "k8s.io/api/core/v1.GRPCAction": schema_k8sio_api_core_v1_GRPCAction(ref), + "k8s.io/api/core/v1.GitRepoVolumeSource": schema_k8sio_api_core_v1_GitRepoVolumeSource(ref), + "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource": schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref), + "k8s.io/api/core/v1.GlusterfsVolumeSource": schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref), + "k8s.io/api/core/v1.HTTPGetAction": schema_k8sio_api_core_v1_HTTPGetAction(ref), + "k8s.io/api/core/v1.HTTPHeader": schema_k8sio_api_core_v1_HTTPHeader(ref), + "k8s.io/api/core/v1.HostAlias": schema_k8sio_api_core_v1_HostAlias(ref), + "k8s.io/api/core/v1.HostIP": schema_k8sio_api_core_v1_HostIP(ref), + "k8s.io/api/core/v1.HostPathVolumeSource": schema_k8sio_api_core_v1_HostPathVolumeSource(ref), + "k8s.io/api/core/v1.ISCSIPersistentVolumeSource": schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref), + "k8s.io/api/core/v1.ISCSIVolumeSource": schema_k8sio_api_core_v1_ISCSIVolumeSource(ref), + "k8s.io/api/core/v1.ImageVolumeSource": schema_k8sio_api_core_v1_ImageVolumeSource(ref), + "k8s.io/api/core/v1.KeyToPath": schema_k8sio_api_core_v1_KeyToPath(ref), + "k8s.io/api/core/v1.Lifecycle": schema_k8sio_api_core_v1_Lifecycle(ref), + "k8s.io/api/core/v1.LifecycleHandler": schema_k8sio_api_core_v1_LifecycleHandler(ref), + "k8s.io/api/core/v1.LimitRange": schema_k8sio_api_core_v1_LimitRange(ref), + "k8s.io/api/core/v1.LimitRangeItem": schema_k8sio_api_core_v1_LimitRangeItem(ref), + "k8s.io/api/core/v1.LimitRangeList": schema_k8sio_api_core_v1_LimitRangeList(ref), + "k8s.io/api/core/v1.LimitRangeSpec": schema_k8sio_api_core_v1_LimitRangeSpec(ref), + "k8s.io/api/core/v1.LinuxContainerUser": 
schema_k8sio_api_core_v1_LinuxContainerUser(ref), + "k8s.io/api/core/v1.List": schema_k8sio_api_core_v1_List(ref), + "k8s.io/api/core/v1.LoadBalancerIngress": schema_k8sio_api_core_v1_LoadBalancerIngress(ref), + "k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref), + "k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref), + "k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref), + "k8s.io/api/core/v1.ModifyVolumeStatus": schema_k8sio_api_core_v1_ModifyVolumeStatus(ref), + "k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref), + "k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref), + "k8s.io/api/core/v1.NamespaceCondition": schema_k8sio_api_core_v1_NamespaceCondition(ref), + "k8s.io/api/core/v1.NamespaceList": schema_k8sio_api_core_v1_NamespaceList(ref), + "k8s.io/api/core/v1.NamespaceSpec": schema_k8sio_api_core_v1_NamespaceSpec(ref), + "k8s.io/api/core/v1.NamespaceStatus": schema_k8sio_api_core_v1_NamespaceStatus(ref), + "k8s.io/api/core/v1.Node": schema_k8sio_api_core_v1_Node(ref), + "k8s.io/api/core/v1.NodeAddress": schema_k8sio_api_core_v1_NodeAddress(ref), + "k8s.io/api/core/v1.NodeAffinity": schema_k8sio_api_core_v1_NodeAffinity(ref), + "k8s.io/api/core/v1.NodeCondition": schema_k8sio_api_core_v1_NodeCondition(ref), + "k8s.io/api/core/v1.NodeConfigSource": schema_k8sio_api_core_v1_NodeConfigSource(ref), + "k8s.io/api/core/v1.NodeConfigStatus": schema_k8sio_api_core_v1_NodeConfigStatus(ref), + "k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref), + "k8s.io/api/core/v1.NodeFeatures": schema_k8sio_api_core_v1_NodeFeatures(ref), + "k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref), + "k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref), + "k8s.io/api/core/v1.NodeRuntimeHandler": schema_k8sio_api_core_v1_NodeRuntimeHandler(ref), + "k8s.io/api/core/v1.NodeRuntimeHandlerFeatures": schema_k8sio_api_core_v1_NodeRuntimeHandlerFeatures(ref), + "k8s.io/api/core/v1.NodeSelector": schema_k8sio_api_core_v1_NodeSelector(ref), + "k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref), + "k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref), + "k8s.io/api/core/v1.NodeSpec": schema_k8sio_api_core_v1_NodeSpec(ref), + "k8s.io/api/core/v1.NodeStatus": schema_k8sio_api_core_v1_NodeStatus(ref), + "k8s.io/api/core/v1.NodeSystemInfo": schema_k8sio_api_core_v1_NodeSystemInfo(ref), + "k8s.io/api/core/v1.ObjectFieldSelector": schema_k8sio_api_core_v1_ObjectFieldSelector(ref), + "k8s.io/api/core/v1.ObjectReference": schema_k8sio_api_core_v1_ObjectReference(ref), + "k8s.io/api/core/v1.PersistentVolume": schema_k8sio_api_core_v1_PersistentVolume(ref), + "k8s.io/api/core/v1.PersistentVolumeClaim": schema_k8sio_api_core_v1_PersistentVolumeClaim(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimCondition": schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimList": schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimSpec": schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimStatus": schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimTemplate": schema_k8sio_api_core_v1_PersistentVolumeClaimTemplate(ref), + 
"k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref), + "k8s.io/api/core/v1.PersistentVolumeList": schema_k8sio_api_core_v1_PersistentVolumeList(ref), + "k8s.io/api/core/v1.PersistentVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeSource(ref), + "k8s.io/api/core/v1.PersistentVolumeSpec": schema_k8sio_api_core_v1_PersistentVolumeSpec(ref), + "k8s.io/api/core/v1.PersistentVolumeStatus": schema_k8sio_api_core_v1_PersistentVolumeStatus(ref), + "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource": schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref), + "k8s.io/api/core/v1.Pod": schema_k8sio_api_core_v1_Pod(ref), + "k8s.io/api/core/v1.PodAffinity": schema_k8sio_api_core_v1_PodAffinity(ref), + "k8s.io/api/core/v1.PodAffinityTerm": schema_k8sio_api_core_v1_PodAffinityTerm(ref), + "k8s.io/api/core/v1.PodAntiAffinity": schema_k8sio_api_core_v1_PodAntiAffinity(ref), + "k8s.io/api/core/v1.PodAttachOptions": schema_k8sio_api_core_v1_PodAttachOptions(ref), + "k8s.io/api/core/v1.PodCondition": schema_k8sio_api_core_v1_PodCondition(ref), + "k8s.io/api/core/v1.PodDNSConfig": schema_k8sio_api_core_v1_PodDNSConfig(ref), + "k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref), + "k8s.io/api/core/v1.PodExecOptions": schema_k8sio_api_core_v1_PodExecOptions(ref), + "k8s.io/api/core/v1.PodIP": schema_k8sio_api_core_v1_PodIP(ref), + "k8s.io/api/core/v1.PodList": schema_k8sio_api_core_v1_PodList(ref), + "k8s.io/api/core/v1.PodLogOptions": schema_k8sio_api_core_v1_PodLogOptions(ref), + "k8s.io/api/core/v1.PodOS": schema_k8sio_api_core_v1_PodOS(ref), + "k8s.io/api/core/v1.PodPortForwardOptions": schema_k8sio_api_core_v1_PodPortForwardOptions(ref), + "k8s.io/api/core/v1.PodProxyOptions": schema_k8sio_api_core_v1_PodProxyOptions(ref), + "k8s.io/api/core/v1.PodReadinessGate": schema_k8sio_api_core_v1_PodReadinessGate(ref), + "k8s.io/api/core/v1.PodResourceClaim": schema_k8sio_api_core_v1_PodResourceClaim(ref), + "k8s.io/api/core/v1.PodResourceClaimStatus": schema_k8sio_api_core_v1_PodResourceClaimStatus(ref), + "k8s.io/api/core/v1.PodSchedulingGate": schema_k8sio_api_core_v1_PodSchedulingGate(ref), + "k8s.io/api/core/v1.PodSecurityContext": schema_k8sio_api_core_v1_PodSecurityContext(ref), + "k8s.io/api/core/v1.PodSignature": schema_k8sio_api_core_v1_PodSignature(ref), + "k8s.io/api/core/v1.PodSpec": schema_k8sio_api_core_v1_PodSpec(ref), + "k8s.io/api/core/v1.PodStatus": schema_k8sio_api_core_v1_PodStatus(ref), + "k8s.io/api/core/v1.PodStatusResult": schema_k8sio_api_core_v1_PodStatusResult(ref), + "k8s.io/api/core/v1.PodTemplate": schema_k8sio_api_core_v1_PodTemplate(ref), + "k8s.io/api/core/v1.PodTemplateList": schema_k8sio_api_core_v1_PodTemplateList(ref), + "k8s.io/api/core/v1.PodTemplateSpec": schema_k8sio_api_core_v1_PodTemplateSpec(ref), + "k8s.io/api/core/v1.PortStatus": schema_k8sio_api_core_v1_PortStatus(ref), + "k8s.io/api/core/v1.PortworxVolumeSource": schema_k8sio_api_core_v1_PortworxVolumeSource(ref), + "k8s.io/api/core/v1.PreferAvoidPodsEntry": schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref), + "k8s.io/api/core/v1.PreferredSchedulingTerm": schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref), + "k8s.io/api/core/v1.Probe": schema_k8sio_api_core_v1_Probe(ref), + "k8s.io/api/core/v1.ProbeHandler": schema_k8sio_api_core_v1_ProbeHandler(ref), + "k8s.io/api/core/v1.ProjectedVolumeSource": schema_k8sio_api_core_v1_ProjectedVolumeSource(ref), + "k8s.io/api/core/v1.QuobyteVolumeSource": 
schema_k8sio_api_core_v1_QuobyteVolumeSource(ref), + "k8s.io/api/core/v1.RBDPersistentVolumeSource": schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref), + "k8s.io/api/core/v1.RBDVolumeSource": schema_k8sio_api_core_v1_RBDVolumeSource(ref), + "k8s.io/api/core/v1.RangeAllocation": schema_k8sio_api_core_v1_RangeAllocation(ref), + "k8s.io/api/core/v1.ReplicationController": schema_k8sio_api_core_v1_ReplicationController(ref), + "k8s.io/api/core/v1.ReplicationControllerCondition": schema_k8sio_api_core_v1_ReplicationControllerCondition(ref), + "k8s.io/api/core/v1.ReplicationControllerList": schema_k8sio_api_core_v1_ReplicationControllerList(ref), + "k8s.io/api/core/v1.ReplicationControllerSpec": schema_k8sio_api_core_v1_ReplicationControllerSpec(ref), + "k8s.io/api/core/v1.ReplicationControllerStatus": schema_k8sio_api_core_v1_ReplicationControllerStatus(ref), + "k8s.io/api/core/v1.ResourceClaim": schema_k8sio_api_core_v1_ResourceClaim(ref), + "k8s.io/api/core/v1.ResourceFieldSelector": schema_k8sio_api_core_v1_ResourceFieldSelector(ref), + "k8s.io/api/core/v1.ResourceHealth": schema_k8sio_api_core_v1_ResourceHealth(ref), + "k8s.io/api/core/v1.ResourceQuota": schema_k8sio_api_core_v1_ResourceQuota(ref), + "k8s.io/api/core/v1.ResourceQuotaList": schema_k8sio_api_core_v1_ResourceQuotaList(ref), + "k8s.io/api/core/v1.ResourceQuotaSpec": schema_k8sio_api_core_v1_ResourceQuotaSpec(ref), + "k8s.io/api/core/v1.ResourceQuotaStatus": schema_k8sio_api_core_v1_ResourceQuotaStatus(ref), + "k8s.io/api/core/v1.ResourceRequirements": schema_k8sio_api_core_v1_ResourceRequirements(ref), + "k8s.io/api/core/v1.ResourceStatus": schema_k8sio_api_core_v1_ResourceStatus(ref), + "k8s.io/api/core/v1.SELinuxOptions": schema_k8sio_api_core_v1_SELinuxOptions(ref), + "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref), + "k8s.io/api/core/v1.ScaleIOVolumeSource": schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref), + "k8s.io/api/core/v1.ScopeSelector": schema_k8sio_api_core_v1_ScopeSelector(ref), + "k8s.io/api/core/v1.ScopedResourceSelectorRequirement": schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref), + "k8s.io/api/core/v1.SeccompProfile": schema_k8sio_api_core_v1_SeccompProfile(ref), + "k8s.io/api/core/v1.Secret": schema_k8sio_api_core_v1_Secret(ref), + "k8s.io/api/core/v1.SecretEnvSource": schema_k8sio_api_core_v1_SecretEnvSource(ref), + "k8s.io/api/core/v1.SecretKeySelector": schema_k8sio_api_core_v1_SecretKeySelector(ref), + "k8s.io/api/core/v1.SecretList": schema_k8sio_api_core_v1_SecretList(ref), + "k8s.io/api/core/v1.SecretProjection": schema_k8sio_api_core_v1_SecretProjection(ref), + "k8s.io/api/core/v1.SecretReference": schema_k8sio_api_core_v1_SecretReference(ref), + "k8s.io/api/core/v1.SecretVolumeSource": schema_k8sio_api_core_v1_SecretVolumeSource(ref), + "k8s.io/api/core/v1.SecurityContext": schema_k8sio_api_core_v1_SecurityContext(ref), + "k8s.io/api/core/v1.SerializedReference": schema_k8sio_api_core_v1_SerializedReference(ref), + "k8s.io/api/core/v1.Service": schema_k8sio_api_core_v1_Service(ref), + "k8s.io/api/core/v1.ServiceAccount": schema_k8sio_api_core_v1_ServiceAccount(ref), + "k8s.io/api/core/v1.ServiceAccountList": schema_k8sio_api_core_v1_ServiceAccountList(ref), + "k8s.io/api/core/v1.ServiceAccountTokenProjection": schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref), + "k8s.io/api/core/v1.ServiceList": schema_k8sio_api_core_v1_ServiceList(ref), + "k8s.io/api/core/v1.ServicePort": 
schema_k8sio_api_core_v1_ServicePort(ref), + "k8s.io/api/core/v1.ServiceProxyOptions": schema_k8sio_api_core_v1_ServiceProxyOptions(ref), + "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), + "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), + "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), + "k8s.io/api/core/v1.SleepAction": schema_k8sio_api_core_v1_SleepAction(ref), + "k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), + "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), + "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), + "k8s.io/api/core/v1.TCPSocketAction": schema_k8sio_api_core_v1_TCPSocketAction(ref), + "k8s.io/api/core/v1.Taint": schema_k8sio_api_core_v1_Taint(ref), + "k8s.io/api/core/v1.Toleration": schema_k8sio_api_core_v1_Toleration(ref), + "k8s.io/api/core/v1.TopologySelectorLabelRequirement": schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref), + "k8s.io/api/core/v1.TopologySelectorTerm": schema_k8sio_api_core_v1_TopologySelectorTerm(ref), + "k8s.io/api/core/v1.TopologySpreadConstraint": schema_k8sio_api_core_v1_TopologySpreadConstraint(ref), + "k8s.io/api/core/v1.TypedLocalObjectReference": schema_k8sio_api_core_v1_TypedLocalObjectReference(ref), + "k8s.io/api/core/v1.TypedObjectReference": schema_k8sio_api_core_v1_TypedObjectReference(ref), + "k8s.io/api/core/v1.Volume": schema_k8sio_api_core_v1_Volume(ref), + "k8s.io/api/core/v1.VolumeDevice": schema_k8sio_api_core_v1_VolumeDevice(ref), + "k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref), + "k8s.io/api/core/v1.VolumeMountStatus": schema_k8sio_api_core_v1_VolumeMountStatus(ref), + "k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref), + "k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref), + "k8s.io/api/core/v1.VolumeResourceRequirements": schema_k8sio_api_core_v1_VolumeResourceRequirements(ref), + "k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref), + "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref), + "k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref), + "k8s.io/api/core/v1.WindowsSecurityContextOptions": schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref), + "k8s.io/apimachinery/pkg/api/resource.Quantity": schema_apimachinery_pkg_api_resource_Quantity(ref), + "k8s.io/apimachinery/pkg/api/resource.int64Amount": schema_apimachinery_pkg_api_resource_int64Amount(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ApplyOptions": schema_pkg_apis_meta_v1_ApplyOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Condition": schema_pkg_apis_meta_v1_Condition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.FieldSelectorRequirement": schema_pkg_apis_meta_v1_FieldSelectorRequirement(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": schema_pkg_apis_meta_v1_TableRowCondition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), + "k8s.io/apimachinery/pkg/runtime.RawExtension": schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref), + "k8s.io/apimachinery/pkg/runtime.TypeMeta": schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref), + "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), + "k8s.io/apimachinery/pkg/util/intstr.IntOrString": schema_apimachinery_pkg_util_intstr_IntOrString(ref), + "k8s.io/apimachinery/pkg/version.Info": schema_k8sio_apimachinery_pkg_version_Info(ref), + "kmodules.xyz/client-go/api/v1.CAPIClusterInfo": schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref), + "kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref), + "kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref), + "kmodules.xyz/client-go/api/v1.ClusterClaimFeatures": schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref), + "kmodules.xyz/client-go/api/v1.ClusterClaimInfo": schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref), + "kmodules.xyz/client-go/api/v1.ClusterInfo": schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref), + "kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref), + "kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref), + "kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref), + "kmodules.xyz/client-go/api/v1.ImageInfo": schema_kmodulesxyz_client_go_api_v1_ImageInfo(ref), + "kmodules.xyz/client-go/api/v1.Lineage": schema_kmodulesxyz_client_go_api_v1_Lineage(ref), + "kmodules.xyz/client-go/api/v1.ObjectID": schema_kmodulesxyz_client_go_api_v1_ObjectID(ref), + "kmodules.xyz/client-go/api/v1.ObjectInfo": schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref), + "kmodules.xyz/client-go/api/v1.ObjectReference": schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref), + "kmodules.xyz/client-go/api/v1.PullCredentials": schema_kmodulesxyz_client_go_api_v1_PullCredentials(ref), + "kmodules.xyz/client-go/api/v1.ReadonlyHealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref), + "kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref), + "kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref), + "kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref), + "kmodules.xyz/client-go/api/v1.TypeReference": schema_kmodulesxyz_client_go_api_v1_TypeReference(ref), + "kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref), + "kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref), + "kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref), + 
"kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeyTransform(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeysFromTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeysFromTransform(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding": schema_custom_resources_apis_appcatalog_v1alpha1_AppBinding(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingList": schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingList(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec": schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppReference": schema_custom_resources_apis_appcatalog_v1alpha1_AppReference(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig": schema_custom_resources_apis_appcatalog_v1alpha1_ClientConfig(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_ObjectReference(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.Param": schema_custom_resources_apis_appcatalog_v1alpha1_Param(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.RemoveKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_RemoveKeyTransform(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.RenameKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_RenameKeyTransform(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform": schema_custom_resources_apis_appcatalog_v1alpha1_SecretTransform(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ServiceReference": schema_custom_resources_apis_appcatalog_v1alpha1_ServiceReference(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddon": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddon(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), } } @@ -19268,7 +19269,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -19294,7 +19295,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
"tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -19302,7 +19303,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -19717,3 +19718,45 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.Param"}, } } + +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go index 22b722ec4..c11815112 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go @@ -146,7 +146,7 @@ func (in *AppBindingSpec) DeepCopyInto(out *AppBindingSpec) { in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(v1.LocalObjectReference) + *out = new(TypedLocalObjectReference) **out = **in } if in.SecretTransforms != nil { @@ -163,7 +163,7 @@ func (in *AppBindingSpec) DeepCopyInto(out *AppBindingSpec) { } if in.TLSSecret != nil { in, out := &in.TLSSecret, &out.TLSSecret - *out = new(v1.LocalObjectReference) + *out = new(TypedLocalObjectReference) **out = **in } return @@ -428,3 +428,19 @@ func (in *TaskRef) DeepCopy() *TaskRef { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. +func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { + if in == nil { + return nil + } + out := new(TypedLocalObjectReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml b/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml index 522d737b3..65a8212d0 100644 --- a/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml +++ b/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml @@ -173,15 +173,23 @@ spec: Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding. properties: - name: + apiGroup: default: "" description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + default: Secret + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic secretTransforms: @@ -277,15 +285,23 @@ spec: TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding. properties: - name: + apiGroup: default: "" description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + default: Secret + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: diff --git a/vendor/kubedb.dev/apimachinery/apis/archiver/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/archiver/v1alpha1/openapi_generated.go index 58da65e30..49f162768 100644 --- a/vendor/kubedb.dev/apimachinery/apis/archiver/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/archiver/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22578,7 +22579,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22604,7 +22605,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
"tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22612,7 +22613,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23028,6 +23029,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/autoscaling/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/autoscaling/v1alpha1/openapi_generated.go index d28f4e680..8d9628c76 100644 --- a/vendor/kubedb.dev/apimachinery/apis/autoscaling/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/autoscaling/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": 
schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22715,7 +22716,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22741,7 +22742,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22749,7 +22750,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23165,6 +23166,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go index 40120962a..d10e4609f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22757,7 +22758,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22783,7 +22784,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22791,7 +22792,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23207,6 +23208,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/config/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/config/v1alpha1/openapi_generated.go index 2f1d9c6db..a2de16e12 100644 --- a/vendor/kubedb.dev/apimachinery/apis/config/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/config/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22555,7 +22556,7 @@ func 
schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22581,7 +22582,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22589,7 +22590,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23005,6 +23006,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/openapi_generated.go index 8ef508165..b20ce175e 100644 --- a/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22554,7 +22555,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22580,7 +22581,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22588,7 +22589,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
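
Editorial note: for readers who do not want to parse the generated schema, here is a minimal Go sketch of the reference type these hunks describe. The real definition ships in kmodules.xyz/custom-resources v0.32.2; the struct below only mirrors the shape the schema implies (apiGroup optional, kind and name required) and is not the vendored source.

```go
package main

import "fmt"

// TypedLocalObjectReference sketches the appcatalog v1alpha1 type as implied
// by the generated OpenAPI schema above (illustration only).
type TypedLocalObjectReference struct {
	// APIGroup of the referent; empty selects the core API group.
	APIGroup string `json:"apiGroup,omitempty"`
	// Kind of the referent, e.g. "Secret"; required by the schema.
	Kind string `json:"kind"`
	// Name of the referent; required by the schema.
	Name string `json:"name"`
}

func main() {
	ref := TypedLocalObjectReference{Kind: "Secret", Name: "demo-auth"}
	fmt.Printf("group=%q kind=%q name=%q\n", ref.APIGroup, ref.Kind, ref.Name)
}
```
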
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23004,6 +23005,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/gitops/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/gitops/v1alpha1/openapi_generated.go index 19dcfba71..756f22b28 100644 --- a/vendor/kubedb.dev/apimachinery/apis/gitops/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/gitops/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22613,7 +22614,7 @@ func 
schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22639,7 +22640,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22647,7 +22648,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23063,6 +23064,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/openapi_generated.go index c020162a8..da1ab86db 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22572,7 +22573,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22598,7 +22599,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22606,7 +22607,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
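
Editorial note: the practical effect of swapping `k8s.io/api/core/v1.LocalObjectReference` for the appcatalog `TypedLocalObjectReference` is that an AppBinding's `secret` and `tlsSecret` now carry an explicit kind (and optionally an API group), so non-core secret types can be referenced. A hedged sketch, assuming the AppBindingSpec fields in kmodules.xyz/custom-resources v0.32.2 now use the typed reference as the regenerated schemas indicate; the secret name is hypothetical:

```go
package main

import (
	"fmt"

	appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
)

func main() {
	// Credentials are now referenced with an explicit kind instead of a bare name.
	spec := appcat.AppBindingSpec{
		Secret: &appcat.TypedLocalObjectReference{
			Kind: "Secret",
			Name: "demo-db-auth", // hypothetical secret name
		},
	}
	fmt.Println(spec.Secret.Kind + "/" + spec.Secret.Name)
}
```
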
 			},
 		},
 		Dependencies: []string{
-			"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"},
+			"k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"},
 	}
 }
 
@@ -23022,6 +23023,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen
 	}
 }
 
+func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"apiGroup": {
+						SchemaProps: spec.SchemaProps{
+							Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+							Default:     "",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
+					"kind": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Kind is the type of resource being referenced",
+							Default:     "",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
+					"name": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Name is the name of resource being referenced",
+							Default:     "",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
+				},
+				Required: []string{"kind", "name"},
+			},
+			VendorExtensible: spec.VendorExtensible{
+				Extensions: spec.Extensions{
+					"x-kubernetes-map-type": "atomic",
+				},
+			},
+		},
+	}
+}
+
 func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go
index 722fbad44..d3ae03b32 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go
@@ -36,16 +36,32 @@ const (
 	LabelRole   = GroupName + "/role"
 	LabelPetSet = GroupName + "/petset"
 
+	PrometheusAddressFile     = "/var/prometheus-data/address"
+	PrometheusCaFile          = "/var/prometheus-data/ca.crt"
+	PrometheusTokenFile       = "/var/prometheus-data/token.txt"
+	MonitoringAgentAnnotation = GroupName + "/monitoring-agent"
+
 	// distributed const
-	DistributedCustomConfigSecretNameSuffix = "custom-config"
-	DistributedRBACNameSuffix               = "rbac"
-	DistributedServiceExportNameSuffix      = "serviceexports"
-	DistributedTLSSecretNameSuffix          = "tls-secrets"
-	DistributedGRPCSecretNameSuffix         = "grpc-secrets"
-	DistributedAuthSecretNameSuffix         = "auth"
-	KubeSliceNSMIPKey                       = "kubeslice.io/nsmIP"
-	KubeSlicePodIPVolumeName                = "podip"
-	KubeSlicePodIPFileName                  = "podip"
+	DistributedDatabaseLabel                   = GroupName + "/distributed"
+	DistributedCustomConfigSecretNameSuffix    = "custom-config"
+	DistributedRBACNameSuffix                  = "rbac"
+	DistributedServiceExportNameSuffix         = "serviceexports"
+	DistributedTLSSecretNameSuffix             = "tls-secrets"
+	DistributedGRPCSecretNameSuffix            = "grpc-secrets"
+	DistributedAuthSecretNameSuffix            = "auth"
+	DistributedPromethuesSecretNameSuffix      = "prometheus-data"
+	DistributedPromethuesSecretVolumeName      = "prometheus-data"
+	DistributedPromethuesSecretVolumeMountPath = "/var/prometheus-data"
+	DistributedMonitoringAgentENV              = "MONITORING_AGENT"
+	DistributedMonitoringAgentPrometheus       = "prometheus"
+	DistributedDBReplicaENV                    = "DB_REPLICAS"
+	DistributedMaxVolumeUsed                   = "max_used"
+	DistributedVolumeCapacity                  = "capacity"
+
+	KubeSliceNSMIPKey         = "kubeslice.io/nsmIP"
+	KubeSlicePodIPVolumeName  = "podip"
+	KubeSlicePodIPFileName    = "podip"
+	KubeSliceNSMContainerName = "cmd-nsc"
 
 	ReplicationModeDetectorContainerName = "replication-mode-detector"
 	DatabasePodPrimary                   = "primary"
@@ -370,9 +386,16 @@ const (
 	MariaDBMetricsExporterTLSVolumeName = "metrics-exporter-config"
 	MariaDBMetricsExporterConfigPath    = "/etc/mysql/config/exporter"
 	MariaDBDataVolumeName               = "data"
-	DatabasePodPrimaryComponent         = "Primary"
-	DatabasePodMasterComponent          = "Master"
-	DatabasePodSlaveComponent           = "Slave"
+
+	DatabasePodPrimaryComponent = "Primary"
+	DatabasePodMasterComponent  = "Master"
+	DatabasePodSlaveComponent   = "Slave"
+
+	MariaDBDistributedUpgradeCommand           = "mariadb-upgrade"
+	MariaDBDistributedPodMetricGetCommand      = "get-pod-metrics"
+	MariaDBDistributedPodGetCommand            = "get-pod"
+	MariaDBDistributedVolumeUsageGetCommand    = "get-volume-usage"
+	MariaDBDistributedVolumeCapacityGetCommand = "get-volume-capacity"
 
 	// Maxscale
 	MaxscaleCommonName = "mx"
@@ -585,7 +608,8 @@ const (
 	ProxySQLConfigSecretKey = "proxysql.cnf"
 
 	// =========================== Redis Constants ============================
-	RedisConfigKey = "redis.conf" // RedisConfigKey is going to create for the customize redis configuration
+	RedisConfigKey      = "redis.conf"    // RedisConfigKey is going to create for the customize redis configuration
+	RedisAclUserListKey = "user_acl.conf" // RedisAclUserListKey is going to create for the redis acl user list configuration
 	// DefaultConfigKey is going to create for the default redis configuration
 	RedisContainerName         = "redis"
 	RedisSentinelContainerName = "redissentinel"
@@ -929,6 +953,8 @@
 	KafkaClusterID   = "cluster.id"
 	KafkaClientID    = "client.id"
 	KafkaDataDirName = "log.dirs"
+	KafkaReplicaSelectorClassKey  = "replica.selector.class"
+	KafkaReplicaSelectorClassName = "org.apache.kafka.common.replica.RackAwareReplicaSelector"
 	KafkaMetadataDirName     = "metadata.log.dir"
 	KafkaServerKeystoreKey   = "server.keystore.jks"
 	KafkaServerTruststoreKey = "server.truststore.jks"
@@ -1612,10 +1638,13 @@
 	ClickHouseClientKey  = "tls.key"
 	ClickHouseClientPath = "client.key"
 
+	ClickHouseUserInitScriptVolumeName      = "initial-script"
+	ClickHouseUserInitScriptVolumeMountPath = "/docker-entrypoint-initdb.d"
+
 	// keeper
-	ClickHouseKeeperDataPath     = "/var/lib/clickhouse_keeper"
-	ClickHouseKeeperLogPath      = "/var/lib/clickhouse_keeper/coordination/logs"
-	ClickHouseKeeperSnapshotPath = "/var/lib/clickhouse_keeper/coordination/snapshots"
+	ClickHouseKeeperDataPath     = "/var/lib/clickhouse"
+	ClickHouseKeeperLogPath      = "/var/lib/clickhouse/coordination/logs"
+	ClickHouseKeeperSnapshotPath = "/var/lib/clickhouse/coordination/snapshots"
 
 	ClickHouseInternalKeeperDataPath     = "/var/lib/clickhouse/coordination/log"
 	ClickHouseInternalKeeperSnapshotPath = "/var/lib/clickhouse/coordination/snapshots"
@@ -1796,6 +1825,7 @@
 const (
 	ResourceKindStatefulSet = "StatefulSet"
 	ResourceKindPetSet      = "PetSet"
+	ResourceKindSecret      = "Secret"
 )
 
 var (
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/elasticsearch_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/elasticsearch_helpers.go
index 2a2a6d69c..6394aa40a 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/elasticsearch_helpers.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/elasticsearch_helpers.go
@@ -40,6 +40,7 @@ import (
 	core_util "kmodules.xyz/client-go/core/v1"
 	meta_util "kmodules.xyz/client-go/meta"
 	"kmodules.xyz/client-go/policy/secomp"
+	app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
 	appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
 	mona "kmodules.xyz/monitoring-agent-api/api/v1"
 	ofstv2 "kmodules.xyz/offshoot-api/api/v2"
@@ -463,6 +464,15 @@ func (e *Elasticsearch) SetDefaults(esVersion *catalog.ElasticsearchVersion) {
 		e.Spec.PodTemplate.Spec.ServiceAccountName = e.OffshootName()
 	}
 
+	if !e.Spec.DisableSecurity {
+		if e.Spec.AuthSecret == nil {
+			e.Spec.AuthSecret = &SecretReference{}
+		}
+		if e.Spec.AuthSecret.Kind == "" {
+			e.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret
+		}
+	}
+
 	// set default elasticsearch node name prefix
 	if e.Spec.Topology != nil {
 		// Required nodes, must exist!
@@ -862,7 +872,8 @@ func (e *Elasticsearch) SetDefaultInternalUsersAndRoleMappings(esVersion *catalo
 		userSpec.SecretName = e.GetAuthSecretName()
 	}
 	e.Spec.AuthSecret = &SecretReference{
-		LocalObjectReference: core.LocalObjectReference{
+		TypedLocalObjectReference: app_api.TypedLocalObjectReference{
+			Kind: "Secret",
 			Name: userSpec.SecretName,
 		},
 	}
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/helpers.go
index 72d974668..30556832a 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/helpers.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/helpers.go
@@ -182,5 +182,5 @@ func GetActivationTimeFromSecret(secretName *core.Secret) (*metav1.Time, error)
 }
 
 func IsVirtualAuthSecretReferred(authSecret *SecretReference) bool {
-	return authSecret != nil && authSecret.ApiGroup == vsecretapi.GroupName
+	return authSecret != nil && authSecret.APIGroup == vsecretapi.GroupName
 }
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go
index a7821c7e9..4a1f0fe37 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go
@@ -293,6 +293,18 @@ func (k *Kafka) CertSecretVolumeMountPath(configDir string, cert string) string
 	return filepath.Join(configDir, cert)
 }
 
+func (k *Kafka) ServiceAccountName() string {
+	return k.OffshootName()
+}
+
+func (k *Kafka) ClusterRoleName() string {
+	return meta_util.NameWithSuffix(k.OffshootName(), "clusterrole")
+}
+
+func (k *Kafka) ClusterRoleBindingName() string {
+	return meta_util.NameWithSuffix(k.OffshootName(), "clusterrolebinding")
+}
+
 func (k *Kafka) PVCName(alias string) string {
 	return meta_util.NameWithSuffix(k.Name, alias)
 }
@@ -340,6 +352,15 @@ func (k *Kafka) SetDefaults(kc client.Client) {
 		k.Spec.StorageType = StorageTypeDurable
 	}
 
+	if !k.Spec.DisableSecurity {
+		if k.Spec.AuthSecret == nil {
+			k.Spec.AuthSecret = &SecretReference{}
+		}
+		if k.Spec.AuthSecret.Kind == "" {
+			k.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret
+		}
+	}
+
 	var kfVersion catalog.KafkaVersion
 	err := kc.Get(context.TODO(), types.NamespacedName{Name: k.Spec.Version}, &kfVersion)
 	if err != nil {
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go
index ed77f5806..15c185f61 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go @@ -79,6 +79,9 @@ type KafkaSpec struct { // To enable ssl for http layer EnableSSL bool `json:"enableSSL,omitempty"` + // Broker Rack defines the rack awareness configuration for Kafka brokers + BrokerRack *BrokerRack `json:"brokerRack,omitempty"` + // disable security. It disables authentication security of user. // If unset, default is false // +optional @@ -157,6 +160,11 @@ type KafkaNode struct { PodTemplate ofstv2.PodTemplateSpec `json:"podTemplate,omitempty"` } +type BrokerRack struct { + // TopologyKey is the node label key which is used to identify the rack of a broker + TopologyKey string `json:"topologyKey,omitempty"` +} + // KafkaStatus defines the observed state of Kafka type KafkaStatus struct { // Specifies the current phase of the database diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mariadb_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mariadb_helpers.go index 25406e55f..a1ccaf345 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mariadb_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mariadb_helpers.go @@ -82,6 +82,10 @@ func (m MariaDB) OffshootDistributedTLSName() string { return meta_util.NameWithSuffix(m.Name, kubedb.DistributedTLSSecretNameSuffix) } +func (m MariaDB) OffshootDistributedPromethuesSecretName() string { + return meta_util.NameWithSuffix(m.Name, kubedb.DistributedPromethuesSecretNameSuffix) +} + func (m MariaDB) OffshootSelectors() map[string]string { label := map[string]string{ meta_util.NameLabelKey: m.ResourceFQN(), @@ -173,19 +177,17 @@ func (m MariaDB) StandbyServiceName() string { } func (m MariaDB) IsCluster() bool { - return pointer.Int32(m.Spec.Replicas) > 1 + return m.Spec.Topology != nil } func (m MariaDB) IsGaleraCluster() bool { return m.Spec.Topology != nil && - m.IsCluster() && m.Spec.Topology.Mode != nil && *m.Spec.Topology.Mode == MariaDBModeGaleraCluster } func (m MariaDB) IsMariaDBReplication() bool { return m.Spec.Topology != nil && - m.IsCluster() && m.Spec.Topology.Mode != nil && *m.Spec.Topology.Mode == MariaDBModeReplication } @@ -285,6 +287,13 @@ func (m *MariaDB) SetDefaults(mdVersion *v1alpha1.MariaDBVersion) { m.Spec.DeletionPolicy = DeletionPolicyDelete } + if m.Spec.AuthSecret == nil { + m.Spec.AuthSecret = &SecretReference{} + } + if m.Spec.AuthSecret.Kind == "" { + m.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if m.Spec.Replicas == nil { m.Spec.Replicas = pointer.Int32P(1) } @@ -297,7 +306,7 @@ func (m *MariaDB) SetDefaults(mdVersion *v1alpha1.MariaDBVersion) { m.Spec.PodTemplate.Spec.ServiceAccountName = m.OffshootName() } if m.Spec.Init != nil && m.Spec.Init.Archiver != nil && m.Spec.Init.Archiver.ReplicationStrategy == nil { - m.Spec.Init.Archiver.ReplicationStrategy = ptr.To(ReplicationStrategyNone) + m.Spec.Init.Archiver.ReplicationStrategy = ptr.To(ReplicationStrategySync) } m.setDefaultContainerSecurityContext(mdVersion, &m.Spec.PodTemplate) m.setDefaultContainerResourceLimits(&m.Spec.PodTemplate) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/memcached_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/memcached_helpers.go index 24a62cf76..373ef4b92 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/memcached_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/memcached_helpers.go @@ -209,6 +209,15 @@ func (m *Memcached) SetDefaults(mcVersion *catalog.MemcachedVersion) { m.Spec.PodTemplate.Spec.ServiceAccountName = m.OffshootName() } + if 
!m.Spec.DisableAuth { + if m.Spec.AuthSecret == nil { + m.Spec.AuthSecret = &SecretReference{} + } + if m.Spec.AuthSecret.Kind == "" { + m.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + m.setDefaultContainerSecurityContext(mcVersion, &m.Spec.PodTemplate) m.setDefaultContainerResourceLimits(&m.Spec.PodTemplate) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go index 5f71609f9..353cc907c 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go @@ -631,6 +631,13 @@ func (m *MongoDB) SetDefaults(mgVersion *v1alpha1.MongoDBVersion) { m.Spec.DeletionPolicy = DeletionPolicyDelete } + if m.Spec.AuthSecret == nil { + m.Spec.AuthSecret = &SecretReference{} + } + if m.Spec.AuthSecret.Kind == "" { + m.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if m.Spec.SSLMode == "" { if m.Spec.TLS != nil { m.Spec.SSLMode = SSLModeRequireSSL diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mysql_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mysql_helpers.go index d89f33140..f159ea759 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mysql_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mysql_helpers.go @@ -284,6 +284,13 @@ func (m *MySQL) SetDefaults(myVersion *v1alpha1.MySQLVersion) error { m.Spec.DeletionPolicy = DeletionPolicyDelete } + if m.Spec.AuthSecret == nil { + m.Spec.AuthSecret = &SecretReference{} + } + if m.Spec.AuthSecret.Kind == "" { + m.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if m.UsesGroupReplication() || m.IsInnoDBCluster() || m.IsSemiSync() { if m.Spec.Replicas == nil { m.Spec.Replicas = pointer.Int32P(kubedb.MySQLDefaultGroupSize) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go index d388e8a49..36d8eba54 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -508,6 +509,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1.Archiver": schema_apimachinery_apis_kubedb_v1_Archiver(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.ArchiverRecovery": schema_apimachinery_apis_kubedb_v1_ArchiverRecovery(ref), 
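
Editorial aside on the SetDefaults hunks above: the same defaulting block is stamped into Elasticsearch, Kafka, MariaDB, Memcached, MongoDB and MySQL here, and into PerconaXtraDB, PgBouncer, Postgres, ProxySQL, Redis and RedisSentinel further below. A condensed sketch of the pattern, with local stand-in types so it compiles on its own; nothing here is beyond what the hunks show:

```go
package main

import "fmt"

// Local stand-ins for the kubedb v1 types, for illustration only.
type TypedLocalObjectReference struct{ APIGroup, Kind, Name string }

type SecretReference struct {
	TypedLocalObjectReference
	SecretStoreName string
}

const ResourceKindSecret = "Secret"

// defaultAuthSecret condenses the block each SetDefaults gains: when security
// is enabled, ensure the auth secret reference exists and default its Kind
// to "Secret" if unset.
func defaultAuthSecret(ref *SecretReference, disableSecurity bool) *SecretReference {
	if disableSecurity {
		return ref
	}
	if ref == nil {
		ref = &SecretReference{}
	}
	if ref.Kind == "" {
		ref.Kind = ResourceKindSecret
	}
	return ref
}

func main() {
	fmt.Printf("%+v\n", *defaultAuthSecret(nil, false)) // Kind defaults to "Secret"
}
```
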
"kubedb.dev/apimachinery/apis/kubedb/v1.AutoOpsSpec": schema_apimachinery_apis_kubedb_v1_AutoOpsSpec(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1.BrokerRack": schema_apimachinery_apis_kubedb_v1_BrokerRack(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.ConnectionPoolConfig": schema_apimachinery_apis_kubedb_v1_ConnectionPoolConfig(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.ConsumerNamespaces": schema_apimachinery_apis_kubedb_v1_ConsumerNamespaces(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.Database": schema_apimachinery_apis_kubedb_v1_Database(ref), @@ -586,6 +588,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1.ProxySQLStatus": schema_apimachinery_apis_kubedb_v1_ProxySQLStatus(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.RecoveryTarget": schema_apimachinery_apis_kubedb_v1_RecoveryTarget(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.Redis": schema_apimachinery_apis_kubedb_v1_Redis(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1.RedisAclSpec": schema_apimachinery_apis_kubedb_v1_RedisAclSpec(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.RedisClusterSpec": schema_apimachinery_apis_kubedb_v1_RedisClusterSpec(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.RedisList": schema_apimachinery_apis_kubedb_v1_RedisList(ref), "kubedb.dev/apimachinery/apis/kubedb/v1.RedisSentinel": schema_apimachinery_apis_kubedb_v1_RedisSentinel(ref), @@ -22673,7 +22676,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22699,7 +22702,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22707,7 +22710,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23123,6 +23126,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -26153,6 +26198,25 @@ func schema_apimachinery_apis_kubedb_v1_AutoOpsSpec(ref common.ReferenceCallback } } +func schema_apimachinery_apis_kubedb_v1_BrokerRack(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "topologyKey": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyKey is the node label key which is used to identify the rack of a broker", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_apimachinery_apis_kubedb_v1_ConnectionPoolConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -27461,6 +27525,12 @@ func schema_apimachinery_apis_kubedb_v1_KafkaSpec(ref common.ReferenceCallback) Format: "", }, }, + "brokerRack": { + SchemaProps: spec.SchemaProps{ + Description: "Broker Rack defines the rack awareness configuration for Kafka brokers", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1.BrokerRack"), + }, + }, "disableSecurity": { SchemaProps: spec.SchemaProps{ Description: "disable security. It disables authentication security of user. 
If unset, default is false", @@ -27551,7 +27621,7 @@ func schema_apimachinery_apis_kubedb_v1_KafkaSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.KafkaClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1.KafkaCruiseControl", "kubedb.dev/apimachinery/apis/kubedb/v1.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.BrokerRack", "kubedb.dev/apimachinery/apis/kubedb/v1.KafkaClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1.KafkaCruiseControl", "kubedb.dev/apimachinery/apis/kubedb/v1.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"}, } } @@ -30926,6 +30996,41 @@ func schema_apimachinery_apis_kubedb_v1_Redis(ref common.ReferenceCallback) comm } } +func schema_apimachinery_apis_kubedb_v1_RedisAclSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef holds the password against which ACLs will be created if Rules are given.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "rules": { + SchemaProps: spec.SchemaProps{ + Description: "Rules specifies the ACL rules to be applied to the user associated with the provided SecretRef. 
If provided, the system will update the ACLs for this user to ensure they are in sync with the new authentication settings.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + func schema_apimachinery_apis_kubedb_v1_RedisClusterSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -31357,6 +31462,12 @@ func schema_apimachinery_apis_kubedb_v1_RedisSpec(ref common.ReferenceCallback) Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"), }, }, + "acl": { + SchemaProps: spec.SchemaProps{ + Description: "Redis ACL Configuration", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1.RedisAclSpec"), + }, + }, "disableAuth": { SchemaProps: spec.SchemaProps{ Description: "If disable Auth true then don't create any auth secret", @@ -31441,7 +31552,7 @@ func schema_apimachinery_apis_kubedb_v1_RedisSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.AllowedConsumers", "kubedb.dev/apimachinery/apis/kubedb/v1.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.InitSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.RedisClusterSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.RedisSentinelRef", "kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.AllowedConsumers", "kubedb.dev/apimachinery/apis/kubedb/v1.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.InitSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.RedisAclSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.RedisClusterSpec", "kubedb.dev/apimachinery/apis/kubedb/v1.RedisSentinelRef", "kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"}, } } @@ -31725,23 +31836,32 @@ func schema_apimachinery_apis_kubedb_v1_SecretReference(ref common.ReferenceCall SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ + "secretStoreName": { + SchemaProps: spec.SchemaProps{ + Description: "SecretSource references the secret manager used for virtual secret", + Type: []string{"string"}, + Format: "", + }, + }, "apiGroup": { SchemaProps: spec.SchemaProps{ - Description: "Two possible groups: \"\", virtual-secrets.dev", + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "secretStoreName": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "SecretSource references the secret manager used for virtual secret", + Description: "Kind is the type of resource being referenced", + Default: "", Type: []string{"string"}, Format: "", }, }, "name": { SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Description: "Name is the name of resource being referenced", Default: "", Type: []string{"string"}, Format: "", @@ -31766,6 +31886,7 @@ func schema_apimachinery_apis_kubedb_v1_SecretReference(ref common.ReferenceCall }, }, }, + Required: []string{"kind", "name"}, }, }, Dependencies: []string{ diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/perconaxtradb_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/perconaxtradb_helpers.go index d3ffa5746..ebe83cdca 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/perconaxtradb_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/perconaxtradb_helpers.go @@ -346,6 +346,13 @@ func (p *PerconaXtraDB) SetHealthCheckerDefaults() { } func (p *PerconaXtraDB) SetTLSDefaults() { + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if p.Spec.TLS == nil || p.Spec.TLS.IssuerRef == nil { return } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_helpers.go index 1bdfd9a53..4f31f839f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_helpers.go @@ -227,6 +227,13 @@ func (p *PgBouncer) SetDefaults(pgBouncerVersion *catalog.PgBouncerVersion, uses } } + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + p.setPgBouncerContainerDefaults(&p.Spec.PodTemplate, pgBouncerVersion) p.setDefaultPodSecurityContext() diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/postgres_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/postgres_helpers.go index c8917e62d..fc75de9e4 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/postgres_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/postgres_helpers.go @@ -249,6 +249,13 @@ func (p *Postgres) SetDefaults(postgresVersion *catalog.PostgresVersion) { p.Spec.DeletionPolicy = DeletionPolicyDelete } + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if p.Spec.LeaderElection == nil { p.Spec.LeaderElection = &PostgreLeaderElectionConfig{ // The upper limit of election timeout is 50000ms (50s), which should only be used when deploying a diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/proxysql_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/proxysql_helpers.go index b0e991207..a9f36d85b 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/proxysql_helpers.go +++ 
b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/proxysql_helpers.go @@ -194,6 +194,13 @@ func (p *ProxySQL) SetDefaults(psVersion *v1alpha1.ProxySQLVersion, usesAcme boo p.Spec.Replicas = pointer.Int32P(1) } + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + p.setDefaultContainerSecurityContext(psVersion, &p.Spec.PodTemplate) p.Spec.Monitor.SetDefaults() diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_helpers.go index 98a2b8bfc..596a25105 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_helpers.go @@ -258,6 +258,15 @@ func (r *Redis) SetDefaults(rdVersion *catalog.RedisVersion) error { r.Spec.StorageType = StorageTypeDurable } + if !r.Spec.DisableAuth { + if r.Spec.AuthSecret == nil { + r.Spec.AuthSecret = &SecretReference{} + } + if r.Spec.AuthSecret.Kind == "" { + r.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + r.setDefaultContainerSecurityContext(rdVersion, &r.Spec.PodTemplate) r.setDefaultContainerResourceLimits(&r.Spec.PodTemplate) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_sentinel_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_sentinel_helpers.go index 5a56606cc..3d4550e42 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_sentinel_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_sentinel_helpers.go @@ -213,6 +213,15 @@ func (rs *RedisSentinel) SetDefaults(rdVersion *catalog.RedisVersion) error { rs.Spec.StorageType = StorageTypeDurable } + if !rs.Spec.DisableAuth { + if rs.Spec.AuthSecret == nil { + rs.Spec.AuthSecret = &SecretReference{} + } + if rs.Spec.AuthSecret.Kind == "" { + rs.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + rs.setDefaultContainerSecurityContext(rdVersion, &rs.Spec.PodTemplate) rs.setDefaultContainerResourceLimits(&rs.Spec.PodTemplate) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_types.go index b6a7c67de..a4b47bab2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/redis_types.go @@ -90,6 +90,10 @@ type RedisSpec struct { // +optional AuthSecret *SecretReference `json:"authSecret,omitempty"` + // Redis ACL Configuration + // +optional + Acl *RedisAclSpec `json:"acl,omitempty"` + // If disable Auth true then don't create any auth secret // +optional DisableAuth bool `json:"disableAuth,omitempty"` @@ -178,6 +182,16 @@ type Announce struct { Shards []Shards `json:"shards,omitempty"` } +type RedisAclSpec struct { + // SecretRef holds the password against which ACLs will be created if Rules are given. + // +optional + SecretRef *core.LocalObjectReference `json:"secretRef,omitempty"` + + // Rules specifies the ACL rules to be applied to the user associated with the provided SecretRef. + // If provided, the system will update the ACLs for this user to ensure they are in sync with the new authentication settings. + Rules []string `json:"rules,omitempty"` +} + type Shards struct { // Endpoints contains the cluster-announce information for all the replicas in a shard. 
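
Editorial note: `RedisAclSpec` (secretRef plus a list of rules) and the new `RedisAclUserListKey` ("user_acl.conf") constant suggest the operator materializes per-user ACL entries into a config file. The rendering below is a guess at standard Redis ACL-file syntax ("user <name> on ><password> <rules>"); the operator's actual file format is not shown in this diff, and the credentials are hypothetical.

```go
package main

import (
	"fmt"
	"strings"
)

// RedisAclSpec stands in for the new kubedb v1 type shown above; SecretName
// stands in for SecretRef, which names the secret holding the password.
type RedisAclSpec struct {
	SecretName string
	Rules      []string
}

// aclLine renders one hypothetical user_acl.conf entry; when no rules are
// given, a permissive default is used purely for illustration.
func aclLine(user, password string, acl *RedisAclSpec) string {
	rules := "allkeys allcommands"
	if acl != nil && len(acl.Rules) > 0 {
		rules = strings.Join(acl.Rules, " ")
	}
	return fmt.Sprintf("user %s on >%s %s", user, password, rules)
}

func main() {
	acl := &RedisAclSpec{Rules: []string{"~app:*", "+get", "+set"}}
	fmt.Println(aclLine("default", "s3cr3t", acl)) // hypothetical credentials
}
```
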
 	// This will be used to set cluster-announce-ip/hostname, cluster-announce-port/cluster-announce-tls-port
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/types.go
index 3a72abc98..0f73c8f15 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/types.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/types.go
@@ -20,6 +20,7 @@ import (
 	core "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kmapi "kmodules.xyz/client-go/api/v1"
+	app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
 	ofstv1 "kmodules.xyz/offshoot-api/api/v1"
 )
@@ -190,15 +191,11 @@ type SystemUserSecretsSpec struct {
 }
 
 type SecretReference struct {
-	// +optional
-	// Two possible groups: "", virtual-secrets.dev
-	ApiGroup string `json:"apiGroup,omitempty"`
-
 	// +optional
 	// SecretSource references the secret manager used for virtual secret
 	SecretStoreName string `json:"secretStoreName,omitempty"`
 
-	core.LocalObjectReference `json:",inline,omitempty"`
+	app_api.TypedLocalObjectReference `json:",inline,omitempty"`
 
 	// Recommendation engine will generate RotateAuth opsReq using this field
 	// +optional
 	RotateAfter *metav1.Duration `json:"rotateAfter,omitempty"`
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/zz_generated.deepcopy.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/zz_generated.deepcopy.go
index 4c134967d..8fd8315ec 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/zz_generated.deepcopy.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/zz_generated.deepcopy.go
@@ -203,6 +203,22 @@ func (in *AutoOpsSpec) DeepCopy() *AutoOpsSpec {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerRack) DeepCopyInto(out *BrokerRack) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerRack.
+func (in *BrokerRack) DeepCopy() *BrokerRack {
+	if in == nil {
+		return nil
+	}
+	out := new(BrokerRack)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ConnectionPoolConfig) DeepCopyInto(out *ConnectionPoolConfig) {
 	*out = *in
@@ -971,6 +987,11 @@ func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) {
 		*out = new(corev1.PersistentVolumeClaimSpec)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.BrokerRack != nil {
+		in, out := &in.BrokerRack, &out.BrokerRack
+		*out = new(BrokerRack)
+		**out = **in
+	}
 	if in.AuthSecret != nil {
 		in, out := &in.AuthSecret, &out.AuthSecret
 		*out = new(SecretReference)
@@ -2989,6 +3010,32 @@ func (in *Redis) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisAclSpec) DeepCopyInto(out *RedisAclSpec) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisAclSpec.
+func (in *RedisAclSpec) DeepCopy() *RedisAclSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisAclSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RedisClusterSpec) DeepCopyInto(out *RedisClusterSpec) {
 	*out = *in
@@ -3238,6 +3285,11 @@ func (in *RedisSpec) DeepCopyInto(out *RedisSpec) {
 		*out = new(SecretReference)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Acl != nil {
+		in, out := &in.Acl, &out.Acl
+		*out = new(RedisAclSpec)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Init != nil {
 		in, out := &in.Init, &out.Init
 		*out = new(InitSpec)
@@ -3355,7 +3407,7 @@ func (in *ScriptSourceSpec) DeepCopy() *ScriptSourceSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *SecretReference) DeepCopyInto(out *SecretReference) {
 	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
+	out.TypedLocalObjectReference = in.TypedLocalObjectReference
 	if in.RotateAfter != nil {
 		in, out := &in.RotateAfter, &out.RotateAfter
 		*out = new(metav1.Duration)
diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/conversion.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/conversion.go
index f08f5ed43..68d6987c6 100644
--- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/conversion.go
+++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/conversion.go
@@ -27,6 +27,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/conversion"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
 	mona "kmodules.xyz/monitoring-agent-api/api/v1"
 	ofst "kmodules.xyz/offshoot-api/api/v1"
 )
@@ -96,7 +97,8 @@ func Convert_v1alpha1_ElasticsearchSpec_To_v1alpha2_ElasticsearchSpec(in *Elasti
 	// WARNING: in.AuthPlugin requires manual conversion: does not exist in peer-type
 	if in.DatabaseSecret != nil {
 		out.AuthSecret = &v1alpha2.SecretReference{
-			LocalObjectReference: v1.LocalObjectReference{
+			TypedLocalObjectReference: app_api.TypedLocalObjectReference{
+				Kind: "Secret",
 				Name: in.DatabaseSecret.SecretName,
 			},
 		}
@@ -255,7 +257,8 @@ func Convert_v1alpha1_EtcdSpec_To_v1alpha2_EtcdSpec(in *EtcdSpec, out *v1alpha2.
out.Storage = (*v1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } @@ -331,7 +334,8 @@ func Convert_v1alpha1_MariaDBSpec_To_v1alpha2_MariaDBSpec(in *MariaDBSpec, out * out.Storage = (*v1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } @@ -493,7 +497,8 @@ func Convert_v1alpha1_MongoDBSpec_To_v1alpha2_MongoDBSpec(in *MongoDBSpec, out * out.Storage = (*v1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } @@ -687,7 +692,8 @@ func Convert_v1alpha1_MySQLSpec_To_v1alpha2_MySQLSpec(in *MySQLSpec, out *v1alph out.Storage = (*v1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } @@ -799,7 +805,8 @@ func Convert_v1alpha1_PerconaXtraDBSpec_To_v1alpha2_PerconaXtraDBSpec(in *Percon out.Storage = (*v1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } @@ -889,7 +896,8 @@ func Convert_v1alpha1_PostgresSpec_To_v1alpha2_PostgresSpec(in *PostgresSpec, ou } if in.DatabaseSecret != nil { out.AuthSecret = &v1alpha2.SecretReference{ - LocalObjectReference: v1.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: in.DatabaseSecret.SecretName, }, } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/openapi_generated.go index 3f488ad07..f95f96fca 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), 
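
Editorial note: every legacy conversion above now stamps `Kind: "Secret"` into the converted reference, and with the embedded `TypedLocalObjectReference` the group field is spelled `APIGroup` (hence the helpers.go fix earlier). A self-contained sketch of both; the virtual-secrets group name is taken from the deleted comment in types.go ("", virtual-secrets.dev), not from the vsecretapi package itself:

```go
package main

import "fmt"

// Local stand-ins for the kubedb v1 types, for illustration only.
type TypedLocalObjectReference struct{ APIGroup, Kind, Name string }
type SecretReference struct{ TypedLocalObjectReference }

// legacyToSecretReference condenses the conversion pattern repeated above:
// a v1alpha1 DatabaseSecret name becomes a typed reference with Kind "Secret".
func legacyToSecretReference(secretName string) *SecretReference {
	return &SecretReference{TypedLocalObjectReference{Kind: "Secret", Name: secretName}}
}

// isVirtualAuthSecret mirrors the renamed-field check in helpers.go, with the
// group name assumed from the old types.go comment.
func isVirtualAuthSecret(ref *SecretReference) bool {
	return ref != nil && ref.APIGroup == "virtual-secrets.dev"
}

func main() {
	ref := legacyToSecretReference("legacy-db-auth") // hypothetical secret name
	fmt.Println(ref.Kind, ref.Name, isVirtualAuthSecret(ref))
}
```
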
"kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22612,7 +22613,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22638,7 +22639,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22646,7 +22647,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23062,6 +23063,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go index 2feaef10a..e4f93a65f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go @@ -40,6 +40,7 @@ import ( coreutil "kmodules.xyz/client-go/core/v1" meta_util "kmodules.xyz/client-go/meta" "kmodules.xyz/client-go/policy/secomp" + app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" mona "kmodules.xyz/monitoring-agent-api/api/v1" ofst "kmodules.xyz/offshoot-api/api/v2" @@ -300,13 +301,23 @@ func (r *Cassandra) SetDefaults(kc client.Client) { if r.Spec.EnableSSL { if r.Spec.KeystoreCredSecret == nil { r.Spec.KeystoreCredSecret = &SecretReference{ - LocalObjectReference: core.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: r.CassandraKeystoreCredSecretName(), }, } } } + if !r.Spec.DisableSecurity { + if r.Spec.AuthSecret == nil { + r.Spec.AuthSecret = &SecretReference{} + } + if r.Spec.AuthSecret.Kind == "" { + r.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + var casVersion catalog.CassandraVersion err := kc.Get(context.TODO(), types.NamespacedName{ Name: r.Spec.Version, diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_helpers.go index b817a768c..bd7e85d77 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_helpers.go @@ -332,6 +332,16 @@ func (c *ClickHouse) SetDefaults(kc client.Client) { klog.Errorf("can't get the clickhouse version object %s for %s \n", err.Error(), c.Spec.Version) return } + + if !c.Spec.DisableSecurity { + if c.Spec.AuthSecret == nil { + c.Spec.AuthSecret = &SecretReference{} + } + if c.Spec.AuthSecret.Kind == "" { + c.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + if c.Spec.TLS != nil { if c.Spec.TLS.ClientCACertificateRefs != nil { for i, secret := range c.Spec.TLS.ClientCACertificateRefs { @@ -421,7 +431,25 @@ func (c *ClickHouse) SetDefaults(kc client.Client) { apis.SetDefaultResourceLimits(&dbContainer.Resources, kubedb.ClickHouseDefaultResources) } } + c.SetTLSDefaults() c.SetHealthCheckerDefaults() + if c.Spec.Monitor != nil { + if c.Spec.Monitor.Prometheus == nil { + c.Spec.Monitor.Prometheus = &mona.PrometheusSpec{} + } + if c.Spec.Monitor.Prometheus != nil && c.Spec.Monitor.Prometheus.Exporter.Port == 0 { + 
c.Spec.Monitor.Prometheus.Exporter.Port = kubedb.ClickhousePromethues + } + c.Spec.Monitor.SetDefaults() + if c.Spec.Monitor.Prometheus != nil { + if c.Spec.Monitor.Prometheus.Exporter.SecurityContext.RunAsUser == nil { + c.Spec.Monitor.Prometheus.Exporter.SecurityContext.RunAsUser = chVersion.Spec.SecurityContext.RunAsUser + } + if c.Spec.Monitor.Prometheus.Exporter.SecurityContext.RunAsGroup == nil { + c.Spec.Monitor.Prometheus.Exporter.SecurityContext.RunAsGroup = chVersion.Spec.SecurityContext.RunAsUser + } + } + } } func (c *ClickHouse) setDefaultContainerSecurityContext(chVersion *catalog.ClickHouseVersion, podTemplate *ofst.PodTemplateSpec) { diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go index 8ae9e2f52..560dd160f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go @@ -77,6 +77,10 @@ type ClickHouseSpec struct { // Storage to specify how storage shall be used. Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"` + // Init is used to initialize database + // +optional + Init *InitSpec `json:"init,omitempty"` + // disable security. It disables authentication security of user. // If unset, default is false // +optional diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_helpers.go index c35b644d8..257d363b2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_helpers.go @@ -44,6 +44,7 @@ import ( coreutil "kmodules.xyz/client-go/core/v1" meta_util "kmodules.xyz/client-go/meta" "kmodules.xyz/client-go/policy/secomp" + app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" mona "kmodules.xyz/monitoring-agent-api/api/v1" ofst "kmodules.xyz/offshoot-api/api/v2" @@ -434,10 +435,20 @@ func (d *Druid) SetDefaults(kc client.Client) { d.Spec.DeletionPolicy = DeletionPolicyDelete } + if !d.Spec.DisableSecurity { + if d.Spec.AuthSecret == nil { + d.Spec.AuthSecret = &SecretReference{} + } + if d.Spec.AuthSecret.Kind == "" { + d.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + if d.Spec.EnableSSL { if d.Spec.KeystoreCredSecret == nil { d.Spec.KeystoreCredSecret = &SecretReference{ - LocalObjectReference: core.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: d.DruidSecretName(kubedb.DruidKeystoreSecretKey), }, } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/elasticsearch_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/elasticsearch_helpers.go index bc22cd810..24e903f49 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/elasticsearch_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/elasticsearch_helpers.go @@ -41,6 +41,7 @@ import ( core_util "kmodules.xyz/client-go/core/v1" meta_util "kmodules.xyz/client-go/meta" "kmodules.xyz/client-go/policy/secomp" + app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" mona "kmodules.xyz/monitoring-agent-api/api/v1" ofst "kmodules.xyz/offshoot-api/api/v1" @@ -834,7 +835,8 @@ func (e *Elasticsearch) setDefaultInternalUsersAndRoleMappings(esVersion *catalo 
userSpec.SecretName = e.DefaultUserCredSecretName(username) } e.Spec.AuthSecret = &SecretReference{ - LocalObjectReference: core.LocalObjectReference{ + TypedLocalObjectReference: app_api.TypedLocalObjectReference{ + Kind: "Secret", Name: userSpec.SecretName, }, } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_helpers.go index 8f21a245d..ed4752f5a 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_helpers.go @@ -230,6 +230,13 @@ func (f *FerretDB) SetDefaults(kc client.Client) { f.Spec.SSLMode = SSLModeDisabled } + if f.Spec.AuthSecret == nil { + f.Spec.AuthSecret = &SecretReference{} + } + if f.Spec.AuthSecret.Kind == "" { + f.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + var frVersion catalog.FerretDBVersion err := kc.Get(context.TODO(), types.NamespacedName{ Name: f.Spec.Version, diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/hazelcast_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/hazelcast_helpers.go index b363b417e..313ca6ad3 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/hazelcast_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/hazelcast_helpers.go @@ -233,6 +233,15 @@ func (h *Hazelcast) setDefaultProbes(podTemplate *ofst.PodTemplateSpec) { } func (h *Hazelcast) SetTLSDefaults() { + if !h.Spec.DisableSecurity { + if h.Spec.AuthSecret == nil { + h.Spec.AuthSecret = &SecretReference{} + } + if h.Spec.AuthSecret.Kind == "" { + h.Spec.AuthSecret.Kind = kube.ResourceKindSecret + } + } + if h.Spec.TLS == nil || h.Spec.TLS.IssuerRef == nil { return } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/helpers.go index 82b0afd15..4b3bc1809 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/helpers.go @@ -178,5 +178,5 @@ func UsesAcmeIssuer(kc client.Client, ns string, issuerRef core.TypedLocalObject } func IsVirtualAuthSecretReferred(authSecret *SecretReference) bool { - return authSecret != nil && authSecret.ApiGroup == vsecretapi.GroupName + return authSecret != nil && authSecret.APIGroup == vsecretapi.GroupName } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ignite_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ignite_helpers.go index 1c5c18ef5..5e4cfb862 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ignite_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ignite_helpers.go @@ -116,6 +116,15 @@ func (i *Ignite) SetDefaults(kc client.Client) { i.Spec.StorageType = StorageTypeDurable } + if !i.Spec.DisableSecurity { + if i.Spec.AuthSecret == nil { + i.Spec.AuthSecret = &SecretReference{} + } + if i.Spec.AuthSecret.Kind == "" { + i.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + var igVersion catalog.IgniteVersion err := kc.Get(context.TODO(), types.NamespacedName{ Name: i.Spec.Version, diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go index 5157ac3ef..9a3796e19 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go @@ -78,6 +78,9 @@ type KafkaSpec struct { // To enable ssl 
for http layer EnableSSL bool `json:"enableSSL,omitempty"` + // Broker Rack defines the rack awareness configuration for Kafka brokers + BrokerRack *BrokerRack `json:"brokerRack,omitempty"` + // disable security. It disables authentication security of user. // If unset, default is false // +optional @@ -166,6 +169,11 @@ type KafkaNode struct { Tolerations []core.Toleration `json:"tolerations,omitempty"` } +type BrokerRack struct { + // TopologyKey is the node label key which is used to identify the rack of a broker + TopologyKey string `json:"topologyKey,omitempty"` +} + // KafkaStatus defines the observed state of Kafka type KafkaStatus struct { // Specifies the current phase of the database diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/mssqlserver_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/mssqlserver_helpers.go index b2f8e334b..8b234c0fa 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/mssqlserver_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/mssqlserver_helpers.go @@ -364,6 +364,13 @@ func (m *MSSQLServer) SetDefaults(kc client.Client) { m.Spec.DeletionPolicy = DeletionPolicyDelete } + if m.Spec.AuthSecret == nil { + m.Spec.AuthSecret = &SecretReference{} + } + if m.Spec.AuthSecret.Kind == "" { + m.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if m.IsStandalone() { if m.Spec.Replicas == nil { m.Spec.Replicas = pointer.Int32P(1) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go index 8dba6fdea..3c7b6b929 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -508,6 +509,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Archiver": schema_apimachinery_apis_kubedb_v1alpha2_Archiver(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ArchiverRecovery": schema_apimachinery_apis_kubedb_v1alpha2_ArchiverRecovery(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec": schema_apimachinery_apis_kubedb_v1alpha2_AutoOpsSpec(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.BrokerRack": schema_apimachinery_apis_kubedb_v1alpha2_BrokerRack(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra": schema_apimachinery_apis_kubedb_v1alpha2_Cassandra(ref), 
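Note: BrokerRack, added above, carries a single node-label key the operator can use for Kafka rack awareness, i.e. deriving each broker's rack from the label on its node. A hedged usage sketch; the zone label shown is the conventional Kubernetes topology label, assumed here rather than taken from the diff:

    package example

    import kdb "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"

    // enableRackAwareness is illustrative only: it fills the new field on an
    // existing Kafka object with a node label key to rack brokers by zone.
    func enableRackAwareness(k *kdb.Kafka) {
        if k.Spec.BrokerRack == nil {
            k.Spec.BrokerRack = &kdb.BrokerRack{
                TopologyKey: "topology.kubernetes.io/zone",
            }
        }
    }
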
"kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraApp": schema_apimachinery_apis_kubedb_v1alpha2_CassandraApp(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraList": schema_apimachinery_apis_kubedb_v1alpha2_CassandraList(ref), @@ -22794,7 +22796,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22820,7 +22822,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22828,7 +22830,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23244,6 +23246,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -26274,6 +26318,25 @@ func schema_apimachinery_apis_kubedb_v1alpha2_AutoOpsSpec(ref common.ReferenceCa } } +func schema_apimachinery_apis_kubedb_v1alpha2_BrokerRack(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "topologyKey": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyKey is the node label key which is used to identify the rack of a broker", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_apimachinery_apis_kubedb_v1alpha2_Cassandra(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -26825,6 +26888,12 @@ func schema_apimachinery_apis_kubedb_v1alpha2_ClickHouseSpec(ref common.Referenc Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), }, }, + "init": { + SchemaProps: spec.SchemaProps{ + Description: "Init is used to initialize database", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.InitSpec"), + }, + }, "disableSecurity": { SchemaProps: spec.SchemaProps{ Description: "disable security. It disables authentication security of user. 
If unset, default is false", @@ -26909,7 +26978,7 @@ func schema_apimachinery_apis_kubedb_v1alpha2_ClickHouseSpec(ref common.Referenc }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClickHouseTLSConfig", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClickHouseTLSConfig", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.InitSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"}, } } @@ -30259,6 +30328,12 @@ func schema_apimachinery_apis_kubedb_v1alpha2_KafkaSpec(ref common.ReferenceCall Format: "", }, }, + "brokerRack": { + SchemaProps: spec.SchemaProps{ + Description: "Broker Rack defines the rack awareness configuration for Kafka brokers", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.BrokerRack"), + }, + }, "disableSecurity": { SchemaProps: spec.SchemaProps{ Description: "disable security. It disables authentication security of user. 
If unset, default is false", @@ -30349,7 +30424,7 @@ func schema_apimachinery_apis_kubedb_v1alpha2_KafkaSpec(ref common.ReferenceCall }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.KafkaClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.KafkaCruiseControl", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/client-go/api/v1.TLSConfig", "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.BrokerRack", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.KafkaClusterTopology", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.KafkaCruiseControl", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"}, } } @@ -36138,23 +36213,32 @@ func schema_apimachinery_apis_kubedb_v1alpha2_SecretReference(ref common.Referen SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ + "secretStoreName": { + SchemaProps: spec.SchemaProps{ + Description: "SecretStoreName references the secret manager used for virtual secret", + Type: []string{"string"}, + Format: "", + }, + }, "apiGroup": { SchemaProps: spec.SchemaProps{ - Description: "Two possible groups: \"\", virtual-secrets.dev", + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "secretStoreName": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "SecretStoreName references the secret manager used for virtual secret", + Description: "Kind is the type of resource being referenced", + Default: "", Type: []string{"string"}, Format: "", }, }, "name": { SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Description: "Name is the name of resource being referenced", Default: "", Type: []string{"string"}, Format: "", @@ -36179,6 +36263,7 @@ func schema_apimachinery_apis_kubedb_v1alpha2_SecretReference(ref common.Referen }, }, }, + Required: []string{"kind", "name"}, }, }, Dependencies: []string{ diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/oracle_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/oracle_helpers.go index c28abc492..7fcb606e1 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/oracle_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/oracle_helpers.go @@ -293,6 +293,13 @@ func (o *Oracle) SetDefaults(kc client.Client) { o.Spec.StorageType = StorageTypeDurable } + if o.Spec.AuthSecret == nil { + o.Spec.AuthSecret = &SecretReference{} + } + if o.Spec.AuthSecret.Kind == "" { + o.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + o.SetListenerDefaults() o.initializePodTemplates() diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgpool_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgpool_helpers.go index bb792399f..f1c9ef1cd 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgpool_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgpool_helpers.go @@ -283,6 +283,13 @@ func (p *Pgpool) GetCertSecretName(alias PgpoolCertificateAlias) string { } func (p *Pgpool) SetTLSDefaults() { + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if p.Spec.TLS == nil || p.Spec.TLS.IssuerRef == nil { return } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/postgres_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/postgres_helpers.go index 25551b81c..c3e633aba 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/postgres_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/postgres_helpers.go @@ -200,6 +200,13 @@ func (p *Postgres) SetDefaults(postgresVersion *catalog.PostgresVersion, topolog p.Spec.TerminationPolicy = DeletionPolicyDelete } + if p.Spec.AuthSecret == nil { + p.Spec.AuthSecret = &SecretReference{} + } + if p.Spec.AuthSecret.Kind == "" { + p.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if p.Spec.LeaderElection == nil { p.Spec.LeaderElection = &PostgreLeaderElectionConfig{ // The upper limit of election timeout is 50000ms (50s), which should only be used when deploying a diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go index bcaf12b92..9f1e9fbe2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go @@ -288,6 +288,15 @@ func (r *RabbitMQ) SetDefaults(kc client.Client) { r.Spec.StorageType = StorageTypeDurable } + if !r.Spec.DisableSecurity { + if r.Spec.AuthSecret == nil { + r.Spec.AuthSecret = &SecretReference{} + } + if r.Spec.AuthSecret.Kind == "" { + r.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + var rmVersion catalog.RabbitMQVersion err := kc.Get(context.TODO(), types.NamespacedName{ Name: r.Spec.Version, diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_helpers.go 
b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_helpers.go index 7432bc1f2..d6fa1d1b2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_helpers.go @@ -330,6 +330,13 @@ func (s *Singlestore) SetDefaults(kc client.Client) { s.Spec.DeletionPolicy = DeletionPolicyDelete } + if s.Spec.AuthSecret == nil { + s.Spec.AuthSecret = &SecretReference{} + } + if s.Spec.AuthSecret.Kind == "" { + s.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + if s.Spec.Topology == nil { if s.Spec.PodTemplate == nil { s.Spec.PodTemplate = &ofst.PodTemplateSpec{} diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go index 45fe87fd4..02c69e0f8 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go @@ -314,6 +314,15 @@ func (s *Solr) SetDefaults(kc client.Client) { s.Spec.StorageType = StorageTypeDurable } + if !s.Spec.DisableSecurity { + if s.Spec.AuthSecret == nil { + s.Spec.AuthSecret = &SecretReference{} + } + if s.Spec.AuthSecret.Kind == "" { + s.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + s.SetDefaultsToZooKeeperRef() s.SetZooKeeperObjectRef() diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go index d57d96f6c..88bf08d84 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go @@ -20,6 +20,7 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kmapi "kmodules.xyz/client-go/api/v1" + app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" ofst "kmodules.xyz/offshoot-api/api/v1" ) @@ -207,15 +208,11 @@ type SystemUserSecretsSpec struct { } type SecretReference struct { - // +optional - // Two possible groups: "", virtual-secrets.dev - ApiGroup string `json:"apiGroup,omitempty"` - // +optional // SecretStoreName references the secret manager used for virtual secret SecretStoreName string `json:"secretStoreName,omitempty"` - core.LocalObjectReference `json:",inline,omitempty"` + app_api.TypedLocalObjectReference `json:",inline,omitempty"` // Recommendation engine will generate RotateAuth opsReq using this field // +optional RotateAfter *metav1.Duration `json:"rotateAfter,omitempty"` diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_helpers.go index ebf37930b..6c7452ffb 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_helpers.go @@ -203,6 +203,15 @@ func (z *ZooKeeper) SetDefaults(kc client.Client) { z.Spec.DeletionPolicy = DeletionPolicyHalt } + if !z.Spec.DisableAuth { + if z.Spec.AuthSecret == nil { + z.Spec.AuthSecret = &SecretReference{} + } + if z.Spec.AuthSecret.Kind == "" { + z.Spec.AuthSecret.Kind = kubedb.ResourceKindSecret + } + } + var zkVersion catalog.ZooKeeperVersion err := kc.Get(context.TODO(), types.NamespacedName{Name: z.Spec.Version}, &zkVersion) if err != nil { diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go index aa0064197..048ac051b 
100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go @@ -114,6 +114,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*BrokerRack)(nil), (*v1.BrokerRack)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_BrokerRack_To_v1_BrokerRack(a.(*BrokerRack), b.(*v1.BrokerRack), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.BrokerRack)(nil), (*BrokerRack)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_BrokerRack_To_v1alpha2_BrokerRack(a.(*v1.BrokerRack), b.(*BrokerRack), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ConnectionPoolConfig)(nil), (*v1.ConnectionPoolConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_ConnectionPoolConfig_To_v1_ConnectionPoolConfig(a.(*ConnectionPoolConfig), b.(*v1.ConnectionPoolConfig), scope) }); err != nil { @@ -1197,6 +1207,26 @@ func Convert_v1_AutoOpsSpec_To_v1alpha2_AutoOpsSpec(in *v1.AutoOpsSpec, out *Aut return autoConvert_v1_AutoOpsSpec_To_v1alpha2_AutoOpsSpec(in, out, s) } +func autoConvert_v1alpha2_BrokerRack_To_v1_BrokerRack(in *BrokerRack, out *v1.BrokerRack, s conversion.Scope) error { + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1alpha2_BrokerRack_To_v1_BrokerRack is an autogenerated conversion function. +func Convert_v1alpha2_BrokerRack_To_v1_BrokerRack(in *BrokerRack, out *v1.BrokerRack, s conversion.Scope) error { + return autoConvert_v1alpha2_BrokerRack_To_v1_BrokerRack(in, out, s) +} + +func autoConvert_v1_BrokerRack_To_v1alpha2_BrokerRack(in *v1.BrokerRack, out *BrokerRack, s conversion.Scope) error { + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_BrokerRack_To_v1alpha2_BrokerRack is an autogenerated conversion function. 
+func Convert_v1_BrokerRack_To_v1alpha2_BrokerRack(in *v1.BrokerRack, out *BrokerRack, s conversion.Scope) error { + return autoConvert_v1_BrokerRack_To_v1alpha2_BrokerRack(in, out, s) +} + func autoConvert_v1alpha2_ConnectionPoolConfig_To_v1_ConnectionPoolConfig(in *ConnectionPoolConfig, out *v1.ConnectionPoolConfig, s conversion.Scope) error { out.Port = (*int32)(unsafe.Pointer(in.Port)) out.PoolMode = in.PoolMode @@ -2059,6 +2089,7 @@ func autoConvert_v1alpha2_KafkaSpec_To_v1_KafkaSpec(in *KafkaSpec, out *v1.Kafka out.StorageType = v1.StorageType(in.StorageType) out.Storage = (*corev1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) out.EnableSSL = in.EnableSSL + out.BrokerRack = (*v1.BrokerRack)(unsafe.Pointer(in.BrokerRack)) out.DisableSecurity = in.DisableSecurity out.AuthSecret = (*v1.SecretReference)(unsafe.Pointer(in.AuthSecret)) out.ConfigSecret = (*corev1.LocalObjectReference)(unsafe.Pointer(in.ConfigSecret)) @@ -2097,6 +2128,7 @@ func autoConvert_v1_KafkaSpec_To_v1alpha2_KafkaSpec(in *v1.KafkaSpec, out *Kafka out.StorageType = StorageType(in.StorageType) out.Storage = (*corev1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) out.EnableSSL = in.EnableSSL + out.BrokerRack = (*BrokerRack)(unsafe.Pointer(in.BrokerRack)) out.DisableSecurity = in.DisableSecurity out.AuthSecret = (*SecretReference)(unsafe.Pointer(in.AuthSecret)) out.ConfigSecret = (*corev1.LocalObjectReference)(unsafe.Pointer(in.ConfigSecret)) @@ -4315,6 +4347,7 @@ func autoConvert_v1_RedisSpec_To_v1alpha2_RedisSpec(in *v1.RedisSpec, out *Redis out.StorageType = StorageType(in.StorageType) out.Storage = (*corev1.PersistentVolumeClaimSpec)(unsafe.Pointer(in.Storage)) out.AuthSecret = (*SecretReference)(unsafe.Pointer(in.AuthSecret)) + // WARNING: in.Acl requires manual conversion: does not exist in peer-type out.DisableAuth = in.DisableAuth out.Init = (*InitSpec)(unsafe.Pointer(in.Init)) out.Monitor = (*monitoringagentapiapiv1.AgentSpec)(unsafe.Pointer(in.Monitor)) @@ -4402,9 +4435,8 @@ func Convert_v1_ScriptSourceSpec_To_v1alpha2_ScriptSourceSpec(in *v1.ScriptSourc } func autoConvert_v1alpha2_SecretReference_To_v1_SecretReference(in *SecretReference, out *v1.SecretReference, s conversion.Scope) error { - out.ApiGroup = in.ApiGroup out.SecretStoreName = in.SecretStoreName - out.LocalObjectReference = in.LocalObjectReference + out.TypedLocalObjectReference = in.TypedLocalObjectReference out.RotateAfter = (*metav1.Duration)(unsafe.Pointer(in.RotateAfter)) out.ActiveFrom = (*metav1.Time)(unsafe.Pointer(in.ActiveFrom)) out.ExternallyManaged = in.ExternallyManaged @@ -4417,9 +4449,8 @@ func Convert_v1alpha2_SecretReference_To_v1_SecretReference(in *SecretReference, } func autoConvert_v1_SecretReference_To_v1alpha2_SecretReference(in *v1.SecretReference, out *SecretReference, s conversion.Scope) error { - out.ApiGroup = in.ApiGroup out.SecretStoreName = in.SecretStoreName - out.LocalObjectReference = in.LocalObjectReference + out.TypedLocalObjectReference = in.TypedLocalObjectReference out.RotateAfter = (*metav1.Duration)(unsafe.Pointer(in.RotateAfter)) out.ActiveFrom = (*metav1.Time)(unsafe.Pointer(in.ActiveFrom)) out.ExternallyManaged = in.ExternallyManaged diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go index 7bab18e45..a7f4f20d0 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go +++ 
b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go @@ -204,6 +204,22 @@ func (in *AutoOpsSpec) DeepCopy() *AutoOpsSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerRack) DeepCopyInto(out *BrokerRack) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerRack. +func (in *BrokerRack) DeepCopy() *BrokerRack { + if in == nil { + return nil + } + out := new(BrokerRack) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cassandra) DeepCopyInto(out *Cassandra) { *out = *in @@ -558,6 +574,11 @@ func (in *ClickHouseSpec) DeepCopyInto(out *ClickHouseSpec) { *out = new(corev1.PersistentVolumeClaimSpec) (*in).DeepCopyInto(*out) } + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(InitSpec) + (*in).DeepCopyInto(*out) + } if in.AuthSecret != nil { in, out := &in.AuthSecret, &out.AuthSecret *out = new(SecretReference) @@ -2687,6 +2708,11 @@ func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) { *out = new(corev1.PersistentVolumeClaimSpec) (*in).DeepCopyInto(*out) } + if in.BrokerRack != nil { + in, out := &in.BrokerRack, &out.BrokerRack + *out = new(BrokerRack) + **out = **in + } if in.AuthSecret != nil { in, out := &in.AuthSecret, &out.AuthSecret *out = new(SecretReference) @@ -6007,7 +6033,7 @@ func (in *ScriptSourceSpec) DeepCopy() *ScriptSourceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretReference) DeepCopyInto(out *SecretReference) { *out = *in - out.LocalObjectReference = in.LocalObjectReference + out.TypedLocalObjectReference = in.TypedLocalObjectReference if in.RotateAfter != nil { in, out := &in.RotateAfter, &out.RotateAfter *out = new(v1.Duration) diff --git a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/constant.go b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/constant.go index 9756fa81d..22caaa643 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/constant.go +++ b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/constant.go @@ -119,6 +119,7 @@ const ( Restart = "Restart" RestartNodes = "RestartNodes" RestartPods = "RestartPods" + RestartKeeperPods = "RestartKeeperPods" RestartPodsSucceeded = "RestartPodsSucceeded" RestartPodsFailed = "RestartPodsFailed" ) @@ -366,6 +367,8 @@ const ( HorizontalScaleReplicasDown = "HorizontalScaleReplicasDown" HorizontalScaleSentinelUp = "HorizontalScaleSentinelUp" HorizontalScaleSentinelDown = "HorizontalScaleSentinelDown" + + RedisUpdateAclSecret = "UpdateAclSecret" ) // Druid Constants diff --git a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/openapi_generated.go index 273abf94d..323ab006e 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), 
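Note: the generator copies BrokerRack with a plain value assignment (**out = **in) because it holds only a string, while RedisAclSpec further down needs per-field handling for its pointer and slice members. A self-contained sketch of why the generated slice copies matter, with the type trimmed to the one relevant field:

    package main

    import "fmt"

    type aclSpec struct{ SyncACL []string } // trimmed stand-in for RedisAclSpec

    func main() {
        orig := aclSpec{SyncACL: []string{"app-user"}}

        shallow := orig             // copies the slice header; storage is shared
        shallow.SyncACL[0] = "oops" // mutates orig.SyncACL[0] as well

        deep := aclSpec{SyncACL: make([]string, len(orig.SyncACL))}
        copy(deep.SyncACL, orig.SyncACL) // what the generated DeepCopyInto does
        deep.SyncACL[0] = "safe"         // orig is unaffected this time

        fmt.Println(orig.SyncACL[0]) // "oops": leaked through the shallow copy
    }
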
"kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -707,6 +708,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/ops/v1alpha1.RabbitMQUpdateVersionSpec": schema_apimachinery_apis_ops_v1alpha1_RabbitMQUpdateVersionSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.RabbitMQVerticalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_RabbitMQVerticalScalingSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.RabbitMQVolumeExpansionSpec": schema_apimachinery_apis_ops_v1alpha1_RabbitMQVolumeExpansionSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisAclSpec": schema_apimachinery_apis_ops_v1alpha1_RedisAclSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisCustomConfigurationSpec": schema_apimachinery_apis_ops_v1alpha1_RedisCustomConfigurationSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisHorizontalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_RedisHorizontalScalingSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisOpsRequest": schema_apimachinery_apis_ops_v1alpha1_RedisOpsRequest(ref), @@ -22808,7 +22810,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22834,7 +22836,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22842,7 +22844,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23258,6 +23260,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -26125,14 +26169,14 @@ func schema_apimachinery_apis_ops_v1alpha1_AuthSpec(ref common.ReferenceCallback "secretRef": { SchemaProps: spec.SchemaProps{ Description: "SecretRef holds the new authSecret If it is given, ops-manager will use this. Otherwise, will generate random password", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -34338,6 +34382,56 @@ func schema_apimachinery_apis_ops_v1alpha1_RabbitMQVolumeExpansionSpec(ref commo } } +func schema_apimachinery_apis_ops_v1alpha1_RedisAclSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef holds the password against which ACLs will be created if syncACL is given.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "syncACL": { + SchemaProps: spec.SchemaProps{ + Description: "SyncACL specifies the list of users whose ACLs should be synchronized with the new authentication secret. 
If provided, the system will update the ACLs for these users to ensure they are in sync with the new authentication settings.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "deleteUsers": { + SchemaProps: spec.SchemaProps{ + Description: "DeleteUsers specifies the list of users that should be deleted from the database. If provided, the system will remove these users from the database to enhance security or manage", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + func schema_apimachinery_apis_ops_v1alpha1_RedisCustomConfigurationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -34370,11 +34464,16 @@ func schema_apimachinery_apis_ops_v1alpha1_RedisCustomConfigurationSpec(ref comm Format: "", }, }, + "auth": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisAclSpec"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, + "k8s.io/api/core/v1.LocalObjectReference", "kubedb.dev/apimachinery/apis/ops/v1alpha1.RedisAclSpec"}, } } diff --git a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/redis_ops_types.go b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/redis_ops_types.go index cdcbff903..2475f92d9 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/redis_ops_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/redis_ops_types.go @@ -143,6 +143,20 @@ type RedisVerticalScalingSpec struct { Coordinator *ContainerResources `json:"coordinator,omitempty"` } +type RedisAclSpec struct { + // SecretRef holds the password against which ACLs will be created if syncACL is given. + // +optional + SecretRef *core.LocalObjectReference `json:"secretRef,omitempty"` + + // SyncACL specifies the list of users whose ACLs should be synchronized with the new authentication secret. + // If provided, the system will update the ACLs for these users to ensure they are in sync with the new authentication settings. + SyncACL []string `json:"syncACL,omitempty"` + + // DeleteUsers specifies the list of users that should be deleted from the database. 
+ // If provided, the system will remove these users from the database to enhance security or manage + DeleteUsers []string `json:"deleteUsers,omitempty"` +} + // RedisVolumeExpansionSpec is the spec for Redis volume expansion type RedisVolumeExpansionSpec struct { Mode VolumeExpansionMode `json:"mode"` @@ -153,6 +167,7 @@ type RedisCustomConfigurationSpec struct { ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"` ApplyConfig map[string]string `json:"applyConfig,omitempty"` RemoveCustomConfig bool `json:"removeCustomConfig,omitempty"` + Auth *RedisAclSpec `json:"auth,omitempty"` } // +kubebuilder:validation:Enum=ip;hostname diff --git a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/type.go b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/type.go index 2e7a51dd4..46ca6c215 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/type.go +++ b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/type.go @@ -20,6 +20,7 @@ import ( core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kmapi "kmodules.xyz/client-go/api/v1" + app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" nodemeta "kmodules.xyz/resource-metadata/apis/node/v1alpha1" ) @@ -95,7 +96,7 @@ type TLSSpec struct { type AuthSpec struct { // SecretRef holds the new authSecret // If it is given, ops-manager will use this. Otherwise, will generate random password - SecretRef *core.LocalObjectReference `json:"secretRef,omitempty"` + SecretRef *app_api.TypedLocalObjectReference `json:"secretRef,omitempty"` } // +kubebuilder:validation:Enum=IfReady;Always diff --git a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/zz_generated.deepcopy.go b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/zz_generated.deepcopy.go index f9f129156..5063d9817 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/kubedb.dev/apimachinery/apis/ops/v1alpha1/zz_generated.deepcopy.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" apiv1 "kmodules.xyz/client-go/api/v1" + appcatalogv1alpha1 "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -76,7 +77,7 @@ func (in *AuthSpec) DeepCopyInto(out *AuthSpec) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) + *out = new(appcatalogv1alpha1.TypedLocalObjectReference) **out = **in } return @@ -6055,6 +6056,37 @@ func (in *RabbitMQVolumeExpansionSpec) DeepCopy() *RabbitMQVolumeExpansionSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisAclSpec) DeepCopyInto(out *RedisAclSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.SyncACL != nil { + in, out := &in.SyncACL, &out.SyncACL + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DeleteUsers != nil { + in, out := &in.DeleteUsers, &out.DeleteUsers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisAclSpec. 
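Note: the Redis reconfigure path gains an optional Auth block (RedisAclSpec), and the generic ops AuthSpec.SecretRef moves from core/v1 LocalObjectReference to the appcatalog TypedLocalObjectReference. A hedged sketch of how the new knobs compose; every name below is an illustrative example, not taken from the diff:

    package example

    import (
        core "k8s.io/api/core/v1"
        app_api "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
        opsapi "kubedb.dev/apimachinery/apis/ops/v1alpha1"
    )

    // redisReconfigure shows a configuration section that syncs the ACL of
    // one user against the referenced password secret and drops another.
    func redisReconfigure() opsapi.RedisCustomConfigurationSpec {
        return opsapi.RedisCustomConfigurationSpec{
            ApplyConfig: map[string]string{"redis.conf": "maxmemory-policy allkeys-lru"},
            Auth: &opsapi.RedisAclSpec{
                SecretRef:   &core.LocalObjectReference{Name: "redis-acl-pass"},
                SyncACL:     []string{"app-user"},
                DeleteUsers: []string{"legacy-user"},
            },
        }
    }

    // rotateAuth shows the new typed secret reference; with Kind "Secret" and
    // an empty APIGroup it behaves like the old local reference.
    func rotateAuth() opsapi.AuthSpec {
        return opsapi.AuthSpec{
            SecretRef: &app_api.TypedLocalObjectReference{Kind: "Secret", Name: "redis-new-auth"},
        }
    }
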
+func (in *RedisAclSpec) DeepCopy() *RedisAclSpec { + if in == nil { + return nil + } + out := new(RedisAclSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RedisCustomConfigurationSpec) DeepCopyInto(out *RedisCustomConfigurationSpec) { *out = *in @@ -6070,6 +6102,11 @@ func (in *RedisCustomConfigurationSpec) DeepCopyInto(out *RedisCustomConfigurati (*out)[key] = val } } + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(RedisAclSpec) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/kubedb.dev/apimachinery/apis/postgres/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/postgres/v1alpha1/openapi_generated.go index 1031d2de8..c0fd667d5 100644 --- a/vendor/kubedb.dev/apimachinery/apis/postgres/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/postgres/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22565,7 +22566,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22591,7 +22592,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22599,7 +22600,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23015,6 +23016,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/schema/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/schema/v1alpha1/openapi_generated.go index 71d2932b1..a464ec680 100644 --- a/vendor/kubedb.dev/apimachinery/apis/schema/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/schema/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22576,7 +22577,7 @@ func 
schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22602,7 +22603,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22610,7 +22611,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23026,6 +23027,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go index 3b3e53c0c..8bae6e2f7 100644 --- a/vendor/kubedb.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go @@ -454,6 +454,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashAddonSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashAddonSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.StashTaskSpec": schema_custom_resources_apis_appcatalog_v1alpha1_StashTaskSpec(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TaskRef": schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref), + "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference": schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AgentSpec": schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref), "kmodules.xyz/monitoring-agent-api/api/v1.AlertPreset": schema_kmodulesxyz_monitoring_agent_api_api_v1_AlertPreset(ref), "kmodules.xyz/monitoring-agent-api/api/v1.BasicAuth": schema_kmodulesxyz_monitoring_agent_api_api_v1_BasicAuth(ref), @@ -22684,7 +22685,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "secret": { SchemaProps: spec.SchemaProps{ Description: "Secret is the name of the secret to create in the AppBinding's namespace that will hold the credentials associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, "secretTransforms": { @@ -22710,7 +22711,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. "tlsSecret": { SchemaProps: spec.SchemaProps{ Description: "TLSSecret is the name of the secret that will hold the client certificate and private key associated with the AppBinding.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"), }, }, }, @@ -22718,7 +22719,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.TypedLocalObjectReference"}, } } @@ -23134,6 +23135,48 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_TaskRef(ref common.Referen } } +func schema_custom_resources_apis_appcatalog_v1alpha1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + }, + } +} + func schema_kmodulesxyz_monitoring_agent_api_api_v1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_druids.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_druids.yaml index c2ad444fe..6b4da9469 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_druids.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_druids.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -1163,16 +1169,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic metadataStorage: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_elasticsearches.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_elasticsearches.yaml index 8e4854954..45b784643 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_elasticsearches.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_elasticsearches.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: 
"" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_ferretdbs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_ferretdbs.yaml index 7afb67a3d..806ef6865 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_ferretdbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_ferretdbs.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_kafkas.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_kafkas.yaml index 64995b5e0..e4468c219 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_kafkas.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_kafkas.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -49,6 +55,11 @@ spec: disabled: type: boolean type: object + brokerRack: + properties: + topologyKey: + type: string + type: object configSecret: properties: name: @@ -71,16 +82,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic podTemplate: @@ -3253,16 +3270,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mariadbs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mariadbs.yaml index bd7f05531..ab5f8db2d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mariadbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mariadbs.yaml @@ -118,16 +118,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_memcacheds.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_memcacheds.yaml index 0ea8f3059..bce4ec769 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_memcacheds.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_memcacheds.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" 
type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic configSecret: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mongodbs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mongodbs.yaml index 8dc61480f..a7142be5b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mongodbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mongodbs.yaml @@ -3220,16 +3220,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mssqlservers.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mssqlservers.yaml index 88d842ea7..9629caddf 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mssqlservers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mssqlservers.yaml @@ -105,16 +105,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mysqls.yaml index b791bcaca..4172c86c3 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_mysqls.yaml @@ -188,16 +188,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_perconaxtradbs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_perconaxtradbs.yaml index d8c5e7be1..19d3e0aa5 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_perconaxtradbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_perconaxtradbs.yaml @@ -102,16 +102,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -4674,16 +4680,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic replicationUserSecret: @@ -4692,16 +4704,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string 
rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgbouncers.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgbouncers.yaml index 9eeeefa8b..213275d46 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgbouncers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgbouncers.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgpools.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgpools.yaml index b3827de61..5f7d4f0f7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgpools.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_pgpools.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_postgreses.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_postgreses.yaml index 1c82ac644..e347305af 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_postgreses.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_postgreses.yaml @@ -175,16 +175,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_proxysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_proxysqls.yaml index f66aeea63..cef6182f7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_proxysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_proxysqls.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_rabbitmqs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_rabbitmqs.yaml index ca36a62a2..02ff39c84 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_rabbitmqs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_rabbitmqs.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git 
a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redises.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redises.yaml index ffc4fd35a..ef143d2d5 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redises.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redises.yaml @@ -26,6 +26,20 @@ spec: type: object spec: properties: + acl: + properties: + rules: + items: + type: string + type: array + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: object allowedSchemas: default: namespaces: @@ -102,16 +116,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redissentinels.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redissentinels.yaml index e5d832ab6..0181d6d93 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redissentinels.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_redissentinels.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_singlestores.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_singlestores.yaml index 87d6e6020..fdb6df8d7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_singlestores.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_singlestores.yaml @@ -32,16 +32,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -1140,16 +1146,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_solrs.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_solrs.yaml index 0c50588eb..db9361e68 100644 --- a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_solrs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_solrs.yaml @@ -39,16 +39,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_zookeepers.yaml b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_zookeepers.yaml index 0fb9f4ad9..379e0c308 100644 --- 
a/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_zookeepers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/gitops.kubedb.com_zookeepers.yaml @@ -36,16 +36,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -104,16 +110,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml index 473304918..68893515d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml @@ -51,16 +51,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic configSecret: @@ -121,16 +127,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml index 112fa3868..bdbd0d8bb 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -119,16 +125,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml index ff57777e2..725666d89 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -6514,6 +6520,1054 @@ spec: 
format: int32 type: integer type: object + init: + properties: + archiver: + properties: + encryptionSecret: + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + fullDBRepository: + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + manifestOptions: + properties: + archiver: + default: false + type: boolean + archiverRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + initScript: + default: false + type: boolean + type: object + manifestRepository: + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + recoveryTimestamp: + format: date-time + type: string + replicationStrategy: + enum: + - fscopy + - clone + - sync + - none + type: string + required: + - recoveryTimestamp + type: object + initialized: + type: boolean + script: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + 
resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + git: + properties: + args: + items: + type: string + type: array + authSecret: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + env: + items: + 
properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + required: + - args + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + 
fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + 
type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + scriptPath: + type: string + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + type: object + waitForInitialRestore: + type: boolean + type: object monitor: properties: agent: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml index 724df659a..9f319a264 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -1188,16 +1194,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic metadataStorage: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml index d0627615e..ddabe72d1 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string 
rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -40058,16 +40064,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml index e69394ec7..3a36abe0f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic deletionPolicy: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml index 9a1baadf3..d658d9b38 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_hazelcasts.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_hazelcasts.yaml index 79b81253b..3eae56e0f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_hazelcasts.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_hazelcasts.yaml @@ -52,16 +52,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ignites.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ignites.yaml index a9b412b18..61f3a261b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ignites.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ignites.yaml @@ -52,16 +52,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -114,16 +120,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml index 
7f6a2c134..f5cd3d304 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -74,6 +80,11 @@ spec: disabled: type: boolean type: object + brokerRack: + properties: + topologyKey: + type: string + type: object configSecret: properties: name: @@ -96,16 +107,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic podTemplate: @@ -3278,16 +3295,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: @@ -13310,16 +13333,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -13327,6 +13356,11 @@ spec: disabled: type: boolean type: object + brokerRack: + properties: + topologyKey: + type: string + type: object configSecret: properties: name: @@ -13349,16 +13383,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic podTemplate: @@ -16531,16 +16571,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml index ac1e9df22..14e1d5cdc 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml @@ -140,16 +140,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -8191,16 +8197,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: 
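Every gitops.kubedb.com and kubedb.com CRD in this vendor drop repeats the same authSecret schema change seen here: the reference picks up the typed shape introduced earlier, with `kind` (defaulting to Secret) and `name` now required and `apiGroup` defaulting to the empty core group. Because CRD structural defaulting runs before validation, existing manifests that set only `name` keep validating. The Kafka CRDs additionally gain the new `spec.brokerRack.topologyKey` field in the same sweep. A short sketch of the reference as a Go value, assuming plain string fields (consistent with the shallow `**out = **in` copy in the generated deepcopy); the secret name is illustrative:

    package main

    import (
        "fmt"

        appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
    )

    func main() {
        // What the updated CRD schemas expect an authSecret reference to carry;
        // apiGroup is omitted because the schema defaults it to "" (core group).
        ref := appcat.TypedLocalObjectReference{
            Kind: "Secret",     // schema default, now explicitly required
            Name: "my-db-auth", // illustrative secret name
        }
        fmt.Printf("%s/%s\n", ref.Kind, ref.Name)
    }

Defaulting both `kind` and `apiGroup` is what keeps this backward compatible: the API server fills them in for objects created against the old, name-only shape.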
diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml index d189d8293..45ea68d4a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic configSecret: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml index af324b447..73e3ce2d2 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml @@ -3242,16 +3242,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -23521,16 +23527,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml index 714a425fe..13fa01d51 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml @@ -127,16 +127,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml index d71ab25a6..9bf21e839 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml @@ -210,16 +210,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -8294,16 +8300,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_oracles.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_oracles.yaml index dc2337781..72a67bf2d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_oracles.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_oracles.yaml @@ -51,16 +51,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml index ca1dbfa96..f2f2c5224 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml @@ -124,16 +124,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -4696,16 +4702,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic replicationUserSecret: @@ -4714,16 +4726,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object @@ -4983,16 +5001,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -9220,16 +9244,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic replicationUserSecret: @@ -9238,16 +9268,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml index f5d464d77..baa3d7eab 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -4776,16 +4782,22 @@ spec: format: date-time type: string apiGroup: + 
default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml index 9aa665a17..569060d92 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml index 37382f4e1..6f2eaba3a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml @@ -197,16 +197,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -5191,16 +5197,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml index b9a962038..36269a039 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -3719,16 +3725,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml index 8832de4a1..0843c7f63 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff 
--git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml index 61755a2c3..366719eef 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml @@ -48,6 +48,20 @@ spec: type: object spec: properties: + acl: + properties: + rules: + items: + type: string + type: array + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: object allowedSchemas: default: namespaces: @@ -124,16 +138,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -4984,16 +5004,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml index 87e07852c..2d2542dbf 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml @@ -54,16 +54,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -3749,16 +3755,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml index 1b08e4517..19549d99f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml @@ -57,16 +57,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -1165,16 +1171,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml index c353b294a..3bb7c9048 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml @@ -64,16 +64,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml index 8bed8f8e6..3ce0c2b3d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml @@ -61,16 +61,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic autoOps: @@ -129,16 +135,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic monitor: diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_cassandraopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_cassandraopsrequests.yaml index 96be44f21..a7c781778 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_cassandraopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_cassandraopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object @@ -93,16 +101,22 @@ spec: format: date-time type: string apiGroup: + default: "" type: string externallyManaged: type: boolean + kind: + default: Secret + type: string name: - default: "" type: string rotateAfter: type: string secretStoreName: type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic restart: diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml index 798f6404d..394c6d6d7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml index 77f2ba2ce..4dd87a60e 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object 
x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml index dd361d257..9da04e6a3 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml index f4570dfde..eb9009cf4 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml index bb7f54495..c9beaa313 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_hazelcastopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_hazelcastopsrequests.yaml index ba098fd14..10dd97dc3 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_hazelcastopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_hazelcastopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_igniteopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_igniteopsrequests.yaml index 069fec6bf..7977728fa 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_igniteopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_igniteopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml index 2bcc28b71..eab70f5e9 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: 
properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml index af8fb6c45..7741ec251 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml index b91f54ee5..f7af00d2d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml index 4ab15ff47..d02e8e0be 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml @@ -72,9 +72,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml index 40e162977..d659c593d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml index b5c1bce67..cc0f83104 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml index 0687b967e..fd84e4ff8 100644 --- 
a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml index 87fc70dd0..2b5720f0e 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml index 070f59e1a..f286e788a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml index c39d4808c..b3e279222 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml index 4de85c9bd..5dfc997ac 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml index 8f480e0ba..cd9645165 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git 
a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml index e88668f2a..5fefc2468 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml @@ -70,9 +70,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object @@ -82,6 +90,24 @@ spec: additionalProperties: type: string type: object + auth: + properties: + deleteUsers: + items: + type: string + type: array + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + syncACL: + items: + type: string + type: array + type: object configSecret: properties: name: diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml index 445d6fd97..157678904 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml index 64d92a4e3..435b3e120 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml index ef378e14c..dadf19b2c 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml index 3ec74b2a9..cb36fddab 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml @@ -52,9 +52,17 @@ spec: properties: secretRef: properties: - name: + apiGroup: default: "" type: string + kind: + default: Secret + type: string + name: + type: string + required: + - kind + - name type: object x-kubernetes-map-type: atomic type: object diff --git a/vendor/kubedb.dev/db-client-go/elasticsearch/es_client_v9.go 
b/vendor/kubedb.dev/db-client-go/elasticsearch/es_client_v9.go new file mode 100644 index 000000000..33dc60d99 --- /dev/null +++ b/vendor/kubedb.dev/db-client-go/elasticsearch/es_client_v9.go @@ -0,0 +1,554 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package elasticsearch + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + dbapi "kubedb.dev/apimachinery/apis/kubedb/v1" + + esv9 "github.com/elastic/go-elasticsearch/v9" + "github.com/elastic/go-elasticsearch/v9/esapi" + "github.com/pkg/errors" + core "k8s.io/api/core/v1" + "k8s.io/klog/v2" + kutil "kmodules.xyz/client-go" +) + +var _ ESClient = &ESClientV9{} + +type ESClientV9 struct { + client *esv9.Client +} + +func (es *ESClientV9) ClusterHealthInfo() (map[string]interface{}, error) { + res, err := es.client.Cluster.Health( + es.client.Cluster.Health.WithPretty(), + ) + if err != nil { + return nil, err + } + defer res.Body.Close() + + response := make(map[string]interface{}) + if err2 := json.NewDecoder(res.Body).Decode(&response); err2 != nil { + return nil, errors.Wrap(err2, "failed to parse the response body") + } + return response, nil +} + +func (es *ESClientV9) NodesStats() (map[string]interface{}, error) { + req := esapi.NodesStatsRequest{ + Pretty: true, + Human: true, + } + + resp, err := req.Do(context.Background(), es.client) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + nodesStats := make(map[string]interface{}) + if err := json.NewDecoder(resp.Body).Decode(&nodesStats); err != nil { + return nil, fmt.Errorf("failed to deserialize the response: %v", err) + } + + return nodesStats, nil +} + +func (es *ESClientV9) ShardStats() ([]ShardInfo, error) { + req := esapi.CatShardsRequest{ + Bytes: "b", + Format: "json", + Pretty: true, + Human: true, + H: []string{"index", "shard", "prirep", "state", "unassigned.reason"}, + } + + resp, err := req.Do(context.Background(), es.client) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "failed to read the response body") + } + + var shardStats []ShardInfo + err = json.Unmarshal(body, &shardStats) + if err != nil { + return nil, err + } + return shardStats, nil +} + +// GetIndicesInfo will return the indices' info of an Elasticsearch database +func (es *ESClientV9) GetIndicesInfo() ([]interface{}, error) { + req := esapi.CatIndicesRequest{ + Bytes: "b", // will return resource size field into byte unit + Format: "json", + Pretty: true, + Human: true, + } + + resp, err := req.Do(context.Background(), es.client) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + indicesInfo := make([]interface{}, 0) + if err := json.NewDecoder(resp.Body).Decode(&indicesInfo); err != nil { + return nil, fmt.Errorf("failed to deserialize the response: %v", err) + } + + return indicesInfo, nil +} + +func (es *ESClientV9) ClusterStatus() (string, error) { + res, err := es.client.Cluster.Health( + 
es.client.Cluster.Health.WithPretty(), + ) + if err != nil { + return "", err + } + defer res.Body.Close() + + response := make(map[string]interface{}) + if err2 := json.NewDecoder(res.Body).Decode(&response); err2 != nil { + return "", errors.Wrap(err2, "failed to parse the response body") + } + if value, ok := response["status"]; ok { + if strValue, ok := value.(string); ok { + return strValue, nil + } + return "", errors.New("failed to convert response to string") + } + return "", errors.New("status is missing") +} + +func (es *ESClientV9) SyncCredentialFromSecret(secret *core.Secret) error { + // get auth creds from secret + var username, password string + if value, ok := secret.Data[core.BasicAuthUsernameKey]; ok { + username = string(value) + } else { + return errors.New("username is missing") + } + if value, ok := secret.Data[core.BasicAuthPasswordKey]; ok { + password = string(value) + } else { + return errors.New("password is missing") + } + + // Build the request body. + reqBody := map[string]string{ + "password": password, + } + body, err2 := json.Marshal(reqBody) + if err2 != nil { + return err2 + } + + // send change password request via _security/user/username/_password api + // use admin client to make request + req := esapi.SecurityChangePasswordRequest{ + Body: strings.NewReader(string(body)), + Username: username, + Pretty: true, + } + + res, err := req.Do(context.Background(), es.client.Transport) + if err != nil { + klog.Errorf("failed to send change password request for user %s, reason: %s", username, err) + return err + } + + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close auth response body, reason: %s", err) + } + }(res.Body) + + if !res.IsError() { + klog.V(5).Infoln(username, "user credentials successfully synced") + return nil + } + + klog.V(5).Infoln("Failed to sync", username, "credentials") + return errors.New("CredSyncFailed") +} + +func (es *ESClientV9) GetClusterWriteStatus(ctx context.Context, db *dbapi.Elasticsearch) error { + // Build the request index & request body + // send the db specs as body + indexBody := WriteRequestIndexBody{ + ID: writeRequestID, + } + + indexReq := WriteRequestIndex{indexBody} + ReqBody := db.Spec + + // encode the request index & request body + index, err1 := json.Marshal(indexReq) + if err1 != nil { + return errors.Wrap(err1, "Failed to encode index for performing write request") + } + body, err2 := json.Marshal(ReqBody) + if err2 != nil { + return errors.Wrap(err2, "Failed to encode request body for performing write request") + } + + // make write request & fetch response + // check for write request failure & error from response body + // The Bulk API performs multiple indexing or delete operations in a single API call. + // This reduces overhead and can greatly increase indexing speed. It indexes the specified document; + // if the document exists, it replaces the document and increments the version. + res, err3 := esapi.BulkRequest{ + Index: writeRequestIndex, + Body: strings.NewReader(strings.Join([]string{string(index), string(body)}, "\n") + "\n"), + Pretty: true, + }.Do(ctx, es.client.Transport) + if err3 != nil { + return errors.Wrap(err3, "Failed to perform write request") + } + if res.IsError() { + return fmt.Errorf("failed to get response from write request with error statuscode %d", res.StatusCode) + } + + defer func(res *esapi.Response) { + if res != nil { + err3 = res.Body.Close() + if err3 != nil { + klog.Errorf("Failed to close write request response body, reason: %s", err3) + } + 
} + }(res) + + responseBody := make(map[string]interface{}) + if err4 := json.NewDecoder(res.Body).Decode(&responseBody); err4 != nil { + return errors.Wrap(err4, "Failed to decode response from write request") + } + + // Parse the responseBody to check if write operation failed after request being successful + // `errors` field(boolean) in the json response becomes true if there is an error, otherwise it stays false + if value, ok := responseBody["errors"]; ok { + if strValue, ok := value.(bool); ok { + if !strValue { + return nil + } + return errors.Errorf("Write request responded with error, %v", responseBody) + } + return errors.New("Failed to parse value for `errors` in response from write request") + } + return errors.New("Failed to parse key `errors` in response from write request") +} + +func (es *ESClientV9) GetClusterReadStatus(ctx context.Context, db *dbapi.Elasticsearch) error { + // Perform a read request in writeRequestIndex/writeRequestID (kubedb-system/info) API + // Handle error specifically if index has not been created yet + res, err := esapi.GetRequest{ + Index: writeRequestIndex, + DocumentID: writeRequestID, + }.Do(ctx, es.client.Transport) + if err != nil { + return errors.Wrap(err, "Failed to perform read request") + } + + defer func(res *esapi.Response) { + if res != nil { + err = res.Body.Close() + if err != nil { + klog.Errorf("failed to close read request response body, reason: %s", err) + } + } + }(res) + + if res.StatusCode == http.StatusNotFound { + return kutil.ErrNotFound + } + if res.IsError() { + return fmt.Errorf("failed to get response from read request with error statuscode %d", res.StatusCode) + } + + return nil +} + +func (es *ESClientV9) GetTotalDiskUsage(ctx context.Context) (string, error) { + // Perform a DiskUsageRequest to database to calculate store size of all the elasticsearch indices + // primary purpose of this function is to provide operator calculated storage of interimVolumeTemplate while taking backup + // Analyzing field disk usage is resource-intensive. To use the API, RunExpensiveTasks must be set to true. Defaults to false. + // Get disk usage for all indices using "*" wildcard.
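+ // NOTE: esapi.IndicesDiskUsageRequest.RunExpensiveTasks is a *bool, hence the named flag variable below that is passed by address.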
+ flag := true + res, err := esapi.IndicesDiskUsageRequest{ + Index: diskUsageRequestIndex, + Pretty: true, + Human: true, + RunExpensiveTasks: &flag, + ExpandWildcards: diskUsageRequestWildcards, + }.Do(ctx, es.client.Transport) + if err != nil { + return "", errors.Wrap(err, "Failed to perform Disk Usage Request") + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body from Disk Usage Request, reason: %s", err) + } + }(res.Body) + + // Parse the json response to get total storage used for all indices + totalDiskUsage, err := calculateDatabaseSize(res.Body) + if err != nil { + return "", errors.Wrap(err, "Failed to parse json response to get disk usage") + } + + return totalDiskUsage, nil +} + +func (es *ESClientV9) GetDBUserRole(ctx context.Context) (error, bool) { + req := esapi.SecurityGetRoleRequest{ + Name: []string{CustomRoleName}, + } + res, err := req.Do(ctx, es.client.Transport) + if err != nil { + klog.Errorf("failed to get existing DB user role, reason: %s", err) + return err, false + } + defer func(Body io.ReadCloser) { + err := Body.Close() + if err != nil { + klog.Errorf("failed to close response body from GetDBUserRole, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + err = fmt.Errorf("fetching DB user role failed with error status code %d", res.StatusCode) + klog.Errorf("Failed to fetch DB user role, reason: %s", err) + return nil, false + } + + return nil, true +} + +func (es *ESClientV9) CreateDBUserRole(ctx context.Context) error { + userRoleReqStruct := UserRoleReq{ + []string{PrivilegeCreateSnapshot, PrivilegeManage, PrivilegeManageILM, PrivilegeManageRoleup, PrivilegeMonitor, PrivilegeManageCCR}, + []DBPrivileges{ + { + []string{PrivilegeIndexAny}, + []string{PrivilegeRead, PrivilegeWrite, PrivilegeCreateIndex}, + false, + }, + }, + []ApplicationPrivileges{ + { + ApplicationKibana, + []string{PrivilegeRead, PrivilegeWrite}, + []string{PrivilegeIndexAny}, + }, + }, + []string{}, + TransientMetaPrivileges{ + true, + }, + } + + userRoleReqJSON, err := json.Marshal(userRoleReqStruct) + if err != nil { + klog.Errorf("failed to parse roleRequest body to json, reason: %s", err) + return err + } + body := bytes.NewReader(userRoleReqJSON) + req := esapi.SecurityPutRoleRequest{ + Name: CustomRoleName, + Body: body, + } + + res, err := req.Do(ctx, es.client.Transport) + if err != nil { + klog.Errorf("Failed to perform request to create DB user role, reason: %s", err) + return err + } + defer func(Body io.ReadCloser) { + err := Body.Close() + if err != nil { + klog.Errorf("failed to close response body from CreateDBUserRole, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + err = fmt.Errorf("DB user role creation failed with error status code %d", res.StatusCode) + klog.Errorf("Failed to create DB user role, reason: %s", err) + return err + } + return nil +} + +func (es *ESClientV9) IndexExistsOrNot(index string) error { + req := esapi.IndicesExistsRequest{ + Index: []string{index}, + } + res, err := req.Do(context.Background(), es.client) + if err != nil { + klog.Errorf("failed to get response while checking whether the index exists or not, reason: %v", err) + return err + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body for checking the existence of index, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + klog.Errorf("failed to get index with statuscode %d", res.StatusCode) + return errors.New("index does not
exist") + } + return nil +} + +func (es *ESClientV9) CreateIndex(index string) error { + req := esapi.IndicesCreateRequest{ + Index: index, + Pretty: true, + Human: true, + } + + res, err := req.Do(context.Background(), es.client) + if err != nil { + klog.Errorf("failed to apply create index request, reason: %s", err) + return err + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body for creating index, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + klog.Errorf("creating index failed with statuscode %d", res.StatusCode) + return errors.New("failed to create index") + } + + return nil +} + +func (es *ESClientV9) DeleteIndex(index string) error { + req := esapi.IndicesDeleteRequest{ + Index: []string{index}, + } + + res, err := req.Do(context.Background(), es.client) + if err != nil { + klog.Errorf("failed to apply delete index request, reason: %s", err) + return err + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body for deleting index, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + klog.Errorf("failed to delete index with status code %d", res.StatusCode) + return errors.New("failed to delete index") + } + + return nil +} + +func (es *ESClientV9) CountData(index string) (int, error) { + req := esapi.CountRequest{ + Index: []string{index}, + } + + res, err := req.Do(context.Background(), es.client) + if err != nil { + return 0, err + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body for counting data, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + klog.Errorf("failed to count number of documents in index with statuscode %d", res.StatusCode) + return 0, errors.New("failed to count number of documents in index") + } + + var response map[string]interface{} + if err := json.NewDecoder(res.Body).Decode(&response); err != nil { + return 0, err + } + + count, ok := response["count"] + if !ok { + return 0, errors.New("failed to parse value for index count in response body") + } + + return int(count.(float64)), nil +} + +func (es *ESClientV9) PutData(index, id string, data map[string]interface{}) error { + var b strings.Builder + dataBytes, err := json.Marshal(data) + if err != nil { + return errors.Wrap(err, "failed to Marshal data") + } + b.Write(dataBytes) + + req := esapi.CreateRequest{ + Index: index, + DocumentID: id, + Body: strings.NewReader(b.String()), + Pretty: true, + Human: true, + } + + res, err := req.Do(context.Background(), es.client) + if err != nil { + klog.Errorf("failed to put data in the index, reason: %s", err) + return err + } + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body for putting data in the index, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + klog.Errorf("failed to put data in an index with statuscode %d", res.StatusCode) + return errors.New("failed to put data in an index") + } + return nil +} diff --git a/vendor/kubedb.dev/db-client-go/elasticsearch/kubedb_client_builder.go b/vendor/kubedb.dev/db-client-go/elasticsearch/kubedb_client_builder.go index 1d69a3dda..dada31189 100644 --- a/vendor/kubedb.dev/db-client-go/elasticsearch/kubedb_client_builder.go +++ b/vendor/kubedb.dev/db-client-go/elasticsearch/kubedb_client_builder.go @@ -38,6 +38,8 @@ import ( esv7 "github.com/elastic/go-elasticsearch/v7" esv8 
"github.com/elastic/go-elasticsearch/v8" "github.com/elastic/go-elasticsearch/v8/esapi" + esv9 "github.com/elastic/go-elasticsearch/v9" + esapi9 "github.com/elastic/go-elasticsearch/v9/esapi" "github.com/go-resty/resty/v2" osv1 "github.com/opensearch-project/opensearch-go" osapiv1 "github.com/opensearch-project/opensearch-go/opensearchapi" @@ -272,8 +274,6 @@ func (o *KubeDBClientBuilder) GetElasticClient() (*Client, error) { &ESClientV7{client: esClient}, }, nil - // for Elasticsearch 8.x.x - // for Elasticsearch 8.x.x case version.Major() == 8: defaultTLSConfig, err := o.getDefaultTLSConfig() @@ -321,6 +321,54 @@ func (o *KubeDBClientBuilder) GetElasticClient() (*Client, error) { return &Client{ &ESClientV8{client: esClient}, }, nil + + // for Elasticsearch 9.x.x + case version.Major() == 9: + defaultTLSConfig, err := o.getDefaultTLSConfig() + if err != nil { + klog.Errorf("Failed get default TLS configuration") + return nil, err + + } + + esClient, err := esv9.NewClient(esv9.Config{ + Addresses: []string{o.url}, + Username: username, + Password: password, + EnableDebugLogger: true, + DisableRetry: true, + Transport: &http.Transport{ + IdleConnTimeout: 3 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + }).DialContext, + TLSClientConfig: defaultTLSConfig, + }, + }) + if err != nil { + klog.Errorf("Failed to create HTTP client for Elasticsearch: %s/%s with: %s", o.db.Namespace, o.db.Name, err) + return nil, err + } + + res, err := esapi9.PingRequest{}.Do(o.ctx, esClient.Transport) + if err != nil { + return nil, err + } + + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + klog.Errorf("failed to close response body, reason: %s", err) + } + }(res.Body) + + if res.IsError() { + return nil, fmt.Errorf("cluster ping request failed with status code: %d", res.StatusCode) + } + + return &Client{ + &ESClientV9{client: esClient}, + }, nil } case esVersion.Spec.AuthPlugin == catalog.ElasticsearchAuthPluginOpenSearch: diff --git a/vendor/modules.txt b/vendor/modules.txt index 98a3ffe09..285a8b9ff 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -54,8 +54,8 @@ github.com/davecgh/go-spew/spew # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/elastic/elastic-transport-go/v8 v8.1.0 -## explicit; go 1.13 +# github.com/elastic/elastic-transport-go/v8 v8.7.0 +## explicit; go 1.20 github.com/elastic/elastic-transport-go/v8/elastictransport github.com/elastic/elastic-transport-go/v8/elastictransport/version # github.com/elastic/go-elasticsearch/v5 v5.6.1 @@ -668,6 +668,855 @@ github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage +# github.com/elastic/go-elasticsearch/v9 v9.1.0 +## explicit; go 1.23 +github.com/elastic/go-elasticsearch/v9 +github.com/elastic/go-elasticsearch/v9/esapi +github.com/elastic/go-elasticsearch/v9/internal/version +github.com/elastic/go-elasticsearch/v9/typedapi +github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/delete +github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/get +github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/status +github.com/elastic/go-elasticsearch/v9/typedapi/asyncsearch/submit +github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/deleteautoscalingpolicy 
+github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingcapacity +github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/getautoscalingpolicy +github.com/elastic/go-elasticsearch/v9/typedapi/autoscaling/putautoscalingpolicy +github.com/elastic/go-elasticsearch/v9/typedapi/capabilities +github.com/elastic/go-elasticsearch/v9/typedapi/cat/aliases +github.com/elastic/go-elasticsearch/v9/typedapi/cat/allocation +github.com/elastic/go-elasticsearch/v9/typedapi/cat/componenttemplates +github.com/elastic/go-elasticsearch/v9/typedapi/cat/count +github.com/elastic/go-elasticsearch/v9/typedapi/cat/fielddata +github.com/elastic/go-elasticsearch/v9/typedapi/cat/health +github.com/elastic/go-elasticsearch/v9/typedapi/cat/help +github.com/elastic/go-elasticsearch/v9/typedapi/cat/indices +github.com/elastic/go-elasticsearch/v9/typedapi/cat/master +github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldatafeeds +github.com/elastic/go-elasticsearch/v9/typedapi/cat/mldataframeanalytics +github.com/elastic/go-elasticsearch/v9/typedapi/cat/mljobs +github.com/elastic/go-elasticsearch/v9/typedapi/cat/mltrainedmodels +github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodeattrs +github.com/elastic/go-elasticsearch/v9/typedapi/cat/nodes +github.com/elastic/go-elasticsearch/v9/typedapi/cat/pendingtasks +github.com/elastic/go-elasticsearch/v9/typedapi/cat/plugins +github.com/elastic/go-elasticsearch/v9/typedapi/cat/recovery +github.com/elastic/go-elasticsearch/v9/typedapi/cat/repositories +github.com/elastic/go-elasticsearch/v9/typedapi/cat/segments +github.com/elastic/go-elasticsearch/v9/typedapi/cat/shards +github.com/elastic/go-elasticsearch/v9/typedapi/cat/snapshots +github.com/elastic/go-elasticsearch/v9/typedapi/cat/tasks +github.com/elastic/go-elasticsearch/v9/typedapi/cat/templates +github.com/elastic/go-elasticsearch/v9/typedapi/cat/threadpool +github.com/elastic/go-elasticsearch/v9/typedapi/cat/transforms +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/deleteautofollowpattern +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/follow +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followinfo +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/followstats +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/forgetfollower +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/getautofollowpattern +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pauseautofollowpattern +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/pausefollow +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/putautofollowpattern +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumeautofollowpattern +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/resumefollow +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/stats +github.com/elastic/go-elasticsearch/v9/typedapi/ccr/unfollow +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/allocationexplain +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletecomponenttemplate +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/deletevotingconfigexclusions +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/existscomponenttemplate +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getcomponenttemplate +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/getsettings +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/health +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/info +github.com/elastic/go-elasticsearch/v9/typedapi/cluster/pendingtasks 
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/postvotingconfigexclusions
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putcomponenttemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/putsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/remoteinfo
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/reroute
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/state
+github.com/elastic/go-elasticsearch/v9/typedapi/cluster/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/checkin
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/get
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/lastsync
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/list
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/post
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/put
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/secretpost
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcancel
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobcheckin
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobclaim
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobdelete
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoberror
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobget
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjoblist
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobpost
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/syncjobupdatestats
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateactivefiltering
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateapikeyid
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateconfiguration
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateerror
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefeatures
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefiltering
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatefilteringvalidation
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateindexname
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatename
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatenative
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatepipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatescheduling
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updateservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/connector/updatestatus
+github.com/elastic/go-elasticsearch/v9/typedapi/core/bulk
+github.com/elastic/go-elasticsearch/v9/typedapi/core/clearscroll
+github.com/elastic/go-elasticsearch/v9/typedapi/core/closepointintime
+github.com/elastic/go-elasticsearch/v9/typedapi/core/count
+github.com/elastic/go-elasticsearch/v9/typedapi/core/create
+github.com/elastic/go-elasticsearch/v9/typedapi/core/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyquery
+github.com/elastic/go-elasticsearch/v9/typedapi/core/deletebyqueryrethrottle
+github.com/elastic/go-elasticsearch/v9/typedapi/core/deletescript
+github.com/elastic/go-elasticsearch/v9/typedapi/core/exists
+github.com/elastic/go-elasticsearch/v9/typedapi/core/existssource
+github.com/elastic/go-elasticsearch/v9/typedapi/core/explain
+github.com/elastic/go-elasticsearch/v9/typedapi/core/fieldcaps
+github.com/elastic/go-elasticsearch/v9/typedapi/core/get
+github.com/elastic/go-elasticsearch/v9/typedapi/core/getscript
+github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptcontext
+github.com/elastic/go-elasticsearch/v9/typedapi/core/getscriptlanguages
+github.com/elastic/go-elasticsearch/v9/typedapi/core/getsource
+github.com/elastic/go-elasticsearch/v9/typedapi/core/healthreport
+github.com/elastic/go-elasticsearch/v9/typedapi/core/index
+github.com/elastic/go-elasticsearch/v9/typedapi/core/info
+github.com/elastic/go-elasticsearch/v9/typedapi/core/mget
+github.com/elastic/go-elasticsearch/v9/typedapi/core/msearch
+github.com/elastic/go-elasticsearch/v9/typedapi/core/msearchtemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/core/mtermvectors
+github.com/elastic/go-elasticsearch/v9/typedapi/core/openpointintime
+github.com/elastic/go-elasticsearch/v9/typedapi/core/ping
+github.com/elastic/go-elasticsearch/v9/typedapi/core/putscript
+github.com/elastic/go-elasticsearch/v9/typedapi/core/rankeval
+github.com/elastic/go-elasticsearch/v9/typedapi/core/reindex
+github.com/elastic/go-elasticsearch/v9/typedapi/core/reindexrethrottle
+github.com/elastic/go-elasticsearch/v9/typedapi/core/rendersearchtemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/core/scriptspainlessexecute
+github.com/elastic/go-elasticsearch/v9/typedapi/core/scroll
+github.com/elastic/go-elasticsearch/v9/typedapi/core/search
+github.com/elastic/go-elasticsearch/v9/typedapi/core/searchmvt
+github.com/elastic/go-elasticsearch/v9/typedapi/core/searchshards
+github.com/elastic/go-elasticsearch/v9/typedapi/core/searchtemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/core/termsenum
+github.com/elastic/go-elasticsearch/v9/typedapi/core/termvectors
+github.com/elastic/go-elasticsearch/v9/typedapi/core/update
+github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyquery
+github.com/elastic/go-elasticsearch/v9/typedapi/core/updatebyqueryrethrottle
+github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/deletedanglingindex
+github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/importdanglingindex
+github.com/elastic/go-elasticsearch/v9/typedapi/danglingindices/listdanglingindices
+github.com/elastic/go-elasticsearch/v9/typedapi/enrich/deletepolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/enrich/executepolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/enrich/getpolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/enrich/putpolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/enrich/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/eql/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/eql/get
+github.com/elastic/go-elasticsearch/v9/typedapi/eql/getstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/eql/search
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquery
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerydelete
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncqueryget
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/asyncquerystop
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/getquery
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/listqueries
+github.com/elastic/go-elasticsearch/v9/typedapi/esql/query
+github.com/elastic/go-elasticsearch/v9/typedapi/features/getfeatures
+github.com/elastic/go-elasticsearch/v9/typedapi/features/resetfeatures
+github.com/elastic/go-elasticsearch/v9/typedapi/fleet/globalcheckpoints
+github.com/elastic/go-elasticsearch/v9/typedapi/fleet/msearch
+github.com/elastic/go-elasticsearch/v9/typedapi/fleet/postsecret
+github.com/elastic/go-elasticsearch/v9/typedapi/fleet/search
+github.com/elastic/go-elasticsearch/v9/typedapi/graph/explore
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/deletelifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/explainlifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getlifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/getstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/migratetodatatiers
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/movetostep
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/putlifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/removepolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/retry
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/start
+github.com/elastic/go-elasticsearch/v9/typedapi/ilm/stop
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/addblock
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/analyze
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/cancelmigratereindex
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/clearcache
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/clone
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/close
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/create
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/createdatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/createfrom
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/datastreamsstats
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletealias
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatalifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletedatastreamoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deleteindextemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/deletetemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/diskusage
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/downsample
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/exists
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsalias
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/existsindextemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/existstemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/explaindatalifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/fieldusagestats
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/flush
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/forcemerge
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/get
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getalias
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatalifecyclestats
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getdatastreamsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getfieldmapping
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getindextemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmapping
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getmigratereindexstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/getsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/gettemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratereindex
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/migratetodatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/modifydatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/open
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/promotedatastream
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putalias
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatalifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putdatastreamsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putindextemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putmapping
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/putsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/puttemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/recovery
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/refresh
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/reloadsearchanalyzers
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/removeblock
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolvecluster
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/resolveindex
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/rollover
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/segments
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/shardstores
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/shrink
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulateindextemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/simulatetemplate
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/split
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/updatealiases
+github.com/elastic/go-elasticsearch/v9/typedapi/indices/validatequery
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/chatcompletionunified
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/completion
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/get
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/inference
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/put
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putalibabacloud
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonbedrock
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putamazonsagemaker
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putanthropic
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureaistudio
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putazureopenai
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcohere
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putcustom
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putdeepseek
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelasticsearch
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putelser
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgoogleaistudio
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putgooglevertexai
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/puthuggingface
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putjinaai
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putmistral
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putopenai
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putvoyageai
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/putwatsonx
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/rerank
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/sparseembedding
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/streamcompletion
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/textembedding
+github.com/elastic/go-elasticsearch/v9/typedapi/inference/update
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletegeoipdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deleteiplocationdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/deletepipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/geoipstats
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getgeoipdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getiplocationdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/getpipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/processorgrok
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putgeoipdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putiplocationdatabase
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/putpipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/ingest/simulate
+github.com/elastic/go-elasticsearch/v9/typedapi/license/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/license/get
+github.com/elastic/go-elasticsearch/v9/typedapi/license/getbasicstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/license/gettrialstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/license/post
+github.com/elastic/go-elasticsearch/v9/typedapi/license/poststartbasic
+github.com/elastic/go-elasticsearch/v9/typedapi/license/poststarttrial
+github.com/elastic/go-elasticsearch/v9/typedapi/logstash/deletepipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/logstash/getpipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/logstash/putpipeline
+github.com/elastic/go-elasticsearch/v9/typedapi/migration/deprecations
+github.com/elastic/go-elasticsearch/v9/typedapi/migration/getfeatureupgradestatus
+github.com/elastic/go-elasticsearch/v9/typedapi/migration/postfeatureupgrade
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/cleartrainedmodeldeploymentcache
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/closejob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendar
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarevent
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletecalendarjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletedataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteexpireddata
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletefilter
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deleteforecast
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletejob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletemodelsnapshot
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodel
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/deletetrainedmodelalias
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/estimatemodelmemory
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/evaluatedataframe
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/explaindataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/flushjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/forecast
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getbuckets
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendarevents
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcalendars
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getcategories
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeeds
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdatafeedstats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getdataframeanalyticsstats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getfilters
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getinfluencers
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobs
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getjobstats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmemorystats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshots
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getmodelsnapshotupgradestats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getoverallbuckets
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/getrecords
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodels
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/gettrainedmodelsstats
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/infertrainedmodel
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/info
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/openjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/postcalendarevents
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/postdata
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/previewdataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendar
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putcalendarjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putdataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putfilter
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/putjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodel
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelalias
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodeldefinitionpart
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/puttrainedmodelvocabulary
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/resetjob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/revertmodelsnapshot
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/setupgrademode
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/startdataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/starttrainedmodeldeployment
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/stopdataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/stoptrainedmodeldeployment
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedatafeed
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatedataframeanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatefilter
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatejob
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatemodelsnapshot
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/updatetrainedmodeldeployment
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/upgradejobsnapshot
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/validate
+github.com/elastic/go-elasticsearch/v9/typedapi/ml/validatedetector
+github.com/elastic/go-elasticsearch/v9/typedapi/monitoring/bulk
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/clearrepositoriesmeteringarchive
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/getrepositoriesmeteringinfo
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/hotthreads
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/info
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/reloadsecuresettings
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/nodes/usage
+github.com/elastic/go-elasticsearch/v9/typedapi/profiling/flamegraph
+github.com/elastic/go-elasticsearch/v9/typedapi/profiling/stacktraces
+github.com/elastic/go-elasticsearch/v9/typedapi/profiling/status
+github.com/elastic/go-elasticsearch/v9/typedapi/profiling/topnfunctions
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleterule
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/deleteruleset
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getrule
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/getruleset
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/listrulesets
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putrule
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/putruleset
+github.com/elastic/go-elasticsearch/v9/typedapi/queryrules/test
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/deletejob
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getjobs
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupcaps
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/getrollupindexcaps
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/putjob
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/rollupsearch
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/startjob
+github.com/elastic/go-elasticsearch/v9/typedapi/rollup/stopjob
+github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/cachestats
+github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/clearcache
+github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/mount
+github.com/elastic/go-elasticsearch/v9/typedapi/searchablesnapshots/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/deletebehavioralanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/get
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/getbehavioralanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/list
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/postbehavioralanalyticsevent
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/put
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/putbehavioralanalytics
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/renderquery
+github.com/elastic/go-elasticsearch/v9/typedapi/searchapplication/search
+github.com/elastic/go-elasticsearch/v9/typedapi/security/activateuserprofile
+github.com/elastic/go-elasticsearch/v9/typedapi/security/authenticate
+github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkdeleterole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkputrole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/bulkupdateapikeys
+github.com/elastic/go-elasticsearch/v9/typedapi/security/changepassword
+github.com/elastic/go-elasticsearch/v9/typedapi/security/clearapikeycache
+github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedrealms
+github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedroles
+github.com/elastic/go-elasticsearch/v9/typedapi/security/clearcachedservicetokens
+github.com/elastic/go-elasticsearch/v9/typedapi/security/createapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/createcrossclusterapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/createservicetoken
+github.com/elastic/go-elasticsearch/v9/typedapi/security/delegatepki
+github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/deleterolemapping
+github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteservicetoken
+github.com/elastic/go-elasticsearch/v9/typedapi/security/deleteuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/disableuserprofile
+github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/enableuserprofile
+github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollkibana
+github.com/elastic/go-elasticsearch/v9/typedapi/security/enrollnode
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getbuiltinprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getrole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getrolemapping
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getserviceaccounts
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getservicecredentials
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/security/gettoken
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/getuserprofile
+github.com/elastic/go-elasticsearch/v9/typedapi/security/grantapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/hasprivilegesuserprofile
+github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidateapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/invalidatetoken
+github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcauthenticate
+github.com/elastic/go-elasticsearch/v9/typedapi/security/oidclogout
+github.com/elastic/go-elasticsearch/v9/typedapi/security/oidcprepareauthentication
+github.com/elastic/go-elasticsearch/v9/typedapi/security/putprivileges
+github.com/elastic/go-elasticsearch/v9/typedapi/security/putrole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/putrolemapping
+github.com/elastic/go-elasticsearch/v9/typedapi/security/putuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/queryapikeys
+github.com/elastic/go-elasticsearch/v9/typedapi/security/queryrole
+github.com/elastic/go-elasticsearch/v9/typedapi/security/queryuser
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samlauthenticate
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samlcompletelogout
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samlinvalidate
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samllogout
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samlprepareauthentication
+github.com/elastic/go-elasticsearch/v9/typedapi/security/samlserviceprovidermetadata
+github.com/elastic/go-elasticsearch/v9/typedapi/security/suggestuserprofiles
+github.com/elastic/go-elasticsearch/v9/typedapi/security/updateapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/updatecrossclusterapikey
+github.com/elastic/go-elasticsearch/v9/typedapi/security/updatesettings
+github.com/elastic/go-elasticsearch/v9/typedapi/security/updateuserprofiledata
+github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/deletenode
+github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/getnode
+github.com/elastic/go-elasticsearch/v9/typedapi/shutdown/putnode
+github.com/elastic/go-elasticsearch/v9/typedapi/simulate/ingest
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/deletelifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/executelifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/executeretention
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/getlifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstats
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/getstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/putlifecycle
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/start
+github.com/elastic/go-elasticsearch/v9/typedapi/slm/stop
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/cleanuprepository
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/clone
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/create
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/createrepository
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/delete
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/deleterepository
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/get
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/getrepository
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryanalyze
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/repositoryverifyintegrity
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/restore
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/status
+github.com/elastic/go-elasticsearch/v9/typedapi/snapshot/verifyrepository
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/clearcursor
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/deleteasync
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasync
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/getasyncstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/query
+github.com/elastic/go-elasticsearch/v9/typedapi/sql/translate
+github.com/elastic/go-elasticsearch/v9/typedapi/ssl/certificates
+github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsdisable
+github.com/elastic/go-elasticsearch/v9/typedapi/streams/logsenable
+github.com/elastic/go-elasticsearch/v9/typedapi/streams/status
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonym
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/deletesynonymrule
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonym
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymrule
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/getsynonymssets
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonym
+github.com/elastic/go-elasticsearch/v9/typedapi/synonyms/putsynonymrule
+github.com/elastic/go-elasticsearch/v9/typedapi/tasks/cancel
+github.com/elastic/go-elasticsearch/v9/typedapi/tasks/get
+github.com/elastic/go-elasticsearch/v9/typedapi/tasks/list
+github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findfieldstructure
+github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findmessagestructure
+github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/findstructure
+github.com/elastic/go-elasticsearch/v9/typedapi/textstructure/testgrokpattern
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/deletetransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/getnodestats
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/gettransformstats
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/previewtransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/puttransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/resettransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/schedulenowtransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/starttransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/stoptransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/updatetransform
+github.com/elastic/go-elasticsearch/v9/typedapi/transform/upgradetransforms
+github.com/elastic/go-elasticsearch/v9/typedapi/types
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/accesstokengranttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/acknowledgementoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionexecutionmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actionstatusoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/actiontype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/alibabacloudservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/allocationexplaindecision
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonbedrockservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerapi
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/amazonsagemakerservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/anthropicservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeygranttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/apikeytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/appliesto
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureaistudioservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/azureopenaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/boundaryscanner
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/bytes
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/calendarinterval
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cardinalityexecutionmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catanomalydetectorcolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdatafeedcolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catdfacolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/categorizationstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catnodecolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catrecoverycolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsegmentscolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catshardcolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catsnapshotscolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/catthreadpoolcolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattrainedmodelscolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cattransformcolumn
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/childscoremode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/chunkingmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cjkbigramignoredscript
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterinfotarget
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clusterprivilege
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/clustersearchstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereembeddingtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereinputtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/cohereservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheresimilaritytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/coheretruncatetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldsoperator
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/combinedfieldszeroterms
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionop
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditionoperator
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conditiontype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/conflicts
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectionscheme
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorfieldtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/connectorstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/converttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/customservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataattachmentformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/datafeedstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dataframestate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/day
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/decision
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deepseekservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/delimitedpayloadencoding
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorelementtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorindexoptionstype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/densevectorsimilarity
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentallocationstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deploymentassignmentstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/deprecationlevel
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfiindependencemeasure
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfraftereffect
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dfrbasicmodel
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/displaytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/distanceunit
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/dynamicmapping
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ecscompatibilitytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/edgengramside
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elasticsearchservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/elserservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/emailpriority
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/enrichpolicyphase
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlclusterstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/esqlformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/excludefrequent
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionphase
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/executionstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/expandwildcard
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/failurestorestatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/feature
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldsortnumerictype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fieldvaluefactormodifier
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringpolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringrulerule
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filteringvalidationstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/filtertype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/fingerprintdigest
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/followerindexstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/formattype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionboostmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/functionscoremode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gappolicy
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geodistancetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoexecution
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtargetformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geogridtiletype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoorientation
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geopointmetrictype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geoshaperelation
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geostrategy
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/geovalidationmethod
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googleaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/googlevertexaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/granttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridaggregationtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/gridtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/groupby
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/healthstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterencoder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterfragmenter
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlighterorder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertagsschema
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/highlightertype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/holtwinterstype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/httpinputmethod
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/huggingfaceservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ibdistribution
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/iblambda
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationalternate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationcasefirst
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationdecomposition
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icucollationstrength
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icunormalizationtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/icutransformdirection
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/impactarea
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/include
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexcheckonstartup
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexingjobstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmetadatastate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexprivilege
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingallocationoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indexroutingrebalanceoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/indicatorhealthstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/inputtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaisimilaritytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jinaaitextembeddingtask
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobblockedreason
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jobstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/jsonprocessorconflictstrategy
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/keeptypesmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/kuromojitokenizationmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/level
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensestatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/licensetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lifecycleoperationmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/lowercasetokenfilterlanguages
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/managedby
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/matchtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/memorystatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/metric
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/migrationstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/minimuminterval
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/missingorder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/mistralservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/modeenum
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/month
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/multivaluemode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noderole
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/noridecompoundmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalization
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/normalizemethod
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/numericfielddataformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/onscripterror
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/openaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operationtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/operator
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/optype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutycontexttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pagerdutyeventtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/painlesscontext
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticencoder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticlanguage
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticnametype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/phoneticruletype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/pipelinesimulationstatusoptions
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/policytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/quantifier
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryrulecriteriatype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/queryruletype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rangerelation
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/rankvectorelementtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ratemode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/refresh
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/remoteclusterprivilege
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/responsecontenttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/restrictionworkflow
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/result
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/resultposition
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/routingstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ruleaction
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/runtimefieldtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sampleraggregationexecutionhint
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scoremode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scorenormalizer
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptlanguage
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/scriptsorttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/searchtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmissing
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/segmentsortorder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shapetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardroutingstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardsstatsstage
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstoreallocation
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shardstorestatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdownstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/shutdowntype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotsort
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snapshotupgradestate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/snowballlanguage
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sortorder
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcefieldmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sourcemode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/sqlformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/statslevel
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/storagetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/stringdistance
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/subobjects
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/suggestsort
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtriggermethod
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncjobtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syncstatus
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/synonymformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/syntheticsourcekeepenum
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypealibabacloudai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonbedrock
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeamazonsagemaker
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeanthropic
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureaistudio
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeazureopenai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecohere
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypecustom
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypedeepseek
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelasticsearch
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeelser
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegoogleaistudio
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypegooglevertexai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypehuggingface
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypejinaai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypemistral
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypeopenai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypevoyageai
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tasktypewatsonx
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tdigestexecutionhint
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/templateformat
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationcollectmode
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termsaggregationexecutionhint
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/termvectoroption
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/textquerytype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/threadtype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeseriesmetrictype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/timeunit
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenchar
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/tokenizationtruncate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/totalhitsrelation
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainedmodeltype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/trainingpriority
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/translogdurability
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/ttesttype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/type_
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/unassignedinformationreason
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/useragentproperty
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/valuetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/versiontype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/voyageaiservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/waitforevents
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watcherstate
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/watsonxservicetype
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/xpackcategory
+github.com/elastic/go-elasticsearch/v9/typedapi/types/enums/zerotermsquery
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/ackwatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/activatewatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deactivatewatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/deletewatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/executewatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getsettings
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/getwatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/putwatch
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/querywatches
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/start
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stats
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/stop
+github.com/elastic/go-elasticsearch/v9/typedapi/watcher/updatesettings
+github.com/elastic/go-elasticsearch/v9/typedapi/xpack/info
+github.com/elastic/go-elasticsearch/v9/typedapi/xpack/usage
 # github.com/emicklei/go-restful/v3 v3.12.1
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
@@ -701,6 +1550,10 @@ github.com/go-errors/errors
 # github.com/go-logr/logr v1.4.2
 ## explicit; go 1.18
 github.com/go-logr/logr
+github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
 # github.com/go-openapi/jsonpointer v0.21.0
 ## explicit; go 1.20
 github.com/go-openapi/jsonpointer
@@ -931,6 +1784,32 @@ github.com/yudai/golcs
 # github.com/zeebo/xxh3 v1.0.2
 ## explicit; go 1.17
 github.com/zeebo/xxh3
+# go.opentelemetry.io/auto/sdk v1.1.0
+## explicit; go 1.22.0
+go.opentelemetry.io/auto/sdk
+go.opentelemetry.io/auto/sdk/internal/telemetry
+# go.opentelemetry.io/otel v1.35.0
+## explicit; go 1.22.0
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/baggage
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/v1.26.0
+# go.opentelemetry.io/otel/metric v1.35.0
+## explicit; go 1.22.0
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/embedded
+# go.opentelemetry.io/otel/trace v1.35.0
+## explicit; go 1.22.0
+go.opentelemetry.io/otel/trace
+go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/internal/telemetry
+go.opentelemetry.io/otel/trace/noop
 # go.virtual-secrets.dev/apimachinery v0.0.1
 ## explicit; go 1.23.0
 go.virtual-secrets.dev/apimachinery/apis/virtual/v1alpha1
@@ -1614,7 +2493,7 @@ kmodules.xyz/client-go/meta
 kmodules.xyz/client-go/policy/secomp
 kmodules.xyz/client-go/tools/exec
 kmodules.xyz/client-go/tools/portforward
-# kmodules.xyz/custom-resources v0.32.0
+# kmodules.xyz/custom-resources v0.32.2
 ## explicit; go 1.23.0
 kmodules.xyz/custom-resources/apis/appcatalog
 kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1
@@ -1645,7 +2524,7 @@ kmodules.xyz/prober/api/v1
 kmodules.xyz/resource-metadata/apis/node
 kmodules.xyz/resource-metadata/apis/node/v1alpha1
 kmodules.xyz/resource-metadata/crds
-# kubedb.dev/apimachinery v0.58.0
+# kubedb.dev/apimachinery v0.59.0
 ## explicit; go 1.24.0
 kubedb.dev/apimachinery/apis
 kubedb.dev/apimachinery/apis/archiver/v1alpha1
@@ -1690,7 +2569,7 @@ kubedb.dev/apimachinery/client/clientset/versioned/typed/ui/v1alpha1
 kubedb.dev/apimachinery/crds
 kubedb.dev/apimachinery/pkg/double_optin
 kubedb.dev/apimachinery/pkg/factory
-# kubedb.dev/db-client-go v0.13.0
+# kubedb.dev/db-client-go v0.14.0
 ## explicit; go 1.24.0
 kubedb.dev/db-client-go/elasticsearch
 kubedb.dev/db-client-go/redis
@@ -1712,7 +2591,7 @@ kubeops.dev/petset/crds
 kubeops.dev/sidekick/apis/apps
 kubeops.dev/sidekick/apis/apps/v1alpha1
 kubeops.dev/sidekick/crds
-# kubestash.dev/apimachinery v0.20.0
+# kubestash.dev/apimachinery v0.21.0
 ## explicit; go 1.23.0
 kubestash.dev/apimachinery/apis
 kubestash.dev/apimachinery/apis/core/v1alpha1